/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */
/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
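/*
 * Concretely, each FNAME() function below is stamped out once per PTTYPE:
 * with the macros defined next, FNAME(walk_addr) expands to
 * paging64_walk_addr, paging32_walk_addr or ept_walk_addr depending on
 * which variant of this file is being compiled.
 */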
#if PTTYPE == 64
        #define pt_element_t u64
        #define guest_walker guest_walker64
        #define FNAME(name) paging##64_##name
        #define PT_BASE_ADDR_MASK GUEST_PT64_BASE_ADDR_MASK
        #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
        #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
        #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_BITS PT64_LEVEL_BITS
        #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
        #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
        #define PT_HAVE_ACCESSED_DIRTY(mmu) true
        #ifdef CONFIG_X86_64
        #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
        #define CMPXCHG cmpxchg
        #else
        #define CMPXCHG cmpxchg64
        #define PT_MAX_FULL_LEVELS 2
        #endif
#elif PTTYPE == 32
        #define pt_element_t u32
        #define guest_walker guest_walker32
        #define FNAME(name) paging##32_##name
        #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
        #define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
        #define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
        #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
        #define PT_LEVEL_BITS PT32_LEVEL_BITS
        #define PT_MAX_FULL_LEVELS 2
        #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
        #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
        #define PT_HAVE_ACCESSED_DIRTY(mmu) true
        #define CMPXCHG cmpxchg
#elif PTTYPE == PTTYPE_EPT
        #define pt_element_t u64
        #define guest_walker guest_walkerEPT
        #define FNAME(name) ept_##name
        #define PT_BASE_ADDR_MASK GUEST_PT64_BASE_ADDR_MASK
        #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
        #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
        #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_BITS PT64_LEVEL_BITS
        #define PT_GUEST_DIRTY_SHIFT 9
        #define PT_GUEST_ACCESSED_SHIFT 8
        #define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad)
        #define CMPXCHG cmpxchg64
        #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
#else
        #error Invalid PTTYPE value
#endif
#define PT_GUEST_DIRTY_MASK    (1 << PT_GUEST_DIRTY_SHIFT)
#define PT_GUEST_ACCESSED_MASK (1 << PT_GUEST_ACCESSED_SHIFT)

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PG_LEVEL_4K)
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
        int level;
        unsigned max_level;
        gfn_t table_gfn[PT_MAX_FULL_LEVELS];
        pt_element_t ptes[PT_MAX_FULL_LEVELS];
        pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
        gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
        pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
        bool pte_writable[PT_MAX_FULL_LEVELS];
        unsigned int pt_access[PT_MAX_FULL_LEVELS];
        unsigned int pte_access;
        gfn_t gfn;
        struct x86_exception fault;
};
static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
        return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}
static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,
                                             unsigned gpte)
{
        unsigned mask;

        /* dirty bit is not supported, so no need to track it */
        if (!PT_HAVE_ACCESSED_DIRTY(mmu))
                return;

        BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);

        mask = (unsigned)~ACC_WRITE_MASK;
        /* Allow write access to dirty gptes */
        mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
                PT_WRITABLE_MASK;
        *access &= mask;
}
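/*
 * Worked example for the shift above, for PTTYPE == 64/32: the dirty bit is
 * bit 6 (PT_DIRTY_SHIFT) and the writable bit is bit 1 (PT_WRITABLE_SHIFT),
 * so gpte >> 5 moves the gpte's dirty bit into the PT_WRITABLE_MASK
 * position.  A clean gpte therefore loses ACC_WRITE_MASK (forcing a write
 * fault so the dirty bit can be set), while a dirty gpte keeps it.  For EPT
 * the same trick works with the dirty bit at bit 9 shifted down by 8.
 */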
static inline int FNAME(is_present_gpte)(unsigned long pte)
{
#if PTTYPE != PTTYPE_EPT
        return pte & PT_PRESENT_MASK;
#else
        return pte & 7;
#endif
}
static bool FNAME(is_bad_mt_xwr)(struct rsvd_bits_validate *rsvd_check, u64 gpte)
{
#if PTTYPE != PTTYPE_EPT
        return false;
#else
        return __is_bad_mt_xwr(rsvd_check, gpte);
#endif
}
static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
{
        return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) ||
               FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);
}
static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                               pt_element_t __user *ptep_user, unsigned index,
                               pt_element_t orig_pte, pt_element_t new_pte)
{
        int npages;
        pt_element_t ret;
        pt_element_t *table;
        struct page *page;

        npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page);
        if (likely(npages == 1)) {
                table = kmap_atomic(page);
                ret = CMPXCHG(&table[index], orig_pte, new_pte);
                kunmap_atomic(table);

                kvm_release_page_dirty(page);
        } else {
                struct vm_area_struct *vma;
                unsigned long vaddr = (unsigned long)ptep_user & PAGE_MASK;
                unsigned long pfn;
                unsigned long paddr;

                mmap_read_lock(current->mm);
                vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
                if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
                        mmap_read_unlock(current->mm);
                        return -EFAULT;
                }
                pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
                paddr = pfn << PAGE_SHIFT;
                table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
                if (!table) {
                        mmap_read_unlock(current->mm);
                        return -EFAULT;
                }
                ret = CMPXCHG(&table[index], orig_pte, new_pte);
                memunmap(table);
                mmap_read_unlock(current->mm);
        }

        return (ret != orig_pte);
}
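/*
 * Note on the return value: cmpxchg_gpte() returns 0 when new_pte was
 * installed and non-zero when the gpte changed underneath us, in which
 * case the callers (update_accessed_dirty_bits() and the retry_walk logic
 * in walk_addr_generic()) restart the guest page-table walk.
 */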
static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu_page *sp, u64 *spte,
                                  u64 gpte)
{
        if (!FNAME(is_present_gpte)(gpte))
                goto no_present;

        /* if accessed bit is not supported, prefetch non-accessed gpte */
        if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&
            !(gpte & PT_GUEST_ACCESSED_MASK))
                goto no_present;

        if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K))
                goto no_present;

        return false;

no_present:
        drop_spte(vcpu->kvm, spte);
        return true;
}
/*
 * For PTTYPE_EPT, a page table can be executable but not readable
 * on supported processors. Therefore, set_spte does not automatically
 * set bit 0 if execute only is supported. Here, we repurpose ACC_USER_MASK
 * to signify readability since it isn't used in the EPT case.
 */
static inline unsigned FNAME(gpte_access)(u64 gpte)
{
        unsigned access;
#if PTTYPE == PTTYPE_EPT
        access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
                ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
                ((gpte & VMX_EPT_READABLE_MASK) ? ACC_USER_MASK : 0);
#else
        BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK);
        BUILD_BUG_ON(ACC_EXEC_MASK != 1);
        access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK);
        /* Combine NX with P (which is set here) to get ACC_EXEC_MASK.  */
        access ^= (gpte >> PT64_NX_SHIFT);
#endif

        return access;
}
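/*
 * Worked example for the XOR above: ACC_EXEC_MASK == PT_PRESENT_MASK ==
 * bit 0 (asserted by the BUILD_BUG_ONs), and PT64_NX_SHIFT is 63, so
 * "gpte >> PT64_NX_SHIFT" leaves exactly the NX bit at bit 0.  For a
 * present gpte, bit 0 of access is 1, so the XOR clears ACC_EXEC_MASK
 * iff NX is set, and leaves it set iff NX is clear.
 */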
static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
                                             struct kvm_mmu *mmu,
                                             struct guest_walker *walker,
                                             gpa_t addr, int write_fault)
{
        unsigned level, index;
        pt_element_t pte, orig_pte;
        pt_element_t __user *ptep_user;
        gfn_t table_gfn;
        int ret;

        /* dirty/accessed bits are not supported, so no need to update them */
        if (!PT_HAVE_ACCESSED_DIRTY(mmu))
                return 0;

        for (level = walker->max_level; level >= walker->level; --level) {
                pte = orig_pte = walker->ptes[level - 1];
                table_gfn = walker->table_gfn[level - 1];
                ptep_user = walker->ptep_user[level - 1];
                index = offset_in_page(ptep_user) / sizeof(pt_element_t);
                if (!(pte & PT_GUEST_ACCESSED_MASK)) {
                        trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
                        pte |= PT_GUEST_ACCESSED_MASK;
                }
                if (level == walker->level && write_fault &&
                    !(pte & PT_GUEST_DIRTY_MASK)) {
                        trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
#if PTTYPE == PTTYPE_EPT
                        if (kvm_x86_ops.nested_ops->write_log_dirty(vcpu, addr))
                                return -EINVAL;
#endif
                        pte |= PT_GUEST_DIRTY_MASK;
                }
                if (pte == orig_pte)
                        continue;

                /*
                 * If the slot is read-only, simply do not process the accessed
                 * and dirty bits.  This is the correct thing to do if the slot
                 * is ROM, and page tables in read-as-ROM/write-as-MMIO slots
                 * are only supported if the accessed and dirty bits are already
                 * set in the ROM (so that MMIO writes are never needed).
                 *
                 * Note that NPT does not allow this at all and faults, since
                 * it always wants nested page table entries for the guest
                 * page tables to be writable.  And EPT works but will simply
                 * overwrite the read-only memory to set the accessed and dirty
                 * bits.
                 */
                if (unlikely(!walker->pte_writable[level - 1]))
                        continue;

                ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
                if (ret)
                        return ret;

                kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
                walker->ptes[level - 1] = pte;
        }
        return 0;
}
static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
{
        unsigned pkeys = 0;
#if PTTYPE == 64
        pte_t pte = {.pte = gpte};

        pkeys = pte_flags_pkey(pte_flags(pte));
#endif
        return pkeys;
}
static inline bool FNAME(is_last_gpte)(struct kvm_mmu *mmu,
                                       unsigned int level, unsigned int gpte)
{
        /*
         * For EPT and PAE paging (both variants), bit 7 is either reserved
         * at all levels or indicates a huge page (ignoring CR3/EPTP).  In
         * either case, bit 7 being set terminates the walk.
         */
#if PTTYPE == 32
        /*
         * 32-bit paging requires special handling because bit 7 is ignored
         * if CR4.PSE=0, not reserved.  Clear bit 7 in the gpte if the level
         * is greater than the last level for which bit 7 is the PAGE_SIZE
         * bit.
         *
         * The RHS has bit 7 set iff level < (2 + PSE).  If it is clear, bit 7
         * is not reserved and does not indicate a large page at this level,
         * so clear PT_PAGE_SIZE_MASK in gpte if that is the case.
         */
        gpte &= level - (PT32_ROOT_LEVEL + mmu->mmu_role.ext.cr4_pse);
#endif
        /*
         * PG_LEVEL_4K always terminates.  The RHS has bit 7 set
         * iff level <= PG_LEVEL_4K, which for our purpose means
         * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.
         */
        gpte |= level - PG_LEVEL_4K - 1;

        return gpte & PT_PAGE_SIZE_MASK;
}
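/*
 * Worked example of the unsigned arithmetic above.  With PG_LEVEL_4K == 1,
 * "level - PG_LEVEL_4K - 1" is 0 for level 2 (gpte unchanged) and wraps to
 * ~0u for level 1, forcibly setting bit 7 so a 4K walk always terminates.
 * Similarly, for PTTYPE == 32 with PT32_ROOT_LEVEL == 2: at level 2 with
 * CR4.PSE=0 the AND is with 0, clearing bit 7 (PS is ignored), while with
 * CR4.PSE=1 the AND is with ~0u, preserving bit 7 (PS is honored).
 */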
/*
 * Fetch a guest pte for a guest virtual address, or for an L2's GPA.
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
                                    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                    gpa_t addr, u32 access)
{
        int ret;
        pt_element_t pte;
        pt_element_t __user *ptep_user;
        gfn_t table_gfn;
        u64 pt_access, pte_access;
        unsigned index, accessed_dirty, pte_pkey;
        unsigned nested_access;
        gpa_t pte_gpa;
        bool have_ad;
        int offset;
        u64 walk_nx_mask = 0;
        const int write_fault = access & PFERR_WRITE_MASK;
        const int user_fault  = access & PFERR_USER_MASK;
        const int fetch_fault = access & PFERR_FETCH_MASK;
        u16 errcode = 0;
        gpa_t real_gpa;
        gfn_t gfn;

        trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
        walker->level = mmu->root_level;
        pte           = mmu->get_guest_pgd(vcpu);
        have_ad       = PT_HAVE_ACCESSED_DIRTY(mmu);

#if PTTYPE == 64
        walk_nx_mask = 1ULL << PT64_NX_SHIFT;
        if (walker->level == PT32E_ROOT_LEVEL) {
                pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
                trace_kvm_mmu_paging_element(pte, walker->level);
                if (!FNAME(is_present_gpte)(pte))
                        goto error;
                --walker->level;
        }
#endif
        walker->max_level = walker->level;
        ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));

        /*
         * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging
         * by the MOV to CR instruction are treated as reads and do not cause the
         * processor to set the dirty flag in any EPT paging-structure entry.
         */
        nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK;

        pte_access = ~0;
        ++walker->level;

        do {
                unsigned long host_addr;

                pt_access = pte_access;
                --walker->level;

                index = PT_INDEX(addr, walker->level);
                table_gfn = gpte_to_gfn(pte);
                offset    = index * sizeof(pt_element_t);
                pte_gpa   = gfn_to_gpa(table_gfn) + offset;

                BUG_ON(walker->level < 1);
                walker->table_gfn[walker->level - 1] = table_gfn;
                walker->pte_gpa[walker->level - 1] = pte_gpa;

                real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
                                              nested_access, &walker->fault);

                /*
                 * FIXME: This can happen if emulation (e.g. of an INS/OUTS
                 * instruction) triggers a nested page fault.  The exit
                 * qualification / exit info field will incorrectly have
                 * "guest page access" as the nested page fault's cause,
                 * instead of "guest page structure access".  To fix this,
                 * the x86_exception struct should be augmented with enough
                 * information to fix the exit_qualification or exit_info_1
                 * fields.
                 */
                if (unlikely(real_gpa == UNMAPPED_GVA))
                        return 0;

                host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gpa_to_gfn(real_gpa),
                                            &walker->pte_writable[walker->level - 1]);
                if (unlikely(kvm_is_error_hva(host_addr)))
                        goto error;

                ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
                if (unlikely(__get_user(pte, ptep_user)))
                        goto error;
                walker->ptep_user[walker->level - 1] = ptep_user;

                trace_kvm_mmu_paging_element(pte, walker->level);

                /*
                 * Inverting the NX bit lets us AND it like the other
                 * permission bits.
                 */
                pte_access = pt_access & (pte ^ walk_nx_mask);

                if (unlikely(!FNAME(is_present_gpte)(pte)))
                        goto error;

                if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, walker->level))) {
                        errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
                        goto error;
                }

                walker->ptes[walker->level - 1] = pte;

                /* Convert to ACC_*_MASK flags for struct guest_walker.  */
                walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
        } while (!FNAME(is_last_gpte)(mmu, walker->level, pte));
        pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
        accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;

        /* Convert to ACC_*_MASK flags for struct guest_walker.  */
        walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
        errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
        if (unlikely(errcode))
                goto error;

        gfn = gpte_to_gfn_lvl(pte, walker->level);
        gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;

        if (PTTYPE == 32 && walker->level > PG_LEVEL_4K && is_cpuid_PSE36())
                gfn += pse36_gfn_delta(pte);

        real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
        if (real_gpa == UNMAPPED_GVA)
                return 0;

        walker->gfn = real_gpa >> PAGE_SHIFT;

        if (!write_fault)
                FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte);
        else
                /*
                 * On a write fault, fold the dirty bit into accessed_dirty.
                 * For modes without A/D bits support accessed_dirty will be
                 * always clear.
                 */
                accessed_dirty &= pte >>
                        (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
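        /*
         * In every mode the dirty bit sits one position above the accessed
         * bit (6 vs. 5 for regular paging, 9 vs. 8 for EPT), so the shift
         * above is always 1.  accessed_dirty holds PT_GUEST_ACCESSED_MASK
         * iff the leaf's accessed bit was set; after the AND it stays
         * non-zero only if the dirty bit is set too, letting the slow
         * update_accessed_dirty_bits() path below be skipped entirely.
         */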
        if (unlikely(!accessed_dirty)) {
                ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
                                                        addr, write_fault);
                if (unlikely(ret < 0))
                        goto error;
                else if (ret)
                        goto retry_walk;
        }

        pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
                 __func__, (u64)pte, walker->pte_access,
                 walker->pt_access[walker->level - 1]);
        return 1;
error:
        errcode |= write_fault | user_fault;
        if (fetch_fault && (is_efer_nx(mmu) || is_cr4_smep(mmu)))
                errcode |= PFERR_FETCH_MASK;

        walker->fault.vector = PF_VECTOR;
        walker->fault.error_code_valid = true;
        walker->fault.error_code = errcode;

#if PTTYPE == PTTYPE_EPT
        /*
         * Use PFERR_RSVD_MASK in error_code to tell whether an EPT
         * misconfiguration needs to be injected.  The detection is
         * done by is_rsvd_bits_set() above.
         *
         * We set up the value of exit_qualification to inject:
         * [2:0]  - Derived from the access bits.  The exit_qualification
         *          might be out of date if it is serving an EPT
         *          misconfiguration.
         * [5:3]  - Calculated by the page walk of the guest EPT page tables
         * [8:7]  - Derived from [8:7] of the real exit_qualification
         *
         * The other bits are set to 0.
         */
        if (!(errcode & PFERR_RSVD_MASK)) {
                vcpu->arch.exit_qualification &= 0x180;
                if (write_fault)
                        vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_WRITE;
                if (user_fault)
                        vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_READ;
                if (fetch_fault)
                        vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_INSTR;
                vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3;
        }
#endif
        walker->fault.address = addr;
        walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
        walker->fault.async_page_fault = false;

        trace_kvm_mmu_walker_error(walker->fault.error_code);
        return 0;
}
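/*
 * Both wrappers below inherit walk_addr_generic()'s contract: they return
 * 1 on a successful translation and 0 on failure, with walker->fault
 * filled in on failure so the caller can inject it into the guest.
 */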
static int FNAME(walk_addr)(struct guest_walker *walker,
                            struct kvm_vcpu *vcpu, gpa_t addr, u32 access)
{
        return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
                                        access);
}

#if PTTYPE != PTTYPE_EPT
static int FNAME(walk_addr_nested)(struct guest_walker *walker,
                                   struct kvm_vcpu *vcpu, gva_t addr,
                                   u32 access)
{
        return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
                                        addr, access);
}
#endif
static bool
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                     u64 *spte, pt_element_t gpte, bool no_dirty_log)
{
        unsigned pte_access;
        gfn_t gfn;
        kvm_pfn_t pfn;

        if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
                return false;

        pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);

        gfn = gpte_to_gfn(gpte);
        pte_access = sp->role.access & FNAME(gpte_access)(gpte);
        FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
        pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
                        no_dirty_log && (pte_access & ACC_WRITE_MASK));
        if (is_error_pfn(pfn))
                return false;

        /*
         * we call mmu_set_spte() with host_writable = true because
         * pte_prefetch_gfn_to_pfn always gets a writable pfn.
         */
        mmu_set_spte(vcpu, spte, pte_access, false, PG_LEVEL_4K, gfn, pfn,
                     true, true);

        kvm_release_pfn_clean(pfn);
        return true;
}
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                              u64 *spte, const void *pte)
{
        pt_element_t gpte = *(const pt_element_t *)pte;

        FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
}
static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
                                struct guest_walker *gw, int level)
{
        pt_element_t curr_pte;
        gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
        u64 mask;
        int r, index;

        if (level == PG_LEVEL_4K) {
                mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
                base_gpa = pte_gpa & ~mask;
                index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

                r = kvm_vcpu_read_guest_atomic(vcpu, base_gpa,
                                gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
                curr_pte = gw->prefetch_ptes[index];
        } else
                r = kvm_vcpu_read_guest_atomic(vcpu, pte_gpa,
                                  &curr_pte, sizeof(curr_pte));

        return r || curr_pte != gw->ptes[level - 1];
}
static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
                                u64 *sptep)
{
        struct kvm_mmu_page *sp;
        pt_element_t *gptep = gw->prefetch_ptes;
        u64 *spte;
        int i;

        sp = sptep_to_sp(sptep);

        if (sp->role.level > PG_LEVEL_4K)
                return;

        /*
         * If addresses are being invalidated, skip prefetching to avoid
         * accidentally prefetching those addresses.
         */
        if (unlikely(vcpu->kvm->mmu_notifier_count))
                return;

        if (sp->role.direct)
                return __direct_pte_prefetch(vcpu, sp, sptep);

        i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
        spte = sp->spt + i;

        for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
                if (spte == sptep)
                        continue;

                if (is_shadow_present_pte(*spte))
                        continue;

                if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
                        break;
        }
}
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 * If the guest tries to write a write-protected page, we need to
 * emulate this operation and return 1 to indicate this case.
 */
static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
                        struct guest_walker *gw, u32 error_code,
                        int max_level, kvm_pfn_t pfn, bool map_writable,
                        bool prefault)
{
        bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
        bool write_fault = error_code & PFERR_WRITE_MASK;
        bool exec = error_code & PFERR_FETCH_MASK;
        bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
        struct kvm_mmu_page *sp = NULL;
        struct kvm_shadow_walk_iterator it;
        unsigned int direct_access, access;
        int top_level, level, req_level, ret;
        gfn_t base_gfn = gw->gfn;

        direct_access = gw->pte_access;

        top_level = vcpu->arch.mmu->root_level;
        if (top_level == PT32E_ROOT_LEVEL)
                top_level = PT32_ROOT_LEVEL;
        /*
         * Verify that the top-level gpte is still there.  Since the page
         * is a root page, it is either write protected (and cannot be
         * changed from now on) or it is invalid (in which case, we don't
         * really care if it changes underneath us after this point).
         */
        if (FNAME(gpte_changed)(vcpu, gw, top_level))
                goto out_gpte_changed;

        if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
                goto out_gpte_changed;
        for (shadow_walk_init(&it, vcpu, addr);
             shadow_walk_okay(&it) && it.level > gw->level;
             shadow_walk_next(&it)) {
                gfn_t table_gfn;

                clear_sp_write_flooding_count(it.sptep);
                drop_large_spte(vcpu, it.sptep);

                sp = NULL;
                if (!is_shadow_present_pte(*it.sptep)) {
                        table_gfn = gw->table_gfn[it.level - 2];
                        access = gw->pt_access[it.level - 2];
                        sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
                                              false, access);
                }

                /*
                 * Verify that the gpte in the page we've just write
                 * protected is still there.
                 */
                if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
                        goto out_gpte_changed;

                if (sp)
                        link_shadow_page(vcpu, it.sptep, sp);
        }

        level = kvm_mmu_hugepage_adjust(vcpu, gw->gfn, max_level, &pfn,
                                        huge_page_disallowed, &req_level);

        trace_kvm_mmu_spte_requested(addr, gw->level, pfn);

        for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
                clear_sp_write_flooding_count(it.sptep);

                /*
                 * We cannot overwrite existing page tables with an NX
                 * large page, as the leaf could be executable.
                 */
                if (nx_huge_page_workaround_enabled)
                        disallowed_hugepage_adjust(*it.sptep, gw->gfn, it.level,
                                                   &pfn, &level);

                base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
                if (it.level == level)
                        break;

                validate_direct_spte(vcpu, it.sptep, direct_access);

                drop_large_spte(vcpu, it.sptep);

                if (!is_shadow_present_pte(*it.sptep)) {
                        sp = kvm_mmu_get_page(vcpu, base_gfn, addr,
                                              it.level - 1, true, direct_access);
                        link_shadow_page(vcpu, it.sptep, sp);
                        if (huge_page_disallowed && req_level >= it.level)
                                account_huge_nx_page(vcpu->kvm, sp);
                }
        }

        ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
                           it.level, base_gfn, pfn, prefault, map_writable);
        if (ret == RET_PF_SPURIOUS)
                return ret;

        FNAME(pte_prefetch)(vcpu, gw, it.sptep);
        ++vcpu->stat.pf_fixed;
        return ret;

out_gpte_changed:
        return RET_PF_RETRY;
}
/*
 * To see whether the mapped gfn can write its page table in the current
 * mapping.
 *
 * It is a helper for FNAME(page_fault).  When the guest uses a large page
 * to map a writable gfn that is itself in use as a page table, we must
 * force KVM to map it with a small page, because a new shadow page will
 * be created when KVM shadows that page table, which would stop KVM from
 * using the large page anyway.  Doing this early avoids unnecessary #PFs
 * and emulation.
 *
 * @write_fault_to_shadow_pgtable will return true if the fault gfn is
 * currently used as its page table.
 *
 * Note: the PDPT page table is not checked for PAE-32 bit guests.  That
 * is fine, since the PDPT is always shadowed, which means we can never
 * use a large page to map a gfn that is in use as a PDPT.
 */
static bool
FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
                              struct guest_walker *walker, bool user_fault,
                              bool *write_fault_to_shadow_pgtable)
{
        int level;
        gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
        bool self_changed = false;

        if (!(walker->pte_access & ACC_WRITE_MASK ||
              (!is_cr0_wp(vcpu->arch.mmu) && !user_fault)))
                return false;

        for (level = walker->level; level <= walker->max_level; level++) {
                gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];

                self_changed |= !(gfn & mask);
                *write_fault_to_shadow_pgtable |= !gfn;
        }

        return self_changed;
}
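/*
 * The XOR trick above: walker->gfn ^ walker->table_gfn[level - 1] is zero
 * iff the faulting gfn is exactly the page-table gfn (a true self-map),
 * and has no bits set under the ~(KVM_PAGES_PER_HPAGE(level) - 1) mask iff
 * the two gfns merely share the same large-page region, which is already
 * enough to force a 4K mapping.
 */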
/*
 * Page fault handler.  There are several causes for a page fault:
 *           - there is no shadow pte for the guest pte
 *           - write access through a shadow pte marked read only so that we can set
 *             the dirty bit
 *           - write access to a shadow pte marked read only so we can update the page
 *             dirty bitmap, when userspace requests it
 *           - mmio access; in this case we will never install a present shadow pte
 *           - normal guest page fault due to the guest pte marked not present, not
 *             writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
                             bool prefault)
{
        bool write_fault = error_code & PFERR_WRITE_MASK;
        bool user_fault = error_code & PFERR_USER_MASK;
        struct guest_walker walker;
        int r;
        kvm_pfn_t pfn;
        hva_t hva;
        unsigned long mmu_seq;
        bool map_writable, is_self_change_mapping;
        int max_level;

        pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

        /*
         * If PFEC.RSVD is set, this is a shadow page fault.
         * The bit needs to be cleared before walking guest page tables.
         */
        error_code &= ~PFERR_RSVD_MASK;

        /*
         * Look up the guest pte for the faulting address.
         */
        r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);

        /*
         * The page is not mapped by the guest.  Let the guest handle it.
         */
        if (!r) {
                pgprintk("%s: guest page fault\n", __func__);
                if (!prefault)
                        kvm_inject_emulated_page_fault(vcpu, &walker.fault);

                return RET_PF_RETRY;
        }

        if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
                shadow_page_table_clear_flood(vcpu, addr);
                return RET_PF_EMULATE;
        }

        r = mmu_topup_memory_caches(vcpu, true);
        if (r)
                return r;

        vcpu->arch.write_fault_to_shadow_pgtable = false;

        is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
              &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);

        if (is_self_change_mapping)
                max_level = PG_LEVEL_4K;
        else
                max_level = walker.level;

        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();

        if (kvm_faultin_pfn(vcpu, prefault, walker.gfn, addr, &pfn, &hva,
                         write_fault, &map_writable, &r))
                return r;

        if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
                return r;

        /*
         * Do not change pte_access if the pfn is a mmio page, otherwise
         * we will cache the incorrect access into mmio spte.
         */
        if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
            !is_cr0_wp(vcpu->arch.mmu) && !user_fault && !is_noslot_pfn(pfn)) {
                walker.pte_access |= ACC_WRITE_MASK;
                walker.pte_access &= ~ACC_USER_MASK;

                /*
                 * If we converted a user page to a kernel page,
                 * so that the kernel can write to it when cr0.wp=0,
                 * then we should prevent the kernel from executing it
                 * if SMEP is enabled.
                 */
                if (is_cr4_smep(vcpu->arch.mmu))
                        walker.pte_access &= ~ACC_EXEC_MASK;
        }

        r = RET_PF_RETRY;
        write_lock(&vcpu->kvm->mmu_lock);
        if (!is_noslot_pfn(pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, hva))
                goto out_unlock;

        kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
        r = make_mmu_pages_available(vcpu);
        if (r)
                goto out_unlock;
        r = FNAME(fetch)(vcpu, addr, &walker, error_code, max_level, pfn,
                         map_writable, prefault);
        kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);

out_unlock:
        write_unlock(&vcpu->kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        return r;
}
static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
        int offset = 0;

        WARN_ON(sp->role.level != PG_LEVEL_4K);

        if (PTTYPE == 32)
                offset = sp->role.quadrant << PT64_LEVEL_BITS;

        return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
{
        struct kvm_shadow_walk_iterator iterator;
        struct kvm_mmu_page *sp;
        u64 old_spte;
        int level;
        u64 *sptep;

        vcpu_clear_mmio_info(vcpu, gva);

        /*
         * No need to check return value here, rmap_can_add() can
         * help us to skip pte prefetch later.
         */
        mmu_topup_memory_caches(vcpu, true);

        if (!VALID_PAGE(root_hpa)) {
                WARN_ON(1);
                return;
        }

        write_lock(&vcpu->kvm->mmu_lock);
        for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) {
                level = iterator.level;
                sptep = iterator.sptep;

                sp = sptep_to_sp(sptep);
                old_spte = *sptep;
                if (is_last_spte(old_spte, level)) {
                        pt_element_t gpte;
                        gpa_t pte_gpa;

                        if (!sp->unsync)
                                break;

                        pte_gpa = FNAME(get_level1_sp_gpa)(sp);
                        pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

                        mmu_page_zap_pte(vcpu->kvm, sp, sptep, NULL);
                        if (is_shadow_present_pte(old_spte))
                                kvm_flush_remote_tlbs_with_address(vcpu->kvm,
                                        sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));

                        if (!rmap_can_add(vcpu))
                                break;

                        if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
                                                       sizeof(pt_element_t)))
                                break;

                        FNAME(update_pte)(vcpu, sp, sptep, &gpte);
                }

                if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
                        break;
        }
        write_unlock(&vcpu->kvm->mmu_lock);
}
/* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t addr, u32 access,
                               struct x86_exception *exception)
{
        struct guest_walker walker;
        gpa_t gpa = UNMAPPED_GVA;
        int r;

        r = FNAME(walk_addr)(&walker, vcpu, addr, access);

        if (r) {
                gpa = gfn_to_gpa(walker.gfn);
                gpa |= addr & ~PAGE_MASK;
        } else if (exception)
                *exception = walker.fault;

        return gpa;
}
#if PTTYPE != PTTYPE_EPT
/* Note, gva_to_gpa_nested() is only used to translate L2 GVAs. */
static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
                                      u32 access,
                                      struct x86_exception *exception)
{
        struct guest_walker walker;
        gpa_t gpa = UNMAPPED_GVA;
        int r;

#ifndef CONFIG_X86_64
        /* A 64-bit GVA should be impossible on 32-bit KVM. */
        WARN_ON_ONCE(vaddr >> 32);
#endif

        r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

        if (r) {
                gpa = gfn_to_gpa(walker.gfn);
                gpa |= vaddr & ~PAGE_MASK;
        } else if (exception)
                *exception = walker.fault;

        return gpa;
}
#endif
/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Note: we should flush all TLBs if a spte is dropped even though the guest
 * is responsible for it.  If we don't, kvm_mmu_notifier_invalidate_page and
 * kvm_mmu_notifier_invalidate_range_start will see that the mapped page is
 * no longer used by the guest and will not flush TLBs, so the guest would
 * still be allowed to access the freed pages.
 * We increase kvm->tlbs_dirty to delay the TLB flush in this case.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
        union kvm_mmu_page_role mmu_role = vcpu->arch.mmu->mmu_role.base;
        int i, nr_present = 0;
        bool host_writable;
        gpa_t first_pte_gpa;
        int set_spte_ret = 0;

        /*
         * Ignore various flags when verifying that it's safe to sync a shadow
         * page using the current MMU context.
         *
         *  - level: not part of the overall MMU role and will never match as
         *           the MMU's level tracks the root level
         *  - access: updated based on the new guest PTE
         *  - quadrant: not part of the overall MMU role (similar to level)
         */
        const union kvm_mmu_page_role sync_role_ign = {
                .level = 0xf,
                .access = 0x7,
                .quadrant = 0x3,
        };

        /*
         * Direct pages can never be unsync, and KVM should never attempt to
         * sync a shadow page for a different MMU context, e.g. if the role
         * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
         * reserved bits checks will be wrong, etc...
         */
        if (WARN_ON_ONCE(sp->role.direct ||
                         (sp->role.word ^ mmu_role.word) & ~sync_role_ign.word))
                return 0;

        first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);

        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
                unsigned pte_access;
                pt_element_t gpte;
                gpa_t pte_gpa;
                gfn_t gfn;

                if (!sp->spt[i])
                        continue;

                pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

                if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
                                               sizeof(pt_element_t)))
                        return 0;

                if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
                        /*
                         * Update spte before increasing tlbs_dirty to make
                         * sure no tlb flush is lost after spte is zapped; see
                         * the comments in kvm_flush_remote_tlbs().
                         */
                        smp_wmb();
                        vcpu->kvm->tlbs_dirty++;
                        continue;
                }

                gfn = gpte_to_gfn(gpte);
                pte_access = sp->role.access;
                pte_access &= FNAME(gpte_access)(gpte);
                FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);

                if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
                      &nr_present))
                        continue;

                if (gfn != sp->gfns[i]) {
                        drop_spte(vcpu->kvm, &sp->spt[i]);
                        /*
                         * The same as above where we are doing
                         * prefetch_invalid_gpte().
                         */
                        smp_wmb();
                        vcpu->kvm->tlbs_dirty++;
                        continue;
                }

                nr_present++;

                host_writable = sp->spt[i] & shadow_host_writable_mask;

                set_spte_ret |= set_spte(vcpu, &sp->spt[i],
                                         pte_access, PG_LEVEL_4K,
                                         gfn, spte_to_pfn(sp->spt[i]),
                                         true, false, host_writable);
        }

        if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
                kvm_flush_remote_tlbs(vcpu->kvm);

        return nr_present;
}
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG
#undef PT_GUEST_ACCESSED_MASK
#undef PT_GUEST_DIRTY_MASK
#undef PT_GUEST_DIRTY_SHIFT
#undef PT_GUEST_ACCESSED_SHIFT
#undef PT_HAVE_ACCESSED_DIRTY