KVM: x86: Use gpa_t for cr2/gpa to fix TDP support on 32-bit KVM
author     Sean Christopherson <sean.j.christopherson@intel.com>
           Fri, 6 Dec 2019 23:57:14 +0000 (15:57 -0800)
committer  Khalid Elmously <khalid.elmously@canonical.com>
           Fri, 13 Mar 2020 04:31:00 +0000 (00:31 -0400)
BugLink: https://bugs.launchpad.net/bugs/1866678
[ Upstream commit 736c291c9f36b07f8889c61764c28edce20e715d ]

Convert a plethora of parameters and variables in the MMU and page fault
flows from type gva_t to gpa_t to properly handle TDP on 32-bit KVM.

Thanks to PSE and PAE paging, 32-bit kernels can access 64-bit physical
addresses.  When TDP is enabled, the fault address is a guest physical
address and thus can be a 64-bit value, even when both KVM and its guest
are using 32-bit virtual addressing, e.g. VMX's VMCS.GUEST_PHYSICAL is a
64-bit field, not a natural width field.

Using a gva_t for the fault address means KVM will incorrectly drop the
upper 32 bits of the GPA.  Ditto for gva_to_gpa() when it is used to
translate L2 GPAs to L1 GPAs.
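
For illustration, a minimal userspace sketch of the truncation (the gva_t and
gpa_t widths below mirror a 32-bit x86 build and are assumptions of this
example, not code from the patch):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t gva_t;		/* unsigned long is 32 bits on 32-bit x86 */
	typedef uint64_t gpa_t;		/* guest physical addresses are 64 bits */

	int main(void)
	{
		gpa_t fault_gpa = 0x123456000ULL; /* GPA above 4GiB, reachable via PAE */
		gva_t cr2 = fault_gpa;            /* upper 32 bits silently dropped */

		printf("gpa 0x%llx stored in a gva_t reads back as 0x%lx\n",
		       (unsigned long long)fault_gpa, (unsigned long)cr2);
		return 0;
	}

Storing such a fault address in a gva_t-typed variable anywhere along the TDP
page fault or gva_to_gpa() path loses the high bits in exactly this way.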

Opportunistically rename variables and parameters to better reflect the
dual address modes, e.g. use "cr2_or_gpa" for fault addresses and plain
"addr" instead of "vaddr" when the address may be either a GVA or an L2
GPA.  Similarly, use "gpa" in the nonpaging_page_fault() flows to avoid
a confusing "gpa_t gva" declaration; this also sets the stage for a
future patch to combine nonpaging_page_fault() and tdp_page_fault() with
minimal churn.

Sprinkle in a few comments to document flows where an address is known
to be a GVA and thus can be safely truncated to a 32-bit value.  Add
WARNs in kvm_handle_page_fault() and FNAME(gva_to_gpa_nested)() to help
document such cases and detect bugs.

Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu.c
arch/x86/kvm/mmutrace.h
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/x86.c
include/linux/kvm_host.h
virt/kvm/async_pf.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b616408edef5e39002e0940ec6bb78e218532a12..ea661b8cbfa82bfbfd7cd005e2f9ff504e09f238 100644
@@ -336,12 +336,12 @@ struct kvm_mmu {
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
        unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
        u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
-       int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
+       int (*page_fault)(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err,
                          bool prefault);
        void (*inject_page_fault)(struct kvm_vcpu *vcpu,
                                  struct x86_exception *fault);
-       gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
-                           struct x86_exception *exception);
+       gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t gva_or_gpa,
+                           u32 access, struct x86_exception *exception);
        gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                               struct x86_exception *exception);
        int (*sync_page)(struct kvm_vcpu *vcpu,
@@ -1188,7 +1188,7 @@ enum emulation_result {
 #define EMULTYPE_SKIP              (1 << 2)
 #define EMULTYPE_RETRY             (1 << 3)
 #define EMULTYPE_NO_REEXECUTE      (1 << 4)
-int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                            int emulation_type, void *insn, int insn_len);
 
 static inline int emulate_instruction(struct kvm_vcpu *vcpu,
@@ -1293,7 +1293,7 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
                       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c000e35feccef79a786380e825a5fb5d61f28a94..e0aebdf3901e40d7f09ff64daf157b328a3f1f82 100644
@@ -3318,7 +3318,7 @@ static bool is_access_allowed(u32 fault_err_code, u64 spte)
  * - true: let the vcpu to access on the same address again.
  * - false: let the real page fault path to fix it.
  */
-static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
+static bool fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int level,
                            u32 error_code)
 {
        struct kvm_shadow_walk_iterator iterator;
@@ -3338,7 +3338,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
        do {
                u64 new_spte;
 
-               for_each_shadow_entry_lockless(vcpu, gva, iterator, spte)
+               for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
                        if (!is_shadow_present_pte(spte) ||
                            iterator.level < level)
                                break;
@@ -3416,7 +3416,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 
        } while (true);
 
-       trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,
+       trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
                              spte, fault_handled);
        walk_shadow_page_lockless_end(vcpu);
 
@@ -3424,10 +3424,11 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 }
 
 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
-                        gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable);
+                        gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
+                        bool *writable);
 static int make_mmu_pages_available(struct kvm_vcpu *vcpu);
 
-static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
+static int nonpaging_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
                         gfn_t gfn, bool prefault)
 {
        int r;
@@ -3453,16 +3454,16 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
                gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
        }
 
-       if (fast_page_fault(vcpu, v, level, error_code))
+       if (fast_page_fault(vcpu, gpa, level, error_code))
                return RET_PF_RETRY;
 
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();
 
-       if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
+       if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
                return RET_PF_RETRY;
 
-       if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
+       if (handle_abnormal_pfn(vcpu, gpa, gfn, pfn, ACC_ALL, &r))
                return r;
 
        r = RET_PF_RETRY;
@@ -3473,7 +3474,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
                goto out_unlock;
        if (likely(!force_pt_level))
                transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
-       r = __direct_map(vcpu, v, write, map_writable, level, pfn,
+       r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
                         prefault, false);
 out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
@@ -3728,7 +3729,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots);
 
-static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
+static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
                                  u32 access, struct x86_exception *exception)
 {
        if (exception)
@@ -3736,7 +3737,7 @@ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
        return vaddr;
 }
 
-static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
+static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
                                         u32 access,
                                         struct x86_exception *exception)
 {
@@ -3897,13 +3898,14 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
        walk_shadow_page_lockless_end(vcpu);
 }
 
-static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
+static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
                                u32 error_code, bool prefault)
 {
-       gfn_t gfn = gva >> PAGE_SHIFT;
+       gfn_t gfn = gpa >> PAGE_SHIFT;
        int r;
 
-       pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
+       /* Note, paging is disabled, ergo gva == gpa. */
+       pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);
 
        if (page_fault_handle_page_track(vcpu, error_code, gfn))
                return RET_PF_EMULATE;
@@ -3915,11 +3917,12 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
        MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 
-       return nonpaging_map(vcpu, gva & PAGE_MASK,
+       return nonpaging_map(vcpu, gpa & PAGE_MASK,
                             error_code, gfn, prefault);
 }
 
-static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
+static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+                                  gfn_t gfn)
 {
        struct kvm_arch_async_pf arch;
 
@@ -3928,7 +3931,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
        arch.direct_map = vcpu->arch.mmu.direct_map;
        arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
 
-       return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
+       return kvm_setup_async_pf(vcpu, cr2_or_gpa,
+                                 kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
 
 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
@@ -3945,7 +3949,8 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
 }
 
 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
-                        gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable)
+                        gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
+                        bool *writable)
 {
        struct kvm_memory_slot *slot;
        bool async;
@@ -3957,12 +3962,12 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
                return false; /* *pfn has correct page already */
 
        if (!prefault && kvm_can_do_async_pf(vcpu)) {
-               trace_kvm_try_async_get_page(gva, gfn);
+               trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
                if (kvm_find_async_pf_gfn(vcpu, gfn)) {
-                       trace_kvm_async_pf_doublefault(gva, gfn);
+                       trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
                        kvm_make_request(KVM_REQ_APF_HALT, vcpu);
                        return true;
-               } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
+               } else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
                        return true;
        }
 
@@ -3975,6 +3980,12 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 {
        int r = 1;
 
+#ifndef CONFIG_X86_64
+       /* A 64-bit CR2 should be impossible on 32-bit KVM. */
+       if (WARN_ON_ONCE(fault_address >> 32))
+               return -EFAULT;
+#endif
+
        vcpu->arch.l1tf_flush_l1d = true;
        switch (vcpu->arch.apf.host_apf_reason) {
        default:
@@ -4012,7 +4023,7 @@ check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
        return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num);
 }
 
-static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
+static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
                          bool prefault)
 {
        kvm_pfn_t pfn;
@@ -5048,7 +5059,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
                       void *insn, int insn_len)
 {
        int r, emulation_type = EMULTYPE_RETRY;
@@ -5058,12 +5069,12 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
        /* With shadow page tables, fault_address contains a GVA or nGPA.  */
        if (vcpu->arch.mmu.direct_map) {
                vcpu->arch.gpa_available = true;
-               vcpu->arch.gpa_val = cr2;
+               vcpu->arch.gpa_val = cr2_or_gpa;
        }
 
        r = RET_PF_INVALID;
        if (unlikely(error_code & PFERR_RSVD_MASK)) {
-               r = handle_mmio_page_fault(vcpu, cr2, direct);
+               r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
                if (r == RET_PF_EMULATE) {
                        emulation_type = 0;
                        goto emulate;
@@ -5071,8 +5082,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
        }
 
        if (r == RET_PF_INVALID) {
-               r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
-                                             false);
+               r = vcpu->arch.mmu.page_fault(vcpu, cr2_or_gpa,
+                                              lower_32_bits(error_code),
+                                              false);
                WARN_ON(r == RET_PF_INVALID);
        }
 
@@ -5090,11 +5102,11 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
         */
        if (vcpu->arch.mmu.direct_map &&
            (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
-               kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
+               kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
                return 1;
        }
 
-       if (mmio_info_in_cache(vcpu, cr2, direct))
+       if (mmio_info_in_cache(vcpu, cr2_or_gpa, direct))
                emulation_type = 0;
 emulate:
        /*
@@ -5107,7 +5119,7 @@ emulate:
        if (unlikely(insn && !insn_len))
                return 1;
 
-       er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
+       er = x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn, insn_len);
 
        switch (er) {
        case EMULATE_DONE:
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index 918b0d5bf2724c66953addcc6b0bfd5b776c6c45..cb41b036eb2646c98c806672b71b15a834fca5be 100644
@@ -249,13 +249,13 @@ TRACE_EVENT(
 
 TRACE_EVENT(
        fast_page_fault,
-       TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
+       TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code,
                 u64 *sptep, u64 old_spte, bool retry),
-       TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),
+       TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, retry),
 
        TP_STRUCT__entry(
                __field(int, vcpu_id)
-               __field(gva_t, gva)
+               __field(gpa_t, cr2_or_gpa)
                __field(u32, error_code)
                __field(u64 *, sptep)
                __field(u64, old_spte)
@@ -265,7 +265,7 @@ TRACE_EVENT(
 
        TP_fast_assign(
                __entry->vcpu_id = vcpu->vcpu_id;
-               __entry->gva = gva;
+               __entry->cr2_or_gpa = cr2_or_gpa;
                __entry->error_code = error_code;
                __entry->sptep = sptep;
                __entry->old_spte = old_spte;
@@ -273,9 +273,9 @@ TRACE_EVENT(
                __entry->retry = retry;
        ),
 
-       TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx"
+       TP_printk("vcpu %d gva %llx error_code %s sptep %p old %#llx"
                  " new %llx spurious %d fixed %d", __entry->vcpu_id,
-                 __entry->gva, __print_flags(__entry->error_code, "|",
+                 __entry->cr2_or_gpa, __print_flags(__entry->error_code, "|",
                  kvm_mmu_trace_pferr_flags), __entry->sptep,
                  __entry->old_spte, __entry->new_spte,
                  __spte_satisfied(old_spte), __spte_satisfied(new_spte)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a0c176d55665155b1db36fba8df6f05869bc3856..152c2978a2563be65395e16015b0c0edfd970e26 100644
@@ -273,11 +273,11 @@ static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
 }
 
 /*
- * Fetch a guest pte for a guest virtual address
+ * Fetch a guest pte for a guest virtual address, or for an L2's GPA.
  */
 static int FNAME(walk_addr_generic)(struct guest_walker *walker,
                                    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-                                   gva_t addr, u32 access)
+                                   gpa_t addr, u32 access)
 {
        int ret;
        pt_element_t pte;
@@ -478,7 +478,7 @@ error:
 }
 
 static int FNAME(walk_addr)(struct guest_walker *walker,
-                           struct kvm_vcpu *vcpu, gva_t addr, u32 access)
+                           struct kvm_vcpu *vcpu, gpa_t addr, u32 access)
 {
        return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
                                        access);
@@ -593,7 +593,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
  * If the guest tries to write a write-protected page, we need to
  * emulate this operation, return 1 to indicate this case.
  */
-static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
+static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
                         struct guest_walker *gw,
                         int write_fault, int hlevel,
                         kvm_pfn_t pfn, bool map_writable, bool prefault,
@@ -747,7 +747,7 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
  *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
  *           a negative value on error.
  */
-static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
+static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
                             bool prefault)
 {
        int write_fault = error_code & PFERR_WRITE_MASK;
@@ -926,18 +926,19 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
        spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
-static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
+/* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
+static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t addr, u32 access,
                               struct x86_exception *exception)
 {
        struct guest_walker walker;
        gpa_t gpa = UNMAPPED_GVA;
        int r;
 
-       r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);
+       r = FNAME(walk_addr)(&walker, vcpu, addr, access);
 
        if (r) {
                gpa = gfn_to_gpa(walker.gfn);
-               gpa |= vaddr & ~PAGE_MASK;
+               gpa |= addr & ~PAGE_MASK;
        } else if (exception)
                *exception = walker.fault;
 
@@ -945,7 +946,8 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
 }
 
 #if PTTYPE != PTTYPE_EPT
-static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
+/* Note, gva_to_gpa_nested() is only used to translate L2 GVAs. */
+static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
                                      u32 access,
                                      struct x86_exception *exception)
 {
@@ -953,6 +955,11 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
        gpa_t gpa = UNMAPPED_GVA;
        int r;
 
+#ifndef CONFIG_X86_64
+       /* A 64-bit GVA should be impossible on 32-bit KVM. */
+       WARN_ON_ONCE(vaddr >> 32);
+#endif
+
        r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);
 
        if (r) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 355b302e9ff7bd4744ff575eb34926e4fbbfdda1..63edcfe5b5a4705b5e750b8dee6d207ebcf00680 100644
@@ -5695,11 +5695,11 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
        return r;
 }
 
-static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
+static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                                  bool write_fault_to_shadow_pgtable,
                                  int emulation_type)
 {
-       gpa_t gpa = cr2;
+       gpa_t gpa = cr2_or_gpa;
        kvm_pfn_t pfn;
 
        if (emulation_type & EMULTYPE_NO_REEXECUTE)
@@ -5710,7 +5710,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
                 * Write permission should be allowed since only
                 * write access need to be emulated.
                 */
-               gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
+               gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
 
                /*
                 * If the mapping is invalid in guest, let cpu retry
@@ -5767,10 +5767,10 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
 }
 
 static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
-                             unsigned long cr2,  int emulation_type)
+                             gpa_t cr2_or_gpa,  int emulation_type)
 {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-       unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
+       unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa;
 
        last_retry_eip = vcpu->arch.last_retry_eip;
        last_retry_addr = vcpu->arch.last_retry_addr;
@@ -5796,14 +5796,14 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
        if (x86_page_table_writing_insn(ctxt))
                return false;
 
-       if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
+       if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa)
                return false;
 
        vcpu->arch.last_retry_eip = ctxt->eip;
-       vcpu->arch.last_retry_addr = cr2;
+       vcpu->arch.last_retry_addr = cr2_or_gpa;
 
        if (!vcpu->arch.mmu.direct_map)
-               gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
+               gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
 
        kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
 
@@ -5933,11 +5933,8 @@ static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
        return false;
 }
 
-int x86_emulate_instruction(struct kvm_vcpu *vcpu,
-                           unsigned long cr2,
-                           int emulation_type,
-                           void *insn,
-                           int insn_len)
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+                           int emulation_type, void *insn, int insn_len)
 {
        int r;
        struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
@@ -5980,7 +5977,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
                if (r != EMULATION_OK)  {
                        if (emulation_type & EMULTYPE_TRAP_UD)
                                return EMULATE_FAIL;
-                       if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
+                       if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt,
                                                emulation_type))
                                return EMULATE_DONE;
                        if (ctxt->have_exception) {
@@ -6006,7 +6003,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
                return EMULATE_DONE;
        }
 
-       if (retry_instruction(ctxt, cr2, emulation_type))
+       if (retry_instruction(ctxt, cr2_or_gpa, emulation_type))
                return EMULATE_DONE;
 
        /* this is needed for vmware backdoor interface to work since it
@@ -6018,7 +6015,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 
 restart:
        /* Save the faulting GPA (cr2) in the address field */
-       ctxt->exception.address = cr2;
+       ctxt->exception.address = cr2_or_gpa;
 
        r = x86_emulate_insn(ctxt);
 
@@ -6026,7 +6023,7 @@ restart:
                return EMULATE_DONE;
 
        if (r == EMULATION_FAILED) {
-               if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
+               if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt,
                                        emulation_type))
                        return EMULATE_DONE;
 
@@ -8946,7 +8943,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
              work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
                return;
 
-       vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
+       vcpu->arch.mmu.page_fault(vcpu, work->cr2_or_gpa, 0, true);
 }
 
 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
@@ -9029,7 +9026,7 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 {
        struct x86_exception fault;
 
-       trace_kvm_async_pf_not_present(work->arch.token, work->gva);
+       trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa);
        kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
 
        if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
@@ -9057,7 +9054,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                work->arch.token = ~0; /* broadcast wakeup */
        else
                kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
-       trace_kvm_async_pf_ready(work->arch.token, work->gva);
+       trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
 
        if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED &&
            !apf_get_user(vcpu, &val)) {
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 7d948827cc8e7eea15bfc5e94ef479a6517b38ab..254fcf4afc4d8de7032fda416f0e465db2ae0039 100644
@@ -185,7 +185,7 @@ struct kvm_async_pf {
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct mm_struct *mm;
-       gva_t gva;
+       gpa_t cr2_or_gpa;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        bool   wakeup_all;
@@ -193,8 +193,8 @@ struct kvm_async_pf {
 
 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
 void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
-                      struct kvm_arch_async_pf *arch);
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+                      unsigned long hva, struct kvm_arch_async_pf *arch);
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 #endif
 
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 57bcb27dcf30f61e14361c675617f08a0e995600..c03c8c313b708c7dbbb8066472370a8305562c26 100644
@@ -76,7 +76,7 @@ static void async_pf_execute(struct work_struct *work)
        struct mm_struct *mm = apf->mm;
        struct kvm_vcpu *vcpu = apf->vcpu;
        unsigned long addr = apf->addr;
-       gva_t gva = apf->gva;
+       gpa_t cr2_or_gpa = apf->cr2_or_gpa;
        int locked = 1;
 
        might_sleep();
@@ -104,7 +104,7 @@ static void async_pf_execute(struct work_struct *work)
         * this point
         */
 
-       trace_kvm_async_pf_completed(addr, gva);
+       trace_kvm_async_pf_completed(addr, cr2_or_gpa);
 
        if (swq_has_sleeper(&vcpu->wq))
                swake_up(&vcpu->wq);
@@ -177,8 +177,8 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
        }
 }
 
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
-                      struct kvm_arch_async_pf *arch)
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+                      unsigned long hva, struct kvm_arch_async_pf *arch)
 {
        struct kvm_async_pf *work;
 
@@ -197,7 +197,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
 
        work->wakeup_all = false;
        work->vcpu = vcpu;
-       work->gva = gva;
+       work->cr2_or_gpa = cr2_or_gpa;
        work->addr = hva;
        work->arch = *arch;
        work->mm = current->mm;