KVM: MMU: Do not unconditionally read PDPTE from guest memory
author	Avi Kivity <avi@redhat.com>
	Thu, 28 Jul 2011 08:36:17 +0000 (11:36 +0300)
committer	Avi Kivity <avi@redhat.com>
	Sun, 25 Sep 2011 16:18:01 +0000 (19:18 +0300)

Architecturally, PDPTEs are cached in the PDPTRs when CR3 is reloaded.
On SVM, it is not possible to implement this, but on VMX this is possible
and was indeed implemented until nested SVM changed this to unconditionally
read PDPTEs dynamically.  This has a noticeable impact when running PAE guests.

Fix by changing the MMU to read PDPTRs from the cache, falling back to
reading from memory for the nested MMU.
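
As a rough illustration (a stand-alone sketch with simplified, hypothetical
types; the kernel's real definitions appear in the hunks below), the new
callback lets the page-table walker stay agnostic about where a PDPTE
comes from:

    /*
     * Sketch only: simplified, hypothetical types.  In PAE mode,
     * bits 31:30 of a 32-bit linear address select one of four PDPTEs.
     */
    #include <stdint.h>

    struct kvm_vcpu;

    struct kvm_mmu {
            uint64_t (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
            uint64_t pdptrs[4];     /* filled when CR3 is reloaded */
    };

    struct kvm_vcpu {
            struct kvm_mmu mmu;
    };

    /* Cached path (VMX, ordinary paging): return the stored PDPTE. */
    static uint64_t cached_get_pdptr(struct kvm_vcpu *vcpu, int index)
    {
            return vcpu->mmu.pdptrs[index];
    }

    /* Walker call site: the callback hides the PDPTE's origin. */
    static uint64_t walk_pdpte(struct kvm_vcpu *vcpu, uint32_t addr)
    {
            return vcpu->mmu.get_pdptr(vcpu, (addr >> 30) & 3);
    }

A nested-NPT MMU installs a different callback, one that re-reads the entry
from guest memory (nested_svm_get_tdp_pdptr in the svm.c hunk below), since
the nested CR3 has no architectural PDPTR cache.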

Signed-off-by: Avi Kivity <avi@redhat.com>
Tested-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/kvm_cache_regs.h
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/svm.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 307e3cfa28ad158f41e771198b3c15094c8fdabc..b31a3417a405839e0c511cd6ee274acc318bfad0 100644
@@ -265,6 +265,7 @@ struct kvm_mmu {
        void (*new_cr3)(struct kvm_vcpu *vcpu);
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
        unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
+       u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
        int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
                          bool prefault);
        void (*inject_page_fault)(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 3377d53fcd369146c2994f0d34458274c1b1a526..544076c4f44bc330ba28b74b07ad3966e990168f 100644
@@ -45,13 +45,6 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
        return vcpu->arch.walk_mmu->pdptrs[index];
 }
 
-static inline u64 kvm_pdptr_read_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, int index)
-{
-       load_pdptrs(vcpu, mmu, mmu->get_cr3(vcpu));
-
-       return mmu->pdptrs[index];
-}
-
 static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
 {
        ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8e8da7960dbeed8be684c770a1ba4cc0c7653e08..f1b36cf3e3d0aff4ee1d587deb044a9618d51869 100644
@@ -2770,7 +2770,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 
                ASSERT(!VALID_PAGE(root));
                if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
-                       pdptr = kvm_pdptr_read_mmu(vcpu, &vcpu->arch.mmu, i);
+                       pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i);
                        if (!is_present_gpte(pdptr)) {
                                vcpu->arch.mmu.pae_root[i] = 0;
                                continue;
@@ -3318,6 +3318,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
        context->direct_map = true;
        context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
        context->get_cr3 = get_cr3;
+       context->get_pdptr = kvm_pdptr_read;
        context->inject_page_fault = kvm_inject_page_fault;
        context->nx = is_nx(vcpu);
 
@@ -3376,6 +3377,7 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
 
        vcpu->arch.walk_mmu->set_cr3           = kvm_x86_ops->set_cr3;
        vcpu->arch.walk_mmu->get_cr3           = get_cr3;
+       vcpu->arch.walk_mmu->get_pdptr         = kvm_pdptr_read;
        vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
 
        return r;
@@ -3386,6 +3388,7 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
        struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
 
        g_context->get_cr3           = get_cr3;
+       g_context->get_pdptr         = kvm_pdptr_read;
        g_context->inject_page_fault = kvm_inject_page_fault;
 
        /*
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 507e2b844cfa96be64ee518e922916479f66a6d7..f6dd9feb201b9a8409fc5a47bd50ed0c76f2707c 100644
@@ -163,7 +163,7 @@ retry_walk:
 
 #if PTTYPE == 64
        if (walker->level == PT32E_ROOT_LEVEL) {
-               pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3);
+               pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
                trace_kvm_mmu_paging_element(pte, walker->level);
                if (!is_present_gpte(pte))
                        goto error;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2b24a88f2c67f3cc1cf1360e2adda077428a5a90..f043168a5ab1a7504bd81908d3277dc996c56afe 100644
@@ -1844,6 +1844,20 @@ static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
        return svm->nested.nested_cr3;
 }
 
+static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
+{
+       struct vcpu_svm *svm = to_svm(vcpu);
+       u64 cr3 = svm->nested.nested_cr3;
+       u64 pdpte;
+       int ret;
+
+       ret = kvm_read_guest_page(vcpu->kvm, gpa_to_gfn(cr3), &pdpte,
+                                 offset_in_page(cr3) + index * 8, 8);
+       if (ret)
+               return 0;
+       return pdpte;
+}
+
 static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
                                   unsigned long root)
 {
@@ -1875,6 +1889,7 @@ static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
 
        vcpu->arch.mmu.set_cr3           = nested_svm_set_tdp_cr3;
        vcpu->arch.mmu.get_cr3           = nested_svm_get_tdp_cr3;
+       vcpu->arch.mmu.get_pdptr         = nested_svm_get_tdp_pdptr;
        vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
        vcpu->arch.mmu.shadow_root_level = get_npt_level();
        vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;