KVM: nSVM: Check that MBZ bits in CR3 and CR4 are not set on vmrun of nested guests
Author:     Krish Sadhukhan <krish.sadhukhan@oracle.com>
AuthorDate: Wed, 8 Jul 2020 00:39:56 +0000 (00:39 +0000)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Wed, 8 Jul 2020 20:21:59 +0000 (16:21 -0400)
According to section "Canonicalization and Consistency Checks" in APM vol. 2
the following guest state is illegal:

    "Any MBZ bit of CR3 is set."
    "Any MBZ bit of CR4 is set."

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Message-Id: <1594168797-29444-3-git-send-email-krish.sadhukhan@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
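
Which CR3 bits are must-be-zero depends on the nested guest's paging mode
(legacy, PAE, or long mode). As a minimal standalone sketch of the CR3 side
of the check, using the mask values the patch adds to svm.h below
(cr3_mbz_bits_clear is a hypothetical helper, not the kernel function):

    #include <stdbool.h>
    #include <stdint.h>

    /* MBZ masks per paging mode; values mirror the svm.h additions below. */
    #define CR3_LEGACY_RESERVED_MASK        0xfe7ULL
    #define CR3_LEGACY_PAE_RESERVED_MASK    0x7ULL
    #define CR3_LONG_RESERVED_MASK          0xfff0000000000fe7ULL

    /* Long-mode paging is active when both EFER.LME and CR0.PG are set. */
    static bool cr3_mbz_bits_clear(uint64_t cr3, bool long_mode, bool pae)
    {
            uint64_t mask;

            if (long_mode)
                    mask = CR3_LONG_RESERVED_MASK;
            else if (pae)
                    mask = CR3_LEGACY_PAE_RESERVED_MASK;
            else
                    mask = CR3_LEGACY_RESERVED_MASK;

            return (cr3 & mask) == 0;
    }

The CR4 side reuses KVM's existing kvm_valid_cr4(), which rejects any CR4 bit
the vCPU's feature set does not allow.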
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 385461496cf518dd55950ddd90243ebbe504e8b0..402ea5b412f0ea0e8abbd6a74126225bf1a20b64 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -222,8 +222,9 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
        return true;
 }
 
-static bool nested_vmcb_checks(struct vmcb *vmcb)
+static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
 {
+       bool nested_vmcb_lma;
        if ((vmcb->save.efer & EFER_SVME) == 0)
                return false;
 
@@ -234,6 +235,27 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
        if (!kvm_dr6_valid(vmcb->save.dr6) || !kvm_dr7_valid(vmcb->save.dr7))
                return false;
 
+       nested_vmcb_lma =
+               (vmcb->save.efer & EFER_LME) &&
+               (vmcb->save.cr0 & X86_CR0_PG);
+
+       if (!nested_vmcb_lma) {
+               if (vmcb->save.cr4 & X86_CR4_PAE) {
+                       if (vmcb->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
+                               return false;
+               } else {
+                       if (vmcb->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
+                               return false;
+               }
+       } else {
+               if (!(vmcb->save.cr4 & X86_CR4_PAE) ||
+                   !(vmcb->save.cr0 & X86_CR0_PE) ||
+                   (vmcb->save.cr3 & MSR_CR3_LONG_RESERVED_MASK))
+                       return false;
+       }
+       if (kvm_valid_cr4(&svm->vcpu, vmcb->save.cr4))
+               return false;
+
        return nested_vmcb_check_controls(&vmcb->control);
 }
 
@@ -419,7 +441,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
 
        nested_vmcb = map.hva;
 
-       if (!nested_vmcb_checks(nested_vmcb)) {
+       if (!nested_vmcb_checks(svm, nested_vmcb)) {
                nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
                nested_vmcb->control.exit_code_hi = 0;
                nested_vmcb->control.exit_info_1  = 0;
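
When the checks fail, VMRUN does not enter the nested guest: as the error
path above shows, the exit code in the guest's VMCB is set to SVM_EXIT_ERR,
so L1 observes a failed VMRUN instead of an L2 launch with illegal state.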
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 71b1dda947e6619e32876a4a6a5c24ba5ba67519..121b198b51e9b9a5d300095489fd21610e559550 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -343,7 +343,10 @@ static inline bool gif_set(struct vcpu_svm *svm)
 }
 
 /* svm.c */
-#define MSR_INVALID                    0xffffffffU
+#define MSR_CR3_LEGACY_RESERVED_MASK           0xfe7U
+#define MSR_CR3_LEGACY_PAE_RESERVED_MASK       0x7U
+#define MSR_CR3_LONG_RESERVED_MASK             0xfff0000000000fe7U
+#define MSR_INVALID                            0xffffffffU
 
 u32 svm_msrpm_offset(u32 msr);
 void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
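
Decoding the new masks: in legacy non-PAE paging, CR3 bits 2:0 and 11:5 are
MBZ (bits 3 and 4, PWT and PCD, stay legal); in PAE paging only bits 2:0 are
MBZ; long mode additionally reserves bits 63:52, above the architectural
52-bit physical-address limit. A compile-time sanity check of that reading
(a standalone sketch; GENMASK_ULL is redefined locally so it builds outside
the kernel tree):

    #define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    /* Legacy 32-bit paging: bits 2:0 and 11:5 of CR3 are MBZ. */
    _Static_assert((GENMASK_ULL(2, 0) | GENMASK_ULL(11, 5)) == 0xfe7ULL,
                   "legacy CR3 mask");
    /* PAE paging: only bits 2:0 are MBZ. */
    _Static_assert(GENMASK_ULL(2, 0) == 0x7ULL, "PAE CR3 mask");
    /* Long mode: the legacy MBZ bits plus bits 63:52. */
    _Static_assert((GENMASK_ULL(2, 0) | GENMASK_ULL(11, 5) |
                    GENMASK_ULL(63, 52)) == 0xfff0000000000fe7ULL,
                   "long-mode CR3 mask");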
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 549b3f7228ac6cf7105883ebbd7ed388e7354372..475456a14d76f3f4eacf1153bde2bb9ec6357653 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -955,7 +955,7 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 }
 EXPORT_SYMBOL_GPL(kvm_set_xcr);
 
-static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
        if (cr4 & cr4_reserved_bits)
                return -EINVAL;
@@ -965,6 +965,7 @@ static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(kvm_valid_cr4);
 
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
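
kvm_valid_cr4() was previously static to x86.c. The nested SVM checks are
built into the separate kvm_amd module, so the function gains an
EXPORT_SYMBOL_GPL and a declaration in x86.h (next hunk) to make it callable
from nested.c.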
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 10441fbb4073223527afb7c9b6c91fb8e77f538d..224670d7c24547199342560878585f2b5acf5f5f 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -369,6 +369,7 @@ static inline bool kvm_dr6_valid(u64 data)
 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
 u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
+int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
 
 #define  KVM_MSR_RET_INVALID  2