KVM: Add VT-x machine check support
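
Summary of the change (as visible in the hunks below): intercept machine-check (#MC)
exceptions from the guest by setting MC_VECTOR in the exception bitmap, handle the
dedicated EXIT_REASON_MCE_DURING_VMENTRY exit, and forward both cases to the host's
do_machine_check() while interrupts are still disabled after the VM exit. The handler
is passed a fake ring-3 pt_regs so the event always looks like it interrupted user
space, regardless of the context the guest was running in internally.
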
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c379a3472fa9b2e2d7845607fc86ba614c336dbb..32d6ae8fb60e339eba6b85968676776d44293c8b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -32,6 +32,7 @@
 #include <asm/desc.h>
 #include <asm/vmx.h>
 #include <asm/virtext.h>
+#include <asm/mce.h>
 
 #define __ex(x) __kvm_handle_fault_on_reboot(x)
 
@@ -97,6 +98,7 @@ struct vcpu_vmx {
        int soft_vnmi_blocked;
        ktime_t entry_time;
        s64 vnmi_blocked_time;
+       u32 exit_reason;
 };
 
 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -214,6 +216,13 @@ static inline int is_external_interrupt(u32 intr_info)
                == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
+static inline int is_machine_check(u32 intr_info)
+{
+       return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
+                            INTR_INFO_VALID_MASK)) ==
+               (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
+}
+
 static inline int cpu_has_vmx_msr_bitmap(void)
 {
        return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
@@ -485,7 +494,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 {
        u32 eb;
 
-       eb = (1u << PF_VECTOR) | (1u << UD_VECTOR);
+       eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR);
        if (!vcpu->fpu_active)
                eb |= 1u << NM_VECTOR;
        if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
@@ -2582,6 +2591,31 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
        return 0;
 }
 
+/*
+ * Trigger machine check on the host. We assume all the MSRs are already set up
+ * by the CPU and that we still run on the same CPU as the MCE occurred on.
+ * We pass a fake environment to the machine check handler because we want
+ * the guest to be always treated like user space, no matter what context
+ * it used internally.
+ */
+static void kvm_machine_check(void)
+{
+#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
+       struct pt_regs regs = {
+               .cs = 3, /* Fake ring 3 no matter what the guest ran on */
+               .flags = X86_EFLAGS_IF,
+       };
+
+       do_machine_check(&regs, 0);
+#endif
+}
+
+static int handle_machine_check(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       /* already handled by vcpu_run */
+       return 1;
+}
+
 static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -2593,6 +2627,9 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        vect_info = vmx->idt_vectoring_info;
        intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 
+       if (is_machine_check(intr_info))
+               return handle_machine_check(vcpu, kvm_run);
+
        if ((vect_info & VECTORING_INFO_VALID_MASK) &&
                                                !is_page_fault(intr_info))
                printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
@@ -3166,6 +3203,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
        [EXIT_REASON_WBINVD]                  = handle_wbinvd,
        [EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
        [EXIT_REASON_EPT_VIOLATION]           = handle_ept_violation,
+       [EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
 };
 
 static const int kvm_vmx_max_exit_handlers =
@@ -3177,8 +3215,8 @@ static const int kvm_vmx_max_exit_handlers =
  */
 static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
-       u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
+       u32 exit_reason = vmx->exit_reason;
        u32 vectoring_info = vmx->idt_vectoring_info;
 
        KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu),
@@ -3263,6 +3301,14 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 
        exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 
+       vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
+
+       /* Handle machine checks before interrupts are enabled */
+       if ((vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
+           || (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI
+               && is_machine_check(exit_intr_info)))
+               kvm_machine_check();
+
        /* We need to handle NMIs before interrupts are enabled */
        if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
            (exit_intr_info & INTR_INFO_VALID_MASK)) {
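
For context (not part of the patch above): handle_machine_check() returning 1 follows
the convention of the kvm_vmx_exit_handlers[] table, where 1 means "exit handled,
resume the guest" and 0 means "hand the exit to user space". A minimal sketch of how
vmx_handle_exit() consults the table the patch extends, assuming the surrounding code
of this kernel version:

	/*
	 * Sketch (assumption, not part of this patch): dispatch through the
	 * exit-handler table.  EXIT_REASON_MCE_DURING_VMENTRY now resolves to
	 * handle_machine_check(), which simply returns 1 because the MCE was
	 * already forwarded to do_machine_check() in vmx_complete_interrupts().
	 */
	if (exit_reason < kvm_vmx_max_exit_handlers
	    && kvm_vmx_exit_handlers[exit_reason])
		return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);

	/* Unknown exit reason: report it to user space. */
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	kvm_run->hw.hardware_exit_reason = exit_reason;
	return 0;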