KVM: VMX: Prevent RSB underflow before vmenter
author    Josh Poimboeuf <jpoimboe@kernel.org>
Tue, 14 Jun 2022 21:16:16 +0000 (23:16 +0200)
committer Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Tue, 19 Jul 2022 19:20:06 +0000 (16:20 -0300)
commit 07853adc29a058c5fd143c14e5ac528448a72ed9 upstream.

On VMX, there are some balanced returns between the time the guest's
SPEC_CTRL value is written, and the vmenter.

Balanced returns (matched by a preceding call) are usually ok, but it's
at least theoretically possible an NMI with a deep call stack could
empty the RSB before one of the returns.

For maximum paranoia, don't allow *any* returns (balanced or otherwise)
between the SPEC_CTRL write and the vmenter.

  [ bp: Fix 32-bit build. ]

Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
[cascardo: header conflict fixup at arch/x86/kernel/asm-offsets.c]
[cascardo: header conflict fixup at arch/x86/kvm/vmx/capabilities.h]
CVE-2022-29900
CVE-2022-29901
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
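
To make the NMI argument in the message concrete, below is a small, purely illustrative C model of a return stack buffer (an editorial sketch, not kernel code; the depth of 16 and the simple entry counting are simplifying assumptions). It only demonstrates the point the commit relies on: once an interrupt nests deeper than the RSB and then unwinds, even a perfectly balanced host RET misses the RSB and the prediction comes from a fallback unit that a guest may have trained. That is the window the patch closes by moving the SPEC_CTRL write into vmenter.S, with no calls or returns between the WRMSR and vmentry.

/*
 * Toy model of a Return Stack Buffer (RSB) -- illustrative only, not how
 * any real CPU or the kernel implements anything.  The model only tracks
 * how many usable return predictions remain; on real hardware the buffer
 * is circular and deeper calls overwrite older entries.
 */
#include <stdio.h>

#define RSB_DEPTH 16	/* assumed small fixed depth, as on real parts */

static int rsb_valid;	/* number of usable return predictions */

static void model_call(void)
{
	if (rsb_valid < RSB_DEPTH)
		rsb_valid++;
}

/* Returns 1 on an RSB hit, 0 on underflow (a fallback predictor is used). */
static int model_ret(void)
{
	if (rsb_valid > 0) {
		rsb_valid--;
		return 1;
	}
	return 0;
}

int main(void)
{
	int i;

	/* Host: one call whose matching RET is still pending at vmentry. */
	model_call();

	/* An NMI handler nests 32 calls deep and unwinds completely.  The
	 * nesting exhausts the 16-entry RSB, and the unwind then drains it
	 * and keeps underflowing for its last 16 returns. */
	for (i = 0; i < 32; i++)
		model_call();
	for (i = 0; i < 32; i++)
		model_ret();

	/* The host's perfectly balanced RET now misses the RSB. */
	printf("host RET predicted from RSB: %s\n",
	       model_ret() ? "yes" : "no (underflow)");
	return 0;
}

Compiled and run, the sketch reports that the host's balanced return is no longer predicted from the RSB, which is why the patch forbids any return between the guest SPEC_CTRL write and vmentry rather than relying on call/return balance.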
arch/x86/kernel/asm-offsets.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kvm/vmx/capabilities.h
arch/x86/kvm/vmx/vmenter.S
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/vmx/vmx_ops.h

diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index ecd3fd6993d1a5e4f257405681c70eef77532bb6..173e3295d3d7aae62c232b8f1b413002396224d4 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -18,6 +18,7 @@
 #include <asm/bootparam.h>
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
+#include "../kvm/vmx/vmx.h"
 
 #ifdef CONFIG_XEN
 #include <xen/interface/xen.h>
@@ -93,4 +94,9 @@ static void __used common(void)
        OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
        OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
        OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);
+
+       if (IS_ENABLED(CONFIG_KVM_INTEL)) {
+               BLANK();
+               OFFSET(VMX_spec_ctrl, vcpu_vmx, spec_ctrl);
+       }
 }
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index ba520f065251abc6940098a0d04a2031efe89b5c..8bc493c72e05eb764972cacc24cf2ffc3053abf9 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -196,8 +196,8 @@ void __init check_bugs(void)
 }
 
 /*
- * NOTE: For VMX, this function is not called in the vmexit path.
- * It uses vmx_spec_ctrl_restore_host() instead.
+ * NOTE: This function is *only* called for SVM.  VMX spec_ctrl handling is
+ * done in vmenter.S.
  */
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index 4705ad55abb56be652f2a19c54516c95b894bf72..d72c572df3fb18ef3f77ad9001c26e347a5c2fbb 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -4,7 +4,7 @@
 
 #include <asm/vmx.h>
 
-#include "lapic.h"
+#include "../lapic.h"
 
 extern bool __read_mostly enable_vpid;
 extern bool __read_mostly flexpriority_enabled;
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 857fa0fc49fafd9d366d372494cb2a1f277513dd..8a37ca74d9cf2ec7e62d667748cc65e5e9ab639f 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -1,9 +1,11 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/linkage.h>
 #include <asm/asm.h>
+#include <asm/asm-offsets.h>
 #include <asm/bitsperlong.h>
 #include <asm/kvm_vcpu_regs.h>
 #include <asm/nospec-branch.h>
+#include <asm/percpu.h>
 #include <asm/segment.h>
 #include "run_flags.h"
 
@@ -73,6 +75,33 @@ SYM_FUNC_START(__vmx_vcpu_run)
        lea (%_ASM_SP), %_ASM_ARG2
        call vmx_update_host_rsp
 
+       ALTERNATIVE "jmp .Lspec_ctrl_done", "", X86_FEATURE_MSR_SPEC_CTRL
+
+       /*
+        * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
+        * host's, write the MSR.
+        *
+        * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
+        * there must not be any returns or indirect branches between this code
+        * and vmentry.
+        */
+       mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
+       movl VMX_spec_ctrl(%_ASM_DI), %edi
+       movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
+       cmp %edi, %esi
+       je .Lspec_ctrl_done
+       mov $MSR_IA32_SPEC_CTRL, %ecx
+       xor %edx, %edx
+       mov %edi, %eax
+       wrmsr
+
+.Lspec_ctrl_done:
+
+       /*
+        * Since vmentry is serializing on affected CPUs, there's no need for
+        * an LFENCE to stop speculation from skipping the wrmsr.
+        */
+
        /* Load @regs to RAX. */
        mov (%_ASM_SP), %_ASM_AX
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f7497ca2c9dc7ec68b3246b233217670e2a75ec2..417176817d80c239ace6cee2d7a1de63dfbd9a41 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6821,14 +6821,6 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
        kvm_wait_lapic_expire(vcpu);
 
-       /*
-        * If this vCPU has touched SPEC_CTRL, restore the guest's value if
-        * it's non-zero. Since vmentry is serialising on affected CPUs, there
-        * is no need to worry about the conditional branch over the wrmsr
-        * being speculatively taken.
-        */
-       x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
-
        /* The actual VMENTER/EXIT is in the .noinstr.text section. */
        vmx_vcpu_enter_exit(vcpu, vmx, __vmx_vcpu_run_flags(vmx));
 
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index a8b8150252bb76a32b3b1abf50885618d5e35cd9..eb4568a3814a68e15d5ef481cdafa2cce35f55a8 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -8,11 +8,11 @@
 #include <asm/intel_pt.h>
 
 #include "capabilities.h"
-#include "kvm_cache_regs.h"
+#include "../kvm_cache_regs.h"
 #include "posted_intr.h"
 #include "vmcs.h"
 #include "vmx_ops.h"
-#include "cpuid.h"
+#include "../cpuid.h"
 #include "run_flags.h"
 
 #define MSR_TYPE_R     1
diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
index 9e9ef47e988c12f66ec3240de8994b1f15c67251..5621805feafac72c065be1038465d190a9e785b9 100644
--- a/arch/x86/kvm/vmx/vmx_ops.h
+++ b/arch/x86/kvm/vmx/vmx_ops.h
@@ -8,7 +8,7 @@
 
 #include "evmcs.h"
 #include "vmcs.h"
-#include "x86.h"
+#include "../x86.h"
 
 asmlinkage void vmread_error(unsigned long field, bool fault);
 __attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,