x86/bugs, KVM: Support the combination of guest and host IBRS
author    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
          Thu, 26 Apr 2018 02:04:19 +0000 (22:04 -0400)
committer Stefan Bader <stefan.bader@canonical.com>
          Wed, 16 May 2018 11:52:18 +0000 (13:52 +0200)
A guest may modify the SPEC_CTRL MSR from the value used by the
kernel. Since the kernel doesn't use IBRS, a value of zero is what
the host needs.

But the 336996-Speculative-Execution-Side-Channel-Mitigations.pdf refers to
the other bits as reserved, so the kernel should respect the boot-time
SPEC_CTRL value and use that.

This allows dealing with future extensions to the SPEC_CTRL interface,
if there are any at all.
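
For context (a sketch, not part of this patch): the boot-time value in
question is the one read into x86_spec_ctrl_base earlier in this series,
roughly as follows.

	/* Read SPEC_CTRL once at boot; any reserved bits the firmware
	 * has set are carried into the host view. */
	if (boot_cpu_has(X86_FEATURE_IBRS))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);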

Note: This uses wrmsrl() instead of native_wrmsrl(). It does not make any
difference, as paravirt will overwrite the callq *0xfff.. with the wrmsrl
assembler code.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
CVE-2018-3639 (x86)

[tyhicks: Minor backport for context]
Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/cpu/bugs.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c

diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index e72af25e2c736cb47e4bb97bb8720487d6c19103..a348c047a5ea577ca2e023411623df6ee14f2036 100644
@@ -228,6 +228,16 @@ enum spectre_v2_mitigation {
 extern void x86_spec_ctrl_set(u64);
 extern u64 x86_spec_ctrl_get_default(void);
 
+/*
+ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
+ * the guest has, while on VMEXIT we restore the host view. This
+ * would be easier if SPEC_CTRL were architecturally maskable or
+ * shadowable for guests but this is not (currently) the case.
+ * Takes the guest view of SPEC_CTRL MSR as a parameter.
+ */
+extern void x86_spec_ctrl_set_guest(u64);
+extern void x86_spec_ctrl_restore_host(u64);
+
 extern char __indirect_thunk_start[];
 extern char __indirect_thunk_end[];
 
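Taken together, the two helpers bracket the world switch. A minimal
sketch of the intended call pattern (hypothetical guest_spec_ctrl
variable; the real call sites are the svm/vmx hunks below):

	u64 guest_spec_ctrl = ...;			/* guest's view of the MSR */

	x86_spec_ctrl_set_guest(guest_spec_ctrl);	/* before VMENTER */
	/* ... enter the guest, run until the next VMEXIT ... */
	x86_spec_ctrl_restore_host(guest_spec_ctrl);	/* after VMEXIT */
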
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 6ed84f50d74a0ea6f6e9437190e7ef6d3d79cd98..38a8626c894c63deb6494cb1bccf0ecce0e3b63e 100644
@@ -123,6 +123,24 @@ u64 x86_spec_ctrl_get_default(void)
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
 
+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
+{
+       if (!boot_cpu_has(X86_FEATURE_IBRS))
+               return;
+       if (x86_spec_ctrl_base != guest_spec_ctrl)
+               wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
+}
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
+
+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
+{
+       if (!boot_cpu_has(X86_FEATURE_IBRS))
+               return;
+       if (x86_spec_ctrl_base != guest_spec_ctrl)
+               wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+}
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
+
 #ifdef RETPOLINE
 static bool spectre_v2_bad_module;
 
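A note on the design: both helpers skip the WRMSR when the guest view
already matches x86_spec_ctrl_base, so the common case of a guest that
never writes SPEC_CTRL (both values zero) costs only a feature check and
a compare per transition instead of a serializing MSR write.
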
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 76bbca2c8159abd010122b56266c1a513b4b36d3..4332cacdb2732401bca64628ce1f785a1f5cf19e 100644
@@ -5031,8 +5031,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
         * is no need to worry about the conditional branch over the wrmsr
         * being speculatively taken.
         */
-       if (svm->spec_ctrl)
-               native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+       x86_spec_ctrl_set_guest(svm->spec_ctrl);
 
        asm volatile (
                "push %%" _ASM_BP "; \n\t"
@@ -5144,8 +5143,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
                svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-       if (svm->spec_ctrl)
-               native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+       x86_spec_ctrl_restore_host(svm->spec_ctrl);
 
        /* Eliminate branch target predictions from guest mode */
        vmexit_fill_RSB();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2beb77c319e8d156a1cb1daa8169fd7a7c357f4f..a3e3e67b5574aac364f1be765c626c74fa46b411 100644
@@ -9450,8 +9450,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
         * is no need to worry about the conditional branch over the wrmsr
         * being speculatively taken.
         */
-       if (vmx->spec_ctrl)
-               native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+       x86_spec_ctrl_set_guest(vmx->spec_ctrl);
 
        vmx->__launched = vmx->loaded_vmcs->launched;
        asm(
@@ -9589,8 +9588,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
        if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
                vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-       if (vmx->spec_ctrl)
-               native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+       x86_spec_ctrl_restore_host(vmx->spec_ctrl);
 
        /* Eliminate branch target predictions from guest mode */
        vmexit_fill_RSB();