summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1:
3ef956d)
A guest may modify the SPEC_CTRL MSR from the value used by the
kernel. Since the kernel doesn't use IBRS, this means a value of zero is
what is needed in the host.
But the 336996-Speculative-Execution-Side-Channel-Mitigations.pdf refers to
the other bits as reserved so the kernel should respect the boot time
SPEC_CTRL value and use that.
This makes it possible to deal with future extensions to the SPEC_CTRL interface if
any at all.
Note: This uses wrmsrl() instead of native_wrmsrl(). It does not make any
difference as paravirt will overwrite the callq *0xfff.. with the wrmsrl
assembler code.
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
[tyhicks: Minor backport for context]
CVE-2018-3639 (x86)
Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
[backport to review svm.c/vmx.c]
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
extern void x86_spec_ctrl_set(u64);
extern u64 x86_spec_ctrl_get_default(void);
extern void x86_spec_ctrl_set(u64);
extern u64 x86_spec_ctrl_get_default(void);
+/*
+ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
+ * the guest has, while on VMEXIT we restore the host view. This
+ * would be easier if SPEC_CTRL were architecturally maskable or
+ * shadowable for guests but this is not (currently) the case.
+ * Takes the guest view of SPEC_CTRL MSR as a parameter.
+ */
+extern void x86_spec_ctrl_set_guest(u64);
+extern void x86_spec_ctrl_restore_host(u64);
+
/*
* On VMEXIT we must ensure that no RSB predictions learned in the guest
* can be followed in the host, by overwriting the RSB completely. Both
/*
* On VMEXIT we must ensure that no RSB predictions learned in the guest
* can be followed in the host, by overwriting the RSB completely. Both
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
+/*
+ * Called on VMENTER: load the guest's view of the SPEC_CTRL MSR.
+ * No-op when IBRS is not in use; the wrmsrl is also skipped when the
+ * guest value already equals the host's boot-time base value.
+ */
+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
+{
+	if (!ibrs_inuse)
+		return;
+	if (x86_spec_ctrl_base != guest_spec_ctrl)
+		wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
+}
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
+
+/*
+ * Called on VMEXIT: restore the host's boot-time SPEC_CTRL value.
+ * Takes the guest value only to detect whether the MSR was changed;
+ * the value written back is always x86_spec_ctrl_base.  No-op when
+ * IBRS is not in use.
+ */
+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
+{
+	if (!ibrs_inuse)
+		return;
+	if (x86_spec_ctrl_base != guest_spec_ctrl)
+		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+}
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
+
static void __init spec2_print_if_insecure(const char *reason)
{
if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
static void __init spec2_print_if_insecure(const char *reason)
{
if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
- if (ibrs_inuse && (svm->spec_ctrl != FEATURE_ENABLE_IBRS))
- wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+ /* SMB: Don't care about ibrs_inuse but rely on guest value */
+ x86_spec_ctrl_set_guest(svm->spec_ctrl);
asm volatile (
"push %%" _ASM_BP "; \n\t"
asm volatile (
"push %%" _ASM_BP "; \n\t"
/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();
/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();
- if (ibrs_inuse) {
- rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
- if (svm->spec_ctrl != FEATURE_ENABLE_IBRS)
- wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
- }
+ x86_spec_ctrl_restore_host(svm->spec_ctrl);
#ifdef CONFIG_X86_64
wrmsrl(MSR_GS_BASE, svm->host.gs_base);
#ifdef CONFIG_X86_64
wrmsrl(MSR_GS_BASE, svm->host.gs_base);
atomic_switch_perf_msrs(vmx);
atomic_switch_perf_msrs(vmx);
- if (ibrs_inuse)
- add_atomic_switch_msr(vmx, MSR_IA32_SPEC_CTRL,
- vcpu->arch.spec_ctrl, FEATURE_ENABLE_IBRS);
+ /* SMB: Ignore ibrs_inuse but rely on vcpu value */
+ x86_spec_ctrl_set_guest(vcpu->arch.spec_ctrl);
debugctlmsr = get_debugctlmsr();
debugctlmsr = get_debugctlmsr();
+ x86_spec_ctrl_restore_host(vcpu->arch.spec_ctrl);
+
/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();
/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();