From fe17061207ee371f0cd0f60162947deb9fd16d4a Mon Sep 17 00:00:00 2001 From: Konrad Rzeszutek Wilk Date: Wed, 25 Apr 2018 22:04:19 -0400 Subject: [PATCH] x86/bugs, KVM: Support the combination of guest and host IBRS A guest may modify the SPEC_CTRL MSR from the value used by the kernel. Since the kernel doesn't use IBRS, this means a value of zero is what is needed in the host. But the 336996-Speculative-Execution-Side-Channel-Mitigations.pdf refers to the other bits as reserved so the kernel should respect the boot time SPEC_CTRL value and use that. This allows dealing with future extensions to the SPEC_CTRL interface if any at all. Note: This uses wrmsrl() instead of native_wrmsrl(). It does not make any difference as paravirt will over-write the callq *0xfff.. with the wrmsrl assembler code. Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Reviewed-by: Ingo Molnar [tyhicks: Minor backport for context] CVE-2018-3639 (x86) Signed-off-by: Tyler Hicks [backport to review svm.c/vmx.c] Signed-off-by: Stefan Bader --- arch/x86/include/asm/nospec-branch.h | 10 ++++++++++ arch/x86/kernel/cpu/bugs.c | 18 ++++++++++++++++++ arch/x86/kvm/svm.c | 10 +++------- arch/x86/kvm/vmx.c | 7 ++++--- 4 files changed, 35 insertions(+), 10 deletions(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 689dea581f85..67a5cbf150bd 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -176,6 +176,16 @@ enum spectre_v2_mitigation { extern void x86_spec_ctrl_set(u64); extern u64 x86_spec_ctrl_get_default(void); +/* + * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR + * the guest has, while on VMEXIT we restore the host view. This + * would be easier if SPEC_CTRL were architecturally maskable or + * shadowable for guests but this is not (currently) the case. + * Takes the guest view of SPEC_CTRL MSR as a parameter. 
+ */ +extern void x86_spec_ctrl_set_guest(u64); +extern void x86_spec_ctrl_restore_host(u64); + /* * On VMEXIT we must ensure that no RSB predictions learned in the guest * can be followed in the host, by overwriting the RSB completely. Both diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 384a1e5df1d6..934ed77137a0 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -122,6 +122,24 @@ u64 x86_spec_ctrl_get_default(void) } EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default); +void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl) +{ + if (!ibrs_inuse) + return; + if (x86_spec_ctrl_base != guest_spec_ctrl) + wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl); +} +EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest); + +void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl) +{ + if (!ibrs_inuse) + return; + if (x86_spec_ctrl_base != guest_spec_ctrl) + wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); +} +EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host); + static void __init spec2_print_if_insecure(const char *reason) { if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index a8c911fcd73f..f9e7ddd56ea6 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -4901,8 +4901,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) local_irq_enable(); - if (ibrs_inuse && (svm->spec_ctrl != FEATURE_ENABLE_IBRS)) - wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl); + /* SMB: Don't care about ibrs_inuse but rely on guest value */ + x86_spec_ctrl_set_guest(svm->spec_ctrl); asm volatile ( "push %%" _ASM_BP "; \n\t" @@ -4999,11 +4999,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) /* Eliminate branch target predictions from guest mode */ vmexit_fill_RSB(); - if (ibrs_inuse) { - rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl); - if (svm->spec_ctrl != FEATURE_ENABLE_IBRS) - wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS); - } + x86_spec_ctrl_restore_host(svm->spec_ctrl); #ifdef CONFIG_X86_64 wrmsrl(MSR_GS_BASE, svm->host.gs_base); 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 8c638ef4f409..7a7bd88b2e14 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -9104,9 +9104,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) atomic_switch_perf_msrs(vmx); - if (ibrs_inuse) - add_atomic_switch_msr(vmx, MSR_IA32_SPEC_CTRL, - vcpu->arch.spec_ctrl, FEATURE_ENABLE_IBRS); + /* SMB: Ignore ibrs_inuse but rely on vcpu value */ + x86_spec_ctrl_set_guest(vcpu->arch.spec_ctrl); debugctlmsr = get_debugctlmsr(); @@ -9230,6 +9229,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) #endif ); + x86_spec_ctrl_restore_host(vcpu->arch.spec_ctrl); + /* Eliminate branch target predictions from guest mode */ vmexit_fill_RSB(); -- 2.39.2