struct kvm_ldttss_desc *tss_desc;
struct page *save_area;
+
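+ /* VMCB that last ran on this physical CPU; compared in svm_vcpu_load() to decide when an IBPB is needed. */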
+ struct vmcb *current_vmcb;
};
static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, svm);
+
+ /*
+ * The VMCB page could be recycled for a new vCPU at the same address,
+ * causing a false negative in the current_vmcb check in svm_vcpu_load()
+ * and skipping the IBPB there; issue one now so the next guest cannot
+ * consume branch predictions trained by this vCPU.
+ */
+ if (ibpb_inuse)
+ wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
}
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
int i;
if (unlikely(cpu != vcpu->cpu)) {
if (static_cpu_has(X86_FEATURE_RDTSCP))
wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
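+ /*
+ * A different VMCB on this CPU means a different guest context:
+ * flush indirect branch predictions so the incoming guest cannot
+ * consume the previous guest's training.
+ */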
+ if (sd->current_vmcb != svm->vmcb) {
+ sd->current_vmcb = svm->vmcb;
+ if (ibpb_inuse)
+ wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
+ }
+
avic_vcpu_load(vcpu, cpu);
}
local_irq_enable();
- if (ibrs_inuse && (svm->spec_ctrl != FEATURE_ENABLE_IBRS))
- wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+ /*
+ * Don't check ibrs_inuse here; hand the guest's SPEC_CTRL value to
+ * x86_spec_ctrl_set_guest(), which performs any needed write of
+ * MSR_IA32_SPEC_CTRL itself.
+ */
+ x86_spec_ctrl_set_guest(svm->spec_ctrl);
asm volatile (
"push %%" _ASM_BP "; \n\t"
/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();
- if (ibrs_inuse) {
- rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
- if (svm->spec_ctrl != FEATURE_ENABLE_IBRS)
- wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
- }
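+ /* Let the helper restore the host's SPEC_CTRL value now that the guest has run. */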
+ x86_spec_ctrl_restore_host(svm->spec_ctrl);
#ifdef CONFIG_X86_64
wrmsrl(MSR_GS_BASE, svm->host.gs_base);