struct kvm_ldttss_desc *tss_desc;
struct page *save_area;
-
- struct vmcb *current_vmcb;
};
/*
 * Per-CPU pointer to this CPU's SVM host state (TSS descriptor, host save
 * area page, and — per the struct above — a cached current_vmcb pointer).
 * NOTE(review): allocation/initialization of each CPU's entry is not visible
 * in this chunk — presumably done in the hardware-setup path; verify there.
 */
static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, svm);
-
- /*
- * The VMCB could be recycled, causing a false negative in svm_vcpu_load;
- * block speculative execution.
- */
- if (ibpb_inuse)
- wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
}
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
int i;
if (unlikely(cpu != vcpu->cpu)) {
if (static_cpu_has(X86_FEATURE_RDTSCP))
wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
- if (sd->current_vmcb != svm->vmcb) {
- sd->current_vmcb = svm->vmcb;
- if (ibpb_inuse)
- wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
- }
-
avic_vcpu_load(vcpu, cpu);
}