git.proxmox.com Git - pve-kernel.git/blob - patches/kernel/0295-x86-svm-Set-IBPB-when-running-a-different-VCPU.patch
9b2262c68d589488e81edaa14e42d10cc860db45
[pve-kernel.git] / patches / kernel / 0295-x86-svm-Set-IBPB-when-running-a-different-VCPU.patch
1 From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2 From: Tom Lendacky <thomas.lendacky@amd.com>
3 Date: Wed, 20 Dec 2017 10:55:47 +0000
4 Subject: [PATCH] x86/svm: Set IBPB when running a different VCPU
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9 CVE-2017-5753
10 CVE-2017-5715
11
12 Set IBPB (Indirect Branch Prediction Barrier) when the current CPU is
13 going to run a VCPU different from what was previously run.
14
15 Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
16 Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
17 Signed-off-by: Andy Whitcroft <apw@canonical.com>
18 Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
19 (cherry picked from commit 0ba3eaabbb6666ebd344ee80534e58c375a00810)
20 Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
21 ---
22 arch/x86/kvm/svm.c | 16 ++++++++++++++++
23 1 file changed, 16 insertions(+)
24
25 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
26 index a1b19e810c49..fade4869856a 100644
27 --- a/arch/x86/kvm/svm.c
28 +++ b/arch/x86/kvm/svm.c
29 @@ -518,6 +518,8 @@ struct svm_cpu_data {
30 struct kvm_ldttss_desc *tss_desc;
31
32 struct page *save_area;
33 +
34 + struct vmcb *current_vmcb;
35 };
36
37 static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
38 @@ -1685,11 +1687,19 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
39 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
40 kvm_vcpu_uninit(vcpu);
41 kmem_cache_free(kvm_vcpu_cache, svm);
42 +
43 + /*
44 + * The VMCB could be recycled, causing a false negative in svm_vcpu_load;
45 + * block speculative execution.
46 + */
47 + if (ibpb_inuse)
48 + wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
49 }
50
51 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
52 {
53 struct vcpu_svm *svm = to_svm(vcpu);
54 + struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
55 int i;
56
57 if (unlikely(cpu != vcpu->cpu)) {
58 @@ -1718,6 +1728,12 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
59 if (static_cpu_has(X86_FEATURE_RDTSCP))
60 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
61
62 + if (sd->current_vmcb != svm->vmcb) {
63 + sd->current_vmcb = svm->vmcb;
64 + if (ibpb_inuse)
65 + wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
66 + }
67 +
68 avic_vcpu_load(vcpu, cpu);
69 }
70
71 --
72 2.14.2
73