1 From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2 From: Maxim Levitsky <mlevitsk@redhat.com>
3 Date: Tue, 21 Jun 2022 18:09:01 +0300
4 Subject: [PATCH] KVM: x86: SVM: use smram structs
6 This removes the last user of put_smstate/GET_SMSTATE, so
7 these functions are removed as well.
9 Also add a sanity check that we don't attempt to enter SMM
10 on a non-long-mode-capable guest CPU while a nested guest is running.
12 Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
13 Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
15 arch/x86/include/asm/kvm_host.h | 6 ------
16 arch/x86/kvm/svm/svm.c | 28 +++++++++++++++++-----------
17 2 files changed, 17 insertions(+), 17 deletions(-)
19 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
20 index 9217bd6cf0d1..7d9fd7dcbacd 100644
21 --- a/arch/x86/include/asm/kvm_host.h
22 +++ b/arch/x86/include/asm/kvm_host.h
23 @@ -2041,12 +2041,6 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
27 -#define put_smstate(type, buf, offset, val) \
28 - *(type *)((buf) + (offset) - 0x7e00) = val
30 -#define GET_SMSTATE(type, buf, offset) \
31 - (*(type *)((buf) + (offset) - 0x7e00))
33 int kvm_cpu_dirty_log_size(void);
35 int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
36 diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
37 index 44bbf25dfeb9..e26084734c1b 100644
38 --- a/arch/x86/kvm/svm/svm.c
39 +++ b/arch/x86/kvm/svm/svm.c
40 @@ -4301,6 +4301,7 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
42 static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
44 + struct kvm_smram_state_64 *smram = (struct kvm_smram_state_64 *)smstate;
45 struct vcpu_svm *svm = to_svm(vcpu);
46 struct kvm_host_map map_save;
48 @@ -4308,10 +4309,17 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
49 if (!is_guest_mode(vcpu))
52 - /* FED8h - SVM Guest */
53 - put_smstate(u64, smstate, 0x7ed8, 1);
54 - /* FEE0h - SVM Guest VMCB Physical Address */
55 - put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
57 + * 32 bit SMRAM format doesn't preserve EFER and SVM state.
58 + * SVM should not be enabled by the userspace without marking
59 + * the CPU as at least long mode capable.
62 + if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
65 + smram->svm_guest_flag = 1;
66 + smram->svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa;
68 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
69 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
70 @@ -4348,9 +4356,9 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
72 static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
74 + struct kvm_smram_state_64 *smram = (struct kvm_smram_state_64 *)smstate;
75 struct vcpu_svm *svm = to_svm(vcpu);
76 struct kvm_host_map map, map_save;
77 - u64 saved_efer, vmcb12_gpa;
81 @@ -4358,18 +4366,16 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
84 /* Non-zero if SMI arrived while vCPU was in guest mode. */
85 - if (!GET_SMSTATE(u64, smstate, 0x7ed8))
86 + if (!smram->svm_guest_flag)
89 if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
92 - saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
93 - if (!(saved_efer & EFER_SVME))
94 + if (!(smram->efer & EFER_SVME))
97 - vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
98 - if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
99 + if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram->svm_guest_vmcb_gpa), &map) == -EINVAL)
103 @@ -4395,7 +4401,7 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
105 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
106 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
107 - ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
108 + ret = enter_svm_guest_mode(vcpu, smram->svm_guest_vmcb_gpa, vmcb12, false);