git.proxmox.com Git - pve-kernel.git/blob - patches/kernel/0014-KVM-x86-emulator-smm-use-smram-structs-in-the-common.patch
rebase patches on top of Ubuntu-5.19.0-14.14
[pve-kernel.git] / patches / kernel / 0014-KVM-x86-emulator-smm-use-smram-structs-in-the-common.patch
1 From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2 From: Maxim Levitsky <mlevitsk@redhat.com>
3 Date: Wed, 3 Aug 2022 18:50:06 +0300
4 Subject: [PATCH] KVM: x86: emulator/smm: use smram structs in the common code
5
6 Switch from using a raw array to 'union kvm_smram'.
7
8 Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
9 Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
10 ---
11 arch/x86/include/asm/kvm_host.h | 5 +++--
12 arch/x86/kvm/emulate.c | 12 +++++++-----
13 arch/x86/kvm/kvm_emulate.h | 3 ++-
14 arch/x86/kvm/svm/svm.c | 8 ++++++--
15 arch/x86/kvm/vmx/vmx.c | 4 ++--
16 arch/x86/kvm/x86.c | 16 ++++++++--------
17 6 files changed, 28 insertions(+), 20 deletions(-)
18
19 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
20 index 9217bd6cf0d1..65e05d56602f 100644
21 --- a/arch/x86/include/asm/kvm_host.h
22 +++ b/arch/x86/include/asm/kvm_host.h
23 @@ -202,6 +202,7 @@ typedef enum exit_fastpath_completion fastpath_t;
24
25 struct x86_emulate_ctxt;
26 struct x86_exception;
27 +union kvm_smram;
28 enum x86_intercept;
29 enum x86_intercept_stage;
30
31 @@ -1550,8 +1551,8 @@ struct kvm_x86_ops {
32 void (*setup_mce)(struct kvm_vcpu *vcpu);
33
34 int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
35 - int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
36 - int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
37 + int (*enter_smm)(struct kvm_vcpu *vcpu, union kvm_smram *smram);
38 + int (*leave_smm)(struct kvm_vcpu *vcpu, const union kvm_smram *smram);
39 void (*enable_smi_window)(struct kvm_vcpu *vcpu);
40
41 int (*mem_enc_ioctl)(struct kvm *kvm, void __user *argp);
42 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
43 index b16353468b61..05c4d9dfbced 100644
44 --- a/arch/x86/kvm/emulate.c
45 +++ b/arch/x86/kvm/emulate.c
46 @@ -2582,16 +2582,18 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
47 static int em_rsm(struct x86_emulate_ctxt *ctxt)
48 {
49 unsigned long cr0, cr4, efer;
50 - char buf[512];
51 + const union kvm_smram smram;
52 u64 smbase;
53 int ret;
54
55 + BUILD_BUG_ON(sizeof(smram) != 512);
56 +
57 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
58 return emulate_ud(ctxt);
59
60 smbase = ctxt->ops->get_smbase(ctxt);
61
62 - ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
63 + ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, (void *)&smram, sizeof(smram));
64 if (ret != X86EMUL_CONTINUE)
65 return X86EMUL_UNHANDLEABLE;
66
67 @@ -2641,15 +2643,15 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
68 * state (e.g. enter guest mode) before loading state from the SMM
69 * state-save area.
70 */
71 - if (ctxt->ops->leave_smm(ctxt, buf))
72 + if (ctxt->ops->leave_smm(ctxt, &smram))
73 goto emulate_shutdown;
74
75 #ifdef CONFIG_X86_64
76 if (emulator_has_longmode(ctxt))
77 - ret = rsm_load_state_64(ctxt, buf);
78 + ret = rsm_load_state_64(ctxt, (const char *)&smram);
79 else
80 #endif
81 - ret = rsm_load_state_32(ctxt, buf);
82 + ret = rsm_load_state_32(ctxt, (const char *)&smram);
83
84 if (ret != X86EMUL_CONTINUE)
85 goto emulate_shutdown;
86 diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
87 index 0eb13204bbc2..04ac0cef8b57 100644
88 --- a/arch/x86/kvm/kvm_emulate.h
89 +++ b/arch/x86/kvm/kvm_emulate.h
90 @@ -19,6 +19,7 @@
91 struct x86_emulate_ctxt;
92 enum x86_intercept;
93 enum x86_intercept_stage;
94 +union kvm_smram;
95
96 struct x86_exception {
97 u8 vector;
98 @@ -235,7 +236,7 @@ struct x86_emulate_ops {
99
100 unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
101 void (*exiting_smm)(struct x86_emulate_ctxt *ctxt);
102 - int (*leave_smm)(struct x86_emulate_ctxt *ctxt, const char *smstate);
103 + int (*leave_smm)(struct x86_emulate_ctxt *ctxt, const union kvm_smram *smram);
104 void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
105 int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
106 };
107 diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
108 index 44bbf25dfeb9..68c9a771b457 100644
109 --- a/arch/x86/kvm/svm/svm.c
110 +++ b/arch/x86/kvm/svm/svm.c
111 @@ -4299,12 +4299,14 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
112 return 1;
113 }
114
115 -static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
116 +static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
117 {
118 struct vcpu_svm *svm = to_svm(vcpu);
119 struct kvm_host_map map_save;
120 int ret;
121
122 + char *smstate = (char *)smram;
123 +
124 if (!is_guest_mode(vcpu))
125 return 0;
126
127 @@ -4346,7 +4348,7 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
128 return 0;
129 }
130
131 -static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
132 +static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
133 {
134 struct vcpu_svm *svm = to_svm(vcpu);
135 struct kvm_host_map map, map_save;
136 @@ -4354,6 +4356,8 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
137 struct vmcb *vmcb12;
138 int ret;
139
140 + const char *smstate = (const char *)smram;
141 +
142 if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
143 return 0;
144
145 diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
146 index be7c19374fdd..26803e4d64c6 100644
147 --- a/arch/x86/kvm/vmx/vmx.c
148 +++ b/arch/x86/kvm/vmx/vmx.c
149 @@ -7725,7 +7725,7 @@ static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
150 return !is_smm(vcpu);
151 }
152
153 -static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
154 +static int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
155 {
156 struct vcpu_vmx *vmx = to_vmx(vcpu);
157
158 @@ -7739,7 +7739,7 @@ static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
159 return 0;
160 }
161
162 -static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
163 +static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
164 {
165 struct vcpu_vmx *vmx = to_vmx(vcpu);
166 int ret;
167 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
168 index 1b6f92546f3d..e48e7b7b8dde 100644
169 --- a/arch/x86/kvm/x86.c
170 +++ b/arch/x86/kvm/x86.c
171 @@ -7853,9 +7853,9 @@ static void emulator_exiting_smm(struct x86_emulate_ctxt *ctxt)
172 }
173
174 static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt,
175 - const char *smstate)
176 + const union kvm_smram *smram)
177 {
178 - return static_call(kvm_x86_leave_smm)(emul_to_vcpu(ctxt), smstate);
179 + return static_call(kvm_x86_leave_smm)(emul_to_vcpu(ctxt), smram);
180 }
181
182 static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
183 @@ -9764,25 +9764,25 @@ static void enter_smm(struct kvm_vcpu *vcpu)
184 struct kvm_segment cs, ds;
185 struct desc_ptr dt;
186 unsigned long cr0;
187 - char buf[512];
188 + union kvm_smram smram;
189
190 - memset(buf, 0, 512);
191 + memset(smram.bytes, 0, sizeof(smram.bytes));
192 #ifdef CONFIG_X86_64
193 if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
194 - enter_smm_save_state_64(vcpu, buf);
195 + enter_smm_save_state_64(vcpu, (char *)&smram);
196 else
197 #endif
198 - enter_smm_save_state_32(vcpu, buf);
199 + enter_smm_save_state_32(vcpu, (char *)&smram);
200
201 /*
202 * Give enter_smm() a chance to make ISA-specific changes to the vCPU
203 * state (e.g. leave guest mode) after we've saved the state into the
204 * SMM state-save area.
205 */
206 - static_call(kvm_x86_enter_smm)(vcpu, buf);
207 + static_call(kvm_x86_enter_smm)(vcpu, &smram);
208
209 kvm_smm_changed(vcpu, true);
210 - kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
211 + kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, &smram, sizeof(smram));
212
213 if (static_call(kvm_x86_get_nmi_mask)(vcpu))
214 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;