/*
 * x86 SVM helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

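/* In user-mode emulation there is no system state to virtualize, so
   the SVM helpers below are no-op stubs. */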
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

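/* Note: the VMCB keeps segment attributes in AMD's packed 12-bit form
   (type/S/DPL/P in bits 0..7, AVL/L/DB/G in bits 8..11), while QEMU's
   SegmentCache.flags keeps the same bits at their raw descriptor
   positions (8..15 and 20..23).  svm_save_seg() and svm_load_seg()
   below convert between the two layouts. */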
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

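    /* aflag == 2 selects 64-bit address size; for smaller address
       sizes the VMCB address in rAX is truncated to 32 bits. */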
    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp),
                 env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax),
                 env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                             offsetof(struct vmcb,
                                                      control.intercept_exceptions
                                                      ));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
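    /* With V_INTR_MASKING set, the guest's IF flag only masks virtual
       interrupts; the host's IF state is remembered in HF2_HIF_MASK
       until #VMEXIT restores it. */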
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
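    /* EVENTINJ layout: vector in bits 0..7, type in bits 8..10,
       error-code-valid in bit 11, valid in bit 31; the error code
       itself sits in the adjacent event_inj_err field. */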
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                         control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.msrpm_base_pa));
            uint32_t t0, t1;

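            /* The MSR permission map holds two intercept bits per MSR
               (read and write), in three 2K regions covering the MSR
               ranges 0..0x1fff, 0xc0000000..0xc0001fff and
               0xc0010000..0xc0011fff; t1 becomes the byte index into
               the map and t0 the bit index within that byte. */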
            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

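        /* The I/O permission map holds one intercept bit per port.
           Bits 4..6 of param carry the one-hot access size (1, 2 or
           4 bytes), so mask has one bit set per byte accessed. */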
        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.exit_info_2)),
                  env->eip);

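    /* Record a pending interrupt shadow (e.g. the instruction after
       STI or MOV SS) in the VMCB interrupt-state field. */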
    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp),
                 env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax),
                 env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                                 save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
                 exit_info_1);

    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    cs->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(cs);
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif