/*
 * x86 SVM helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    assert(0);
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

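/*
 * The VMCB stores segment attributes in the packed 12-bit SVM format,
 * while QEMU's SegmentCache keeps them in the descriptor layout
 * (type/S/DPL/P in bits 8-15, AVL/L/DB/G in bits 20-23).  The two
 * helpers below convert between the formats on every save and load.
 */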
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
             sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                             addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

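/*
 * VMRUN: stash the host state in the hsave page, load the guest state
 * and the intercept bitmaps from the VMCB pointed to by rAX, then
 * optionally inject the event requested in EVENTINJ before resuming
 * the guest.  do_vmexit() below performs the reverse switch.
 */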
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

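    /*
     * With nested paging enabled, cache the guest's NPT root and the
     * paging mode bits (PAE/LMA/NXE) so the MMU can translate
     * guest-physical addresses without re-reading the VMCB.
     */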
    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));
    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb,
                                                        control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        env->nested_pg_mode = 0;
        if (env->cr[4] & CR4_PAE_MASK) {
            env->nested_pg_mode |= SVM_NPT_PAE;
        }
        if (env->hflags & HF_LMA_MASK) {
            env->nested_pg_mode |= SVM_NPT_LMA;
        }
        if (env->efer & MSR_EFER_NXE) {
            env->nested_pg_mode |= SVM_NPT_NXE;
        }
    }

    /* enable intercepts */
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                     save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                     save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

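/*
 * VMMCALL traps to the hypervisor when intercepted; an unintercepted
 * VMMCALL raises #UD, so after the intercept check this helper only
 * needs to raise the exception.
 */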
void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

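/*
 * VMLOAD/VMSAVE transfer the state that VMRUN and #VMEXIT leave
 * alone: the hidden parts of FS, GS, TR and LDTR, KernelGSBase, the
 * STAR/LSTAR/CSTAR/SFMASK syscall MSRs and the SYSENTER MSRs.
 */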
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                               addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                 addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
             addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

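/*
 * STGI/CLGI toggle the Global Interrupt Flag, which gates delivery of
 * interrupts around the world switch; QEMU tracks it in HF2_GIF_MASK.
 */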
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC());
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = env_archcpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);
}

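/*
 * Check whether an intercept for TYPE is active in the current guest
 * and, if so, exit the guest via cpu_vmexit().  CR/DR accesses and
 * exceptions use the bitmaps cached at VMRUN time; MSR accesses also
 * consult the MSR permission map from the VMCB.
 */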
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
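            /*
             * The MSR permission map holds two bits per MSR (read and
             * write intercept) in three 2-Kbyte regions covering MSRs
             * 0-0x1fff, 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff.
             * t1 is the byte offset into the map, t0 the bit offset
             * within that byte; param selects the read (0) or write (1)
             * bit.
             */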
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                cpu_vmexit(env, type, param, retaddr);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                cpu_vmexit(env, type, param, retaddr);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    }
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    cpu_svm_check_intercept_param(env, type, param, GETPC());
}

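/*
 * I/O intercepts consult the I/O permission map: one bit per port,
 * and a multi-byte access is intercepted if any of the bits it covers
 * is set.  Bits 4-6 of param (the IOIO exit info) encode the access
 * size in bytes.
 */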
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

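/*
 * Raise a #VMEXIT from within a helper: record the exit code and
 * exit_info_1, then unwind to the cpu loop, whose exception handling
 * performs the actual world switch in do_vmexit().
 */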
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, retaddr, true);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT + exit_code;
    env->error_code = exit_info_1;

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}

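/*
 * The #VMEXIT world switch proper: write the guest state and the exit
 * information back into the VMCB, then reload the host state that
 * VMRUN saved in the hsave page.
 */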
void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = env_cpu(env);
    uint32_t int_ctl;

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}

#endif