/*
 * x86 SVM helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)
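
/* SVM is only meaningful with system emulation; in user-mode emulation
   there is no VMCB to operate on, so the helpers are no-op stubs. */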
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else
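
/* The VMCB stores segment attributes in a packed 12-bit form; the two
   helpers below convert between that layout and the flags word of
   QEMU's SegmentCache. */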
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

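/* VMRUN: save the host state to the hsave page, load the guest state
   from the VMCB, and deliver any event the hypervisor queued in
   EVENTINJ. */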
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

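    /* The VMCB physical address comes from rAX; aflag appears to carry
       the effective address size from the translator, with 2 meaning
       64-bit, so 32-bit guests only use the low half of the register. */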
    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

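    /* The saved host rip already points past the VMRUN instruction, so
       a later #VMEXIT resumes the host at the following instruction. */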
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp),
                 env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax),
                 env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                     save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                     save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
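    /* With V_INTR_MASKING the guest gets its own virtual copy of IF:
       HF2_HIF remembers whether the host had interrupts enabled and
       HF2_VINTR marks that virtual interrupt masking is in effect. */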
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb,
                                                            save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb,
                                                            save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb,
                                                  control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                         control.event_inj));
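    /* EVENTINJ packs a vector, an event type and an optional error code;
       if the valid bit is set, the event is delivered on guest entry. */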
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

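/* VMLOAD and VMSAVE transfer the state that VMRUN and #VMEXIT leave
   untouched: FS, GS, TR and LDTR including their hidden parts, plus the
   syscall/sysenter MSRs. */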
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb,
                                                    save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb,
                                                     save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs),
                 env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}

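/* STGI and CLGI toggle the global interrupt flag, which gates all
   interrupt sources while the hypervisor is inside its world-switch
   critical section. */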
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.msrpm_base_pa));
            uint32_t t0, t1;

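            /* The MSR permission map holds two bits per MSR (read and
               write) in three 2K regions covering 0..0x1fff,
               0xc0000000..0xc0001fff and 0xc0010000..0xc0011fff; t1 is
               the byte offset into the map, t0 the bit offset, and
               param selects the read (0) or write (1) bit. */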
            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

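        /* The I/O permission map holds one bit per port; an access of
           N bytes must test N consecutive bits, hence the mask built
           from the size field in bits 4..6 of param. */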
        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb,
                                                 control.exit_info_2),
                         env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

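    /* If the guest was in an interrupt shadow (e.g. just after STI or
       MOV SS), record that in the VMCB so the hypervisor can see it. */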
    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

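    /* Write the current V_TPR and any still-pending virtual interrupt
       back to int_ctl for the hypervisor to inspect. */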
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb,
                                                   control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                 int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp),
                 env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax),
                 env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
                 exit_info_1);

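    /* An event that was still being injected when the exit happened is
       reported back in exit_int_info, and event_inj is cleared so it is
       not delivered again. */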
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb,
                                         control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    cs->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

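    /* cpu_loop_exit() unwinds straight back to the main execution loop,
       so helper_vmexit() never returns to the TB that called it. */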
    cpu_loop_exit(cs);
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif