/*
 * x86 SVM helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

/* Secure Virtual Machine helpers */

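/*
 * In user-mode emulation there is no system state to virtualize, so the
 * SVM helpers below are empty stubs.
 */
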
#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

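/*
 * The VMCB stores segment attributes in a packed 12-bit form: descriptor
 * bits 47:40 (type, S, DPL, P) live in attrib[7:0] and descriptor bits
 * 55:52 (AVL, L, D/B, G) in attrib[11:8].  The shift/mask pairs in
 * svm_save_seg()/svm_load_seg() below convert between that layout and the
 * descriptor-style flags QEMU keeps in SegmentCache.flags.
 */
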
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

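/*
 * VMRUN: save the host context into the hsave area, load the guest
 * context and the intercept controls from the VMCB, then inject the
 * event described by the VMCB event_inj field, if any.
 */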
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

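    /* aflag == 2 means a 64-bit address size: the VMCB address is taken
       from the whole of RAX, otherwise it is zero-extended from EAX. */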
    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp),
                 env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax),
                 env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                     save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                     save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
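    /* With V_INTR_MASKING set, the guest sees a virtual copy of IF and the
       virtual TPR; the host's interrupt flag is remembered in HF2_HIF_MASK
       so it can be restored on #VMEXIT. */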
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb,
                                                            save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb,
                                                            save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb,
                                                  control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

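    /* event_inj format: bits 7:0 vector, bits 10:8 event type, bit 11
       "deliver error code" (the code itself sits in event_inj_err), and
       bit 31 marks the whole field valid. */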
    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

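/*
 * VMLOAD/VMSAVE transfer the state that VMRUN and #VMEXIT leave alone:
 * FS, GS, TR and LDTR including their hidden parts, plus KernelGsBase,
 * STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs.
 */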
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb,
                                                    save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb,
                                                     save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs),
                 env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
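    /* The MSR permission map holds two bits per MSR (bit 0 of each pair
       intercepts reads, bit 1 writes; param selects which) in three 2K
       blocks covering MSRs 0x0-0x1fff, 0xc0000000-0xc0001fff and
       0xc0010000-0xc0011fff.  t1 is the byte offset into the map and t0
       the bit offset within that byte. */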
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}

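/*
 * The I/O permission map holds one bit per port.  The size field of the
 * IOIO exit information (param bits 6:4) gives the access width in bytes,
 * so an access is intercepted when any of the width's bits starting at
 * the port's bit is set.
 */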
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb,
                                                 control.exit_info_2),
                         env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

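/*
 * #VMEXIT is the inverse of VMRUN: the guest state plus the exit code and
 * exit information are written back to the VMCB, the host context is
 * reloaded from the hsave area, and GIF is cleared until the host runs
 * STGI again.
 */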
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb,
                                                   control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                 int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp),
                 env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax),
                 env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
                 exit_info_1);

    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb,
                                         control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    cs->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(cs);
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif