target-i386/svm_helper.c
/*
 * x86 SVM helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/cpu-all.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

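/*
 * The helpers below marshal segment registers between the CPU state and
 * the vmcb_seg layout: the 16-bit "attrib" field packs the descriptor
 * attribute bits, so bits 8..15 of the cached flags map to attrib bits
 * 0..7 and flags bits 20..23 map to attrib bits 8..11.
 */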
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = ENV_GET_CPU(env);
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(cs->as, addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

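/*
 * VMRUN: save the current (host) state to the hsave area, load the guest
 * state from the VMCB whose physical address is in rAX, set GIF and, if
 * control.event_inj is marked valid, inject the described event into the
 * guest.
 */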
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = ENV_GET_CPU(env);
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                              control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(cs->as, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(cs->as, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.gdtr.base));
    env->gdt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.gdtr.limit));

    env->idt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.idtr.base));
    env->idt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(cs->as,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  ldq_phys(cs->as,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(cs->as,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(cs->as,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = ldq_phys(cs->as,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(cs->as,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(cs->as,
                                   env->vm_vmcb + offsetof(struct vmcb,
                                                           save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(cs->as,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                         control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(cs->as, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(env);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
                      env->error_code);
    }
}

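/*
 * VMMCALL is only useful when intercepted by the hypervisor; if it is not
 * intercepted it raises #UD, matching real hardware.
 */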
void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

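/*
 * VMLOAD: load the additional guest state that VMRUN does not touch
 * (FS, GS, TR, LDTR, KernelGSBase and the SYSCALL/SYSENTER MSRs) from the
 * VMCB whose physical address is in rAX.
 */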
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = ENV_GET_CPU(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                         save.kernel_gs_base));
    env->lstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(cs->as,
                                addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                         save.sysenter_esp));
    env->sysenter_eip = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                         save.sysenter_eip));
}

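/*
 * VMSAVE: store the same additional state that VMLOAD loads (FS, GS, TR,
 * LDTR, KernelGSBase and the SYSCALL/SYSENTER MSRs) into the VMCB whose
 * physical address is in rAX.
 */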
void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = ENV_GET_CPU(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(cs->as,
                                 addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

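/*
 * STGI/CLGI set and clear the Global Interrupt Flag, tracked here as
 * HF2_GIF_MASK in hflags2; while GIF is clear, interrupts are held
 * pending rather than delivered.
 */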
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

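/*
 * INVLPGA invalidates the TLB mapping for the virtual address in rAX;
 * the ASID operand (architecturally passed in ECX) is ignored here and
 * the page is flushed unconditionally.
 */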
void helper_invlpga(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

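/*
 * Check whether an intercept is active for the given exit code and raise
 * #VMEXIT if so.  MSR accesses are filtered through the MSR permission
 * bitmap: two bits per MSR (read, then write), with the MSR ranges
 * 0x00000000-0x00001fff, 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff
 * mapped at byte offsets 0, 0x800 and 0x1000 of the MSRPM respectively.
 */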
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    CPUState *cs = ENV_GET_CPU(env);

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(cs->as, addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}

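/*
 * Check an IN/OUT access against the I/O permission bitmap (one bit per
 * port; sized accesses test 1, 2 or 4 consecutive bits) and raise
 * SVM_EXIT_IOIO if the port is intercepted.  exit_info_2 receives the
 * address of the instruction following the IN/OUT.
 */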
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = ENV_GET_CPU(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

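/*
 * #VMEXIT: write the guest state and the exit code/info back into the
 * VMCB, restore the host state saved by VMRUN from the hsave area and
 * return control to the host at the instruction following VMRUN.
 */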
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.exit_info_2)),
                  env->eip);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(cs->as,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                              save.gdtr.base));
    env->gdt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                               save.gdtr.limit));

    env->idt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                              save.idtr.base));
    env->idt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                               save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                                 save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(cs->as,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(cs->as,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = ldq_phys(cs->as, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(cs->as, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(cs->as,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(cs->as,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(env);
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif