/*
 * x86 SVM helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/cpu-all.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

/* Secure Virtual Machine helpers */

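/*
 * With CONFIG_USER_ONLY there is no machine state to virtualize, so every
 * SVM helper below is an empty stub; the real implementations follow in
 * the softmmu half of the file.
 */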
#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

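/*
 * The VMCB stores segment attributes in a packed 12-bit "attrib" field
 * (descriptor bits 40-47 in bits 0-7, bits 52-55 in bits 8-11), while
 * QEMU's SegmentCache keeps the raw descriptor flags.  svm_save_seg() and
 * svm_load_seg() convert between the two layouts.
 */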
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    stw_phys(cs->as, addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(cs->as, addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = lduw_phys(cs->as,
                             addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(cs->as, addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

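/*
 * VMRUN: check the intercept, save the host state into the hsave area,
 * cache the intercept bitmaps in the CPU state so they can be tested
 * without touching guest memory, load the guest state from the VMCB and
 * finally inject a pending event if EVENTINJ is marked valid.
 */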
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                              control.intercept));
    env->intercept_cr_read = lduw_phys(cs->as, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(cs->as, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(cs->as, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(cs->as, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(cs->as, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(cs->as, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.gdtr.base));
    env->gdt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.gdtr.limit));

    env->idt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.idtr.base));
    env->idt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

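    /*
     * Load the guest control registers through cpu_x86_update_cr*() so the
     * MMU configuration and the hflags derived from them are recomputed to
     * match the guest values.
     */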
    cpu_x86_update_cr0(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
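    /*
     * V_INTR_MASKING gives the guest its own virtual TPR and interrupt
     * flag; HF2_HIF_MASK remembers whether the host had interrupts enabled
     * at VMRUN time so physical interrupt delivery can still be decided on
     * the host's behalf.
     */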
    int_ctl = ldl_phys(cs->as,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  ldq_phys(cs->as,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(cs->as,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(cs->as,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = ldq_phys(cs->as,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(cs->as,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(cs->as,
                                   env->vm_vmcb + offsetof(struct vmcb,
                                                           save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(cs->as,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

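    /*
     * EVENTINJ carries a vector, an event type (SVM_EVTINJ_TYPE_*), an
     * error-code-valid flag and a valid bit; when valid, the event is
     * delivered as the very first thing inside the guest.
     */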
    /* maybe we need to inject an event */
    event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                         control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(cs->as, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

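/*
 * VMMCALL either triggers the SVM_EXIT_VMMCALL intercept or, when it is not
 * intercepted, raises #UD just as real hardware does.
 */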
void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

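/*
 * VMLOAD/VMSAVE transfer the part of the CPU state that VMRUN and #VMEXIT
 * leave alone: FS, GS, TR, LDTR, KernelGSBase, the STAR/LSTAR/CSTAR/SFMASK
 * syscall MSRs and the SYSENTER MSRs.
 */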
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                         save.kernel_gs_base));
    env->lstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(cs->as,
                                addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                         save.sysenter_esp));
    env->sysenter_eip = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                         save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(cs->as,
                                 addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(cs->as,
             addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

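/*
 * STGI and CLGI set and clear the global interrupt flag (GIF), tracked in
 * hflags2; while GIF is clear, interrupts are held pending.
 */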
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see whether the flush is really needed */
    tlb_flush_page(CPU(cpu), addr);
}

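/*
 * Check a single intercept: CR/DR accesses and exceptions are tested
 * against the bitmaps cached at VMRUN time; MSR accesses additionally
 * consult the MSR permission map in guest memory; everything else uses the
 * general intercept vector indexed from SVM_EXIT_INTR.
 */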
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

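            /*
             * The MSR permission map keeps two bits per MSR (read bit, then
             * write bit).  Three ranges of 0x2000 MSRs each (based at 0x0,
             * 0xc0000000 and 0xc0010000) map to consecutive 2KB chunks; t1
             * is the byte offset into the map, t0 the bit offset, and param
             * selects the read or write bit of the pair.
             */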
            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(cs->as, addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}

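/*
 * The I/O permission map holds one bit per port.  Bits 4-6 of param encode
 * the access size in bytes, so the mask below covers every port byte the
 * access touches before the exit information is written back.
 */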
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (lduw_phys(cs->as, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            stq_phys(cs->as,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

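/*
 * #VMEXIT: write the guest state and the exit code/information back into
 * the VMCB, reload the host state from the hsave area, clear GIF and drop
 * any pending exception before returning to the outer (host) context.
 */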
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.exit_info_2)),
                  env->eip);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(cs->as,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                              save.gdtr.base));
    env->gdt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                               save.gdtr.limit));

    env->idt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                              save.idtr.base));
    env->idt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                               save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                                 save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(cs->as,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(cs->as,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = ldq_phys(cs->as, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(cs->as, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(cs->as,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(cs->as,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.event_inj)));
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.event_inj_err)));
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    cs->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(cs);
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif