/*
 * x86 SVM helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/cpu-all.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

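/*
 * Helpers to copy a segment descriptor between QEMU's SegmentCache and the
 * VMCB segment layout.  The VMCB "attrib" field packs the descriptor
 * attribute bits (type/S/DPL/P in bits 0-7, AVL/L/DB/G in bits 8-11),
 * while SegmentCache.flags keeps them at their native positions in the
 * descriptor's second dword (bits 8-15 and 20-23), hence the shifts below.
 */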
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    stw_phys(cs->as, addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(cs->as, addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = lduw_phys(cs->as,
                             addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(cs->as, addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

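/*
 * VMRUN: save the current (host) CPU state into the host save area at
 * env->vm_hsave, then load the guest state and intercept settings from the
 * VMCB whose physical address is in rAX.  aflag gives the address size of
 * the VMRUN instruction: 2 means a 64-bit address, so the full RAX is used;
 * otherwise only the low 32 bits of EAX are taken.
 */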
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                              control.intercept));
    env->intercept_cr_read = lduw_phys(cs->as, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(cs->as, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(cs->as, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(cs->as, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(cs->as, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(cs->as, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.gdtr.base));
    env->gdt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.gdtr.limit));

    env->idt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.idtr.base));
    env->idt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(cs->as,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  ldq_phys(cs->as,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(cs->as,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(cs->as,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = ldq_phys(cs->as,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(cs->as,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(cs->as,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
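    /*
     * EVENTINJ layout: the vector to inject is in bits 0-7, the event type
     * (external interrupt, NMI, exception or software interrupt) in bits
     * 8-10, bit 11 says whether event_inj_err carries a valid error code,
     * and bit 31 marks the whole field as valid.
     */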
    event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                         control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(cs->as, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

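/*
 * VMMCALL only makes sense if the hypervisor intercepts it; when it is not
 * intercepted it raises #UD, which is what the EXCP06_ILLOP below delivers.
 */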
void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

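/*
 * VMLOAD loads the part of the guest state that VMRUN does not touch from
 * the VMCB at rAX: FS, GS, TR and LDTR (including their hidden parts),
 * KernelGsBase, the STAR family of syscall MSRs and the SYSENTER MSRs.
 */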
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                         save.kernel_gs_base));
    env->lstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(cs->as,
                                addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                         save.sysenter_esp));
    env->sysenter_eip = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                         save.sysenter_eip));
}

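/*
 * VMSAVE is the mirror image of VMLOAD: it stores the same subset of the
 * current CPU state into the VMCB at rAX.
 */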
void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(cs->as,
                                 addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(cs->as,
             addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

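/*
 * STGI and CLGI set and clear the Global Interrupt Flag, which is kept in
 * hflags2 as HF2_GIF_MASK.  While GIF is clear, interrupts and other
 * external events are held pending.
 */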
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

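/*
 * INVLPGA invalidates the TLB mapping for the virtual address in rAX
 * within the ASID given in ECX.  Since this implementation keeps a single
 * TLB, the ASID is ignored and the page is simply flushed.
 */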
void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see whether the flush is needed at all */
    tlb_flush_page(CPU(cpu), addr);
}

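/*
 * Check whether an instruction or event of the given type is intercepted
 * by the hypervisor and, if so, raise the corresponding #VMEXIT.  CR/DR
 * accesses and exceptions are checked against the intercept bitmaps that
 * VMRUN cached in the CPU state, MSR accesses consult the MSR permission
 * map in guest memory, and everything else uses the generic 64-bit
 * intercept word.
 */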
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

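            /*
             * The MSR permission map covers three 2K byte ranges for the
             * MSR numbers 0-0x1fff, 0xc0000000-0xc0001fff and
             * 0xc0010000-0xc0011fff.  Each MSR gets two bits (read
             * intercept at the even bit, write at the odd one), so compute
             * the byte offset t1 and the bit offset t0 of its entry;
             * param is 0 for a read and 1 for a write and selects which
             * of the two bits to test.
             */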
            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(cs->as, addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}

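/*
 * For IN/OUT instructions the intercept decision is made from the I/O
 * permission map: one bit per port number.  param is the IOIO exit
 * information, whose bits 4-6 encode the access size in bytes, so the mask
 * below covers every port byte touched by the access; on an intercepted
 * access the address of the following instruction is stored in exit_info_2
 * before taking the #VMEXIT.
 */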
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (lduw_phys(cs->as, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            stq_phys(cs->as,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

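/*
 * #VMEXIT: write the guest state and the exit code/information back into
 * the VMCB, then restore the host state that VMRUN stashed in the hsave
 * area and return control to the host at the instruction following VMRUN.
 */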
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.exit_info_2)),
                  env->eip);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

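    /*
     * Reflect the current virtual interrupt state back into the VMCB:
     * write back the current V_TPR and set V_IRQ if a virtual interrupt
     * is still pending.
     */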
    int_ctl = ldl_phys(cs->as,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                              save.gdtr.base));
    env->gdt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                               save.gdtr.limit));

    env->idt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                              save.idtr.base));
    env->idt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                               save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                                 save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(cs->as,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(cs->as,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = ldq_phys(cs->as, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(cs->as, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(cs->as,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(cs->as,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.event_inj)));
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.event_inj_err)));
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    cs->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(cs);
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif