/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

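/* The VMCB stores segment attributes in a packed 12-bit form: bits 7:0
   hold descriptor bits 47:40 (type, S, DPL, P) and bits 11:8 hold
   descriptor bits 55:52 (AVL, L, D/B, G).  QEMU's SegmentCache keeps
   these fields at their native descriptor positions in 'flags', so the
   two helpers below shift between the encodings in each direction. */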
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

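/* VMRUN: rAX holds the physical address of the guest VMCB.  The helper
   mirrors what the hardware does: the current (host) state is written
   to the hsave page set up via the VM_HSAVE_PA MSR, the intercept
   configuration and guest state are loaded from the VMCB, GIF is set,
   and any event pending in the VMCB's EVENTINJ field is injected
   before the first guest instruction runs. */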
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp),
                 env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax),
                 env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

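    /* EVENTINJ encodes the event to deliver on entry: bits 7:0 hold
       the vector, bits 10:8 the type, bit 11 whether an error code
       (read below as event_inj_err from the high dword) is valid, and
       bit 31 the valid bit for the whole field. */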
    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

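/* VMMCALL is only meaningful when a hypervisor intercepts it; executed
   without an intercept it raises #UD, which is the fall-through below. */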
void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

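/* VMLOAD/VMSAVE transfer the portion of the register state that VMRUN
   and #VMEXIT leave alone: FS, GS, TR and LDTR including their hidden
   descriptor state, KernelGSBase, STAR/LSTAR/CSTAR/SFMASK and the
   SYSENTER MSRs.  As with VMRUN, rAX supplies the physical address of
   the VMCB to load from or store to. */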
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}

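/* STGI and CLGI toggle the global interrupt flag, which gates all
   interrupt sources while the hypervisor switches world state;
   interrupts arriving while GIF is clear are held pending.  QEMU
   tracks GIF as HF2_GIF_MASK in hflags2. */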
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC());
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to decide whether the flush is
       needed at all */
    tlb_flush_page(CPU(cpu), addr);
}

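/* Central intercept check: a no-op unless the CPU is running guest
   code (HF_SVMI_MASK), otherwise the bitmaps cached at VMRUN time
   decide whether to exit to the host via cpu_vmexit().  MSR accesses
   consult the MSR permission map instead: two bits per MSR (read,
   then write) in three 2-Kbyte blocks covering the ranges 0-0x1fff,
   0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff.  For example,
   ECX = 0xc0000080 (EFER) maps to bit offset (8192 + 0x80) * 2 = 16640,
   i.e. byte t1 = 2080 with t0 = 0, so bit 0 intercepts reads and
   bit 1 (selected by param) intercepts writes. */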
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                cpu_vmexit(env, type, param, retaddr);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                cpu_vmexit(env, type, param, retaddr);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    }
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    cpu_svm_check_intercept_param(env, type, param, GETPC());
}

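/* I/O intercepts use the I/O permission map: one bit per port.  Bits
   6:4 of 'param' hold the access size in bytes (the IOIO exit-info
   SZ8/SZ16/SZ32 bits), so 'mask' covers one bit per accessed port.
   The 16-bit read below spans byte boundaries, catching multi-byte
   accesses that straddle two map bytes.  For example, a 4-byte access
   to port 0x3f8 yields mask = 0xf and tests bits 3:0 of the 16-bit
   word at iopm_base_pa + 127. */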
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* exit_info_2 receives the return address (next env->eip) */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

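/* #VMEXIT is the inverse of VMRUN: the guest state is written back to
   the VMCB along with the exit code and exit information, the host
   state is reloaded from the hsave page, GIF is cleared, and execution
   resumes at the instruction following VMRUN. */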
/* Note: currently only 32 bits of exit_code are used */
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    if (retaddr) {
        cpu_restore_state(cs, retaddr);
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
                 exit_info_1);

    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    cs->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(cs);
}

#endif