]> git.proxmox.com Git - mirror_qemu.git/blame - target-i386/svm_helper.c
target-arm: move arm_*_code to a separate file
[mirror_qemu.git] / target-i386 / svm_helper.c
CommitLineData
6bada5e8
BS
1/*
2 * x86 SVM helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include "cpu.h"
022c62cb 21#include "exec/cpu-all.h"
2ef6175a 22#include "exec/helper-proto.h"
6bada5e8 23
92fc4b58 24#if !defined(CONFIG_USER_ONLY)
022c62cb 25#include "exec/softmmu_exec.h"
92fc4b58
BS
26#endif /* !defined(CONFIG_USER_ONLY) */
27
6bada5e8
BS
28/* Secure Virtual Machine helpers */
29
30#if defined(CONFIG_USER_ONLY)
31
/* User-mode stub: SVM is not available without system emulation, so
   VMRUN is a no-op here. */
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}
35
/* User-mode stub: VMMCALL is a no-op without system emulation. */
void helper_vmmcall(CPUX86State *env)
{
}
39
/* User-mode stub: VMLOAD is a no-op without system emulation. */
void helper_vmload(CPUX86State *env, int aflag)
{
}
43
/* User-mode stub: VMSAVE is a no-op without system emulation. */
void helper_vmsave(CPUX86State *env, int aflag)
{
}
47
/* User-mode stub: STGI is a no-op without system emulation. */
void helper_stgi(CPUX86State *env)
{
}
51
/* User-mode stub: CLGI is a no-op without system emulation. */
void helper_clgi(CPUX86State *env)
{
}
55
/* User-mode stub: SKINIT is a no-op without system emulation. */
void helper_skinit(CPUX86State *env)
{
}
59
/* User-mode stub: INVLPGA is a no-op without system emulation. */
void helper_invlpga(CPUX86State *env, int aflag)
{
}
63
/* User-mode stub: #VMEXIT can never occur without system emulation. */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}
67
68void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
69{
70}
71
052e80d5
BS
/* User-mode stub: no intercepts exist without system emulation. */
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}
76
/* User-mode stub: no intercepts exist without system emulation. */
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}
81
/* User-mode stub: I/O permission-map checks do not apply without
   system emulation. */
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
86#else
87
/* Write one cached segment register out to a struct vmcb_seg located at
 * guest-physical address @addr.  QEMU keeps segment attribute bits in
 * sc->flags shifted up (bits 8..15 and 20..23), so the last store packs
 * them back into the 12-bit VMCB "attrib" encoding. */
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    stw_phys(cs->as, addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(cs->as, addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    /* Repack QEMU's expanded flags into the VMCB attrib format. */
    stw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
102
/* Read one segment register from a struct vmcb_seg at guest-physical
 * address @addr into *sc.  Inverse of svm_save_seg(): the packed 12-bit
 * VMCB "attrib" field is expanded back into QEMU's sc->flags layout. */
static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = lduw_phys(cs->as,
                             addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(cs->as, addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib));
    /* Expand the packed VMCB attrib bits into QEMU's flags layout. */
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}
116
a8170e5e 117static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
052e80d5 118 int seg_reg)
6bada5e8
BS
119{
120 SegmentCache sc1, *sc = &sc1;
121
052e80d5 122 svm_load_seg(env, addr, sc);
6bada5e8
BS
123 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
124 sc->base, sc->limit, sc->flags);
125}
126
052e80d5 127void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
6bada5e8 128{
19d6ca16 129 CPUState *cs = CPU(x86_env_get_cpu(env));
6bada5e8
BS
130 target_ulong addr;
131 uint32_t event_inj;
132 uint32_t int_ctl;
133
052e80d5 134 cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);
6bada5e8
BS
135
136 if (aflag == 2) {
4b34e3ad 137 addr = env->regs[R_EAX];
6bada5e8 138 } else {
4b34e3ad 139 addr = (uint32_t)env->regs[R_EAX];
6bada5e8
BS
140 }
141
142 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
143
144 env->vm_vmcb = addr;
145
146 /* save the current CPU state in the hsave page */
f606604f 147 stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
6bada5e8 148 env->gdt.base);
ab1da857 149 stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
6bada5e8
BS
150 env->gdt.limit);
151
f606604f 152 stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
6bada5e8 153 env->idt.base);
ab1da857 154 stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
6bada5e8
BS
155 env->idt.limit);
156
f606604f
EI
157 stq_phys(cs->as,
158 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
159 stq_phys(cs->as,
160 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
161 stq_phys(cs->as,
162 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
163 stq_phys(cs->as,
164 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
165 stq_phys(cs->as,
166 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
167 stq_phys(cs->as,
168 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
169
170 stq_phys(cs->as,
171 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
172 stq_phys(cs->as,
173 env->vm_hsave + offsetof(struct vmcb, save.rflags),
6bada5e8
BS
174 cpu_compute_eflags(env));
175
052e80d5 176 svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
6bada5e8 177 &env->segs[R_ES]);
052e80d5 178 svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
6bada5e8 179 &env->segs[R_CS]);
052e80d5 180 svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
6bada5e8 181 &env->segs[R_SS]);
052e80d5 182 svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
6bada5e8
BS
183 &env->segs[R_DS]);
184
f606604f 185 stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rip),
a78d0eab 186 env->eip + next_eip_addend);
f606604f
EI
187 stq_phys(cs->as,
188 env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
189 stq_phys(cs->as,
190 env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
6bada5e8
BS
191
192 /* load the interception bitmaps so we do not need to access the
193 vmcb in svm mode */
2c17449b 194 env->intercept = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
6bada5e8 195 control.intercept));
41701aa4 196 env->intercept_cr_read = lduw_phys(cs->as, env->vm_vmcb +
6bada5e8
BS
197 offsetof(struct vmcb,
198 control.intercept_cr_read));
41701aa4 199 env->intercept_cr_write = lduw_phys(cs->as, env->vm_vmcb +
6bada5e8
BS
200 offsetof(struct vmcb,
201 control.intercept_cr_write));
41701aa4 202 env->intercept_dr_read = lduw_phys(cs->as, env->vm_vmcb +
6bada5e8
BS
203 offsetof(struct vmcb,
204 control.intercept_dr_read));
41701aa4 205 env->intercept_dr_write = lduw_phys(cs->as, env->vm_vmcb +
6bada5e8
BS
206 offsetof(struct vmcb,
207 control.intercept_dr_write));
fdfba1a2 208 env->intercept_exceptions = ldl_phys(cs->as, env->vm_vmcb +
6bada5e8
BS
209 offsetof(struct vmcb,
210 control.intercept_exceptions
211 ));
212
213 /* enable intercepts */
214 env->hflags |= HF_SVMI_MASK;
215
2c17449b 216 env->tsc_offset = ldq_phys(cs->as, env->vm_vmcb +
6bada5e8
BS
217 offsetof(struct vmcb, control.tsc_offset));
218
2c17449b 219 env->gdt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
6bada5e8 220 save.gdtr.base));
fdfba1a2 221 env->gdt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
6bada5e8
BS
222 save.gdtr.limit));
223
2c17449b 224 env->idt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
6bada5e8 225 save.idtr.base));
fdfba1a2 226 env->idt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
6bada5e8
BS
227 save.idtr.limit));
228
229 /* clear exit_info_2 so we behave like the real hardware */
f606604f
EI
230 stq_phys(cs->as,
231 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6bada5e8 232
2c17449b
EI
233 cpu_x86_update_cr0(env, ldq_phys(cs->as,
234 env->vm_vmcb + offsetof(struct vmcb,
6bada5e8 235 save.cr0)));
2c17449b
EI
236 cpu_x86_update_cr4(env, ldq_phys(cs->as,
237 env->vm_vmcb + offsetof(struct vmcb,
6bada5e8 238 save.cr4)));
2c17449b
EI
239 cpu_x86_update_cr3(env, ldq_phys(cs->as,
240 env->vm_vmcb + offsetof(struct vmcb,
6bada5e8 241 save.cr3)));
2c17449b
EI
242 env->cr[2] = ldq_phys(cs->as,
243 env->vm_vmcb + offsetof(struct vmcb, save.cr2));
fdfba1a2
EI
244 int_ctl = ldl_phys(cs->as,
245 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6bada5e8
BS
246 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
247 if (int_ctl & V_INTR_MASKING_MASK) {
248 env->v_tpr = int_ctl & V_TPR_MASK;
249 env->hflags2 |= HF2_VINTR_MASK;
250 if (env->eflags & IF_MASK) {
251 env->hflags2 |= HF2_HIF_MASK;
252 }
253 }
254
255 cpu_load_efer(env,
2c17449b
EI
256 ldq_phys(cs->as,
257 env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6bada5e8 258 env->eflags = 0;
2c17449b
EI
259 cpu_load_eflags(env, ldq_phys(cs->as,
260 env->vm_vmcb + offsetof(struct vmcb,
6bada5e8
BS
261 save.rflags)),
262 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
263 CC_OP = CC_OP_EFLAGS;
264
052e80d5
BS
265 svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
266 R_ES);
267 svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
268 R_CS);
269 svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
270 R_SS);
271 svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
272 R_DS);
6bada5e8 273
2c17449b
EI
274 env->eip = ldq_phys(cs->as,
275 env->vm_vmcb + offsetof(struct vmcb, save.rip));
276
277 env->regs[R_ESP] = ldq_phys(cs->as,
278 env->vm_vmcb + offsetof(struct vmcb, save.rsp));
279 env->regs[R_EAX] = ldq_phys(cs->as,
280 env->vm_vmcb + offsetof(struct vmcb, save.rax));
281 env->dr[7] = ldq_phys(cs->as,
282 env->vm_vmcb + offsetof(struct vmcb, save.dr7));
283 env->dr[6] = ldq_phys(cs->as,
284 env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6bada5e8
BS
285
286 /* FIXME: guest state consistency checks */
287
2c17449b
EI
288 switch (ldub_phys(cs->as,
289 env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6bada5e8
BS
290 case TLB_CONTROL_DO_NOTHING:
291 break;
292 case TLB_CONTROL_FLUSH_ALL_ASID:
293 /* FIXME: this is not 100% correct but should work for now */
00c8cb0a 294 tlb_flush(cs, 1);
6bada5e8
BS
295 break;
296 }
297
298 env->hflags2 |= HF2_GIF_MASK;
299
300 if (int_ctl & V_IRQ_MASK) {
259186a7
AF
301 CPUState *cs = CPU(x86_env_get_cpu(env));
302
303 cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
6bada5e8
BS
304 }
305
306 /* maybe we need to inject an event */
fdfba1a2 307 event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
6bada5e8
BS
308 control.event_inj));
309 if (event_inj & SVM_EVTINJ_VALID) {
310 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
311 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
fdfba1a2 312 uint32_t event_inj_err = ldl_phys(cs->as, env->vm_vmcb +
6bada5e8
BS
313 offsetof(struct vmcb,
314 control.event_inj_err));
315
316 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
317 /* FIXME: need to implement valid_err */
318 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
319 case SVM_EVTINJ_TYPE_INTR:
27103424 320 cs->exception_index = vector;
6bada5e8
BS
321 env->error_code = event_inj_err;
322 env->exception_is_int = 0;
323 env->exception_next_eip = -1;
324 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
325 /* XXX: is it always correct? */
326 do_interrupt_x86_hardirq(env, vector, 1);
327 break;
328 case SVM_EVTINJ_TYPE_NMI:
27103424 329 cs->exception_index = EXCP02_NMI;
6bada5e8
BS
330 env->error_code = event_inj_err;
331 env->exception_is_int = 0;
a78d0eab 332 env->exception_next_eip = env->eip;
6bada5e8 333 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
5638d180 334 cpu_loop_exit(cs);
6bada5e8
BS
335 break;
336 case SVM_EVTINJ_TYPE_EXEPT:
27103424 337 cs->exception_index = vector;
6bada5e8
BS
338 env->error_code = event_inj_err;
339 env->exception_is_int = 0;
340 env->exception_next_eip = -1;
341 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
5638d180 342 cpu_loop_exit(cs);
6bada5e8
BS
343 break;
344 case SVM_EVTINJ_TYPE_SOFT:
27103424 345 cs->exception_index = vector;
6bada5e8
BS
346 env->error_code = event_inj_err;
347 env->exception_is_int = 1;
a78d0eab 348 env->exception_next_eip = env->eip;
6bada5e8 349 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
5638d180 350 cpu_loop_exit(cs);
6bada5e8
BS
351 break;
352 }
27103424 353 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
6bada5e8
BS
354 env->error_code);
355 }
356}
357
/* VMMCALL: after the intercept check, an un-intercepted VMMCALL raises
   #UD, as there is no hypervisor call to service here. */
void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}
363
/* VMLOAD: load the "extra" guest state (FS, GS, TR, LDTR and the
 * MSR-backed fields) from the VMCB whose physical address is in RAX.
 * @aflag == 2 selects 64-bit addressing (full RAX); otherwise RAX is
 * truncated to 32 bits. */
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(cs->as,
                                addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}
403
/* VMSAVE: store the "extra" guest state (FS, GS, TR, LDTR and the
 * MSR-backed fields) to the VMCB whose physical address is in RAX.
 * Inverse of helper_vmload(); @aflag has the same meaning. */
void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(cs->as,
                                 addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(cs->as,
             addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}
447
/* STGI: set the Global Interrupt Flag (tracked in hflags2). */
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}
453
/* CLGI: clear the Global Interrupt Flag (tracked in hflags2). */
void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}
459
/* SKINIT: secure init is not emulated; raise #UD after the intercept
   check. */
void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}
466
052e80d5 467void helper_invlpga(CPUX86State *env, int aflag)
6bada5e8 468{
31b030d4 469 X86CPU *cpu = x86_env_get_cpu(env);
6bada5e8
BS
470 target_ulong addr;
471
052e80d5 472 cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);
6bada5e8
BS
473
474 if (aflag == 2) {
4b34e3ad 475 addr = env->regs[R_EAX];
6bada5e8 476 } else {
4b34e3ad 477 addr = (uint32_t)env->regs[R_EAX];
6bada5e8
BS
478 }
479
480 /* XXX: could use the ASID to see if it is needed to do the
481 flush */
31b030d4 482 tlb_flush_page(CPU(cpu), addr);
6bada5e8
BS
483}
484
052e80d5
BS
/* Check whether the event described by (@type, @param) is intercepted
 * by the current VMCB configuration, and if so perform a #VMEXIT with
 * that exit code (helper_vmexit() does not return).
 *
 * Fast-exits immediately when SVM intercepts are not active
 * (HF_SVMI_MASK clear).  CR/DR/exception intercepts are checked against
 * the bitmaps cached in env at VMRUN time; MSR intercepts require
 * reading the MSR permission map from guest memory. */
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

            /* Map the MSR number in ECX to a (byte t1, bit t0) position
               in the MSR permission map; each MSR uses two bits. */
            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                /* MSR outside any mapped range: always intercepted. */
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            /* param selects read (0) vs write (1) bit of the pair. */
            if (ldub_phys(cs->as, addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}
560
/* Non-helper entry point for intercept checks; simply forwards to the
   TCG helper above. */
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}
566
/* Check the I/O permission map for an access to @port; if the access
 * is intercepted, record the resume RIP in exit_info_2 and #VMEXIT
 * with SVM_EXIT_IOIO (helper_vmexit() does not return).
 * @param carries the IOIO exit-info bits, including the access size in
 * bits 4..6; @next_eip_addend is the instruction length. */
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        /* One mask bit per byte of the access, derived from its size. */
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (lduw_phys(cs->as, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            stq_phys(cs->as,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
587
/* Note: currently only 32 bits of exit_code are used */
/* #VMEXIT: leave guest mode.  Saves the guest state and the exit
 * reason (@exit_code, @exit_info_1) into the VMCB, reloads the host
 * state from the hsave page, and re-enters the CPU loop at the host
 * RIP (this function does not return). */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    /* Record (and clear) a pending interrupt shadow in int_state. */
    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    /* Fold the current virtual-TPR and virtual-IRQ state back into
       int_ctl before writing it out. */
    int_ctl = ldl_phys(cs->as,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(cs->as,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(cs->as,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = ldq_phys(cs->as, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(cs->as, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(cs->as,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(cs->as,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    /* Propagate a pending event injection into exit_int_info so the
       hypervisor can see what was being delivered. */
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.event_inj)));
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.event_inj_err)));
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    cs->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(cs);
}
771
/* Non-helper entry point for a forced #VMEXIT; forwards to the TCG
   helper above (does not return). */
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}
776
777#endif