/*
 * x86 SVM helpers (sysemu only)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"

/* Secure Virtual Machine helpers */

static void svm_save_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
                         const SegmentCache *sc)
{
    cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
                      sc->selector, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
                      sc->base, mmu_idx, 0);
    cpu_stl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
                      sc->limit, mmu_idx, 0);
    cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
                      ((sc->flags >> 8) & 0xff)
                      | ((sc->flags >> 12) & 0x0f00),
                      mmu_idx, 0);
}

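/*
 * Note on the attrib packing above: QEMU keeps the descriptor attribute
 * bits in SegmentCache.flags at bits 8..15 (type/S/DPL/P) and 20..23
 * (AVL/L/D-B/G), while the VMCB attrib field stores the same bits
 * contiguously in bits 0..11, so the shift pair re-packs flags[15:8]
 * into attrib[7:0] and flags[23:20] into attrib[11:8].
 */
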
/*
 * VMRUN and VMLOAD canonicalize (i.e., sign-extend to bit 63) all base
 * addresses in the segment registers that have been loaded.
 */
static inline void svm_canonicalization(CPUX86State *env, target_ulong *seg_base)
{
    uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env);
    /* int64_t, not long: the arithmetic shift must be 64-bit even on
       LLP64 hosts where long is only 32 bits. */
    *seg_base = ((((int64_t) *seg_base) << shift_amt) >> shift_amt);
}

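/*
 * Example: with a 48-bit virtual address width, shift_amt is 16, so a
 * loaded base of 0x0000800000000000 (bit 47 set) comes back from the
 * shift pair as the canonical value 0xffff800000000000.
 */
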
static void svm_load_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
                         SegmentCache *sc)
{
    unsigned int flags;

    sc->selector =
        cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
                           mmu_idx, 0);
    sc->base =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
                          mmu_idx, 0);
    sc->limit =
        cpu_ldl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
                          mmu_idx, 0);
    flags =
        cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
                           mmu_idx, 0);
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);

    svm_canonicalization(env, &sc->base);
}

static void svm_load_seg_cache(CPUX86State *env, int mmu_idx,
                               hwaddr addr, int seg_reg)
{
    SegmentCache sc;

    svm_load_seg(env, mmu_idx, addr, &sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc.selector,
                           sc.base, sc.limit, sc.flags);
}

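/*
 * These mirror the EFER consistency checks that the AMD APM requires
 * VMRUN to perform on the loaded guest state; any hit makes VMRUN fail
 * with the VMEXIT_INVALID (SVM_EXIT_ERR) exit code.
 */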
static inline bool is_efer_invalid_state(CPUX86State *env)
{
    if (!(env->efer & MSR_EFER_SVME)) {
        return true;
    }

    if (env->efer & MSR_EFER_RESERVED) {
        return true;
    }

    if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
        !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
        && !(env->cr[4] & CR4_PAE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
        && !(env->cr[0] & CR0_PE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
        && (env->cr[4] & CR4_PAE_MASK)
        && (env->segs[R_CS].flags & DESC_L_MASK)
        && (env->segs[R_CS].flags & DESC_B_MASK)) {
        return true;
    }

    return false;
}

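/*
 * With the VGIF extension, the guest's global interrupt flag lives in
 * the V_GIF bit of int_ctl (when V_GIF_ENABLED is also set), so STGI and
 * CLGI toggle that bit instead of the real GIF and need not #VMEXIT.
 */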
static inline bool virtual_gif_enabled(CPUX86State *env)
{
    if (likely(env->hflags & HF_GUEST_MASK)) {
        return (env->features[FEAT_SVM] & CPUID_SVM_VGIF)
               && (env->int_ctl & V_GIF_ENABLED_MASK);
    }
    return false;
}

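/*
 * Virtual VMLOAD/VMSAVE lets a nested guest execute VMLOAD/VMSAVE
 * without exiting, with the VMCB access translated through the nested
 * page tables; that is why callers switch to MMU_NESTED_IDX when this
 * returns true.  It requires nested paging and long mode; otherwise the
 * helper forces a #VMEXIT with the given exit code.
 */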
static inline bool virtual_vm_load_save_enabled(CPUX86State *env,
                                                uint32_t exit_code,
                                                uintptr_t retaddr)
{
    uint64_t lbr_ctl;

    if (likely(env->hflags & HF_GUEST_MASK)) {
        if (likely(!(env->hflags2 & HF2_NPT_MASK)) || !(env->efer & MSR_EFER_LMA)) {
            cpu_vmexit(env, exit_code, 0, retaddr);
        }

        lbr_ctl = x86_ldl_phys(env_cpu(env), env->vm_vmcb + offsetof(struct vmcb,
                                                              control.lbr_ctl));
        return (env->features[FEAT_SVM] & CPUID_SVM_V_VMSAVE_VMLOAD)
                && (lbr_ctl & V_VMLOAD_VMSAVE_ENABLED_MASK);
    }

    return false;
}

static inline bool virtual_gif_set(CPUX86State *env)
{
    return !virtual_gif_enabled(env) || (env->int_ctl & V_GIF_MASK);
}

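/*
 * VMRUN: save the host context to the hsave area, load the guest context
 * from the VMCB addressed by rAX (aflag selects the 64- vs 32-bit view
 * of rAX), then enter the guest, optionally injecting the event
 * described by event_inj.
 */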
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    X86CPU *cpu = env_archcpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t asid;
    uint64_t new_cr0;
    uint64_t new_cr3;
    uint64_t new_cr4;

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* Exceptions are checked before the intercept. */
    if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));
    asid = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                    control.asid));

    uint64_t msrpm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                    offsetof(struct vmcb,
                                             control.msrpm_base_pa));
    uint64_t iopm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));

    if ((msrpm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_MSRPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    if ((iopm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_IOPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

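    /*
     * Per the APM, VMRUN fails with the special VMEXIT_INVALID exit code
     * (SVM_EXIT_ERR) if the VMRUN intercept bit is clear or the guest
     * ASID is zero; both are checked below.
     */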
    env->nested_pg_mode = 0;

    if (!cpu_svm_has_intercept(env, SVM_EXIT_VMRUN)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (asid == 0) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb,
                                                        control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK;

        tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX);
    }

    /* enable intercepts */
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
    if (new_cr0 & SVM_CR0_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if ((new_cr0 & CR0_NW_MASK) && !(new_cr0 & CR0_CD_MASK)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr3 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3));
    if ((env->efer & MSR_EFER_LMA) &&
        (new_cr3 & ((~0ULL) << cpu->phys_bits))) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr4 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4));
    if (new_cr4 & cr4_reserved_bits(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, new_cr0);
    cpu_x86_update_cr4(env, new_cr4);
    cpu_x86_update_cr3(env, new_cr3);
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
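    /*
     * With V_INTR_MASKING set, the guest's IF masks only virtual
     * interrupts; remember the host's IF in HF2_HIF so that physical
     * interrupt delivery still honours it while the guest runs.
     */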
    env->int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (env->int_ctl & V_INTR_MASKING_MASK) {
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.es), R_ES);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.cs), R_CS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.ss), R_SS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.ds), R_DS);
    svm_load_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.idtr), &env->idt);
    svm_load_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.gdtr), &env->gdt);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

#ifdef TARGET_X86_64
    if (env->dr[6] & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (env->dr[7] & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
#endif

    if (is_efer_invalid_state(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    switch (x86_ldub_phys(cs,
                    env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (ctl_has_irq(env)) {
        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    if (virtual_gif_set(env)) {
        env->hflags2 |= HF2_VGIF_MASK;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            if (vector == EXCP02_NMI || vector >= 31) {
                cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
            }
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        default:
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

void helper_vmload(CPUX86State *env, int aflag)
{
    int mmu_idx = MMU_PHYS_IDX;
    target_ulong addr;

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* Exceptions are checked before the intercept. */
    if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMLOAD, GETPC())) {
        mmu_idx = MMU_NESTED_IDX;
    }

    svm_load_seg_cache(env, mmu_idx,
                       addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, mmu_idx,
                       addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, mmu_idx,
                 addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, mmu_idx,
                 addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase =
        cpu_ldq_mmuidx_ra(env,
                          addr + offsetof(struct vmcb, save.kernel_gs_base),
                          mmu_idx, 0);
    env->lstar =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
                          mmu_idx, 0);
    env->cstar =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
                          mmu_idx, 0);
    env->fmask =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
                          mmu_idx, 0);
    svm_canonicalization(env, &env->kernelgsbase);
#endif
    env->star =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
                          mmu_idx, 0);
    env->sysenter_cs =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
                          mmu_idx, 0);
    env->sysenter_esp =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
                          mmu_idx, 0);
    env->sysenter_eip =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
                          mmu_idx, 0);
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    int mmu_idx = MMU_PHYS_IDX;
    target_ulong addr;

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* Exceptions are checked before the intercept. */
    if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMSAVE, GETPC())) {
        mmu_idx = MMU_NESTED_IDX;
    }

    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.kernel_gs_base),
                      env->kernelgsbase, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
                      env->lstar, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
                      env->cstar, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
                      env->fmask, mmu_idx, 0);
#endif
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
                      env->star, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
                      env->sysenter_cs, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
                      env->sysenter_esp, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
                      env->sysenter_eip, mmu_idx, 0);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());

    if (virtual_gif_enabled(env)) {
        env->int_ctl |= V_GIF_MASK;
        env->hflags2 |= HF2_VGIF_MASK;
    } else {
        env->hflags2 |= HF2_GIF_MASK;
    }
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());

    if (virtual_gif_enabled(env)) {
        env->int_ctl &= ~V_GIF_MASK;
        env->hflags2 &= ~HF2_VGIF_MASK;
    } else {
        env->hflags2 &= ~HF2_GIF_MASK;
    }
}

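/*
 * Map an SVM exit code back to the intercept bit that controls it: CR
 * and DR accesses and exceptions have their own bit vectors; everything
 * else is a bit position relative to SVM_EXIT_INTR in the main 64-bit
 * intercept word.
 */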
bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
{
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            return true;
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            return true;
        }
        break;
    }
    return false;
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }

    if (!cpu_svm_has_intercept(env, type)) {
        return;
    }

    if (type == SVM_EXIT_MSR) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
        uint32_t t0, t1;

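        /*
         * MSR permission bitmap layout (APM vol. 2): two intercept bits
         * per MSR (read, then write), packed into three 2 KiB blocks
         * covering MSRs 0..0x1fff, 0xc0000000..0xc0001fff and
         * 0xc0010000..0xc0011fff.  t1 is the byte offset into the bitmap
         * and t0 the bit offset within that byte; param selects the read
         * (0) or write (1) bit of the pair.
         */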
        switch ((uint32_t)env->regs[R_ECX]) {
        case 0 ... 0x1fff:
            t0 = (env->regs[R_ECX] * 2) % 8;
            t1 = (env->regs[R_ECX] * 2) / 8;
            break;
        case 0xc0000000 ... 0xc0001fff:
            t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        case 0xc0010000 ... 0xc0011fff:
            t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        default:
            cpu_vmexit(env, type, param, retaddr);
            t0 = 0;
            t1 = 0;
            break;
        }
        if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
            cpu_vmexit(env, type, param, retaddr);
        }
        return;
    }

    cpu_vmexit(env, type, param, retaddr);
}

void helper_svm_check_intercept(CPUX86State *env, uint32_t type)
{
    cpu_svm_check_intercept_param(env, type, 0, GETPC());
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

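        /*
         * I/O permission bitmap: one intercept bit per port.  param bits
         * 4..6 hold the access size as a one-hot of 1/2/4 bytes, so
         * "mask" has one bit per byte-wide port touched; a 16-bit load
         * is enough even for a 4-byte access starting at bit offset 7.
         */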
        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, retaddr);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT;
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                             control.exit_info_1), exit_info_1);

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}

void do_vmexit(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;
    tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX);

    /* Save the VM state in the vmcb */
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), env->int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->int_ctl = 0;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                        save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                        save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.es), R_ES);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.cs), R_CS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.ss), R_SS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.ds), R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

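    /*
     * Real hardware reports in exit_int_info only an event that was
     * still mid-delivery when the #VMEXIT was taken; copying event_inj
     * over unconditionally is an approximation of that.
     */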
    /* other setups */
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    env->hflags2 &= ~HF2_VGIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}