/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "qemu-common.h"
#include "x86hvf.h"
#include "vmx.h"
#include "vmcs.h"
#include "cpu.h"
#include "x86_descr.h"
#include "x86_decode.h"

#include "hw/i386/apic_internal.h"

#include <stdio.h>
#include <stdlib.h>
#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include <stdint.h>

38 | void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg, | |
39 | SegmentCache *qseg, bool is_tr) | |
40 | { | |
41 | vmx_seg->sel = qseg->selector; | |
42 | vmx_seg->base = qseg->base; | |
43 | vmx_seg->limit = qseg->limit; | |
44 | ||
45 | if (!qseg->selector && !x86_is_real(cpu) && !is_tr) { | |
46 | /* the TR register is usable after processor reset despite | |
47 | * having a null selector */ | |
48 | vmx_seg->ar = 1 << 16; | |
49 | return; | |
50 | } | |
51 | vmx_seg->ar = (qseg->flags >> DESC_TYPE_SHIFT) & 0xf; | |
52 | vmx_seg->ar |= ((qseg->flags >> DESC_G_SHIFT) & 1) << 15; | |
53 | vmx_seg->ar |= ((qseg->flags >> DESC_B_SHIFT) & 1) << 14; | |
54 | vmx_seg->ar |= ((qseg->flags >> DESC_L_SHIFT) & 1) << 13; | |
55 | vmx_seg->ar |= ((qseg->flags >> DESC_AVL_SHIFT) & 1) << 12; | |
56 | vmx_seg->ar |= ((qseg->flags >> DESC_P_SHIFT) & 1) << 7; | |
57 | vmx_seg->ar |= ((qseg->flags >> DESC_DPL_SHIFT) & 3) << 5; | |
58 | vmx_seg->ar |= ((qseg->flags >> DESC_S_SHIFT) & 1) << 4; | |
59 | } | |
60 | ||
61 | void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg) | |
62 | { | |
63 | qseg->limit = vmx_seg->limit; | |
64 | qseg->base = vmx_seg->base; | |
65 | qseg->selector = vmx_seg->sel; | |
66 | qseg->flags = ((vmx_seg->ar & 0xf) << DESC_TYPE_SHIFT) | | |
67 | (((vmx_seg->ar >> 4) & 1) << DESC_S_SHIFT) | | |
68 | (((vmx_seg->ar >> 5) & 3) << DESC_DPL_SHIFT) | | |
69 | (((vmx_seg->ar >> 7) & 1) << DESC_P_SHIFT) | | |
70 | (((vmx_seg->ar >> 12) & 1) << DESC_AVL_SHIFT) | | |
71 | (((vmx_seg->ar >> 13) & 1) << DESC_L_SHIFT) | | |
72 | (((vmx_seg->ar >> 14) & 1) << DESC_B_SHIFT) | | |
73 | (((vmx_seg->ar >> 15) & 1) << DESC_G_SHIFT); | |
74 | } | |
75 | ||
/*
 * Serialize QEMU's FPU/XSAVE state into the vcpu's xsave buffer and
 * push it to the HVF vcpu.  Aborts on hypervisor write failure.
 */
void hvf_put_xsave(CPUState *cpu_state)
{

    struct X86XSaveArea *xsave;

    xsave = X86_CPU(cpu_state)->env.kvm_xsave_buf;

    /* Gather all xsave areas from the QEMU CPU state into the buffer. */
    x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave);

    /* NOTE(review): 4096 presumably equals the xsave area size expected
     * by hv_vcpu_write_fpstate — confirm against sizeof(struct
     * X86XSaveArea). */
    if (hv_vcpu_write_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) {
        abort();
    }
}
89 | ||
/*
 * Push QEMU's descriptor tables, control registers and all segment
 * registers into the guest VMCS, then flush the vcpu so the cached
 * VMCS writes take effect.
 */
void hvf_put_segments(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;
    struct vmx_segment seg;

    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE, env->idt.base);

    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);

    /* wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR2, env->cr[2]); */
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3, env->cr[3]);
    vmx_update_tpr(cpu_state);
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER, env->efer);

    /* CR0/CR4 go through the macvm wrappers rather than raw wvmcs. */
    macvm_set_cr4(cpu_state->hvf_fd, env->cr[4]);
    macvm_set_cr0(cpu_state->hvf_fd, env->cr[0]);

    /* Convert and write every segment register; only TR passes
     * is_tr=true (null-selector TR stays usable, see hvf_set_segment). */
    hvf_set_segment(cpu_state, &seg, &env->segs[R_CS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_CS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_DS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_DS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_ES], false);
    vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_ES);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_SS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_SS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_FS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_FS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_GS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_GS);

    hvf_set_segment(cpu_state, &seg, &env->tr, true);
    vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_TR);

    hvf_set_segment(cpu_state, &seg, &env->ldt, false);
    vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_LDTR);

    hv_vcpu_flush(cpu_state->hvf_fd);
}
135 | ||
/*
 * Write the sysenter/syscall and segment-base MSRs from QEMU state
 * into the HVF vcpu, and synchronize the guest TSC.
 */
void hvf_put_msrs(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;

    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS,
                      env->sysenter_cs);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP,
                      env->sysenter_esp);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP,
                      env->sysenter_eip);

    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_STAR, env->star);

#ifdef TARGET_X86_64
    /* 64-bit-only syscall MSRs. */
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_CSTAR, env->cstar);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, env->kernelgsbase);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FMASK, env->fmask);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_LSTAR, env->lstar);
#endif

    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_GSBASE, env->segs[R_GS].base);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FSBASE, env->segs[R_FS].base);

    /* if (!osx_is_sierra())
         wvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET, env->tsc - rdtscp());*/
    hv_vm_sync_tsc(env->tsc);
}
163 | ||
164 | ||
/*
 * Read the vcpu's FPU/XSAVE state from HVF into the xsave buffer and
 * restore it into QEMU's CPU state.  Aborts on hypervisor read failure.
 */
void hvf_get_xsave(CPUState *cpu_state)
{
    struct X86XSaveArea *xsave;

    xsave = X86_CPU(cpu_state)->env.kvm_xsave_buf;

    /* NOTE(review): 4096 presumably matches the xsave area size used by
     * hvf_put_xsave — confirm against sizeof(struct X86XSaveArea). */
    if (hv_vcpu_read_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) {
        abort();
    }

    x86_cpu_xrstor_all_areas(X86_CPU(cpu_state), xsave);
}
177 | ||
/*
 * Read descriptor tables, control registers and all segment registers
 * back from the guest VMCS into QEMU's CPU state.
 */
void hvf_get_segments(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;

    struct vmx_segment seg;

    /* No software interrupt is pending for re-injection after a sync. */
    env->interrupt_injected = -1;

    vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_CS);
    hvf_get_segment(&env->segs[R_CS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_DS);
    hvf_get_segment(&env->segs[R_DS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_ES);
    hvf_get_segment(&env->segs[R_ES], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_FS);
    hvf_get_segment(&env->segs[R_FS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_GS);
    hvf_get_segment(&env->segs[R_GS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_SS);
    hvf_get_segment(&env->segs[R_SS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_TR);
    hvf_get_segment(&env->tr, &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_LDTR);
    hvf_get_segment(&env->ldt, &seg);

    env->idt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT);
    env->idt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE);
    env->gdt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
    env->gdt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE);

    env->cr[0] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR0);
    /* CR2 is not read back from the VMCS here; it is zeroed instead. */
    env->cr[2] = 0;
    env->cr[3] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3);
    env->cr[4] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR4);

    env->efer = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER);
}
222 | ||
/*
 * Read the sysenter/syscall MSRs from the HVF vcpu into QEMU state and
 * reconstruct the guest TSC from the host counter plus the VMCS offset.
 */
void hvf_get_msrs(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;
    uint64_t tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS, &tmp);
    env->sysenter_cs = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP, &tmp);
    env->sysenter_esp = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP, &tmp);
    env->sysenter_eip = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_STAR, &env->star);

#ifdef TARGET_X86_64
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_CSTAR, &env->cstar);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, &env->kernelgsbase);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_FMASK, &env->fmask);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_LSTAR, &env->lstar);
#endif

    /* NOTE(review): the APICBASE value read here is discarded — the
     * result is never stored into env.  Verify whether this read is
     * intentional (side effect?) or dead code. */
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_APICBASE, &tmp);

    env->tsc = rdtscp() + rvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET);
}
250 | ||
/*
 * Push the full QEMU CPU state (GPRs, flags, RIP, XCR0, xsave state,
 * segments, MSRs and debug registers) into the HVF vcpu.
 * Always returns 0.
 */
int hvf_put_registers(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;

    wreg(cpu_state->hvf_fd, HV_X86_RAX, env->regs[R_EAX]);
    wreg(cpu_state->hvf_fd, HV_X86_RBX, env->regs[R_EBX]);
    wreg(cpu_state->hvf_fd, HV_X86_RCX, env->regs[R_ECX]);
    wreg(cpu_state->hvf_fd, HV_X86_RDX, env->regs[R_EDX]);
    wreg(cpu_state->hvf_fd, HV_X86_RBP, env->regs[R_EBP]);
    wreg(cpu_state->hvf_fd, HV_X86_RSP, env->regs[R_ESP]);
    wreg(cpu_state->hvf_fd, HV_X86_RSI, env->regs[R_ESI]);
    wreg(cpu_state->hvf_fd, HV_X86_RDI, env->regs[R_EDI]);
    wreg(cpu_state->hvf_fd, HV_X86_R8, env->regs[8]);
    wreg(cpu_state->hvf_fd, HV_X86_R9, env->regs[9]);
    wreg(cpu_state->hvf_fd, HV_X86_R10, env->regs[10]);
    wreg(cpu_state->hvf_fd, HV_X86_R11, env->regs[11]);
    wreg(cpu_state->hvf_fd, HV_X86_R12, env->regs[12]);
    wreg(cpu_state->hvf_fd, HV_X86_R13, env->regs[13]);
    wreg(cpu_state->hvf_fd, HV_X86_R14, env->regs[14]);
    wreg(cpu_state->hvf_fd, HV_X86_R15, env->regs[15]);
    wreg(cpu_state->hvf_fd, HV_X86_RFLAGS, env->eflags);
    wreg(cpu_state->hvf_fd, HV_X86_RIP, env->eip);

    wreg(cpu_state->hvf_fd, HV_X86_XCR0, env->xcr0);

    hvf_put_xsave(cpu_state);

    hvf_put_segments(cpu_state);

    hvf_put_msrs(cpu_state);

    wreg(cpu_state->hvf_fd, HV_X86_DR0, env->dr[0]);
    wreg(cpu_state->hvf_fd, HV_X86_DR1, env->dr[1]);
    wreg(cpu_state->hvf_fd, HV_X86_DR2, env->dr[2]);
    wreg(cpu_state->hvf_fd, HV_X86_DR3, env->dr[3]);
    wreg(cpu_state->hvf_fd, HV_X86_DR4, env->dr[4]);
    wreg(cpu_state->hvf_fd, HV_X86_DR5, env->dr[5]);
    wreg(cpu_state->hvf_fd, HV_X86_DR6, env->dr[6]);
    wreg(cpu_state->hvf_fd, HV_X86_DR7, env->dr[7]);

    return 0;
}
294 | ||
/*
 * Read the full vcpu state (GPRs, flags, RIP, xsave state, XCR0,
 * segments, MSRs and debug registers) from HVF back into QEMU's
 * CPU state.  Always returns 0.
 */
int hvf_get_registers(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;


    env->regs[R_EAX] = rreg(cpu_state->hvf_fd, HV_X86_RAX);
    env->regs[R_EBX] = rreg(cpu_state->hvf_fd, HV_X86_RBX);
    env->regs[R_ECX] = rreg(cpu_state->hvf_fd, HV_X86_RCX);
    env->regs[R_EDX] = rreg(cpu_state->hvf_fd, HV_X86_RDX);
    env->regs[R_EBP] = rreg(cpu_state->hvf_fd, HV_X86_RBP);
    env->regs[R_ESP] = rreg(cpu_state->hvf_fd, HV_X86_RSP);
    env->regs[R_ESI] = rreg(cpu_state->hvf_fd, HV_X86_RSI);
    env->regs[R_EDI] = rreg(cpu_state->hvf_fd, HV_X86_RDI);
    env->regs[8] = rreg(cpu_state->hvf_fd, HV_X86_R8);
    env->regs[9] = rreg(cpu_state->hvf_fd, HV_X86_R9);
    env->regs[10] = rreg(cpu_state->hvf_fd, HV_X86_R10);
    env->regs[11] = rreg(cpu_state->hvf_fd, HV_X86_R11);
    env->regs[12] = rreg(cpu_state->hvf_fd, HV_X86_R12);
    env->regs[13] = rreg(cpu_state->hvf_fd, HV_X86_R13);
    env->regs[14] = rreg(cpu_state->hvf_fd, HV_X86_R14);
    env->regs[15] = rreg(cpu_state->hvf_fd, HV_X86_R15);

    env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
    env->eip = rreg(cpu_state->hvf_fd, HV_X86_RIP);

    hvf_get_xsave(cpu_state);
    env->xcr0 = rreg(cpu_state->hvf_fd, HV_X86_XCR0);

    hvf_get_segments(cpu_state);
    hvf_get_msrs(cpu_state);

    env->dr[0] = rreg(cpu_state->hvf_fd, HV_X86_DR0);
    env->dr[1] = rreg(cpu_state->hvf_fd, HV_X86_DR1);
    env->dr[2] = rreg(cpu_state->hvf_fd, HV_X86_DR2);
    env->dr[3] = rreg(cpu_state->hvf_fd, HV_X86_DR3);
    env->dr[4] = rreg(cpu_state->hvf_fd, HV_X86_DR4);
    env->dr[5] = rreg(cpu_state->hvf_fd, HV_X86_DR5);
    env->dr[6] = rreg(cpu_state->hvf_fd, HV_X86_DR6);
    env->dr[7] = rreg(cpu_state->hvf_fd, HV_X86_DR7);

    return 0;
}
338 | ||
339 | static void vmx_set_int_window_exiting(CPUState *cpu) | |
340 | { | |
341 | uint64_t val; | |
342 | val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS); | |
343 | wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val | | |
344 | VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING); | |
345 | } | |
346 | ||
347 | void vmx_clear_int_window_exiting(CPUState *cpu) | |
348 | { | |
349 | uint64_t val; | |
350 | val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS); | |
351 | wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val & | |
352 | ~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING); | |
353 | } | |
354 | ||
355 | #define NMI_VEC 2 | |
356 | ||
357 | bool hvf_inject_interrupts(CPUState *cpu_state) | |
358 | { | |
c97d6d2c SAGDR |
359 | X86CPU *x86cpu = X86_CPU(cpu_state); |
360 | CPUX86State *env = &x86cpu->env; | |
361 | ||
b7394c83 SAGDR |
362 | uint8_t vector; |
363 | uint64_t intr_type; | |
364 | bool have_event = true; | |
365 | if (env->interrupt_injected != -1) { | |
366 | vector = env->interrupt_injected; | |
367 | intr_type = VMCS_INTR_T_SWINTR; | |
368 | } else if (env->exception_injected != -1) { | |
369 | vector = env->exception_injected; | |
370 | if (vector == EXCP03_INT3 || vector == EXCP04_INTO) { | |
371 | intr_type = VMCS_INTR_T_SWEXCEPTION; | |
372 | } else { | |
373 | intr_type = VMCS_INTR_T_HWEXCEPTION; | |
374 | } | |
375 | } else if (env->nmi_injected) { | |
376 | vector = NMI_VEC; | |
377 | intr_type = VMCS_INTR_T_NMI; | |
378 | } else { | |
379 | have_event = false; | |
380 | } | |
381 | ||
c97d6d2c | 382 | uint64_t info = 0; |
b7394c83 SAGDR |
383 | if (have_event) { |
384 | info = vector | intr_type | VMCS_INTR_VALID; | |
c97d6d2c | 385 | uint64_t reason = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_REASON); |
b7394c83 | 386 | if (env->nmi_injected && reason != EXIT_REASON_TASK_SWITCH) { |
c97d6d2c SAGDR |
387 | vmx_clear_nmi_blocking(cpu_state); |
388 | } | |
b7394c83 SAGDR |
389 | |
390 | if (!(env->hflags2 & HF2_NMI_MASK) || intr_type != VMCS_INTR_T_NMI) { | |
c97d6d2c SAGDR |
391 | info &= ~(1 << 12); /* clear undefined bit */ |
392 | if (intr_type == VMCS_INTR_T_SWINTR || | |
c97d6d2c | 393 | intr_type == VMCS_INTR_T_SWEXCEPTION) { |
b7394c83 | 394 | wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INST_LENGTH, env->ins_len); |
c97d6d2c SAGDR |
395 | } |
396 | ||
b7394c83 SAGDR |
397 | if (env->has_error_code) { |
398 | wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_EXCEPTION_ERROR, | |
399 | env->error_code); | |
c97d6d2c SAGDR |
400 | } |
401 | /*printf("reinject %lx err %d\n", info, err);*/ | |
402 | wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info); | |
403 | }; | |
404 | } | |
405 | ||
406 | if (cpu_state->interrupt_request & CPU_INTERRUPT_NMI) { | |
b7394c83 | 407 | if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) { |
c97d6d2c SAGDR |
408 | cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI; |
409 | info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | NMI_VEC; | |
410 | wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info); | |
411 | } else { | |
412 | vmx_set_nmi_window_exiting(cpu_state); | |
413 | } | |
414 | } | |
415 | ||
b7394c83 | 416 | if (!(env->hflags & HF_INHIBIT_IRQ_MASK) && |
c97d6d2c SAGDR |
417 | (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) && |
418 | (EFLAGS(env) & IF_MASK) && !(info & VMCS_INTR_VALID)) { | |
419 | int line = cpu_get_pic_interrupt(&x86cpu->env); | |
420 | cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD; | |
421 | if (line >= 0) { | |
422 | wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, line | | |
423 | VMCS_INTR_VALID | VMCS_INTR_T_HWINTR); | |
424 | } | |
425 | } | |
426 | if (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) { | |
427 | vmx_set_int_window_exiting(cpu_state); | |
428 | } | |
b7394c83 SAGDR |
429 | return (cpu_state->interrupt_request |
430 | & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)); | |
c97d6d2c SAGDR |
431 | } |
432 | ||
/*
 * Handle pending interrupt_request bits that require host-side action
 * (INIT, POLL, SIPI, TPR access report) and wake a halted vcpu when a
 * deliverable interrupt or NMI is pending.  Returns the vcpu's halted
 * state after processing.
 */
int hvf_process_events(CPUState *cpu_state)
{
    X86CPU *cpu = X86_CPU(cpu_state);
    CPUX86State *env = &cpu->env;

    /* Refresh RFLAGS so the IF check below sees current guest state. */
    EFLAGS(env) = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);

    if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
        hvf_cpu_synchronize_state(cpu_state);
        do_cpu_init(cpu);
    }

    if (cpu_state->interrupt_request & CPU_INTERRUPT_POLL) {
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    /* A deliverable external interrupt (IF set) or an NMI un-halts. */
    if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
        (EFLAGS(env) & IF_MASK)) ||
        (cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu_state->halted = 0;
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_SIPI) {
        hvf_cpu_synchronize_state(cpu_state);
        do_cpu_sipi(cpu);
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_TPR) {
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_TPR;
        hvf_cpu_synchronize_state(cpu_state);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }
    return cpu_state->halted;
}