]>
git.proxmox.com Git - mirror_qemu.git/blob - target/i386/hvf/x86_emu.c
2 * Copyright (C) 2016 Veertu Inc,
3 * Copyright (C) 2017 Google Inc,
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
19 /////////////////////////////////////////////////////////////////////////
21 // Copyright (C) 2001-2012 The Bochs Project
23 // This library is free software; you can redistribute it and/or
24 // modify it under the terms of the GNU Lesser General Public
25 // License as published by the Free Software Foundation; either
26 // version 2 of the License, or (at your option) any later version.
28 // This library is distributed in the hope that it will be useful,
29 // but WITHOUT ANY WARRANTY; without even the implied warranty of
30 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
31 // Lesser General Public License for more details.
33 // You should have received a copy of the GNU Lesser General Public
34 // License along with this library; if not, write to the Free Software
35 // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
36 /////////////////////////////////////////////////////////////////////////
38 #include "qemu/osdep.h"
40 #include "qemu-common.h"
41 #include "x86_decode.h"
45 #include "x86_flags.h"
49 void hvf_handle_io(struct CPUState
*cpu
, uint16_t port
, void *data
,
50 int direction
, int size
, uint32_t count
);
52 #define EXEC_2OP_LOGIC_CMD(env, decode, cmd, FLAGS_FUNC, save_res) \
54 fetch_operands(env, decode, 2, true, true, false); \
55 switch (decode->operand_size) { \
58 uint8_t v1 = (uint8_t)decode->op[0].val; \
59 uint8_t v2 = (uint8_t)decode->op[1].val; \
60 uint8_t diff = v1 cmd v2; \
62 write_val_ext(env, decode->op[0].ptr, diff, 1); \
64 FLAGS_FUNC##_8(diff); \
69 uint16_t v1 = (uint16_t)decode->op[0].val; \
70 uint16_t v2 = (uint16_t)decode->op[1].val; \
71 uint16_t diff = v1 cmd v2; \
73 write_val_ext(env, decode->op[0].ptr, diff, 2); \
75 FLAGS_FUNC##_16(diff); \
80 uint32_t v1 = (uint32_t)decode->op[0].val; \
81 uint32_t v2 = (uint32_t)decode->op[1].val; \
82 uint32_t diff = v1 cmd v2; \
84 write_val_ext(env, decode->op[0].ptr, diff, 4); \
86 FLAGS_FUNC##_32(diff); \
90 VM_PANIC("bad size\n"); \
95 #define EXEC_2OP_ARITH_CMD(env, decode, cmd, FLAGS_FUNC, save_res) \
97 fetch_operands(env, decode, 2, true, true, false); \
98 switch (decode->operand_size) { \
101 uint8_t v1 = (uint8_t)decode->op[0].val; \
102 uint8_t v2 = (uint8_t)decode->op[1].val; \
103 uint8_t diff = v1 cmd v2; \
105 write_val_ext(env, decode->op[0].ptr, diff, 1); \
107 FLAGS_FUNC##_8(v1, v2, diff); \
112 uint16_t v1 = (uint16_t)decode->op[0].val; \
113 uint16_t v2 = (uint16_t)decode->op[1].val; \
114 uint16_t diff = v1 cmd v2; \
116 write_val_ext(env, decode->op[0].ptr, diff, 2); \
118 FLAGS_FUNC##_16(v1, v2, diff); \
123 uint32_t v1 = (uint32_t)decode->op[0].val; \
124 uint32_t v2 = (uint32_t)decode->op[1].val; \
125 uint32_t diff = v1 cmd v2; \
127 write_val_ext(env, decode->op[0].ptr, diff, 4); \
129 FLAGS_FUNC##_32(v1, v2, diff); \
133 VM_PANIC("bad size\n"); \
/*
 * Read general-purpose register @reg from the emulator register file,
 * returning the low @size bytes (1/2/4/8).  .lx/.rx/.erx/.rrx are the
 * byte/word/dword/qword views of the same register union.
 *
 * NOTE(review): the switch scaffolding is reconstructed — this copy of the
 * source is missing the case labels; verify against upstream.
 */
addr_t read_reg(CPUX86State *env, int reg, int size)
{
    switch (size) {
    case 1:
        return env->hvf_emul->regs[reg].lx;
    case 2:
        return env->hvf_emul->regs[reg].rx;
    case 4:
        return env->hvf_emul->regs[reg].erx;
    case 8:
        return env->hvf_emul->regs[reg].rrx;
    default:
        abort();
    }
    return 0;
}
/*
 * Write @val into general-purpose register @reg with x86 sub-register
 * semantics: 1- and 2-byte writes merge into the low bits, while a 4-byte
 * write stores through .rrx with a uint32_t cast, zero-extending into the
 * full 64-bit register as real hardware does.
 *
 * NOTE(review): switch/case scaffolding reconstructed from the visible
 * assignments; verify against upstream.
 */
void write_reg(CPUX86State *env, int reg, addr_t val, int size)
{
    switch (size) {
    case 1:
        env->hvf_emul->regs[reg].lx = val;
        break;
    case 2:
        env->hvf_emul->regs[reg].rx = val;
        break;
    case 4:
        /* 32-bit write zero-extends to 64 bits (x86-64 behavior). */
        env->hvf_emul->regs[reg].rrx = (uint32_t)val;
        break;
    case 8:
        env->hvf_emul->regs[reg].rrx = val;
        break;
    default:
        abort();
    }
}
/*
 * Load @size bytes (1/2/4/8) from a host pointer into the register file
 * (@reg_ptr points at a slot inside env->hvf_emul->regs) and return the
 * value zero-extended to addr_t.
 *
 * NOTE(review): switch scaffolding reconstructed; verify against upstream.
 */
addr_t read_val_from_reg(addr_t reg_ptr, int size)
{
    addr_t val;

    switch (size) {
    case 1:
        val = *(uint8_t *)reg_ptr;
        break;
    case 2:
        val = *(uint16_t *)reg_ptr;
        break;
    case 4:
        val = *(uint32_t *)reg_ptr;
        break;
    case 8:
        val = *(uint64_t *)reg_ptr;
        break;
    default:
        abort();
    }
    return val;
}
/*
 * Store @val through @reg_ptr (a pointer into the register file) with
 * x86 sub-register semantics.  Note the size-4 case deliberately stores a
 * full 64-bit slot with a uint32_t-cast value: a 32-bit register write
 * zero-extends to 64 bits.
 *
 * NOTE(review): switch scaffolding reconstructed; verify against upstream.
 */
void write_val_to_reg(addr_t reg_ptr, addr_t val, int size)
{
    switch (size) {
    case 1:
        *(uint8_t *)reg_ptr = val;
        break;
    case 2:
        *(uint16_t *)reg_ptr = val;
        break;
    case 4:
        /* zero-extend into the whole 64-bit register slot */
        *(uint64_t *)reg_ptr = (uint32_t)val;
        break;
    case 8:
        *(uint64_t *)reg_ptr = val;
        break;
    default:
        abort();
    }
}
217 static bool is_host_reg(struct CPUX86State
*env
, addr_t ptr
)
219 return (ptr
- (addr_t
)&env
->hvf_emul
->regs
[0]) < sizeof(env
->hvf_emul
->regs
);
/*
 * Store @size bytes of @val at @ptr, which is either a pointer into the
 * host register file (handled by write_val_to_reg) or a guest physical
 * address (handled by vmx_write_mem).
 */
void write_val_ext(struct CPUX86State *env, addr_t ptr, addr_t val, int size)
{
    if (is_host_reg(env, ptr)) {
        write_val_to_reg(ptr, val, size);
        return;
    }
    vmx_write_mem(ENV_GET_CPU(env), ptr, &val, size);
}
231 uint8_t *read_mmio(struct CPUX86State
*env
, addr_t ptr
, int bytes
)
233 vmx_read_mem(ENV_GET_CPU(env
), env
->hvf_emul
->mmio_buf
, ptr
, bytes
);
234 return env
->hvf_emul
->mmio_buf
;
/*
 * Load @size bytes from @ptr, which is either a register-file pointer
 * (fast path) or a guest address (bounced through the mmio buffer), and
 * return the value zero-extended to addr_t.
 *
 * NOTE(review): switch scaffolding and the trailing return reconstructed —
 * this copy of the source is missing those lines; verify against upstream.
 */
addr_t read_val_ext(struct CPUX86State *env, addr_t ptr, int size)
{
    addr_t val;
    uint8_t *mmio_ptr;

    if (is_host_reg(env, ptr)) {
        return read_val_from_reg(ptr, size);
    }

    mmio_ptr = read_mmio(env, ptr, size);
    switch (size) {
    case 1:
        val = *(uint8_t *)mmio_ptr;
        break;
    case 2:
        val = *(uint16_t *)mmio_ptr;
        break;
    case 4:
        val = *(uint32_t *)mmio_ptr;
        break;
    case 8:
        val = *(uint64_t *)mmio_ptr;
        break;
    default:
        VM_PANIC("bad size\n");
        break;
    }
    return val;
}
268 static void fetch_operands(struct CPUX86State
*env
, struct x86_decode
*decode
,
269 int n
, bool val_op0
, bool val_op1
, bool val_op2
)
272 bool calc_val
[3] = {val_op0
, val_op1
, val_op2
};
274 for (i
= 0; i
< n
; i
++) {
275 switch (decode
->op
[i
].type
) {
276 case X86_VAR_IMMEDIATE
:
279 VM_PANIC_ON(!decode
->op
[i
].ptr
);
281 decode
->op
[i
].val
= read_val_from_reg(decode
->op
[i
].ptr
,
282 decode
->operand_size
);
286 calc_modrm_operand(env
, decode
, &decode
->op
[i
]);
288 decode
->op
[i
].val
= read_val_ext(env
, decode
->op
[i
].ptr
,
289 decode
->operand_size
);
293 decode
->op
[i
].ptr
= decode_linear_addr(env
, decode
,
297 decode
->op
[i
].val
= read_val_ext(env
, decode
->op
[i
].ptr
,
298 decode
->operand_size
);
/* MOV: copy the source operand's value into the destination operand. */
static void exec_mov(struct CPUX86State *env, struct x86_decode *decode)
{
    /* Only op[1] needs its value fetched; op[0] is a pure destination. */
    fetch_operands(env, decode, 2, false, true, false);
    write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
                  decode->operand_size);

    RIP(env) += decode->len;
}
/* ADD: dst = dst + src, updating OF/SF/ZF/AF/PF/CF. */
static void exec_add(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_ARITH_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
    RIP(env) += decode->len;
}
/* OR: dst = dst | src, updating the logic flags (CF/OF cleared). */
static void exec_or(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_LOGIC_CMD(env, decode, |, SET_FLAGS_OSZAPC_LOGIC, true);
    RIP(env) += decode->len;
}
/*
 * ADC: dst = dst + src + CF.  The odd-looking "+get_CF(env)+" token is
 * spliced between v1 and v2 by the macro, yielding v1 + CF + v2.
 */
static void exec_adc(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_ARITH_CMD(env, decode, +get_CF(env)+, SET_FLAGS_OSZAPC_ADD, true);
    RIP(env) += decode->len;
}
/*
 * SBB: dst = dst - src - CF.  "-get_CF(env)-" is spliced between v1 and
 * v2 by the macro, yielding v1 - CF - v2.
 */
static void exec_sbb(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_ARITH_CMD(env, decode, -get_CF(env)-, SET_FLAGS_OSZAPC_SUB, true);
    RIP(env) += decode->len;
}
/* AND: dst = dst & src, updating the logic flags. */
static void exec_and(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_LOGIC_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, true);
    RIP(env) += decode->len;
}
/* SUB: dst = dst - src, updating OF/SF/ZF/AF/PF/CF. */
static void exec_sub(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_ARITH_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, true);
    RIP(env) += decode->len;
}
/* XOR: dst = dst ^ src, updating the logic flags. */
static void exec_xor(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_LOGIC_CMD(env, decode, ^, SET_FLAGS_OSZAPC_LOGIC, true);
    RIP(env) += decode->len;
}
/*
 * NEG: op[1] = -op[1], with SUB-style flag computation (0 - value).
 * Done by hand rather than via EXEC_2OP_ARITH_CMD (see the retained
 * commented-out line) because the operand layout differs.
 *
 * NOTE(review): the declaration of 'val' is reconstructed — this copy of
 * the source is missing that line; verify the exact type upstream.
 */
static void exec_neg(struct CPUX86State *env, struct x86_decode *decode)
{
    int64_t val;
    /*EXEC_2OP_ARITH_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);*/

    fetch_operands(env, decode, 2, true, true, false);

    val = 0 - sign(decode->op[1].val, decode->operand_size);
    write_val_ext(env, decode->op[1].ptr, val, decode->operand_size);

    if (4 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB_32(0, 0 - val, val);
    } else if (2 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB_16(0, 0 - val, val);
    } else if (1 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB_8(0, 0 - val, val);
    } else {
        VM_PANIC("bad op size\n");
    }

    /*lflags_to_rflags(env);*/
    RIP(env) += decode->len;
}
/* CMP: compute dst - src for flags only; save_res=false discards the result. */
static void exec_cmp(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_ARITH_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
    RIP(env) += decode->len;
}
/*
 * INC: dst = dst + 1.  Implemented by faking a zero immediate second
 * operand and splicing "+1+" into the macro (v1 + 1 + 0).  Uses the
 * OSZAP flag setter because INC leaves CF unchanged.
 */
static void exec_inc(struct CPUX86State *env, struct x86_decode *decode)
{
    decode->op[1].type = X86_VAR_IMMEDIATE;
    decode->op[1].val = 0;

    EXEC_2OP_ARITH_CMD(env, decode, +1+, SET_FLAGS_OSZAP_ADD, true);

    RIP(env) += decode->len;
}
/*
 * DEC: dst = dst - 1, via a fake zero immediate and spliced "-1-"
 * (v1 - 1 - 0).  Uses the OSZAP setter because DEC leaves CF unchanged.
 */
static void exec_dec(struct CPUX86State *env, struct x86_decode *decode)
{
    decode->op[1].type = X86_VAR_IMMEDIATE;
    decode->op[1].val = 0;

    EXEC_2OP_ARITH_CMD(env, decode, -1-, SET_FLAGS_OSZAP_SUB, true);

    RIP(env) += decode->len;
}
/* TEST: AND for flags only; save_res=false discards the result. */
static void exec_tst(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_LOGIC_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, false);
    RIP(env) += decode->len;
}
/* NOT: dst = ~dst.  Flags are not affected, matching the x86 definition. */
static void exec_not(struct CPUX86State *env, struct x86_decode *decode)
{
    fetch_operands(env, decode, 1, true, false, false);

    write_val_ext(env, decode->op[0].ptr, ~decode->op[0].val,
                  decode->operand_size);
    RIP(env) += decode->len;
}
421 void exec_movzx(struct CPUX86State
*env
, struct x86_decode
*decode
)
424 int op_size
= decode
->operand_size
;
426 fetch_operands(env
, decode
, 1, false, false, false);
428 if (0xb6 == decode
->opcode
[1]) {
433 decode
->operand_size
= src_op_size
;
434 calc_modrm_operand(env
, decode
, &decode
->op
[1]);
435 decode
->op
[1].val
= read_val_ext(env
, decode
->op
[1].ptr
, src_op_size
);
436 write_val_ext(env
, decode
->op
[0].ptr
, decode
->op
[1].val
, op_size
);
438 RIP(env
) += decode
->len
;
441 static void exec_out(struct CPUX86State
*env
, struct x86_decode
*decode
)
443 switch (decode
->opcode
[0]) {
445 hvf_handle_io(ENV_GET_CPU(env
), decode
->op
[0].val
, &AL(env
), 1, 1, 1);
448 hvf_handle_io(ENV_GET_CPU(env
), decode
->op
[0].val
, &RAX(env
), 1,
449 decode
->operand_size
, 1);
452 hvf_handle_io(ENV_GET_CPU(env
), DX(env
), &AL(env
), 1, 1, 1);
455 hvf_handle_io(ENV_GET_CPU(env
), DX(env
), &RAX(env
), 1, decode
->operand_size
, 1);
458 VM_PANIC("Bad out opcode\n");
461 RIP(env
) += decode
->len
;
464 static void exec_in(struct CPUX86State
*env
, struct x86_decode
*decode
)
467 switch (decode
->opcode
[0]) {
469 hvf_handle_io(ENV_GET_CPU(env
), decode
->op
[0].val
, &AL(env
), 0, 1, 1);
472 hvf_handle_io(ENV_GET_CPU(env
), decode
->op
[0].val
, &val
, 0, decode
->operand_size
, 1);
473 if (decode
->operand_size
== 2) {
476 RAX(env
) = (uint32_t)val
;
480 hvf_handle_io(ENV_GET_CPU(env
), DX(env
), &AL(env
), 0, 1, 1);
483 hvf_handle_io(ENV_GET_CPU(env
), DX(env
), &val
, 0, decode
->operand_size
, 1);
484 if (decode
->operand_size
== 2) {
487 RAX(env
) = (uint32_t)val
;
492 VM_PANIC("Bad in opcode\n");
496 RIP(env
) += decode
->len
;
/*
 * Step a string-op index register (RSI/RDI) by operand_size, in the
 * direction selected by EFLAGS.DF: decrement when DF is set, otherwise
 * increment.  Read/write width follows the instruction's addressing size.
 */
static inline void string_increment_reg(struct CPUX86State *env, int reg,
                                        struct x86_decode *decode)
{
    addr_t val = read_reg(env, reg, decode->addressing_size);
    if (env->hvf_emul->rflags.df) {
        val -= decode->operand_size;
    } else {
        val += decode->operand_size;
    }
    write_reg(env, reg, val, decode->addressing_size);
}
/*
 * Run a single-iteration string handler @func under a REP/REPNE prefix:
 * repeat while RCX (width = addressing size) is non-zero, writing the
 * decremented count back each iteration, and stop early on the ZF
 * condition for REPE (ZF clear) or REPNE (ZF set).
 *
 * NOTE(review): the loop scaffolding is reconstructed — this copy of the
 * source is missing those lines; verify against upstream.
 */
static inline void string_rep(struct CPUX86State *env, struct x86_decode *decode,
                              void (*func)(struct CPUX86State *env,
                                           struct x86_decode *ins), int rep)
{
    addr_t rcx = read_reg(env, R_ECX, decode->addressing_size);
    while (rcx--) {
        func(env, decode);
        write_reg(env, R_ECX, rcx, decode->addressing_size);
        if ((PREFIX_REP == rep) && !get_ZF(env)) {
            break;
        }
        if ((PREFIX_REPN == rep) && get_ZF(env)) {
            break;
        }
    }
}
/*
 * One INS iteration: read operand_size bytes from port DX into the mmio
 * bounce buffer, store them at ES:DI, then step RDI per DF.
 *
 * NOTE(review): the final argument of linear_addr_size (presumably R_ES)
 * is missing from this copy of the source; verify upstream.
 */
static void exec_ins_single(struct CPUX86State *env, struct x86_decode *decode)
{
    addr_t addr = linear_addr_size(ENV_GET_CPU(env), RDI(env),
                                   decode->addressing_size, R_ES);

    hvf_handle_io(ENV_GET_CPU(env), DX(env), env->hvf_emul->mmio_buf, 0,
                  decode->operand_size, 1);
    vmx_write_mem(ENV_GET_CPU(env), addr, env->hvf_emul->mmio_buf,
                  decode->operand_size);

    string_increment_reg(env, R_EDI, decode);
}
/*
 * INS: repeat via string_rep when a REP prefix is present, else one shot.
 *
 * NOTE(review): the if/else around the rep prefix is reconstructed —
 * this copy of the source is missing those lines; verify upstream.
 */
static void exec_ins(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_ins_single, 0);
    } else {
        exec_ins_single(env, decode);
    }

    RIP(env) += decode->len;
}
/*
 * One OUTS iteration: load operand_size bytes from DS:SI (segment
 * override honored by decode_linear_addr) into the mmio bounce buffer,
 * write them to port DX, then step RSI per DF.
 */
static void exec_outs_single(struct CPUX86State *env, struct x86_decode *decode)
{
    addr_t addr = decode_linear_addr(env, decode, RSI(env), R_DS);

    vmx_read_mem(ENV_GET_CPU(env), env->hvf_emul->mmio_buf, addr,
                 decode->operand_size);
    hvf_handle_io(ENV_GET_CPU(env), DX(env), env->hvf_emul->mmio_buf, 1,
                  decode->operand_size, 1);

    string_increment_reg(env, R_ESI, decode);
}
/*
 * OUTS: repeat via string_rep when a REP prefix is present, else one shot.
 *
 * NOTE(review): the if/else around the rep prefix is reconstructed —
 * this copy of the source is missing those lines; verify upstream.
 */
static void exec_outs(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_outs_single, 0);
    } else {
        exec_outs_single(env, decode);
    }

    RIP(env) += decode->len;
}
/*
 * One MOVS iteration: copy operand_size bytes from DS:SI to ES:DI, then
 * step both index registers per DF.
 *
 * NOTE(review): local declarations and the final linear_addr_size
 * argument (presumably R_ES) are reconstructed — this copy of the source
 * is missing those lines; verify upstream.
 */
static void exec_movs_single(struct CPUX86State *env, struct x86_decode *decode)
{
    addr_t src_addr;
    addr_t dst_addr;
    addr_t val;

    src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
    dst_addr = linear_addr_size(ENV_GET_CPU(env), RDI(env),
                                decode->addressing_size, R_ES);

    val = read_val_ext(env, src_addr, decode->operand_size);
    write_val_ext(env, dst_addr, val, decode->operand_size);

    string_increment_reg(env, R_ESI, decode);
    string_increment_reg(env, R_EDI, decode);
}
/*
 * MOVS: repeat via string_rep when a REP prefix is present, else one shot.
 *
 * NOTE(review): the if/else around the rep prefix is reconstructed —
 * this copy of the source is missing those lines; verify upstream.
 */
static void exec_movs(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_movs_single, 0);
    } else {
        exec_movs_single(env, decode);
    }

    RIP(env) += decode->len;
}
601 static void exec_cmps_single(struct CPUX86State
*env
, struct x86_decode
*decode
)
606 src_addr
= decode_linear_addr(env
, decode
, RSI(env
), R_DS
);
607 dst_addr
= linear_addr_size(ENV_GET_CPU(env
), RDI(env
), decode
->addressing_size
,
610 decode
->op
[0].type
= X86_VAR_IMMEDIATE
;
611 decode
->op
[0].val
= read_val_ext(env
, src_addr
, decode
->operand_size
);
612 decode
->op
[1].type
= X86_VAR_IMMEDIATE
;
613 decode
->op
[1].val
= read_val_ext(env
, dst_addr
, decode
->operand_size
);
615 EXEC_2OP_ARITH_CMD(env
, decode
, -, SET_FLAGS_OSZAPC_SUB
, false);
617 string_increment_reg(env
, R_ESI
, decode
);
618 string_increment_reg(env
, R_EDI
, decode
);
621 static void exec_cmps(struct CPUX86State
*env
, struct x86_decode
*decode
)
624 string_rep(env
, decode
, exec_cmps_single
, decode
->rep
);
626 exec_cmps_single(env
, decode
);
628 RIP(env
) += decode
->len
;
632 static void exec_stos_single(struct CPUX86State
*env
, struct x86_decode
*decode
)
637 addr
= linear_addr_size(ENV_GET_CPU(env
), RDI(env
), decode
->addressing_size
, R_ES
);
638 val
= read_reg(env
, R_EAX
, decode
->operand_size
);
639 vmx_write_mem(ENV_GET_CPU(env
), addr
, &val
, decode
->operand_size
);
641 string_increment_reg(env
, R_EDI
, decode
);
645 static void exec_stos(struct CPUX86State
*env
, struct x86_decode
*decode
)
648 string_rep(env
, decode
, exec_stos_single
, 0);
650 exec_stos_single(env
, decode
);
653 RIP(env
) += decode
->len
;
656 static void exec_scas_single(struct CPUX86State
*env
, struct x86_decode
*decode
)
660 addr
= linear_addr_size(ENV_GET_CPU(env
), RDI(env
), decode
->addressing_size
, R_ES
);
661 decode
->op
[1].type
= X86_VAR_IMMEDIATE
;
662 vmx_read_mem(ENV_GET_CPU(env
), &decode
->op
[1].val
, addr
, decode
->operand_size
);
664 EXEC_2OP_ARITH_CMD(env
, decode
, -, SET_FLAGS_OSZAPC_SUB
, false);
665 string_increment_reg(env
, R_EDI
, decode
);
668 static void exec_scas(struct CPUX86State
*env
, struct x86_decode
*decode
)
670 decode
->op
[0].type
= X86_VAR_REG
;
671 decode
->op
[0].reg
= R_EAX
;
673 string_rep(env
, decode
, exec_scas_single
, decode
->rep
);
675 exec_scas_single(env
, decode
);
678 RIP(env
) += decode
->len
;
681 static void exec_lods_single(struct CPUX86State
*env
, struct x86_decode
*decode
)
686 addr
= decode_linear_addr(env
, decode
, RSI(env
), R_DS
);
687 vmx_read_mem(ENV_GET_CPU(env
), &val
, addr
, decode
->operand_size
);
688 write_reg(env
, R_EAX
, val
, decode
->operand_size
);
690 string_increment_reg(env
, R_ESI
, decode
);
693 static void exec_lods(struct CPUX86State
*env
, struct x86_decode
*decode
)
696 string_rep(env
, decode
, exec_lods_single
, 0);
698 exec_lods_single(env
, decode
);
701 RIP(env
) += decode
->len
;
704 #define MSR_IA32_UCODE_REV 0x00000017
706 void simulate_rdmsr(struct CPUState
*cpu
)
708 X86CPU
*x86_cpu
= X86_CPU(cpu
);
709 CPUX86State
*env
= &x86_cpu
->env
;
710 uint32_t msr
= ECX(env
);
715 val
= rdtscp() + rvmcs(cpu
->hvf_fd
, VMCS_TSC_OFFSET
);
717 case MSR_IA32_APICBASE
:
718 val
= cpu_get_apic_base(X86_CPU(cpu
)->apic_state
);
720 case MSR_IA32_UCODE_REV
:
721 val
= (0x100000000ULL
<< 32) | 0x100000000ULL
;
724 val
= rvmcs(cpu
->hvf_fd
, VMCS_GUEST_IA32_EFER
);
727 val
= rvmcs(cpu
->hvf_fd
, VMCS_GUEST_FS_BASE
);
730 val
= rvmcs(cpu
->hvf_fd
, VMCS_GUEST_GS_BASE
);
732 case MSR_KERNELGSBASE
:
733 val
= rvmcs(cpu
->hvf_fd
, VMCS_HOST_FS_BASE
);
744 case MSR_IA32_MISC_ENABLE
:
745 val
= env
->msr_ia32_misc_enable
;
747 case MSR_MTRRphysBase(0):
748 case MSR_MTRRphysBase(1):
749 case MSR_MTRRphysBase(2):
750 case MSR_MTRRphysBase(3):
751 case MSR_MTRRphysBase(4):
752 case MSR_MTRRphysBase(5):
753 case MSR_MTRRphysBase(6):
754 case MSR_MTRRphysBase(7):
755 val
= env
->mtrr_var
[(ECX(env
) - MSR_MTRRphysBase(0)) / 2].base
;
757 case MSR_MTRRphysMask(0):
758 case MSR_MTRRphysMask(1):
759 case MSR_MTRRphysMask(2):
760 case MSR_MTRRphysMask(3):
761 case MSR_MTRRphysMask(4):
762 case MSR_MTRRphysMask(5):
763 case MSR_MTRRphysMask(6):
764 case MSR_MTRRphysMask(7):
765 val
= env
->mtrr_var
[(ECX(env
) - MSR_MTRRphysMask(0)) / 2].mask
;
767 case MSR_MTRRfix64K_00000
:
768 val
= env
->mtrr_fixed
[0];
770 case MSR_MTRRfix16K_80000
:
771 case MSR_MTRRfix16K_A0000
:
772 val
= env
->mtrr_fixed
[ECX(env
) - MSR_MTRRfix16K_80000
+ 1];
774 case MSR_MTRRfix4K_C0000
:
775 case MSR_MTRRfix4K_C8000
:
776 case MSR_MTRRfix4K_D0000
:
777 case MSR_MTRRfix4K_D8000
:
778 case MSR_MTRRfix4K_E0000
:
779 case MSR_MTRRfix4K_E8000
:
780 case MSR_MTRRfix4K_F0000
:
781 case MSR_MTRRfix4K_F8000
:
782 val
= env
->mtrr_fixed
[ECX(env
) - MSR_MTRRfix4K_C0000
+ 3];
784 case MSR_MTRRdefType
:
785 val
= env
->mtrr_deftype
;
788 /* fprintf(stderr, "%s: unknown msr 0x%x\n", __func__, msr); */
793 RAX(env
) = (uint32_t)val
;
794 RDX(env
) = (uint32_t)(val
>> 32);
/* RDMSR: delegate to simulate_rdmsr (reads ECX, fills EAX:EDX). */
static void exec_rdmsr(struct CPUX86State *env, struct x86_decode *decode)
{
    simulate_rdmsr(ENV_GET_CPU(env));
    RIP(env) += decode->len;
}
803 void simulate_wrmsr(struct CPUState
*cpu
)
805 X86CPU
*x86_cpu
= X86_CPU(cpu
);
806 CPUX86State
*env
= &x86_cpu
->env
;
807 uint32_t msr
= ECX(env
);
808 uint64_t data
= ((uint64_t)EDX(env
) << 32) | EAX(env
);
812 /* if (!osx_is_sierra())
813 wvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET, data - rdtscp());
814 hv_vm_sync_tsc(data);*/
816 case MSR_IA32_APICBASE
:
817 cpu_set_apic_base(X86_CPU(cpu
)->apic_state
, data
);
820 wvmcs(cpu
->hvf_fd
, VMCS_GUEST_FS_BASE
, data
);
823 wvmcs(cpu
->hvf_fd
, VMCS_GUEST_GS_BASE
, data
);
825 case MSR_KERNELGSBASE
:
826 wvmcs(cpu
->hvf_fd
, VMCS_HOST_FS_BASE
, data
);
838 /*printf("new efer %llx\n", EFER(cpu));*/
839 wvmcs(cpu
->hvf_fd
, VMCS_GUEST_IA32_EFER
, data
);
840 if (data
& MSR_EFER_NXE
) {
841 hv_vcpu_invalidate_tlb(cpu
->hvf_fd
);
844 case MSR_MTRRphysBase(0):
845 case MSR_MTRRphysBase(1):
846 case MSR_MTRRphysBase(2):
847 case MSR_MTRRphysBase(3):
848 case MSR_MTRRphysBase(4):
849 case MSR_MTRRphysBase(5):
850 case MSR_MTRRphysBase(6):
851 case MSR_MTRRphysBase(7):
852 env
->mtrr_var
[(ECX(env
) - MSR_MTRRphysBase(0)) / 2].base
= data
;
854 case MSR_MTRRphysMask(0):
855 case MSR_MTRRphysMask(1):
856 case MSR_MTRRphysMask(2):
857 case MSR_MTRRphysMask(3):
858 case MSR_MTRRphysMask(4):
859 case MSR_MTRRphysMask(5):
860 case MSR_MTRRphysMask(6):
861 case MSR_MTRRphysMask(7):
862 env
->mtrr_var
[(ECX(env
) - MSR_MTRRphysMask(0)) / 2].mask
= data
;
864 case MSR_MTRRfix64K_00000
:
865 env
->mtrr_fixed
[ECX(env
) - MSR_MTRRfix64K_00000
] = data
;
867 case MSR_MTRRfix16K_80000
:
868 case MSR_MTRRfix16K_A0000
:
869 env
->mtrr_fixed
[ECX(env
) - MSR_MTRRfix16K_80000
+ 1] = data
;
871 case MSR_MTRRfix4K_C0000
:
872 case MSR_MTRRfix4K_C8000
:
873 case MSR_MTRRfix4K_D0000
:
874 case MSR_MTRRfix4K_D8000
:
875 case MSR_MTRRfix4K_E0000
:
876 case MSR_MTRRfix4K_E8000
:
877 case MSR_MTRRfix4K_F0000
:
878 case MSR_MTRRfix4K_F8000
:
879 env
->mtrr_fixed
[ECX(env
) - MSR_MTRRfix4K_C0000
+ 3] = data
;
881 case MSR_MTRRdefType
:
882 env
->mtrr_deftype
= data
;
888 /* Related to support known hypervisor interface */
889 /* if (g_hypervisor_iface)
890 g_hypervisor_iface->wrmsr_handler(cpu, msr, data);
892 printf("write msr %llx\n", RCX(cpu));*/
/* WRMSR: delegate to simulate_wrmsr (reads ECX and EDX:EAX). */
static void exec_wrmsr(struct CPUX86State *env, struct x86_decode *decode)
{
    simulate_wrmsr(ENV_GET_CPU(env));
    RIP(env) += decode->len;
}
903 * 0 - bt, 1 - btc, 2 - bts, 3 - btr
905 static void do_bt(struct CPUX86State
*env
, struct x86_decode
*decode
, int flag
)
907 int32_t displacement
;
910 int mask
= (4 == decode
->operand_size
) ? 0x1f : 0xf;
912 VM_PANIC_ON(decode
->rex
.rex
);
914 fetch_operands(env
, decode
, 2, false, true, false);
915 index
= decode
->op
[1].val
& mask
;
917 if (decode
->op
[0].type
!= X86_VAR_REG
) {
918 if (4 == decode
->operand_size
) {
919 displacement
= ((int32_t) (decode
->op
[1].val
& 0xffffffe0)) / 32;
920 decode
->op
[0].ptr
+= 4 * displacement
;
921 } else if (2 == decode
->operand_size
) {
922 displacement
= ((int16_t) (decode
->op
[1].val
& 0xfff0)) / 16;
923 decode
->op
[0].ptr
+= 2 * displacement
;
925 VM_PANIC("bt 64bit\n");
928 decode
->op
[0].val
= read_val_ext(env
, decode
->op
[0].ptr
,
929 decode
->operand_size
);
930 cf
= (decode
->op
[0].val
>> index
) & 0x01;
937 decode
->op
[0].val
^= (1u << index
);
940 decode
->op
[0].val
|= (1u << index
);
943 decode
->op
[0].val
&= ~(1u << index
);
946 write_val_ext(env
, decode
->op
[0].ptr
, decode
->op
[0].val
,
947 decode
->operand_size
);
/* BT: test bit, flag 0 per do_bt's encoding (0=bt 1=btc 2=bts 3=btr). */
static void exec_bt(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 0);
    RIP(env) += decode->len;
}
/* BTC: test and complement bit (do_bt flag 1). */
static void exec_btc(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 1);
    RIP(env) += decode->len;
}
/* BTR: test and reset bit (do_bt flag 3). */
static void exec_btr(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 3);
    RIP(env) += decode->len;
}
/* BTS: test and set bit (do_bt flag 2). */
static void exec_bts(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 2);
    RIP(env) += decode->len;
}
975 void exec_shl(struct CPUX86State
*env
, struct x86_decode
*decode
)
980 fetch_operands(env
, decode
, 2, true, true, false);
982 count
= decode
->op
[1].val
;
983 count
&= 0x1f; /* count is masked to 5 bits*/
988 switch (decode
->operand_size
) {
993 res
= (decode
->op
[0].val
<< count
);
994 cf
= (decode
->op
[0].val
>> (8 - count
)) & 0x1;
995 of
= cf
^ (res
>> 7);
998 write_val_ext(env
, decode
->op
[0].ptr
, res
, 1);
999 SET_FLAGS_OSZAPC_LOGIC_8(res
);
1000 SET_FLAGS_OxxxxC(env
, of
, cf
);
1009 res
= (decode
->op
[0].val
<< count
);
1010 cf
= (decode
->op
[0].val
>> (16 - count
)) & 0x1;
1011 of
= cf
^ (res
>> 15); /* of = cf ^ result15 */
1014 write_val_ext(env
, decode
->op
[0].ptr
, res
, 2);
1015 SET_FLAGS_OSZAPC_LOGIC_16(res
);
1016 SET_FLAGS_OxxxxC(env
, of
, cf
);
1021 uint32_t res
= decode
->op
[0].val
<< count
;
1023 write_val_ext(env
, decode
->op
[0].ptr
, res
, 4);
1024 SET_FLAGS_OSZAPC_LOGIC_32(res
);
1025 cf
= (decode
->op
[0].val
>> (32 - count
)) & 0x1;
1026 of
= cf
^ (res
>> 31); /* of = cf ^ result31 */
1027 SET_FLAGS_OxxxxC(env
, of
, cf
);
1035 /* lflags_to_rflags(env); */
1036 RIP(env
) += decode
->len
;
1039 void exec_movsx(CPUX86State
*env
, struct x86_decode
*decode
)
1042 int op_size
= decode
->operand_size
;
1044 fetch_operands(env
, decode
, 2, false, false, false);
1046 if (0xbe == decode
->opcode
[1]) {
1052 decode
->operand_size
= src_op_size
;
1053 calc_modrm_operand(env
, decode
, &decode
->op
[1]);
1054 decode
->op
[1].val
= sign(read_val_ext(env
, decode
->op
[1].ptr
, src_op_size
),
1057 write_val_ext(env
, decode
->op
[0].ptr
, decode
->op
[1].val
, op_size
);
1059 RIP(env
) += decode
->len
;
1062 void exec_ror(struct CPUX86State
*env
, struct x86_decode
*decode
)
1066 fetch_operands(env
, decode
, 2, true, true, false);
1067 count
= decode
->op
[1].val
;
1069 switch (decode
->operand_size
) {
1072 uint32_t bit6
, bit7
;
1075 if ((count
& 0x07) == 0) {
1077 bit6
= ((uint8_t)decode
->op
[0].val
>> 6) & 1;
1078 bit7
= ((uint8_t)decode
->op
[0].val
>> 7) & 1;
1079 SET_FLAGS_OxxxxC(env
, bit6
^ bit7
, bit7
);
1082 count
&= 0x7; /* use only bottom 3 bits */
1083 res
= ((uint8_t)decode
->op
[0].val
>> count
) |
1084 ((uint8_t)decode
->op
[0].val
<< (8 - count
));
1085 write_val_ext(env
, decode
->op
[0].ptr
, res
, 1);
1086 bit6
= (res
>> 6) & 1;
1087 bit7
= (res
>> 7) & 1;
1088 /* set eflags: ROR count affects the following flags: C, O */
1089 SET_FLAGS_OxxxxC(env
, bit6
^ bit7
, bit7
);
1095 uint32_t bit14
, bit15
;
1098 if ((count
& 0x0f) == 0) {
1100 bit14
= ((uint16_t)decode
->op
[0].val
>> 14) & 1;
1101 bit15
= ((uint16_t)decode
->op
[0].val
>> 15) & 1;
1102 /* of = result14 ^ result15 */
1103 SET_FLAGS_OxxxxC(env
, bit14
^ bit15
, bit15
);
1106 count
&= 0x0f; /* use only 4 LSB's */
1107 res
= ((uint16_t)decode
->op
[0].val
>> count
) |
1108 ((uint16_t)decode
->op
[0].val
<< (16 - count
));
1109 write_val_ext(env
, decode
->op
[0].ptr
, res
, 2);
1111 bit14
= (res
>> 14) & 1;
1112 bit15
= (res
>> 15) & 1;
1113 /* of = result14 ^ result15 */
1114 SET_FLAGS_OxxxxC(env
, bit14
^ bit15
, bit15
);
1120 uint32_t bit31
, bit30
;
1125 res
= ((uint32_t)decode
->op
[0].val
>> count
) |
1126 ((uint32_t)decode
->op
[0].val
<< (32 - count
));
1127 write_val_ext(env
, decode
->op
[0].ptr
, res
, 4);
1129 bit31
= (res
>> 31) & 1;
1130 bit30
= (res
>> 30) & 1;
1131 /* of = result30 ^ result31 */
1132 SET_FLAGS_OxxxxC(env
, bit30
^ bit31
, bit31
);
1137 RIP(env
) += decode
->len
;
1140 void exec_rol(struct CPUX86State
*env
, struct x86_decode
*decode
)
1144 fetch_operands(env
, decode
, 2, true, true, false);
1145 count
= decode
->op
[1].val
;
1147 switch (decode
->operand_size
) {
1150 uint32_t bit0
, bit7
;
1153 if ((count
& 0x07) == 0) {
1155 bit0
= ((uint8_t)decode
->op
[0].val
& 1);
1156 bit7
= ((uint8_t)decode
->op
[0].val
>> 7);
1157 SET_FLAGS_OxxxxC(env
, bit0
^ bit7
, bit0
);
1160 count
&= 0x7; /* use only lowest 3 bits */
1161 res
= ((uint8_t)decode
->op
[0].val
<< count
) |
1162 ((uint8_t)decode
->op
[0].val
>> (8 - count
));
1164 write_val_ext(env
, decode
->op
[0].ptr
, res
, 1);
1166 * ROL count affects the following flags: C, O
1170 SET_FLAGS_OxxxxC(env
, bit0
^ bit7
, bit0
);
1176 uint32_t bit0
, bit15
;
1179 if ((count
& 0x0f) == 0) {
1181 bit0
= ((uint16_t)decode
->op
[0].val
& 0x1);
1182 bit15
= ((uint16_t)decode
->op
[0].val
>> 15);
1183 /* of = cf ^ result15 */
1184 SET_FLAGS_OxxxxC(env
, bit0
^ bit15
, bit0
);
1187 count
&= 0x0f; /* only use bottom 4 bits */
1188 res
= ((uint16_t)decode
->op
[0].val
<< count
) |
1189 ((uint16_t)decode
->op
[0].val
>> (16 - count
));
1191 write_val_ext(env
, decode
->op
[0].ptr
, res
, 2);
1193 bit15
= (res
>> 15);
1194 /* of = cf ^ result15 */
1195 SET_FLAGS_OxxxxC(env
, bit0
^ bit15
, bit0
);
1201 uint32_t bit0
, bit31
;
1206 res
= ((uint32_t)decode
->op
[0].val
<< count
) |
1207 ((uint32_t)decode
->op
[0].val
>> (32 - count
));
1209 write_val_ext(env
, decode
->op
[0].ptr
, res
, 4);
1211 bit31
= (res
>> 31);
1212 /* of = cf ^ result31 */
1213 SET_FLAGS_OxxxxC(env
, bit0
^ bit31
, bit0
);
1218 RIP(env
) += decode
->len
;
1222 void exec_rcl(struct CPUX86State
*env
, struct x86_decode
*decode
)
1227 fetch_operands(env
, decode
, 2, true, true, false);
1228 count
= decode
->op
[1].val
& 0x1f;
1230 switch (decode
->operand_size
) {
1233 uint8_t op1_8
= decode
->op
[0].val
;
1241 res
= (op1_8
<< 1) | get_CF(env
);
1243 res
= (op1_8
<< count
) | (get_CF(env
) << (count
- 1)) |
1244 (op1_8
>> (9 - count
));
1247 write_val_ext(env
, decode
->op
[0].ptr
, res
, 1);
1249 cf
= (op1_8
>> (8 - count
)) & 0x01;
1250 of
= cf
^ (res
>> 7); /* of = cf ^ result7 */
1251 SET_FLAGS_OxxxxC(env
, of
, cf
);
1257 uint16_t op1_16
= decode
->op
[0].val
;
1265 res
= (op1_16
<< 1) | get_CF(env
);
1266 } else if (count
== 16) {
1267 res
= (get_CF(env
) << 15) | (op1_16
>> 1);
1268 } else { /* 2..15 */
1269 res
= (op1_16
<< count
) | (get_CF(env
) << (count
- 1)) |
1270 (op1_16
>> (17 - count
));
1273 write_val_ext(env
, decode
->op
[0].ptr
, res
, 2);
1275 cf
= (op1_16
>> (16 - count
)) & 0x1;
1276 of
= cf
^ (res
>> 15); /* of = cf ^ result15 */
1277 SET_FLAGS_OxxxxC(env
, of
, cf
);
1283 uint32_t op1_32
= decode
->op
[0].val
;
1290 res
= (op1_32
<< 1) | get_CF(env
);
1292 res
= (op1_32
<< count
) | (get_CF(env
) << (count
- 1)) |
1293 (op1_32
>> (33 - count
));
1296 write_val_ext(env
, decode
->op
[0].ptr
, res
, 4);
1298 cf
= (op1_32
>> (32 - count
)) & 0x1;
1299 of
= cf
^ (res
>> 31); /* of = cf ^ result31 */
1300 SET_FLAGS_OxxxxC(env
, of
, cf
);
1304 RIP(env
) += decode
->len
;
1307 void exec_rcr(struct CPUX86State
*env
, struct x86_decode
*decode
)
1312 fetch_operands(env
, decode
, 2, true, true, false);
1313 count
= decode
->op
[1].val
& 0x1f;
1315 switch (decode
->operand_size
) {
1318 uint8_t op1_8
= decode
->op
[0].val
;
1325 res
= (op1_8
>> count
) | (get_CF(env
) << (8 - count
)) |
1326 (op1_8
<< (9 - count
));
1328 write_val_ext(env
, decode
->op
[0].ptr
, res
, 1);
1330 cf
= (op1_8
>> (count
- 1)) & 0x1;
1331 of
= (((res
<< 1) ^ res
) >> 7) & 0x1; /* of = result6 ^ result7 */
1332 SET_FLAGS_OxxxxC(env
, of
, cf
);
1337 uint16_t op1_16
= decode
->op
[0].val
;
1344 res
= (op1_16
>> count
) | (get_CF(env
) << (16 - count
)) |
1345 (op1_16
<< (17 - count
));
1347 write_val_ext(env
, decode
->op
[0].ptr
, res
, 2);
1349 cf
= (op1_16
>> (count
- 1)) & 0x1;
1350 of
= ((uint16_t)((res
<< 1) ^ res
) >> 15) & 0x1; /* of = result15 ^
1352 SET_FLAGS_OxxxxC(env
, of
, cf
);
1358 uint32_t op1_32
= decode
->op
[0].val
;
1365 res
= (op1_32
>> 1) | (get_CF(env
) << 31);
1367 res
= (op1_32
>> count
) | (get_CF(env
) << (32 - count
)) |
1368 (op1_32
<< (33 - count
));
1371 write_val_ext(env
, decode
->op
[0].ptr
, res
, 4);
1373 cf
= (op1_32
>> (count
- 1)) & 0x1;
1374 of
= ((res
<< 1) ^ res
) >> 31; /* of = result30 ^ result31 */
1375 SET_FLAGS_OxxxxC(env
, of
, cf
);
1379 RIP(env
) += decode
->len
;
/*
 * XCHG: swap the two operands.  Both values are fetched first, then each
 * is written to the other's location; flags are untouched.
 */
static void exec_xchg(struct CPUX86State *env, struct x86_decode *decode)
{
    fetch_operands(env, decode, 2, true, true, false);

    write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
                  decode->operand_size);
    write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
                  decode->operand_size);

    RIP(env) += decode->len;
}
/*
 * XADD: dst = dst + src with ADD flags, and the original dst value
 * (left in op[0].val by the macro) is stored into the source operand.
 */
static void exec_xadd(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_ARITH_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
    write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
                  decode->operand_size);

    RIP(env) += decode->len;
}
1403 static struct cmd_handler
{
1404 enum x86_decode_cmd cmd
;
1405 void (*handler
)(struct CPUX86State
*env
, struct x86_decode
*ins
);
1407 {X86_DECODE_CMD_INVL
, NULL
,},
1408 {X86_DECODE_CMD_MOV
, exec_mov
},
1409 {X86_DECODE_CMD_ADD
, exec_add
},
1410 {X86_DECODE_CMD_OR
, exec_or
},
1411 {X86_DECODE_CMD_ADC
, exec_adc
},
1412 {X86_DECODE_CMD_SBB
, exec_sbb
},
1413 {X86_DECODE_CMD_AND
, exec_and
},
1414 {X86_DECODE_CMD_SUB
, exec_sub
},
1415 {X86_DECODE_CMD_NEG
, exec_neg
},
1416 {X86_DECODE_CMD_XOR
, exec_xor
},
1417 {X86_DECODE_CMD_CMP
, exec_cmp
},
1418 {X86_DECODE_CMD_INC
, exec_inc
},
1419 {X86_DECODE_CMD_DEC
, exec_dec
},
1420 {X86_DECODE_CMD_TST
, exec_tst
},
1421 {X86_DECODE_CMD_NOT
, exec_not
},
1422 {X86_DECODE_CMD_MOVZX
, exec_movzx
},
1423 {X86_DECODE_CMD_OUT
, exec_out
},
1424 {X86_DECODE_CMD_IN
, exec_in
},
1425 {X86_DECODE_CMD_INS
, exec_ins
},
1426 {X86_DECODE_CMD_OUTS
, exec_outs
},
1427 {X86_DECODE_CMD_RDMSR
, exec_rdmsr
},
1428 {X86_DECODE_CMD_WRMSR
, exec_wrmsr
},
1429 {X86_DECODE_CMD_BT
, exec_bt
},
1430 {X86_DECODE_CMD_BTR
, exec_btr
},
1431 {X86_DECODE_CMD_BTC
, exec_btc
},
1432 {X86_DECODE_CMD_BTS
, exec_bts
},
1433 {X86_DECODE_CMD_SHL
, exec_shl
},
1434 {X86_DECODE_CMD_ROL
, exec_rol
},
1435 {X86_DECODE_CMD_ROR
, exec_ror
},
1436 {X86_DECODE_CMD_RCR
, exec_rcr
},
1437 {X86_DECODE_CMD_RCL
, exec_rcl
},
1438 /*{X86_DECODE_CMD_CPUID, exec_cpuid},*/
1439 {X86_DECODE_CMD_MOVS
, exec_movs
},
1440 {X86_DECODE_CMD_CMPS
, exec_cmps
},
1441 {X86_DECODE_CMD_STOS
, exec_stos
},
1442 {X86_DECODE_CMD_SCAS
, exec_scas
},
1443 {X86_DECODE_CMD_LODS
, exec_lods
},
1444 {X86_DECODE_CMD_MOVSX
, exec_movsx
},
1445 {X86_DECODE_CMD_XCHG
, exec_xchg
},
1446 {X86_DECODE_CMD_XADD
, exec_xadd
},
1449 static struct cmd_handler _cmd_handler
[X86_DECODE_CMD_LAST
];
/*
 * Build the command-indexed dispatch table from the handlers[] list so
 * exec_instruction() can jump straight to _cmd_handler[ins->cmd].
 *
 * NOTE(review): the loop-variable declaration is reconstructed — this
 * copy of the source is missing that line; verify upstream.
 */
static void init_cmd_handler()
{
    int i;
    for (i = 0; i < ARRAY_SIZE(handlers); i++) {
        _cmd_handler[handlers[i].cmd] = handlers[i];
    }
}
1459 void load_regs(struct CPUState
*cpu
)
1461 X86CPU
*x86_cpu
= X86_CPU(cpu
);
1462 CPUX86State
*env
= &x86_cpu
->env
;
1465 RRX(env
, R_EAX
) = rreg(cpu
->hvf_fd
, HV_X86_RAX
);
1466 RRX(env
, R_EBX
) = rreg(cpu
->hvf_fd
, HV_X86_RBX
);
1467 RRX(env
, R_ECX
) = rreg(cpu
->hvf_fd
, HV_X86_RCX
);
1468 RRX(env
, R_EDX
) = rreg(cpu
->hvf_fd
, HV_X86_RDX
);
1469 RRX(env
, R_ESI
) = rreg(cpu
->hvf_fd
, HV_X86_RSI
);
1470 RRX(env
, R_EDI
) = rreg(cpu
->hvf_fd
, HV_X86_RDI
);
1471 RRX(env
, R_ESP
) = rreg(cpu
->hvf_fd
, HV_X86_RSP
);
1472 RRX(env
, R_EBP
) = rreg(cpu
->hvf_fd
, HV_X86_RBP
);
1473 for (i
= 8; i
< 16; i
++) {
1474 RRX(env
, i
) = rreg(cpu
->hvf_fd
, HV_X86_RAX
+ i
);
1477 RFLAGS(env
) = rreg(cpu
->hvf_fd
, HV_X86_RFLAGS
);
1478 rflags_to_lflags(env
);
1479 RIP(env
) = rreg(cpu
->hvf_fd
, HV_X86_RIP
);
1482 void store_regs(struct CPUState
*cpu
)
1484 X86CPU
*x86_cpu
= X86_CPU(cpu
);
1485 CPUX86State
*env
= &x86_cpu
->env
;
1488 wreg(cpu
->hvf_fd
, HV_X86_RAX
, RAX(env
));
1489 wreg(cpu
->hvf_fd
, HV_X86_RBX
, RBX(env
));
1490 wreg(cpu
->hvf_fd
, HV_X86_RCX
, RCX(env
));
1491 wreg(cpu
->hvf_fd
, HV_X86_RDX
, RDX(env
));
1492 wreg(cpu
->hvf_fd
, HV_X86_RSI
, RSI(env
));
1493 wreg(cpu
->hvf_fd
, HV_X86_RDI
, RDI(env
));
1494 wreg(cpu
->hvf_fd
, HV_X86_RBP
, RBP(env
));
1495 wreg(cpu
->hvf_fd
, HV_X86_RSP
, RSP(env
));
1496 for (i
= 8; i
< 16; i
++) {
1497 wreg(cpu
->hvf_fd
, HV_X86_RAX
+ i
, RRX(env
, i
));
1500 lflags_to_rflags(env
);
1501 wreg(cpu
->hvf_fd
, HV_X86_RFLAGS
, RFLAGS(env
));
1502 macvm_set_rip(cpu
, RIP(env
));
1505 bool exec_instruction(struct CPUX86State
*env
, struct x86_decode
*ins
)
1507 /*if (hvf_vcpu_id(cpu))
1508 printf("%d, %llx: exec_instruction %s\n", hvf_vcpu_id(cpu), RIP(cpu),
1509 decode_cmd_to_string(ins->cmd));*/
1511 if (!_cmd_handler
[ins
->cmd
].handler
) {
1512 printf("Unimplemented handler (%llx) for %d (%x %x) \n", RIP(env
),
1513 ins
->cmd
, ins
->opcode
[0],
1514 ins
->opcode_len
> 1 ? ins
->opcode
[1] : 0);
1515 RIP(env
) += ins
->len
;
1519 _cmd_handler
[ins
->cmd
].handler(env
, ins
);