/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10

#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)

//#define MACRO_TEST   1
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];

static TCGv cpu_T[2], cpu_T3;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;
static TCGv cpu_tmp5;

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

#include "gen-icount.h"

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif
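/*
 * A note on the globals above (editorial summary of how they are used
 * in the code that follows): cpu_T[0]/cpu_T[1] are the generic value
 * temporaries inherited from the old "micro op" model, cpu_A0 holds
 * the current effective address, and cpu_cc_src/cpu_cc_dst/cpu_cc_op
 * implement the lazy condition-code scheme: instead of computing
 * EFLAGS after every instruction, the translator records the last
 * flag-setting operation and its operands, and materializes
 * individual flags only on demand.
 */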
typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int ss32;   /* 32 bit stack segment */
    int cc_op;  /* current CC operation */
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
} DisasContext;
static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);

/* i386 arith/logic operations */
enum {
    /* ... */
    OP_SHL1, /* undocumented */
    /* ... */
};

/* i386 int registers */
enum {
    OR_EAX,   /* MUST be even numbered */
    /* ... */
    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};
static inline void gen_op_movl_T0_0(void)
{
    tcg_gen_movi_tl(cpu_T[0], 0);
}

static inline void gen_op_movl_T0_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T0_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T1_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_T1_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_A0_im(uint32_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_im(int64_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}
#endif

static inline void gen_movtl_T0_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_movtl_T1_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_andl_T0_ffff(void)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
}

static inline void gen_op_andl_T0_im(uint32_t val)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
}

static inline void gen_op_movl_T0_T1(void)
{
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
}

static inline void gen_op_andl_A0_ffff(void)
{
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
}
#ifdef TARGET_X86_64
#define NB_OP_SIZES 4
#else /* !TARGET_X86_64 */
#define NB_OP_SIZES 3
#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
{
    if (reg < 4) {
        return false;
    }
#ifdef TARGET_X86_64
    if (reg >= 8 || x86_64_hregs) {
        return false;
    }
#endif
    return true;
}
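/*
 * Example: with no REX prefix, a byte operand with reg = 4 names AH,
 * i.e. bits 15..8 of EAX (reg - 4 = 0). With any REX prefix present
 * (x86_64_hregs set), reg = 4 instead names SPL, the low 8 bits of
 * RSP, so the function returns false in that case.
 */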
static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
{
    switch (ot) {
    case OT_BYTE:
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
        } else {
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        }
        break;
    case OT_WORD:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        break;
    default: /* XXX this shouldn't be reached; abort? */
    case OT_LONG:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
#ifdef TARGET_X86_64
    case OT_QUAD:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
#endif
    }
}
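/*
 * The deposit ops above implement x86 partial-register semantics: an
 * 8- or 16-bit write replaces only the low bits of the wider backing
 * register, e.g. deposit(reg, reg, t0, 0, 16) keeps the upper bits of
 * the old value and inserts the new 16-bit result at bit 0.
 */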
static inline void gen_op_mov_reg_T0(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);
}

static inline void gen_op_mov_reg_T1(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[1]);
}

static inline void gen_op_mov_reg_A0(int size, int reg)
{
    switch (size) {
    case 0:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
        break;
    default: /* XXX this shouldn't be reached; abort? */
    case 1:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
        break;
#endif
    }
}
static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
{
    if (ot == OT_BYTE && byte_reg_is_xH(reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
        tcg_gen_ext8u_tl(t0, t0);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
{
    gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
}
static inline void gen_op_movl_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addl_A0_im(int32_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_addq_A0_im(int64_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif

static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
#endif
        gen_op_addl_A0_im(val);
}

static inline void gen_op_addl_T0_T1(void)
{
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
}

static inline void gen_op_jmp_T0(void)
{
    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
}
static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
{
    switch (size) {
    case 0:
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
        break;
    case 1:
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
        break;
#endif
    }
}

static inline void gen_op_add_reg_T0(int size, int reg)
{
    switch (size) {
    case 0:
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
        break;
    case 1:
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
        break;
#endif
    }
}

static inline void gen_op_set_cc_op(int32_t val)
{
    tcg_gen_movi_i32(cpu_cc_op, val);
}
static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}

static inline void gen_op_movl_A0_seg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env,
                     offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    } else {
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
    }
#else
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
}

static inline void gen_op_addq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}

static inline void gen_op_movq_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
#endif
static inline void gen_op_lds_T0_A0(int idx)
{
    int mem_index = (idx >> 2) - 1;
    switch (idx & 3) {
    case 0:
        tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
        break;
    default:
    case 2:
        tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
        break;
    }
}

static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch (idx & 3) {
    case 0:
        tcg_gen_qemu_ld8u(t0, a0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_ld16u(t0, a0, mem_index);
        break;
    case 2:
        tcg_gen_qemu_ld32u(t0, a0, mem_index);
        break;
    default:
    case 3:
        /* Should never happen on 32-bit targets.  */
#ifdef TARGET_X86_64
        tcg_gen_qemu_ld64(t0, a0, mem_index);
#endif
        break;
    }
}

/* XXX: always use ldu or lds */
static inline void gen_op_ld_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ldu_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ld_T1_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[1], cpu_A0);
}
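/*
 * The 'idx' parameter of these load/store helpers packs two values:
 * the low two bits are the operand size (0 = byte, 1 = word, 2 = long,
 * 3 = quad, matching the OT_* constants), and the remaining bits carry
 * s->mem_index. Hence "(idx >> 2) - 1" recovers the softmmu memory
 * index and "idx & 3" the access size; callers build the argument as
 * e.g. "ot + s->mem_index".
 */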
static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch (idx & 3) {
    case 0:
        tcg_gen_qemu_st8(t0, a0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_st16(t0, a0, mem_index);
        break;
    case 2:
        tcg_gen_qemu_st32(t0, a0, mem_index);
        break;
    default:
    case 3:
        /* Should never happen on 32-bit targets.  */
#ifdef TARGET_X86_64
        tcg_gen_qemu_st64(t0, a0, mem_index);
#endif
        break;
    }
}

static inline void gen_op_st_T0_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_st_T1_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[1], cpu_A0);
}

static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
}
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    int override;

    override = s->override;
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        if (override >= 0) {
            gen_op_movq_A0_seg(override);
            gen_op_addq_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movq_A0_reg(R_ESI);
        }
    } else
#endif
    if (s->aflag) {
        /* 32 bit address */
        if (s->addseg && override < 0)
            override = R_DS;
        if (override >= 0) {
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movl_A0_reg(R_ESI);
        }
    } else {
        /* 16 bit address, always override */
        if (override < 0)
            override = R_DS;
        gen_op_movl_A0_reg(R_ESI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(s, override);
    }
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        gen_op_movq_A0_reg(R_EDI);
    } else
#endif
    if (s->aflag) {
        if (s->addseg) {
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
        } else {
            gen_op_movl_A0_reg(R_EDI);
        }
    } else {
        gen_op_movl_A0_reg(R_EDI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(s, R_ES);
    }
}
static inline void gen_op_movl_T0_Dshift(int ot)
{
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
}

static void gen_extu(int ot, TCGv reg)
{
    switch (ot) {
    case OT_BYTE:
        tcg_gen_ext8u_tl(reg, reg);
        break;
    case OT_WORD:
        tcg_gen_ext16u_tl(reg, reg);
        break;
    case OT_LONG:
        tcg_gen_ext32u_tl(reg, reg);
        break;
    default:
        break;
    }
}

static void gen_exts(int ot, TCGv reg)
{
    switch (ot) {
    case OT_BYTE:
        tcg_gen_ext8s_tl(reg, reg);
        break;
    case OT_WORD:
        tcg_gen_ext16s_tl(reg, reg);
        break;
    case OT_LONG:
        tcg_gen_ext32s_tl(reg, reg);
        break;
    default:
        break;
    }
}
static inline void gen_op_jnz_ecx(int size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(int size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}
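/*
 * 'size' here is the address-size flag (0 = 16-bit, 1 = 32-bit,
 * 2 = 64-bit), so gen_extu(size + 1, ...) truncates ECX to the width
 * actually used by the string instruction: with a 16-bit address
 * size only CX is compared against zero.
 */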
static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case 0: gen_helper_inb(v, n); break;
    case 1: gen_helper_inw(v, n); break;
    case 2: gen_helper_inl(v, n); break;
    }
}

static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case 0: gen_helper_outb(v, n); break;
    case 1: gen_helper_outw(v, n); break;
    case 2: gen_helper_outl(v, n); break;
    }
}
static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    int state_saved;
    target_ulong next_eip;

    state_saved = 0;
    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(cur_eip);
        state_saved = 1;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        switch (ot) {
        case 0: gen_helper_check_iob(cpu_tmp2_i32); break;
        case 1: gen_helper_check_iow(cpu_tmp2_i32); break;
        case 2: gen_helper_check_iol(cpu_tmp2_i32); break;
        }
    }
    if (s->flags & HF_SVMI_MASK) {
        if (!state_saved) {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(cur_eip);
        }
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
                                tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}
static inline void gen_movs(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;
    }
}
static void gen_op_update1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static inline void gen_op_cmpl_T0_T1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

/* compute eflags.C to reg */
static void gen_compute_eflags_c(TCGv reg)
{
    gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_env, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(TCGv reg)
{
    gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_env, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}
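/*
 * Lazy flag evaluation in practice: after e.g. "sub %ebx,%eax" the
 * translator only records cc_src = EBX, cc_dst = result and
 * cc_op = CC_OP_SUBL. A later "jz" then calls cc_compute_all/_c,
 * which rebuilds just the flags it needs from those operands, so most
 * flag computations are never performed at all.
 */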
static inline void gen_setcc_slow_T0(DisasContext *s, int jcc_op)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    switch (jcc_op) {
    case JCC_O:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 11);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_B:
        gen_compute_eflags_c(cpu_T[0]);
        break;
    case JCC_Z:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 6);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_BE:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_S:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 7);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_P:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 2);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_L:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    default:
    case JCC_LE:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    }
}
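/*
 * The shift counts above are the EFLAGS bit positions: CF = bit 0,
 * PF = bit 2, ZF = bit 6, SF = bit 7, OF = bit 11. For example the
 * "less" condition (JCC_L) is SF != OF, computed by xoring bit 11
 * and bit 7 down into bit 0.
 */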
/* return true if setcc_slow is not needed (WARNING: must be kept in
   sync with gen_jcc1) */
static int is_fast_jcc_case(DisasContext *s, int b)
{
    int jcc_op;

    jcc_op = (b >> 1) & 7;
    switch (s->cc_op) {
        /* we optimize the cmp/jcc case */
    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
    case CC_OP_SUBQ:
        if (jcc_op == JCC_O || jcc_op == JCC_P)
            goto slow_jcc;
        break;

        /* some jumps are easy to compute */
    case CC_OP_ADDB:
        /* ... likewise for the LOGIC/INC/DEC/SHL cc_op variants ... */
        if (jcc_op != JCC_Z && jcc_op != JCC_S)
            goto slow_jcc;
        break;
    default:
    slow_jcc:
        return 0;
    }
    return 1;
}
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
{
    int inv, jcc_op, size, cond;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (cc_op) {
        /* we optimize the cmp/jcc case */
    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
    case CC_OP_SUBQ:

        size = cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_Z:
        fast_jcc_z:
            switch (size) {
            case 0:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xff);
                t0 = cpu_tmp0;
                break;
            case 1:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffff);
                t0 = cpu_tmp0;
                break;
#ifdef TARGET_X86_64
            case 2:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffffffff);
                t0 = cpu_tmp0;
                break;
#endif
            default:
                t0 = cpu_cc_dst;
                break;
            }
            tcg_gen_brcondi_tl(inv ? TCG_COND_NE : TCG_COND_EQ, t0, 0, l1);
            break;
        case JCC_S:
        fast_jcc_s:
            switch (size) {
            case 0:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
            case 1:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
#ifdef TARGET_X86_64
            case 2:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
#endif
            default:
                tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
                                   0, l1);
                break;
            }
            break;

        case JCC_B:
            cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
            goto fast_jcc_b;
        case JCC_BE:
            cond = inv ? TCG_COND_GTU : TCG_COND_LEU;
        fast_jcc_b:
            tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
            switch (size) {
            case 0:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xff);
                break;
            case 1:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xffff);
                break;
#ifdef TARGET_X86_64
            case 2:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffffffff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xffffffff);
                break;
#endif
            default:
                t0 = cpu_cc_src;
                break;
            }
            tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
            break;

        case JCC_L:
            cond = inv ? TCG_COND_GE : TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = inv ? TCG_COND_GT : TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
            switch (size) {
            case 0:
                t0 = cpu_tmp0;
                tcg_gen_ext8s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext8s_tl(t0, cpu_cc_src);
                break;
            case 1:
                t0 = cpu_tmp0;
                tcg_gen_ext16s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext16s_tl(t0, cpu_cc_src);
                break;
#ifdef TARGET_X86_64
            case 2:
                t0 = cpu_tmp0;
                tcg_gen_ext32s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext32s_tl(t0, cpu_cc_src);
                break;
#endif
            default:
                t0 = cpu_cc_src;
                break;
            }
            tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
            break;

        default:
            goto slow_jcc;
        }
        break;

        /* some jumps are easy to compute */
    case CC_OP_ADDB:
        /* ... and the other ADD/ADC/SBB/LOGIC/INC/DEC/SHL/SAR variants ... */
        switch (jcc_op) {
        case JCC_Z:
            size = (cc_op - CC_OP_ADDB) & 3;
            goto fast_jcc_z;
        case JCC_S:
            size = (cc_op - CC_OP_ADDB) & 3;
            goto fast_jcc_s;
        default:
            goto slow_jcc;
        }
        break;
    default:
    slow_jcc:
        gen_setcc_slow_T0(s, jcc_op);
        tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
                           cpu_T[0], 0, l1);
        break;
    }
}
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}
static inline void gen_stos(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_lods(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_op_mov_reg_T0(ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

static inline void gen_scas(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op_cmpl_T0_T1_cc();
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_cmps(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op_cmpl_T0_T1_cc();
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}
static inline void gen_ins(DisasContext *s, int ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    gen_op_movl_T0_0();
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_outs(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);

    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);

    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}
/* same method as Valgrind: we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
                                 target_ulong cur_eip, target_ulong next_eip) \
{                                                                             \
    int l2;                                                                   \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    /* a loop would cause two single step exceptions if ECX = 1               \
       before rep string_insn */                                              \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
                                   int nz)                                    \
{                                                                             \
    int l2;                                                                   \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    gen_op_set_cc_op(CC_OP_SUBB + ot);                                        \
    gen_jcc1(s, CC_OP_SUBB + ot, (JCC_Z << 1) | (nz ^ 1), l2);                \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}
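/*
 * Expansion sketch: GEN_REPZ(movs) defines gen_repz_movs(), which
 * emits "if ECX == 0, jump past the instruction; do one movs step;
 * ECX--; jump back to the current instruction". Looping therefore
 * happens by re-entering the same translated code once per iteration,
 * which keeps REP string instructions interruptible and
 * single-steppable.
 */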
static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(cpu_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(cpu_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(cpu_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(cpu_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(cpu_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(cpu_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_const_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(cpu_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(cpu_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(cpu_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(cpu_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(cpu_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(cpu_env, tmp);
        break;
    }
}
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, int ot, int d)
{
    if (d != OR_TMP0) {
        gen_op_mov_TN_reg(ot, 0, d);
    } else {
        gen_op_ld_T0_A0(ot + s1->mem_index);
    }
    switch (op) {
    case OP_ADCL:
        if (s1->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s1->cc_op);
        gen_compute_eflags_c(cpu_tmp4);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
        s1->cc_op = CC_OP_DYNAMIC;
        break;
    case OP_SBBL:
        if (s1->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s1->cc_op);
        gen_compute_eflags_c(cpu_tmp4);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot);
        s1->cc_op = CC_OP_DYNAMIC;
        break;
    case OP_ADDL:
        gen_op_addl_T0_T1();
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update2_cc();
        s1->cc_op = CC_OP_ADDB + ot;
        break;
    case OP_SUBL:
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update2_cc();
        s1->cc_op = CC_OP_SUBB + ot;
        break;
    default:
    case OP_ANDL:
        tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_ORL:
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_XORL:
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_CMPL:
        gen_op_cmpl_T0_T1_cc();
        s1->cc_op = CC_OP_SUBB + ot;
        break;
    }
}
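/*
 * The ADC/SBB tails above encode the carry input into cc_op at run
 * time: the carry value (0 or 1) is shifted left by 2 and added to
 * CC_OP_ADDB + ot (or CC_OP_SUBB + ot), selecting between the ADD and
 * ADC (or SUB and SBB) groups of the CC_OP enum, which are laid out
 * four entries apart, one per operand size.
 */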
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, int ot, int d, int c)
{
    if (d != OR_TMP0)
        gen_op_mov_TN_reg(ot, 0, d);
    else
        gen_op_ld_T0_A0(ot + s1->mem_index);
    if (s1->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s1->cc_op);
    if (c > 0) {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
        s1->cc_op = CC_OP_INCB + ot;
    } else {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
        s1->cc_op = CC_OP_DECB + ot;
    }
    if (d != OR_TMP0)
        gen_op_mov_reg_T0(ot, d);
    else
        gen_op_st_T0_A0(ot + s1->mem_index);
    gen_compute_eflags_c(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}
static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask;
    int shift_label;
    TCGv t0, t1, t2;

    if (ot == OT_QUAD) {
        mask = 0x3f;
    } else {
        mask = 0x1f;
    }

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_T0_A0(ot + s->mem_index);
    } else {
        gen_op_mov_TN_reg(ot, 0, op1);
    }

    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new();

    tcg_gen_andi_tl(t2, cpu_T[1], mask);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, cpu_T[0]);
            tcg_gen_mov_tl(t0, cpu_T[0]);
            tcg_gen_sar_tl(cpu_T[0], cpu_T[0], t2);
        } else {
            gen_extu(ot, cpu_T[0]);
            tcg_gen_mov_tl(t0, cpu_T[0]);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], t2);
        }
    } else {
        tcg_gen_mov_tl(t0, cpu_T[0]);
        tcg_gen_shl_tl(cpu_T[0], cpu_T[0], t2);
    }

    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_T0_A0(ot + s->mem_index);
    } else {
        gen_op_mov_reg_T0(ot, op1);
    }

    /* update eflags if non zero shift */
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
    }

    tcg_gen_mov_tl(t1, cpu_T[0]);

    shift_label = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, shift_label);

    tcg_gen_addi_tl(t2, t2, -1);
    tcg_gen_mov_tl(cpu_cc_dst, t1);

    if (is_right) {
        if (is_arith) {
            tcg_gen_sar_tl(cpu_cc_src, t0, t2);
        } else {
            tcg_gen_shr_tl(cpu_cc_src, t0, t2);
        }
    } else {
        tcg_gen_shl_tl(cpu_cc_src, t0, t2);
    }

    if (is_right) {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
    } else {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
    }

    gen_set_label(shift_label);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}
static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask;

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, cpu_T[0]);
                tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                gen_extu(ot, cpu_T[0]);
                tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
            }
        } else {
            tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
            tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
        }
    }

    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        if (is_right)
            s->cc_op = CC_OP_SARB + ot;
        else
            s->cc_op = CC_OP_SHLB + ot;
    }
}
static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
{
    if (arg2 >= 0)
        tcg_gen_shli_tl(ret, arg1, arg2);
    else
        tcg_gen_shri_tl(ret, arg1, -arg2);
}
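/*
 * tcg_gen_lshift() is a signed-count shift: a positive count shifts
 * left, a negative count shifts right. The rotate code below uses it
 * to move a computed flag bit to position 11 (CC_O) regardless of the
 * data width, e.g. a count of 11 - (data_bits - 1) is +4 for 8-bit
 * operands but negative for 32- and 64-bit operands.
 */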
static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
                          int is_right)
{
    target_ulong mask;
    int label1, label2, data_bits;
    TCGv t0, t1, t2, a0;

    /* XXX: inefficient, but we must use local temps */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    tcg_gen_mov_tl(t1, cpu_T[1]);

    tcg_gen_andi_tl(t1, t1, mask);

    /* Must test zero case to avoid using undefined behaviour in TCG
       shifts. */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);

    if (ot <= OT_WORD)
        tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
    else
        tcg_gen_mov_tl(cpu_tmp0, t1);

    gen_extu(ot, t0);
    tcg_gen_mov_tl(t2, t0);

    data_bits = 8 << ot;
    /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
       fix TCG definition) */
    if (is_right) {
        tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
        tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
        tcg_gen_shl_tl(t0, t0, cpu_tmp0);
    } else {
        tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
        tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
        tcg_gen_shr_tl(t0, t0, cpu_tmp0);
    }
    tcg_gen_or_tl(t0, t0, cpu_tmp4);

    gen_set_label(label1);
    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    /* update eflags */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2);

    gen_compute_eflags(cpu_cc_src);
    tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
    tcg_gen_xor_tl(cpu_tmp0, t2, t0);
    tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
    tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
    if (is_right) {
        tcg_gen_shri_tl(t0, t0, data_bits - 1);
    }
    tcg_gen_andi_tl(t0, t0, CC_C);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_discard_tl(cpu_cc_dst);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);

    gen_set_label(label2);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(a0);
}
static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
                          int is_right)
{
    int mask;
    int data_bits;
    TCGv t0, t1, a0;

    /* XXX: inefficient, but we must use local temps */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    gen_extu(ot, t0);
    tcg_gen_mov_tl(t1, t0);

    op2 &= mask;
    data_bits = 8 << ot;
    if (op2 != 0) {
        int shift = op2 & ((1 << (3 + ot)) - 1);
        if (is_right) {
            tcg_gen_shri_tl(cpu_tmp4, t0, shift);
            tcg_gen_shli_tl(t0, t0, data_bits - shift);
        } else {
            tcg_gen_shli_tl(cpu_tmp4, t0, shift);
            tcg_gen_shri_tl(t0, t0, data_bits - shift);
        }
        tcg_gen_or_tl(t0, t0, cpu_tmp4);
    }

    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    if (op2 != 0) {
        /* update eflags */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);

        gen_compute_eflags(cpu_cc_src);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
        tcg_gen_xor_tl(cpu_tmp0, t1, t0);
        tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
        tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
        if (is_right) {
            tcg_gen_shri_tl(t0, t0, data_bits - 1);
        }
        tcg_gen_andi_tl(t0, t0, CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

        tcg_gen_discard_tl(cpu_cc_dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
        s->cc_op = CC_OP_EFLAGS;
    }

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(a0);
}
/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
                           int is_right)
{
    int label1;

    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    if (is_right) {
        switch (ot) {
        case 0:
            gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case 1:
            gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case 2:
            gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#ifdef TARGET_X86_64
        case 3:
            gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#endif
        }
    } else {
        switch (ot) {
        case 0:
            gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case 1:
            gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case 2:
            gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#ifdef TARGET_X86_64
        case 3:
            gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#endif
        }
    }
    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cc_tmp, -1, label1);

    tcg_gen_mov_tl(cpu_cc_src, cpu_cc_tmp);
    tcg_gen_discard_tl(cpu_cc_dst);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);

    gen_set_label(label1);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
}
/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
                                int is_right)
{
    int label1, label2, data_bits;
    target_ulong mask;
    TCGv t0, t1, t2, a0;

    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    tcg_gen_andi_tl(cpu_T3, cpu_T3, mask);

    tcg_gen_mov_tl(t1, cpu_T[1]);
    tcg_gen_mov_tl(t2, cpu_T3);

    /* Must test zero case to avoid using undefined behaviour in TCG
       shifts. */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);

    tcg_gen_addi_tl(cpu_tmp5, t2, -1);
    if (ot == OT_WORD) {
        /* Note: we implement the Intel behaviour for shift count > 16 */
        if (is_right) {
            tcg_gen_andi_tl(t0, t0, 0xffff);
            tcg_gen_shli_tl(cpu_tmp0, t1, 16);
            tcg_gen_or_tl(t0, t0, cpu_tmp0);
            tcg_gen_ext32u_tl(t0, t0);

            tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);

            /* only needed if count > 16, but a test would complicate */
            tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
            tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);

            tcg_gen_shr_tl(t0, t0, t2);

            tcg_gen_or_tl(t0, t0, cpu_tmp0);
        } else {
            /* XXX: not optimal */
            tcg_gen_andi_tl(t0, t0, 0xffff);
            tcg_gen_shli_tl(t1, t1, 16);
            tcg_gen_or_tl(t1, t1, t0);
            tcg_gen_ext32u_tl(t1, t1);

            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
            tcg_gen_subfi_tl(cpu_tmp0, 32, cpu_tmp5);
            tcg_gen_shr_tl(cpu_tmp5, t1, cpu_tmp0);
            tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp5);

            tcg_gen_shl_tl(t0, t0, t2);
            tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);
        }
    } else {
        data_bits = 8 << ot;
        if (is_right) {
            if (ot == OT_LONG)
                tcg_gen_ext32u_tl(t0, t0);

            tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);

            tcg_gen_shr_tl(t0, t0, t2);
            tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
            tcg_gen_shl_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);
        } else {
            if (ot == OT_LONG)
                tcg_gen_ext32u_tl(t1, t1);

            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);

            tcg_gen_shl_tl(t0, t0, t2);
            tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);
        }
    }
    tcg_gen_mov_tl(t1, cpu_tmp4);

    gen_set_label(label1);
    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    /* update eflags */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2);

    tcg_gen_mov_tl(cpu_cc_src, t1);
    tcg_gen_mov_tl(cpu_cc_dst, t0);
    if (is_right) {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
    } else {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
    }
    gen_set_label(label2);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(a0);
}
static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
{
    if (s != OR_TMP1)
        gen_op_mov_TN_reg(ot, 1, s);
    switch (op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}

static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
{
    switch (op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        gen_op_movl_T1_im(c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}
static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr)
{
    target_long disp;
    int havesib;
    int base;
    int index;
    int scale;
    int opreg;
    int mod, rm, code, override, must_add_seg;

    override = s->override;
    must_add_seg = s->addseg;
    if (override >= 0)
        must_add_seg = 1;
    mod = (modrm >> 6) & 3;
    rm = modrm & 7;

    if (s->aflag) {

        havesib = 0;
        base = rm;
        index = 0;
        scale = 0;

        if (base == 4) {
            havesib = 1;
            code = ldub_code(s->pc++);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            base = (code & 7);
        }
        base |= REX_B(s);

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)ldl_code(s->pc);
                s->pc += 4;
                if (CODE64(s) && !havesib) {
                    disp += s->pc + s->rip_offset;
                }
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)ldub_code(s->pc++);
            break;
        default:
        case 2:
            disp = (int32_t)ldl_code(s->pc);
            s->pc += 4;
            break;
        }

        if (base >= 0) {
            /* for correct popl handling with esp */
            if (base == 4 && s->popl_esp_hack)
                disp += s->popl_esp_hack;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_reg(base);
                if (disp != 0) {
                    gen_op_addq_A0_im(disp);
                }
            } else
#endif
            {
                gen_op_movl_A0_reg(base);
                if (disp != 0)
                    gen_op_addl_A0_im(disp);
            }
        } else {
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_im(disp);
            } else
#endif
            {
                gen_op_movl_A0_im(disp);
            }
        }
        /* index == 4 means no index */
        if (havesib && (index != 4)) {
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_addq_A0_reg_sN(scale, index);
            } else
#endif
            {
                gen_op_addl_A0_reg_sN(scale, index);
            }
        }
        if (must_add_seg) {
            if (override < 0) {
                if (base == R_EBP || base == R_ESP)
                    override = R_SS;
                else
                    override = R_DS;
            }
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_addq_A0_seg(override);
            } else
#endif
            {
                gen_op_addl_A0_seg(s, override);
            }
        }
    } else {
        switch (mod) {
        case 0:
            if (rm == 6) {
                disp = lduw_code(s->pc);
                s->pc += 2;
                gen_op_movl_A0_im(disp);
                rm = 0; /* avoid SS override */
                goto no_rm;
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)ldub_code(s->pc++);
            break;
        default:
        case 2:
            disp = lduw_code(s->pc);
            s->pc += 2;
            break;
        }
        switch (rm) {
        case 0:
            gen_op_movl_A0_reg(R_EBX);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            break;
        case 1:
            gen_op_movl_A0_reg(R_EBX);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            break;
        case 2:
            gen_op_movl_A0_reg(R_EBP);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            break;
        case 3:
            gen_op_movl_A0_reg(R_EBP);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            break;
        case 4:
            gen_op_movl_A0_reg(R_ESI);
            break;
        case 5:
            gen_op_movl_A0_reg(R_EDI);
            break;
        case 6:
            gen_op_movl_A0_reg(R_EBP);
            break;
        default:
        case 7:
            gen_op_movl_A0_reg(R_EBX);
            break;
        }
        if (disp != 0)
            gen_op_addl_A0_im(disp);
        gen_op_andl_A0_ffff();
    no_rm:
        if (must_add_seg) {
            if (override < 0) {
                if (rm == 2 || rm == 3 || rm == 6)
                    override = R_SS;
                else
                    override = R_DS;
            }
            gen_op_addl_A0_seg(s, override);
        }
    }

    opreg = OR_A0;
    disp = 0;
    *reg_ptr = opreg;
    *offset_ptr = disp;
}
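/*
 * Decoding example (illustrative): for "mov 0x10(%eax,%ebx,4),%ecx"
 * the modrm byte gives mod = 1 (8-bit displacement) and rm = 4 (SIB
 * byte follows); the SIB byte gives base = EAX, index = EBX,
 * scale = 2. The code above therefore emits A0 = EAX, A0 += 0x10,
 * A0 += EBX << 2, then adds the DS base if a non-flat segment
 * requires it (must_add_seg).
 */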
static void gen_nop_modrm(DisasContext *s, int modrm)
{
    int mod, rm, base, code;

    mod = (modrm >> 6) & 3;
    if (mod == 3)
        return;
    rm = modrm & 7;

    if (s->aflag) {
        base = rm;
        if (base == 4) {
            code = ldub_code(s->pc++);
            base = (code & 7);
        }
        /* ... skip the displacement bytes without generating code ... */
    }
    /* ... */
}
/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    int override, must_add_seg;
    must_add_seg = s->addseg;
    override = R_DS;
    if (s->override >= 0) {
        override = s->override;
        must_add_seg = 1;
    }
    if (must_add_seg) {
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            gen_op_addq_A0_seg(override);
        } else
#endif
        {
            gen_op_addl_A0_seg(s, override);
        }
    }
}
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
{
    int mod, rm, opreg, disp;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0(ot, reg);
        }
    } else {
        gen_lea_modrm(s, modrm, &opreg, &disp);
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_st_T0_A0(ot + s->mem_index);
        } else {
            gen_op_ld_T0_A0(ot + s->mem_index);
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0(ot, reg);
        }
    }
}
static inline uint32_t insn_get(DisasContext *s, int ot)
{
    uint32_t ret;

    switch (ot) {
    case OT_BYTE:
        ret = ldub_code(s->pc);
        s->pc++;
        break;
    case OT_WORD:
        ret = lduw_code(s->pc);
        s->pc += 2;
        break;
    default:
    case OT_LONG:
        ret = ldl_code(s->pc);
        s->pc += 4;
        break;
    }
    return ret;
}

static inline int insn_const_size(unsigned int ot)
{
    if (ot <= OT_LONG)
        return 1 << ot;
    else
        return 4;
}
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
{
    TranslationBlock *tb;
    target_ulong pc;

    pc = s->cs_base + eip;
    tb = s->tb;
    /* NOTE: we handle the case where the TB spans two pages here */
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        gen_jmp_im(eip);
        tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        gen_jmp_im(eip);
        gen_eob(s);
    }
}
static inline void gen_jcc(DisasContext *s, int b,
                           target_ulong val, target_ulong next_eip)
{
    int l1, l2, cc_op;

    cc_op = s->cc_op;
    gen_update_cc_op(s);
    if (s->jmp_opt) {
        l1 = gen_new_label();
        gen_jcc1(s, cc_op, b, l1);

        gen_goto_tb(s, 0, next_eip);

        gen_set_label(l1);
        gen_goto_tb(s, 1, val);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        l1 = gen_new_label();
        l2 = gen_new_label();
        gen_jcc1(s, cc_op, b, l1);

        gen_jmp_im(next_eip);
        tcg_gen_br(l2);

        gen_set_label(l1);
        gen_jmp_im(val);
        gen_set_label(l2);
        gen_eob(s);
    }
}
static void gen_setcc(DisasContext *s, int b)
{
    int inv, jcc_op, l1;
    TCGv t0;

    if (is_fast_jcc_case(s, b)) {
        /* nominal case: we use a jump */
        /* XXX: make it faster by adding new instructions in TCG */
        t0 = tcg_temp_local_new();
        tcg_gen_movi_tl(t0, 0);
        l1 = gen_new_label();
        gen_jcc1(s, s->cc_op, b ^ 1, l1);
        tcg_gen_movi_tl(t0, 1);
        gen_set_label(l1);
        tcg_gen_mov_tl(cpu_T[0], t0);
        tcg_temp_free(t0);
    } else {
        /* slow case: it is more efficient not to generate a jump,
           although it is questionable whether this optimization is
           worth it */
        inv = b & 1;
        jcc_op = (b >> 1) & 7;
        gen_setcc_slow_T0(s, jcc_op);
        if (inv) {
            tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1);
        }
    }
}
static inline void gen_op_movl_T0_seg(int seg_reg)
{
    tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                     offsetof(CPUX86State, segs[seg_reg].selector));
}

static inline void gen_op_movl_seg_T0_vm(int seg_reg)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
    tcg_gen_st32_tl(cpu_T[0], cpu_env,
                    offsetof(CPUX86State, segs[seg_reg].selector));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
    tcg_gen_st_tl(cpu_T[0], cpu_env,
                  offsetof(CPUX86State, segs[seg_reg].base));
}
/* move T0 to seg_reg and compute if the CPU state may change. Never
   call this function with seg_reg == R_CS */
static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
{
    if (s->pe && !s->vm86) {
        /* XXX: optimize by finding processor state dynamically */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(cur_eip);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_load_seg(tcg_const_i32(seg_reg), cpu_tmp2_i32);
        /* abort translation because the addseg value may change or
           because ss32 may change. For R_SS, translation must always
           stop as a special handling must be done to disable hardware
           interrupts for the next instruction */
        if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
            s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_op_movl_seg_T0_vm(seg_reg);
        if (seg_reg == R_SS)
            s->is_jmp = DISAS_TB_JUMP;
    }
}
static inline int svm_is_rep(int prefixes)
{
    return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
}

static inline void
gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
                              uint32_t type, uint64_t param)
{
    /* no SVM activated; fast case */
    if (likely(!(s->flags & HF_SVMI_MASK)))
        return;
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(pc_start - s->cs_base);
    gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
                                         tcg_const_i64(param));
}

static inline void
gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
{
    gen_svm_check_intercept_param(s, pc_start, type, 0);
}
static inline void gen_stack_update(DisasContext *s, int addend)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_add_reg_im(2, R_ESP, addend);
    } else
#endif
    if (s->ss32) {
        gen_op_add_reg_im(1, R_ESP, addend);
    } else {
        gen_op_add_reg_im(0, R_ESP, addend);
    }
}
/* generate a push. It depends on ss32, addseg and dflag */
static void gen_push_T0(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_movq_A0_reg(R_ESP);
        if (s->dflag) {
            gen_op_addq_A0_im(-8);
            gen_op_st_T0_A0(OT_QUAD + s->mem_index);
        } else {
            gen_op_addq_A0_im(-2);
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
        }
        gen_op_mov_reg_A0(2, R_ESP);
    } else
#endif
    {
        gen_op_movl_A0_reg(R_ESP);
        if (!s->dflag)
            gen_op_addl_A0_im(-2);
        else
            gen_op_addl_A0_im(-4);
        if (s->ss32) {
            if (s->addseg) {
                tcg_gen_mov_tl(cpu_T[1], cpu_A0);
                gen_op_addl_A0_seg(s, R_SS);
            }
        } else {
            gen_op_andl_A0_ffff();
            tcg_gen_mov_tl(cpu_T[1], cpu_A0);
            gen_op_addl_A0_seg(s, R_SS);
        }
        gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
        if (s->ss32 && !s->addseg)
            gen_op_mov_reg_A0(1, R_ESP);
        else
            gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
    }
}
/* generate a push. It depends on ss32, addseg and dflag */
/* slower version for T1, only used for call Ev */
static void gen_push_T1(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_movq_A0_reg(R_ESP);
        if (s->dflag) {
            gen_op_addq_A0_im(-8);
            gen_op_st_T1_A0(OT_QUAD + s->mem_index);
        } else {
            gen_op_addq_A0_im(-2);
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
        }
        gen_op_mov_reg_A0(2, R_ESP);
    } else
#endif
    {
        gen_op_movl_A0_reg(R_ESP);
        if (!s->dflag)
            gen_op_addl_A0_im(-2);
        else
            gen_op_addl_A0_im(-4);
        if (s->ss32) {
            if (s->addseg) {
                gen_op_addl_A0_seg(s, R_SS);
            }
        } else {
            gen_op_andl_A0_ffff();
            gen_op_addl_A0_seg(s, R_SS);
        }
        gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);

        if (s->ss32 && !s->addseg)
            gen_op_mov_reg_A0(1, R_ESP);
        else
            gen_stack_update(s, (-2) << s->dflag);
    }
}
/* two step pop is necessary for precise exceptions */
static void gen_pop_T0(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_movq_A0_reg(R_ESP);
        gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
    } else
#endif
    {
        gen_op_movl_A0_reg(R_ESP);
        if (s->ss32) {
            if (s->addseg)
                gen_op_addl_A0_seg(s, R_SS);
        } else {
            gen_op_andl_A0_ffff();
            gen_op_addl_A0_seg(s, R_SS);
        }
        gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
    }
}

static void gen_pop_update(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (CODE64(s) && s->dflag) {
        gen_stack_update(s, 8);
    } else
#endif
    {
        gen_stack_update(s, 2 << s->dflag);
    }
}
static void gen_stack_A0(DisasContext *s)
{
    gen_op_movl_A0_reg(R_ESP);
    if (!s->ss32)
        gen_op_andl_A0_ffff();
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    if (s->addseg)
        gen_op_addl_A0_seg(s, R_SS);
}
/* NOTE: wrap around in 16 bit not fully handled */
static void gen_pusha(DisasContext *s)
{
    int i;
    gen_op_movl_A0_reg(R_ESP);
    gen_op_addl_A0_im(-16 << s->dflag);
    if (!s->ss32)
        gen_op_andl_A0_ffff();
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    if (s->addseg)
        gen_op_addl_A0_seg(s, R_SS);
    for (i = 0; i < 8; i++) {
        gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
        gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
        gen_op_addl_A0_im(2 << s->dflag);
    }
    gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
}

/* NOTE: wrap around in 16 bit not fully handled */
static void gen_popa(DisasContext *s)
{
    int i;
    gen_op_movl_A0_reg(R_ESP);
    if (!s->ss32)
        gen_op_andl_A0_ffff();
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
    if (s->addseg)
        gen_op_addl_A0_seg(s, R_SS);
    for (i = 0; i < 8; i++) {
        /* ESP is not reloaded */
        if (i != 3) {
            gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
            gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
        }
        gen_op_addl_A0_im(2 << s->dflag);
    }
    gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
}
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
    int ot, opsize;

    level &= 0x1f;
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        ot = s->dflag ? OT_QUAD : OT_WORD;
        opsize = 1 << ot;

        gen_op_movl_A0_reg(R_ESP);
        gen_op_addq_A0_im(-opsize);
        tcg_gen_mov_tl(cpu_T[1], cpu_A0);

        /* push bp */
        gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
        gen_op_st_T0_A0(ot + s->mem_index);
        if (level) {
            /* XXX: must save state */
            gen_helper_enter64_level(tcg_const_i32(level),
                                     tcg_const_i32((ot == OT_QUAD)),
                                     cpu_T[1]);
        }
        gen_op_mov_reg_T1(ot, R_EBP);
        tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
        gen_op_mov_reg_T1(OT_QUAD, R_ESP);
    } else
#endif
    {
        ot = s->dflag + OT_WORD;
        opsize = 2 << s->dflag;

        gen_op_movl_A0_reg(R_ESP);
        gen_op_addl_A0_im(-opsize);
        if (!s->ss32)
            gen_op_andl_A0_ffff();
        tcg_gen_mov_tl(cpu_T[1], cpu_A0);
        if (s->addseg)
            gen_op_addl_A0_seg(s, R_SS);
        /* push bp */
        gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
        gen_op_st_T0_A0(ot + s->mem_index);
        if (level) {
            /* XXX: must save state */
            gen_helper_enter_level(tcg_const_i32(level),
                                   tcg_const_i32(s->dflag),
                                   cpu_T[1]);
        }
        gen_op_mov_reg_T1(ot, R_EBP);
        tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
        gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
    }
}
static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
    s->is_jmp = DISAS_TB_JUMP;
}

/* an interrupt is different from an exception because of the
   privilege checks */
static void gen_interrupt(DisasContext *s, int intno,
                          target_ulong cur_eip, target_ulong next_eip)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
                               tcg_const_i32(next_eip - cur_eip));
    s->is_jmp = DISAS_TB_JUMP;
}

static void gen_debug(DisasContext *s, target_ulong cur_eip)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    gen_helper_debug(cpu_env);
    s->is_jmp = DISAS_TB_JUMP;
}
/* generate a generic end of block. Trace exception is also generated
   if needed */
static void gen_eob(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
        gen_helper_reset_inhibit_irq(cpu_env);
    }
    if (s->tb->flags & HF_RF_MASK) {
        gen_helper_reset_rf(cpu_env);
    }
    if (s->singlestep_enabled) {
        gen_helper_debug(cpu_env);
    } else if (s->tf) {
        gen_helper_single_step();
    } else {
        tcg_gen_exit_tb(0);
    }
    s->is_jmp = DISAS_TB_JUMP;
}

/* generate a jump to eip. No segment change must happen before as a
   direct call to the next block may occur */
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
{
    if (s->jmp_opt) {
        gen_update_cc_op(s);
        gen_goto_tb(s, tb_num, eip);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_jmp_im(eip);
        gen_eob(s);
    }
}

static void gen_jmp(DisasContext *s, target_ulong eip)
{
    gen_jmp_tb(s, eip, 0);
}
static inline void gen_ldq_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
}

static inline void gen_stq_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
}

static inline void gen_ldo_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
}

static inline void gen_sto_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
}

static inline void gen_op_movo(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
}

static inline void gen_op_movq(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}

static inline void gen_op_movl(int d_offset, int s_offset)
{
    tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
}

static inline void gen_op_movq_env_0(int d_offset)
{
    tcg_gen_movi_i64(cpu_tmp1_i64, 0);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}
typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv_i32 val);
typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv val);

#define SSE_SPECIAL ((void *)1)
#define SSE_DUMMY ((void *)2)

#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
                     gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
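/*
 * sse_op_table1 below is indexed by the second opcode byte and then by
 * the mandatory prefix: column 0 = no prefix (MMX / "ps" forms),
 * column 1 = 0x66 ("pd"), column 2 = 0xF3 ("ss"), column 3 = 0xF2
 * ("sd"). SSE_SPECIAL marks opcodes decoded by hand, SSE_DUMMY marks
 * ones handled by a bare helper call (femms/emms), and a NULL column
 * means that prefix/opcode combination is an invalid encoding.
 */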
static const SSEFunc_0_epp sse_op_table1[256][4] = {
    /* 3DNow! extensions */
    [0x0e] = { SSE_DUMMY }, /* femms */
    [0x0f] = { SSE_DUMMY }, /* pf... */
    /* pure SSE operations */
    [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
    [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
    [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
    [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
    [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
    [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */

    [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
    [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
    [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
    [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
    [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
    [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
    [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
    [0x2f] = { gen_helper_comiss, gen_helper_comisd },
    [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
    [0x51] = SSE_FOP(sqrt),
    [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
    [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
    [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
    [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
    [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
    [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
    [0x58] = SSE_FOP(add),
    [0x59] = SSE_FOP(mul),
    [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
               gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
    [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
    [0x5c] = SSE_FOP(sub),
    [0x5d] = SSE_FOP(min),
    [0x5e] = SSE_FOP(div),
    [0x5f] = SSE_FOP(max),

    [0xc2] = SSE_FOP(cmpeq),
    [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
               (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */

    [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
    [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */

    /* MMX ops and their SSE extensions */
    [0x60] = MMX_OP2(punpcklbw),
    [0x61] = MMX_OP2(punpcklwd),
    [0x62] = MMX_OP2(punpckldq),
    [0x63] = MMX_OP2(packsswb),
    [0x64] = MMX_OP2(pcmpgtb),
    [0x65] = MMX_OP2(pcmpgtw),
    [0x66] = MMX_OP2(pcmpgtl),
    [0x67] = MMX_OP2(packuswb),
    [0x68] = MMX_OP2(punpckhbw),
    [0x69] = MMX_OP2(punpckhwd),
    [0x6a] = MMX_OP2(punpckhdq),
    [0x6b] = MMX_OP2(packssdw),
    [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
    [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
    [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
               (SSEFunc_0_epp)gen_helper_pshufd_xmm,
               (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
               (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
    [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
    [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
    [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
    [0x74] = MMX_OP2(pcmpeqb),
    [0x75] = MMX_OP2(pcmpeqw),
    [0x76] = MMX_OP2(pcmpeql),
    [0x77] = { SSE_DUMMY }, /* emms */
    [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
    [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
    [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
    [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
    [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
    [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
    [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
    [0xd1] = MMX_OP2(psrlw),
    [0xd2] = MMX_OP2(psrld),
    [0xd3] = MMX_OP2(psrlq),
    [0xd4] = MMX_OP2(paddq),
    [0xd5] = MMX_OP2(pmullw),
    [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
    [0xd8] = MMX_OP2(psubusb),
    [0xd9] = MMX_OP2(psubusw),
    [0xda] = MMX_OP2(pminub),
    [0xdb] = MMX_OP2(pand),
    [0xdc] = MMX_OP2(paddusb),
    [0xdd] = MMX_OP2(paddusw),
    [0xde] = MMX_OP2(pmaxub),
    [0xdf] = MMX_OP2(pandn),
    [0xe0] = MMX_OP2(pavgb),
    [0xe1] = MMX_OP2(psraw),
    [0xe2] = MMX_OP2(psrad),
    [0xe3] = MMX_OP2(pavgw),
    [0xe4] = MMX_OP2(pmulhuw),
    [0xe5] = MMX_OP2(pmulhw),
    [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
    [0xe8] = MMX_OP2(psubsb),
    [0xe9] = MMX_OP2(psubsw),
    [0xea] = MMX_OP2(pminsw),
    [0xeb] = MMX_OP2(por),
    [0xec] = MMX_OP2(paddsb),
    [0xed] = MMX_OP2(paddsw),
    [0xee] = MMX_OP2(pmaxsw),
    [0xef] = MMX_OP2(pxor),
    [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
    [0xf1] = MMX_OP2(psllw),
    [0xf2] = MMX_OP2(pslld),
    [0xf3] = MMX_OP2(psllq),
    [0xf4] = MMX_OP2(pmuludq),
    [0xf5] = MMX_OP2(pmaddwd),
    [0xf6] = MMX_OP2(psadbw),
    [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
               (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
    [0xf8] = MMX_OP2(psubb),
    [0xf9] = MMX_OP2(psubw),
    [0xfa] = MMX_OP2(psubl),
    [0xfb] = MMX_OP2(psubq),
    [0xfc] = MMX_OP2(paddb),
    [0xfd] = MMX_OP2(paddw),
    [0xfe] = MMX_OP2(paddl),
};
static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
    [0 + 2] = MMX_OP2(psrlw),
    [0 + 4] = MMX_OP2(psraw),
    [0 + 6] = MMX_OP2(psllw),
    [8 + 2] = MMX_OP2(psrld),
    [8 + 4] = MMX_OP2(psrad),
    [8 + 6] = MMX_OP2(pslld),
    [16 + 2] = MMX_OP2(psrlq),
    [16 + 3] = { NULL, gen_helper_psrldq_xmm },
    [16 + 6] = MMX_OP2(psllq),
    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
};
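/* sse_op_table2 handles the shift-by-immediate group opcodes
   0x71/0x72/0x73: gen_sse() indexes it as
   ((b - 1) & 3) * 8 + ((modrm >> 3) & 7), i.e. a row of eight entries
   per opcode byte selected by the modrm /r field, with the second
   index again choosing the MMX or XMM variant. */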
static const SSEFunc_0_epi sse_op_table3ai[] = {
    gen_helper_cvtsi2ss,
    gen_helper_cvtsi2sd
};

#ifdef TARGET_X86_64
static const SSEFunc_0_epl sse_op_table3aq[] = {
    gen_helper_cvtsq2ss,
    gen_helper_cvtsq2sd
};
#endif

static const SSEFunc_i_ep sse_op_table3bi[] = {
    gen_helper_cvttss2si,
    gen_helper_cvtss2si,
    gen_helper_cvttsd2si,
    gen_helper_cvtsd2si
};

#ifdef TARGET_X86_64
static const SSEFunc_l_ep sse_op_table3bq[] = {
    gen_helper_cvttss2sq,
    gen_helper_cvtss2sq,
    gen_helper_cvttsd2sq,
    gen_helper_cvtsd2sq
};
#endif
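/* The "3a"/"3b" tables pair up the scalar conversions: "a" converts
   integer to float (cvtsi2ss/sd), "b" float to integer with and
   without truncation; the "q" variants take 64-bit integers and so
   exist only on x86-64 targets.  sse_op_table4 below holds the eight
   comparison predicates of the 0xC2 (cmpps/cmppd/cmpss/cmpsd)
   opcodes, indexed by the immediate byte. */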
static const SSEFunc_0_epp sse_op_table4[8][4] = {
    SSE_FOP(cmpeq),
    SSE_FOP(cmplt),
    SSE_FOP(cmple),
    SSE_FOP(cmpunord),
    SSE_FOP(cmpneq),
    SSE_FOP(cmpnlt),
    SSE_FOP(cmpnle),
    SSE_FOP(cmpord),
};
static const SSEFunc_0_epp sse_op_table5[256] = {
    [0x0c] = gen_helper_pi2fw,
    [0x0d] = gen_helper_pi2fd,
    [0x1c] = gen_helper_pf2iw,
    [0x1d] = gen_helper_pf2id,
    [0x8a] = gen_helper_pfnacc,
    [0x8e] = gen_helper_pfpnacc,
    [0x90] = gen_helper_pfcmpge,
    [0x94] = gen_helper_pfmin,
    [0x96] = gen_helper_pfrcp,
    [0x97] = gen_helper_pfrsqrt,
    [0x9a] = gen_helper_pfsub,
    [0x9e] = gen_helper_pfadd,
    [0xa0] = gen_helper_pfcmpgt,
    [0xa4] = gen_helper_pfmax,
    [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
    [0xa7] = gen_helper_movq, /* pfrsqit1 */
    [0xaa] = gen_helper_pfsubr,
    [0xae] = gen_helper_pfacc,
    [0xb0] = gen_helper_pfcmpeq,
    [0xb4] = gen_helper_pfmul,
    [0xb6] = gen_helper_movq, /* pfrcpit2 */
    [0xb7] = gen_helper_pmulhrw_mmx,
    [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
};
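/* sse_op_table5 dispatches 3DNow! instructions, which encode their
   real operation in a trailing immediate byte; gen_sse() uses that
   byte directly as the index here. */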
struct SSEOpHelper_epp {
    SSEFunc_0_epp op[2];
    uint32_t ext_mask;
};

struct SSEOpHelper_eppi {
    SSEFunc_0_eppi op[2];
    uint32_t ext_mask;
};

#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
static const struct SSEOpHelper_epp sse_op_table6[256] = {
    [0x00] = SSSE3_OP(pshufb),
    [0x01] = SSSE3_OP(phaddw),
    [0x02] = SSSE3_OP(phaddd),
    [0x03] = SSSE3_OP(phaddsw),
    [0x04] = SSSE3_OP(pmaddubsw),
    [0x05] = SSSE3_OP(phsubw),
    [0x06] = SSSE3_OP(phsubd),
    [0x07] = SSSE3_OP(phsubsw),
    [0x08] = SSSE3_OP(psignb),
    [0x09] = SSSE3_OP(psignw),
    [0x0a] = SSSE3_OP(psignd),
    [0x0b] = SSSE3_OP(pmulhrsw),
    [0x10] = SSE41_OP(pblendvb),
    [0x14] = SSE41_OP(blendvps),
    [0x15] = SSE41_OP(blendvpd),
    [0x17] = SSE41_OP(ptest),
    [0x1c] = SSSE3_OP(pabsb),
    [0x1d] = SSSE3_OP(pabsw),
    [0x1e] = SSSE3_OP(pabsd),
    [0x20] = SSE41_OP(pmovsxbw),
    [0x21] = SSE41_OP(pmovsxbd),
    [0x22] = SSE41_OP(pmovsxbq),
    [0x23] = SSE41_OP(pmovsxwd),
    [0x24] = SSE41_OP(pmovsxwq),
    [0x25] = SSE41_OP(pmovsxdq),
    [0x28] = SSE41_OP(pmuldq),
    [0x29] = SSE41_OP(pcmpeqq),
    [0x2a] = SSE41_SPECIAL, /* movntdqa */
    [0x2b] = SSE41_OP(packusdw),
    [0x30] = SSE41_OP(pmovzxbw),
    [0x31] = SSE41_OP(pmovzxbd),
    [0x32] = SSE41_OP(pmovzxbq),
    [0x33] = SSE41_OP(pmovzxwd),
    [0x34] = SSE41_OP(pmovzxwq),
    [0x35] = SSE41_OP(pmovzxdq),
    [0x37] = SSE42_OP(pcmpgtq),
    [0x38] = SSE41_OP(pminsb),
    [0x39] = SSE41_OP(pminsd),
    [0x3a] = SSE41_OP(pminuw),
    [0x3b] = SSE41_OP(pminud),
    [0x3c] = SSE41_OP(pmaxsb),
    [0x3d] = SSE41_OP(pmaxsd),
    [0x3e] = SSE41_OP(pmaxuw),
    [0x3f] = SSE41_OP(pmaxud),
    [0x40] = SSE41_OP(pmulld),
    [0x41] = SSE41_OP(phminposuw),
};
static const struct SSEOpHelper_eppi sse_op_table7[256] = {
    [0x08] = SSE41_OP(roundps),
    [0x09] = SSE41_OP(roundpd),
    [0x0a] = SSE41_OP(roundss),
    [0x0b] = SSE41_OP(roundsd),
    [0x0c] = SSE41_OP(blendps),
    [0x0d] = SSE41_OP(blendpd),
    [0x0e] = SSE41_OP(pblendw),
    [0x0f] = SSSE3_OP(palignr),
    [0x14] = SSE41_SPECIAL, /* pextrb */
    [0x15] = SSE41_SPECIAL, /* pextrw */
    [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
    [0x17] = SSE41_SPECIAL, /* extractps */
    [0x20] = SSE41_SPECIAL, /* pinsrb */
    [0x21] = SSE41_SPECIAL, /* insertps */
    [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
    [0x40] = SSE41_OP(dpps),
    [0x41] = SSE41_OP(dppd),
    [0x42] = SSE41_OP(mpsadbw),
    [0x60] = SSE42_OP(pcmpestrm),
    [0x61] = SSE42_OP(pcmpestri),
    [0x62] = SSE42_OP(pcmpistrm),
    [0x63] = SSE42_OP(pcmpistri),
};
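/* gen_sse() translates one MMX/SSE instruction.  After the table
   lookup the mandatory-prefix index is folded into the opcode as
   b |= b1 << 8, so case labels such as 0x210 below mean "0F 10 with
   an F3 prefix".  SSE_SPECIAL entries are decoded case by case;
   everything else funnels into a single helper call at the bottom of
   the function. */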
static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
{
    int b1, op1_offset, op2_offset, is_xmm, val, ot;
    int modrm, mod, rm, reg, reg_addr, offset_addr;
    SSEFunc_0_epp sse_fn_epp;
    SSEFunc_0_eppi sse_fn_eppi;
    SSEFunc_0_ppi sse_fn_ppi;
    SSEFunc_0_eppt sse_fn_eppt;
    if (s->prefix & PREFIX_DATA)
        b1 = 1;
    else if (s->prefix & PREFIX_REPZ)
        b1 = 2;
    else if (s->prefix & PREFIX_REPNZ)
        b1 = 3;
    else
        b1 = 0;
    sse_fn_epp = sse_op_table1[b][b1];
    if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
        is_xmm = 1;
    } else {
        if (b1 == 0) {
            /* MMX case */
            is_xmm = 0;
        } else {
            is_xmm = 1;
        }
    }
    /* simple MMX/SSE operation */
    if (s->flags & HF_TS_MASK) {
        gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        return;
    }
    if (s->flags & HF_EM_MASK) {
    illegal_op:
        gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
        return;
    }
    if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
        if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
            goto illegal_op;
    if (b == 0x0e) {
        if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
            goto illegal_op;
        /* femms */
        gen_helper_emms(cpu_env);
        return;
    }
    if (b == 0x77) {
        /* emms */
        gen_helper_emms(cpu_env);
        return;
    }
    /* prepare MMX state (XXX: optimize by storing fptt and fptags in
       the static cpu state) */
    if (!is_xmm) {
        gen_helper_enter_mmx(cpu_env);
    }
    modrm = ldub_code(s->pc++);
    reg = ((modrm >> 3) & 7);
    if (is_xmm)
        reg |= rex_r;
    mod = (modrm >> 6) & 3;
    if (sse_fn_epp == SSE_SPECIAL) {
        b |= (b1 << 8);
        switch(b) {
        case 0x0e7: /* movntq */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
            break;
        case 0x1e7: /* movntdq */
        case 0x02b: /* movntps */
        case 0x12b: /* movntpd */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            break;
        case 0x3f0: /* lddqu */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            break;
        case 0x22b: /* movntss */
        case 0x32b: /* movntsd */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            if (b1 & 1) {
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
                                                      xmm_regs[reg]));
            } else {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                                   xmm_regs[reg].XMM_L(0)));
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
            }
            break;
        case 0x6e: /* movd mm, ea */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
                tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
            } else
#endif
            {
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,fpregs[reg].mmx));
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
            }
            break;
        case 0x16e: /* movd xmm, ea */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg]));
                gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
            } else
#endif
            {
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg]));
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
            }
            break;
        case 0x6f: /* movq mm, ea */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
            } else {
                rm = (modrm & 7);
                tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
                               offsetof(CPUX86State,fpregs[rm].mmx));
                tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
                               offsetof(CPUX86State,fpregs[reg].mmx));
            }
            break;
        case 0x010: /* movups */
        case 0x110: /* movupd */
        case 0x028: /* movaps */
        case 0x128: /* movapd */
        case 0x16f: /* movdqa xmm, ea */
        case 0x26f: /* movdqu xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
                            offsetof(CPUX86State,xmm_regs[rm]));
            }
            break;
        case 0x210: /* movss xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
            }
            break;
        case 0x310: /* movsd xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            break;
        case 0x012: /* movlps */
        case 0x112: /* movlpd */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                /* movhlps */
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
            }
            break;
        case 0x212: /* movsldup */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
            }
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
            break;
        case 0x312: /* movddup */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            break;
        case 0x016: /* movhps */
        case 0x116: /* movhpd */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            } else {
                /* movlhps */
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            break;
        case 0x216: /* movshdup */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
            }
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
            break;
        case 0x178:
        case 0x378:
            {
                int bit_index, field_length;

                if (b1 == 1 && reg != 0)
                    goto illegal_op;
                field_length = ldub_code(s->pc++) & 0x3F;
                bit_index = ldub_code(s->pc++) & 0x3F;
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg]));
                if (b1 == 1)
                    gen_helper_extrq_i(cpu_env, cpu_ptr0,
                                       tcg_const_i32(bit_index),
                                       tcg_const_i32(field_length));
                else
                    gen_helper_insertq_i(cpu_env, cpu_ptr0,
                                         tcg_const_i32(bit_index),
                                         tcg_const_i32(field_length));
            }
            break;
        case 0x7e: /* movd ea, mm */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                tcg_gen_ld_i64(cpu_T[0], cpu_env,
                               offsetof(CPUX86State,fpregs[reg].mmx));
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
            } else
#endif
            {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
            }
            break;
        case 0x17e: /* movd ea, xmm */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                tcg_gen_ld_i64(cpu_T[0], cpu_env,
                               offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
            } else
#endif
            {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
            }
            break;
        case 0x27e: /* movq xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            break;
        case 0x7f: /* movq ea, mm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
            } else {
                rm = (modrm & 7);
                gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
                            offsetof(CPUX86State,fpregs[reg].mmx));
            }
            break;
        case 0x011: /* movups */
        case 0x111: /* movupd */
        case 0x029: /* movaps */
        case 0x129: /* movapd */
        case 0x17f: /* movdqa ea, xmm */
        case 0x27f: /* movdqu ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
                            offsetof(CPUX86State,xmm_regs[reg]));
            }
            break;
        case 0x211: /* movss ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
            }
            break;
        case 0x311: /* movsd ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            }
            break;
        case 0x013: /* movlps */
        case 0x113: /* movlpd */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                goto illegal_op;
            }
            break;
        case 0x017: /* movhps */
        case 0x117: /* movhpd */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            } else {
                goto illegal_op;
            }
            break;
        case 0x71: /* shift mm, im */
        case 0x72:
        case 0x73:
        case 0x171: /* shift xmm, im */
        case 0x172:
        case 0x173:
            if (b1 >= 2) {
                goto illegal_op;
            }
            val = ldub_code(s->pc++);
            if (is_xmm) {
                gen_op_movl_T0_im(val);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
                op1_offset = offsetof(CPUX86State,xmm_t0);
            } else {
                gen_op_movl_T0_im(val);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
                op1_offset = offsetof(CPUX86State,mmx_t0);
            }
            sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
                                       (((modrm >> 3)) & 7)][b1];
            if (!sse_fn_epp) {
                goto illegal_op;
            }
            if (is_xmm) {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        case 0x050: /* movmskps */
            rm = (modrm & 7) | REX_B(s);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                             offsetof(CPUX86State,xmm_regs[rm]));
            gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x150: /* movmskpd */
            rm = (modrm & 7) | REX_B(s);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                             offsetof(CPUX86State,xmm_regs[rm]));
            gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x02a: /* cvtpi2ps */
        case 0x12a: /* cvtpi2pd */
            gen_helper_enter_mmx(cpu_env);
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,mmx_t0);
                gen_ldq_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            switch(b >> 8) {
            case 0x0:
                gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            default:
            case 0x1:
                gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            }
            break;
        case 0x22a: /* cvtsi2ss */
        case 0x32a: /* cvtsi2sd */
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            if (ot == OT_LONG) {
                SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
            } else {
#ifdef TARGET_X86_64
                SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
                sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
#else
                goto illegal_op;
#endif
            }
            break;
        case 0x02c: /* cvttps2pi */
        case 0x12c: /* cvttpd2pi */
        case 0x02d: /* cvtps2pi */
        case 0x12d: /* cvtpd2pi */
            gen_helper_enter_mmx(cpu_env);
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,xmm_t0);
                gen_ldo_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
            op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            switch(b) {
            case 0x02c:
                gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            case 0x12c:
                gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            case 0x02d:
                gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            case 0x12d:
                gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            }
            break;
        case 0x22c: /* cvttss2si */
        case 0x32c: /* cvttsd2si */
        case 0x22d: /* cvtss2si */
        case 0x32d: /* cvtsd2si */
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                if ((b >> 8) & 1) {
                    gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
                } else {
                    gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                    tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                }
                op2_offset = offsetof(CPUX86State,xmm_t0);
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
            if (ot == OT_LONG) {
                SSEFunc_i_ep sse_fn_i_ep =
                    sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
                sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
                tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            } else {
#ifdef TARGET_X86_64
                SSEFunc_l_ep sse_fn_l_ep =
                    sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
                sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
#else
                goto illegal_op;
#endif
            }
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0xc4: /* pinsrw */
        case 0x1c4:
            s->rip_offset = 1;
            gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
            val = ldub_code(s->pc++);
            if (b1) {
                val &= 7;
                tcg_gen_st16_tl(cpu_T[0], cpu_env,
                                offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
            } else {
                val &= 3;
                tcg_gen_st16_tl(cpu_T[0], cpu_env,
                                offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
            }
            break;
        case 0xc5: /* pextrw */
        case 0x1c5:
            if (mod != 3)
                goto illegal_op;
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            val = ldub_code(s->pc++);
            if (b1) {
                val &= 7;
                rm = (modrm & 7) | REX_B(s);
                tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
            } else {
                val &= 3;
                rm = (modrm & 7);
                tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
            }
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0x1d6: /* movq ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
            }
            break;
        case 0x2d6: /* movq2dq */
            gen_helper_enter_mmx(cpu_env);
            rm = (modrm & 7);
            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                        offsetof(CPUX86State,fpregs[rm].mmx));
            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            break;
        case 0x3d6: /* movdq2q */
            gen_helper_enter_mmx(cpu_env);
            rm = (modrm & 7) | REX_B(s);
            gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
                        offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            break;
        case 0xd7: /* pmovmskb */
        case 0x1d7:
            if (mod != 3)
                goto illegal_op;
            if (b1) {
                rm = (modrm & 7) | REX_B(s);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
                gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            } else {
                rm = (modrm & 7);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
                gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            }
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x138:
            if (s->prefix & PREFIX_REPNZ)
                goto crc32;
        case 0x038:
            b = modrm;
            modrm = ldub_code(s->pc++);
            rm = modrm & 7;
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            if (b1 >= 2) {
                goto illegal_op;
            }

            sse_fn_epp = sse_op_table6[b].op[b1];
            if (!sse_fn_epp) {
                goto illegal_op;
            }
            if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
                goto illegal_op;

            if (b1) {
                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
                } else {
                    op2_offset = offsetof(CPUX86State,xmm_t0);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    switch (b) {
                    case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
                    case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
                    case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
                        gen_ldq_env_A0(s->mem_index, op2_offset +
                                       offsetof(XMMReg, XMM_Q(0)));
                        break;
                    case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
                    case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
                        tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                           (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
                                       offsetof(XMMReg, XMM_L(0)));
                        break;
                    case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
                        tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
                                           (s->mem_index >> 2) - 1);
                        tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
                                        offsetof(XMMReg, XMM_W(0)));
                        break;
                    case 0x2a: /* movntdqa */
                        gen_ldo_env_A0(s->mem_index, op1_offset);
                        return;
                    default:
                        gen_ldo_env_A0(s->mem_index, op2_offset);
                    }
                }
            } else {
                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
                } else {
                    op2_offset = offsetof(CPUX86State,mmx_t0);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_ldq_env_A0(s->mem_index, op2_offset);
                }
            }
            if (sse_fn_epp == SSE_SPECIAL) {
                goto illegal_op;
            }

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);

            if (b == 0x17)
                s->cc_op = CC_OP_EFLAGS;
            break;
        case 0x338: /* crc32 */
        crc32:
            b = modrm;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;

            if (b != 0xf0 && b != 0xf1)
                goto illegal_op;
            if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
                goto illegal_op;

            if (b == 0xf0)
                ot = OT_BYTE;
            else if (b == 0xf1 && s->dflag != 2)
                if (s->prefix & PREFIX_DATA)
                    ot = OT_WORD;
                else
                    ot = OT_LONG;
            else
                ot = OT_QUAD;

            gen_op_mov_TN_reg(OT_LONG, 0, reg);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
            gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
                             cpu_T[0], tcg_const_i32(8 << ot));

            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0x03a:
        case 0x13a:
            b = modrm;
            modrm = ldub_code(s->pc++);
            rm = modrm & 7;
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            if (b1 >= 2) {
                goto illegal_op;
            }

            sse_fn_eppi = sse_op_table7[b].op[b1];
            if (!sse_fn_eppi) {
                goto illegal_op;
            }
            if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
                goto illegal_op;

            if (sse_fn_eppi == SSE_SPECIAL) {
                ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3)
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                reg = ((modrm >> 3) & 7) | rex_r;
                val = ldub_code(s->pc++);
                switch (b) {
                case 0x14: /* pextrb */
                    tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_B(val & 15)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
                                         (s->mem_index >> 2) - 1);
                    break;
                case 0x15: /* pextrw */
                    tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_W(val & 7)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
                                          (s->mem_index >> 2) - 1);
                    break;
                case 0x16:
                    if (ot == OT_LONG) { /* pextrd */
                        tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
                                       offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(val & 3)));
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        if (mod == 3)
                            gen_op_mov_reg_v(ot, rm, cpu_T[0]);
                        else
                            tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
                                              (s->mem_index >> 2) - 1);
                    } else { /* pextrq */
#ifdef TARGET_X86_64
                        tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
                                       offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_Q(val & 1)));
                        if (mod == 3)
                            gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
                        else
                            tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                              (s->mem_index >> 2) - 1);
#else
                        goto illegal_op;
#endif
                    }
                    break;
                case 0x17: /* extractps */
                    tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_L(val & 3)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
                                          (s->mem_index >> 2) - 1);
                    break;
                case 0x20: /* pinsrb */
                    if (mod == 3)
                        gen_op_mov_TN_reg(OT_LONG, 0, rm);
                    else
                        tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                    tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_B(val & 15)));
                    break;
                case 0x21: /* insertps */
                    if (mod == 3) {
                        tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
                                       offsetof(CPUX86State,xmm_regs[rm]
                                                .XMM_L((val >> 6) & 3)));
                    } else {
                        tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                           (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                    }
                    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
                                   offsetof(CPUX86State,xmm_regs[reg]
                                            .XMM_L((val >> 4) & 3)));
                    if ((val >> 0) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                       cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(0)));
                    if ((val >> 1) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                       cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(1)));
                    if ((val >> 2) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                       cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(2)));
                    if ((val >> 3) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                       cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(3)));
                    break;
                case 0x22:
                    if (ot == OT_LONG) { /* pinsrd */
                        if (mod == 3)
                            gen_op_mov_v_reg(ot, cpu_tmp0, rm);
                        else
                            tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                               (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
                                       offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(val & 3)));
                    } else { /* pinsrq */
#ifdef TARGET_X86_64
                        if (mod == 3)
                            gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
                        else
                            tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                              (s->mem_index >> 2) - 1);
                        tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
                                       offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_Q(val & 1)));
#else
                        goto illegal_op;
#endif
                    }
                    break;
                }
                return;
            }
            if (b1) {
                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
                } else {
                    op2_offset = offsetof(CPUX86State,xmm_t0);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_ldo_env_A0(s->mem_index, op2_offset);
                }
            } else {
                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
                } else {
                    op2_offset = offsetof(CPUX86State,mmx_t0);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_ldq_env_A0(s->mem_index, op2_offset);
                }
            }
            val = ldub_code(s->pc++);

            if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
                s->cc_op = CC_OP_EFLAGS;

                if (s->dflag == 2)
                    /* The helper must use entire 64-bit gp registers */
                    val |= 1 << 8;
            }

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
            break;
        default:
            goto illegal_op;
        }
    } else {
        /* generic MMX or SSE operation */
        switch(b) {
        case 0x70: /* pshufx insn */
        case 0xc6: /* pshufx insn */
        case 0xc2: /* compare insns */
            s->rip_offset = 1;
            break;
        default:
            break;
        }
        if (is_xmm) {
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,xmm_t0);
                if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
                                b == 0xc2)) {
                    /* specific case for SSE single instructions */
                    if (b1 == 2) {
                        /* 32 bit access */
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                    } else {
                        /* 64 bit access */
                        gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
                    }
                } else {
                    gen_ldo_env_A0(s->mem_index, op2_offset);
                }
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
        } else {
            op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,mmx_t0);
                gen_ldq_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
        }
        switch(b) {
        case 0x0f: /* 3DNow! data insns */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
                goto illegal_op;
            val = ldub_code(s->pc++);
            sse_fn_epp = sse_op_table5[val];
            if (!sse_fn_epp) {
                goto illegal_op;
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        case 0x70: /* pshufx insn */
        case 0xc6: /* pshufx insn */
            val = ldub_code(s->pc++);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            /* XXX: introduce a new table? */
            sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
            sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
            break;
        case 0xc2:
            /* compare insns */
            val = ldub_code(s->pc++);
            if (val >= 8)
                goto illegal_op;
            sse_fn_epp = sse_op_table4[val][b1];
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        case 0xf7:
            /* maskmov : we must prepare A0 */
            if (mod != 3)
                goto illegal_op;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_reg(R_EDI);
            } else
#endif
            {
                gen_op_movl_A0_reg(R_EDI);
                if (s->aflag == 0)
                    gen_op_andl_A0_ffff();
            }
            gen_add_A0_ds_seg(s);

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            /* XXX: introduce a new table? */
            sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
            sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
            break;
        default:
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        }
        if (b == 0x2e || b == 0x2f) {
            s->cc_op = CC_OP_EFLAGS;
        }
    }
}
/* convert one instruction. s->is_jmp is set if the translation must
   be stopped. Return the next pc value */
static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
{
    int b, prefixes, aflag, dflag;
    int shift, ot;
    int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
    target_ulong next_eip, tval;
    int rex_w, rex_r;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
        tcg_gen_debug_insn_start(pc_start);
#ifdef TARGET_X86_64
    s->rex_x = 0;
    s->rex_b = 0;
    x86_64_hregs = 0;
#endif
    s->rip_offset = 0; /* for relative ip address */
 next_byte:
    b = ldub_code(s->pc);
    s->pc++;
    /* check prefixes */
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        switch (b) {
        case 0xf3:
            prefixes |= PREFIX_REPZ;
            goto next_byte;
        case 0xf2:
            prefixes |= PREFIX_REPNZ;
            goto next_byte;
        case 0xf0:
            prefixes |= PREFIX_LOCK;
            goto next_byte;
        case 0x2e:
            s->override = R_CS;
            goto next_byte;
        case 0x36:
            s->override = R_SS;
            goto next_byte;
        case 0x3e:
            s->override = R_DS;
            goto next_byte;
        case 0x26:
            s->override = R_ES;
            goto next_byte;
        case 0x64:
            s->override = R_FS;
            goto next_byte;
        case 0x65:
            s->override = R_GS;
            goto next_byte;
        case 0x66:
            prefixes |= PREFIX_DATA;
            goto next_byte;
        case 0x67:
            prefixes |= PREFIX_ADR;
            goto next_byte;
        case 0x40 ... 0x4f:
            /* REX prefix */
            rex_w = (b >> 3) & 1;
            rex_r = (b & 0x4) << 1;
            s->rex_x = (b & 0x2) << 2;
            REX_B(s) = (b & 0x1) << 3;
            x86_64_hregs = 1; /* select uniform byte register addressing */
            goto next_byte;
        }
        if (rex_w == 1) {
            /* 0x66 is ignored if rex.w is set */
            dflag = 2;
        } else {
            if (prefixes & PREFIX_DATA)
                dflag ^= 1;
        }
        if (!(prefixes & PREFIX_ADR))
            aflag = 2;
    } else
#endif
    {
        switch (b) {
        case 0xf3:
            prefixes |= PREFIX_REPZ;
            goto next_byte;
        case 0xf2:
            prefixes |= PREFIX_REPNZ;
            goto next_byte;
        case 0xf0:
            prefixes |= PREFIX_LOCK;
            goto next_byte;
        case 0x2e:
            s->override = R_CS;
            goto next_byte;
        case 0x36:
            s->override = R_SS;
            goto next_byte;
        case 0x3e:
            s->override = R_DS;
            goto next_byte;
        case 0x26:
            s->override = R_ES;
            goto next_byte;
        case 0x64:
            s->override = R_FS;
            goto next_byte;
        case 0x65:
            s->override = R_GS;
            goto next_byte;
        case 0x66:
            prefixes |= PREFIX_DATA;
            goto next_byte;
        case 0x67:
            prefixes |= PREFIX_ADR;
            goto next_byte;
        }
        if (prefixes & PREFIX_DATA)
            dflag ^= 1;
        if (prefixes & PREFIX_ADR)
            aflag ^= 1;
    }

    s->prefix = prefixes;
    s->aflag = aflag;
    s->dflag = dflag;
    /* lock generation */
    if (prefixes & PREFIX_LOCK)
        gen_helper_lock();

    /* now check op code */
 reswitch:
    switch(b) {
    case 0x0f:
        /**************************/
        /* extended op code */
        b = ldub_code(s->pc++) | 0x100;
        goto reswitch;

        /**************************/
        /* arith & logic */
    case 0x00 ... 0x05:
    case 0x08 ... 0x0d:
    case 0x10 ... 0x15:
    case 0x18 ... 0x1d:
    case 0x20 ... 0x25:
    case 0x28 ... 0x2d:
    case 0x30 ... 0x35:
    case 0x38 ... 0x3d:
        {
            int op, f;
            op = (b >> 3) & 7;
            f = (b >> 1) & 3;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;

            switch(f) {
            case 0: /* OP Ev, Gv */
                modrm = ldub_code(s->pc++);
                reg = ((modrm >> 3) & 7) | rex_r;
                mod = (modrm >> 6) & 3;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    opreg = OR_TMP0;
                } else if (op == OP_XORL && rm == reg) {
                xor_zero:
                    /* xor reg, reg optimisation */
                    gen_op_movl_T0_0();
                    s->cc_op = CC_OP_LOGICB + ot;
                    gen_op_mov_reg_T0(ot, reg);
                    gen_op_update1_cc();
                    break;
                } else {
                    opreg = rm;
                }
                gen_op_mov_TN_reg(ot, 1, reg);
                gen_op(s, op, ot, opreg);
                break;
            case 1: /* OP Gv, Ev */
                modrm = ldub_code(s->pc++);
                mod = (modrm >> 6) & 3;
                reg = ((modrm >> 3) & 7) | rex_r;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_op_ld_T1_A0(ot + s->mem_index);
                } else if (op == OP_XORL && rm == reg) {
                    goto xor_zero;
                } else {
                    gen_op_mov_TN_reg(ot, 1, rm);
                }
                gen_op(s, op, ot, reg);
                break;
            case 2: /* OP A, Iv */
                val = insn_get(s, ot);
                gen_op_movl_T1_im(val);
                gen_op(s, op, ot, OR_EAX);
                break;
            }
        }
        break;
    case 0x80: /* GRP1 */
    case 0x81:
    case 0x82:
    case 0x83:
        {
            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;

            modrm = ldub_code(s->pc++);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            op = (modrm >> 3) & 7;

            if (mod != 3) {
                if (b == 0x83)
                    s->rip_offset = 1;
                else
                    s->rip_offset = insn_const_size(ot);
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                opreg = OR_TMP0;
            } else {
                opreg = rm;
            }

            switch(b) {
            default:
            case 0x80:
            case 0x81:
            case 0x82:
                val = insn_get(s, ot);
                break;
            case 0x83:
                val = (int8_t)insn_get(s, OT_BYTE);
                break;
            }
            gen_op_movl_T1_im(val);
            gen_op(s, op, ot, opreg);
        }
        break;
        /**************************/
        /* inc, dec, and other misc arith */
    case 0x40 ... 0x47: /* inc Gv */
        ot = dflag ? OT_LONG : OT_WORD;
        gen_inc(s, ot, OR_EAX + (b & 7), 1);
        break;
    case 0x48 ... 0x4f: /* dec Gv */
        ot = dflag ? OT_LONG : OT_WORD;
        gen_inc(s, ot, OR_EAX + (b & 7), -1);
        break;
    case 0xf6: /* GRP3 */
    case 0xf7:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (mod != 3) {
            if (op == 0)
                s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_op_ld_T0_A0(ot + s->mem_index);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }
        switch(op) {
        case 0: /* test */
            val = insn_get(s, ot);
            gen_op_movl_T1_im(val);
            gen_op_testl_T0_T1_cc();
            s->cc_op = CC_OP_LOGICB + ot;
            break;
        case 2: /* not */
            tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
            if (mod != 3) {
                gen_op_st_T0_A0(ot + s->mem_index);
            } else {
                gen_op_mov_reg_T0(ot, rm);
            }
            break;
        case 3: /* neg */
            tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
            if (mod != 3) {
                gen_op_st_T0_A0(ot + s->mem_index);
            } else {
                gen_op_mov_reg_T0(ot, rm);
            }
            gen_op_update_neg_cc();
            s->cc_op = CC_OP_SUBB + ot;
            break;
        case 4: /* mul */
            switch(ot) {
            case OT_BYTE:
                gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
                tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
                s->cc_op = CC_OP_MULB;
                break;
            case OT_WORD:
                gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
                tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
                gen_op_mov_reg_T0(OT_WORD, R_EDX);
                tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                s->cc_op = CC_OP_MULW;
                break;
            default:
            case OT_LONG:
#ifdef TARGET_X86_64
                gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_LONG, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
                gen_op_mov_reg_T0(OT_LONG, R_EDX);
                tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
#else
                {
                    TCGv_i64 t0, t1;
                    t0 = tcg_temp_new_i64();
                    t1 = tcg_temp_new_i64();
                    gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                    tcg_gen_extu_i32_i64(t0, cpu_T[0]);
                    tcg_gen_extu_i32_i64(t1, cpu_T[1]);
                    tcg_gen_mul_i64(t0, t0, t1);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EAX);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                    tcg_gen_shri_i64(t0, t0, 32);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EDX);
                    tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                }
#endif
                s->cc_op = CC_OP_MULL;
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_helper_mulq_EAX_T0(cpu_env, cpu_T[0]);
                s->cc_op = CC_OP_MULQ;
                break;
#endif
            }
            break;
        case 5: /* imul */
            switch(ot) {
            case OT_BYTE:
                gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
                tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                s->cc_op = CC_OP_MULB;
                break;
            case OT_WORD:
                gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
                tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
                gen_op_mov_reg_T0(OT_WORD, R_EDX);
                s->cc_op = CC_OP_MULW;
                break;
            default:
            case OT_LONG:
#ifdef TARGET_X86_64
                gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_LONG, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
                gen_op_mov_reg_T0(OT_LONG, R_EDX);
#else
                {
                    TCGv_i64 t0, t1;
                    t0 = tcg_temp_new_i64();
                    t1 = tcg_temp_new_i64();
                    gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                    tcg_gen_ext_i32_i64(t0, cpu_T[0]);
                    tcg_gen_ext_i32_i64(t1, cpu_T[1]);
                    tcg_gen_mul_i64(t0, t0, t1);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EAX);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                    tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
                    tcg_gen_shri_i64(t0, t0, 32);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EDX);
                    tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                }
#endif
                s->cc_op = CC_OP_MULL;
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_helper_imulq_EAX_T0(cpu_env, cpu_T[0]);
                s->cc_op = CC_OP_MULQ;
                break;
#endif
            }
            break;
        case 6: /* div */
            switch(ot) {
            case OT_BYTE:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divb_AL(cpu_env, cpu_T[0]);
                break;
            case OT_WORD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divw_AX(cpu_env, cpu_T[0]);
                break;
            default:
            case OT_LONG:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divl_EAX(cpu_env, cpu_T[0]);
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divq_EAX(cpu_env, cpu_T[0]);
                break;
#endif
            }
            break;
        case 7: /* idiv */
            switch(ot) {
            case OT_BYTE:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivb_AL(cpu_env, cpu_T[0]);
                break;
            case OT_WORD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivw_AX(cpu_env, cpu_T[0]);
                break;
            default:
            case OT_LONG:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
                break;
#endif
            }
            break;
        default:
            goto illegal_op;
        }
        break;
    case 0xfe: /* GRP4 */
    case 0xff: /* GRP5 */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (op >= 2 && b == 0xfe) {
            goto illegal_op;
        }
        if (CODE64(s)) {
            if (op == 2 || op == 4) {
                /* operand size for jumps is 64 bit */
                ot = OT_QUAD;
            } else if (op == 3 || op == 5) {
                ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
            } else if (op == 6) {
                /* default push size is 64 bit */
                ot = dflag ? OT_QUAD : OT_WORD;
            }
        }
        if (mod != 3) {
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            if (op >= 2 && op != 3 && op != 5)
                gen_op_ld_T0_A0(ot + s->mem_index);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }

        switch(op) {
        case 0: /* inc Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, 1);
            break;
        case 1: /* dec Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, -1);
            break;
        case 2: /* call Ev */
            /* XXX: optimize if memory (no 'and' is necessary) */
            if (s->dflag == 0)
                gen_op_andl_T0_ffff();
            next_eip = s->pc - s->cs_base;
            gen_movtl_T1_im(next_eip);
            gen_push_T1(s);
            gen_op_jmp_T0();
            gen_eob(s);
            break;
        case 3: /* lcall Ev */
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
            gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
        do_lcall:
            if (s->pe && !s->vm86) {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lcall_protected(cpu_tmp2_i32, cpu_T[1],
                                           tcg_const_i32(dflag),
                                           tcg_const_i32(s->pc - pc_start));
            } else {
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lcall_real(cpu_tmp2_i32, cpu_T[1],
                                      tcg_const_i32(dflag),
                                      tcg_const_i32(s->pc - s->cs_base));
            }
            gen_eob(s);
            break;
        case 4: /* jmp Ev */
            if (s->dflag == 0)
                gen_op_andl_T0_ffff();
            gen_op_jmp_T0();
            gen_eob(s);
            break;
        case 5: /* ljmp Ev */
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
            gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
        do_ljmp:
            if (s->pe && !s->vm86) {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_ljmp_protected(cpu_tmp2_i32, cpu_T[1],
                                          tcg_const_i32(s->pc - pc_start));
            } else {
                gen_op_movl_seg_T0_vm(R_CS);
                gen_op_movl_T0_T1();
                gen_op_jmp_T0();
            }
            gen_eob(s);
            break;
        case 6: /* push Ev */
            gen_push_T0(s);
            break;
        default:
            goto illegal_op;
        }
        break;
    case 0x84: /* test Ev, Gv */
    case 0x85:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_TN_reg(ot, 1, reg);
        gen_op_testl_T0_T1_cc();
        s->cc_op = CC_OP_LOGICB + ot;
        break;

    case 0xa8: /* test eAX, Iv */
    case 0xa9:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        val = insn_get(s, ot);

        gen_op_mov_TN_reg(ot, 0, OR_EAX);
        gen_op_movl_T1_im(val);
        gen_op_testl_T0_T1_cc();
        s->cc_op = CC_OP_LOGICB + ot;
        break;
    case 0x98: /* CWDE/CBW */
#ifdef TARGET_X86_64
        if (dflag == 2) {
            gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
            tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_QUAD, R_EAX);
        } else
#endif
        if (dflag == 1) {
            gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_LONG, R_EAX);
        } else {
            gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
            tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_WORD, R_EAX);
        }
        break;
    case 0x99: /* CDQ/CWD */
#ifdef TARGET_X86_64
        if (dflag == 2) {
            gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
            gen_op_mov_reg_T0(OT_QUAD, R_EDX);
        } else
#endif
        if (dflag == 1) {
            gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
            tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
            gen_op_mov_reg_T0(OT_LONG, R_EDX);
        } else {
            gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
            gen_op_mov_reg_T0(OT_WORD, R_EDX);
        }
        break;
    case 0x1af: /* imul Gv, Ev */
    case 0x69: /* imul Gv, Ev, I */
    case 0x6b:
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        if (b == 0x69)
            s->rip_offset = insn_const_size(ot);
        else if (b == 0x6b)
            s->rip_offset = 1;
        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
        if (b == 0x69) {
            val = insn_get(s, ot);
            gen_op_movl_T1_im(val);
        } else if (b == 0x6b) {
            val = (int8_t)insn_get(s, OT_BYTE);
            gen_op_movl_T1_im(val);
        } else {
            gen_op_mov_TN_reg(ot, 1, reg);
        }

#ifdef TARGET_X86_64
        if (ot == OT_QUAD) {
            gen_helper_imulq_T0_T1(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
        } else
#endif
        if (ot == OT_LONG) {
#ifdef TARGET_X86_64
                tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
#else
                {
                    TCGv_i64 t0, t1;
                    t0 = tcg_temp_new_i64();
                    t1 = tcg_temp_new_i64();
                    tcg_gen_ext_i32_i64(t0, cpu_T[0]);
                    tcg_gen_ext_i32_i64(t1, cpu_T[1]);
                    tcg_gen_mul_i64(t0, t0, t1);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                    tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
                    tcg_gen_shri_i64(t0, t0, 32);
                    tcg_gen_trunc_i64_i32(cpu_T[1], t0);
                    tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
                }
#endif
        } else {
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
            /* XXX: use 32 bit mul which could be faster */
            tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
            tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
            tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
        }
        gen_op_mov_reg_T0(ot, reg);
        s->cc_op = CC_OP_MULB + ot;
        break;
    case 0x1c0:
    case 0x1c1: /* xadd Ev, Gv */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_TN_reg(ot, 1, rm);
            gen_op_addl_T0_T1();
            gen_op_mov_reg_T1(ot, reg);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_op_addl_T0_T1();
            gen_op_st_T0_A0(ot + s->mem_index);
            gen_op_mov_reg_T1(ot, reg);
        }
        gen_op_update2_cc();
        s->cc_op = CC_OP_ADDB + ot;
        break;
    case 0x1b0:
    case 0x1b1: /* cmpxchg Ev, Gv */
        {
            int label1, label2;
            TCGv t0, t1, t2, a0;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            a0 = tcg_temp_local_new();
            gen_op_mov_v_reg(ot, t1, reg);
            if (mod == 3) {
                rm = (modrm & 7) | REX_B(s);
                gen_op_mov_v_reg(ot, t0, rm);
            } else {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                tcg_gen_mov_tl(a0, cpu_A0);
                gen_op_ld_v(ot + s->mem_index, t0, a0);
                rm = 0; /* avoid warning */
            }
            label1 = gen_new_label();
            tcg_gen_sub_tl(t2, cpu_regs[R_EAX], t0);
            gen_extu(ot, t2);
            tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
            label2 = gen_new_label();
            if (mod == 3) {
                gen_op_mov_reg_v(ot, R_EAX, t0);
                tcg_gen_br(label2);
                gen_set_label(label1);
                gen_op_mov_reg_v(ot, rm, t1);
            } else {
                /* perform no-op store cycle like physical cpu; must be
                   before changing accumulator to ensure idempotency if
                   the store faults and the instruction is restarted */
                gen_op_st_v(ot + s->mem_index, t0, a0);
                gen_op_mov_reg_v(ot, R_EAX, t0);
                tcg_gen_br(label2);
                gen_set_label(label1);
                gen_op_st_v(ot + s->mem_index, t1, a0);
            }
            gen_set_label(label2);
            tcg_gen_mov_tl(cpu_cc_src, t0);
            tcg_gen_mov_tl(cpu_cc_dst, t2);
            s->cc_op = CC_OP_SUBB + ot;
            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
            tcg_temp_free(a0);
        }
        break;
    case 0x1c7: /* cmpxchg8b */
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        if ((mod == 3) || ((modrm & 0x38) != 0x8))
            goto illegal_op;
#ifdef TARGET_X86_64
        if (dflag == 2) {
            if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
                goto illegal_op;
            gen_jmp_im(pc_start - s->cs_base);
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_helper_cmpxchg16b(cpu_A0);
        } else
#endif
        {
            if (!(s->cpuid_features & CPUID_CX8))
                goto illegal_op;
            gen_jmp_im(pc_start - s->cs_base);
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_helper_cmpxchg8b(cpu_A0);
        }
        s->cc_op = CC_OP_EFLAGS;
        break;
        /**************************/
        /* push/pop */
    case 0x50 ... 0x57: /* push */
        gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
        gen_push_T0(s);
        break;
    case 0x58 ... 0x5f: /* pop */
        if (CODE64(s)) {
            ot = dflag ? OT_QUAD : OT_WORD;
        } else {
            ot = dflag + OT_WORD;
        }
        gen_pop_T0(s);
        /* NOTE: order is important for pop %sp */
        gen_pop_update(s);
        gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
        break;
    case 0x60: /* pusha */
        if (CODE64(s))
            goto illegal_op;
        gen_pusha(s);
        break;
    case 0x61: /* popa */
        if (CODE64(s))
            goto illegal_op;
        gen_popa(s);
        break;
    case 0x68: /* push Iv */
    case 0x6a:
        if (CODE64(s)) {
            ot = dflag ? OT_QUAD : OT_WORD;
        } else {
            ot = dflag + OT_WORD;
        }
        if (b == 0x68)
            val = insn_get(s, ot);
        else
            val = (int8_t)insn_get(s, OT_BYTE);
        gen_op_movl_T0_im(val);
        gen_push_T0(s);
        break;
    case 0x8f: /* pop Ev */
        if (CODE64(s)) {
            ot = dflag ? OT_QUAD : OT_WORD;
        } else {
            ot = dflag + OT_WORD;
        }
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        gen_pop_T0(s);
        if (mod == 3) {
            /* NOTE: order is important for pop %sp */
            gen_pop_update(s);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            /* NOTE: order is important too for MMU exceptions */
            s->popl_esp_hack = 1 << ot;
            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
            s->popl_esp_hack = 0;
            gen_pop_update(s);
        }
        break;
5105 case 0xc8: /* enter */
5108 val
= lduw_code(s
->pc
);
5110 level
= ldub_code(s
->pc
++);
5111 gen_enter(s
, val
, level
);
5114 case 0xc9: /* leave */
5115 /* XXX: exception not precise (ESP is updated before potential exception) */
5117 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EBP
);
5118 gen_op_mov_reg_T0(OT_QUAD
, R_ESP
);
5119 } else if (s
->ss32
) {
5120 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
5121 gen_op_mov_reg_T0(OT_LONG
, R_ESP
);
5123 gen_op_mov_TN_reg(OT_WORD
, 0, R_EBP
);
5124 gen_op_mov_reg_T0(OT_WORD
, R_ESP
);
5128 ot
= dflag
? OT_QUAD
: OT_WORD
;
5130 ot
= dflag
+ OT_WORD
;
5132 gen_op_mov_reg_T0(ot
, R_EBP
);
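        /* leave above is "(E)SP = (E)BP; pop (E)BP": the stack-segment width
           (CODE64/ss32) picks the size of the SP/BP transfer, while dflag
           picks the size of the final pop into EBP. */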
    case 0x06: /* push es */
    case 0x0e: /* push cs */
    case 0x16: /* push ss */
    case 0x1e: /* push ds */
        if (CODE64(s))
            goto illegal_op;
        gen_op_movl_T0_seg(b >> 3);
        gen_push_T0(s);
        break;
    case 0x1a0: /* push fs */
    case 0x1a8: /* push gs */
        gen_op_movl_T0_seg((b >> 3) & 7);
        gen_push_T0(s);
        break;
    case 0x07: /* pop es */
    case 0x17: /* pop ss */
    case 0x1f: /* pop ds */
        if (CODE64(s))
            goto illegal_op;
        reg = b >> 3;
        gen_pop_T0(s);
        gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
        gen_pop_update(s);
        if (reg == R_SS) {
            /* if reg == SS, inhibit interrupts/trace. */
            /* If several instructions disable interrupts, only the
               _first_ does it */
            if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                gen_helper_set_inhibit_irq(cpu_env);
            s->tf = 0;
        }
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    case 0x1a1: /* pop fs */
    case 0x1a9: /* pop gs */
        gen_pop_T0(s);
        gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
        gen_pop_update(s);
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
        /**************************/
        /* mov */
    case 0x88:
    case 0x89: /* mov Gv, Ev */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        /* generate a generic store */
        gen_ldst_modrm(s, modrm, ot, reg, 1);
        break;
    case 0xc6:
    case 0xc7: /* mov Ev, Iv */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod != 3) {
            s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        }
        val = insn_get(s, ot);
        gen_op_movl_T0_im(val);
        if (mod != 3)
            gen_op_st_T0_A0(ot + s->mem_index);
        else
            gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
        break;
    case 0x8a:
    case 0x8b: /* mov Ev, Gv */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = OT_WORD + dflag;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_reg_T0(ot, reg);
        break;
    case 0x8e: /* mov seg, Gv */
        modrm = ldub_code(s->pc++);
        reg = (modrm >> 3) & 7;
        if (reg >= 6 || reg == R_CS)
            goto illegal_op;
        gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
        gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
        if (reg == R_SS) {
            /* if reg == SS, inhibit interrupts/trace */
            /* If several instructions disable interrupts, only the
               _first_ does it */
            if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                gen_helper_set_inhibit_irq(cpu_env);
            s->tf = 0;
        }
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    case 0x8c: /* mov Gv, seg */
        modrm = ldub_code(s->pc++);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (reg >= 6)
            goto illegal_op;
        gen_op_movl_T0_seg(reg);
        if (mod == 3)
            ot = OT_WORD + dflag;
        else
            ot = OT_WORD;
        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
        break;
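        /* Throughout this block, gen_ldst_modrm's last argument selects the
           direction: 1 stores the named register (or OR_TMP0) into Ev, 0
           loads Ev into OR_TMP0. */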
    case 0x1b6: /* movzbS Gv, Eb */
    case 0x1b7: /* movzwS Gv, Eb */
    case 0x1be: /* movsbS Gv, Eb */
    case 0x1bf: /* movswS Gv, Eb */
        {
            int d_ot;
            /* d_ot is the size of destination */
            d_ot = dflag + OT_WORD;
            /* ot is the size of source */
            ot = (b & 1) + OT_BYTE;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_TN_reg(ot, 0, rm);
                switch(ot | (b & 8)) {
                case OT_BYTE:
                    tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
                    break;
                case OT_BYTE | 8:
                    tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
                    break;
                case OT_WORD:
                    tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
                    break;
                default:
                case OT_WORD | 8:
                    tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
                    break;
                }
                gen_op_mov_reg_T0(d_ot, reg);
            } else {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                if (b & 8) {
                    gen_op_lds_T0_A0(ot + s->mem_index);
                } else {
                    gen_op_ldu_T0_A0(ot + s->mem_index);
                }
                gen_op_mov_reg_T0(d_ot, reg);
            }
        }
        break;
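        /* bit 0 of the opcode selects a byte vs. word source and bit 3
           distinguishes movzx (0x1b6/0x1b7) from movsx (0x1be/0x1bf), so
           ot | (b & 8) indexes the four extension variants directly. */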
    case 0x8d: /* lea */
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* we must ensure that no segment is added */
        s->override = -1;
        val = s->addseg;
        s->addseg = 0;
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        s->addseg = val;
        gen_op_mov_reg_A0(ot - OT_WORD, reg);
        break;
    case 0xa0: /* mov EAX, Ov */
    case 0xa1:
    case 0xa2: /* mov Ov, EAX */
    case 0xa3:
        {
            target_ulong offset_addr;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                offset_addr = ldq_code(s->pc);
                s->pc += 8;
                gen_op_movq_A0_im(offset_addr);
            } else
#endif
            {
                if (s->aflag) {
                    offset_addr = insn_get(s, OT_LONG);
                } else {
                    offset_addr = insn_get(s, OT_WORD);
                }
                gen_op_movl_A0_im(offset_addr);
            }
            gen_add_A0_ds_seg(s);
            if ((b & 2) == 0) {
                gen_op_ld_T0_A0(ot + s->mem_index);
                gen_op_mov_reg_T0(ot, R_EAX);
            } else {
                gen_op_mov_TN_reg(ot, 0, R_EAX);
                gen_op_st_T0_A0(ot + s->mem_index);
            }
        }
        break;
    case 0xd7: /* xlat */
#ifdef TARGET_X86_64
        if (s->aflag == 2) {
            gen_op_movq_A0_reg(R_EBX);
            gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
            tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
            tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
        } else
#endif
        {
            gen_op_movl_A0_reg(R_EBX);
            gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
            tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
            tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
            if (s->aflag == 0)
                gen_op_andl_A0_ffff();
            else
                tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        }
        gen_add_A0_ds_seg(s);
        gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
        gen_op_mov_reg_T0(OT_BYTE, R_EAX);
        break;
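        /* xlat above computes AL = ds:[(E/R)BX + unsigned AL]; the masking of
           cpu_A0 truncates the table address to the current address size. */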
    case 0xb0 ... 0xb7: /* mov R, Ib */
        val = insn_get(s, OT_BYTE);
        gen_op_movl_T0_im(val);
        gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
        break;
    case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
        if (dflag == 2) {
            uint64_t tmp;
            /* 64 bit case */
            tmp = ldq_code(s->pc);
            s->pc += 8;
            reg = (b & 7) | REX_B(s);
            gen_movtl_T0_im(tmp);
            gen_op_mov_reg_T0(OT_QUAD, reg);
        } else
#endif
        {
            ot = dflag ? OT_LONG : OT_WORD;
            val = insn_get(s, ot);
            reg = (b & 7) | REX_B(s);
            gen_op_movl_T0_im(val);
            gen_op_mov_reg_T0(ot, reg);
        }
        break;
    case 0x91 ... 0x97: /* xchg R, EAX */
    do_xchg_reg_eax:
        ot = dflag + OT_WORD;
        reg = (b & 7) | REX_B(s);
        rm = R_EAX;
        goto do_xchg_reg;
    case 0x86:
    case 0x87: /* xchg Ev, Gv */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
        do_xchg_reg:
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_TN_reg(ot, 1, rm);
            gen_op_mov_reg_T0(ot, rm);
            gen_op_mov_reg_T1(ot, reg);
        } else {
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_op_mov_TN_reg(ot, 0, reg);
            /* for xchg, lock is implicit */
            if (!(prefixes & PREFIX_LOCK))
                gen_helper_lock();
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_op_st_T0_A0(ot + s->mem_index);
            if (!(prefixes & PREFIX_LOCK))
                gen_helper_unlock();
            gen_op_mov_reg_T1(ot, reg);
        }
        break;
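        /* xchg with a memory operand is locked even without an explicit LOCK
           prefix, hence the unconditional lock/unlock pair around the
           load/store (skipped only when the prefix already took the lock). */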
    case 0xc4: /* les Gv */
        if (CODE64(s))
            goto illegal_op;
        op = R_ES;
        goto do_lxx;
    case 0xc5: /* lds Gv */
        if (CODE64(s))
            goto illegal_op;
        op = R_DS;
        goto do_lxx;
    case 0x1b2: /* lss Gv */
        op = R_SS;
        goto do_lxx;
    case 0x1b4: /* lfs Gv */
        op = R_FS;
        goto do_lxx;
    case 0x1b5: /* lgs Gv */
        op = R_GS;
    do_lxx:
        ot = dflag ? OT_LONG : OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        gen_op_ld_T1_A0(ot + s->mem_index);
        gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
        /* load the segment first to handle exceptions properly */
        gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
        gen_movl_seg_T0(s, op, pc_start - s->cs_base);
        /* then put the data */
        gen_op_mov_reg_T1(ot, reg);
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
        /************************/
        /* shifts */
    case 0xc0:
    case 0xc1:
        /* shift Ev,Ib */
        shift = 2;
    grp2:
        {
            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;
            modrm = ldub_code(s->pc++);
            mod = (modrm >> 6) & 3;
            op = (modrm >> 3) & 7;

            if (mod != 3) {
                if (shift == 2)
                    s->rip_offset = 1;
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                opreg = OR_TMP0;
            } else {
                opreg = (modrm & 7) | REX_B(s);
            }
            /* simpler op */
            if (shift == 0) {
                gen_shift(s, op, ot, opreg, OR_ECX);
            } else {
                if (shift == 2)
                    shift = ldub_code(s->pc++);
                gen_shifti(s, op, ot, opreg, shift);
            }
        }
        break;
    case 0xd0:
    case 0xd1:
        /* shift Ev,1 */
        shift = 1;
        goto grp2;
    case 0xd2:
    case 0xd3:
        /* shift Ev,cl */
        shift = 0;
        goto grp2;
    case 0x1a4: /* shld imm */
        op = 0;
        shift = 1;
        goto do_shiftd;
    case 0x1a5: /* shld cl */
        op = 0;
        shift = 0;
        goto do_shiftd;
    case 0x1ac: /* shrd imm */
        op = 1;
        shift = 1;
        goto do_shiftd;
    case 0x1ad: /* shrd cl */
        op = 1;
        shift = 0;
    do_shiftd:
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | rex_r;
        if (mod != 3) {
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            opreg = OR_TMP0;
        } else {
            opreg = rm;
        }
        gen_op_mov_TN_reg(ot, 1, reg);
        if (shift) {
            val = ldub_code(s->pc++);
            tcg_gen_movi_tl(cpu_T3, val);
        } else {
            tcg_gen_mov_tl(cpu_T3, cpu_regs[R_ECX]);
        }
        gen_shiftd_rm_T1_T3(s, ot, opreg, op);
        break;
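        /* the double-width shifts pass their count in cpu_T3 (immediate byte
           or CL), so the single generator gen_shiftd_rm_T1_T3 covers all
           four shld/shrd forms. */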
        /************************/
        /* floats */
    case 0xd8 ... 0xdf:
        if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
            /* if CR0.EM or CR0.TS are set, generate an FPU exception */
            /* XXX: what to do if illegal op ? */
            gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
            break;
        }
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        rm = modrm & 7;
        op = ((b & 7) << 3) | ((modrm >> 3) & 7);
        if (mod != 3) {
            /* memory op */
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            switch(op) {
            case 0x00 ... 0x07: /* fxxxs */
            case 0x10 ... 0x17: /* fixxxl */
            case 0x20 ... 0x27: /* fxxxl */
            case 0x30 ... 0x37: /* fixxx */
                {
                    int op1;
                    op1 = op & 7;

                    switch(op >> 4) {
                    case 0:
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
                        break;
                    case 1:
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
                        break;
                    case 2:
                        tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
                        break;
                    case 3:
                    default:
                        gen_op_lds_T0_A0(OT_WORD + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
                        break;
                    }

                    gen_helper_fp_arith_ST0_FT0(op1);
                    if (op1 == 3) {
                        /* fcomp needs pop */
                        gen_helper_fpop(cpu_env);
                    }
                }
                break;
            case 0x08: /* flds */
            case 0x0a: /* fsts */
            case 0x0b: /* fstps */
            case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
            case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
            case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
                switch(op & 7) {
                case 0:
                    switch(op >> 4) {
                    case 0:
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
                        break;
                    case 1:
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
                        break;
                    case 2:
                        tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
                        break;
                    case 3:
                    default:
                        gen_op_lds_T0_A0(OT_WORD + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
                        break;
                    }
                    break;
                case 1:
                    /* XXX: the corresponding CPUID bit must be tested ! */
                    switch(op >> 4) {
                    case 1:
                        gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_st_T0_A0(OT_LONG + s->mem_index);
                        break;
                    case 2:
                        gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
                        tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        break;
                    case 3:
                    default:
                        gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_st_T0_A0(OT_WORD + s->mem_index);
                        break;
                    }
                    gen_helper_fpop(cpu_env);
                    break;
                default:
                    switch(op >> 4) {
                    case 0:
                        gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_st_T0_A0(OT_LONG + s->mem_index);
                        break;
                    case 1:
                        gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_st_T0_A0(OT_LONG + s->mem_index);
                        break;
                    case 2:
                        gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
                        tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        break;
                    case 3:
                    default:
                        gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_st_T0_A0(OT_WORD + s->mem_index);
                        break;
                    }
                    if ((op & 7) == 3)
                        gen_helper_fpop(cpu_env);
                    break;
                }
                break;
            case 0x0c: /* fldenv mem */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
                break;
            case 0x0d: /* fldcw mem */
                gen_op_ld_T0_A0(OT_WORD + s->mem_index);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
                break;
            case 0x0e: /* fnstenv mem */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
                break;
            case 0x0f: /* fnstcw mem */
                gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
                tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                gen_op_st_T0_A0(OT_WORD + s->mem_index);
                break;
            case 0x1d: /* fldt mem */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fldt_ST0(cpu_env, cpu_A0);
                break;
            case 0x1f: /* fstpt mem */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fstt_ST0(cpu_env, cpu_A0);
                gen_helper_fpop(cpu_env);
                break;
            case 0x2c: /* frstor mem */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
                break;
            case 0x2e: /* fnsave mem */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
                break;
            case 0x2f: /* fnstsw mem */
                gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
                tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                gen_op_st_T0_A0(OT_WORD + s->mem_index);
                break;
            case 0x3c: /* fbld */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fbld_ST0(cpu_env, cpu_A0);
                break;
            case 0x3e: /* fbstp */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fbst_ST0(cpu_env, cpu_A0);
                gen_helper_fpop(cpu_env);
                break;
            case 0x3d: /* fildll */
                tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                  (s->mem_index >> 2) - 1);
                gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
                break;
            case 0x3f: /* fistpll */
                gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
                tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                  (s->mem_index >> 2) - 1);
                gen_helper_fpop(cpu_env);
                break;
            default:
                goto illegal_op;
            }
        } else {
            /* register float ops */
            opreg = rm;

            switch(op) {
            case 0x08: /* fld sti */
                gen_helper_fpush(cpu_env);
                gen_helper_fmov_ST0_STN(cpu_env,
                                        tcg_const_i32((opreg + 1) & 7));
                break;
            case 0x09: /* fxchg sti */
            case 0x29: /* fxchg4 sti, undocumented op */
            case 0x39: /* fxchg7 sti, undocumented op */
                gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
                break;
            case 0x0a: /* grp d9/2 */
                switch(rm) {
                case 0: /* fnop */
                    /* check exceptions (FreeBSD FPU probe) */
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_helper_fwait(cpu_env);
                    break;
                default:
                    goto illegal_op;
                }
                break;
            case 0x0c: /* grp d9/4 */
                switch(rm) {
                case 0: /* fchs */
                    gen_helper_fchs_ST0(cpu_env);
                    break;
                case 1: /* fabs */
                    gen_helper_fabs_ST0(cpu_env);
                    break;
                case 4: /* ftst */
                    gen_helper_fldz_FT0(cpu_env);
                    gen_helper_fcom_ST0_FT0(cpu_env);
                    break;
                case 5: /* fxam */
                    gen_helper_fxam_ST0(cpu_env);
                    break;
                default:
                    goto illegal_op;
                }
                break;
            case 0x0d: /* grp d9/5 */
                switch(rm) {
                case 0:
                    gen_helper_fpush(cpu_env);
                    gen_helper_fld1_ST0(cpu_env);
                    break;
                case 1:
                    gen_helper_fpush(cpu_env);
                    gen_helper_fldl2t_ST0(cpu_env);
                    break;
                case 2:
                    gen_helper_fpush(cpu_env);
                    gen_helper_fldl2e_ST0(cpu_env);
                    break;
                case 3:
                    gen_helper_fpush(cpu_env);
                    gen_helper_fldpi_ST0(cpu_env);
                    break;
                case 4:
                    gen_helper_fpush(cpu_env);
                    gen_helper_fldlg2_ST0(cpu_env);
                    break;
                case 5:
                    gen_helper_fpush(cpu_env);
                    gen_helper_fldln2_ST0(cpu_env);
                    break;
                case 6:
                    gen_helper_fpush(cpu_env);
                    gen_helper_fldz_ST0(cpu_env);
                    break;
                default:
                    goto illegal_op;
                }
                break;
            case 0x0e: /* grp d9/6 */
                switch(rm) {
                case 0: /* f2xm1 */
                    gen_helper_f2xm1(cpu_env);
                    break;
                case 1: /* fyl2x */
                    gen_helper_fyl2x(cpu_env);
                    break;
                case 2: /* fptan */
                    gen_helper_fptan(cpu_env);
                    break;
                case 3: /* fpatan */
                    gen_helper_fpatan(cpu_env);
                    break;
                case 4: /* fxtract */
                    gen_helper_fxtract(cpu_env);
                    break;
                case 5: /* fprem1 */
                    gen_helper_fprem1(cpu_env);
                    break;
                case 6: /* fdecstp */
                    gen_helper_fdecstp(cpu_env);
                    break;
                default:
                case 7: /* fincstp */
                    gen_helper_fincstp(cpu_env);
                    break;
                }
                break;
            case 0x0f: /* grp d9/7 */
                switch(rm) {
                case 0: /* fprem */
                    gen_helper_fprem(cpu_env);
                    break;
                case 1: /* fyl2xp1 */
                    gen_helper_fyl2xp1(cpu_env);
                    break;
                case 2: /* fsqrt */
                    gen_helper_fsqrt(cpu_env);
                    break;
                case 3: /* fsincos */
                    gen_helper_fsincos(cpu_env);
                    break;
                case 5: /* fscale */
                    gen_helper_fscale(cpu_env);
                    break;
                case 4: /* frndint */
                    gen_helper_frndint(cpu_env);
                    break;
                case 6: /* fsin */
                    gen_helper_fsin(cpu_env);
                    break;
                default:
                case 7: /* fcos */
                    gen_helper_fcos(cpu_env);
                    break;
                }
                break;
            case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
            case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
            case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
                {
                    int op1;

                    op1 = op & 7;
                    if (op >= 0x20) {
                        gen_helper_fp_arith_STN_ST0(op1, opreg);
                        if (op >= 0x30)
                            gen_helper_fpop(cpu_env);
                    } else {
                        gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                        gen_helper_fp_arith_ST0_FT0(op1);
                    }
                }
                break;
            case 0x02: /* fcom */
            case 0x22: /* fcom2, undocumented op */
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fcom_ST0_FT0(cpu_env);
                break;
            case 0x03: /* fcomp */
            case 0x23: /* fcomp3, undocumented op */
            case 0x32: /* fcomp5, undocumented op */
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fcom_ST0_FT0(cpu_env);
                gen_helper_fpop(cpu_env);
                break;
            case 0x15: /* da/5 */
                switch(rm) {
                case 1: /* fucompp */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
                    gen_helper_fucom_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    gen_helper_fpop(cpu_env);
                    break;
                default:
                    goto illegal_op;
                }
                break;
            case 0x1c:
                switch(rm) {
                case 0: /* feni (287 only, just do nop here) */
                    break;
                case 1: /* fdisi (287 only, just do nop here) */
                    break;
                case 2: /* fclex */
                    gen_helper_fclex(cpu_env);
                    break;
                case 3: /* fninit */
                    gen_helper_fninit(cpu_env);
                    break;
                case 4: /* fsetpm (287 only, just do nop here) */
                    break;
                default:
                    goto illegal_op;
                }
                break;
            case 0x1d: /* fucomi */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fucomi_ST0_FT0(cpu_env);
                s->cc_op = CC_OP_EFLAGS;
                break;
            case 0x1e: /* fcomi */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fcomi_ST0_FT0(cpu_env);
                s->cc_op = CC_OP_EFLAGS;
                break;
            case 0x28: /* ffree sti */
                gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
                break;
            case 0x2a: /* fst sti */
                gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
                break;
            case 0x2b: /* fstp sti */
            case 0x0b: /* fstp1 sti, undocumented op */
            case 0x3a: /* fstp8 sti, undocumented op */
            case 0x3b: /* fstp9 sti, undocumented op */
                gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
                gen_helper_fpop(cpu_env);
                break;
            case 0x2c: /* fucom st(i) */
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fucom_ST0_FT0(cpu_env);
                break;
            case 0x2d: /* fucomp st(i) */
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fucom_ST0_FT0(cpu_env);
                gen_helper_fpop(cpu_env);
                break;
            case 0x33: /* de/3 */
                switch(rm) {
                case 1: /* fcompp */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
                    gen_helper_fcom_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    gen_helper_fpop(cpu_env);
                    break;
                default:
                    goto illegal_op;
                }
                break;
            case 0x38: /* ffreep sti, undocumented op */
                gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fpop(cpu_env);
                break;
            case 0x3c: /* df/4 */
                switch(rm) {
                case 0:
                    gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
                    tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                    gen_op_mov_reg_T0(OT_WORD, R_EAX);
                    break;
                default:
                    goto illegal_op;
                }
                break;
            case 0x3d: /* fucomip */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fucomi_ST0_FT0(cpu_env);
                gen_helper_fpop(cpu_env);
                s->cc_op = CC_OP_EFLAGS;
                break;
            case 0x3e: /* fcomip */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fcomi_ST0_FT0(cpu_env);
                gen_helper_fpop(cpu_env);
                s->cc_op = CC_OP_EFLAGS;
                break;
            case 0x10 ... 0x13: /* fcmovxx */
            case 0x18 ... 0x1b:
                {
                    int op1, l1;
                    static const uint8_t fcmov_cc[8] = {
                        (JCC_B << 1),
                        (JCC_Z << 1),
                        (JCC_BE << 1),
                        (JCC_P << 1),
                    };
                    op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
                    l1 = gen_new_label();
                    gen_jcc1(s, s->cc_op, op1, l1);
                    gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
                    gen_set_label(l1);
                }
                break;
            default:
                goto illegal_op;
            }
        }
        break;
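        /* Note on the fcmovxx cases above: fcmov_cc maps the low opcode bits
           onto the equivalent integer jcc conditions; bit 3 of the opcode
           selects the negated form, and the "conditional" move is realized
           by branching around an unconditional fmov when the condition
           fails. */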
        /************************/
        /* string ops */
    case 0xa4: /* movsS */
    case 0xa5:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_movs(s, ot);
        }
        break;
    case 0xaa: /* stosS */
    case 0xab:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_stos(s, ot);
        }
        break;
    case 0xac: /* lodsS */
    case 0xad:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_lods(s, ot);
        }
        break;
    case 0xae: /* scasS */
    case 0xaf:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
        } else {
            gen_scas(s, ot);
            s->cc_op = CC_OP_SUBB + ot;
        }
        break;
    case 0xa6: /* cmpsS */
    case 0xa7:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
        } else {
            gen_cmps(s, ot);
            s->cc_op = CC_OP_SUBB + ot;
        }
        break;
    case 0x6c: /* insS */
    case 0x6d:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag ? OT_LONG : OT_WORD;
        gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
        gen_op_andl_T0_ffff();
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_ins(s, ot);
            if (use_icount) {
                gen_jmp(s, s->pc - s->cs_base);
            }
        }
        break;
    case 0x6e: /* outsS */
    case 0x6f:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag ? OT_LONG : OT_WORD;
        gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
        gen_op_andl_T0_ffff();
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes) | 4);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_outs(s, ot);
            if (use_icount) {
                gen_jmp(s, s->pc - s->cs_base);
            }
        }
        break;
        /************************/
        /* port I/O */
    case 0xe4:
    case 0xe5:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag ? OT_LONG : OT_WORD;
        val = ldub_code(s->pc++);
        gen_op_movl_T0_im(val);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
        if (use_icount)
            gen_io_start();
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
        gen_op_mov_reg_T1(ot, R_EAX);
        if (use_icount) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
    case 0xe6:
    case 0xe7:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag ? OT_LONG : OT_WORD;
        val = ldub_code(s->pc++);
        gen_op_movl_T0_im(val);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes));
        gen_op_mov_TN_reg(ot, 1, R_EAX);
        if (use_icount)
            gen_io_start();
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
        gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
        if (use_icount) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
    case 0xec:
    case 0xed:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag ? OT_LONG : OT_WORD;
        gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
        gen_op_andl_T0_ffff();
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
        if (use_icount)
            gen_io_start();
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
        gen_op_mov_reg_T1(ot, R_EAX);
        if (use_icount) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
    case 0xee:
    case 0xef:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag ? OT_LONG : OT_WORD;
        gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
        gen_op_andl_T0_ffff();
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes));
        gen_op_mov_TN_reg(ot, 1, R_EAX);
        if (use_icount)
            gen_io_start();
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
        gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
        if (use_icount) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
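        /* In the I/O cases above, gen_check_io performs the IOPL/TSS bitmap
           permission check and reports the access to the SVM I/O intercept
           bitmap; SVM_IOIO_TYPE_MASK flags the access as an input, and the
           gen_io_start/gen_io_end pairs keep port I/O deterministic when
           icount mode is enabled. */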
        /************************/
        /* control */
    case 0xc2: /* ret im */
        val = ldsw_code(s->pc);
        s->pc += 2;
        gen_pop_T0(s);
        if (CODE64(s) && s->dflag)
            s->dflag = 2;
        gen_stack_update(s, val + (2 << s->dflag));
        if (s->dflag == 0)
            gen_op_andl_T0_ffff();
        gen_op_jmp_T0();
        gen_eob(s);
        break;
    case 0xc3: /* ret */
        gen_pop_T0(s);
        gen_pop_update(s);
        if (s->dflag == 0)
            gen_op_andl_T0_ffff();
        gen_op_jmp_T0();
        gen_eob(s);
        break;
    case 0xca: /* lret im */
        val = ldsw_code(s->pc);
        s->pc += 2;
    do_lret:
        if (s->pe && !s->vm86) {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_lret_protected(tcg_const_i32(s->dflag),
                                      tcg_const_i32(val));
        } else {
            gen_stack_A0(s);
            /* pop offset */
            gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
            if (s->dflag == 0)
                gen_op_andl_T0_ffff();
            /* NOTE: keeping EIP updated is not a problem in case of
               exception */
            gen_op_jmp_T0();
            /* pop selector */
            gen_op_addl_A0_im(2 << s->dflag);
            gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
            gen_op_movl_seg_T0_vm(R_CS);
            /* add stack offset */
            gen_stack_update(s, val + (4 << s->dflag));
        }
        gen_eob(s);
        break;
    case 0xcb: /* lret */
        val = 0;
        goto do_lret;
    case 0xcf: /* iret */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
        if (!s->pe) {
            /* real mode */
            gen_helper_iret_real(tcg_const_i32(s->dflag));
            s->cc_op = CC_OP_EFLAGS;
        } else if (s->vm86) {
            if (s->iopl != 3) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_helper_iret_real(tcg_const_i32(s->dflag));
                s->cc_op = CC_OP_EFLAGS;
            }
        } else {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_iret_protected(tcg_const_i32(s->dflag),
                                      tcg_const_i32(s->pc - s->cs_base));
            s->cc_op = CC_OP_EFLAGS;
        }
        gen_eob(s);
        break;
    case 0xe8: /* call im */
        {
            if (dflag)
                tval = (int32_t)insn_get(s, OT_LONG);
            else
                tval = (int16_t)insn_get(s, OT_WORD);
            next_eip = s->pc - s->cs_base;
            tval += next_eip;
            if (s->dflag == 0)
                tval &= 0xffff;
            else if (!CODE64(s))
                tval &= 0xffffffff;
            gen_movtl_T0_im(next_eip);
            gen_push_T0(s);
            gen_jmp(s, tval);
        }
        break;
    case 0x9a: /* lcall im */
        {
            unsigned int selector, offset;

            if (CODE64(s))
                goto illegal_op;
            ot = dflag ? OT_LONG : OT_WORD;
            offset = insn_get(s, ot);
            selector = insn_get(s, OT_WORD);

            gen_op_movl_T0_im(selector);
            gen_op_movl_T1_imu(offset);
        }
        goto do_lcall;
    case 0xe9: /* jmp im */
        if (dflag)
            tval = (int32_t)insn_get(s, OT_LONG);
        else
            tval = (int16_t)insn_get(s, OT_WORD);
        tval += s->pc - s->cs_base;
        if (s->dflag == 0)
            tval &= 0xffff;
        else if (!CODE64(s))
            tval &= 0xffffffff;
        gen_jmp(s, tval);
        break;
    case 0xea: /* ljmp im */
        {
            unsigned int selector, offset;

            if (CODE64(s))
                goto illegal_op;
            ot = dflag ? OT_LONG : OT_WORD;
            offset = insn_get(s, ot);
            selector = insn_get(s, OT_WORD);

            gen_op_movl_T0_im(selector);
            gen_op_movl_T1_imu(offset);
        }
        goto do_ljmp;
    case 0xeb: /* jmp Jb */
        tval = (int8_t)insn_get(s, OT_BYTE);
        tval += s->pc - s->cs_base;
        if (s->dflag == 0)
            tval &= 0xffff;
        gen_jmp(s, tval);
        break;
    case 0x70 ... 0x7f: /* jcc Jb */
        tval = (int8_t)insn_get(s, OT_BYTE);
        goto do_jcc;
    case 0x180 ... 0x18f: /* jcc Jv */
        if (dflag) {
            tval = (int32_t)insn_get(s, OT_LONG);
        } else {
            tval = (int16_t)insn_get(s, OT_WORD);
        }
    do_jcc:
        next_eip = s->pc - s->cs_base;
        tval += next_eip;
        if (s->dflag == 0)
            tval &= 0xffff;
        gen_jcc(s, b, tval, next_eip);
        break;
    case 0x190 ... 0x19f: /* setcc Gv */
        modrm = ldub_code(s->pc++);
        gen_setcc(s, b);
        gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1);
        break;
    case 0x140 ... 0x14f: /* cmov Gv, Ev */
        {
            int l1;
            TCGv t0;

            ot = dflag + OT_WORD;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            t0 = tcg_temp_local_new();
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_mov_v_reg(ot, t0, rm);
            }
#ifdef TARGET_X86_64
            if (ot == OT_LONG) {
                /* XXX: specific Intel behaviour ? */
                l1 = gen_new_label();
                gen_jcc1(s, s->cc_op, b ^ 1, l1);
                tcg_gen_mov_tl(cpu_regs[reg], t0);
                gen_set_label(l1);
                tcg_gen_ext32u_tl(cpu_regs[reg], cpu_regs[reg]);
            } else
#endif
            {
                l1 = gen_new_label();
                gen_jcc1(s, s->cc_op, b ^ 1, l1);
                gen_op_mov_reg_v(ot, reg, t0);
                gen_set_label(l1);
            }
            tcg_temp_free(t0);
        }
        break;
        /************************/
        /* flags */
    case 0x9c: /* pushf */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_helper_read_eflags(cpu_T[0], cpu_env);
            gen_push_T0(s);
        }
        break;
    case 0x9d: /* popf */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_pop_T0(s);
            if (s->cpl == 0) {
                if (s->dflag) {
                    gen_helper_write_eflags(cpu_env, cpu_T[0],
                                            tcg_const_i32((TF_MASK | AC_MASK |
                                                           ID_MASK | NT_MASK |
                                                           IF_MASK |
                                                           IOPL_MASK)));
                } else {
                    gen_helper_write_eflags(cpu_env, cpu_T[0],
                                            tcg_const_i32((TF_MASK | AC_MASK |
                                                           ID_MASK | NT_MASK |
                                                           IF_MASK | IOPL_MASK)
                                                          & 0xffff));
                }
            } else {
                if (s->cpl <= s->iopl) {
                    if (s->dflag) {
                        gen_helper_write_eflags(cpu_env, cpu_T[0],
                                                tcg_const_i32((TF_MASK |
                                                               AC_MASK |
                                                               ID_MASK |
                                                               NT_MASK |
                                                               IF_MASK)));
                    } else {
                        gen_helper_write_eflags(cpu_env, cpu_T[0],
                                                tcg_const_i32((TF_MASK |
                                                               AC_MASK |
                                                               ID_MASK |
                                                               NT_MASK |
                                                               IF_MASK)
                                                              & 0xffff));
                    }
                } else {
                    if (s->dflag) {
                        gen_helper_write_eflags(cpu_env, cpu_T[0],
                                                tcg_const_i32((TF_MASK | AC_MASK |
                                                               ID_MASK | NT_MASK)));
                    } else {
                        gen_helper_write_eflags(cpu_env, cpu_T[0],
                                                tcg_const_i32((TF_MASK | AC_MASK |
                                                               ID_MASK | NT_MASK)
                                                              & 0xffff));
                    }
                }
            }
            gen_pop_update(s);
            s->cc_op = CC_OP_EFLAGS;
            /* abort translation because TF flag may change */
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
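        /* popf may only update a privilege-dependent subset of EFLAGS:
           ring 0 may also change IF and IOPL, CPL <= IOPL may change IF,
           anyone else gets neither; the 16 bit forms additionally mask the
           written value to the low word. */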
    case 0x9e: /* sahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0x9f: /* lahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_T[0]);
        /* Note: gen_compute_eflags() only gives the condition codes */
        tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02);
        gen_op_mov_reg_T0(OT_BYTE, R_AH);
        break;
    case 0xf5: /* cmc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0xf8: /* clc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0xf9: /* stc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0xfc: /* cld */
        tcg_gen_movi_i32(cpu_tmp2_i32, 1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
        break;
    case 0xfd: /* std */
        tcg_gen_movi_i32(cpu_tmp2_i32, -1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
        break;
        /************************/
        /* bit operations */
    case 0x1ba: /* bt/bts/btr/btc Gv, im */
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        op = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        if (mod != 3) {
            s->rip_offset = 1;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_op_ld_T0_A0(ot + s->mem_index);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }
        /* load shift */
        val = ldub_code(s->pc++);
        gen_op_movl_T1_im(val);
        if (op < 4)
            goto illegal_op;
        op -= 4;
        goto bt_op;
    case 0x1a3: /* bt Gv, Ev */
        op = 0;
        goto do_btx;
    case 0x1ab: /* bts */
        op = 1;
        goto do_btx;
    case 0x1b3: /* btr */
        op = 2;
        goto do_btx;
    case 0x1bb: /* btc */
        op = 3;
    do_btx:
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_TN_reg(OT_LONG, 1, reg);
        if (mod != 3) {
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            /* specific case: we need to add a displacement */
            gen_exts(ot, cpu_T[1]);
            tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
            tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
            tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
            gen_op_ld_T0_A0(ot + s->mem_index);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }
    bt_op:
        tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
        switch(op) {
        case 0:
            tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
            tcg_gen_movi_tl(cpu_cc_dst, 0);
            break;
        case 1:
            tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
            tcg_gen_movi_tl(cpu_tmp0, 1);
            tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
            tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        case 2:
            tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
            tcg_gen_movi_tl(cpu_tmp0, 1);
            tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
            tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
            tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        default:
        case 3:
            tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
            tcg_gen_movi_tl(cpu_tmp0, 1);
            tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
            tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        }
        s->cc_op = CC_OP_SARB + ot;
        if (op != 0) {
            if (mod != 3)
                gen_op_st_T0_A0(ot + s->mem_index);
            else
                gen_op_mov_reg_T0(ot, rm);
            tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
            tcg_gen_movi_tl(cpu_cc_dst, 0);
        }
        break;
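        /* bts/btr/btc above leave the shifted-out bit in cpu_cc_src (via
           cpu_tmp4) and select CC_OP_SARB + ot, so CF is later recovered as
           bit 0 of cc_src by the lazy flag code. */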
    case 0x1bc: /* bsf */
    case 0x1bd: /* bsr */
        {
            int label1;
            TCGv t0;

            ot = dflag + OT_WORD;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
            gen_extu(ot, cpu_T[0]);
            t0 = tcg_temp_local_new();
            tcg_gen_mov_tl(t0, cpu_T[0]);
            if ((b & 1) && (prefixes & PREFIX_REPZ) &&
                (s->cpuid_ext3_features & CPUID_EXT3_ABM)) {
                switch(ot) {
                case OT_WORD: gen_helper_lzcnt(cpu_T[0], t0,
                    tcg_const_i32(16)); break;
                case OT_LONG: gen_helper_lzcnt(cpu_T[0], t0,
                    tcg_const_i32(32)); break;
                case OT_QUAD: gen_helper_lzcnt(cpu_T[0], t0,
                    tcg_const_i32(64)); break;
                }
                gen_op_mov_reg_T0(ot, reg);
            } else {
                label1 = gen_new_label();
                tcg_gen_movi_tl(cpu_cc_dst, 0);
                tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
                if (b & 1) {
                    gen_helper_bsr(cpu_T[0], t0);
                } else {
                    gen_helper_bsf(cpu_T[0], t0);
                }
                gen_op_mov_reg_T0(ot, reg);
                tcg_gen_movi_tl(cpu_cc_dst, 1);
                gen_set_label(label1);
                tcg_gen_discard_tl(cpu_cc_src);
                s->cc_op = CC_OP_LOGICB + ot;
            }
            tcg_temp_free(t0);
        }
        break;
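        /* In the non-lzcnt path above, cc_dst stays 0 when the input was
           zero, so CC_OP_LOGICB + ot makes ZF read back as set, matching the
           "result undefined, ZF = 1" hardware behaviour of bsf/bsr. */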
        /************************/
        /* bcd */
    case 0x27: /* daa */
        if (CODE64(s))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_helper_daa(cpu_env);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0x2f: /* das */
        if (CODE64(s))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_helper_das(cpu_env);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0x37: /* aaa */
        if (CODE64(s))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_helper_aaa(cpu_env);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0x3f: /* aas */
        if (CODE64(s))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_helper_aas(cpu_env);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0xd4: /* aam */
        if (CODE64(s))
            goto illegal_op;
        val = ldub_code(s->pc++);
        if (val == 0) {
            gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
        } else {
            gen_helper_aam(cpu_env, tcg_const_i32(val));
            s->cc_op = CC_OP_LOGICB;
        }
        break;
    case 0xd5: /* aad */
        if (CODE64(s))
            goto illegal_op;
        val = ldub_code(s->pc++);
        gen_helper_aad(cpu_env, tcg_const_i32(val));
        s->cc_op = CC_OP_LOGICB;
        break;
        /************************/
        /* misc */
    case 0x90: /* nop */
        /* XXX: correct lock test for all insn */
        if (prefixes & PREFIX_LOCK) {
            goto illegal_op;
        }
        /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
        if (REX_B(s)) {
            goto do_xchg_reg_eax;
        }
        if (prefixes & PREFIX_REPZ) {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
        }
        break;
    case 0x9b: /* fwait */
        if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
            (HF_MP_MASK | HF_TS_MASK)) {
            gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        } else {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fwait(cpu_env);
        }
        break;
    case 0xcc: /* int3 */
        gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
        break;
    case 0xcd: /* int N */
        val = ldub_code(s->pc++);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
        }
        break;
    case 0xce: /* into */
        if (CODE64(s))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_into(tcg_const_i32(s->pc - pc_start));
        break;
#ifdef WANT_ICEBP
    case 0xf1: /* icebp (undocumented, exits to external debugger) */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
#if 1
        gen_debug(s, pc_start - s->cs_base);
#else
        /* start debug */
        tb_flush(cpu_single_env);
        cpu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
#endif
        break;
#endif
    case 0xfa: /* cli */
        if (!s->vm86) {
            if (s->cpl <= s->iopl) {
                gen_helper_cli(cpu_env);
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        } else {
            if (s->iopl == 3) {
                gen_helper_cli(cpu_env);
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        }
        break;
    case 0xfb: /* sti */
        if (!s->vm86) {
            if (s->cpl <= s->iopl) {
            gen_sti:
                gen_helper_sti(cpu_env);
                /* interruptions are enabled only the first insn after sti */
                /* If several instructions disable interrupts, only the
                   _first_ does it */
                if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                    gen_helper_set_inhibit_irq(cpu_env);
                /* give a chance to handle pending irqs */
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        } else {
            if (s->iopl == 3) {
                goto gen_sti;
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        }
        break;
    case 0x62: /* bound */
        if (CODE64(s))
            goto illegal_op;
        ot = dflag ? OT_LONG : OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_op_mov_TN_reg(ot, 0, reg);
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        gen_jmp_im(pc_start - s->cs_base);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        if (ot == OT_WORD)
            gen_helper_boundw(cpu_A0, cpu_tmp2_i32);
        else
            gen_helper_boundl(cpu_A0, cpu_tmp2_i32);
        break;
    case 0x1c8 ... 0x1cf: /* bswap reg */
        reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
        if (dflag == 2) {
            gen_op_mov_TN_reg(OT_QUAD, 0, reg);
            tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_QUAD, reg);
        } else
#endif
        {
            gen_op_mov_TN_reg(OT_LONG, 0, reg);
            tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_LONG, reg);
        }
        break;
    case 0xd6: /* salc */
        if (CODE64(s))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags_c(cpu_T[0]);
        tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
        gen_op_mov_reg_T0(OT_BYTE, R_EAX);
        break;
    case 0xe0: /* loopnz */
    case 0xe1: /* loopz */
    case 0xe2: /* loop */
    case 0xe3: /* jecxz */
        {
            int l1, l2, l3;

            tval = (int8_t)insn_get(s, OT_BYTE);
            next_eip = s->pc - s->cs_base;
            tval += next_eip;
            if (s->dflag == 0)
                tval &= 0xffff;

            l1 = gen_new_label();
            l2 = gen_new_label();
            l3 = gen_new_label();
            b &= 3;
            switch(b) {
            case 0: /* loopnz */
            case 1: /* loopz */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_op_add_reg_im(s->aflag, R_ECX, -1);
                gen_op_jz_ecx(s->aflag, l3);
                gen_compute_eflags(cpu_tmp0);
                tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_Z);
                if (b == 0) {
                    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1);
                } else {
                    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, l1);
                }
                break;
            case 2: /* loop */
                gen_op_add_reg_im(s->aflag, R_ECX, -1);
                gen_op_jnz_ecx(s->aflag, l1);
                break;
            default:
            case 3: /* jecxz */
                gen_op_jz_ecx(s->aflag, l1);
                break;
            }

            gen_set_label(l3);
            gen_jmp_im(next_eip);
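            /* Note: three labels are used here: l1 is the taken branch, l3
               is the early exit when ECX reaches zero in loopz/loopnz, and
               l2 is the common exit reached after one of the two possible
               EIP updates has been generated. */
            tcg_gen_br(l2);

            gen_set_label(l1);
            gen_jmp_im(tval);
            gen_set_label(l2);
            gen_eob(s);
        }
        break;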
    case 0x130: /* wrmsr */
    case 0x132: /* rdmsr */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            if (b & 2) {
                gen_helper_rdmsr();
            } else {
                gen_helper_wrmsr();
            }
        }
        break;
    case 0x131: /* rdtsc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(pc_start - s->cs_base);
        if (use_icount)
            gen_io_start();
        gen_helper_rdtsc();
        if (use_icount) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
    case 0x133: /* rdpmc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_rdpmc();
        break;
7017 /* For Intel SYSENTER is valid on 64-bit */
7018 if (CODE64(s
) && cpu_single_env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
7021 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7023 gen_update_cc_op(s
);
7024 gen_jmp_im(pc_start
- s
->cs_base
);
7025 gen_helper_sysenter();
7029 case 0x135: /* sysexit */
7030 /* For Intel SYSEXIT is valid on 64-bit */
7031 if (CODE64(s
) && cpu_single_env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
7034 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7036 gen_update_cc_op(s
);
7037 gen_jmp_im(pc_start
- s
->cs_base
);
7038 gen_helper_sysexit(tcg_const_i32(dflag
));
7042 #ifdef TARGET_X86_64
7043 case 0x105: /* syscall */
7044 /* XXX: is it usable in real mode ? */
7045 gen_update_cc_op(s
);
7046 gen_jmp_im(pc_start
- s
->cs_base
);
7047 gen_helper_syscall(tcg_const_i32(s
->pc
- pc_start
));
7050 case 0x107: /* sysret */
7052 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7054 gen_update_cc_op(s
);
7055 gen_jmp_im(pc_start
- s
->cs_base
);
7056 gen_helper_sysret(tcg_const_i32(s
->dflag
));
7057 /* condition codes are modified only in long mode */
7059 s
->cc_op
= CC_OP_EFLAGS
;
    case 0x1a2: /* cpuid */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_cpuid();
        break;
    case 0xf4: /* hlt */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_hlt(tcg_const_i32(s->pc - pc_start));
            s->is_jmp = DISAS_TB_JUMP;
        }
        break;
    case 0x100:
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* sldt */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
            ot = OT_WORD;
            if (mod == 3)
                ot += s->dflag;
            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
            break;
        case 2: /* lldt */
            if (!s->pe || s->vm86)
                goto illegal_op;
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
                gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lldt(cpu_tmp2_i32);
            }
            break;
        case 1: /* str */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
            ot = OT_WORD;
            if (mod == 3)
                ot += s->dflag;
            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
            break;
        case 3: /* ltr */
            if (!s->pe || s->vm86)
                goto illegal_op;
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
                gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_ltr(cpu_tmp2_i32);
            }
            break;
        case 4: /* verr */
        case 5: /* verw */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            if (op == 4)
                gen_helper_verr(cpu_T[0]);
            else
                gen_helper_verw(cpu_T[0]);
            s->cc_op = CC_OP_EFLAGS;
            break;
        default:
            goto illegal_op;
        }
        break;
    case 0x101:
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        rm = modrm & 7;
        switch(op) {
        case 0: /* sgdt */
            if (mod == 3)
                goto illegal_op;
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
            if (!s->dflag)
                gen_op_andl_T0_im(0xffffff);
            gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
            break;
        case 1:
            if (mod == 3) {
                switch (rm) {
                case 0: /* monitor */
                    if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
                        s->cpl != 0)
                        goto illegal_op;
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_jmp_im(pc_start - s->cs_base);
#ifdef TARGET_X86_64
                    if (s->aflag == 2) {
                        gen_op_movq_A0_reg(R_EAX);
                    } else
#endif
                    {
                        gen_op_movl_A0_reg(R_EAX);
                        if (s->aflag == 0)
                            gen_op_andl_A0_ffff();
                    }
                    gen_add_A0_ds_seg(s);
                    gen_helper_monitor(cpu_A0);
                    break;
                case 1: /* mwait */
                    if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
                        s->cpl != 0)
                        goto illegal_op;
                    gen_update_cc_op(s);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_helper_mwait(tcg_const_i32(s->pc - pc_start));
                    gen_eob(s);
                    break;
                default:
                    goto illegal_op;
                }
            } else { /* sidt */
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
                gen_op_st_T0_A0(OT_WORD + s->mem_index);
                gen_add_A0_im(s, 2);
                tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
                if (!s->dflag)
                    gen_op_andl_T0_im(0xffffff);
                gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
            }
            break;
        case 2: /* lgdt */
        case 3: /* lidt */
            if (mod == 3) {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                switch(rm) {
                case 0: /* VMRUN */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag),
                                         tcg_const_i32(s->pc - pc_start));
                        tcg_gen_exit_tb(0);
                        s->is_jmp = DISAS_TB_JUMP;
                    }
                    break;
                case 1: /* VMMCALL */
                    if (!(s->flags & HF_SVME_MASK))
                        goto illegal_op;
                    gen_helper_vmmcall(cpu_env);
                    break;
                case 2: /* VMLOAD */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag));
                    }
                    break;
                case 3: /* VMSAVE */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag));
                    }
                    break;
                case 4: /* STGI */
                    if ((!(s->flags & HF_SVME_MASK) &&
                         !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
                        !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_stgi(cpu_env);
                    }
                    break;
                case 5: /* CLGI */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_clgi(cpu_env);
                    }
                    break;
                case 6: /* SKINIT */
                    if ((!(s->flags & HF_SVME_MASK) &&
                         !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
                        !s->pe)
                        goto illegal_op;
                    gen_helper_skinit(cpu_env);
                    break;
                case 7: /* INVLPGA */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag));
                    }
                    break;
                default:
                    goto illegal_op;
                }
            } else if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start,
                                        op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_T1_A0(OT_WORD + s->mem_index);
                gen_add_A0_im(s, 2);
                gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
                if (!s->dflag)
                    gen_op_andl_T0_im(0xffffff);
                if (op == 2) {
                    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
                    tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
                } else {
                    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
                    tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
                }
            }
            break;
        case 4: /* smsw */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
#else
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
#endif
            gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1);
            break;
        case 6: /* lmsw */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
                gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
                gen_helper_lmsw(cpu_T[0]);
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            }
            break;
        case 7:
            if (mod != 3) { /* invlpg */
                if (s->cpl != 0) {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                } else {
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_helper_invlpg(cpu_A0);
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                }
            } else {
                switch (rm) {
                case 0: /* swapgs */
#ifdef TARGET_X86_64
                    if (CODE64(s)) {
                        if (s->cpl != 0) {
                            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        } else {
                            tcg_gen_ld_tl(cpu_T[0], cpu_env,
                                          offsetof(CPUX86State,segs[R_GS].base));
                            tcg_gen_ld_tl(cpu_T[1], cpu_env,
                                          offsetof(CPUX86State,kernelgsbase));
                            tcg_gen_st_tl(cpu_T[1], cpu_env,
                                          offsetof(CPUX86State,segs[R_GS].base));
                            tcg_gen_st_tl(cpu_T[0], cpu_env,
                                          offsetof(CPUX86State,kernelgsbase));
                        }
                    } else
#endif
                    {
                        goto illegal_op;
                    }
                    break;
                case 1: /* rdtscp */
                    if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
                        goto illegal_op;
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_jmp_im(pc_start - s->cs_base);
                    if (use_icount)
                        gen_io_start();
                    gen_helper_rdtscp();
                    if (use_icount) {
                        gen_io_end();
                        gen_jmp(s, s->pc - s->cs_base);
                    }
                    break;
                default:
                    goto illegal_op;
                }
            }
            break;
        default:
            goto illegal_op;
        }
        break;
    case 0x108: /* invd */
    case 0x109: /* wbinvd */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
            /* nothing to do */
        }
        break;
    case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            int d_ot;
            /* d_ot is the size of destination */
            d_ot = dflag + OT_WORD;

            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_TN_reg(OT_LONG, 0, rm);
                /* sign extend */
                if (d_ot == OT_QUAD)
                    tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
                gen_op_mov_reg_T0(d_ot, reg);
            } else {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                if (d_ot == OT_QUAD) {
                    gen_op_lds_T0_A0(OT_LONG + s->mem_index);
                } else {
                    gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                }
                gen_op_mov_reg_T0(d_ot, reg);
            }
        } else
#endif
        {
            int label1;
            TCGv t0, t1, t2, a0;

            if (!s->pe || s->vm86)
                goto illegal_op;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            ot = OT_WORD;
            modrm = ldub_code(s->pc++);
            reg = (modrm >> 3) & 7;
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
                a0 = tcg_temp_local_new();
                tcg_gen_mov_tl(a0, cpu_A0);
            } else {
                gen_op_mov_v_reg(ot, t0, rm);
                TCGV_UNUSED(a0);
            }
            gen_op_mov_v_reg(ot, t1, reg);
            tcg_gen_andi_tl(cpu_tmp0, t0, 3);
            tcg_gen_andi_tl(t1, t1, 3);
            tcg_gen_movi_tl(t2, 0);
            label1 = gen_new_label();
            tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
            tcg_gen_andi_tl(t0, t0, ~3);
            tcg_gen_or_tl(t0, t0, t1);
            tcg_gen_movi_tl(t2, CC_Z);
            gen_set_label(label1);
            if (mod != 3) {
                gen_op_st_v(ot + s->mem_index, t0, a0);
                tcg_temp_free(a0);
            } else {
                gen_op_mov_reg_v(ot, rm, t0);
            }
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_compute_eflags(cpu_cc_src);
            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
            tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
            s->cc_op = CC_OP_EFLAGS;
            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        break;
    case 0x102: /* lar */
    case 0x103: /* lsl */
        {
            int label1;
            TCGv t0;

            if (!s->pe || s->vm86)
                goto illegal_op;
            ot = dflag ? OT_LONG : OT_WORD;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
            t0 = tcg_temp_local_new();
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            if (b == 0x102)
                gen_helper_lar(t0, cpu_T[0]);
            else
                gen_helper_lsl(t0, cpu_T[0]);
            tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
            label1 = gen_new_label();
            tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
            gen_op_mov_reg_v(ot, reg, t0);
            gen_set_label(label1);
            s->cc_op = CC_OP_EFLAGS;
            tcg_temp_free(t0);
        }
        break;
    case 0x118:
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* prefetchnta */
        case 1: /* prefetchnt0 */
        case 2: /* prefetchnt0 */
        case 3: /* prefetchnt0 */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            /* nothing more to do */
            break;
        default: /* nop (multi byte) */
            gen_nop_modrm(s, modrm);
            break;
        }
        break;
    case 0x119 ... 0x11f: /* nop (multi byte) */
        modrm = ldub_code(s->pc++);
        gen_nop_modrm(s, modrm);
        break;
    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            modrm = ldub_code(s->pc++);
            if ((modrm & 0xc0) != 0xc0)
                goto illegal_op;
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            if (CODE64(s))
                ot = OT_QUAD;
            else
                ot = OT_LONG;
            if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
                (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
                reg = 8;
            }
            switch(reg) {
            case 0:
            case 2:
            case 3:
            case 4:
            case 8:
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                if (b & 2) {
                    gen_op_mov_TN_reg(ot, 0, rm);
                    gen_helper_write_crN(tcg_const_i32(reg), cpu_T[0]);
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                } else {
                    gen_helper_read_crN(cpu_T[0], tcg_const_i32(reg));
                    gen_op_mov_reg_T0(ot, rm);
                }
                break;
            default:
                goto illegal_op;
            }
        }
        break;
    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            modrm = ldub_code(s->pc++);
            if ((modrm & 0xc0) != 0xc0)
                goto illegal_op;
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            if (CODE64(s))
                ot = OT_QUAD;
            else
                ot = OT_LONG;
            /* XXX: do it dynamically with CR4.DE bit */
            if (reg == 4 || reg == 5 || reg >= 8)
                goto illegal_op;
            if (b & 2) {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
                gen_op_mov_TN_reg(ot, 0, rm);
                gen_helper_movl_drN_T0(tcg_const_i32(reg), cpu_T[0]);
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
                tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
                gen_op_mov_reg_T0(ot, rm);
            }
        }
        break;
    case 0x106: /* clts */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
            gen_helper_clts(cpu_env);
            /* abort block because static cpu state changed */
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
        /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
            goto illegal_op;
        ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* generate a generic store */
        gen_ldst_modrm(s, modrm, ot, reg, 1);
        break;
    case 0x1ae:
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* fxsave */
            if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
                (s->prefix & PREFIX_LOCK))
                goto illegal_op;
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32((s->dflag == 2)));
            break;
        case 1: /* fxrstor */
            if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
                (s->prefix & PREFIX_LOCK))
                goto illegal_op;
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fxrstor(cpu_env, cpu_A0,
                               tcg_const_i32((s->dflag == 2)));
            break;
        case 2: /* ldmxcsr */
        case 3: /* stmxcsr */
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
                mod == 3)
                goto illegal_op;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            if (op == 2) {
                gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
            } else {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
            }
            break;
        case 5: /* lfence */
        case 6: /* mfence */
            if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
                goto illegal_op;
            break;
        case 7: /* sfence / clflush */
            if ((modrm & 0xc7) == 0xc0) {
                /* sfence */
                /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
                if (!(s->cpuid_features & CPUID_SSE))
                    goto illegal_op;
            } else {
                /* clflush */
                if (!(s->cpuid_features & CPUID_CLFLUSH))
                    goto illegal_op;
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            }
            break;
        default:
            goto illegal_op;
        }
        break;
    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        /* ignore for now */
        break;
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_jmp_im(s->pc - s->cs_base);
        gen_helper_rsm(cpu_env);
        gen_eob(s);
        break;
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ)
            goto illegal_op;
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
            goto illegal_op;

        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7);

        if (s->prefix & PREFIX_DATA)
            ot = OT_WORD;
        else if (s->dflag != 2)
            ot = OT_LONG;
        else
            ot = OT_QUAD;

        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
        gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
        gen_op_mov_reg_T0(ot, reg);

        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0x10e ... 0x10f:
        /* 3DNow! instructions, ignore prefixes */
        s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
    case 0x110 ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x179:
    case 0x17c ... 0x17f:
    case 0x1c2:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        gen_sse(s, b, pc_start, rex_r);
        break;
    default:
        goto illegal_op;
    }
    /* lock generation */
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    return s->pc;
 illegal_op:
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    /* XXX: ensure that no lock was generated */
    gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
    return s->pc;
}
void optimize_flags_init(void)
{
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_tmp),
                                    "cc_tmp");
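    /* Declaring the guest general-purpose registers as TCG globals backed
       by fields of env lets the code generator cache them in host registers
       and write them back automatically; their natural width differs per
       target, hence the i64/i32 split below. */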
#ifdef TARGET_X86_64
    cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EAX]), "rax");
    cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ECX]), "rcx");
    cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDX]), "rdx");
    cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBX]), "rbx");
    cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESP]), "rsp");
    cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBP]), "rbp");
    cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESI]), "rsi");
    cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDI]), "rdi");
    cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
                                         offsetof(CPUX86State, regs[8]), "r8");
    cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
                                         offsetof(CPUX86State, regs[9]), "r9");
    cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[10]), "r10");
    cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[11]), "r11");
    cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[12]), "r12");
    cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[13]), "r13");
    cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[14]), "r14");
    cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[15]), "r15");
#else
    cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EAX]), "eax");
    cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ECX]), "ecx");
    cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDX]), "edx");
    cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBX]), "ebx");
    cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESP]), "esp");
    cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBP]), "ebp");
    cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESI]), "esi");
    cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDI]), "edi");
#endif
    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUX86State *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_ptr;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj;
    uint64_t flags;
    target_ulong pc_start;
    target_ulong cs_base;
    int num_insns;
    int max_insns;
    /* generate intermediate code */
    pc_start = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;

    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->f_st = 0;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cs_base = cs_base;
    dc->tb = tb;
    dc->popl_esp_hack = 0;
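    /* mem_index picks the memory access functions: index 0 is the direct
       (user-mode emulation) path, and with softmmu the CPL selects the
       kernel or user MMU index, scaled by 4 because there is one access op
       per operand size (OT_BYTE..OT_QUAD). */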
    /* select memory access functions */
    dc->mem_index = 0;
    if (flags & HF_SOFTMMU_MASK) {
        if (dc->cpl == 3)
            dc->mem_index = 2 * 4;
        else
            dc->mem_index = 1 * 4;
    }
    dc->cpuid_features = env->cpuid_features;
    dc->cpuid_ext_features = env->cpuid_ext_features;
    dc->cpuid_ext2_features = env->cpuid_ext2_features;
    dc->cpuid_ext3_features = env->cpuid_ext3_features;
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->flags = flags;
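    /* Direct block chaining is only safe when control returns to the main
       loop after every block: it is disabled for TF and gdb single-stepping
       and while interrupts are inhibited. */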
    dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK)
#ifndef CONFIG_SOFTMMU
                    || (flags & HF_SOFTMMU_MASK)
#endif
                    );
#if 0
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
#endif
    cpu_T[0] = tcg_temp_new();
    cpu_T[1] = tcg_temp_new();
    cpu_A0 = tcg_temp_new();
    cpu_T3 = tcg_temp_new();

    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_tmp5 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    dc->is_jmp = DISAS_NEXT;
    pc_ptr = pc_start;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
    gen_icount_start();
    for(;;) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == pc_ptr &&
                    !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
                    gen_debug(dc, pc_ptr - dc->cs_base);
                    break;
                }
            }
        }
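        /* When search_pc is set, record for each generated op the guest PC,
           the cc_op state and the instruction count, so that
           restore_state_to_opc() can rebuild precise CPU state at fault
           time. */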
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = pc_ptr;
            gen_opc_cc_op[lj] = dc->cc_op;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        pc_ptr = disas_insn(dc, pc_ptr);
        num_insns++;
        /* stop translation if indicated */
        if (dc->is_jmp)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen */
        if (dc->tf || dc->singlestep_enabled ||
            (flags & HF_INHIBIT_IRQ_MASK)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
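        /* The TARGET_PAGE_SIZE - 32 margin below keeps the block from
           decoding past a page boundary mid-instruction: 32 bytes is
           comfortably larger than the maximum x86 instruction length. */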
        /* if too long translation, stop generation too */
        if (gen_opc_ptr >= gen_opc_end ||
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
            num_insns >= max_insns) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        if (singlestep) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    /* don't forget to fill the last values */
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    }
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        int disas_flags;
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
        if (dc->code64)
            disas_flags = 2;
        else
#endif
            disas_flags = !dc->code32;
        log_target_disas(pc_start, pc_ptr - pc_start, disas_flags);
        qemu_log("\n");
    }
#endif
    if (!search_pc) {
        tb->size = pc_ptr - pc_start;
        tb->icount = num_insns;
    }
}
void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
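
/* Called after a fault in a TB that was retranslated with search_pc: the
   gen_opc_pc and gen_opc_cc_op side tables recorded then give the guest EIP
   and condition-code state of the faulting instruction. */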
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
{
    int cc_op;
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
        int i;
        qemu_log("RESTORE:\n");
        for(i = 0; i <= pc_pos; i++) {
            if (gen_opc_instr_start[i]) {
                qemu_log("0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
            }
        }
        qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
                 pc_pos, gen_opc_pc[pc_pos] - tb->cs_base,
                 (uint32_t)tb->cs_base);
    }
#endif
    env->eip = gen_opc_pc[pc_pos] - tb->cs_base;
    cc_op = gen_opc_cc_op[pc_pos];
    if (cc_op != CC_OP_DYNAMIC)
        env->cc_op = cc_op;
}