 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
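
/* Note: the prefix constants form a bitmask, so the decoder can accumulate
   several prefixes in a single "prefixes" word and test them together, as
   svm_is_rep() does further down in this file:

       (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))
 */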
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)

//#define MACRO_TEST   1
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_T[2], cpu_T3;
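
/* Note on the condition-code registers above: x86 EFLAGS is evaluated
   lazily. cpu_cc_dst/cpu_cc_src hold the operands or result of the last
   flag-setting operation and cpu_cc_op records which operation it was;
   gen_compute_eflags()/gen_compute_eflags_c() below call the cc_compute
   helpers to materialize the flags only when they are actually needed. */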
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;
static TCGv cpu_tmp5;
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

#include "gen-icount.h"

static int x86_64_hregs;
typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    int aflag, dflag;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
    int ss32;   /* 32 bit stack segment */
    int cc_op;  /* current CC operation */
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
} DisasContext;
static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
/* i386 arith/logic operations */
    OP_SHL1, /* undocumented */

/* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_TMP0 = 16,    /* temporary operand register */
    OR_A0, /* temporary register used when doing address evaluation */
static inline void gen_op_movl_T0_0(void)
{
    tcg_gen_movi_tl(cpu_T[0], 0);
}

static inline void gen_op_movl_T0_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T0_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T1_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_T1_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_A0_im(uint32_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}

static inline void gen_op_movq_A0_im(int64_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}

static inline void gen_movtl_T0_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_movtl_T1_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_andl_T0_ffff(void)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
}

static inline void gen_op_andl_T0_im(uint32_t val)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
}

static inline void gen_op_movl_T0_T1(void)
{
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
}

static inline void gen_op_andl_A0_ffff(void)
{
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
}
#ifdef TARGET_X86_64
#define NB_OP_SIZES 4
#else /* !TARGET_X86_64 */
#define NB_OP_SIZES 3
#endif /* !TARGET_X86_64 */
#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
{
    if (reg < 4) {
        return false;
    }
    if (reg >= 8 || x86_64_hregs) {
        return false;
    }
    return true;
}
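
/* Example: encoding number 4 with no REX prefix selects AH (bits 15..8 of
   EAX), so byte_reg_is_xH(4) is true; once any REX prefix has been seen
   (the decoder sets x86_64_hregs), the same number 4 selects SPL, the low
   byte of ESP, and the function returns false. */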
static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
{
    switch(ot) {
    case OT_BYTE:
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
        } else {
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        }
        break;
    case OT_WORD:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        break;
    default: /* XXX this shouldn't be reached; abort? */
    case OT_LONG:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
    case OT_QUAD:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
    }
}
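
/* tcg_gen_deposit_tl(dst, base, val, pos, len) copies the low 'len' bits of
   'val' into 'base' at bit offset 'pos' and leaves the remaining bits
   intact - exactly the x86 partial-register write semantics needed above,
   where an AL/AH/AX store must preserve the rest of the register. */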
static inline void gen_op_mov_reg_T0(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);
}

static inline void gen_op_mov_reg_T1(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[1]);
}
static inline void gen_op_mov_reg_A0(int size, int reg)
{
    switch(size) {
    case 0:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
        break;
    default: /* XXX this shouldn't be reached; abort? */
    case 1:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
        break;
    case 2:
        tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
        break;
    }
}
static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
{
    if (ot == OT_BYTE && byte_reg_is_xH(reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
        tcg_gen_ext8u_tl(t0, t0);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
{
    gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
}
static inline void gen_op_movl_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addl_A0_im(int32_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
}

static inline void gen_op_addq_A0_im(int64_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}

static void gen_add_A0_im(DisasContext *s, int val)
{
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
        gen_op_addl_A0_im(val);
}
static inline void gen_op_addl_T0_T1(void)
{
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
}

static inline void gen_op_jmp_T0(void)
{
    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
}
static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
{
    switch(size) {
    case 0:
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
        break;
    case 1:
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        break;
    case 2:
        tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
        break;
    }
}
static inline void gen_op_add_reg_T0(int size, int reg)
{
    switch(size) {
    case 0:
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
        break;
    case 1:
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        break;
    case 2:
        tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
        break;
    }
}
static inline void gen_op_set_cc_op(int32_t val)
{
    tcg_gen_movi_i32(cpu_cc_op, val);
}

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}
static inline void gen_op_movl_A0_seg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env,
                     offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
}
static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    } else {
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
    }
#else
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#endif
}
static inline void gen_op_movq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
}

static inline void gen_op_addq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}

static inline void gen_op_movq_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
static inline void gen_op_lds_T0_A0(int idx)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
        break;
    default:
    case 2:
        tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
        break;
    }
}
static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_ld8u(t0, a0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_ld16u(t0, a0, mem_index);
        break;
    case 2:
        tcg_gen_qemu_ld32u(t0, a0, mem_index);
        break;
    default:
    case 3:
        /* Should never happen on 32-bit targets.  */
        tcg_gen_qemu_ld64(t0, a0, mem_index);
        break;
    }
}
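
/* The 'idx' convention used by the load/store helpers above and below:
   callers pass "ot + s->mem_index", so the low two bits select the operand
   size (0 = byte, 1 = word, 2 = long, 3 = quad) while "(idx >> 2) - 1"
   recovers the TCG memory index that selects the softmmu access
   functions. */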
/* XXX: always use ldu or lds */
static inline void gen_op_ld_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ldu_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ld_T1_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[1], cpu_A0);
}
static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_st8(t0, a0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_st16(t0, a0, mem_index);
        break;
    case 2:
        tcg_gen_qemu_st32(t0, a0, mem_index);
        break;
    default:
    case 3:
        /* Should never happen on 32-bit targets.  */
        tcg_gen_qemu_st64(t0, a0, mem_index);
        break;
    }
}
static inline void gen_op_st_T0_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_st_T1_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[1], cpu_A0);
}
static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
}
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    int override;

    override = s->override;
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        if (override >= 0) {
            gen_op_movq_A0_seg(override);
            gen_op_addq_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movq_A0_reg(R_ESI);
        }
    } else
#endif
    if (s->aflag) {
        /* 32 bit address */
        if (s->addseg && override < 0)
            override = R_DS;
        if (override >= 0) {
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movl_A0_reg(R_ESI);
        }
    } else {
        /* 16 bit address, always override */
        if (override < 0)
            override = R_DS;
        gen_op_movl_A0_reg(R_ESI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(s, override);
    }
}
static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        gen_op_movq_A0_reg(R_EDI);
    } else
#endif
    if (s->aflag) {
        if (s->addseg) {
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
        } else {
            gen_op_movl_A0_reg(R_EDI);
        }
    } else {
        gen_op_movl_A0_reg(R_EDI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(s, R_ES);
    }
}
static inline void gen_op_movl_T0_Dshift(int ot)
{
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
}

static void gen_extu(int ot, TCGv reg)
{
    switch(ot) {
    case OT_BYTE:
        tcg_gen_ext8u_tl(reg, reg);
        break;
    case OT_WORD:
        tcg_gen_ext16u_tl(reg, reg);
        break;
    case OT_LONG:
        tcg_gen_ext32u_tl(reg, reg);
        break;
    default:
        break;
    }
}

static void gen_exts(int ot, TCGv reg)
{
    switch(ot) {
    case OT_BYTE:
        tcg_gen_ext8s_tl(reg, reg);
        break;
    case OT_WORD:
        tcg_gen_ext16s_tl(reg, reg);
        break;
    case OT_LONG:
        tcg_gen_ext32s_tl(reg, reg);
        break;
    default:
        break;
    }
}
static inline void gen_op_jnz_ecx(int size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(int size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}
static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
{
    switch(ot) {
    case 0: gen_helper_inb(v, n); break;
    case 1: gen_helper_inw(v, n); break;
    case 2: gen_helper_inl(v, n); break;
    }
}

static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
{
    switch(ot) {
    case 0: gen_helper_outb(v, n); break;
    case 1: gen_helper_outw(v, n); break;
    case 2: gen_helper_outl(v, n); break;
    }
}
static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    target_ulong next_eip;

    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        switch(ot) {
        case 0: gen_helper_check_iob(cpu_tmp2_i32); break;
        case 1: gen_helper_check_iow(cpu_tmp2_i32); break;
        case 2: gen_helper_check_iol(cpu_tmp2_i32); break;
        }
    }
    if (s->flags & HF_SVMI_MASK) {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_tmp2_i32, tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}
static inline void gen_movs(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}
static inline void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;
    }
}

static void gen_op_update1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static inline void gen_op_cmpl_T0_T1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}
/* compute eflags.C to reg */
static void gen_compute_eflags_c(TCGv reg)
{
    gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_env, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(TCGv reg)
{
    gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_env, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}
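
/* For the bit twiddling in gen_setcc_slow_T0() below, recall the EFLAGS
   bit positions behind the shift counts: CF is bit 0, PF bit 2, ZF bit 6,
   SF bit 7 and OF bit 11.  That is why extracting "less than" (signed)
   shifts by 11 and 7 and XORs the results (SF ^ OF), and "below or equal"
   ORs in the ZF bit as well. */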
static inline void gen_setcc_slow_T0(DisasContext *s, int jcc_op)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    switch(jcc_op) {
    case JCC_O:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 11);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_B:
        gen_compute_eflags_c(cpu_T[0]);
        break;
    case JCC_Z:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 6);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_BE:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_S:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 7);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_P:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 2);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_L:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    default:
    case JCC_LE:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    }
}
/* return true if setcc_slow is not needed (WARNING: must be kept in
   sync with gen_jcc1) */
static int is_fast_jcc_case(DisasContext *s, int b)
{
    int jcc_op;

    jcc_op = (b >> 1) & 7;
    switch(s->cc_op) {
        /* we optimize the cmp/jcc case */
    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
    case CC_OP_SUBQ:
        if (jcc_op == JCC_O || jcc_op == JCC_P)
            goto slow_jcc;
        break;
        /* some jumps are easy to compute */
    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
    case CC_OP_ADDQ:
    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:
    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:
    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:
    case CC_OP_SHLB:
    case CC_OP_SHLW:
    case CC_OP_SHLL:
    case CC_OP_SHLQ:
        if (jcc_op != JCC_Z && jcc_op != JCC_S)
            goto slow_jcc;
        break;
    default:
    slow_jcc:
        return 0;
    }
    return 1;
}
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
{
    int inv, jcc_op, size, cond;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch(cc_op) {
        /* we optimize the cmp/jcc case */
    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
    case CC_OP_SUBQ:

        size = cc_op - CC_OP_SUBB;
        switch(jcc_op) {
        case JCC_Z:
        fast_jcc_z:
            switch(size) {
            case 0:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xff);
                t0 = cpu_tmp0;
                break;
            case 1:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffff);
                t0 = cpu_tmp0;
                break;
#ifdef TARGET_X86_64
            case 2:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffffffff);
                t0 = cpu_tmp0;
                break;
#endif
            default:
                t0 = cpu_cc_dst;
                break;
            }
            tcg_gen_brcondi_tl(inv ? TCG_COND_NE : TCG_COND_EQ, t0, 0, l1);
            break;
        case JCC_S:
        fast_jcc_s:
            switch(size) {
            case 0:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
            case 1:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
#ifdef TARGET_X86_64
            case 2:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
#endif
            default:
                tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
                                   0, l1);
                break;
            }
            break;

        case JCC_B:
            cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
            goto fast_jcc_b;
        case JCC_BE:
            cond = inv ? TCG_COND_GTU : TCG_COND_LEU;
        fast_jcc_b:
            tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
            switch(size) {
            case 0:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xff);
                break;
            case 1:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xffff);
                break;
#ifdef TARGET_X86_64
            case 2:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffffffff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xffffffff);
                break;
#endif
            default:
                t0 = cpu_cc_src;
                break;
            }
            tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
            break;

        case JCC_L:
            cond = inv ? TCG_COND_GE : TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = inv ? TCG_COND_GT : TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
            switch(size) {
            case 0:
                t0 = cpu_tmp0;
                tcg_gen_ext8s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext8s_tl(t0, cpu_cc_src);
                break;
            case 1:
                t0 = cpu_tmp0;
                tcg_gen_ext16s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext16s_tl(t0, cpu_cc_src);
                break;
#ifdef TARGET_X86_64
            case 2:
                t0 = cpu_tmp0;
                tcg_gen_ext32s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext32s_tl(t0, cpu_cc_src);
                break;
#endif
            default:
                t0 = cpu_cc_src;
                break;
            }
            tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
            break;

        default:
            goto slow_jcc;
        }
        break;

        /* some jumps are easy to compute */
        switch(jcc_op) {
        case JCC_Z:
            size = (cc_op - CC_OP_ADDB) & 3;
            goto fast_jcc_z;
        case JCC_S:
            size = (cc_op - CC_OP_ADDB) & 3;
            goto fast_jcc_s;
        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        gen_setcc_slow_T0(s, jcc_op);
        tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
                           cpu_T[0], 0, l1);
        break;
    }
}
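
/* How the fast paths above work: after a CC_OP_SUBx comparison,
   cpu_cc_dst holds the subtraction result and cpu_cc_src the second
   operand, so the original left operand can be recovered as
   cpu_cc_dst + cpu_cc_src.  The conditional jump then folds into a single
   TCG brcond with the matching signed/unsigned condition, skipping the
   full EFLAGS computation entirely. */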
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}
static inline void gen_stos(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_lods(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_op_mov_reg_T0(ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

static inline void gen_scas(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op_cmpl_T0_T1_cc();
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_cmps(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op_cmpl_T0_T1_cc();
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}
static inline void gen_ins(DisasContext *s, int ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_outs(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}
/* same method as Valgrind : we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
                                 target_ulong cur_eip, target_ulong next_eip) \
{                                                                             \
    int l2;                                                                   \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    /* a loop would cause two single step exceptions if ECX = 1               \
       before rep string_insn */                                              \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
                                   int nz)                                    \
{                                                                             \
    int l2;                                                                   \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    gen_op_set_cc_op(CC_OP_SUBB + ot);                                        \
    gen_jcc1(s, CC_OP_SUBB + ot, (JCC_Z << 1) | (nz ^ 1), l2);                \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}
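
/* Translation strategy for REP prefixes, as the comment above notes (the
   same trick Valgrind uses): instead of emitting a translated loop, each
   iteration is its own TB pass - test ECX == 0 and jump to the next
   instruction, execute one string-op body, decrement ECX, then jump back
   to the current instruction.  Single-stepping and interrupt delivery thus
   see clean iteration boundaries. */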
static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch(op) {
    case 0:
        gen_helper_fadd_ST0_FT0(cpu_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(cpu_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(cpu_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(cpu_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(cpu_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(cpu_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_const_i32(opreg);
    switch(op) {
    case 0:
        gen_helper_fadd_STN_ST0(cpu_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(cpu_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(cpu_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(cpu_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(cpu_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(cpu_env, tmp);
        break;
    }
}
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, int ot, int d)
{
    if (d != OR_TMP0) {
        gen_op_mov_TN_reg(ot, 0, d);
    } else {
        gen_op_ld_T0_A0(ot + s1->mem_index);
    }
    switch(op) {
    case OP_ADCL:
        if (s1->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s1->cc_op);
        gen_compute_eflags_c(cpu_tmp4);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
        s1->cc_op = CC_OP_DYNAMIC;
        break;
    case OP_SBBL:
        if (s1->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s1->cc_op);
        gen_compute_eflags_c(cpu_tmp4);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot);
        s1->cc_op = CC_OP_DYNAMIC;
        break;
    case OP_ADDL:
        gen_op_addl_T0_T1();
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update2_cc();
        s1->cc_op = CC_OP_ADDB + ot;
        break;
    case OP_SUBL:
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update2_cc();
        s1->cc_op = CC_OP_SUBB + ot;
        break;
    default:
    case OP_ANDL:
        tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_ORL:
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_XORL:
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_CMPL:
        gen_op_cmpl_T0_T1_cc();
        s1->cc_op = CC_OP_SUBB + ot;
        break;
    }
}
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, int ot, int d, int c)
{
    if (d != OR_TMP0)
        gen_op_mov_TN_reg(ot, 0, d);
    else
        gen_op_ld_T0_A0(ot + s1->mem_index);
    if (s1->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s1->cc_op);
    if (c > 0) {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
        s1->cc_op = CC_OP_INCB + ot;
    } else {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
        s1->cc_op = CC_OP_DECB + ot;
    }
    if (d != OR_TMP0)
        gen_op_mov_reg_T0(ot, d);
    else
        gen_op_st_T0_A0(ot + s1->mem_index);
    gen_compute_eflags_c(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}
static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask;
    int shift_label;
    TCGv t0, t1, t2;

    if (ot == OT_QUAD) {
        mask = 0x3f;
    } else {
        mask = 0x1f;
    }

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_T0_A0(ot + s->mem_index);
    } else {
        gen_op_mov_TN_reg(ot, 0, op1);
    }

    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new();

    tcg_gen_andi_tl(t2, cpu_T[1], mask);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, cpu_T[0]);
            tcg_gen_mov_tl(t0, cpu_T[0]);
            tcg_gen_sar_tl(cpu_T[0], cpu_T[0], t2);
        } else {
            gen_extu(ot, cpu_T[0]);
            tcg_gen_mov_tl(t0, cpu_T[0]);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], t2);
        }
    } else {
        tcg_gen_mov_tl(t0, cpu_T[0]);
        tcg_gen_shl_tl(cpu_T[0], cpu_T[0], t2);
    }

    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_T0_A0(ot + s->mem_index);
    } else {
        gen_op_mov_reg_T0(ot, op1);
    }

    /* update eflags if non zero shift */
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
    }

    tcg_gen_mov_tl(t1, cpu_T[0]);

    shift_label = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, shift_label);

    tcg_gen_addi_tl(t2, t2, -1);
    tcg_gen_mov_tl(cpu_cc_dst, t1);

    if (is_right) {
        if (is_arith) {
            tcg_gen_sar_tl(cpu_cc_src, t0, t2);
        } else {
            tcg_gen_shr_tl(cpu_cc_src, t0, t2);
        }
    } else {
        tcg_gen_shl_tl(cpu_cc_src, t0, t2);
    }

    if (is_right) {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
    } else {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
    }

    gen_set_label(shift_label);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
}
static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask;

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, cpu_T[0]);
                tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                gen_extu(ot, cpu_T[0]);
                tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
            }
        } else {
            tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
            tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
        }
    }

    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        if (is_right)
            s->cc_op = CC_OP_SARB + ot;
        else
            s->cc_op = CC_OP_SHLB + ot;
    }
}
static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
{
    if (arg2 >= 0)
        tcg_gen_shli_tl(ret, arg1, arg2);
    else
        tcg_gen_shri_tl(ret, arg1, -arg2);
}
static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
                          int is_right)
{
    target_ulong mask;
    int label1, label2, data_bits;
    TCGv t0, t1, t2, a0;

    /* XXX: inefficient, but we must use local temps */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    tcg_gen_mov_tl(t1, cpu_T[1]);

    tcg_gen_andi_tl(t1, t1, mask);

    /* Must test zero case to avoid using undefined behaviour in TCG
       shifts. */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);

    if (ot <= OT_WORD)
        tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
    else
        tcg_gen_mov_tl(cpu_tmp0, t1);

    tcg_gen_mov_tl(t2, t0);

    data_bits = 8 << ot;
    /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
       fix TCG definition) */
    if (is_right) {
        tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
        tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
        tcg_gen_shl_tl(t0, t0, cpu_tmp0);
    } else {
        tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
        tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
        tcg_gen_shr_tl(t0, t0, cpu_tmp0);
    }
    tcg_gen_or_tl(t0, t0, cpu_tmp4);

    gen_set_label(label1);
    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    /* update eflags */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2);

    gen_compute_eflags(cpu_cc_src);
    tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
    tcg_gen_xor_tl(cpu_tmp0, t2, t0);
    tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
    tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
    if (is_right) {
        tcg_gen_shri_tl(t0, t0, data_bits - 1);
    }
    tcg_gen_andi_tl(t0, t0, CC_C);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_discard_tl(cpu_cc_dst);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);

    gen_set_label(label2);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
}
static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
                          int is_right)
{
    target_ulong mask;
    int data_bits;
    TCGv t0, t1, a0;

    /* XXX: inefficient, but we must use local temps */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    tcg_gen_mov_tl(t1, t0);

    op2 &= mask;
    data_bits = 8 << ot;
    if (op2 != 0) {
        int shift = op2 & ((1 << (3 + ot)) - 1);
        if (is_right) {
            tcg_gen_shri_tl(cpu_tmp4, t0, shift);
            tcg_gen_shli_tl(t0, t0, data_bits - shift);
        } else {
            tcg_gen_shli_tl(cpu_tmp4, t0, shift);
            tcg_gen_shri_tl(t0, t0, data_bits - shift);
        }
        tcg_gen_or_tl(t0, t0, cpu_tmp4);
    }

    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    if (op2 != 0) {
        /* update eflags */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);

        gen_compute_eflags(cpu_cc_src);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
        tcg_gen_xor_tl(cpu_tmp0, t1, t0);
        tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
        tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
        if (is_right) {
            tcg_gen_shri_tl(t0, t0, data_bits - 1);
        }
        tcg_gen_andi_tl(t0, t0, CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

        tcg_gen_discard_tl(cpu_cc_dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
        s->cc_op = CC_OP_EFLAGS;
    }
}
/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
                           int is_right)
{
    int label1;

    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    if (is_right) {
        switch(ot) {
        case 0: gen_helper_rcrb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 1: gen_helper_rcrw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 2: gen_helper_rcrl(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#ifdef TARGET_X86_64
        case 3: gen_helper_rcrq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#endif
        }
    } else {
        switch(ot) {
        case 0: gen_helper_rclb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 1: gen_helper_rclw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 2: gen_helper_rcll(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#ifdef TARGET_X86_64
        case 3: gen_helper_rclq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#endif
        }
    }
    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cc_tmp, -1, label1);

    tcg_gen_mov_tl(cpu_cc_src, cpu_cc_tmp);
    tcg_gen_discard_tl(cpu_cc_dst);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);

    gen_set_label(label1);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
}
/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
                                int is_right)
{
    target_ulong mask;
    int label1, label2, data_bits;
    TCGv t0, t1, t2, a0;

    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    tcg_gen_andi_tl(cpu_T3, cpu_T3, mask);

    tcg_gen_mov_tl(t1, cpu_T[1]);
    tcg_gen_mov_tl(t2, cpu_T3);

    /* Must test zero case to avoid using undefined behaviour in TCG
       shifts. */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);

    tcg_gen_addi_tl(cpu_tmp5, t2, -1);
    if (ot == OT_WORD) {
        /* Note: we implement the Intel behaviour for shift count > 16 */
        if (is_right) {
            tcg_gen_andi_tl(t0, t0, 0xffff);
            tcg_gen_shli_tl(cpu_tmp0, t1, 16);
            tcg_gen_or_tl(t0, t0, cpu_tmp0);
            tcg_gen_ext32u_tl(t0, t0);

            tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);

            /* only needed if count > 16, but a test would complicate */
            tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
            tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);

            tcg_gen_shr_tl(t0, t0, t2);

            tcg_gen_or_tl(t0, t0, cpu_tmp0);
        } else {
            /* XXX: not optimal */
            tcg_gen_andi_tl(t0, t0, 0xffff);
            tcg_gen_shli_tl(t1, t1, 16);
            tcg_gen_or_tl(t1, t1, t0);
            tcg_gen_ext32u_tl(t1, t1);

            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
            tcg_gen_subfi_tl(cpu_tmp0, 32, cpu_tmp5);
            tcg_gen_shr_tl(cpu_tmp5, t1, cpu_tmp0);
            tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp5);

            tcg_gen_shl_tl(t0, t0, t2);
            tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);
        }
    } else {
        data_bits = 8 << ot;
        if (is_right) {
            if (ot == OT_LONG)
                tcg_gen_ext32u_tl(t0, t0);

            tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);

            tcg_gen_shr_tl(t0, t0, t2);
            tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
            tcg_gen_shl_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);
        } else {
            if (ot == OT_LONG)
                tcg_gen_ext32u_tl(t1, t1);

            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);

            tcg_gen_shl_tl(t0, t0, t2);
            tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);
        }
    }
    tcg_gen_mov_tl(t1, cpu_tmp4);

    gen_set_label(label1);
    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    /* update eflags */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2);

    tcg_gen_mov_tl(cpu_cc_src, t1);
    tcg_gen_mov_tl(cpu_cc_dst, t0);
    if (is_right) {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
    } else {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
    }
    gen_set_label(label2);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
}
static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
{
    gen_op_mov_TN_reg(ot, 1, s);
    switch(op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}
static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
{
    switch(op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        gen_op_movl_T1_im(c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}
static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr)
{
    target_long disp;
    int havesib, base, index, scale, opreg;
    int mod, rm, code, override, must_add_seg;

    override = s->override;
    must_add_seg = s->addseg;
    if (override >= 0)
        must_add_seg = 1;
    mod = (modrm >> 6) & 3;
    rm = modrm & 7;

    if (s->aflag) {
        havesib = 0;
        base = rm;
        index = 0;
        scale = 0;

        if (base == 4) {
            havesib = 1;
            code = ldub_code(s->pc++);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            base = (code & 7);
        }
        base |= REX_B(s);

        switch(mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)ldl_code(s->pc);
                s->pc += 4;
                if (CODE64(s) && !havesib) {
                    disp += s->pc + s->rip_offset;
                }
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)ldub_code(s->pc++);
            break;
        default:
        case 2:
            disp = (int32_t)ldl_code(s->pc);
            s->pc += 4;
            break;
        }

        if (base >= 0) {
            /* for correct popl handling with esp */
            if (base == 4 && s->popl_esp_hack)
                disp += s->popl_esp_hack;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_reg(base);
                if (disp != 0)
                    gen_op_addq_A0_im(disp);
            } else
#endif
            {
                gen_op_movl_A0_reg(base);
                if (disp != 0)
                    gen_op_addl_A0_im(disp);
            }
        } else {
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_im(disp);
            } else
#endif
            {
                gen_op_movl_A0_im(disp);
            }
        }
        /* index == 4 means no index */
        if (havesib && (index != 4)) {
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_addq_A0_reg_sN(scale, index);
            } else
#endif
            {
                gen_op_addl_A0_reg_sN(scale, index);
            }
        }
        if (must_add_seg) {
            if (override < 0) {
                if (base == R_EBP || base == R_ESP)
                    override = R_SS;
                else
                    override = R_DS;
            }
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_addq_A0_seg(override);
            } else
#endif
            {
                gen_op_addl_A0_seg(s, override);
            }
        }
    } else {
        switch(mod) {
        case 0:
            if (rm == 6) {
                disp = lduw_code(s->pc);
                s->pc += 2;
                gen_op_movl_A0_im(disp);
                rm = 0; /* avoid SS override */
                goto no_rm;
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)ldub_code(s->pc++);
            break;
        default:
        case 2:
            disp = lduw_code(s->pc);
            s->pc += 2;
            break;
        }
        switch(rm) {
        case 0:
            gen_op_movl_A0_reg(R_EBX);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            break;
        case 1:
            gen_op_movl_A0_reg(R_EBX);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            break;
        case 2:
            gen_op_movl_A0_reg(R_EBP);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            break;
        case 3:
            gen_op_movl_A0_reg(R_EBP);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            break;
        case 4:
            gen_op_movl_A0_reg(R_ESI);
            break;
        case 5:
            gen_op_movl_A0_reg(R_EDI);
            break;
        case 6:
            gen_op_movl_A0_reg(R_EBP);
            break;
        default:
        case 7:
            gen_op_movl_A0_reg(R_EBX);
            break;
        }
        if (disp != 0)
            gen_op_addl_A0_im(disp);
        gen_op_andl_A0_ffff();
    no_rm:
        if (must_add_seg) {
            if (override < 0) {
                if (rm == 2 || rm == 3 || rm == 6)
                    override = R_SS;
                else
                    override = R_DS;
            }
            gen_op_addl_A0_seg(s, override);
        }
    }

    opreg = OR_A0;
    disp = 0;
    *reg_ptr = opreg;
    *offset_ptr = disp;
}
static void gen_nop_modrm(DisasContext *s, int modrm)
{
    int mod, rm, base, code;

    mod = (modrm >> 6) & 3;
    code = ldub_code(s->pc++);
}
/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    int override, must_add_seg;

    override = R_DS;
    must_add_seg = s->addseg;
    if (s->override >= 0) {
        override = s->override;
        must_add_seg = 1;
    }
    if (must_add_seg) {
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            gen_op_addq_A0_seg(override);
        } else
#endif
        {
            gen_op_addl_A0_seg(s, override);
        }
    }
}
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
{
    int mod, rm, opreg, disp;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0(ot, reg);
        }
    } else {
        gen_lea_modrm(s, modrm, &opreg, &disp);
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_st_T0_A0(ot + s->mem_index);
        } else {
            gen_op_ld_T0_A0(ot + s->mem_index);
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0(ot, reg);
        }
    }
}
static inline uint32_t insn_get(DisasContext *s, int ot)
{
    uint32_t ret;

    switch(ot) {
    case OT_BYTE:
        ret = ldub_code(s->pc);
        s->pc++;
        break;
    case OT_WORD:
        ret = lduw_code(s->pc);
        s->pc += 2;
        break;
    default:
    case OT_LONG:
        ret = ldl_code(s->pc);
        s->pc += 4;
        break;
    }
    return ret;
}

static inline int insn_const_size(unsigned int ot)
{
    if (ot <= OT_LONG)
        return 1 << ot;
    else
        return 4;
}
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
{
    TranslationBlock *tb;
    target_ulong pc;

    tb = s->tb;
    pc = s->cs_base + eip;
    /* NOTE: we handle the case where the TB spans two pages here */
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        gen_jmp_im(eip);
        tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        gen_jmp_im(eip);
        gen_eob(s);
    }
}
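
/* Direct block chaining, in sketch form: tcg_gen_goto_tb() emits a
   patchable jump slot, and tcg_gen_exit_tb((tcg_target_long)tb + tb_num)
   returns to the execution loop with the TB pointer combined with the
   jump-slot index in the low bits (TB structures are aligned, so those
   bits are free).  The loop uses that value to patch the slot so the two
   TBs are chained and the next execution skips the TB lookup. */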
static inline void gen_jcc(DisasContext *s, int b,
                           target_ulong val, target_ulong next_eip)
{
    int l1, l2, cc_op;

    cc_op = s->cc_op;
    gen_update_cc_op(s);
    if (s->jmp_opt) {
        l1 = gen_new_label();
        gen_jcc1(s, cc_op, b, l1);

        gen_goto_tb(s, 0, next_eip);

        gen_set_label(l1);
        gen_goto_tb(s, 1, val);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        l1 = gen_new_label();
        l2 = gen_new_label();
        gen_jcc1(s, cc_op, b, l1);

        gen_jmp_im(next_eip);
        tcg_gen_br(l2);

        gen_set_label(l1);
        gen_jmp_im(val);
        gen_set_label(l2);
        gen_eob(s);
    }
}
static void gen_setcc(DisasContext *s, int b)
{
    int inv, jcc_op, l1;
    TCGv t0;

    if (is_fast_jcc_case(s, b)) {
        /* nominal case: we use a jump */
        /* XXX: make it faster by adding new instructions in TCG */
        t0 = tcg_temp_local_new();
        tcg_gen_movi_tl(t0, 0);
        l1 = gen_new_label();
        gen_jcc1(s, s->cc_op, b ^ 1, l1);
        tcg_gen_movi_tl(t0, 1);
        gen_set_label(l1);
        tcg_gen_mov_tl(cpu_T[0], t0);
        tcg_temp_free(t0);
    } else {
        /* slow case: it is more efficient not to generate a jump,
           although it is questionable whether this optimization is
           worth it */
        inv = b & 1;
        jcc_op = (b >> 1) & 7;
        gen_setcc_slow_T0(s, jcc_op);
        if (inv) {
            tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1);
        }
    }
}
static inline void gen_op_movl_T0_seg(int seg_reg)
{
    tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                     offsetof(CPUX86State,segs[seg_reg].selector));
}

static inline void gen_op_movl_seg_T0_vm(int seg_reg)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
    tcg_gen_st32_tl(cpu_T[0], cpu_env,
                    offsetof(CPUX86State,segs[seg_reg].selector));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
    tcg_gen_st_tl(cpu_T[0], cpu_env,
                  offsetof(CPUX86State,segs[seg_reg].base));
}
/* move T0 to seg_reg and compute if the CPU state may change. Never
   call this function with seg_reg == R_CS */
static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
{
    if (s->pe && !s->vm86) {
        /* XXX: optimize by finding processor state dynamically */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(cur_eip);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_load_seg(tcg_const_i32(seg_reg), cpu_tmp2_i32);
        /* abort translation because the addseg value may change or
           because ss32 may change. For R_SS, translation must always
           stop as a special handling must be done to disable hardware
           interrupts for the next instruction */
        if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
            s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_op_movl_seg_T0_vm(seg_reg);
        if (seg_reg == R_SS)
            s->is_jmp = DISAS_TB_JUMP;
    }
}
static inline int svm_is_rep(int prefixes)
{
    return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
}

static inline void
gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
                              uint32_t type, uint64_t param)
{
    /* no SVM activated; fast case */
    if (likely(!(s->flags & HF_SVMI_MASK)))
        return;
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(pc_start - s->cs_base);
    gen_helper_svm_check_intercept_param(tcg_const_i32(type),
                                         tcg_const_i64(param));
}

static inline void
gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
{
    gen_svm_check_intercept_param(s, pc_start, type, 0);
}
static inline void gen_stack_update(DisasContext *s, int addend)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_add_reg_im(2, R_ESP, addend);
    } else
#endif
    if (s->ss32) {
        gen_op_add_reg_im(1, R_ESP, addend);
    } else {
        gen_op_add_reg_im(0, R_ESP, addend);
    }
}
/* generate a push. It depends on ss32, addseg and dflag */
static void gen_push_T0(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_movq_A0_reg(R_ESP);
        if (s->dflag) {
            gen_op_addq_A0_im(-8);
            gen_op_st_T0_A0(OT_QUAD + s->mem_index);
        } else {
            gen_op_addq_A0_im(-2);
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
        }
        gen_op_mov_reg_A0(2, R_ESP);
    } else
#endif
    {
        gen_op_movl_A0_reg(R_ESP);
        if (!s->dflag)
            gen_op_addl_A0_im(-2);
        else
            gen_op_addl_A0_im(-4);
        if (s->ss32) {
            if (s->addseg) {
                tcg_gen_mov_tl(cpu_T[1], cpu_A0);
                gen_op_addl_A0_seg(s, R_SS);
            }
        } else {
            gen_op_andl_A0_ffff();
            tcg_gen_mov_tl(cpu_T[1], cpu_A0);
            gen_op_addl_A0_seg(s, R_SS);
        }
        gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
        if (s->ss32 && !s->addseg)
            gen_op_mov_reg_A0(1, R_ESP);
        else
            gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
    }
}
/* generate a push. It depends on ss32, addseg and dflag */
/* slower version for T1, only used for call Ev */
static void gen_push_T1(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_movq_A0_reg(R_ESP);
        if (s->dflag) {
            gen_op_addq_A0_im(-8);
            gen_op_st_T1_A0(OT_QUAD + s->mem_index);
        } else {
            gen_op_addq_A0_im(-2);
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
        }
        gen_op_mov_reg_A0(2, R_ESP);
    } else
#endif
    {
        gen_op_movl_A0_reg(R_ESP);
        if (!s->dflag)
            gen_op_addl_A0_im(-2);
        else
            gen_op_addl_A0_im(-4);
        if (s->ss32) {
            if (s->addseg) {
                gen_op_addl_A0_seg(s, R_SS);
            }
        } else {
            gen_op_andl_A0_ffff();
            gen_op_addl_A0_seg(s, R_SS);
        }
        gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);

        if (s->ss32 && !s->addseg)
            gen_op_mov_reg_A0(1, R_ESP);
        else
            gen_stack_update(s, (-2) << s->dflag);
    }
}
/* two step pop is necessary for precise exceptions */
static void gen_pop_T0(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_movq_A0_reg(R_ESP);
        gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
    } else
#endif
    {
        gen_op_movl_A0_reg(R_ESP);
        if (s->ss32) {
            if (s->addseg)
                gen_op_addl_A0_seg(s, R_SS);
        } else {
            gen_op_andl_A0_ffff();
            gen_op_addl_A0_seg(s, R_SS);
        }
        gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
    }
}
static void gen_pop_update(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (CODE64(s) && s->dflag) {
        gen_stack_update(s, 8);
    } else
#endif
    {
        gen_stack_update(s, 2 << s->dflag);
    }
}
static void gen_stack_A0(DisasContext *s)
{
    gen_op_movl_A0_reg(R_ESP);
    if (!s->ss32)
        gen_op_andl_A0_ffff();
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    if (s->addseg)
        gen_op_addl_A0_seg(s, R_SS);
}
/* NOTE: wrap around in 16 bit not fully handled */
static void gen_pusha(DisasContext *s)
{
    int i;

    gen_op_movl_A0_reg(R_ESP);
    gen_op_addl_A0_im(-16 << s->dflag);
    if (!s->ss32)
        gen_op_andl_A0_ffff();
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    if (s->addseg)
        gen_op_addl_A0_seg(s, R_SS);
    for(i = 0;i < 8; i++) {
        gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
        gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
        gen_op_addl_A0_im(2 << s->dflag);
    }
    gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
}
/* NOTE: wrap around in 16 bit not fully handled */
static void gen_popa(DisasContext *s)
{
    int i;

    gen_op_movl_A0_reg(R_ESP);
    if (!s->ss32)
        gen_op_andl_A0_ffff();
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
    if (s->addseg)
        gen_op_addl_A0_seg(s, R_SS);
    for(i = 0;i < 8; i++) {
        /* ESP is not reloaded */
        if (i != 3) {
            gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
            gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
        }
        gen_op_addl_A0_im(2 << s->dflag);
    }
    gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
}
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
    int ot, opsize;

#ifdef TARGET_X86_64
    if (CODE64(s)) {
        ot = s->dflag ? OT_QUAD : OT_WORD;
        opsize = 1 << ot;

        gen_op_movl_A0_reg(R_ESP);
        gen_op_addq_A0_im(-opsize);
        tcg_gen_mov_tl(cpu_T[1], cpu_A0);

        /* push bp */
        gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
        gen_op_st_T0_A0(ot + s->mem_index);
        if (level) {
            /* XXX: must save state */
            gen_helper_enter64_level(tcg_const_i32(level),
                                     tcg_const_i32((ot == OT_QUAD)),
                                     cpu_T[1]);
        }
        gen_op_mov_reg_T1(ot, R_EBP);
        tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
        gen_op_mov_reg_T1(OT_QUAD, R_ESP);
    } else
#endif
    {
        ot = s->dflag + OT_WORD;
        opsize = 2 << s->dflag;

        gen_op_movl_A0_reg(R_ESP);
        gen_op_addl_A0_im(-opsize);
        if (!s->ss32)
            gen_op_andl_A0_ffff();
        tcg_gen_mov_tl(cpu_T[1], cpu_A0);
        if (s->addseg)
            gen_op_addl_A0_seg(s, R_SS);
        /* push bp */
        gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
        gen_op_st_T0_A0(ot + s->mem_index);
        if (level) {
            /* XXX: must save state */
            gen_helper_enter_level(tcg_const_i32(level),
                                   tcg_const_i32(s->dflag),
                                   cpu_T[1]);
        }
        gen_op_mov_reg_T1(ot, R_EBP);
        tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
        gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
    }
}
static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
    s->is_jmp = DISAS_TB_JUMP;
}

/* an interrupt is different from an exception because of the
   privilege checks */
static void gen_interrupt(DisasContext *s, int intno,
                          target_ulong cur_eip, target_ulong next_eip)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
                               tcg_const_i32(next_eip - cur_eip));
    s->is_jmp = DISAS_TB_JUMP;
}

static void gen_debug(DisasContext *s, target_ulong cur_eip)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    gen_helper_debug(cpu_env);
    s->is_jmp = DISAS_TB_JUMP;
}
/* generate a generic end of block. Trace exception is also generated
   if needed */
static void gen_eob(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
        gen_helper_reset_inhibit_irq(cpu_env);
    }
    if (s->tb->flags & HF_RF_MASK) {
        gen_helper_reset_rf(cpu_env);
    }
    if (s->singlestep_enabled) {
        gen_helper_debug(cpu_env);
    } else if (s->tf) {
        gen_helper_single_step();
    } else {
        tcg_gen_exit_tb(0);
    }
    s->is_jmp = DISAS_TB_JUMP;
}
/* generate a jump to eip. No segment change must happen before as a
   direct call to the next block may occur */
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
{
    if (s->jmp_opt) {
        gen_update_cc_op(s);
        gen_goto_tb(s, tb_num, eip);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_jmp_im(eip);
        gen_eob(s);
    }
}

static void gen_jmp(DisasContext *s, target_ulong eip)
{
    gen_jmp_tb(s, eip, 0);
}
static inline void gen_ldq_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
}

static inline void gen_stq_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
}

static inline void gen_ldo_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
}

static inline void gen_sto_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
}
static inline void gen_op_movo(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
}

static inline void gen_op_movq(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}

static inline void gen_op_movl(int d_offset, int s_offset)
{
    tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
}

static inline void gen_op_movq_env_0(int d_offset)
{
    tcg_gen_movi_i64(cpu_tmp1_i64, 0);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}
typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv_i32 val);
typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv val);
#define SSE_SPECIAL ((void *)1)
#define SSE_DUMMY ((void *)2)

#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
                     gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
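
/* sse_op_table1 below is indexed first by the second opcode byte and then
   by the mandatory prefix: column 0 = no prefix, 1 = 0x66, 2 = 0xF3,
   3 = 0xF2 (hence the movups/movupd/movss/movsd ordering of the [0x10]
   entry).  SSE_SPECIAL marks opcodes that are decoded by hand, and
   SSE_DUMMY marks ops such as emms that are handled apart from the
   table. */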
static const SSEFunc_0_epp sse_op_table1[256][4] = {
    /* 3DNow! extensions */
    [0x0e] = { SSE_DUMMY }, /* femms */
    [0x0f] = { SSE_DUMMY }, /* pf... */
    /* pure SSE operations */
    [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
    [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
    [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
    [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
    [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
    [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */

    [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
    [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
    [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
    [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
    [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
    [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
    [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
    [0x2f] = { gen_helper_comiss, gen_helper_comisd },
    [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
    [0x51] = SSE_FOP(sqrt),
    [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
    [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
    [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
    [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
    [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
    [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
    [0x58] = SSE_FOP(add),
    [0x59] = SSE_FOP(mul),
    [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
               gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
    [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
    [0x5c] = SSE_FOP(sub),
    [0x5d] = SSE_FOP(min),
    [0x5e] = SSE_FOP(div),
    [0x5f] = SSE_FOP(max),

    [0xc2] = SSE_FOP(cmpeq),
    [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
               (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */

    [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
    [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */

    /* MMX ops and their SSE extensions */
    [0x60] = MMX_OP2(punpcklbw),
    [0x61] = MMX_OP2(punpcklwd),
    [0x62] = MMX_OP2(punpckldq),
    [0x63] = MMX_OP2(packsswb),
    [0x64] = MMX_OP2(pcmpgtb),
    [0x65] = MMX_OP2(pcmpgtw),
    [0x66] = MMX_OP2(pcmpgtl),
    [0x67] = MMX_OP2(packuswb),
    [0x68] = MMX_OP2(punpckhbw),
    [0x69] = MMX_OP2(punpckhwd),
    [0x6a] = MMX_OP2(punpckhdq),
    [0x6b] = MMX_OP2(packssdw),
    [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
    [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
    [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movdqu */
    [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
               (SSEFunc_0_epp)gen_helper_pshufd_xmm,
               (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
               (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
    [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
    [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
    [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
    [0x74] = MMX_OP2(pcmpeqb),
    [0x75] = MMX_OP2(pcmpeqw),
    [0x76] = MMX_OP2(pcmpeql),
    [0x77] = { SSE_DUMMY }, /* emms */
    [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
    [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
    [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
    [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
    [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
    [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
    [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
    [0xd1] = MMX_OP2(psrlw),
    [0xd2] = MMX_OP2(psrld),
    [0xd3] = MMX_OP2(psrlq),
    [0xd4] = MMX_OP2(paddq),
    [0xd5] = MMX_OP2(pmullw),
    [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
    [0xd8] = MMX_OP2(psubusb),
    [0xd9] = MMX_OP2(psubusw),
    [0xda] = MMX_OP2(pminub),
    [0xdb] = MMX_OP2(pand),
    [0xdc] = MMX_OP2(paddusb),
    [0xdd] = MMX_OP2(paddusw),
    [0xde] = MMX_OP2(pmaxub),
    [0xdf] = MMX_OP2(pandn),
    [0xe0] = MMX_OP2(pavgb),
    [0xe1] = MMX_OP2(psraw),
    [0xe2] = MMX_OP2(psrad),
    [0xe3] = MMX_OP2(pavgw),
    [0xe4] = MMX_OP2(pmulhuw),
    [0xe5] = MMX_OP2(pmulhw),
    [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
    [0xe8] = MMX_OP2(psubsb),
    [0xe9] = MMX_OP2(psubsw),
    [0xea] = MMX_OP2(pminsw),
    [0xeb] = MMX_OP2(por),
    [0xec] = MMX_OP2(paddsb),
    [0xed] = MMX_OP2(paddsw),
    [0xee] = MMX_OP2(pmaxsw),
    [0xef] = MMX_OP2(pxor),
    [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
    [0xf1] = MMX_OP2(psllw),
    [0xf2] = MMX_OP2(pslld),
    [0xf3] = MMX_OP2(psllq),
    [0xf4] = MMX_OP2(pmuludq),
    [0xf5] = MMX_OP2(pmaddwd),
    [0xf6] = MMX_OP2(psadbw),
    [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
               (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
    [0xf8] = MMX_OP2(psubb),
    [0xf9] = MMX_OP2(psubw),
    [0xfa] = MMX_OP2(psubl),
    [0xfb] = MMX_OP2(psubq),
    [0xfc] = MMX_OP2(paddb),
    [0xfd] = MMX_OP2(paddw),
    [0xfe] = MMX_OP2(paddl),
};
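/* sse_op_table2 handles the immediate-form shift groups (0f 71/72/73):
   the row is selected by the opcode and the /r field of the modrm byte,
   the column by the prefix index b1 (MMX vs XMM). */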
static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
    [0 + 2] = MMX_OP2(psrlw),
    [0 + 4] = MMX_OP2(psraw),
    [0 + 6] = MMX_OP2(psllw),
    [8 + 2] = MMX_OP2(psrld),
    [8 + 4] = MMX_OP2(psrad),
    [8 + 6] = MMX_OP2(pslld),
    [16 + 2] = MMX_OP2(psrlq),
    [16 + 3] = { NULL, gen_helper_psrldq_xmm },
    [16 + 6] = MMX_OP2(psllq),
    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
};
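/* The sse_op_table3 variants cover scalar conversions: the 'a' tables go
   from integer to float, the 'b' tables from float to integer, with
   'i' = 32-bit and 'q'/'l' = 64-bit integer operands. */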
static const SSEFunc_0_epi sse_op_table3ai[] = {
    gen_helper_cvtsi2ss,
    gen_helper_cvtsi2sd
};

#ifdef TARGET_X86_64
static const SSEFunc_0_epl sse_op_table3aq[] = {
    gen_helper_cvtsq2ss,
    gen_helper_cvtsq2sd
};
#endif

static const SSEFunc_i_ep sse_op_table3bi[] = {
    gen_helper_cvttss2si,
    gen_helper_cvtss2si,
    gen_helper_cvttsd2si,
    gen_helper_cvtsd2si
};

#ifdef TARGET_X86_64
static const SSEFunc_l_ep sse_op_table3bq[] = {
    gen_helper_cvttss2sq,
    gen_helper_cvtss2sq,
    gen_helper_cvttsd2sq,
    gen_helper_cvtsd2sq
};
#endif
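/* sse_op_table4 dispatches the cmpps/cmppd/cmpss/cmpsd family by the
   3-bit predicate in the immediate byte; the column is again the prefix
   index b1. */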
#define SSE_CMP(x) { \
    gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
    gen_helper_ ## x ## ss, gen_helper_ ## x ## sd}
static const SSEFunc_0_epp sse_op_table4[8][4] = {
    SSE_CMP(cmpeq),
    SSE_CMP(cmplt),
    SSE_CMP(cmple),
    SSE_CMP(cmpunord),
    SSE_CMP(cmpneq),
    SSE_CMP(cmpnlt),
    SSE_CMP(cmpnle),
    SSE_CMP(cmpord),
};
#undef SSE_CMP
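/* sse_op_table5 decodes 3DNow! instructions (0f 0f), which carry their
   real opcode in the trailing immediate byte. */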
static const SSEFunc_0_epp sse_op_table5[256] = {
    [0x0c] = gen_helper_pi2fw,
    [0x0d] = gen_helper_pi2fd,
    [0x1c] = gen_helper_pf2iw,
    [0x1d] = gen_helper_pf2id,
    [0x8a] = gen_helper_pfnacc,
    [0x8e] = gen_helper_pfpnacc,
    [0x90] = gen_helper_pfcmpge,
    [0x94] = gen_helper_pfmin,
    [0x96] = gen_helper_pfrcp,
    [0x97] = gen_helper_pfrsqrt,
    [0x9a] = gen_helper_pfsub,
    [0x9e] = gen_helper_pfadd,
    [0xa0] = gen_helper_pfcmpgt,
    [0xa4] = gen_helper_pfmax,
    [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
    [0xa7] = gen_helper_movq, /* pfrsqit1 */
    [0xaa] = gen_helper_pfsubr,
    [0xae] = gen_helper_pfacc,
    [0xb0] = gen_helper_pfcmpeq,
    [0xb4] = gen_helper_pfmul,
    [0xb6] = gen_helper_movq, /* pfrcpit2 */
    [0xb7] = gen_helper_pmulhrw_mmx,
    [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
};
struct SSEOpHelper_epp {
    SSEFunc_0_epp op[2];
    uint32_t ext_mask;
};

struct SSEOpHelper_eppi {
    SSEFunc_0_eppi op[2];
    uint32_t ext_mask;
};
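/* ext_mask holds the CPUID feature bit that must be set for an entry to
   be valid; gen_sse() checks it before dispatching. */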
#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
static const struct SSEOpHelper_epp sse_op_table6[256] = {
    [0x00] = SSSE3_OP(pshufb),
    [0x01] = SSSE3_OP(phaddw),
    [0x02] = SSSE3_OP(phaddd),
    [0x03] = SSSE3_OP(phaddsw),
    [0x04] = SSSE3_OP(pmaddubsw),
    [0x05] = SSSE3_OP(phsubw),
    [0x06] = SSSE3_OP(phsubd),
    [0x07] = SSSE3_OP(phsubsw),
    [0x08] = SSSE3_OP(psignb),
    [0x09] = SSSE3_OP(psignw),
    [0x0a] = SSSE3_OP(psignd),
    [0x0b] = SSSE3_OP(pmulhrsw),
    [0x10] = SSE41_OP(pblendvb),
    [0x14] = SSE41_OP(blendvps),
    [0x15] = SSE41_OP(blendvpd),
    [0x17] = SSE41_OP(ptest),
    [0x1c] = SSSE3_OP(pabsb),
    [0x1d] = SSSE3_OP(pabsw),
    [0x1e] = SSSE3_OP(pabsd),
    [0x20] = SSE41_OP(pmovsxbw),
    [0x21] = SSE41_OP(pmovsxbd),
    [0x22] = SSE41_OP(pmovsxbq),
    [0x23] = SSE41_OP(pmovsxwd),
    [0x24] = SSE41_OP(pmovsxwq),
    [0x25] = SSE41_OP(pmovsxdq),
    [0x28] = SSE41_OP(pmuldq),
    [0x29] = SSE41_OP(pcmpeqq),
    [0x2a] = SSE41_SPECIAL, /* movntdqa */
    [0x2b] = SSE41_OP(packusdw),
    [0x30] = SSE41_OP(pmovzxbw),
    [0x31] = SSE41_OP(pmovzxbd),
    [0x32] = SSE41_OP(pmovzxbq),
    [0x33] = SSE41_OP(pmovzxwd),
    [0x34] = SSE41_OP(pmovzxwq),
    [0x35] = SSE41_OP(pmovzxdq),
    [0x37] = SSE42_OP(pcmpgtq),
    [0x38] = SSE41_OP(pminsb),
    [0x39] = SSE41_OP(pminsd),
    [0x3a] = SSE41_OP(pminuw),
    [0x3b] = SSE41_OP(pminud),
    [0x3c] = SSE41_OP(pmaxsb),
    [0x3d] = SSE41_OP(pmaxsd),
    [0x3e] = SSE41_OP(pmaxuw),
    [0x3f] = SSE41_OP(pmaxud),
    [0x40] = SSE41_OP(pmulld),
    [0x41] = SSE41_OP(phminposuw),
};
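/* sse_op_table7 covers the 0f 3a space: every entry additionally takes an
   immediate byte, hence the eppi helper signature. */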
static const struct SSEOpHelper_eppi sse_op_table7[256] = {
    [0x08] = SSE41_OP(roundps),
    [0x09] = SSE41_OP(roundpd),
    [0x0a] = SSE41_OP(roundss),
    [0x0b] = SSE41_OP(roundsd),
    [0x0c] = SSE41_OP(blendps),
    [0x0d] = SSE41_OP(blendpd),
    [0x0e] = SSE41_OP(pblendw),
    [0x0f] = SSSE3_OP(palignr),
    [0x14] = SSE41_SPECIAL, /* pextrb */
    [0x15] = SSE41_SPECIAL, /* pextrw */
    [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
    [0x17] = SSE41_SPECIAL, /* extractps */
    [0x20] = SSE41_SPECIAL, /* pinsrb */
    [0x21] = SSE41_SPECIAL, /* insertps */
    [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
    [0x40] = SSE41_OP(dpps),
    [0x41] = SSE41_OP(dppd),
    [0x42] = SSE41_OP(mpsadbw),
    [0x60] = SSE42_OP(pcmpestrm),
    [0x61] = SSE42_OP(pcmpestri),
    [0x62] = SSE42_OP(pcmpistrm),
    [0x63] = SSE42_OP(pcmpistri),
};
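/* gen_sse decodes everything behind the 0f escape that touches MMX/SSE
   state.  On entry b is the second opcode byte; b1 is derived from the
   mandatory prefix (0 = none, 1 = 0x66, 2 = 0xf3, 3 = 0xf2) and, for the
   hand-decoded SSE_SPECIAL cases, folded into bits 8-9 of b so a single
   switch can distinguish e.g. movups (0x010) from movss (0x210). */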
static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
{
    int b1, op1_offset, op2_offset, is_xmm, val, ot;
    int modrm, mod, rm, reg, reg_addr, offset_addr;
    SSEFunc_0_epp sse_fn_epp;
    SSEFunc_0_eppi sse_fn_eppi;
    SSEFunc_0_ppi sse_fn_ppi;
    SSEFunc_0_eppt sse_fn_eppt;
    b &= 0xff;
    if (s->prefix & PREFIX_DATA)
        b1 = 1;
    else if (s->prefix & PREFIX_REPZ)
        b1 = 2;
    else if (s->prefix & PREFIX_REPNZ)
        b1 = 3;
    else
        b1 = 0;
    sse_fn_epp = sse_op_table1[b][b1];
    if (!sse_fn_epp) {
        goto illegal_op;
    }
    if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
        is_xmm = 1;
    } else {
        if (b1 == 0) {
            /* MMX case */
            is_xmm = 0;
        } else {
            is_xmm = 1;
        }
    }
    /* simple MMX/SSE operation */
    if (s->flags & HF_TS_MASK) {
        gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        return;
    }
    if (s->flags & HF_EM_MASK) {
    illegal_op:
        gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
        return;
    }
    if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
        if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
            goto illegal_op;
    if (b == 0x0e) {
        if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
            goto illegal_op;
        /* femms */
        gen_helper_emms(cpu_env);
        return;
    }
    if (b == 0x77) {
        /* emms */
        gen_helper_emms(cpu_env);
        return;
    }
    /* prepare MMX state (XXX: optimize by storing fptt and fptags in
       the static cpu state) */
    if (!is_xmm) {
        gen_helper_enter_mmx(cpu_env);
    }

    modrm = ldub_code(s->pc++);
    reg = ((modrm >> 3) & 7);
    if (is_xmm)
        reg |= rex_r;
    mod = (modrm >> 6) & 3;
    if (sse_fn_epp == SSE_SPECIAL) {
        b |= (b1 << 8);
        switch(b) {
        case 0x0e7: /* movntq */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
            break;
        case 0x1e7: /* movntdq */
        case 0x02b: /* movntps */
        case 0x12b: /* movntpd */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            break;
        case 0x3f0: /* lddqu */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            break;
        case 0x22b: /* movntss */
        case 0x32b: /* movntsd */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            if (b1 & 1) {
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
                    xmm_regs[reg]));
            } else {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                    xmm_regs[reg].XMM_L(0)));
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
            }
            break;
        case 0x6e: /* movd mm, ea */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
                tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
            } else
#endif
            {
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,fpregs[reg].mmx));
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
            }
            break;
        case 0x16e: /* movd xmm, ea */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg]));
                gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
            } else
#endif
            {
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg]));
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
            }
            break;
        case 0x6f: /* movq mm, ea */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
            } else {
                rm = (modrm & 7);
                tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
                               offsetof(CPUX86State,fpregs[rm].mmx));
                tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
                               offsetof(CPUX86State,fpregs[reg].mmx));
            }
            break;
        case 0x010: /* movups */
        case 0x110: /* movupd */
        case 0x028: /* movaps */
        case 0x128: /* movapd */
        case 0x16f: /* movdqa xmm, ea */
        case 0x26f: /* movdqu xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
                            offsetof(CPUX86State,xmm_regs[rm]));
            }
            break;
        case 0x210: /* movss xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
            }
            break;
        case 0x310: /* movsd xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            break;
        case 0x012: /* movlps */
        case 0x112: /* movlpd */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                /* movhlps */
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
            }
            break;
        case 0x212: /* movsldup */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
            }
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
            break;
        case 0x312: /* movddup */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            break;
        case 0x016: /* movhps */
        case 0x116: /* movhpd */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            } else {
                /* movlhps */
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            break;
        case 0x216: /* movshdup */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
            }
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
            break;
        case 0x178:
        case 0x378:
            {
                int bit_index, field_length;

                if (b1 == 1 && reg != 0)
                    goto illegal_op;
                field_length = ldub_code(s->pc++) & 0x3F;
                bit_index = ldub_code(s->pc++) & 0x3F;
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                    offsetof(CPUX86State,xmm_regs[reg]));
                if (b1 == 1)
                    gen_helper_extrq_i(cpu_env, cpu_ptr0,
                                       tcg_const_i32(bit_index),
                                       tcg_const_i32(field_length));
                else
                    gen_helper_insertq_i(cpu_env, cpu_ptr0,
                                         tcg_const_i32(bit_index),
                                         tcg_const_i32(field_length));
            }
            break;
        case 0x7e: /* movd ea, mm */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                tcg_gen_ld_i64(cpu_T[0], cpu_env,
                               offsetof(CPUX86State,fpregs[reg].mmx));
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
            } else
#endif
            {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
            }
            break;
        case 0x17e: /* movd ea, xmm */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                tcg_gen_ld_i64(cpu_T[0], cpu_env,
                               offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
            } else
#endif
            {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
            }
            break;
        case 0x27e: /* movq xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            break;
        case 0x7f: /* movq ea, mm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
            } else {
                rm = (modrm & 7);
                gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
                            offsetof(CPUX86State,fpregs[reg].mmx));
            }
            break;
        case 0x011: /* movups */
        case 0x111: /* movupd */
        case 0x029: /* movaps */
        case 0x129: /* movapd */
        case 0x17f: /* movdqa ea, xmm */
        case 0x27f: /* movdqu ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
                            offsetof(CPUX86State,xmm_regs[reg]));
            }
            break;
        case 0x211: /* movss ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
            }
            break;
        case 0x311: /* movsd ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            }
            break;
        case 0x013: /* movlps */
        case 0x113: /* movlpd */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                goto illegal_op;
            }
            break;
        case 0x017: /* movhps */
        case 0x117: /* movhpd */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            } else {
                goto illegal_op;
            }
            break;
        case 0x71: /* shift mm, im */
        case 0x72:
        case 0x73:
        case 0x171: /* shift xmm, im */
        case 0x172:
        case 0x173:
            if (b1 >= 2) {
                goto illegal_op;
            }
            val = ldub_code(s->pc++);
            if (is_xmm) {
                gen_op_movl_T0_im(val);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
                op1_offset = offsetof(CPUX86State,xmm_t0);
            } else {
                gen_op_movl_T0_im(val);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
                op1_offset = offsetof(CPUX86State,mmx_t0);
            }
            sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
                                       (((modrm >> 3)) & 7)][b1];
            if (!sse_fn_epp) {
                goto illegal_op;
            }
            if (is_xmm) {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        case 0x050: /* movmskps */
            rm = (modrm & 7) | REX_B(s);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                             offsetof(CPUX86State,xmm_regs[rm]));
            gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x150: /* movmskpd */
            rm = (modrm & 7) | REX_B(s);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                             offsetof(CPUX86State,xmm_regs[rm]));
            gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x02a: /* cvtpi2ps */
        case 0x12a: /* cvtpi2pd */
            gen_helper_enter_mmx(cpu_env);
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,mmx_t0);
                gen_ldq_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            switch(b >> 8) {
            case 0x0:
                gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            default:
            case 0x1:
                gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            }
            break;
        case 0x22a: /* cvtsi2ss */
        case 0x32a: /* cvtsi2sd */
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            if (ot == OT_LONG) {
                SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
            } else {
#ifdef TARGET_X86_64
                SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
                sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
#else
                goto illegal_op;
#endif
            }
            break;
        case 0x02c: /* cvttps2pi */
        case 0x12c: /* cvttpd2pi */
        case 0x02d: /* cvtps2pi */
        case 0x12d: /* cvtpd2pi */
            gen_helper_enter_mmx(cpu_env);
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,xmm_t0);
                gen_ldo_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
            op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            switch(b) {
            case 0x02c:
                gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            case 0x12c:
                gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            case 0x02d:
                gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            case 0x12d:
                gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            }
            break;
        case 0x22c: /* cvttss2si */
        case 0x32c: /* cvttsd2si */
        case 0x22d: /* cvtss2si */
        case 0x32d: /* cvtsd2si */
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                if ((b >> 8) & 1) {
                    gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
                } else {
                    gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                    tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                }
                op2_offset = offsetof(CPUX86State,xmm_t0);
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
            if (ot == OT_LONG) {
                SSEFunc_i_ep sse_fn_i_ep =
                    sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
                sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
                tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            } else {
#ifdef TARGET_X86_64
                SSEFunc_l_ep sse_fn_l_ep =
                    sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
                sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
#else
                goto illegal_op;
#endif
            }
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0xc4: /* pinsrw */
        case 0x1c4:
            s->rip_offset = 1;
            gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
            val = ldub_code(s->pc++);
            if (b1) {
                val &= 7;
                tcg_gen_st16_tl(cpu_T[0], cpu_env,
                                offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
            } else {
                val &= 3;
                tcg_gen_st16_tl(cpu_T[0], cpu_env,
                                offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
            }
            break;
        case 0xc5: /* pextrw */
        case 0x1c5:
            if (mod != 3)
                goto illegal_op;
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            val = ldub_code(s->pc++);
            if (b1) {
                val &= 7;
                rm = (modrm & 7) | REX_B(s);
                tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
            } else {
                val &= 3;
                rm = (modrm & 7);
                tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
            }
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0x1d6: /* movq ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
            }
            break;
        case 0x2d6: /* movq2dq */
            gen_helper_enter_mmx(cpu_env);
            rm = (modrm & 7);
            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                        offsetof(CPUX86State,fpregs[rm].mmx));
            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            break;
        case 0x3d6: /* movdq2q */
            gen_helper_enter_mmx(cpu_env);
            rm = (modrm & 7) | REX_B(s);
            gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
                        offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            break;
        case 0xd7: /* pmovmskb */
        case 0x1d7:
            if (mod != 3)
                goto illegal_op;
            if (b1) {
                rm = (modrm & 7) | REX_B(s);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
                gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            } else {
                rm = (modrm & 7);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
                gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            }
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x138:
            if (s->prefix & PREFIX_REPNZ)
                goto crc32;
        case 0x038:
            b = modrm;
            modrm = ldub_code(s->pc++);
            rm = modrm & 7;
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            if (b1 >= 2) {
                goto illegal_op;
            }

            sse_fn_epp = sse_op_table6[b].op[b1];
            if (!sse_fn_epp) {
                goto illegal_op;
            }
            if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
                goto illegal_op;

            if (b1) {
                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
                } else {
                    op2_offset = offsetof(CPUX86State,xmm_t0);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    switch (b) {
                    case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
                    case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
                    case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
                        gen_ldq_env_A0(s->mem_index, op2_offset +
                                        offsetof(XMMReg, XMM_Q(0)));
                        break;
                    case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
                    case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
                        tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
                                        offsetof(XMMReg, XMM_L(0)));
                        break;
                    case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
                        tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
                                        offsetof(XMMReg, XMM_W(0)));
                        break;
                    case 0x2a: /* movntdqa */
                        gen_ldo_env_A0(s->mem_index, op1_offset);
                        return;
                    default:
                        gen_ldo_env_A0(s->mem_index, op2_offset);
                    }
                }
            } else {
                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
                } else {
                    op2_offset = offsetof(CPUX86State,mmx_t0);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_ldq_env_A0(s->mem_index, op2_offset);
                }
            }
            if (sse_fn_epp == SSE_SPECIAL) {
                goto illegal_op;
            }

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);

            if (b == 0x17)
                s->cc_op = CC_OP_EFLAGS;
            break;
        case 0x338: /* crc32 */
        crc32:
            b = modrm;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;

            if (b != 0xf0 && b != 0xf1)
                goto illegal_op;
            if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
                goto illegal_op;

            if (b == 0xf0)
                ot = OT_BYTE;
            else if (b == 0xf1 && s->dflag != 2)
                if (s->prefix & PREFIX_DATA)
                    ot = OT_WORD;
                else
                    ot = OT_LONG;
            else
                ot = OT_QUAD;

            gen_op_mov_TN_reg(OT_LONG, 0, reg);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
            gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
                             cpu_T[0], tcg_const_i32(8 << ot));

            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0x03a:
        case 0x13a:
            b = modrm;
            modrm = ldub_code(s->pc++);
            rm = modrm & 7;
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            if (b1 >= 2) {
                goto illegal_op;
            }

            sse_fn_eppi = sse_op_table7[b].op[b1];
            if (!sse_fn_eppi) {
                goto illegal_op;
            }
            if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
                goto illegal_op;

            if (sse_fn_eppi == SSE_SPECIAL) {
                ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3)
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                reg = ((modrm >> 3) & 7) | rex_r;
                val = ldub_code(s->pc++);
                switch (b) {
                case 0x14: /* pextrb */
                    tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_B(val & 15)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
                                        (s->mem_index >> 2) - 1);
                    break;
                case 0x15: /* pextrw */
                    tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_W(val & 7)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
                                        (s->mem_index >> 2) - 1);
                    break;
                case 0x16:
                    if (ot == OT_LONG) { /* pextrd */
                        tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(val & 3)));
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        if (mod == 3)
                            gen_op_mov_reg_v(ot, rm, cpu_T[0]);
                        else
                            tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
                                            (s->mem_index >> 2) - 1);
                    } else { /* pextrq */
#ifdef TARGET_X86_64
                        tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_Q(val & 1)));
                        if (mod == 3)
                            gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
                        else
                            tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                            (s->mem_index >> 2) - 1);
#else
                        goto illegal_op;
#endif
                    }
                    break;
                case 0x17: /* extractps */
                    tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_L(val & 3)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
                                        (s->mem_index >> 2) - 1);
                    break;
                case 0x20: /* pinsrb */
                    if (mod == 3)
                        gen_op_mov_TN_reg(OT_LONG, 0, rm);
                    else
                        tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0,
                                        (s->mem_index >> 2) - 1);
                    tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_B(val & 15)));
                    break;
                case 0x21: /* insertps */
                    if (mod == 3) {
                        tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
                                        offsetof(CPUX86State,xmm_regs[rm]
                                                .XMM_L((val >> 6) & 3)));
                    } else {
                        tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                        (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                    }
                    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
                                    offsetof(CPUX86State,xmm_regs[reg]
                                            .XMM_L((val >> 4) & 3)));
                    if ((val >> 0) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(0)));
                    if ((val >> 1) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(1)));
                    if ((val >> 2) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(2)));
                    if ((val >> 3) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(3)));
                    break;
                case 0x22:
                    if (ot == OT_LONG) { /* pinsrd */
                        if (mod == 3)
                            gen_op_mov_v_reg(ot, cpu_tmp0, rm);
                        else
                            tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                            (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(val & 3)));
                    } else { /* pinsrq */
#ifdef TARGET_X86_64
                        if (mod == 3)
                            gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
                        else
                            tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                            (s->mem_index >> 2) - 1);
                        tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_Q(val & 1)));
#else
                        goto illegal_op;
#endif
                    }
                    break;
                }
                return;
            }

            if (b1) {
                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
                } else {
                    op2_offset = offsetof(CPUX86State,xmm_t0);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_ldo_env_A0(s->mem_index, op2_offset);
                }
            } else {
                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
                } else {
                    op2_offset = offsetof(CPUX86State,mmx_t0);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_ldq_env_A0(s->mem_index, op2_offset);
                }
            }
            val = ldub_code(s->pc++);

            if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
                s->cc_op = CC_OP_EFLAGS;

                if (s->dflag == 2)
                    /* The helper must use entire 64-bit gp registers */
                    val |= 1 << 8;
            }

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
            break;
        default:
            goto illegal_op;
        }
    } else {
        /* generic MMX or SSE operation */
        switch(b) {
        case 0x70: /* pshufx insn */
        case 0xc6: /* pshufx insn */
        case 0xc2: /* compare insns */
            s->rip_offset = 1;
            break;
        default:
            break;
        }
        if (is_xmm) {
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,xmm_t0);
                if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
                                b == 0xc2)) {
                    /* specific case for SSE single instructions */
                    if (b1 == 2) {
                        /* 32 bit access */
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                    } else {
                        /* 64 bit access */
                        gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
                    }
                } else {
                    gen_ldo_env_A0(s->mem_index, op2_offset);
                }
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
        } else {
            op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,mmx_t0);
                gen_ldq_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
        }
        switch(b) {
        case 0x0f: /* 3DNow! data insns */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
                goto illegal_op;
            val = ldub_code(s->pc++);
            sse_fn_epp = sse_op_table5[val];
            if (!sse_fn_epp) {
                goto illegal_op;
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        case 0x70: /* pshufx insn */
        case 0xc6: /* pshufx insn */
            val = ldub_code(s->pc++);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            /* XXX: introduce a new table? */
            sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
            sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
            break;
        case 0xc2:
            /* compare insns */
            val = ldub_code(s->pc++);
            if (val >= 8)
                goto illegal_op;
            sse_fn_epp = sse_op_table4[val][b1];

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        case 0xf7:
            /* maskmov : we must prepare A0 */
            if (mod != 3)
                goto illegal_op;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_reg(R_EDI);
            } else
#endif
            {
                gen_op_movl_A0_reg(R_EDI);
                if (s->aflag == 0)
                    gen_op_andl_A0_ffff();
            }
            gen_add_A0_ds_seg(s);

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            /* XXX: introduce a new table? */
            sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
            sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
            break;
        default:
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        }
        if (b == 0x2e || b == 0x2f) {
            s->cc_op = CC_OP_EFLAGS;
        }
    }
}
/* convert one instruction. s->is_jmp is set if the translation must
   be stopped. Return the next pc value */
static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
{
    int b, prefixes, aflag, dflag;
    int shift, ot;
    int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
    target_ulong next_eip, tval;
    int rex_w, rex_r;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
        tcg_gen_debug_insn_start(pc_start);
    s->pc = pc_start;
    prefixes = 0;
    aflag = s->code32;
    dflag = s->code32;
    s->override = -1;
    rex_w = -1;
    rex_r = 0;
#ifdef TARGET_X86_64
    s->rex_x = 0;
    s->rex_b = 0;
    x86_64_hregs = 0;
#endif
    s->rip_offset = 0; /* for relative ip address */
 next_byte:
    b = ldub_code(s->pc);
    s->pc++;
    /* check prefixes */
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        switch (b) {
        case 0xf3:
            prefixes |= PREFIX_REPZ;
            goto next_byte;
        case 0xf2:
            prefixes |= PREFIX_REPNZ;
            goto next_byte;
        case 0xf0:
            prefixes |= PREFIX_LOCK;
            goto next_byte;
        case 0x2e:
            s->override = R_CS;
            goto next_byte;
        case 0x36:
            s->override = R_SS;
            goto next_byte;
        case 0x3e:
            s->override = R_DS;
            goto next_byte;
        case 0x26:
            s->override = R_ES;
            goto next_byte;
        case 0x64:
            s->override = R_FS;
            goto next_byte;
        case 0x65:
            s->override = R_GS;
            goto next_byte;
        case 0x66:
            prefixes |= PREFIX_DATA;
            goto next_byte;
        case 0x67:
            prefixes |= PREFIX_ADR;
            goto next_byte;
        case 0x40 ... 0x4f:
            /* REX prefix */
            rex_w = (b >> 3) & 1;
            rex_r = (b & 0x4) << 1;
            s->rex_x = (b & 0x2) << 2;
            REX_B(s) = (b & 0x1) << 3;
            x86_64_hregs = 1; /* select uniform byte register addressing */
            goto next_byte;
        }
        if (rex_w == 1) {
            /* 0x66 is ignored if rex.w is set */
            dflag = 2;
        } else {
            if (prefixes & PREFIX_DATA)
                dflag ^= 1;
        }
        if (!(prefixes & PREFIX_ADR))
            aflag = 2;
    } else
#endif
    {
        switch (b) {
        case 0xf3:
            prefixes |= PREFIX_REPZ;
            goto next_byte;
        case 0xf2:
            prefixes |= PREFIX_REPNZ;
            goto next_byte;
        case 0xf0:
            prefixes |= PREFIX_LOCK;
            goto next_byte;
        case 0x2e:
            s->override = R_CS;
            goto next_byte;
        case 0x36:
            s->override = R_SS;
            goto next_byte;
        case 0x3e:
            s->override = R_DS;
            goto next_byte;
        case 0x26:
            s->override = R_ES;
            goto next_byte;
        case 0x64:
            s->override = R_FS;
            goto next_byte;
        case 0x65:
            s->override = R_GS;
            goto next_byte;
        case 0x66:
            prefixes |= PREFIX_DATA;
            goto next_byte;
        case 0x67:
            prefixes |= PREFIX_ADR;
            goto next_byte;
        }
        if (prefixes & PREFIX_DATA)
            dflag ^= 1;
        if (prefixes & PREFIX_ADR)
            aflag ^= 1;
    }

    s->prefix = prefixes;
    s->aflag = aflag;
    s->dflag = dflag;

    /* lock generation */
    if (prefixes & PREFIX_LOCK)
        gen_helper_lock();
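    /* At this point aflag/dflag encode the effective address and operand
       size: 0 = 16 bit, 1 = 32 bit, 2 = 64 bit (64-bit mode only). */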
    /* now check op code */
 reswitch:
    switch(b) {
    case 0x0f:
        /**************************/
        /* extended op code */
        b = ldub_code(s->pc++) | 0x100;
        goto reswitch;
        /**************************/
        /* arith & logic */
    case 0x00 ... 0x05:
    case 0x08 ... 0x0d:
    case 0x10 ... 0x15:
    case 0x18 ... 0x1d:
    case 0x20 ... 0x25:
    case 0x28 ... 0x2d:
    case 0x30 ... 0x35:
    case 0x38 ... 0x3d:
        {
            int op, f, val;
            op = (b >> 3) & 7;
            f = (b >> 1) & 3;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;

            switch(f) {
            case 0: /* OP Ev, Gv */
                modrm = ldub_code(s->pc++);
                reg = ((modrm >> 3) & 7) | rex_r;
                mod = (modrm >> 6) & 3;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    opreg = OR_TMP0;
                } else if (op == OP_XORL && rm == reg) {
                xor_zero:
                    /* xor reg, reg optimisation */
                    gen_op_movl_T0_0();
                    s->cc_op = CC_OP_LOGICB + ot;
                    gen_op_mov_reg_T0(ot, reg);
                    gen_op_update1_cc();
                    break;
                } else {
                    opreg = rm;
                }
                gen_op_mov_TN_reg(ot, 1, reg);
                gen_op(s, op, ot, opreg);
                break;
            case 1: /* OP Gv, Ev */
                modrm = ldub_code(s->pc++);
                mod = (modrm >> 6) & 3;
                reg = ((modrm >> 3) & 7) | rex_r;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_op_ld_T1_A0(ot + s->mem_index);
                } else if (op == OP_XORL && rm == reg) {
                    goto xor_zero;
                } else {
                    gen_op_mov_TN_reg(ot, 1, rm);
                }
                gen_op(s, op, ot, reg);
                break;
            case 2: /* OP A, Iv */
                val = insn_get(s, ot);
                gen_op_movl_T1_im(val);
                gen_op(s, op, ot, OR_EAX);
                break;
            }
        }
        break;
    case 0x80: /* GRP1 */
    case 0x81:
    case 0x82:
    case 0x83:
        {
            int val;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;

            modrm = ldub_code(s->pc++);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            op = (modrm >> 3) & 7;

            if (mod != 3) {
                if (b == 0x83)
                    s->rip_offset = 1;
                else
                    s->rip_offset = insn_const_size(ot);
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                opreg = OR_TMP0;
            } else {
                opreg = rm;
            }

            switch(b) {
            default:
            case 0x80:
            case 0x81:
            case 0x82:
                val = insn_get(s, ot);
                break;
            case 0x83:
                val = (int8_t)insn_get(s, OT_BYTE);
                break;
            }
            gen_op_movl_T1_im(val);
            gen_op(s, op, ot, opreg);
        }
        break;
        /**************************/
        /* inc, dec, and other misc arith */
    case 0x40 ... 0x47: /* inc Gv */
        ot = dflag ? OT_LONG : OT_WORD;
        gen_inc(s, ot, OR_EAX + (b & 7), 1);
        break;
    case 0x48 ... 0x4f: /* dec Gv */
        ot = dflag ? OT_LONG : OT_WORD;
        gen_inc(s, ot, OR_EAX + (b & 7), -1);
        break;
    case 0xf6: /* GRP3 */
    case 0xf7:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (mod != 3) {
            if (op == 0)
                s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_op_ld_T0_A0(ot + s->mem_index);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }

        switch(op) {
        case 0: /* test */
            val = insn_get(s, ot);
            gen_op_movl_T1_im(val);
            gen_op_testl_T0_T1_cc();
            s->cc_op = CC_OP_LOGICB + ot;
            break;
        case 2: /* not */
            tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
            if (mod != 3) {
                gen_op_st_T0_A0(ot + s->mem_index);
            } else {
                gen_op_mov_reg_T0(ot, rm);
            }
            break;
        case 3: /* neg */
            tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
            if (mod != 3) {
                gen_op_st_T0_A0(ot + s->mem_index);
            } else {
                gen_op_mov_reg_T0(ot, rm);
            }
            gen_op_update_neg_cc();
            s->cc_op = CC_OP_SUBB + ot;
            break;
        case 4: /* mul */
            switch(ot) {
            case OT_BYTE:
                gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
                tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
                s->cc_op = CC_OP_MULB;
                break;
            case OT_WORD:
                gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
                tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
                gen_op_mov_reg_T0(OT_WORD, R_EDX);
                tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                s->cc_op = CC_OP_MULW;
                break;
            default:
            case OT_LONG:
#ifdef TARGET_X86_64
                gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_LONG, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
                gen_op_mov_reg_T0(OT_LONG, R_EDX);
                tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
#else
                {
                    TCGv_i64 t0, t1;
                    t0 = tcg_temp_new_i64();
                    t1 = tcg_temp_new_i64();
                    gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                    tcg_gen_extu_i32_i64(t0, cpu_T[0]);
                    tcg_gen_extu_i32_i64(t1, cpu_T[1]);
                    tcg_gen_mul_i64(t0, t0, t1);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EAX);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                    tcg_gen_shri_i64(t0, t0, 32);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EDX);
                    tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                }
#endif
                s->cc_op = CC_OP_MULL;
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_helper_mulq_EAX_T0(cpu_T[0]);
                s->cc_op = CC_OP_MULQ;
                break;
#endif
            }
            break;
        case 5: /* imul */
            switch(ot) {
            case OT_BYTE:
                gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
                tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                s->cc_op = CC_OP_MULB;
                break;
            case OT_WORD:
                gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
                tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
                gen_op_mov_reg_T0(OT_WORD, R_EDX);
                s->cc_op = CC_OP_MULW;
                break;
            default:
            case OT_LONG:
#ifdef TARGET_X86_64
                gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_LONG, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
                gen_op_mov_reg_T0(OT_LONG, R_EDX);
#else
                {
                    TCGv_i64 t0, t1;
                    t0 = tcg_temp_new_i64();
                    t1 = tcg_temp_new_i64();
                    gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                    tcg_gen_ext_i32_i64(t0, cpu_T[0]);
                    tcg_gen_ext_i32_i64(t1, cpu_T[1]);
                    tcg_gen_mul_i64(t0, t0, t1);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EAX);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                    tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
                    tcg_gen_shri_i64(t0, t0, 32);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EDX);
                    tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                }
#endif
                s->cc_op = CC_OP_MULL;
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_helper_imulq_EAX_T0(cpu_T[0]);
                s->cc_op = CC_OP_MULQ;
                break;
#endif
            }
            break;
        case 6: /* div */
            switch(ot) {
            case OT_BYTE:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divb_AL(cpu_T[0]);
                break;
            case OT_WORD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divw_AX(cpu_T[0]);
                break;
            default:
            case OT_LONG:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divl_EAX(cpu_T[0]);
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divq_EAX(cpu_T[0]);
                break;
#endif
            }
            break;
        case 7: /* idiv */
            switch(ot) {
            case OT_BYTE:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivb_AL(cpu_T[0]);
                break;
            case OT_WORD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivw_AX(cpu_T[0]);
                break;
            default:
            case OT_LONG:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivl_EAX(cpu_T[0]);
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivq_EAX(cpu_T[0]);
                break;
#endif
            }
            break;
        default:
            goto illegal_op;
        }
        break;
    case 0xfe: /* GRP4 */
    case 0xff: /* GRP5 */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (op >= 2 && b == 0xfe) {
            goto illegal_op;
        }
        if (CODE64(s)) {
            if (op == 2 || op == 4) {
                /* operand size for jumps is 64 bit */
                ot = OT_QUAD;
            } else if (op == 3 || op == 5) {
                ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
            } else if (op == 6) {
                /* default push size is 64 bit */
                ot = dflag ? OT_QUAD : OT_WORD;
            }
        }
        if (mod != 3) {
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            if (op >= 2 && op != 3 && op != 5)
                gen_op_ld_T0_A0(ot + s->mem_index);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }
        switch(op) {
        case 0: /* inc Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, 1);
            break;
        case 1: /* dec Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, -1);
            break;
        case 2: /* call Ev */
            /* XXX: optimize if memory (no 'and' is necessary) */
            if (s->dflag == 0)
                gen_op_andl_T0_ffff();
            next_eip = s->pc - s->cs_base;
            gen_movtl_T1_im(next_eip);
            gen_push_T1(s);
            gen_op_jmp_T0();
            gen_eob(s);
            break;
        case 3: /* lcall Ev */
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
            gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
        do_lcall:
            if (s->pe && !s->vm86) {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lcall_protected(cpu_tmp2_i32, cpu_T[1],
                                           tcg_const_i32(dflag),
                                           tcg_const_i32(s->pc - pc_start));
            } else {
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lcall_real(cpu_tmp2_i32, cpu_T[1],
                                      tcg_const_i32(dflag),
                                      tcg_const_i32(s->pc - s->cs_base));
            }
            gen_eob(s);
            break;
        case 4: /* jmp Ev */
            if (s->dflag == 0)
                gen_op_andl_T0_ffff();
            gen_op_jmp_T0();
            gen_eob(s);
            break;
        case 5: /* ljmp Ev */
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
            gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
        do_ljmp:
            if (s->pe && !s->vm86) {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_ljmp_protected(cpu_tmp2_i32, cpu_T[1],
                                          tcg_const_i32(s->pc - pc_start));
            } else {
                gen_op_movl_seg_T0_vm(R_CS);
                gen_op_movl_T0_T1();
                gen_op_jmp_T0();
            }
            gen_eob(s);
            break;
        case 6: /* push Ev */
            gen_push_T0(s);
            break;
        default:
            goto illegal_op;
        }
        break;
    case 0x84: /* test Ev, Gv */
    case 0x85:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_TN_reg(ot, 1, reg);
        gen_op_testl_T0_T1_cc();
        s->cc_op = CC_OP_LOGICB + ot;
        break;

    case 0xa8: /* test eAX, Iv */
    case 0xa9:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        val = insn_get(s, ot);

        gen_op_mov_TN_reg(ot, 0, OR_EAX);
        gen_op_movl_T1_im(val);
        gen_op_testl_T0_T1_cc();
        s->cc_op = CC_OP_LOGICB + ot;
        break;
    case 0x98: /* CWDE/CBW */
#ifdef TARGET_X86_64
        if (dflag == 2) {
            gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
            tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_QUAD, R_EAX);
        } else
#endif
        if (dflag == 1) {
            gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_LONG, R_EAX);
        } else {
            gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
            tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_WORD, R_EAX);
        }
        break;
    case 0x99: /* CDQ/CWD */
#ifdef TARGET_X86_64
        if (dflag == 2) {
            gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
            gen_op_mov_reg_T0(OT_QUAD, R_EDX);
        } else
#endif
        if (dflag == 1) {
            gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
            tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
            gen_op_mov_reg_T0(OT_LONG, R_EDX);
        } else {
            gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
            gen_op_mov_reg_T0(OT_WORD, R_EDX);
        }
        break;
    case 0x1af: /* imul Gv, Ev */
    case 0x69: /* imul Gv, Ev, I */
    case 0x6b:
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        if (b == 0x69)
            s->rip_offset = insn_const_size(ot);
        else if (b == 0x6b)
            s->rip_offset = 1;
        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
        if (b == 0x69) {
            val = insn_get(s, ot);
            gen_op_movl_T1_im(val);
        } else if (b == 0x6b) {
            val = (int8_t)insn_get(s, OT_BYTE);
            gen_op_movl_T1_im(val);
        } else {
            gen_op_mov_TN_reg(ot, 1, reg);
        }

#ifdef TARGET_X86_64
        if (ot == OT_QUAD) {
            gen_helper_imulq_T0_T1(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else
#endif
        if (ot == OT_LONG) {
#ifdef TARGET_X86_64
            tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
            tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
            tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
            tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
#else
            {
                TCGv_i64 t0, t1;
                t0 = tcg_temp_new_i64();
                t1 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(t0, cpu_T[0]);
                tcg_gen_ext_i32_i64(t1, cpu_T[1]);
                tcg_gen_mul_i64(t0, t0, t1);
                tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
                tcg_gen_shri_i64(t0, t0, 32);
                tcg_gen_trunc_i64_i32(cpu_T[1], t0);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
            }
#endif
        } else {
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
            /* XXX: use 32 bit mul which could be faster */
            tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
            tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
            tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
        }
        gen_op_mov_reg_T0(ot, reg);
        s->cc_op = CC_OP_MULB + ot;
        break;
    case 0x1c0:
    case 0x1c1: /* xadd Ev, Gv */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_TN_reg(ot, 1, rm);
            gen_op_addl_T0_T1();
            gen_op_mov_reg_T1(ot, reg);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_op_addl_T0_T1();
            gen_op_st_T0_A0(ot + s->mem_index);
            gen_op_mov_reg_T1(ot, reg);
        }
        gen_op_update2_cc();
        s->cc_op = CC_OP_ADDB + ot;
        break;
    case 0x1b0:
    case 0x1b1: /* cmpxchg Ev, Gv */
        {
            int label1, label2;
            TCGv t0, t1, t2, a0;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            a0 = tcg_temp_local_new();
            gen_op_mov_v_reg(ot, t1, reg);
            if (mod == 3) {
                rm = (modrm & 7) | REX_B(s);
                gen_op_mov_v_reg(ot, t0, rm);
            } else {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                tcg_gen_mov_tl(a0, cpu_A0);
                gen_op_ld_v(ot + s->mem_index, t0, a0);
                rm = 0; /* avoid warning */
            }
            label1 = gen_new_label();
            tcg_gen_sub_tl(t2, cpu_regs[R_EAX], t0);
            gen_extu(ot, t2);
            tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
            label2 = gen_new_label();
            if (mod == 3) {
                gen_op_mov_reg_v(ot, R_EAX, t0);
                tcg_gen_br(label2);
                gen_set_label(label1);
                gen_op_mov_reg_v(ot, rm, t1);
            } else {
                /* perform no-op store cycle like physical cpu; must be
                   before changing accumulator to ensure idempotency if
                   the store faults and the instruction is restarted */
                gen_op_st_v(ot + s->mem_index, t0, a0);
                gen_op_mov_reg_v(ot, R_EAX, t0);
                tcg_gen_br(label2);
                gen_set_label(label1);
                gen_op_st_v(ot + s->mem_index, t1, a0);
            }
            gen_set_label(label2);
            tcg_gen_mov_tl(cpu_cc_src, t0);
            tcg_gen_mov_tl(cpu_cc_dst, t2);
            s->cc_op = CC_OP_SUBB + ot;
            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
            tcg_temp_free(a0);
        }
        break;
    case 0x1c7: /* cmpxchg8b */
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        if ((mod == 3) || ((modrm & 0x38) != 0x8))
            goto illegal_op;
#ifdef TARGET_X86_64
        if (dflag == 2) {
            if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
                goto illegal_op;
            gen_jmp_im(pc_start - s->cs_base);
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_helper_cmpxchg16b(cpu_A0);
        } else
#endif
        {
            if (!(s->cpuid_features & CPUID_CX8))
                goto illegal_op;
            gen_jmp_im(pc_start - s->cs_base);
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_helper_cmpxchg8b(cpu_A0);
        }
        s->cc_op = CC_OP_EFLAGS;
        break;
        /**************************/
        /* push/pop */
    case 0x50 ... 0x57: /* push */
        gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
        gen_push_T0(s);
        break;
    case 0x58 ... 0x5f: /* pop */
        if (CODE64(s)) {
            ot = dflag ? OT_QUAD : OT_WORD;
        } else {
            ot = dflag + OT_WORD;
        }
        gen_pop_T0(s);
        /* NOTE: order is important for pop %sp */
        gen_pop_update(s);
        gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
        break;
    case 0x60: /* pusha */
        if (CODE64(s))
            goto illegal_op;
        gen_pusha(s);
        break;
    case 0x61: /* popa */
        if (CODE64(s))
            goto illegal_op;
        gen_popa(s);
        break;
    case 0x68: /* push Iv */
    case 0x6a:
        if (CODE64(s)) {
            ot = dflag ? OT_QUAD : OT_WORD;
        } else {
            ot = dflag + OT_WORD;
        }
        if (b == 0x68)
            val = insn_get(s, ot);
        else
            val = (int8_t)insn_get(s, OT_BYTE);
        gen_op_movl_T0_im(val);
        gen_push_T0(s);
        break;
    case 0x8f: /* pop Ev */
        if (CODE64(s)) {
            ot = dflag ? OT_QUAD : OT_WORD;
        } else {
            ot = dflag + OT_WORD;
        }
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        gen_pop_T0(s);
        if (mod == 3) {
            /* NOTE: order is important for pop %sp */
            gen_pop_update(s);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            /* NOTE: order is important too for MMU exceptions */
            s->popl_esp_hack = 1 << ot;
            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
            s->popl_esp_hack = 0;
            gen_pop_update(s);
        }
        break;
    case 0xc8: /* enter */
        {
            int level;
            val = lduw_code(s->pc);
            s->pc += 2;
            level = ldub_code(s->pc++);
            gen_enter(s, val, level);
        }
        break;
    case 0xc9: /* leave */
        /* XXX: exception not precise (ESP is updated before potential exception) */
        if (CODE64(s)) {
            gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
            gen_op_mov_reg_T0(OT_QUAD, R_ESP);
        } else if (s->ss32) {
            gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
            gen_op_mov_reg_T0(OT_LONG, R_ESP);
        } else {
            gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
            gen_op_mov_reg_T0(OT_WORD, R_ESP);
        }
        gen_pop_T0(s);
        if (CODE64(s)) {
            ot = dflag ? OT_QUAD : OT_WORD;
        } else {
            ot = dflag + OT_WORD;
        }
        gen_op_mov_reg_T0(ot, R_EBP);
        gen_pop_update(s);
        break;
5118 case 0x06: /* push es */
5119 case 0x0e: /* push cs */
5120 case 0x16: /* push ss */
5121 case 0x1e: /* push ds */
5124 gen_op_movl_T0_seg(b
>> 3);
5127 case 0x1a0: /* push fs */
5128 case 0x1a8: /* push gs */
5129 gen_op_movl_T0_seg((b
>> 3) & 7);
5132 case 0x07: /* pop es */
5133 case 0x17: /* pop ss */
5134 case 0x1f: /* pop ds */
5139 gen_movl_seg_T0(s
, reg
, pc_start
- s
->cs_base
);
5142 /* if reg == SS, inhibit interrupts/trace. */
5143 /* If several instructions disable interrupts, only the
5145 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
5146 gen_helper_set_inhibit_irq(cpu_env
);
5150 gen_jmp_im(s
->pc
- s
->cs_base
);
5154 case 0x1a1: /* pop fs */
5155 case 0x1a9: /* pop gs */
5157 gen_movl_seg_T0(s
, (b
>> 3) & 7, pc_start
- s
->cs_base
);
5160 gen_jmp_im(s
->pc
- s
->cs_base
);
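        /* NOTE: the pop cases above read the stack value before the ESP
           update (gen_pop_update) is emitted; this ordering keeps "pop %sp"
           and pops whose memory access faults restartable, as the NOTEs in
           the cases indicate. */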
        /**************************/
        /* mov */
        case 0x88:
        case 0x89: /* mov Gv, Ev */
            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;

            /* generate a generic store */
            gen_ldst_modrm(s, modrm, ot, reg, 1);
            break;
        case 0xc6:
        case 0xc7: /* mov Ev, Iv */
            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;
            modrm = ldub_code(s->pc++);
            mod = (modrm >> 6) & 3;
            if (mod != 3) {
                s->rip_offset = insn_const_size(ot);
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            }
            val = insn_get(s, ot);
            gen_op_movl_T0_im(val);
            if (mod != 3)
                gen_op_st_T0_A0(ot + s->mem_index);
            else
                gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
            break;
        case 0x8a:
        case 0x8b: /* mov Ev, Gv */
            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = OT_WORD + dflag;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;

            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0x8e: /* mov seg, Gv */
            modrm = ldub_code(s->pc++);
            reg = (modrm >> 3) & 7;
            if (reg >= 6 || reg == R_CS)
                goto illegal_op;
            gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
            gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
            if (reg == R_SS) {
                /* if reg == SS, inhibit interrupts/trace */
                /* If several instructions disable interrupts, only the
                   _first_ does it */
                if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                    gen_helper_set_inhibit_irq(cpu_env);
                s->tf = 0;
            }
            if (s->is_jmp) {
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            }
            break;
        case 0x8c: /* mov Gv, seg */
            modrm = ldub_code(s->pc++);
            reg = (modrm >> 3) & 7;
            mod = (modrm >> 6) & 3;
            if (reg >= 6)
                goto illegal_op;
            gen_op_movl_T0_seg(reg);
            if (mod == 3)
                ot = OT_WORD + dflag;
            else
                ot = OT_WORD;
            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
            break;

        case 0x1b6: /* movzbS Gv, Eb */
        case 0x1b7: /* movzwS Gv, Eb */
        case 0x1be: /* movsbS Gv, Eb */
        case 0x1bf: /* movswS Gv, Eb */
            {
                int d_ot;
                /* d_ot is the size of destination */
                d_ot = dflag + OT_WORD;
                /* ot is the size of source */
                ot = (b & 1) + OT_BYTE;
                modrm = ldub_code(s->pc++);
                reg = ((modrm >> 3) & 7) | rex_r;
                mod = (modrm >> 6) & 3;
                rm = (modrm & 7) | REX_B(s);

                if (mod == 3) {
                    gen_op_mov_TN_reg(ot, 0, rm);
                    switch(ot | (b & 8)) {
                    case OT_BYTE:
                        tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
                        break;
                    case OT_BYTE | 8:
                        tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
                        break;
                    case OT_WORD:
                        tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
                        break;
                    default:
                    case OT_WORD | 8:
                        tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
                        break;
                    }
                    gen_op_mov_reg_T0(d_ot, reg);
                } else {
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    if (b & 8) {
                        gen_op_lds_T0_A0(ot + s->mem_index);
                    } else {
                        gen_op_ldu_T0_A0(ot + s->mem_index);
                    }
                    gen_op_mov_reg_T0(d_ot, reg);
                }
            }
            break;

        case 0x8d: /* lea */
            ot = dflag + OT_WORD;
            modrm = ldub_code(s->pc++);
            mod = (modrm >> 6) & 3;
            if (mod == 3)
                goto illegal_op;
            reg = ((modrm >> 3) & 7) | rex_r;
            /* we must ensure that no segment is added */
            s->override = -1;
            val = s->addseg;
            s->addseg = 0;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            s->addseg = val;
            gen_op_mov_reg_A0(ot - OT_WORD, reg);
            break;

        case 0xa0: /* mov EAX, Ov */
        case 0xa1:
        case 0xa2: /* mov Ov, EAX */
        case 0xa3:
            {
                target_ulong offset_addr;

                if ((b & 1) == 0)
                    ot = OT_BYTE;
                else
                    ot = dflag + OT_WORD;
#ifdef TARGET_X86_64
                if (s->aflag == 2) {
                    offset_addr = ldq_code(s->pc);
                    s->pc += 8;
                    gen_op_movq_A0_im(offset_addr);
                } else
#endif
                {
                    if (s->aflag) {
                        offset_addr = insn_get(s, OT_LONG);
                    } else {
                        offset_addr = insn_get(s, OT_WORD);
                    }
                    gen_op_movl_A0_im(offset_addr);
                }
                gen_add_A0_ds_seg(s);
                if ((b & 2) == 0) {
                    gen_op_ld_T0_A0(ot + s->mem_index);
                    gen_op_mov_reg_T0(ot, R_EAX);
                } else {
                    gen_op_mov_TN_reg(ot, 0, R_EAX);
                    gen_op_st_T0_A0(ot + s->mem_index);
                }
            }
            break;
        case 0xd7: /* xlat */
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_reg(R_EBX);
                gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
                tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
                tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
            } else
#endif
            {
                gen_op_movl_A0_reg(R_EBX);
                gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
                tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
                tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
                if (s->aflag == 0)
                    gen_op_andl_A0_ffff();
                else
                    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
            }
            gen_add_A0_ds_seg(s);
            gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
            gen_op_mov_reg_T0(OT_BYTE, R_EAX);
            break;
        case 0xb0 ... 0xb7: /* mov R, Ib */
            val = insn_get(s, OT_BYTE);
            gen_op_movl_T0_im(val);
            gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
            break;
        case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
            if (dflag == 2) {
                uint64_t tmp;
                /* 64 bit case */
                tmp = ldq_code(s->pc);
                s->pc += 8;
                reg = (b & 7) | REX_B(s);
                gen_movtl_T0_im(tmp);
                gen_op_mov_reg_T0(OT_QUAD, reg);
            } else
#endif
            {
                ot = dflag ? OT_LONG : OT_WORD;
                val = insn_get(s, ot);
                reg = (b & 7) | REX_B(s);
                gen_op_movl_T0_im(val);
                gen_op_mov_reg_T0(ot, reg);
            }
            break;

        case 0x91 ... 0x97: /* xchg R, EAX */
        do_xchg_reg_eax:
            ot = dflag + OT_WORD;
            reg = (b & 7) | REX_B(s);
            rm = R_EAX;
            goto do_xchg_reg;
        case 0x86:
        case 0x87: /* xchg Ev, Gv */
            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            if (mod == 3) {
                rm = (modrm & 7) | REX_B(s);
            do_xchg_reg:
                gen_op_mov_TN_reg(ot, 0, reg);
                gen_op_mov_TN_reg(ot, 1, rm);
                gen_op_mov_reg_T0(ot, rm);
                gen_op_mov_reg_T1(ot, reg);
            } else {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_op_mov_TN_reg(ot, 0, reg);
                /* for xchg, lock is implicit */
                if (!(prefixes & PREFIX_LOCK))
                    gen_helper_lock();
                gen_op_ld_T1_A0(ot + s->mem_index);
                gen_op_st_T0_A0(ot + s->mem_index);
                if (!(prefixes & PREFIX_LOCK))
                    gen_helper_unlock();
                gen_op_mov_reg_T1(ot, reg);
            }
            break;
        case 0xc4: /* les Gv */
            if (CODE64(s))
                goto illegal_op;
            op = R_ES;
            goto do_lxx;
        case 0xc5: /* lds Gv */
            if (CODE64(s))
                goto illegal_op;
            op = R_DS;
            goto do_lxx;
        case 0x1b2: /* lss Gv */
            op = R_SS;
            goto do_lxx;
        case 0x1b4: /* lfs Gv */
            op = R_FS;
            goto do_lxx;
        case 0x1b5: /* lgs Gv */
            op = R_GS;
        do_lxx:
            ot = dflag ? OT_LONG : OT_WORD;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
            /* load the segment first to handle exceptions properly */
            gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
            gen_movl_seg_T0(s, op, pc_start - s->cs_base);
            /* then put the data */
            gen_op_mov_reg_T1(ot, reg);
            if (s->is_jmp) {
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            }
            break;
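        /* NOTE: most mov forms funnel through gen_ldst_modrm(), which either
           touches a register directly or computes the effective address with
           gen_lea_modrm() and emits the load/store; s->mem_index selects the
           memory access functions for the current privilege level. */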
        /************************/
        /* shifts */
        case 0xc0:
        case 0xc1:
            /* shift Ev,Ib */
            shift = 2;
        grp2:
            {
                if ((b & 1) == 0)
                    ot = OT_BYTE;
                else
                    ot = dflag + OT_WORD;

                modrm = ldub_code(s->pc++);
                mod = (modrm >> 6) & 3;
                op = (modrm >> 3) & 7;

                if (mod != 3) {
                    if (shift == 2) {
                        s->rip_offset = 1;
                    }
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    opreg = OR_TMP0;
                } else {
                    opreg = (modrm & 7) | REX_B(s);
                }

                /* simpler op */
                if (shift == 0) {
                    gen_shift(s, op, ot, opreg, OR_ECX);
                } else {
                    if (shift == 2) {
                        shift = ldub_code(s->pc++);
                    }
                    gen_shifti(s, op, ot, opreg, shift);
                }
            }
            break;

        case 0x1a4: /* shld imm */
            op = 0;
            shift = 1;
            goto do_shiftd;
        case 0x1a5: /* shld cl */
            op = 0;
            shift = 0;
            goto do_shiftd;
        case 0x1ac: /* shrd imm */
            op = 1;
            shift = 1;
            goto do_shiftd;
        case 0x1ad: /* shrd cl */
            op = 1;
            shift = 0;
        do_shiftd:
            ot = dflag + OT_WORD;
            modrm = ldub_code(s->pc++);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                opreg = OR_TMP0;
            } else {
                opreg = rm;
            }
            gen_op_mov_TN_reg(ot, 1, reg);

            if (shift) {
                val = ldub_code(s->pc++);
                tcg_gen_movi_tl(cpu_T3, val);
            } else {
                tcg_gen_mov_tl(cpu_T3, cpu_regs[R_ECX]);
            }
            gen_shiftd_rm_T1_T3(s, ot, opreg, op);
            break;
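        /* NOTE: for shld/shrd the shift count is materialized in cpu_T3
           (either an immediate or a copy of CL) so that
           gen_shiftd_rm_T1_T3() can share one code path for the imm and cl
           variants. */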
        /************************/
        /* floats */
        case 0xd8 ... 0xdf:
            if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
                /* if CR0.EM or CR0.TS are set, generate an FPU exception */
                /* XXX: what to do if illegal op ? */
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            modrm = ldub_code(s->pc++);
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            op = ((b & 7) << 3) | ((modrm >> 3) & 7);
            if (mod != 3) {
                /* memory op */
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                switch(op) {
                case 0x00 ... 0x07: /* fxxxs */
                case 0x10 ... 0x17: /* fixxxl */
                case 0x20 ... 0x27: /* fxxxl */
                case 0x30 ... 0x37: /* fixxx */
                    {
                        int op1;
                        op1 = op & 7;

                        switch(op >> 4) {
                        case 0:
                            gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                            gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
                            break;
                        case 1:
                            gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                            gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
                            break;
                        case 2:
                            tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                              (s->mem_index >> 2) - 1);
                            gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
                            break;
                        case 3:
                        default:
                            gen_op_lds_T0_A0(OT_WORD + s->mem_index);
                            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                            gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
                            break;
                        }

                        gen_helper_fp_arith_ST0_FT0(op1);
                        if (op1 == 3) {
                            /* fcomp needs pop */
                            gen_helper_fpop(cpu_env);
                        }
                    }
                    break;
                case 0x08: /* flds */
                case 0x0a: /* fsts */
                case 0x0b: /* fstps */
                case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
                case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
                case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
                    switch(op & 7) {
                    case 0:
                        switch(op >> 4) {
                        case 0:
                            gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                            gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
                            break;
                        case 1:
                            gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                            gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
                            break;
                        case 2:
                            tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                              (s->mem_index >> 2) - 1);
                            gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
                            break;
                        case 3:
                        default:
                            gen_op_lds_T0_A0(OT_WORD + s->mem_index);
                            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                            gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
                            break;
                        }
                        break;
                    case 1:
                        /* XXX: the corresponding CPUID bit must be tested ! */
                        switch(op >> 4) {
                        case 1:
                            gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
                            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                            gen_op_st_T0_A0(OT_LONG + s->mem_index);
                            break;
                        case 2:
                            gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
                            tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                              (s->mem_index >> 2) - 1);
                            break;
                        case 3:
                        default:
                            gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
                            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                            gen_op_st_T0_A0(OT_WORD + s->mem_index);
                            break;
                        }
                        gen_helper_fpop(cpu_env);
                        break;
                    default:
                        switch(op >> 4) {
                        case 0:
                            gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
                            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                            gen_op_st_T0_A0(OT_LONG + s->mem_index);
                            break;
                        case 1:
                            gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
                            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                            gen_op_st_T0_A0(OT_LONG + s->mem_index);
                            break;
                        case 2:
                            gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
                            tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                              (s->mem_index >> 2) - 1);
                            break;
                        case 3:
                        default:
                            gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
                            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                            gen_op_st_T0_A0(OT_WORD + s->mem_index);
                            break;
                        }
                        if ((op & 7) == 3)
                            gen_helper_fpop(cpu_env);
                        break;
                    }
                    break;
                case 0x0c: /* fldenv mem */
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
                    break;
                case 0x0d: /* fldcw mem */
                    gen_op_ld_T0_A0(OT_WORD + s->mem_index);
                    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                    gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
                    break;
                case 0x0e: /* fnstenv mem */
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
                    break;
                case 0x0f: /* fnstcw mem */
                    gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
                    tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                    gen_op_st_T0_A0(OT_WORD + s->mem_index);
                    break;
                case 0x1d: /* fldt mem */
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_helper_fldt_ST0(cpu_env, cpu_A0);
                    break;
                case 0x1f: /* fstpt mem */
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_helper_fstt_ST0(cpu_env, cpu_A0);
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x2c: /* frstor mem */
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
                    break;
                case 0x2e: /* fnsave mem */
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
                    break;
                case 0x2f: /* fnstsw mem */
                    gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
                    tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                    gen_op_st_T0_A0(OT_WORD + s->mem_index);
                    break;
                case 0x3c: /* fbld */
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_helper_fbld_ST0(cpu_env, cpu_A0);
                    break;
                case 0x3e: /* fbstp */
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_helper_fbst_ST0(cpu_env, cpu_A0);
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x3d: /* fildll */
                    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                      (s->mem_index >> 2) - 1);
                    gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
                    break;
                case 0x3f: /* fistpll */
                    gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
                    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                      (s->mem_index >> 2) - 1);
                    gen_helper_fpop(cpu_env);
                    break;
                default:
                    goto illegal_op;
                }
            } else {
                /* register float ops */
                opreg = rm;

                switch(op) {
                case 0x08: /* fld sti */
                    gen_helper_fpush(cpu_env);
                    gen_helper_fmov_ST0_STN(cpu_env,
                                            tcg_const_i32((opreg + 1) & 7));
                    break;
                case 0x09: /* fxchg sti */
                case 0x29: /* fxchg4 sti, undocumented op */
                case 0x39: /* fxchg7 sti, undocumented op */
                    gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
                    break;
                case 0x0a: /* grp d9/2 */
                    switch(rm) {
                    case 0: /* fnop */
                        /* check exceptions (FreeBSD FPU probe) */
                        if (s->cc_op != CC_OP_DYNAMIC)
                            gen_op_set_cc_op(s->cc_op);
                        gen_jmp_im(pc_start - s->cs_base);
                        gen_helper_fwait(cpu_env);
                        break;
                    default:
                        goto illegal_op;
                    }
                    break;
                case 0x0c: /* grp d9/4 */
                    switch(rm) {
                    case 0: /* fchs */
                        gen_helper_fchs_ST0(cpu_env);
                        break;
                    case 1: /* fabs */
                        gen_helper_fabs_ST0(cpu_env);
                        break;
                    case 4: /* ftst */
                        gen_helper_fldz_FT0(cpu_env);
                        gen_helper_fcom_ST0_FT0(cpu_env);
                        break;
                    case 5: /* fxam */
                        gen_helper_fxam_ST0(cpu_env);
                        break;
                    default:
                        goto illegal_op;
                    }
                    break;
                case 0x0d: /* grp d9/5 */
                    switch(rm) {
                    case 0:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fld1_ST0(cpu_env);
                        break;
                    case 1:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldl2t_ST0(cpu_env);
                        break;
                    case 2:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldl2e_ST0(cpu_env);
                        break;
                    case 3:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldpi_ST0(cpu_env);
                        break;
                    case 4:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldlg2_ST0(cpu_env);
                        break;
                    case 5:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldln2_ST0(cpu_env);
                        break;
                    case 6:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldz_ST0(cpu_env);
                        break;
                    default:
                        goto illegal_op;
                    }
                    break;
                case 0x0e: /* grp d9/6 */
                    switch(rm) {
                    case 0: /* f2xm1 */
                        gen_helper_f2xm1(cpu_env);
                        break;
                    case 1: /* fyl2x */
                        gen_helper_fyl2x(cpu_env);
                        break;
                    case 2: /* fptan */
                        gen_helper_fptan(cpu_env);
                        break;
                    case 3: /* fpatan */
                        gen_helper_fpatan(cpu_env);
                        break;
                    case 4: /* fxtract */
                        gen_helper_fxtract(cpu_env);
                        break;
                    case 5: /* fprem1 */
                        gen_helper_fprem1(cpu_env);
                        break;
                    case 6: /* fdecstp */
                        gen_helper_fdecstp(cpu_env);
                        break;
                    default:
                    case 7: /* fincstp */
                        gen_helper_fincstp(cpu_env);
                        break;
                    }
                    break;
                case 0x0f: /* grp d9/7 */
                    switch(rm) {
                    case 0: /* fprem */
                        gen_helper_fprem(cpu_env);
                        break;
                    case 1: /* fyl2xp1 */
                        gen_helper_fyl2xp1(cpu_env);
                        break;
                    case 2: /* fsqrt */
                        gen_helper_fsqrt(cpu_env);
                        break;
                    case 3: /* fsincos */
                        gen_helper_fsincos(cpu_env);
                        break;
                    case 5: /* fscale */
                        gen_helper_fscale(cpu_env);
                        break;
                    case 4: /* frndint */
                        gen_helper_frndint(cpu_env);
                        break;
                    case 6: /* fsin */
                        gen_helper_fsin(cpu_env);
                        break;
                    default:
                    case 7: /* fcos */
                        gen_helper_fcos(cpu_env);
                        break;
                    }
                    break;
                case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
                case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
                case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
                    {
                        int op1;

                        op1 = op & 7;
                        if (op >= 0x20) {
                            gen_helper_fp_arith_STN_ST0(op1, opreg);
                            if (op >= 0x30)
                                gen_helper_fpop(cpu_env);
                        } else {
                            gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                            gen_helper_fp_arith_ST0_FT0(op1);
                        }
                    }
                    break;
                case 0x02: /* fcom */
                case 0x22: /* fcom2, undocumented op */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                    gen_helper_fcom_ST0_FT0(cpu_env);
                    break;
                case 0x03: /* fcomp */
                case 0x23: /* fcomp3, undocumented op */
                case 0x32: /* fcomp5, undocumented op */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                    gen_helper_fcom_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x15: /* da/5 */
                    switch(rm) {
                    case 1: /* fucompp */
                        gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
                        gen_helper_fucom_ST0_FT0(cpu_env);
                        gen_helper_fpop(cpu_env);
                        gen_helper_fpop(cpu_env);
                        break;
                    default:
                        goto illegal_op;
                    }
                    break;
                case 0x1c:
                    switch(rm) {
                    case 0: /* feni (287 only, just do nop here) */
                        break;
                    case 1: /* fdisi (287 only, just do nop here) */
                        break;
                    case 2: /* fclex */
                        gen_helper_fclex(cpu_env);
                        break;
                    case 3: /* fninit */
                        gen_helper_fninit(cpu_env);
                        break;
                    case 4: /* fsetpm (287 only, just do nop here) */
                        break;
                    default:
                        goto illegal_op;
                    }
                    break;
                case 0x1d: /* fucomi */
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                    gen_helper_fucomi_ST0_FT0(cpu_env);
                    s->cc_op = CC_OP_EFLAGS;
                    break;
                case 0x1e: /* fcomi */
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                    gen_helper_fcomi_ST0_FT0(cpu_env);
                    s->cc_op = CC_OP_EFLAGS;
                    break;
                case 0x28: /* ffree sti */
                    gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
                    break;
                case 0x2a: /* fst sti */
                    gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
                    break;
                case 0x2b: /* fstp sti */
                case 0x0b: /* fstp1 sti, undocumented op */
                case 0x3a: /* fstp8 sti, undocumented op */
                case 0x3b: /* fstp9 sti, undocumented op */
                    gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x2c: /* fucom st(i) */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                    gen_helper_fucom_ST0_FT0(cpu_env);
                    break;
                case 0x2d: /* fucomp st(i) */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                    gen_helper_fucom_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x33: /* de/3 */
                    switch(rm) {
                    case 1: /* fcompp */
                        gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
                        gen_helper_fcom_ST0_FT0(cpu_env);
                        gen_helper_fpop(cpu_env);
                        gen_helper_fpop(cpu_env);
                        break;
                    default:
                        goto illegal_op;
                    }
                    break;
                case 0x38: /* ffreep sti, undocumented op */
                    gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x3c: /* df/4 */
                    switch(rm) {
                    case 0:
                        gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_mov_reg_T0(OT_WORD, R_EAX);
                        break;
                    default:
                        goto illegal_op;
                    }
                    break;
                case 0x3d: /* fucomip */
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                    gen_helper_fucomi_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    s->cc_op = CC_OP_EFLAGS;
                    break;
                case 0x3e: /* fcomip */
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                    gen_helper_fcomi_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    s->cc_op = CC_OP_EFLAGS;
                    break;
                case 0x10 ... 0x13: /* fcmovxx */
                case 0x18 ... 0x1b:
                    {
                        int op1, l1;
                        static const uint8_t fcmov_cc[8] = {
                            (JCC_B << 1),
                            (JCC_Z << 1),
                            (JCC_BE << 1),
                            (JCC_P << 1),
                        };
                        op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
                        l1 = gen_new_label();
                        gen_jcc1(s, s->cc_op, op1, l1);
                        gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
                        gen_set_label(l1);
                    }
                    break;
                default:
                    goto illegal_op;
                }
            }
            break;
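        /* NOTE: all x87 operations go through helpers since TCG has no
           native floating point ops; helpers that may raise an FPU exception
           are preceded by gen_jmp_im(pc_start - s->cs_base) so that EIP
           points at the faulting instruction when the exception is taken. */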
        /************************/
        /* string ops */
        case 0xa4: /* movsS */
        case 0xa5:
            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;

            if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
                gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
            } else {
                gen_movs(s, ot);
            }
            break;
        case 0xaa: /* stosS */
        case 0xab:
            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;

            if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
                gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
            } else {
                gen_stos(s, ot);
            }
            break;
        case 0xac: /* lodsS */
        case 0xad:
            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;
            if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
                gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
            } else {
                gen_lods(s, ot);
            }
            break;
        case 0xae: /* scasS */
        case 0xaf:
            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;
            if (prefixes & PREFIX_REPNZ) {
                gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
            } else if (prefixes & PREFIX_REPZ) {
                gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
            } else {
                gen_scas(s, ot);
                s->cc_op = CC_OP_SUBB + ot;
            }
            break;

        case 0xa6: /* cmpsS */
        case 0xa7:
            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;
            if (prefixes & PREFIX_REPNZ) {
                gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
            } else if (prefixes & PREFIX_REPZ) {
                gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
            } else {
                gen_cmps(s, ot);
                s->cc_op = CC_OP_SUBB + ot;
            }
            break;
        case 0x6c: /* insS */
        case 0x6d:
            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag ? OT_LONG : OT_WORD;
            gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
            gen_op_andl_T0_ffff();
            gen_check_io(s, ot, pc_start - s->cs_base,
                         SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
            if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
                gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
            } else {
                gen_ins(s, ot);
                if (use_icount) {
                    gen_jmp(s, s->pc - s->cs_base);
                }
            }
            break;
        case 0x6e: /* outsS */
        case 0x6f:
            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag ? OT_LONG : OT_WORD;
            gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
            gen_op_andl_T0_ffff();
            gen_check_io(s, ot, pc_start - s->cs_base,
                         svm_is_rep(prefixes) | 4);
            if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
                gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
            } else {
                gen_outs(s, ot);
                if (use_icount) {
                    gen_jmp(s, s->pc - s->cs_base);
                }
            }
            break;
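        /* NOTE: with a REP prefix, gen_repz_* translates a single iteration:
           the generated code tests and decrements ECX and jumps back to the
           current instruction, so single-stepping and pending interrupts
           behave correctly in the middle of a long string operation. */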
        /************************/
        /* port I/O */
        case 0xe4:
        case 0xe5:
            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag ? OT_LONG : OT_WORD;
            val = ldub_code(s->pc++);
            gen_op_movl_T0_im(val);
            gen_check_io(s, ot, pc_start - s->cs_base,
                         SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
            if (use_icount)
                gen_io_start();
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
            gen_op_mov_reg_T1(ot, R_EAX);
            if (use_icount) {
                gen_io_end();
                gen_jmp(s, s->pc - s->cs_base);
            }
            break;
        case 0xe6:
        case 0xe7:
            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag ? OT_LONG : OT_WORD;
            val = ldub_code(s->pc++);
            gen_op_movl_T0_im(val);
            gen_check_io(s, ot, pc_start - s->cs_base,
                         svm_is_rep(prefixes));
            gen_op_mov_TN_reg(ot, 1, R_EAX);
            if (use_icount)
                gen_io_start();
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
            gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
            if (use_icount) {
                gen_io_end();
                gen_jmp(s, s->pc - s->cs_base);
            }
            break;
        case 0xec:
        case 0xed:
            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag ? OT_LONG : OT_WORD;
            gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
            gen_op_andl_T0_ffff();
            gen_check_io(s, ot, pc_start - s->cs_base,
                         SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
            if (use_icount)
                gen_io_start();
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
            gen_op_mov_reg_T1(ot, R_EAX);
            if (use_icount) {
                gen_io_end();
                gen_jmp(s, s->pc - s->cs_base);
            }
            break;
        case 0xee:
        case 0xef:
            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag ? OT_LONG : OT_WORD;
            gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
            gen_op_andl_T0_ffff();
            gen_check_io(s, ot, pc_start - s->cs_base,
                         svm_is_rep(prefixes));
            gen_op_mov_TN_reg(ot, 1, R_EAX);
            if (use_icount)
                gen_io_start();
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
            gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
            if (use_icount) {
                gen_io_end();
                gen_jmp(s, s->pc - s->cs_base);
            }
            break;
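        /* NOTE: gen_check_io() validates I/O permission (IOPL, and the TSS
           I/O bitmap in protected mode) and emits the SVM IOIO intercept
           check before any in/out helper is called. */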
        /************************/
        /* control */
        case 0xc2: /* ret im */
            val = ldsw_code(s->pc);
            s->pc += 2;
            gen_pop_T0(s);
            if (CODE64(s) && s->dflag)
                s->dflag = 2;
            gen_stack_update(s, val + (2 << s->dflag));
            if (s->dflag == 0)
                gen_op_andl_T0_ffff();
            gen_op_jmp_T0();
            gen_eob(s);
            break;
        case 0xc3: /* ret */
            gen_pop_T0(s);
            gen_pop_update(s);
            if (s->dflag == 0)
                gen_op_andl_T0_ffff();
            gen_op_jmp_T0();
            gen_eob(s);
            break;
        case 0xca: /* lret im */
            val = ldsw_code(s->pc);
            s->pc += 2;
        do_lret:
            if (s->pe && !s->vm86) {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_lret_protected(tcg_const_i32(s->dflag),
                                          tcg_const_i32(val));
            } else {
                gen_stack_A0(s);
                /* pop offset */
                gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
                if (s->dflag == 0)
                    gen_op_andl_T0_ffff();
                /* NOTE: keeping EIP updated is not a problem in case of
                   exception */
                gen_op_jmp_T0();
                /* pop selector */
                gen_op_addl_A0_im(2 << s->dflag);
                gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
                gen_op_movl_seg_T0_vm(R_CS);
                /* add stack offset */
                gen_stack_update(s, val + (4 << s->dflag));
            }
            gen_eob(s);
            break;
        case 0xcb: /* lret */
            val = 0;
            goto do_lret;
        case 0xcf: /* iret */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
            if (!s->pe) {
                /* real mode */
                gen_helper_iret_real(tcg_const_i32(s->dflag));
                s->cc_op = CC_OP_EFLAGS;
            } else if (s->vm86) {
                if (s->iopl != 3) {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                } else {
                    gen_helper_iret_real(tcg_const_i32(s->dflag));
                    s->cc_op = CC_OP_EFLAGS;
                }
            } else {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_iret_protected(tcg_const_i32(s->dflag),
                                          tcg_const_i32(s->pc - s->cs_base));
                s->cc_op = CC_OP_EFLAGS;
            }
            gen_eob(s);
            break;
        case 0xe8: /* call im */
            {
                if (dflag)
                    tval = (int32_t)insn_get(s, OT_LONG);
                else
                    tval = (int16_t)insn_get(s, OT_WORD);
                next_eip = s->pc - s->cs_base;
                tval += next_eip;
                if (s->dflag == 0)
                    tval &= 0xffff;
                else if (!CODE64(s))
                    tval &= 0xffffffff;
                gen_movtl_T0_im(next_eip);
                gen_push_T0(s);
                gen_jmp(s, tval);
            }
            break;
        case 0x9a: /* lcall im */
            {
                unsigned int selector, offset;

                if (CODE64(s))
                    goto illegal_op;
                ot = dflag ? OT_LONG : OT_WORD;
                offset = insn_get(s, ot);
                selector = insn_get(s, OT_WORD);

                gen_op_movl_T0_im(selector);
                gen_op_movl_T1_imu(offset);
            }
            goto do_lcall;
        case 0xe9: /* jmp im */
            if (dflag)
                tval = (int32_t)insn_get(s, OT_LONG);
            else
                tval = (int16_t)insn_get(s, OT_WORD);
            tval += s->pc - s->cs_base;
            if (s->dflag == 0)
                tval &= 0xffff;
            else if (!CODE64(s))
                tval &= 0xffffffff;
            gen_jmp(s, tval);
            break;
        case 0xea: /* ljmp im */
            {
                unsigned int selector, offset;

                if (CODE64(s))
                    goto illegal_op;
                ot = dflag ? OT_LONG : OT_WORD;
                offset = insn_get(s, ot);
                selector = insn_get(s, OT_WORD);

                gen_op_movl_T0_im(selector);
                gen_op_movl_T1_imu(offset);
            }
            goto do_ljmp;
        case 0xeb: /* jmp Jb */
            tval = (int8_t)insn_get(s, OT_BYTE);
            tval += s->pc - s->cs_base;
            if (s->dflag == 0)
                tval &= 0xffff;
            gen_jmp(s, tval);
            break;
        case 0x70 ... 0x7f: /* jcc Jb */
            tval = (int8_t)insn_get(s, OT_BYTE);
            goto do_jcc;
        case 0x180 ... 0x18f: /* jcc Jv */
            if (dflag) {
                tval = (int32_t)insn_get(s, OT_LONG);
            } else {
                tval = (int16_t)insn_get(s, OT_WORD);
            }
        do_jcc:
            next_eip = s->pc - s->cs_base;
            tval += next_eip;
            if (s->dflag == 0)
                tval &= 0xffff;
            gen_jcc(s, b, tval, next_eip);
            break;

        case 0x190 ... 0x19f: /* setcc Gv */
            modrm = ldub_code(s->pc++);
            gen_setcc(s, b);
            gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1);
            break;
        case 0x140 ... 0x14f: /* cmov Gv, Ev */
            {
                int l1;
                TCGv t0;

                ot = dflag + OT_WORD;
                modrm = ldub_code(s->pc++);
                reg = ((modrm >> 3) & 7) | rex_r;
                mod = (modrm >> 6) & 3;
                t0 = tcg_temp_local_new();
                if (mod != 3) {
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
                } else {
                    rm = (modrm & 7) | REX_B(s);
                    gen_op_mov_v_reg(ot, t0, rm);
                }
#ifdef TARGET_X86_64
                if (ot == OT_LONG) {
                    /* XXX: specific Intel behaviour ? */
                    l1 = gen_new_label();
                    gen_jcc1(s, s->cc_op, b ^ 1, l1);
                    tcg_gen_mov_tl(cpu_regs[reg], t0);
                    gen_set_label(l1);
                    tcg_gen_ext32u_tl(cpu_regs[reg], cpu_regs[reg]);
                } else
#endif
                {
                    l1 = gen_new_label();
                    gen_jcc1(s, s->cc_op, b ^ 1, l1);
                    gen_op_mov_reg_v(ot, reg, t0);
                    gen_set_label(l1);
                }
                tcg_temp_free(t0);
            }
            break;
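        /* NOTE: direct jumps and calls end the translation block through
           gen_jmp()/gen_eob(); when s->jmp_opt is set the block is chained
           directly to the target block, otherwise execution returns to the
           main loop to find the next block. */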
        /************************/
        /* flags */
        case 0x9c: /* pushf */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
            if (s->vm86 && s->iopl != 3) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_helper_read_eflags(cpu_T[0], cpu_env);
                gen_push_T0(s);
            }
            break;
        case 0x9d: /* popf */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
            if (s->vm86 && s->iopl != 3) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_pop_T0(s);
                if (s->cpl == 0) {
                    if (s->dflag) {
                        gen_helper_write_eflags(cpu_env, cpu_T[0],
                                           tcg_const_i32((TF_MASK | AC_MASK |
                                                          ID_MASK | NT_MASK |
                                                          IF_MASK |
                                                          IOPL_MASK)));
                    } else {
                        gen_helper_write_eflags(cpu_env, cpu_T[0],
                                           tcg_const_i32((TF_MASK | AC_MASK |
                                                          ID_MASK | NT_MASK |
                                                          IF_MASK | IOPL_MASK)
                                                         & 0xffff));
                    }
                } else {
                    if (s->cpl <= s->iopl) {
                        if (s->dflag) {
                            gen_helper_write_eflags(cpu_env, cpu_T[0],
                                               tcg_const_i32((TF_MASK |
                                                              AC_MASK |
                                                              ID_MASK |
                                                              NT_MASK |
                                                              IF_MASK)));
                        } else {
                            gen_helper_write_eflags(cpu_env, cpu_T[0],
                                               tcg_const_i32((TF_MASK |
                                                              AC_MASK |
                                                              ID_MASK |
                                                              NT_MASK |
                                                              IF_MASK)
                                                             & 0xffff));
                        }
                    } else {
                        if (s->dflag) {
                            gen_helper_write_eflags(cpu_env, cpu_T[0],
                                               tcg_const_i32((TF_MASK | AC_MASK |
                                                              ID_MASK | NT_MASK)));
                        } else {
                            gen_helper_write_eflags(cpu_env, cpu_T[0],
                                               tcg_const_i32((TF_MASK | AC_MASK |
                                                              ID_MASK | NT_MASK)
                                                             & 0xffff));
                        }
                    }
                }
                gen_pop_update(s);
                s->cc_op = CC_OP_EFLAGS;
                /* abort translation because TF flag may change */
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            }
            break;
        case 0x9e: /* sahf */
            if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
                goto illegal_op;
            gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_compute_eflags(cpu_cc_src);
            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
            tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
            tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
            s->cc_op = CC_OP_EFLAGS;
            break;
        case 0x9f: /* lahf */
            if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
                goto illegal_op;
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_compute_eflags(cpu_T[0]);
            /* Note: gen_compute_eflags() only gives the condition codes */
            tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02);
            gen_op_mov_reg_T0(OT_BYTE, R_AH);
            break;
        case 0xf5: /* cmc */
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_compute_eflags(cpu_cc_src);
            tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
            s->cc_op = CC_OP_EFLAGS;
            break;
        case 0xf8: /* clc */
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_compute_eflags(cpu_cc_src);
            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
            s->cc_op = CC_OP_EFLAGS;
            break;
        case 0xf9: /* stc */
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_compute_eflags(cpu_cc_src);
            tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
            s->cc_op = CC_OP_EFLAGS;
            break;
        case 0xfc: /* cld */
            tcg_gen_movi_i32(cpu_tmp2_i32, 1);
            tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
            break;
        case 0xfd: /* std */
            tcg_gen_movi_i32(cpu_tmp2_i32, -1);
            tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
            break;
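        /* NOTE: condition codes are evaluated lazily: cc_op/cc_src/cc_dst
           describe the last flag-setting operation, and gen_compute_eflags()
           materializes the flags value only when an instruction such as
           sahf/cmc/clc/stc actually consumes or rewrites them. */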
        /************************/
        /* bit operations */
        case 0x1ba: /* bt/bts/btr/btc Gv, im */
            ot = dflag + OT_WORD;
            modrm = ldub_code(s->pc++);
            op = (modrm >> 3) & 7;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            if (mod != 3) {
                s->rip_offset = 1;
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_T0_A0(ot + s->mem_index);
            } else {
                gen_op_mov_TN_reg(ot, 0, rm);
            }
            /* load shift */
            val = ldub_code(s->pc++);
            gen_op_movl_T1_im(val);
            if (op < 4)
                goto illegal_op;
            op -= 4;
            goto bt_op;
        case 0x1a3: /* bt Gv, Ev */
            op = 0;
            goto do_btx;
        case 0x1ab: /* bts */
            op = 1;
            goto do_btx;
        case 0x1b3: /* btr */
            op = 2;
            goto do_btx;
        case 0x1bb: /* btc */
            op = 3;
        do_btx:
            ot = dflag + OT_WORD;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_TN_reg(OT_LONG, 1, reg);
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                /* specific case: we need to add a displacement */
                gen_exts(ot, cpu_T[1]);
                tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
                tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
                tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
                gen_op_ld_T0_A0(ot + s->mem_index);
            } else {
                gen_op_mov_TN_reg(ot, 0, rm);
            }
        bt_op:
            tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
            switch(op) {
            case 0:
                tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
                tcg_gen_movi_tl(cpu_cc_dst, 0);
                break;
            case 1:
                tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
                tcg_gen_movi_tl(cpu_tmp0, 1);
                tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
                tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
                break;
            case 2:
                tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
                tcg_gen_movi_tl(cpu_tmp0, 1);
                tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
                tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
                tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
                break;
            default:
            case 3:
                tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
                tcg_gen_movi_tl(cpu_tmp0, 1);
                tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
                tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
                break;
            }
            s->cc_op = CC_OP_SARB + ot;
            if (op != 0) {
                if (mod != 3)
                    gen_op_st_T0_A0(ot + s->mem_index);
                else
                    gen_op_mov_reg_T0(ot, rm);
                tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
                tcg_gen_movi_tl(cpu_cc_dst, 0);
            }
            break;
        case 0x1bc: /* bsf */
        case 0x1bd: /* bsr */
            {
                int label1;
                TCGv t0;

                ot = dflag + OT_WORD;
                modrm = ldub_code(s->pc++);
                reg = ((modrm >> 3) & 7) | rex_r;
                gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
                gen_extu(ot, cpu_T[0]);
                t0 = tcg_temp_local_new();
                tcg_gen_mov_tl(t0, cpu_T[0]);
                if ((b & 1) && (prefixes & PREFIX_REPZ) &&
                    (s->cpuid_ext3_features & CPUID_EXT3_ABM)) {
                    switch(ot) {
                    case OT_WORD: gen_helper_lzcnt(cpu_T[0], t0,
                        tcg_const_i32(16)); break;
                    case OT_LONG: gen_helper_lzcnt(cpu_T[0], t0,
                        tcg_const_i32(32)); break;
                    case OT_QUAD: gen_helper_lzcnt(cpu_T[0], t0,
                        tcg_const_i32(64)); break;
                    }
                    gen_op_mov_reg_T0(ot, reg);
                } else {
                    label1 = gen_new_label();
                    tcg_gen_movi_tl(cpu_cc_dst, 0);
                    tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
                    if (b & 1) {
                        gen_helper_bsr(cpu_T[0], t0);
                    } else {
                        gen_helper_bsf(cpu_T[0], t0);
                    }
                    gen_op_mov_reg_T0(ot, reg);
                    tcg_gen_movi_tl(cpu_cc_dst, 1);
                    gen_set_label(label1);
                    tcg_gen_discard_tl(cpu_cc_src);
                    s->cc_op = CC_OP_LOGICB + ot;
                }
                tcg_temp_free(t0);
            }
            break;
        /************************/
        /* bcd */
        case 0x27: /* daa */
            if (CODE64(s))
                goto illegal_op;
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_helper_daa();
            s->cc_op = CC_OP_EFLAGS;
            break;
        case 0x2f: /* das */
            if (CODE64(s))
                goto illegal_op;
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_helper_das();
            s->cc_op = CC_OP_EFLAGS;
            break;
        case 0x37: /* aaa */
            if (CODE64(s))
                goto illegal_op;
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_helper_aaa();
            s->cc_op = CC_OP_EFLAGS;
            break;
        case 0x3f: /* aas */
            if (CODE64(s))
                goto illegal_op;
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_helper_aas();
            s->cc_op = CC_OP_EFLAGS;
            break;
        case 0xd4: /* aam */
            if (CODE64(s))
                goto illegal_op;
            val = ldub_code(s->pc++);
            if (val == 0) {
                gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
            } else {
                gen_helper_aam(tcg_const_i32(val));
                s->cc_op = CC_OP_LOGICB;
            }
            break;
        case 0xd5: /* aad */
            if (CODE64(s))
                goto illegal_op;
            val = ldub_code(s->pc++);
            gen_helper_aad(tcg_const_i32(val));
            s->cc_op = CC_OP_LOGICB;
            break;
        /************************/
        /* misc */
        case 0x90: /* nop */
            /* XXX: correct lock test for all insn */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
            if (REX_B(s)) {
                goto do_xchg_reg_eax;
            }
            if (prefixes & PREFIX_REPZ) {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
            }
            break;
        case 0x9b: /* fwait */
            if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
                (HF_MP_MASK | HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
            } else {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fwait(cpu_env);
            }
            break;
        case 0xcc: /* int3 */
            gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
            break;
        case 0xcd: /* int N */
            val = ldub_code(s->pc++);
            if (s->vm86 && s->iopl != 3) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
            }
            break;
        case 0xce: /* into */
            if (CODE64(s))
                goto illegal_op;
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_into(tcg_const_i32(s->pc - pc_start));
            break;
        case 0xf1: /* icebp (undocumented, exits to external debugger) */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
#if 1
            gen_debug(s, pc_start - s->cs_base);
#else
            /* start debug */
            tb_flush(cpu_single_env);
            cpu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
#endif
            break;
        case 0xfa: /* cli */
            if (!s->vm86) {
                if (s->cpl <= s->iopl) {
                    gen_helper_cli(cpu_env);
                } else {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                }
            } else {
                if (s->iopl == 3) {
                    gen_helper_cli(cpu_env);
                } else {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                }
            }
            break;
        case 0xfb: /* sti */
            if (!s->vm86) {
                if (s->cpl <= s->iopl) {
                gen_sti:
                    gen_helper_sti(cpu_env);
                    /* interruptions are enabled only the first insn after sti */
                    /* If several instructions disable interrupts, only the
                       _first_ does it */
                    if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                        gen_helper_set_inhibit_irq(cpu_env);
                    /* give a chance to handle pending irqs */
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                } else {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                }
            } else {
                if (s->iopl == 3) {
                    goto gen_sti;
                } else {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                }
            }
            break;
        case 0x62: /* bound */
            if (CODE64(s))
                goto illegal_op;
            ot = dflag ? OT_LONG : OT_WORD;
            modrm = ldub_code(s->pc++);
            reg = (modrm >> 3) & 7;
            mod = (modrm >> 6) & 3;
            if (mod == 3)
                goto illegal_op;
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_jmp_im(pc_start - s->cs_base);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            if (ot == OT_WORD)
                gen_helper_boundw(cpu_A0, cpu_tmp2_i32);
            else
                gen_helper_boundl(cpu_A0, cpu_tmp2_i32);
            break;
        case 0x1c8 ... 0x1cf: /* bswap reg */
            reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
            if (dflag == 2) {
                gen_op_mov_TN_reg(OT_QUAD, 0, reg);
                tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
                gen_op_mov_reg_T0(OT_QUAD, reg);
            } else
#endif
            {
                gen_op_mov_TN_reg(OT_LONG, 0, reg);
                tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
                gen_op_mov_reg_T0(OT_LONG, reg);
            }
            break;
        case 0xd6: /* salc */
            if (CODE64(s))
                goto illegal_op;
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_compute_eflags_c(cpu_T[0]);
            tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_BYTE, R_EAX);
            break;
        case 0xe0: /* loopnz */
        case 0xe1: /* loopz */
        case 0xe2: /* loop */
        case 0xe3: /* jecxz */
            {
                int l1, l2, l3;

                tval = (int8_t)insn_get(s, OT_BYTE);
                next_eip = s->pc - s->cs_base;
                tval += next_eip;
                if (s->dflag == 0)
                    tval &= 0xffff;

                l1 = gen_new_label();
                l2 = gen_new_label();
                l3 = gen_new_label();
                b &= 3;
                switch(b) {
                case 0: /* loopnz */
                case 1: /* loopz */
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_op_add_reg_im(s->aflag, R_ECX, -1);
                    gen_op_jz_ecx(s->aflag, l3);
                    gen_compute_eflags(cpu_tmp0);
                    tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_Z);
                    if (b == 0) {
                        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1);
                    } else {
                        tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, l1);
                    }
                    break;
                case 2: /* loop */
                    gen_op_add_reg_im(s->aflag, R_ECX, -1);
                    gen_op_jnz_ecx(s->aflag, l1);
                    break;
                default:
                case 3: /* jecxz */
                    gen_op_jz_ecx(s->aflag, l1);
                    break;
                }

                gen_set_label(l3);
                gen_jmp_im(next_eip);
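                tcg_gen_br(l2);

                gen_set_label(l1);
                gen_jmp_im(tval);
                gen_set_label(l2);
                gen_eob(s);
            }
            break;
        /* NOTE: loop/jecxz are built from three labels: l1 is the taken
           target, l3 is the fall-through path, and both meet at l2 where
           gen_eob() ends the block; ECX is tested at the width given by
           s->aflag. */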
        case 0x130: /* wrmsr */
        case 0x132: /* rdmsr */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                if (b & 2) {
                    gen_helper_rdmsr();
                } else {
                    gen_helper_wrmsr();
                }
            }
            break;
        case 0x131: /* rdtsc */
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            if (use_icount)
                gen_io_start();
            gen_helper_rdtsc();
            if (use_icount) {
                gen_io_end();
                gen_jmp(s, s->pc - s->cs_base);
            }
            break;
        case 0x133: /* rdpmc */
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_rdpmc();
            break;
        case 0x134: /* sysenter */
            /* For Intel SYSENTER is valid on 64-bit */
            if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
                goto illegal_op;
            if (!s->pe) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_sysenter();
                gen_eob(s);
            }
            break;
        case 0x135: /* sysexit */
            /* For Intel SYSEXIT is valid on 64-bit */
            if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
                goto illegal_op;
            if (!s->pe) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_sysexit(tcg_const_i32(dflag));
                gen_eob(s);
            }
            break;
#ifdef TARGET_X86_64
        case 0x105: /* syscall */
            /* XXX: is it usable in real mode ? */
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_syscall(tcg_const_i32(s->pc - pc_start));
            gen_eob(s);
            break;
        case 0x107: /* sysret */
            if (!s->pe) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_sysret(tcg_const_i32(s->dflag));
                /* condition codes are modified only in long mode */
                if (s->lma)
                    s->cc_op = CC_OP_EFLAGS;
                gen_eob(s);
            }
            break;
#endif
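        /* NOTE: sysenter/sysexit/syscall/sysret change privilege state
           inside their helpers, so the translator syncs cc_op and EIP first
           and then ends the block with gen_eob(); no block chaining is
           possible across these instructions. */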
        case 0x1a2: /* cpuid */
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_cpuid();
            break;
        case 0xf4: /* hlt */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_hlt(tcg_const_i32(s->pc - pc_start));
                s->is_jmp = DISAS_TB_JUMP;
            }
            break;
        case 0x100:
            modrm = ldub_code(s->pc++);
            mod = (modrm >> 6) & 3;
            op = (modrm >> 3) & 7;
            switch(op) {
            case 0: /* sldt */
                if (!s->pe || s->vm86)
                    goto illegal_op;
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
                ot = OT_WORD;
                if (mod == 3)
                    ot += s->dflag;
                gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
                break;
            case 2: /* lldt */
                if (!s->pe || s->vm86)
                    goto illegal_op;
                if (s->cpl != 0) {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                } else {
                    gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
                    gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
                    gen_jmp_im(pc_start - s->cs_base);
                    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                    gen_helper_lldt(cpu_tmp2_i32);
                }
                break;
            case 1: /* str */
                if (!s->pe || s->vm86)
                    goto illegal_op;
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
                ot = OT_WORD;
                if (mod == 3)
                    ot += s->dflag;
                gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
                break;
            case 3: /* ltr */
                if (!s->pe || s->vm86)
                    goto illegal_op;
                if (s->cpl != 0) {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                } else {
                    gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
                    gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
                    gen_jmp_im(pc_start - s->cs_base);
                    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                    gen_helper_ltr(cpu_tmp2_i32);
                }
                break;
            case 4: /* verr */
            case 5: /* verw */
                if (!s->pe || s->vm86)
                    goto illegal_op;
                gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                if (op == 4)
                    gen_helper_verr(cpu_T[0]);
                else
                    gen_helper_verw(cpu_T[0]);
                s->cc_op = CC_OP_EFLAGS;
                break;
            default:
                goto illegal_op;
            }
            break;
        case 0x101:
            modrm = ldub_code(s->pc++);
            mod = (modrm >> 6) & 3;
            op = (modrm >> 3) & 7;
            rm = modrm & 7;
            switch(op) {
            case 0: /* sgdt */
                if (mod == 3)
                    goto illegal_op;
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
                gen_op_st_T0_A0(OT_WORD + s->mem_index);
                gen_add_A0_im(s, 2);
                tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
                if (!s->dflag)
                    gen_op_andl_T0_im(0xffffff);
                gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
                break;
            case 1:
                if (mod == 3) {
                    switch (rm) {
                    case 0: /* monitor */
                        if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
                            s->cpl != 0)
                            goto illegal_op;
                        if (s->cc_op != CC_OP_DYNAMIC)
                            gen_op_set_cc_op(s->cc_op);
                        gen_jmp_im(pc_start - s->cs_base);
#ifdef TARGET_X86_64
                        if (s->aflag == 2) {
                            gen_op_movq_A0_reg(R_EAX);
                        } else
#endif
                        {
                            gen_op_movl_A0_reg(R_EAX);
                            if (s->aflag == 0)
                                gen_op_andl_A0_ffff();
                        }
                        gen_add_A0_ds_seg(s);
                        gen_helper_monitor(cpu_A0);
                        break;
                    case 1: /* mwait */
                        if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
                            s->cpl != 0)
                            goto illegal_op;
                        gen_update_cc_op(s);
                        gen_jmp_im(pc_start - s->cs_base);
                        gen_helper_mwait(tcg_const_i32(s->pc - pc_start));
                        gen_eob(s);
                        break;
                    default:
                        goto illegal_op;
                    }
                } else { /* sidt */
                    gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
                    gen_op_st_T0_A0(OT_WORD + s->mem_index);
                    gen_add_A0_im(s, 2);
                    tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
                    if (!s->dflag)
                        gen_op_andl_T0_im(0xffffff);
                    gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
                }
                break;
            case 2: /* lgdt */
            case 3: /* lidt */
                if (mod == 3) {
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_jmp_im(pc_start - s->cs_base);
                    switch(rm) {
                    case 0: /* VMRUN */
                        if (!(s->flags & HF_SVME_MASK) || !s->pe)
                            goto illegal_op;
                        if (s->cpl != 0) {
                            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                            break;
                        } else {
                            gen_helper_vmrun(tcg_const_i32(s->aflag),
                                             tcg_const_i32(s->pc - pc_start));
                            tcg_gen_exit_tb(0);
                            s->is_jmp = DISAS_TB_JUMP;
                        }
                        break;
                    case 1: /* VMMCALL */
                        if (!(s->flags & HF_SVME_MASK))
                            goto illegal_op;
                        gen_helper_vmmcall();
                        break;
                    case 2: /* VMLOAD */
                        if (!(s->flags & HF_SVME_MASK) || !s->pe)
                            goto illegal_op;
                        if (s->cpl != 0) {
                            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                            break;
                        } else {
                            gen_helper_vmload(tcg_const_i32(s->aflag));
                        }
                        break;
                    case 3: /* VMSAVE */
                        if (!(s->flags & HF_SVME_MASK) || !s->pe)
                            goto illegal_op;
                        if (s->cpl != 0) {
                            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                            break;
                        } else {
                            gen_helper_vmsave(tcg_const_i32(s->aflag));
                        }
                        break;
                    case 4: /* STGI */
                        if ((!(s->flags & HF_SVME_MASK) &&
                             !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
                            !s->pe)
                            goto illegal_op;
                        if (s->cpl != 0) {
                            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                            break;
                        } else {
                            gen_helper_stgi();
                        }
                        break;
                    case 5: /* CLGI */
                        if (!(s->flags & HF_SVME_MASK) || !s->pe)
                            goto illegal_op;
                        if (s->cpl != 0) {
                            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                            break;
                        } else {
                            gen_helper_clgi();
                        }
                        break;
                    case 6: /* SKINIT */
                        if ((!(s->flags & HF_SVME_MASK) &&
                             !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
                            !s->pe)
                            goto illegal_op;
                        gen_helper_skinit();
                        break;
                    case 7: /* INVLPGA */
                        if (!(s->flags & HF_SVME_MASK) || !s->pe)
                            goto illegal_op;
                        if (s->cpl != 0) {
                            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                            break;
                        } else {
                            gen_helper_invlpga(tcg_const_i32(s->aflag));
                        }
                        break;
                    default:
                        goto illegal_op;
                    }
                } else if (s->cpl != 0) {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                } else {
                    gen_svm_check_intercept(s, pc_start,
                                            op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_op_ld_T1_A0(OT_WORD + s->mem_index);
                    gen_add_A0_im(s, 2);
                    gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
                    if (!s->dflag)
                        gen_op_andl_T0_im(0xffffff);
                    if (op == 2) {
                        tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
                        tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
                    } else {
                        tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
                        tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
                    }
                }
                break;
            case 4: /* smsw */
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
#else
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
#endif
                gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1);
                break;
            case 6: /* lmsw */
                if (s->cpl != 0) {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                } else {
                    gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
                    gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
                    gen_helper_lmsw(cpu_T[0]);
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                }
                break;
            case 7:
                if (mod != 3) { /* invlpg */
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                    } else {
                        if (s->cc_op != CC_OP_DYNAMIC)
                            gen_op_set_cc_op(s->cc_op);
                        gen_jmp_im(pc_start - s->cs_base);
                        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                        gen_helper_invlpg(cpu_A0);
                        gen_jmp_im(s->pc - s->cs_base);
                        gen_eob(s);
                    }
                } else {
                    switch (rm) {
                    case 0: /* swapgs */
#ifdef TARGET_X86_64
                        if (CODE64(s)) {
                            if (s->cpl != 0) {
                                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                            } else {
                                tcg_gen_ld_tl(cpu_T[0], cpu_env,
                                    offsetof(CPUX86State,segs[R_GS].base));
                                tcg_gen_ld_tl(cpu_T[1], cpu_env,
                                    offsetof(CPUX86State,kernelgsbase));
                                tcg_gen_st_tl(cpu_T[1], cpu_env,
                                    offsetof(CPUX86State,segs[R_GS].base));
                                tcg_gen_st_tl(cpu_T[0], cpu_env,
                                    offsetof(CPUX86State,kernelgsbase));
                            }
                        } else
#endif
                        {
                            goto illegal_op;
                        }
                        break;
                    case 1: /* rdtscp */
                        if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
                            goto illegal_op;
                        if (s->cc_op != CC_OP_DYNAMIC)
                            gen_op_set_cc_op(s->cc_op);
                        gen_jmp_im(pc_start - s->cs_base);
                        if (use_icount)
                            gen_io_start();
                        gen_helper_rdtscp();
                        if (use_icount) {
                            gen_io_end();
                            gen_jmp(s, s->pc - s->cs_base);
                        }
                        break;
                    default:
                        goto illegal_op;
                    }
                }
                break;
            default:
                goto illegal_op;
            }
            break;
        case 0x108: /* invd */
        case 0x109: /* wbinvd */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
                /* nothing to do */
            }
            break;
        case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
            if (CODE64(s)) {
                int d_ot;
                /* d_ot is the size of destination */
                d_ot = dflag + OT_WORD;

                modrm = ldub_code(s->pc++);
                reg = ((modrm >> 3) & 7) | rex_r;
                mod = (modrm >> 6) & 3;
                rm = (modrm & 7) | REX_B(s);

                if (mod == 3) {
                    gen_op_mov_TN_reg(OT_LONG, 0, rm);
                    /* sign extend */
                    if (d_ot == OT_QUAD)
                        tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
                    gen_op_mov_reg_T0(d_ot, reg);
                } else {
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    if (d_ot == OT_QUAD) {
                        gen_op_lds_T0_A0(OT_LONG + s->mem_index);
                    } else {
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                    }
                    gen_op_mov_reg_T0(d_ot, reg);
                }
            } else
#endif
            {
                int label1;
                TCGv t0, t1, t2, a0;

                if (!s->pe || s->vm86)
                    goto illegal_op;
                t0 = tcg_temp_local_new();
                t1 = tcg_temp_local_new();
                t2 = tcg_temp_local_new();
                ot = OT_WORD;
                modrm = ldub_code(s->pc++);
                reg = (modrm >> 3) & 7;
                mod = (modrm >> 6) & 3;
                rm = modrm & 7;
                if (mod != 3) {
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
                    a0 = tcg_temp_local_new();
                    tcg_gen_mov_tl(a0, cpu_A0);
                } else {
                    gen_op_mov_v_reg(ot, t0, rm);
                    TCGV_UNUSED(a0);
                }
                gen_op_mov_v_reg(ot, t1, reg);
                tcg_gen_andi_tl(cpu_tmp0, t0, 3);
                tcg_gen_andi_tl(t1, t1, 3);
                tcg_gen_movi_tl(t2, 0);
                label1 = gen_new_label();
                tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
                tcg_gen_andi_tl(t0, t0, ~3);
                tcg_gen_or_tl(t0, t0, t1);
                tcg_gen_movi_tl(t2, CC_Z);
                gen_set_label(label1);
                if (mod != 3) {
                    gen_op_st_v(ot + s->mem_index, t0, a0);
                    tcg_temp_free(a0);
                } else {
                    gen_op_mov_reg_v(ot, rm, t0);
                }
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_compute_eflags(cpu_cc_src);
                tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
                tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
                s->cc_op = CC_OP_EFLAGS;
                tcg_temp_free(t0);
                tcg_temp_free(t1);
                tcg_temp_free(t2);
            }
            break;
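        /* NOTE: the arpl path above resolves the RPL compare with a TCG
           branch: t2 is preset to 0 and set to CC_Z only when the RPL is
           raised, then merged into cc_src so ZF reflects whether an
           adjustment was made. */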
        case 0x102: /* lar */
        case 0x103: /* lsl */
            {
                int label1;
                TCGv t0;
                if (!s->pe || s->vm86)
                    goto illegal_op;
                ot = dflag ? OT_LONG : OT_WORD;
                modrm = ldub_code(s->pc++);
                reg = ((modrm >> 3) & 7) | rex_r;
                gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
                t0 = tcg_temp_local_new();
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                if (b == 0x102)
                    gen_helper_lar(t0, cpu_T[0]);
                else
                    gen_helper_lsl(t0, cpu_T[0]);
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
                label1 = gen_new_label();
                tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
                gen_op_mov_reg_v(ot, reg, t0);
                gen_set_label(label1);
                s->cc_op = CC_OP_EFLAGS;
                tcg_temp_free(t0);
            }
            break;
        case 0x118:
            modrm = ldub_code(s->pc++);
            mod = (modrm >> 6) & 3;
            op = (modrm >> 3) & 7;
            switch(op) {
            case 0: /* prefetchnta */
            case 1: /* prefetchnt0 */
            case 2: /* prefetchnt0 */
            case 3: /* prefetchnt0 */
                if (mod == 3)
                    goto illegal_op;
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                /* nothing more to do */
                break;
            default: /* nop (multi byte) */
                gen_nop_modrm(s, modrm);
                break;
            }
            break;
        case 0x119 ... 0x11f: /* nop (multi byte) */
            modrm = ldub_code(s->pc++);
            gen_nop_modrm(s, modrm);
            break;
        case 0x120: /* mov reg, crN */
        case 0x122: /* mov crN, reg */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                modrm = ldub_code(s->pc++);
                if ((modrm & 0xc0) != 0xc0)
                    goto illegal_op;
                rm = (modrm & 7) | REX_B(s);
                reg = ((modrm >> 3) & 7) | rex_r;
                if (CODE64(s))
                    ot = OT_QUAD;
                else
                    ot = OT_LONG;
                if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
                    (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
                    reg = 8;
                }
                switch(reg) {
                case 0:
                case 2:
                case 3:
                case 4:
                case 8:
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_jmp_im(pc_start - s->cs_base);
                    if (b & 2) {
                        gen_op_mov_TN_reg(ot, 0, rm);
                        gen_helper_write_crN(tcg_const_i32(reg), cpu_T[0]);
                        gen_jmp_im(s->pc - s->cs_base);
                        gen_eob(s);
                    } else {
                        gen_helper_read_crN(cpu_T[0], tcg_const_i32(reg));
                        gen_op_mov_reg_T0(ot, rm);
                    }
                    break;
                default:
                    goto illegal_op;
                }
            }
            break;
        case 0x121: /* mov reg, drN */
        case 0x123: /* mov drN, reg */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                modrm = ldub_code(s->pc++);
                if ((modrm & 0xc0) != 0xc0)
                    goto illegal_op;
                rm = (modrm & 7) | REX_B(s);
                reg = ((modrm >> 3) & 7) | rex_r;
                if (CODE64(s))
                    ot = OT_QUAD;
                else
                    ot = OT_LONG;
                /* XXX: do it dynamically with CR4.DE bit */
                if (reg == 4 || reg == 5 || reg >= 8)
                    goto illegal_op;
                if (b & 2) {
                    gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
                    gen_op_mov_TN_reg(ot, 0, rm);
                    gen_helper_movl_drN_T0(tcg_const_i32(reg), cpu_T[0]);
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                } else {
                    gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
                    tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
                    gen_op_mov_reg_T0(ot, rm);
                }
            }
            break;
        case 0x106: /* clts */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
                gen_helper_clts(cpu_env);
                /* abort block because static cpu state changed */
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            }
            break;
        /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
        case 0x1c3: /* MOVNTI reg, mem */
            if (!(s->cpuid_features & CPUID_SSE2))
                goto illegal_op;
            ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
            modrm = ldub_code(s->pc++);
            mod = (modrm >> 6) & 3;
            if (mod == 3)
                goto illegal_op;
            reg = ((modrm >> 3) & 7) | rex_r;
            /* generate a generic store */
            gen_ldst_modrm(s, modrm, ot, reg, 1);
            break;
        case 0x1ae:
            modrm = ldub_code(s->pc++);
            mod = (modrm >> 6) & 3;
            op = (modrm >> 3) & 7;
            switch(op) {
            case 0: /* fxsave */
                if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
                    (s->prefix & PREFIX_LOCK))
                    goto illegal_op;
                if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                    gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                    break;
                }
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32((s->dflag == 2)));
                break;
            case 1: /* fxrstor */
                if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
                    (s->prefix & PREFIX_LOCK))
                    goto illegal_op;
                if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                    gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                    break;
                }
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fxrstor(cpu_env, cpu_A0,
                                   tcg_const_i32((s->dflag == 2)));
                break;
            case 2: /* ldmxcsr */
            case 3: /* stmxcsr */
                if (s->flags & HF_TS_MASK) {
                    gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                    break;
                }
                if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
                    mod == 3)
                    goto illegal_op;
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                if (op == 2) {
                    gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                    gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
                } else {
                    tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
                    gen_op_st_T0_A0(OT_LONG + s->mem_index);
                }
                break;
            case 5: /* lfence */
            case 6: /* mfence */
                if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
                    goto illegal_op;
                break;
            case 7: /* sfence / clflush */
                if ((modrm & 0xc7) == 0xc0) {
                    /* sfence */
                    /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
                    if (!(s->cpuid_features & CPUID_SSE))
                        goto illegal_op;
                } else {
                    /* clflush */
                    if (!(s->cpuid_features & CPUID_CLFLUSH))
                        goto illegal_op;
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                }
                break;
            default:
                goto illegal_op;
            }
            break;
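        /* NOTE: fxsave/fxrstor pass (s->dflag == 2) to the helper to select
           the 64-bit (REX.W) save area layout; lfence/mfence/sfence need no
           generated code since this translator executes blocks serially. */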
        case 0x10d: /* 3DNow! prefetch(w) */
            modrm = ldub_code(s->pc++);
            mod = (modrm >> 6) & 3;
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            /* ignore for now */
            break;
        case 0x1aa: /* rsm */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
            if (!(s->flags & HF_SMM_MASK))
                goto illegal_op;
            gen_update_cc_op(s);
            gen_jmp_im(s->pc - s->cs_base);
            gen_helper_rsm();
            gen_eob(s);
            break;
        case 0x1b8: /* SSE4.2 popcnt */
            if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
                 PREFIX_REPZ)
                goto illegal_op;
            if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
                goto illegal_op;

            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7);

            if (s->prefix & PREFIX_DATA)
                ot = OT_WORD;
            else if (s->dflag != 2)
                ot = OT_LONG;
            else
                ot = OT_QUAD;

            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
            gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
            gen_op_mov_reg_T0(ot, reg);

            s->cc_op = CC_OP_EFLAGS;
            break;
        case 0x10e ... 0x10f:
            /* 3DNow! instructions, ignore prefixes */
            s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
        case 0x110 ... 0x117:
        case 0x128 ... 0x12f:
        case 0x138 ... 0x13a:
        case 0x150 ... 0x179:
        case 0x17c ... 0x17f:
        case 0x1c2:
        case 0x1c4 ... 0x1c6:
        case 0x1d0 ... 0x1fe:
            gen_sse(s, b, pc_start, rex_r);
            break;
        default:
            goto illegal_op;
        }
        /* lock generation */
        if (s->prefix & PREFIX_LOCK)
            gen_helper_unlock();
        return s->pc;
 illegal_op:
        if (s->prefix & PREFIX_LOCK)
            gen_helper_unlock();
        /* XXX: ensure that no lock was generated */
        gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
        return s->pc;
}
void optimize_flags_init(void)
{
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_tmp),
                                    "cc_tmp");
#ifdef TARGET_X86_64
    cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EAX]), "rax");
    cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ECX]), "rcx");
    cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDX]), "rdx");
    cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBX]), "rbx");
    cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESP]), "rsp");
    cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBP]), "rbp");
    cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESI]), "rsi");
    cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDI]), "rdi");
    cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
                                         offsetof(CPUX86State, regs[8]), "r8");
    cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
                                         offsetof(CPUX86State, regs[9]), "r9");
    cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[10]), "r10");
    cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[11]), "r11");
    cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[12]), "r12");
    cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[13]), "r13");
    cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[14]), "r14");
    cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[15]), "r15");
#else
    cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EAX]), "eax");
    cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ECX]), "ecx");
    cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDX]), "edx");
    cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBX]), "ebx");
    cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESP]), "esp");
    cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBP]), "ebp");
    cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESI]), "esi");
    cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDI]), "edi");
#endif
    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUX86State *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_ptr;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj;
    uint64_t flags;
    target_ulong pc_start;
    target_ulong cs_base;
    int num_insns;
    int max_insns;
    /* generate intermediate code */
    pc_start = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;

    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->f_st = 0;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cs_base = cs_base;
    dc->tb = tb;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = 0;
    if (flags & HF_SOFTMMU_MASK) {
        if (dc->cpl == 3)
            dc->mem_index = 2 * 4;
        else
            dc->mem_index = 1 * 4;
    }
    dc->cpuid_features = env->cpuid_features;
    dc->cpuid_ext_features = env->cpuid_ext_features;
    dc->cpuid_ext2_features = env->cpuid_ext2_features;
    dc->cpuid_ext3_features = env->cpuid_ext3_features;
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->flags = flags;
    dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK)
#ifndef CONFIG_SOFTMMU
                    || (flags & HF_SOFTMMU_MASK)
#endif
                    );
#if 0
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
#endif
    cpu_T[0] = tcg_temp_new();
    cpu_T[1] = tcg_temp_new();
    cpu_A0 = tcg_temp_new();
    cpu_T3 = tcg_temp_new();

    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_tmp5 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
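    /* Unlike the globals created in optimize_flags_init(), these are
       plain TCG temporaries: they live only inside the translation of
       this block and are re-created for every TB, so nothing here
       persists in CPUX86State between blocks. */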
    dc->is_jmp = DISAS_NEXT;
    pc_ptr = pc_start;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    for(;;) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == pc_ptr &&
                    !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
                    gen_debug(dc, pc_ptr - dc->cs_base);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = pc_ptr;
            gen_opc_cc_op[lj] = dc->cc_op;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
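        /* In search_pc mode the loop records, for each guest
           instruction, the opcode-buffer index at which it starts
           (gen_opc_instr_start), its eip (gen_opc_pc) and the pending
           cc_op (gen_opc_cc_op); restore_state_to_opc() walks these
           arrays to rebuild precise CPU state at a faulting
           instruction. */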
        pc_ptr = disas_insn(dc, pc_ptr);
        num_insns++;
        /* stop translation if indicated */
        if (dc->is_jmp)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen */
        if (dc->tf || dc->singlestep_enabled ||
            (flags & HF_INHIBIT_IRQ_MASK)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* if too long translation, stop generation too */
        if (gen_opc_ptr >= gen_opc_end ||
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
            num_insns >= max_insns) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        if (singlestep) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
    }
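    /* Translation stops on explicit control flow (dc->is_jmp), on
       single-step or inhibited-IRQ conditions, when the opcode buffer
       is nearly full, when the block approaches the end of the guest
       page (the 32-byte margin in the TARGET_PAGE_SIZE check leaves
       room for one final instruction), or after max_insns instructions
       for icount accounting. */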
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    /* don't forget to fill the last values */
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    }
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        int disas_flags;
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
        if (dc->code64)
            disas_flags = 2;
        else
#endif
            disas_flags = !dc->code32;
        log_target_disas(pc_start, pc_ptr - pc_start, disas_flags);
        qemu_log("\n");
    }
#endif

    if (!search_pc) {
        tb->size = pc_ptr - pc_start;
        tb->icount = num_insns;
    }
}
void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
{
    int cc_op;
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
        int i;
        qemu_log("RESTORE:\n");
        for(i = 0; i <= pc_pos; i++) {
            if (gen_opc_instr_start[i]) {
                qemu_log("0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
            }
        }
        qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
                 pc_pos, gen_opc_pc[pc_pos] - tb->cs_base,
                 (uint32_t)tb->cs_base);
    }
#endif
    env->eip = gen_opc_pc[pc_pos] - tb->cs_base;
    cc_op = gen_opc_cc_op[pc_pos];
    if (cc_op != CC_OP_DYNAMIC)
        env->cc_op = cc_op;
}
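/* restore_state_to_opc() is the consumer of the search_pc bookkeeping:
   after a fault the TB is retranslated via gen_intermediate_code_pc(),
   pc_pos indexes the faulting op, eip is recomputed from gen_opc_pc[],
   and a statically known cc_op is written back.  CC_OP_DYNAMIC means
   the flags state already in env is authoritative and is left alone. */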