 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
#include "disas/disas.h"

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10

#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)

//#define MACRO_TEST   1
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_T[2], cpu_T3;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

#include "exec/gen-icount.h"

static int x86_64_hregs;
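/* Note (added): on x86_64, x86_64_hregs is expected to be non-zero while
   decoding an instruction that carries a REX prefix; in that case byte
   register numbers 4..7 select SPL/BPL/SIL/DIL instead of AH/CH/DH/BH
   (see byte_reg_is_xH below). */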
typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int ss32;   /* 32 bit stack segment */
    int cc_op;  /* current CC operation */
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
} DisasContext;
static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
/* i386 arith/logic operations */
    OP_SHL1, /* undocumented */

/* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_TMP0 = 16,    /* temporary operand register */
    OR_A0, /* temporary register used when doing address evaluation */
static inline void gen_op_movl_T0_0(void)
{
    tcg_gen_movi_tl(cpu_T[0], 0);
}

static inline void gen_op_movl_T0_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T0_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T1_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_T1_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_A0_im(uint32_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}

static inline void gen_op_movq_A0_im(int64_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}

static inline void gen_movtl_T0_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_movtl_T1_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_andl_T0_ffff(void)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
}

static inline void gen_op_andl_T0_im(uint32_t val)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
}

static inline void gen_op_movl_T0_T1(void)
{
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
}

static inline void gen_op_andl_A0_ffff(void)
{
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
}
#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */
#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
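/* Note (added for clarity): these offsets locate the 8-, 16- and 32-bit views
   of a register inside its host target_ulong slot.  On a little-endian host
   the low byte sits at offset 0 and the "high byte" view (AH/CH/DH/BH) at
   offset 1; on a big-endian host the same views sit at the opposite end of
   the word, hence the sizeof(target_ulong)-based definitions above. */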
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
{
    if (reg >= 8 || x86_64_hregs) {
        return false;
    }
    return reg >= 4;
}
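/* Example (added, illustrative): without a REX prefix, "mov ah, ..." encodes
   register number 4, so byte_reg_is_xH(4) is true and the access targets bits
   15..8 of EAX; with a REX prefix (x86_64_hregs set), the same number 4
   selects SPL, i.e. bits 7..0 of RSP. */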
static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
    default: /* XXX this shouldn't be reached; abort? */
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        tcg_gen_mov_tl(cpu_regs[reg], t0);
static inline void gen_op_mov_reg_T0(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);
}

static inline void gen_op_mov_reg_T1(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[1]);
}
static inline void gen_op_mov_reg_A0(int size, int reg)
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
    default: /* XXX this shouldn't be reached; abort? */
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
{
    if (ot == OT_BYTE && byte_reg_is_xH(reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
        tcg_gen_ext8u_tl(t0, t0);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
{
    gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
}
static inline void gen_op_movl_A0_reg(int reg)
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);

static inline void gen_op_addl_A0_im(int32_t val)
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);

static inline void gen_op_addq_A0_im(int64_t val)
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);

static void gen_add_A0_im(DisasContext *s, int val)
        gen_op_addq_A0_im(val);
        gen_op_addl_A0_im(val);
static inline void gen_op_addl_T0_T1(void)
{
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
}

static inline void gen_op_jmp_T0(void)
{
    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
}
static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
static inline void gen_op_add_reg_T0(int size, int reg)
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
static inline void gen_op_set_cc_op(int32_t val)
{
    tcg_gen_movi_i32(cpu_cc_op, val);
}

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
static inline void gen_op_movl_A0_seg(int reg)
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);

static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);

static inline void gen_op_movq_A0_seg(int reg)
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));

static inline void gen_op_addq_A0_seg(int reg)
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);

static inline void gen_op_movq_A0_reg(int reg)
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
static inline void gen_op_lds_T0_A0(int idx)
    int mem_index = (idx >> 2) - 1;
        tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
        tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
        tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
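/* Note (added): idx here is "operand size + s->mem_index"; the low two bits
   carry the OT_* size and (idx >> 2) - 1 recovers the memory-access index
   passed to the tcg_gen_qemu_* ops.  This is why callers below pass
   ot + s->mem_index to the load/store helpers. */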
static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
    int mem_index = (idx >> 2) - 1;
        tcg_gen_qemu_ld8u(t0, a0, mem_index);
        tcg_gen_qemu_ld16u(t0, a0, mem_index);
        tcg_gen_qemu_ld32u(t0, a0, mem_index);
        /* Should never happen on 32-bit targets.  */
        tcg_gen_qemu_ld64(t0, a0, mem_index);
/* XXX: always use ldu or lds */
static inline void gen_op_ld_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ldu_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ld_T1_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[1], cpu_A0);
}
static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
    int mem_index = (idx >> 2) - 1;
        tcg_gen_qemu_st8(t0, a0, mem_index);
        tcg_gen_qemu_st16(t0, a0, mem_index);
        tcg_gen_qemu_st32(t0, a0, mem_index);
        /* Should never happen on 32-bit targets.  */
        tcg_gen_qemu_st64(t0, a0, mem_index);
static inline void gen_op_st_T0_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_st_T1_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[1], cpu_A0);
}
static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
}
static inline void gen_string_movl_A0_ESI(DisasContext *s)
    override = s->override;
        gen_op_movq_A0_seg(override);
        gen_op_addq_A0_reg_sN(0, R_ESI);
        gen_op_movq_A0_reg(R_ESI);
        if (s->addseg && override < 0)
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            gen_op_movl_A0_reg(R_ESI);
        /* 16 address, always override */
        gen_op_movl_A0_reg(R_ESI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(s, override);

static inline void gen_string_movl_A0_EDI(DisasContext *s)
        gen_op_movq_A0_reg(R_EDI);
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            gen_op_movl_A0_reg(R_EDI);
        gen_op_movl_A0_reg(R_EDI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(s, R_ES);
static inline void gen_op_movl_T0_Dshift(int ot)
{
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
}
static TCGv gen_ext_tl(TCGv dst, TCGv src, int size, bool sign)
            tcg_gen_ext8s_tl(dst, src);
            tcg_gen_ext8u_tl(dst, src);
            tcg_gen_ext16s_tl(dst, src);
            tcg_gen_ext16u_tl(dst, src);
            tcg_gen_ext32s_tl(dst, src);
            tcg_gen_ext32u_tl(dst, src);
static void gen_extu(int ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(int ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}
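/* Note (added): gen_extu/gen_exts normalise a value held in a full-width TCG
   temporary to the operand size 'ot', zero- or sign-extending it.  The loop
   helpers below use this, for example, to test only the 16- or 32-bit view of
   ECX when the address size is smaller than the register width. */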
static inline void gen_op_jnz_ecx(int size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(int size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}
static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
        gen_helper_inb(v, n);
        gen_helper_inw(v, n);
        gen_helper_inl(v, n);

static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
        gen_helper_outb(v, n);
        gen_helper_outw(v, n);
        gen_helper_outl(v, n);
static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
    target_ulong next_eip;

    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
            gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
            gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
    if(s->flags & HF_SVMI_MASK) {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
                                tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
static inline void gen_movs(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;
    }
}
static void gen_op_update1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static inline void gen_op_cmpl_T0_T1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}
/* compute eflags.C to reg */
static void gen_compute_eflags_c(TCGv reg)
{
    gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_env, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(TCGv reg)
{
    gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_env, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}
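/* Note (added): the translator evaluates condition codes lazily.  Arithmetic
   ops only record their operands/result in cpu_cc_src/cpu_cc_dst and set
   s->cc_op; the actual eflags bits are materialised on demand by the
   cc_compute_c/cc_compute_all helpers above, keyed on the current cc_op. */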
850 static inline void gen_setcc_slow_T0(DisasContext
*s
, int jcc_op
)
852 if (s
->cc_op
!= CC_OP_DYNAMIC
)
853 gen_op_set_cc_op(s
->cc_op
);
856 gen_compute_eflags(cpu_T
[0]);
857 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 11);
858 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 1);
861 gen_compute_eflags_c(cpu_T
[0]);
864 gen_compute_eflags(cpu_T
[0]);
865 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 6);
866 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 1);
869 gen_compute_eflags(cpu_tmp0
);
870 tcg_gen_shri_tl(cpu_T
[0], cpu_tmp0
, 6);
871 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
872 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 1);
875 gen_compute_eflags(cpu_T
[0]);
876 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 7);
877 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 1);
880 gen_compute_eflags(cpu_T
[0]);
881 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 2);
882 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 1);
885 gen_compute_eflags(cpu_tmp0
);
886 tcg_gen_shri_tl(cpu_T
[0], cpu_tmp0
, 11); /* CC_O */
887 tcg_gen_shri_tl(cpu_tmp0
, cpu_tmp0
, 7); /* CC_S */
888 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
889 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 1);
893 gen_compute_eflags(cpu_tmp0
);
894 tcg_gen_shri_tl(cpu_T
[0], cpu_tmp0
, 11); /* CC_O */
895 tcg_gen_shri_tl(cpu_tmp4
, cpu_tmp0
, 7); /* CC_S */
896 tcg_gen_shri_tl(cpu_tmp0
, cpu_tmp0
, 6); /* CC_Z */
897 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_tmp4
);
898 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
899 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 1);
904 /* return true if setcc_slow is not needed (WARNING: must be kept in
905 sync with gen_jcc1) */
906 static int is_fast_jcc_case(DisasContext
*s
, int b
)
909 jcc_op
= (b
>> 1) & 7;
911 /* we optimize the cmp/jcc case */
916 if (jcc_op
== JCC_O
|| jcc_op
== JCC_P
)
920 /* some jumps are easy to compute */
945 if (jcc_op
!= JCC_Z
&& jcc_op
!= JCC_S
)
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
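/* Note (added): 'b' follows the x86 Jcc encoding: bits 3..1 select the base
   condition (O, C, Z, BE, S, P, L, LE, i.e. jcc_op = (b >> 1) & 7) and bit 0
   inverts it, which is why the slow path also derives jcc_op the same way. */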
957 static inline void gen_jcc1(DisasContext
*s
, int b
, int l1
)
959 int inv
, jcc_op
, size
, cond
;
963 jcc_op
= (b
>> 1) & 7;
966 /* we optimize the cmp/jcc case */
972 size
= s
->cc_op
- CC_OP_SUBB
;
976 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_dst
, size
, false);
977 tcg_gen_brcondi_tl(inv
? TCG_COND_NE
: TCG_COND_EQ
, t0
, 0, l1
);
981 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_dst
, size
, true);
982 tcg_gen_brcondi_tl(inv
? TCG_COND_GE
: TCG_COND_LT
, t0
, 0, l1
);
986 cond
= inv
? TCG_COND_GEU
: TCG_COND_LTU
;
989 cond
= inv
? TCG_COND_GTU
: TCG_COND_LEU
;
991 tcg_gen_add_tl(cpu_tmp4
, cpu_cc_dst
, cpu_cc_src
);
992 gen_extu(size
, cpu_tmp4
);
993 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
994 tcg_gen_brcond_tl(cond
, cpu_tmp4
, t0
, l1
);
998 cond
= inv
? TCG_COND_GE
: TCG_COND_LT
;
1001 cond
= inv
? TCG_COND_GT
: TCG_COND_LE
;
1003 tcg_gen_add_tl(cpu_tmp4
, cpu_cc_dst
, cpu_cc_src
);
1004 gen_exts(size
, cpu_tmp4
);
1005 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, true);
1006 tcg_gen_brcond_tl(cond
, cpu_tmp4
, t0
, l1
);
1014 /* some jumps are easy to compute */
1056 size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1059 size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1067 gen_setcc_slow_T0(s
, jcc_op
);
1068 tcg_gen_brcondi_tl(inv
? TCG_COND_EQ
: TCG_COND_NE
,
1074 /* XXX: does not work with gdbstub "ice" single step - not a
1076 static int gen_jz_ecx_string(DisasContext
*s
, target_ulong next_eip
)
1080 l1
= gen_new_label();
1081 l2
= gen_new_label();
1082 gen_op_jnz_ecx(s
->aflag
, l1
);
1084 gen_jmp_tb(s
, next_eip
, 1);
1089 static inline void gen_stos(DisasContext
*s
, int ot
)
1091 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
1092 gen_string_movl_A0_EDI(s
);
1093 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1094 gen_op_movl_T0_Dshift(ot
);
1095 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1098 static inline void gen_lods(DisasContext
*s
, int ot
)
1100 gen_string_movl_A0_ESI(s
);
1101 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1102 gen_op_mov_reg_T0(ot
, R_EAX
);
1103 gen_op_movl_T0_Dshift(ot
);
1104 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1107 static inline void gen_scas(DisasContext
*s
, int ot
)
1109 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
1110 gen_string_movl_A0_EDI(s
);
1111 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
1112 gen_op_cmpl_T0_T1_cc();
1113 gen_op_movl_T0_Dshift(ot
);
1114 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1115 s
->cc_op
= CC_OP_SUBB
+ ot
;
1118 static inline void gen_cmps(DisasContext
*s
, int ot
)
1120 gen_string_movl_A0_ESI(s
);
1121 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1122 gen_string_movl_A0_EDI(s
);
1123 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
1124 gen_op_cmpl_T0_T1_cc();
1125 gen_op_movl_T0_Dshift(ot
);
1126 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1127 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1128 s
->cc_op
= CC_OP_SUBB
+ ot
;
1131 static inline void gen_ins(DisasContext
*s
, int ot
)
1135 gen_string_movl_A0_EDI(s
);
1136 /* Note: we must do this dummy write first to be restartable in
1137 case of page fault. */
1139 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1140 gen_op_mov_TN_reg(OT_WORD
, 1, R_EDX
);
1141 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[1]);
1142 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
1143 gen_helper_in_func(ot
, cpu_T
[0], cpu_tmp2_i32
);
1144 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1145 gen_op_movl_T0_Dshift(ot
);
1146 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1151 static inline void gen_outs(DisasContext
*s
, int ot
)
1155 gen_string_movl_A0_ESI(s
);
1156 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1158 gen_op_mov_TN_reg(OT_WORD
, 1, R_EDX
);
1159 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[1]);
1160 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
1161 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[0]);
1162 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1164 gen_op_movl_T0_Dshift(ot
);
1165 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
/* same method as Valgrind : we generate jumps to current or next
   instruction */
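/* Note (added): rather than looping inside one translation block, each REP
   iteration ends the block and jumps back to the same instruction (or falls
   through to the next one once ECX reaches zero), so interrupts and single
   stepping remain precise between iterations. */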
#define GEN_REPZ(op)                                                    \
static inline void gen_repz_ ## op(DisasContext *s, int ot,             \
                                   target_ulong cur_eip, target_ulong next_eip) \
    gen_update_cc_op(s);                                                \
    l2 = gen_jz_ecx_string(s, next_eip);                                \
    gen_ ## op(s, ot);                                                  \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                             \
    /* a loop would cause two single step exceptions if ECX = 1         \
       before rep string_insn */                                        \
    gen_op_jz_ecx(s->aflag, l2);                                        \
    gen_jmp(s, cur_eip);                                                \

#define GEN_REPZ2(op)                                                   \
static inline void gen_repz_ ## op(DisasContext *s, int ot,             \
                                   target_ulong cur_eip,                \
                                   target_ulong next_eip,               \
    gen_update_cc_op(s);                                                \
    l2 = gen_jz_ecx_string(s, next_eip);                                \
    gen_ ## op(s, ot);                                                  \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                             \
    gen_op_set_cc_op(s->cc_op);                                         \
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);                           \
    gen_op_jz_ecx(s->aflag, l2);                                        \
    gen_jmp(s, cur_eip);                                                \
    s->cc_op = CC_OP_DYNAMIC;                                           \
1215 static void gen_helper_fp_arith_ST0_FT0(int op
)
1219 gen_helper_fadd_ST0_FT0(cpu_env
);
1222 gen_helper_fmul_ST0_FT0(cpu_env
);
1225 gen_helper_fcom_ST0_FT0(cpu_env
);
1228 gen_helper_fcom_ST0_FT0(cpu_env
);
1231 gen_helper_fsub_ST0_FT0(cpu_env
);
1234 gen_helper_fsubr_ST0_FT0(cpu_env
);
1237 gen_helper_fdiv_ST0_FT0(cpu_env
);
1240 gen_helper_fdivr_ST0_FT0(cpu_env
);
1245 /* NOTE the exception in "r" op ordering */
1246 static void gen_helper_fp_arith_STN_ST0(int op
, int opreg
)
1248 TCGv_i32 tmp
= tcg_const_i32(opreg
);
1251 gen_helper_fadd_STN_ST0(cpu_env
, tmp
);
1254 gen_helper_fmul_STN_ST0(cpu_env
, tmp
);
1257 gen_helper_fsubr_STN_ST0(cpu_env
, tmp
);
1260 gen_helper_fsub_STN_ST0(cpu_env
, tmp
);
1263 gen_helper_fdivr_STN_ST0(cpu_env
, tmp
);
1266 gen_helper_fdiv_STN_ST0(cpu_env
, tmp
);
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, int ot, int d)
        gen_op_mov_TN_reg(ot, 0, d);
        gen_op_ld_T0_A0(ot + s1->mem_index);
        if (s1->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s1->cc_op);
        gen_compute_eflags_c(cpu_tmp4);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
            gen_op_mov_reg_T0(ot, d);
            gen_op_st_T0_A0(ot + s1->mem_index);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
        s1->cc_op = CC_OP_DYNAMIC;
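/* Note (added): for ADC the resulting cc_op depends on the carry-in, so it is
   chosen at run time: cpu_tmp4 holds the incoming carry (0 or 1), and shifting
   it left by 2 moves the selection from the CC_OP_ADDB..Q group to the
   CC_OP_ADCB..Q group, assuming those enum groups are laid out 4 values apart. */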
1298 if (s1
->cc_op
!= CC_OP_DYNAMIC
)
1299 gen_op_set_cc_op(s1
->cc_op
);
1300 gen_compute_eflags_c(cpu_tmp4
);
1301 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1302 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_tmp4
);
1304 gen_op_mov_reg_T0(ot
, d
);
1306 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1307 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
1308 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1309 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp4
);
1310 tcg_gen_shli_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 2);
1311 tcg_gen_addi_i32(cpu_cc_op
, cpu_tmp2_i32
, CC_OP_SUBB
+ ot
);
1312 s1
->cc_op
= CC_OP_DYNAMIC
;
1315 gen_op_addl_T0_T1();
1317 gen_op_mov_reg_T0(ot
, d
);
1319 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1320 gen_op_update2_cc();
1321 s1
->cc_op
= CC_OP_ADDB
+ ot
;
1324 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1326 gen_op_mov_reg_T0(ot
, d
);
1328 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1329 gen_op_update2_cc();
1330 s1
->cc_op
= CC_OP_SUBB
+ ot
;
1334 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1336 gen_op_mov_reg_T0(ot
, d
);
1338 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1339 gen_op_update1_cc();
1340 s1
->cc_op
= CC_OP_LOGICB
+ ot
;
1343 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1345 gen_op_mov_reg_T0(ot
, d
);
1347 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1348 gen_op_update1_cc();
1349 s1
->cc_op
= CC_OP_LOGICB
+ ot
;
1352 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1354 gen_op_mov_reg_T0(ot
, d
);
1356 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1357 gen_op_update1_cc();
1358 s1
->cc_op
= CC_OP_LOGICB
+ ot
;
1361 gen_op_cmpl_T0_T1_cc();
1362 s1
->cc_op
= CC_OP_SUBB
+ ot
;
1367 /* if d == OR_TMP0, it means memory operand (address in A0) */
1368 static void gen_inc(DisasContext
*s1
, int ot
, int d
, int c
)
1371 gen_op_mov_TN_reg(ot
, 0, d
);
1373 gen_op_ld_T0_A0(ot
+ s1
->mem_index
);
1374 if (s1
->cc_op
!= CC_OP_DYNAMIC
)
1375 gen_op_set_cc_op(s1
->cc_op
);
1376 gen_compute_eflags_c(cpu_cc_src
);
1378 tcg_gen_addi_tl(cpu_T
[0], cpu_T
[0], 1);
1379 s1
->cc_op
= CC_OP_INCB
+ ot
;
1381 tcg_gen_addi_tl(cpu_T
[0], cpu_T
[0], -1);
1382 s1
->cc_op
= CC_OP_DECB
+ ot
;
1385 gen_op_mov_reg_T0(ot
, d
);
1387 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1388 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1391 static void gen_shift_rm_T1(DisasContext
*s
, int ot
, int op1
,
1392 int is_right
, int is_arith
)
1398 if (ot
== OT_QUAD
) {
1405 if (op1
== OR_TMP0
) {
1406 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1408 gen_op_mov_TN_reg(ot
, 0, op1
);
1411 t0
= tcg_temp_local_new();
1412 t1
= tcg_temp_local_new();
1413 t2
= tcg_temp_local_new();
1415 tcg_gen_andi_tl(t2
, cpu_T
[1], mask
);
1419 gen_exts(ot
, cpu_T
[0]);
1420 tcg_gen_mov_tl(t0
, cpu_T
[0]);
1421 tcg_gen_sar_tl(cpu_T
[0], cpu_T
[0], t2
);
1423 gen_extu(ot
, cpu_T
[0]);
1424 tcg_gen_mov_tl(t0
, cpu_T
[0]);
1425 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], t2
);
1428 tcg_gen_mov_tl(t0
, cpu_T
[0]);
1429 tcg_gen_shl_tl(cpu_T
[0], cpu_T
[0], t2
);
1433 if (op1
== OR_TMP0
) {
1434 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1436 gen_op_mov_reg_T0(ot
, op1
);
1439 /* update eflags if non zero shift */
1440 if (s
->cc_op
!= CC_OP_DYNAMIC
) {
1441 gen_op_set_cc_op(s
->cc_op
);
1444 tcg_gen_mov_tl(t1
, cpu_T
[0]);
1446 shift_label
= gen_new_label();
1447 tcg_gen_brcondi_tl(TCG_COND_EQ
, t2
, 0, shift_label
);
1449 tcg_gen_addi_tl(t2
, t2
, -1);
1450 tcg_gen_mov_tl(cpu_cc_dst
, t1
);
1454 tcg_gen_sar_tl(cpu_cc_src
, t0
, t2
);
1456 tcg_gen_shr_tl(cpu_cc_src
, t0
, t2
);
1459 tcg_gen_shl_tl(cpu_cc_src
, t0
, t2
);
1463 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SARB
+ ot
);
1465 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SHLB
+ ot
);
1468 gen_set_label(shift_label
);
1469 s
->cc_op
= CC_OP_DYNAMIC
; /* cannot predict flags after */
1476 static void gen_shift_rm_im(DisasContext
*s
, int ot
, int op1
, int op2
,
1477 int is_right
, int is_arith
)
1488 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1490 gen_op_mov_TN_reg(ot
, 0, op1
);
1496 gen_exts(ot
, cpu_T
[0]);
1497 tcg_gen_sari_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1498 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], op2
);
1500 gen_extu(ot
, cpu_T
[0]);
1501 tcg_gen_shri_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1502 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], op2
);
1505 tcg_gen_shli_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1506 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], op2
);
1512 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1514 gen_op_mov_reg_T0(ot
, op1
);
1516 /* update eflags if non zero shift */
1518 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
1519 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1521 s
->cc_op
= CC_OP_SARB
+ ot
;
1523 s
->cc_op
= CC_OP_SHLB
+ ot
;
1527 static inline void tcg_gen_lshift(TCGv ret
, TCGv arg1
, target_long arg2
)
1530 tcg_gen_shli_tl(ret
, arg1
, arg2
);
1532 tcg_gen_shri_tl(ret
, arg1
, -arg2
);
1535 static void gen_rot_rm_T1(DisasContext
*s
, int ot
, int op1
,
1539 int label1
, label2
, data_bits
;
1540 TCGv t0
, t1
, t2
, a0
;
1542 /* XXX: inefficient, but we must use local temps */
1543 t0
= tcg_temp_local_new();
1544 t1
= tcg_temp_local_new();
1545 t2
= tcg_temp_local_new();
1546 a0
= tcg_temp_local_new();
1554 if (op1
== OR_TMP0
) {
1555 tcg_gen_mov_tl(a0
, cpu_A0
);
1556 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
1558 gen_op_mov_v_reg(ot
, t0
, op1
);
1561 tcg_gen_mov_tl(t1
, cpu_T
[1]);
1563 tcg_gen_andi_tl(t1
, t1
, mask
);
1565 /* Must test zero case to avoid using undefined behaviour in TCG
1567 label1
= gen_new_label();
1568 tcg_gen_brcondi_tl(TCG_COND_EQ
, t1
, 0, label1
);
1571 tcg_gen_andi_tl(cpu_tmp0
, t1
, (1 << (3 + ot
)) - 1);
1573 tcg_gen_mov_tl(cpu_tmp0
, t1
);
1576 tcg_gen_mov_tl(t2
, t0
);
1578 data_bits
= 8 << ot
;
1579 /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
1580 fix TCG definition) */
1582 tcg_gen_shr_tl(cpu_tmp4
, t0
, cpu_tmp0
);
1583 tcg_gen_subfi_tl(cpu_tmp0
, data_bits
, cpu_tmp0
);
1584 tcg_gen_shl_tl(t0
, t0
, cpu_tmp0
);
1586 tcg_gen_shl_tl(cpu_tmp4
, t0
, cpu_tmp0
);
1587 tcg_gen_subfi_tl(cpu_tmp0
, data_bits
, cpu_tmp0
);
1588 tcg_gen_shr_tl(t0
, t0
, cpu_tmp0
);
1590 tcg_gen_or_tl(t0
, t0
, cpu_tmp4
);
1592 gen_set_label(label1
);
1594 if (op1
== OR_TMP0
) {
1595 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
1597 gen_op_mov_reg_v(ot
, op1
, t0
);
1600 /* update eflags. It is needed anyway most of the time, do it always. */
1601 if (s
->cc_op
!= CC_OP_DYNAMIC
)
1602 gen_op_set_cc_op(s
->cc_op
);
1603 gen_compute_eflags(cpu_cc_src
);
1604 tcg_gen_discard_tl(cpu_cc_dst
);
1605 s
->cc_op
= CC_OP_EFLAGS
;
1607 label2
= gen_new_label();
1608 tcg_gen_brcondi_tl(TCG_COND_EQ
, t1
, 0, label2
);
1610 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~(CC_O
| CC_C
));
1611 tcg_gen_xor_tl(cpu_tmp0
, t2
, t0
);
1612 tcg_gen_lshift(cpu_tmp0
, cpu_tmp0
, 11 - (data_bits
- 1));
1613 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, CC_O
);
1614 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_tmp0
);
1616 tcg_gen_shri_tl(t0
, t0
, data_bits
- 1);
1618 tcg_gen_andi_tl(t0
, t0
, CC_C
);
1619 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t0
);
1621 gen_set_label(label2
);
1629 static void gen_rot_rm_im(DisasContext
*s
, int ot
, int op1
, int op2
,
1636 /* XXX: inefficient, but we must use local temps */
1637 t0
= tcg_temp_local_new();
1638 t1
= tcg_temp_local_new();
1639 a0
= tcg_temp_local_new();
1647 if (op1
== OR_TMP0
) {
1648 tcg_gen_mov_tl(a0
, cpu_A0
);
1649 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
1651 gen_op_mov_v_reg(ot
, t0
, op1
);
1655 tcg_gen_mov_tl(t1
, t0
);
1658 data_bits
= 8 << ot
;
1660 int shift
= op2
& ((1 << (3 + ot
)) - 1);
1662 tcg_gen_shri_tl(cpu_tmp4
, t0
, shift
);
1663 tcg_gen_shli_tl(t0
, t0
, data_bits
- shift
);
1666 tcg_gen_shli_tl(cpu_tmp4
, t0
, shift
);
1667 tcg_gen_shri_tl(t0
, t0
, data_bits
- shift
);
1669 tcg_gen_or_tl(t0
, t0
, cpu_tmp4
);
1673 if (op1
== OR_TMP0
) {
1674 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
1676 gen_op_mov_reg_v(ot
, op1
, t0
);
1681 if (s
->cc_op
!= CC_OP_DYNAMIC
)
1682 gen_op_set_cc_op(s
->cc_op
);
1684 gen_compute_eflags(cpu_cc_src
);
1685 tcg_gen_discard_tl(cpu_cc_dst
);
1686 s
->cc_op
= CC_OP_EFLAGS
;
1688 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~(CC_O
| CC_C
));
1689 tcg_gen_xor_tl(cpu_tmp0
, t1
, t0
);
1690 tcg_gen_lshift(cpu_tmp0
, cpu_tmp0
, 11 - (data_bits
- 1));
1691 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, CC_O
);
1692 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_tmp0
);
1694 tcg_gen_shri_tl(t0
, t0
, data_bits
- 1);
1696 tcg_gen_andi_tl(t0
, t0
, CC_C
);
1697 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t0
);
1705 /* XXX: add faster immediate = 1 case */
1706 static void gen_rotc_rm_T1(DisasContext
*s
, int ot
, int op1
,
1711 if (s
->cc_op
!= CC_OP_DYNAMIC
)
1712 gen_op_set_cc_op(s
->cc_op
);
1716 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1718 gen_op_mov_TN_reg(ot
, 0, op1
);
1723 gen_helper_rcrb(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1726 gen_helper_rcrw(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1729 gen_helper_rcrl(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1731 #ifdef TARGET_X86_64
1733 gen_helper_rcrq(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1740 gen_helper_rclb(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1743 gen_helper_rclw(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1746 gen_helper_rcll(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1748 #ifdef TARGET_X86_64
1750 gen_helper_rclq(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1757 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1759 gen_op_mov_reg_T0(ot
, op1
);
1762 label1
= gen_new_label();
1763 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_cc_tmp
, -1, label1
);
1765 tcg_gen_mov_tl(cpu_cc_src
, cpu_cc_tmp
);
1766 tcg_gen_discard_tl(cpu_cc_dst
);
1767 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_EFLAGS
);
1769 gen_set_label(label1
);
1770 s
->cc_op
= CC_OP_DYNAMIC
; /* cannot predict flags after */
1773 /* XXX: add faster immediate case */
1774 static void gen_shiftd_rm_T1_T3(DisasContext
*s
, int ot
, int op1
,
1777 int label1
, label2
, data_bits
;
1779 TCGv t0
, t1
, t2
, a0
;
1781 t0
= tcg_temp_local_new();
1782 t1
= tcg_temp_local_new();
1783 t2
= tcg_temp_local_new();
1784 a0
= tcg_temp_local_new();
1792 if (op1
== OR_TMP0
) {
1793 tcg_gen_mov_tl(a0
, cpu_A0
);
1794 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
1796 gen_op_mov_v_reg(ot
, t0
, op1
);
1799 tcg_gen_andi_tl(cpu_T3
, cpu_T3
, mask
);
1801 tcg_gen_mov_tl(t1
, cpu_T
[1]);
1802 tcg_gen_mov_tl(t2
, cpu_T3
);
1804 /* Must test zero case to avoid using undefined behaviour in TCG
1806 label1
= gen_new_label();
1807 tcg_gen_brcondi_tl(TCG_COND_EQ
, t2
, 0, label1
);
1809 tcg_gen_addi_tl(cpu_tmp5
, t2
, -1);
1810 if (ot
== OT_WORD
) {
1811 /* Note: we implement the Intel behaviour for shift count > 16 */
1813 tcg_gen_andi_tl(t0
, t0
, 0xffff);
1814 tcg_gen_shli_tl(cpu_tmp0
, t1
, 16);
1815 tcg_gen_or_tl(t0
, t0
, cpu_tmp0
);
1816 tcg_gen_ext32u_tl(t0
, t0
);
1818 tcg_gen_shr_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1820 /* only needed if count > 16, but a test would complicate */
1821 tcg_gen_subfi_tl(cpu_tmp5
, 32, t2
);
1822 tcg_gen_shl_tl(cpu_tmp0
, t0
, cpu_tmp5
);
1824 tcg_gen_shr_tl(t0
, t0
, t2
);
1826 tcg_gen_or_tl(t0
, t0
, cpu_tmp0
);
1828 /* XXX: not optimal */
1829 tcg_gen_andi_tl(t0
, t0
, 0xffff);
1830 tcg_gen_shli_tl(t1
, t1
, 16);
1831 tcg_gen_or_tl(t1
, t1
, t0
);
1832 tcg_gen_ext32u_tl(t1
, t1
);
1834 tcg_gen_shl_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1835 tcg_gen_subfi_tl(cpu_tmp0
, 32, cpu_tmp5
);
1836 tcg_gen_shr_tl(cpu_tmp5
, t1
, cpu_tmp0
);
1837 tcg_gen_or_tl(cpu_tmp4
, cpu_tmp4
, cpu_tmp5
);
1839 tcg_gen_shl_tl(t0
, t0
, t2
);
1840 tcg_gen_subfi_tl(cpu_tmp5
, 32, t2
);
1841 tcg_gen_shr_tl(t1
, t1
, cpu_tmp5
);
1842 tcg_gen_or_tl(t0
, t0
, t1
);
1845 data_bits
= 8 << ot
;
1848 tcg_gen_ext32u_tl(t0
, t0
);
1850 tcg_gen_shr_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1852 tcg_gen_shr_tl(t0
, t0
, t2
);
1853 tcg_gen_subfi_tl(cpu_tmp5
, data_bits
, t2
);
1854 tcg_gen_shl_tl(t1
, t1
, cpu_tmp5
);
1855 tcg_gen_or_tl(t0
, t0
, t1
);
1859 tcg_gen_ext32u_tl(t1
, t1
);
1861 tcg_gen_shl_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1863 tcg_gen_shl_tl(t0
, t0
, t2
);
1864 tcg_gen_subfi_tl(cpu_tmp5
, data_bits
, t2
);
1865 tcg_gen_shr_tl(t1
, t1
, cpu_tmp5
);
1866 tcg_gen_or_tl(t0
, t0
, t1
);
1869 tcg_gen_mov_tl(t1
, cpu_tmp4
);
1871 gen_set_label(label1
);
1873 if (op1
== OR_TMP0
) {
1874 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
1876 gen_op_mov_reg_v(ot
, op1
, t0
);
1880 if (s
->cc_op
!= CC_OP_DYNAMIC
)
1881 gen_op_set_cc_op(s
->cc_op
);
1883 label2
= gen_new_label();
1884 tcg_gen_brcondi_tl(TCG_COND_EQ
, t2
, 0, label2
);
1886 tcg_gen_mov_tl(cpu_cc_src
, t1
);
1887 tcg_gen_mov_tl(cpu_cc_dst
, t0
);
1889 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SARB
+ ot
);
1891 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SHLB
+ ot
);
1893 gen_set_label(label2
);
1894 s
->cc_op
= CC_OP_DYNAMIC
; /* cannot predict flags after */
1902 static void gen_shift(DisasContext
*s1
, int op
, int ot
, int d
, int s
)
1905 gen_op_mov_TN_reg(ot
, 1, s
);
1908 gen_rot_rm_T1(s1
, ot
, d
, 0);
1911 gen_rot_rm_T1(s1
, ot
, d
, 1);
1915 gen_shift_rm_T1(s1
, ot
, d
, 0, 0);
1918 gen_shift_rm_T1(s1
, ot
, d
, 1, 0);
1921 gen_shift_rm_T1(s1
, ot
, d
, 1, 1);
1924 gen_rotc_rm_T1(s1
, ot
, d
, 0);
1927 gen_rotc_rm_T1(s1
, ot
, d
, 1);
1932 static void gen_shifti(DisasContext
*s1
, int op
, int ot
, int d
, int c
)
1936 gen_rot_rm_im(s1
, ot
, d
, c
, 0);
1939 gen_rot_rm_im(s1
, ot
, d
, c
, 1);
1943 gen_shift_rm_im(s1
, ot
, d
, c
, 0, 0);
1946 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 0);
1949 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 1);
1952 /* currently not optimized */
1953 gen_op_movl_T1_im(c
);
1954 gen_shift(s1
, op
, ot
, d
, OR_TMP1
);
1959 static void gen_lea_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
1960 int *reg_ptr
, int *offset_ptr
)
1968 int mod
, rm
, code
, override
, must_add_seg
;
1970 override
= s
->override
;
1971 must_add_seg
= s
->addseg
;
1974 mod
= (modrm
>> 6) & 3;
1986 code
= cpu_ldub_code(env
, s
->pc
++);
1987 scale
= (code
>> 6) & 3;
1988 index
= ((code
>> 3) & 7) | REX_X(s
);
1995 if ((base
& 7) == 5) {
1997 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
1999 if (CODE64(s
) && !havesib
) {
2000 disp
+= s
->pc
+ s
->rip_offset
;
2007 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
2011 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
2017 /* for correct popl handling with esp */
2018 if (base
== 4 && s
->popl_esp_hack
)
2019 disp
+= s
->popl_esp_hack
;
2020 #ifdef TARGET_X86_64
2021 if (s
->aflag
== 2) {
2022 gen_op_movq_A0_reg(base
);
2024 gen_op_addq_A0_im(disp
);
2029 gen_op_movl_A0_reg(base
);
2031 gen_op_addl_A0_im(disp
);
2034 #ifdef TARGET_X86_64
2035 if (s
->aflag
== 2) {
2036 gen_op_movq_A0_im(disp
);
2040 gen_op_movl_A0_im(disp
);
2043 /* index == 4 means no index */
2044 if (havesib
&& (index
!= 4)) {
2045 #ifdef TARGET_X86_64
2046 if (s
->aflag
== 2) {
2047 gen_op_addq_A0_reg_sN(scale
, index
);
2051 gen_op_addl_A0_reg_sN(scale
, index
);
2056 if (base
== R_EBP
|| base
== R_ESP
)
2061 #ifdef TARGET_X86_64
2062 if (s
->aflag
== 2) {
2063 gen_op_addq_A0_seg(override
);
2067 gen_op_addl_A0_seg(s
, override
);
2074 disp
= cpu_lduw_code(env
, s
->pc
);
2076 gen_op_movl_A0_im(disp
);
2077 rm
= 0; /* avoid SS override */
2084 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
2088 disp
= cpu_lduw_code(env
, s
->pc
);
2094 gen_op_movl_A0_reg(R_EBX
);
2095 gen_op_addl_A0_reg_sN(0, R_ESI
);
2098 gen_op_movl_A0_reg(R_EBX
);
2099 gen_op_addl_A0_reg_sN(0, R_EDI
);
2102 gen_op_movl_A0_reg(R_EBP
);
2103 gen_op_addl_A0_reg_sN(0, R_ESI
);
2106 gen_op_movl_A0_reg(R_EBP
);
2107 gen_op_addl_A0_reg_sN(0, R_EDI
);
2110 gen_op_movl_A0_reg(R_ESI
);
2113 gen_op_movl_A0_reg(R_EDI
);
2116 gen_op_movl_A0_reg(R_EBP
);
2120 gen_op_movl_A0_reg(R_EBX
);
2124 gen_op_addl_A0_im(disp
);
2125 gen_op_andl_A0_ffff();
2129 if (rm
== 2 || rm
== 3 || rm
== 6)
2134 gen_op_addl_A0_seg(s
, override
);
2144 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2146 int mod
, rm
, base
, code
;
2148 mod
= (modrm
>> 6) & 3;
2158 code
= cpu_ldub_code(env
, s
->pc
++);
2194 /* used for LEA and MOV AX, mem */
2195 static void gen_add_A0_ds_seg(DisasContext
*s
)
2197 int override
, must_add_seg
;
2198 must_add_seg
= s
->addseg
;
2200 if (s
->override
>= 0) {
2201 override
= s
->override
;
2205 #ifdef TARGET_X86_64
2207 gen_op_addq_A0_seg(override
);
2211 gen_op_addl_A0_seg(s
, override
);
2216 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2218 static void gen_ldst_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2219 int ot
, int reg
, int is_store
)
2221 int mod
, rm
, opreg
, disp
;
2223 mod
= (modrm
>> 6) & 3;
2224 rm
= (modrm
& 7) | REX_B(s
);
2228 gen_op_mov_TN_reg(ot
, 0, reg
);
2229 gen_op_mov_reg_T0(ot
, rm
);
2231 gen_op_mov_TN_reg(ot
, 0, rm
);
2233 gen_op_mov_reg_T0(ot
, reg
);
2236 gen_lea_modrm(env
, s
, modrm
, &opreg
, &disp
);
2239 gen_op_mov_TN_reg(ot
, 0, reg
);
2240 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2242 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
2244 gen_op_mov_reg_T0(ot
, reg
);
2249 static inline uint32_t insn_get(CPUX86State
*env
, DisasContext
*s
, int ot
)
2255 ret
= cpu_ldub_code(env
, s
->pc
);
2259 ret
= cpu_lduw_code(env
, s
->pc
);
2264 ret
= cpu_ldl_code(env
, s
->pc
);
2271 static inline int insn_const_size(unsigned int ot
)
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
    TranslationBlock *tb;
    pc = s->cs_base + eip;
    /* NOTE: we handle the case where the TB spans two pages here */
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
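        /* Note (added): this is TCG's direct block chaining: tcg_gen_goto_tb
           emits a patchable jump slot, and the value returned via
           tcg_gen_exit_tb (the TB pointer with tb_num in its low bits) tells
           the execution loop which slot to patch once the destination TB is
           known, so same-page direct jumps bypass the TB lookup. */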
        /* jump to another page: currently not optimized */
2300 static inline void gen_jcc(DisasContext
*s
, int b
,
2301 target_ulong val
, target_ulong next_eip
)
2305 if (s
->cc_op
!= CC_OP_DYNAMIC
) {
2306 gen_op_set_cc_op(s
->cc_op
);
2309 l1
= gen_new_label();
2311 s
->cc_op
= CC_OP_DYNAMIC
;
2313 gen_goto_tb(s
, 0, next_eip
);
2316 gen_goto_tb(s
, 1, val
);
2317 s
->is_jmp
= DISAS_TB_JUMP
;
2320 l1
= gen_new_label();
2321 l2
= gen_new_label();
2323 s
->cc_op
= CC_OP_DYNAMIC
;
2325 gen_jmp_im(next_eip
);
2335 static void gen_setcc(DisasContext
*s
, int b
)
2337 int inv
, jcc_op
, l1
;
2340 if (is_fast_jcc_case(s
, b
)) {
2341 /* nominal case: we use a jump */
2342 /* XXX: make it faster by adding new instructions in TCG */
2343 t0
= tcg_temp_local_new();
2344 tcg_gen_movi_tl(t0
, 0);
2345 l1
= gen_new_label();
2346 gen_jcc1(s
, b
^ 1, l1
);
2347 tcg_gen_movi_tl(t0
, 1);
2349 tcg_gen_mov_tl(cpu_T
[0], t0
);
2352 /* slow case: it is more efficient not to generate a jump,
2353 although it is questionnable whether this optimization is
2356 jcc_op
= (b
>> 1) & 7;
2357 gen_setcc_slow_T0(s
, jcc_op
);
2359 tcg_gen_xori_tl(cpu_T
[0], cpu_T
[0], 1);
2364 static inline void gen_op_movl_T0_seg(int seg_reg
)
2366 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
2367 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2370 static inline void gen_op_movl_seg_T0_vm(int seg_reg
)
2372 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xffff);
2373 tcg_gen_st32_tl(cpu_T
[0], cpu_env
,
2374 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2375 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], 4);
2376 tcg_gen_st_tl(cpu_T
[0], cpu_env
,
2377 offsetof(CPUX86State
,segs
[seg_reg
].base
));
2380 /* move T0 to seg_reg and compute if the CPU state may change. Never
2381 call this function with seg_reg == R_CS */
2382 static void gen_movl_seg_T0(DisasContext
*s
, int seg_reg
, target_ulong cur_eip
)
2384 if (s
->pe
&& !s
->vm86
) {
2385 /* XXX: optimize by finding processor state dynamically */
2386 if (s
->cc_op
!= CC_OP_DYNAMIC
)
2387 gen_op_set_cc_op(s
->cc_op
);
2388 gen_jmp_im(cur_eip
);
2389 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
2390 gen_helper_load_seg(cpu_env
, tcg_const_i32(seg_reg
), cpu_tmp2_i32
);
2391 /* abort translation because the addseg value may change or
2392 because ss32 may change. For R_SS, translation must always
2393 stop as a special handling must be done to disable hardware
2394 interrupts for the next instruction */
2395 if (seg_reg
== R_SS
|| (s
->code32
&& seg_reg
< R_FS
))
2396 s
->is_jmp
= DISAS_TB_JUMP
;
2398 gen_op_movl_seg_T0_vm(seg_reg
);
2399 if (seg_reg
== R_SS
)
2400 s
->is_jmp
= DISAS_TB_JUMP
;
2404 static inline int svm_is_rep(int prefixes
)
2406 return ((prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) ? 8 : 0);
2410 gen_svm_check_intercept_param(DisasContext
*s
, target_ulong pc_start
,
2411 uint32_t type
, uint64_t param
)
2413 /* no SVM activated; fast case */
2414 if (likely(!(s
->flags
& HF_SVMI_MASK
)))
2416 if (s
->cc_op
!= CC_OP_DYNAMIC
)
2417 gen_op_set_cc_op(s
->cc_op
);
2418 gen_jmp_im(pc_start
- s
->cs_base
);
2419 gen_helper_svm_check_intercept_param(cpu_env
, tcg_const_i32(type
),
2420 tcg_const_i64(param
));
2424 gen_svm_check_intercept(DisasContext
*s
, target_ulong pc_start
, uint64_t type
)
2426 gen_svm_check_intercept_param(s
, pc_start
, type
, 0);
2429 static inline void gen_stack_update(DisasContext
*s
, int addend
)
2431 #ifdef TARGET_X86_64
2433 gen_op_add_reg_im(2, R_ESP
, addend
);
2437 gen_op_add_reg_im(1, R_ESP
, addend
);
2439 gen_op_add_reg_im(0, R_ESP
, addend
);
2443 /* generate a push. It depends on ss32, addseg and dflag */
2444 static void gen_push_T0(DisasContext
*s
)
2446 #ifdef TARGET_X86_64
2448 gen_op_movq_A0_reg(R_ESP
);
2450 gen_op_addq_A0_im(-8);
2451 gen_op_st_T0_A0(OT_QUAD
+ s
->mem_index
);
2453 gen_op_addq_A0_im(-2);
2454 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
2456 gen_op_mov_reg_A0(2, R_ESP
);
2460 gen_op_movl_A0_reg(R_ESP
);
2462 gen_op_addl_A0_im(-2);
2464 gen_op_addl_A0_im(-4);
2467 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2468 gen_op_addl_A0_seg(s
, R_SS
);
2471 gen_op_andl_A0_ffff();
2472 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2473 gen_op_addl_A0_seg(s
, R_SS
);
2475 gen_op_st_T0_A0(s
->dflag
+ 1 + s
->mem_index
);
2476 if (s
->ss32
&& !s
->addseg
)
2477 gen_op_mov_reg_A0(1, R_ESP
);
2479 gen_op_mov_reg_T1(s
->ss32
+ 1, R_ESP
);
2483 /* generate a push. It depends on ss32, addseg and dflag */
2484 /* slower version for T1, only used for call Ev */
2485 static void gen_push_T1(DisasContext
*s
)
2487 #ifdef TARGET_X86_64
2489 gen_op_movq_A0_reg(R_ESP
);
2491 gen_op_addq_A0_im(-8);
2492 gen_op_st_T1_A0(OT_QUAD
+ s
->mem_index
);
2494 gen_op_addq_A0_im(-2);
2495 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
2497 gen_op_mov_reg_A0(2, R_ESP
);
2501 gen_op_movl_A0_reg(R_ESP
);
2503 gen_op_addl_A0_im(-2);
2505 gen_op_addl_A0_im(-4);
2508 gen_op_addl_A0_seg(s
, R_SS
);
2511 gen_op_andl_A0_ffff();
2512 gen_op_addl_A0_seg(s
, R_SS
);
2514 gen_op_st_T1_A0(s
->dflag
+ 1 + s
->mem_index
);
2516 if (s
->ss32
&& !s
->addseg
)
2517 gen_op_mov_reg_A0(1, R_ESP
);
2519 gen_stack_update(s
, (-2) << s
->dflag
);
2523 /* two step pop is necessary for precise exceptions */
2524 static void gen_pop_T0(DisasContext
*s
)
2526 #ifdef TARGET_X86_64
2528 gen_op_movq_A0_reg(R_ESP
);
2529 gen_op_ld_T0_A0((s
->dflag
? OT_QUAD
: OT_WORD
) + s
->mem_index
);
2533 gen_op_movl_A0_reg(R_ESP
);
2536 gen_op_addl_A0_seg(s
, R_SS
);
2538 gen_op_andl_A0_ffff();
2539 gen_op_addl_A0_seg(s
, R_SS
);
2541 gen_op_ld_T0_A0(s
->dflag
+ 1 + s
->mem_index
);
2545 static void gen_pop_update(DisasContext
*s
)
2547 #ifdef TARGET_X86_64
2548 if (CODE64(s
) && s
->dflag
) {
2549 gen_stack_update(s
, 8);
2553 gen_stack_update(s
, 2 << s
->dflag
);
2557 static void gen_stack_A0(DisasContext
*s
)
2559 gen_op_movl_A0_reg(R_ESP
);
2561 gen_op_andl_A0_ffff();
2562 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2564 gen_op_addl_A0_seg(s
, R_SS
);
2567 /* NOTE: wrap around in 16 bit not fully handled */
2568 static void gen_pusha(DisasContext
*s
)
2571 gen_op_movl_A0_reg(R_ESP
);
2572 gen_op_addl_A0_im(-16 << s
->dflag
);
2574 gen_op_andl_A0_ffff();
2575 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2577 gen_op_addl_A0_seg(s
, R_SS
);
2578 for(i
= 0;i
< 8; i
++) {
2579 gen_op_mov_TN_reg(OT_LONG
, 0, 7 - i
);
2580 gen_op_st_T0_A0(OT_WORD
+ s
->dflag
+ s
->mem_index
);
2581 gen_op_addl_A0_im(2 << s
->dflag
);
2583 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2586 /* NOTE: wrap around in 16 bit not fully handled */
2587 static void gen_popa(DisasContext
*s
)
2590 gen_op_movl_A0_reg(R_ESP
);
2592 gen_op_andl_A0_ffff();
2593 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2594 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], 16 << s
->dflag
);
2596 gen_op_addl_A0_seg(s
, R_SS
);
2597 for(i
= 0;i
< 8; i
++) {
2598 /* ESP is not reloaded */
2600 gen_op_ld_T0_A0(OT_WORD
+ s
->dflag
+ s
->mem_index
);
2601 gen_op_mov_reg_T0(OT_WORD
+ s
->dflag
, 7 - i
);
2603 gen_op_addl_A0_im(2 << s
->dflag
);
2605 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2608 static void gen_enter(DisasContext
*s
, int esp_addend
, int level
)
2613 #ifdef TARGET_X86_64
2615 ot
= s
->dflag
? OT_QUAD
: OT_WORD
;
2618 gen_op_movl_A0_reg(R_ESP
);
2619 gen_op_addq_A0_im(-opsize
);
2620 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2623 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
2624 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2626 /* XXX: must save state */
2627 gen_helper_enter64_level(cpu_env
, tcg_const_i32(level
),
2628 tcg_const_i32((ot
== OT_QUAD
)),
2631 gen_op_mov_reg_T1(ot
, R_EBP
);
2632 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], -esp_addend
+ (-opsize
* level
));
2633 gen_op_mov_reg_T1(OT_QUAD
, R_ESP
);
2637 ot
= s
->dflag
+ OT_WORD
;
2638 opsize
= 2 << s
->dflag
;
2640 gen_op_movl_A0_reg(R_ESP
);
2641 gen_op_addl_A0_im(-opsize
);
2643 gen_op_andl_A0_ffff();
2644 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2646 gen_op_addl_A0_seg(s
, R_SS
);
2648 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
2649 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2651 /* XXX: must save state */
2652 gen_helper_enter_level(cpu_env
, tcg_const_i32(level
),
2653 tcg_const_i32(s
->dflag
),
2656 gen_op_mov_reg_T1(ot
, R_EBP
);
2657 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], -esp_addend
+ (-opsize
* level
));
2658 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2662 static void gen_exception(DisasContext
*s
, int trapno
, target_ulong cur_eip
)
2664 if (s
->cc_op
!= CC_OP_DYNAMIC
)
2665 gen_op_set_cc_op(s
->cc_op
);
2666 gen_jmp_im(cur_eip
);
2667 gen_helper_raise_exception(cpu_env
, tcg_const_i32(trapno
));
2668 s
->is_jmp
= DISAS_TB_JUMP
;
2671 /* an interrupt is different from an exception because of the
2673 static void gen_interrupt(DisasContext
*s
, int intno
,
2674 target_ulong cur_eip
, target_ulong next_eip
)
2676 if (s
->cc_op
!= CC_OP_DYNAMIC
)
2677 gen_op_set_cc_op(s
->cc_op
);
2678 gen_jmp_im(cur_eip
);
2679 gen_helper_raise_interrupt(cpu_env
, tcg_const_i32(intno
),
2680 tcg_const_i32(next_eip
- cur_eip
));
2681 s
->is_jmp
= DISAS_TB_JUMP
;
2684 static void gen_debug(DisasContext
*s
, target_ulong cur_eip
)
2686 if (s
->cc_op
!= CC_OP_DYNAMIC
)
2687 gen_op_set_cc_op(s
->cc_op
);
2688 gen_jmp_im(cur_eip
);
2689 gen_helper_debug(cpu_env
);
2690 s
->is_jmp
= DISAS_TB_JUMP
;
2693 /* generate a generic end of block. Trace exception is also generated
2695 static void gen_eob(DisasContext
*s
)
2697 if (s
->cc_op
!= CC_OP_DYNAMIC
)
2698 gen_op_set_cc_op(s
->cc_op
);
2699 if (s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
) {
2700 gen_helper_reset_inhibit_irq(cpu_env
);
2702 if (s
->tb
->flags
& HF_RF_MASK
) {
2703 gen_helper_reset_rf(cpu_env
);
2705 if (s
->singlestep_enabled
) {
2706 gen_helper_debug(cpu_env
);
2708 gen_helper_single_step(cpu_env
);
2712 s
->is_jmp
= DISAS_TB_JUMP
;
2715 /* generate a jump to eip. No segment change must happen before as a
2716 direct call to the next block may occur */
2717 static void gen_jmp_tb(DisasContext
*s
, target_ulong eip
, int tb_num
)
2720 gen_update_cc_op(s
);
2721 gen_goto_tb(s
, tb_num
, eip
);
2722 s
->is_jmp
= DISAS_TB_JUMP
;
2729 static void gen_jmp(DisasContext
*s
, target_ulong eip
)
2731 gen_jmp_tb(s
, eip
, 0);
2734 static inline void gen_ldq_env_A0(int idx
, int offset
)
2736 int mem_index
= (idx
>> 2) - 1;
2737 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2738 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2741 static inline void gen_stq_env_A0(int idx
, int offset
)
2743 int mem_index
= (idx
>> 2) - 1;
2744 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2745 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2748 static inline void gen_ldo_env_A0(int idx
, int offset
)
2750 int mem_index
= (idx
>> 2) - 1;
2751 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2752 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2753 tcg_gen_addi_tl(cpu_tmp0
, cpu_A0
, 8);
2754 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_tmp0
, mem_index
);
2755 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2758 static inline void gen_sto_env_A0(int idx
, int offset
)
2760 int mem_index
= (idx
>> 2) - 1;
2761 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2762 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2763 tcg_gen_addi_tl(cpu_tmp0
, cpu_A0
, 8);
2764 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2765 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_tmp0
, mem_index
);
2768 static inline void gen_op_movo(int d_offset
, int s_offset
)
2770 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
);
2771 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2772 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
+ 8);
2773 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
+ 8);
2776 static inline void gen_op_movq(int d_offset
, int s_offset
)
2778 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
);
2779 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2782 static inline void gen_op_movl(int d_offset
, int s_offset
)
2784 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
, s_offset
);
2785 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, d_offset
);
2788 static inline void gen_op_movq_env_0(int d_offset
)
2790 tcg_gen_movi_i64(cpu_tmp1_i64
, 0);
2791 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2794 typedef void (*SSEFunc_i_ep
)(TCGv_i32 val
, TCGv_ptr env
, TCGv_ptr reg
);
2795 typedef void (*SSEFunc_l_ep
)(TCGv_i64 val
, TCGv_ptr env
, TCGv_ptr reg
);
2796 typedef void (*SSEFunc_0_epi
)(TCGv_ptr env
, TCGv_ptr reg
, TCGv_i32 val
);
2797 typedef void (*SSEFunc_0_epl
)(TCGv_ptr env
, TCGv_ptr reg
, TCGv_i64 val
);
2798 typedef void (*SSEFunc_0_epp
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
);
2799 typedef void (*SSEFunc_0_eppi
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
,
2801 typedef void (*SSEFunc_0_ppi
)(TCGv_ptr reg_a
, TCGv_ptr reg_b
, TCGv_i32 val
);
2802 typedef void (*SSEFunc_0_eppt
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
,
#define SSE_SPECIAL ((void *)1)
#define SSE_DUMMY ((void *)2)

#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
                     gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
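/* Note (added): sse_op_table1 below is indexed first by the second opcode byte
   and then by the mandatory prefix (none, 0x66, 0xF3, 0xF2 in that order),
   which is why SSE_FOP lists the ps/pd/ss/sd variants; entries marked
   SSE_SPECIAL are handled case by case in the SSE decoder instead of through
   a helper pointer. */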
static const SSEFunc_0_epp sse_op_table1[256][4] = {
2813 /* 3DNow! extensions */
2814 [0x0e] = { SSE_DUMMY
}, /* femms */
2815 [0x0f] = { SSE_DUMMY
}, /* pf... */
2816 /* pure SSE operations */
2817 [0x10] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movups, movupd, movss, movsd */
2818 [0x11] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movups, movupd, movss, movsd */
2819 [0x12] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movlps, movlpd, movsldup, movddup */
2820 [0x13] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movlps, movlpd */
2821 [0x14] = { gen_helper_punpckldq_xmm
, gen_helper_punpcklqdq_xmm
},
2822 [0x15] = { gen_helper_punpckhdq_xmm
, gen_helper_punpckhqdq_xmm
},
2823 [0x16] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movhps, movhpd, movshdup */
2824 [0x17] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movhps, movhpd */
2826 [0x28] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movaps, movapd */
2827 [0x29] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movaps, movapd */
2828 [0x2a] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2829 [0x2b] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movntps, movntpd, movntss, movntsd */
2830 [0x2c] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2831 [0x2d] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
2832 [0x2e] = { gen_helper_ucomiss
, gen_helper_ucomisd
},
2833 [0x2f] = { gen_helper_comiss
, gen_helper_comisd
},
2834 [0x50] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movmskps, movmskpd */
2835 [0x51] = SSE_FOP(sqrt
),
2836 [0x52] = { gen_helper_rsqrtps
, NULL
, gen_helper_rsqrtss
, NULL
},
2837 [0x53] = { gen_helper_rcpps
, NULL
, gen_helper_rcpss
, NULL
},
2838 [0x54] = { gen_helper_pand_xmm
, gen_helper_pand_xmm
}, /* andps, andpd */
2839 [0x55] = { gen_helper_pandn_xmm
, gen_helper_pandn_xmm
}, /* andnps, andnpd */
2840 [0x56] = { gen_helper_por_xmm
, gen_helper_por_xmm
}, /* orps, orpd */
2841 [0x57] = { gen_helper_pxor_xmm
, gen_helper_pxor_xmm
}, /* xorps, xorpd */
2842 [0x58] = SSE_FOP(add
),
2843 [0x59] = SSE_FOP(mul
),
2844 [0x5a] = { gen_helper_cvtps2pd
, gen_helper_cvtpd2ps
,
2845 gen_helper_cvtss2sd
, gen_helper_cvtsd2ss
},
2846 [0x5b] = { gen_helper_cvtdq2ps
, gen_helper_cvtps2dq
, gen_helper_cvttps2dq
},
2847 [0x5c] = SSE_FOP(sub
),
2848 [0x5d] = SSE_FOP(min
),
2849 [0x5e] = SSE_FOP(div
),
2850 [0x5f] = SSE_FOP(max
),
2852 [0xc2] = SSE_FOP(cmpeq
),
2853 [0xc6] = { (SSEFunc_0_epp
)gen_helper_shufps
,
2854 (SSEFunc_0_epp
)gen_helper_shufpd
}, /* XXX: casts */
2856 [0x38] = { SSE_SPECIAL
, SSE_SPECIAL
, NULL
, SSE_SPECIAL
}, /* SSSE3/SSE4 */
2857 [0x3a] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* SSSE3/SSE4 */
2859 /* MMX ops and their SSE extensions */
2860 [0x60] = MMX_OP2(punpcklbw
),
2861 [0x61] = MMX_OP2(punpcklwd
),
2862 [0x62] = MMX_OP2(punpckldq
),
2863 [0x63] = MMX_OP2(packsswb
),
2864 [0x64] = MMX_OP2(pcmpgtb
),
2865 [0x65] = MMX_OP2(pcmpgtw
),
2866 [0x66] = MMX_OP2(pcmpgtl
),
2867 [0x67] = MMX_OP2(packuswb
),
2868 [0x68] = MMX_OP2(punpckhbw
),
2869 [0x69] = MMX_OP2(punpckhwd
),
2870 [0x6a] = MMX_OP2(punpckhdq
),
2871 [0x6b] = MMX_OP2(packssdw
),
2872 [0x6c] = { NULL
, gen_helper_punpcklqdq_xmm
},
2873 [0x6d] = { NULL
, gen_helper_punpckhqdq_xmm
},
2874 [0x6e] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movd mm, ea */
    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
               (SSEFunc_0_epp)gen_helper_pshufd_xmm,
               (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
               (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
    [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
    [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
    [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
    [0x74] = MMX_OP2(pcmpeqb),
    [0x75] = MMX_OP2(pcmpeqw),
    [0x76] = MMX_OP2(pcmpeql),
    [0x77] = { SSE_DUMMY }, /* emms */
    [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
    [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
    [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
    [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
    [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
    [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
    [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
    [0xd1] = MMX_OP2(psrlw),
    [0xd2] = MMX_OP2(psrld),
    [0xd3] = MMX_OP2(psrlq),
    [0xd4] = MMX_OP2(paddq),
    [0xd5] = MMX_OP2(pmullw),
    [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
    [0xd8] = MMX_OP2(psubusb),
    [0xd9] = MMX_OP2(psubusw),
    [0xda] = MMX_OP2(pminub),
    [0xdb] = MMX_OP2(pand),
    [0xdc] = MMX_OP2(paddusb),
    [0xdd] = MMX_OP2(paddusw),
    [0xde] = MMX_OP2(pmaxub),
    [0xdf] = MMX_OP2(pandn),
    [0xe0] = MMX_OP2(pavgb),
    [0xe1] = MMX_OP2(psraw),
    [0xe2] = MMX_OP2(psrad),
    [0xe3] = MMX_OP2(pavgw),
    [0xe4] = MMX_OP2(pmulhuw),
    [0xe5] = MMX_OP2(pmulhw),
    [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
    [0xe8] = MMX_OP2(psubsb),
    [0xe9] = MMX_OP2(psubsw),
    [0xea] = MMX_OP2(pminsw),
    [0xeb] = MMX_OP2(por),
    [0xec] = MMX_OP2(paddsb),
    [0xed] = MMX_OP2(paddsw),
    [0xee] = MMX_OP2(pmaxsw),
    [0xef] = MMX_OP2(pxor),
    [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
    [0xf1] = MMX_OP2(psllw),
    [0xf2] = MMX_OP2(pslld),
    [0xf3] = MMX_OP2(psllq),
    [0xf4] = MMX_OP2(pmuludq),
    [0xf5] = MMX_OP2(pmaddwd),
    [0xf6] = MMX_OP2(psadbw),
    [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
               (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
    [0xf8] = MMX_OP2(psubb),
    [0xf9] = MMX_OP2(psubw),
    [0xfa] = MMX_OP2(psubl),
    [0xfb] = MMX_OP2(psubq),
    [0xfc] = MMX_OP2(paddb),
    [0xfd] = MMX_OP2(paddw),
    [0xfe] = MMX_OP2(paddl),
};
static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
    [0 + 2] = MMX_OP2(psrlw),
    [0 + 4] = MMX_OP2(psraw),
    [0 + 6] = MMX_OP2(psllw),
    [8 + 2] = MMX_OP2(psrld),
    [8 + 4] = MMX_OP2(psrad),
    [8 + 6] = MMX_OP2(pslld),
    [16 + 2] = MMX_OP2(psrlq),
    [16 + 3] = { NULL, gen_helper_psrldq_xmm },
    [16 + 6] = MMX_OP2(psllq),
    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
};
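/* sse_op_table2 is indexed by ((b - 1) & 3) * 8 + modrm.reg (see gen_sse):
   opcodes 0x71/0x72/0x73 pick the w/d/q row group and the /reg field of
   the modrm byte picks the shift operation. */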
static const SSEFunc_0_epi sse_op_table3ai[] = {
    gen_helper_cvtsi2ss,
    gen_helper_cvtsi2sd
};

#ifdef TARGET_X86_64
static const SSEFunc_0_epl sse_op_table3aq[] = {
    gen_helper_cvtsq2ss,
    gen_helper_cvtsq2sd
};
#endif

static const SSEFunc_i_ep sse_op_table3bi[] = {
    gen_helper_cvttss2si,
    gen_helper_cvtss2si,
    gen_helper_cvttsd2si,
    gen_helper_cvtsd2si
};

#ifdef TARGET_X86_64
static const SSEFunc_l_ep sse_op_table3bq[] = {
    gen_helper_cvttss2sq,
    gen_helper_cvtss2sq,
    gen_helper_cvttsd2sq,
    gen_helper_cvtsd2sq
};
#endif
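/* The 3b tables are indexed by ((b >> 7) & 2) | (b & 1): the low opcode
   bit distinguishes cvtt (0x2c) from cvt (0x2d), and the prefix page
   folded into b distinguishes the ss (0xf3) and sd (0xf2) forms. */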
static const SSEFunc_0_epp sse_op_table4[8][4] = {
    SSE_FOP(cmpeq),
    SSE_FOP(cmplt),
    SSE_FOP(cmple),
    SSE_FOP(cmpunord),
    SSE_FOP(cmpneq),
    SSE_FOP(cmpnlt),
    SSE_FOP(cmpnle),
    SSE_FOP(cmpord),
};
static const SSEFunc_0_epp sse_op_table5[256] = {
    [0x0c] = gen_helper_pi2fw,
    [0x0d] = gen_helper_pi2fd,
    [0x1c] = gen_helper_pf2iw,
    [0x1d] = gen_helper_pf2id,
    [0x8a] = gen_helper_pfnacc,
    [0x8e] = gen_helper_pfpnacc,
    [0x90] = gen_helper_pfcmpge,
    [0x94] = gen_helper_pfmin,
    [0x96] = gen_helper_pfrcp,
    [0x97] = gen_helper_pfrsqrt,
    [0x9a] = gen_helper_pfsub,
    [0x9e] = gen_helper_pfadd,
    [0xa0] = gen_helper_pfcmpgt,
    [0xa4] = gen_helper_pfmax,
    [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
    [0xa7] = gen_helper_movq, /* pfrsqit1 */
    [0xaa] = gen_helper_pfsubr,
    [0xae] = gen_helper_pfacc,
    [0xb0] = gen_helper_pfcmpeq,
    [0xb4] = gen_helper_pfmul,
    [0xb6] = gen_helper_movq, /* pfrcpit2 */
    [0xb7] = gen_helper_pmulhrw_mmx,
    [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgb_mmx, /* pavgusb */
};
struct SSEOpHelper_epp {
    SSEFunc_0_epp op[2];
    uint32_t ext_mask;
};

struct SSEOpHelper_eppi {
    SSEFunc_0_eppi op[2];
    uint32_t ext_mask;
};
#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
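/* The ext_mask field gates each table entry on a CPUID feature bit;
   gen_sse() checks it against s->cpuid_ext_features before dispatching. */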
static const struct SSEOpHelper_epp sse_op_table6[256] = {
    [0x00] = SSSE3_OP(pshufb),
    [0x01] = SSSE3_OP(phaddw),
    [0x02] = SSSE3_OP(phaddd),
    [0x03] = SSSE3_OP(phaddsw),
    [0x04] = SSSE3_OP(pmaddubsw),
    [0x05] = SSSE3_OP(phsubw),
    [0x06] = SSSE3_OP(phsubd),
    [0x07] = SSSE3_OP(phsubsw),
    [0x08] = SSSE3_OP(psignb),
    [0x09] = SSSE3_OP(psignw),
    [0x0a] = SSSE3_OP(psignd),
    [0x0b] = SSSE3_OP(pmulhrsw),
    [0x10] = SSE41_OP(pblendvb),
    [0x14] = SSE41_OP(blendvps),
    [0x15] = SSE41_OP(blendvpd),
    [0x17] = SSE41_OP(ptest),
    [0x1c] = SSSE3_OP(pabsb),
    [0x1d] = SSSE3_OP(pabsw),
    [0x1e] = SSSE3_OP(pabsd),
    [0x20] = SSE41_OP(pmovsxbw),
    [0x21] = SSE41_OP(pmovsxbd),
    [0x22] = SSE41_OP(pmovsxbq),
    [0x23] = SSE41_OP(pmovsxwd),
    [0x24] = SSE41_OP(pmovsxwq),
    [0x25] = SSE41_OP(pmovsxdq),
    [0x28] = SSE41_OP(pmuldq),
    [0x29] = SSE41_OP(pcmpeqq),
    [0x2a] = SSE41_SPECIAL, /* movntdqa */
    [0x2b] = SSE41_OP(packusdw),
    [0x30] = SSE41_OP(pmovzxbw),
    [0x31] = SSE41_OP(pmovzxbd),
    [0x32] = SSE41_OP(pmovzxbq),
    [0x33] = SSE41_OP(pmovzxwd),
    [0x34] = SSE41_OP(pmovzxwq),
    [0x35] = SSE41_OP(pmovzxdq),
    [0x37] = SSE42_OP(pcmpgtq),
    [0x38] = SSE41_OP(pminsb),
    [0x39] = SSE41_OP(pminsd),
    [0x3a] = SSE41_OP(pminuw),
    [0x3b] = SSE41_OP(pminud),
    [0x3c] = SSE41_OP(pmaxsb),
    [0x3d] = SSE41_OP(pmaxsd),
    [0x3e] = SSE41_OP(pmaxuw),
    [0x3f] = SSE41_OP(pmaxud),
    [0x40] = SSE41_OP(pmulld),
    [0x41] = SSE41_OP(phminposuw),
};
static const struct SSEOpHelper_eppi sse_op_table7[256] = {
    [0x08] = SSE41_OP(roundps),
    [0x09] = SSE41_OP(roundpd),
    [0x0a] = SSE41_OP(roundss),
    [0x0b] = SSE41_OP(roundsd),
    [0x0c] = SSE41_OP(blendps),
    [0x0d] = SSE41_OP(blendpd),
    [0x0e] = SSE41_OP(pblendw),
    [0x0f] = SSSE3_OP(palignr),
    [0x14] = SSE41_SPECIAL, /* pextrb */
    [0x15] = SSE41_SPECIAL, /* pextrw */
    [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
    [0x17] = SSE41_SPECIAL, /* extractps */
    [0x20] = SSE41_SPECIAL, /* pinsrb */
    [0x21] = SSE41_SPECIAL, /* insertps */
    [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
    [0x40] = SSE41_OP(dpps),
    [0x41] = SSE41_OP(dppd),
    [0x42] = SSE41_OP(mpsadbw),
    [0x60] = SSE42_OP(pcmpestrm),
    [0x61] = SSE42_OP(pcmpestri),
    [0x62] = SSE42_OP(pcmpistrm),
    [0x63] = SSE42_OP(pcmpistri),
};
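/* gen_sse() receives the opcode with bit 8 set for the 0x0f-escaped page
   (see disas_insn below); inside the SSE_SPECIAL dispatch the prefix
   selector b1 is additionally folded into bits 8-9, which is why the case
   labels there look like 0x210 (0xf3-prefixed 0x10) or 0x1e7 (0x66-prefixed
   0xe7). */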
static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                    target_ulong pc_start, int rex_r)
{
    int b1, op1_offset, op2_offset, is_xmm, val, ot;
    int modrm, mod, rm, reg, reg_addr, offset_addr;
    SSEFunc_0_epp sse_fn_epp;
    SSEFunc_0_eppi sse_fn_eppi;
    SSEFunc_0_ppi sse_fn_ppi;
    SSEFunc_0_eppt sse_fn_eppt;
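    /* The sse_fn_* locals hold the looked-up table entry; the ppi and eppt
       call shapes are obtained by casting sse_fn_epp further down (hence
       the "XXX: casts" remarks in the tables). */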
    if (s->prefix & PREFIX_DATA)
        b1 = 1;
    else if (s->prefix & PREFIX_REPZ)
        b1 = 2;
    else if (s->prefix & PREFIX_REPNZ)
        b1 = 3;
    else
        b1 = 0;
    sse_fn_epp = sse_op_table1[b][b1];
    if (!sse_fn_epp) {
        goto illegal_op;
    }
    if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
        is_xmm = 1;
    } else {
        if (b1 == 0) {
            /* MMX case */
            is_xmm = 0;
        } else {
            is_xmm = 1;
        }
    }
    /* simple MMX/SSE operation */
    if (s->flags & HF_TS_MASK) {
        gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        return;
    }
    if (s->flags & HF_EM_MASK) {
    illegal_op:
        gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
        return;
    }
    if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
        if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
            goto illegal_op;
    if (b == 0x0e) {
        if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
            goto illegal_op;
        /* femms */
        gen_helper_emms(cpu_env);
        return;
    }
    if (b == 0x77) {
        /* emms */
        gen_helper_emms(cpu_env);
        return;
    }
    /* prepare MMX state (XXX: optimize by storing fptt and fptags in
       the static cpu state) */
    if (!is_xmm) {
        gen_helper_enter_mmx(cpu_env);
    }

    modrm = cpu_ldub_code(env, s->pc++);
    reg = ((modrm >> 3) & 7);
    if (is_xmm)
        reg |= rex_r;
    mod = (modrm >> 6) & 3;
    if (sse_fn_epp == SSE_SPECIAL) {
        b |= (b1 << 8);
        switch (b) {
        case 0x0e7: /* movntq */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
            break;
        case 0x1e7: /* movntdq */
        case 0x02b: /* movntps */
        case 0x12b: /* movntpd */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            break;
        case 0x3f0: /* lddqu */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            break;
        case 0x22b: /* movntss */
        case 0x32b: /* movntsd */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            if (b1 & 1) {
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
                    xmm_regs[reg].XMM_Q(0)));
            } else {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                    xmm_regs[reg].XMM_L(0)));
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
            }
            break;
        case 0x6e: /* movd mm, ea */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
                tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
            } else
#endif
            {
                gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,fpregs[reg].mmx));
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
            }
            break;
        case 0x16e: /* movd xmm, ea */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg]));
                gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
            } else
#endif
            {
                gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg]));
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
            }
            break;
        case 0x6f: /* movq mm, ea */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
            } else {
                rm = (modrm & 7);
                tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
                               offsetof(CPUX86State,fpregs[rm].mmx));
                tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
                               offsetof(CPUX86State,fpregs[reg].mmx));
            }
            break;
        case 0x010: /* movups */
        case 0x110: /* movupd */
        case 0x028: /* movaps */
        case 0x128: /* movapd */
        case 0x16f: /* movdqa xmm, ea */
        case 0x26f: /* movdqu xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
                            offsetof(CPUX86State,xmm_regs[rm]));
            }
            break;
        case 0x210: /* movss xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
            }
            break;
        case 0x310: /* movsd xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            break;
        case 0x012: /* movlps */
        case 0x112: /* movlpd */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
            }
            break;
        case 0x212: /* movsldup */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
            }
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
            break;
        case 0x312: /* movddup */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            break;
        case 0x016: /* movhps */
        case 0x116: /* movhpd */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            break;
        case 0x216: /* movshdup */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
            }
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
            break;
        case 0x178:
        case 0x378:
            {
                int bit_index, field_length;

                if (b1 == 1 && reg != 0)
                    goto illegal_op;
                field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
                bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg]));
                if (b1 == 1) {
                    gen_helper_extrq_i(cpu_env, cpu_ptr0,
                                       tcg_const_i32(bit_index),
                                       tcg_const_i32(field_length));
                } else {
                    gen_helper_insertq_i(cpu_env, cpu_ptr0,
                                         tcg_const_i32(bit_index),
                                         tcg_const_i32(field_length));
                }
            }
            break;
        case 0x7e: /* movd ea, mm */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                tcg_gen_ld_i64(cpu_T[0], cpu_env,
                               offsetof(CPUX86State,fpregs[reg].mmx));
                gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
            } else
#endif
            {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
                gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
            }
            break;
        case 0x17e: /* movd ea, xmm */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                tcg_gen_ld_i64(cpu_T[0], cpu_env,
                               offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
            } else
#endif
            {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
            }
            break;
        case 0x27e: /* movq xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            break;
        case 0x7f: /* movq ea, mm */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
            } else {
                rm = (modrm & 7);
                gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
                            offsetof(CPUX86State,fpregs[reg].mmx));
            }
            break;
        case 0x011: /* movups */
        case 0x111: /* movupd */
        case 0x029: /* movaps */
        case 0x129: /* movapd */
        case 0x17f: /* movdqa ea, xmm */
        case 0x27f: /* movdqu ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
                            offsetof(CPUX86State,xmm_regs[reg]));
            }
            break;
        case 0x211: /* movss ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
            }
            break;
        case 0x311: /* movsd ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            }
            break;
        case 0x013: /* movlps */
        case 0x113: /* movlpd */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                goto illegal_op;
            }
            break;
        case 0x017: /* movhps */
        case 0x117: /* movhpd */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            } else {
                goto illegal_op;
            }
            break;
        case 0x71: /* shift mm, im */
        case 0x72:
        case 0x73:
        case 0x171: /* shift xmm, im */
        case 0x172:
        case 0x173:
            val = cpu_ldub_code(env, s->pc++);
            if (is_xmm) {
                gen_op_movl_T0_im(val);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
                op1_offset = offsetof(CPUX86State,xmm_t0);
            } else {
                gen_op_movl_T0_im(val);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
                op1_offset = offsetof(CPUX86State,mmx_t0);
            }
            sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
                                       (((modrm >> 3)) & 7)][b1];
            if (!sse_fn_epp) {
                goto illegal_op;
            }
            if (is_xmm) {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        case 0x050: /* movmskps */
            rm = (modrm & 7) | REX_B(s);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                             offsetof(CPUX86State,xmm_regs[rm]));
            gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x150: /* movmskpd */
            rm = (modrm & 7) | REX_B(s);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                             offsetof(CPUX86State,xmm_regs[rm]));
            gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x02a: /* cvtpi2ps */
        case 0x12a: /* cvtpi2pd */
            gen_helper_enter_mmx(cpu_env);
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,mmx_t0);
                gen_ldq_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            switch (b >> 8) {
            case 0x0:
                gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            default:
            case 0x1:
                gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            }
            break;
        case 0x22a: /* cvtsi2ss */
        case 0x32a: /* cvtsi2sd */
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            if (ot == OT_LONG) {
                SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
            } else {
#ifdef TARGET_X86_64
                SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
                sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
#else
                goto illegal_op;
#endif
            }
            break;
        case 0x02c: /* cvttps2pi */
        case 0x12c: /* cvttpd2pi */
        case 0x02d: /* cvtps2pi */
        case 0x12d: /* cvtpd2pi */
            gen_helper_enter_mmx(cpu_env);
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,xmm_t0);
                gen_ldo_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
            op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            switch (b) {
            case 0x02c:
                gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            case 0x12c:
                gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            case 0x02d:
                gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            default:
            case 0x12d:
                gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            }
            break;
        case 0x22c: /* cvttss2si */
        case 0x32c: /* cvttsd2si */
        case 0x22d: /* cvtss2si */
        case 0x32d: /* cvtsd2si */
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                if ((b >> 8) & 1) {
                    gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
                } else {
                    gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                    tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                }
                op2_offset = offsetof(CPUX86State,xmm_t0);
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
            if (ot == OT_LONG) {
                SSEFunc_i_ep sse_fn_i_ep =
                    sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
                sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
                tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            } else {
#ifdef TARGET_X86_64
                SSEFunc_l_ep sse_fn_l_ep =
                    sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
                sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
#else
                goto illegal_op;
#endif
            }
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0xc4: /* pinsrw */
        case 0x1c4:
            gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
            val = cpu_ldub_code(env, s->pc++);
            if (b1) {
                val &= 7;
                tcg_gen_st16_tl(cpu_T[0], cpu_env,
                                offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
            } else {
                val &= 3;
                tcg_gen_st16_tl(cpu_T[0], cpu_env,
                                offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
            }
            break;
        case 0xc5: /* pextrw */
        case 0x1c5:
            if (mod != 3)
                goto illegal_op;
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            val = cpu_ldub_code(env, s->pc++);
            if (b1) {
                val &= 7;
                rm = (modrm & 7) | REX_B(s);
                tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
            } else {
                val &= 3;
                rm = (modrm & 7);
                tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
            }
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0x1d6: /* movq ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
            }
            break;
        case 0x2d6: /* movq2dq */
            gen_helper_enter_mmx(cpu_env);
            rm = (modrm & 7);
            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                        offsetof(CPUX86State,fpregs[rm].mmx));
            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            break;
        case 0x3d6: /* movdq2q */
            gen_helper_enter_mmx(cpu_env);
            rm = (modrm & 7) | REX_B(s);
            gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
                        offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            break;
        case 0xd7: /* pmovmskb */
        case 0x1d7:
            if (mod != 3)
                goto illegal_op;
            if (b1) {
                rm = (modrm & 7) | REX_B(s);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
                gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            } else {
                rm = (modrm & 7);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
                gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            }
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x138:
            if (s->prefix & PREFIX_REPNZ)
                goto crc32;
        case 0x038:
            b = modrm;
            modrm = cpu_ldub_code(env, s->pc++);
            rm = modrm & 7;
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            if (b1 >= 2) {
                goto illegal_op;
            }

            sse_fn_epp = sse_op_table6[b].op[b1];
            if (!sse_fn_epp) {
                goto illegal_op;
            }
            if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
                goto illegal_op;

            if (b1) {
                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
                } else {
                    op2_offset = offsetof(CPUX86State,xmm_t0);
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    switch (b) {
                    case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
                    case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
                    case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
                        gen_ldq_env_A0(s->mem_index, op2_offset +
                                       offsetof(XMMReg, XMM_Q(0)));
                        break;
                    case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
                    case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
                        tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                           (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
                                       offsetof(XMMReg, XMM_L(0)));
                        break;
                    case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
                        tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
                                           (s->mem_index >> 2) - 1);
                        tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
                                        offsetof(XMMReg, XMM_W(0)));
                        break;
                    case 0x2a: /* movntdqa */
                        gen_ldo_env_A0(s->mem_index, op1_offset);
                        return;
                    default:
                        gen_ldo_env_A0(s->mem_index, op2_offset);
                    }
                }
            } else {
                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
                } else {
                    op2_offset = offsetof(CPUX86State,mmx_t0);
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    gen_ldq_env_A0(s->mem_index, op2_offset);
                }
            }
            if (sse_fn_epp == SSE_SPECIAL) {
                goto illegal_op;
            }

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);

            if (b == 0x17)
                s->cc_op = CC_OP_EFLAGS;
            break;
        case 0x338: /* crc32 */
        crc32:
            b = modrm;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;

            if (b != 0xf0 && b != 0xf1)
                goto illegal_op;
            if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
                goto illegal_op;

            if (b == 0xf0)
                ot = OT_BYTE;
            else if (b == 0xf1 && s->dflag != 2)
                if (s->prefix & PREFIX_DATA)
                    ot = OT_WORD;
                else
                    ot = OT_LONG;
            else
                ot = OT_QUAD;

            gen_op_mov_TN_reg(OT_LONG, 0, reg);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
            gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
                             cpu_T[0], tcg_const_i32(8 << ot));
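            /* 8 << ot is the operand width in bits (OT_BYTE = 0 through
               OT_QUAD = 3) passed to the CRC helper. */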
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0x03a:
        case 0x13a:
            b = modrm;
            modrm = cpu_ldub_code(env, s->pc++);
            rm = modrm & 7;
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            if (b1 >= 2) {
                goto illegal_op;
            }

            sse_fn_eppi = sse_op_table7[b].op[b1];
            if (!sse_fn_eppi) {
                goto illegal_op;
            }
            if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
                goto illegal_op;

            if (sse_fn_eppi == SSE_SPECIAL) {
                ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3)
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                reg = ((modrm >> 3) & 7) | rex_r;
                val = cpu_ldub_code(env, s->pc++);
                switch (b) {
                case 0x14: /* pextrb */
                    tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                    xmm_regs[reg].XMM_B(val & 15)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
                                         (s->mem_index >> 2) - 1);
                    break;
                case 0x15: /* pextrw */
                    tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                     xmm_regs[reg].XMM_W(val & 7)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
                                          (s->mem_index >> 2) - 1);
                    break;
                case 0x16:
                    if (ot == OT_LONG) { /* pextrd */
                        tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
                                       offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(val & 3)));
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        if (mod == 3)
                            gen_op_mov_reg_v(ot, rm, cpu_T[0]);
                        else
                            tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
                                              (s->mem_index >> 2) - 1);
                    } else { /* pextrq */
#ifdef TARGET_X86_64
                        tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
                                       offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_Q(val & 1)));
                        if (mod == 3)
                            gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
                        else
                            tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                              (s->mem_index >> 2) - 1);
#else
                        goto illegal_op;
#endif
                    }
                    break;
                case 0x17: /* extractps */
                    tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                     xmm_regs[reg].XMM_L(val & 3)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
                                          (s->mem_index >> 2) - 1);
                    break;
                case 0x20: /* pinsrb */
                    if (mod == 3)
                        gen_op_mov_TN_reg(OT_LONG, 0, rm);
                    else
                        tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                    tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State,
                                   xmm_regs[reg].XMM_B(val & 15)));
                    break;
                case 0x21: /* insertps */
                    if (mod == 3) {
                        tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
                                       offsetof(CPUX86State,xmm_regs[rm]
                                                .XMM_L((val >> 6) & 3)));
                    } else {
                        tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                           (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                    }
                    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
                                   offsetof(CPUX86State,xmm_regs[reg]
                                            .XMM_L((val >> 4) & 3)));
                    if ((val >> 0) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                       cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(0)));
                    if ((val >> 1) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                       cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(1)));
                    if ((val >> 2) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                       cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(2)));
                    if ((val >> 3) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                       cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(3)));
                    break;
                case 0x22:
                    if (ot == OT_LONG) { /* pinsrd */
                        if (mod == 3)
                            gen_op_mov_v_reg(ot, cpu_tmp0, rm);
                        else
                            tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                               (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
                                       offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(val & 3)));
                    } else { /* pinsrq */
#ifdef TARGET_X86_64
                        if (mod == 3)
                            gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
                        else
                            tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                              (s->mem_index >> 2) - 1);
                        tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
                                       offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_Q(val & 1)));
#else
                        goto illegal_op;
#endif
                    }
                    break;
                }
                return;
            }

            if (b1) {
                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
                } else {
                    op2_offset = offsetof(CPUX86State,xmm_t0);
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    gen_ldo_env_A0(s->mem_index, op2_offset);
                }
            } else {
                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
                } else {
                    op2_offset = offsetof(CPUX86State,mmx_t0);
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    gen_ldq_env_A0(s->mem_index, op2_offset);
                }
            }
            val = cpu_ldub_code(env, s->pc++);

            if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
                s->cc_op = CC_OP_EFLAGS;

                if (s->dflag == 2)
                    /* The helper must use entire 64-bit gp registers */
                    val |= 1 << 8;
            }

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
            break;
        default:
            goto illegal_op;
        }
    } else {
        /* generic MMX or SSE operation */
        switch (b) {
        case 0x70: /* pshufx insn */
        case 0xc6: /* pshufx insn */
        case 0xc2: /* compare insns */
            s->rip_offset = 1;
            break;
        default:
            break;
        }
        if (is_xmm) {
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,xmm_t0);
                if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
                                b == 0xc2)) {
                    /* specific case for SSE single instructions */
                    if (b1 == 2) {
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                    } else {
                        gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
                    }
                } else {
                    gen_ldo_env_A0(s->mem_index, op2_offset);
                }
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
        } else {
            op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,mmx_t0);
                gen_ldq_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
        }
        switch (b) {
        case 0x0f: /* 3DNow! data insns */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
                goto illegal_op;
            val = cpu_ldub_code(env, s->pc++);
            sse_fn_epp = sse_op_table5[val];
            if (!sse_fn_epp) {
                goto illegal_op;
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        case 0x70: /* pshufx insn */
        case 0xc6: /* pshufx insn */
            val = cpu_ldub_code(env, s->pc++);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            /* XXX: introduce a new table? */
            sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
            sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
            break;
        case 0xc2:
            /* compare insns */
            val = cpu_ldub_code(env, s->pc++);
            if (val >= 8)
                goto illegal_op;
            sse_fn_epp = sse_op_table4[val][b1];
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        case 0xf7:
            /* maskmov : we must prepare A0 */
            if (mod != 3)
                goto illegal_op;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_reg(R_EDI);
            } else
#endif
            {
                gen_op_movl_A0_reg(R_EDI);
                if (s->aflag == 0)
                    gen_op_andl_A0_ffff();
            }
            gen_add_A0_ds_seg(s);

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            /* XXX: introduce a new table? */
            sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
            sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
            break;
        default:
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        }
        if (b == 0x2e || b == 0x2f) {
            s->cc_op = CC_OP_EFLAGS;
        }
    }
}
/* convert one instruction. s->is_jmp is set if the translation must
   be stopped. Return the next pc value */
static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
                               target_ulong pc_start)
{
    int b, prefixes, aflag, dflag;
    int shift, ot;
    int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
    target_ulong next_eip, tval;
    int rex_w, rex_r;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(pc_start);
    }
    s->pc = pc_start;
    prefixes = 0;
    aflag = s->code32;
    dflag = s->code32;
    s->override = -1;
    rex_w = -1;
    rex_r = 0;
#ifdef TARGET_X86_64
    s->rex_x = 0;
    s->rex_b = 0;
    x86_64_hregs = 0;
#endif
    s->rip_offset = 0; /* for relative ip address */
 next_byte:
    b = cpu_ldub_code(env, s->pc);
    s->pc++;
    /* check prefixes */
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        switch (b) {
        case 0xf3:
            prefixes |= PREFIX_REPZ;
            goto next_byte;
        case 0xf2:
            prefixes |= PREFIX_REPNZ;
            goto next_byte;
        case 0xf0:
            prefixes |= PREFIX_LOCK;
            goto next_byte;
        case 0x2e: s->override = R_CS; goto next_byte;
        case 0x36: s->override = R_SS; goto next_byte;
        case 0x3e: s->override = R_DS; goto next_byte;
        case 0x26: s->override = R_ES; goto next_byte;
        case 0x64: s->override = R_FS; goto next_byte;
        case 0x65: s->override = R_GS; goto next_byte;
        case 0x66:
            prefixes |= PREFIX_DATA;
            goto next_byte;
        case 0x67:
            prefixes |= PREFIX_ADR;
            goto next_byte;
        case 0x40 ... 0x4f:
            /* REX prefix */
            rex_w = (b >> 3) & 1;
            rex_r = (b & 0x4) << 1;
            s->rex_x = (b & 0x2) << 2;
            REX_B(s) = (b & 0x1) << 3;
            x86_64_hregs = 1; /* select uniform byte register addressing */
            goto next_byte;
        }
        if (rex_w == 1) {
            /* 0x66 is ignored if rex.w is set */
            dflag = 2;
        } else {
            if (prefixes & PREFIX_DATA)
                dflag ^= 1;
        }
        if (!(prefixes & PREFIX_ADR))
            aflag = 2;
    } else
#endif
    {
        switch (b) {
        case 0xf3:
            prefixes |= PREFIX_REPZ;
            goto next_byte;
        case 0xf2:
            prefixes |= PREFIX_REPNZ;
            goto next_byte;
        case 0xf0:
            prefixes |= PREFIX_LOCK;
            goto next_byte;
        case 0x2e: s->override = R_CS; goto next_byte;
        case 0x36: s->override = R_SS; goto next_byte;
        case 0x3e: s->override = R_DS; goto next_byte;
        case 0x26: s->override = R_ES; goto next_byte;
        case 0x64: s->override = R_FS; goto next_byte;
        case 0x65: s->override = R_GS; goto next_byte;
        case 0x66:
            prefixes |= PREFIX_DATA;
            goto next_byte;
        case 0x67:
            prefixes |= PREFIX_ADR;
            goto next_byte;
        }
        if (prefixes & PREFIX_DATA)
            dflag ^= 1;
        if (prefixes & PREFIX_ADR)
            aflag ^= 1;
    }

    s->prefix = prefixes;
    s->aflag = aflag;
    s->dflag = dflag;
    /* lock generation */
    if (prefixes & PREFIX_LOCK)
        gen_helper_lock();

    /* now check op code */
 reswitch:
    switch (b) {
    case 0x0f:
        /**************************/
        /* extended op code */
        b = cpu_ldub_code(env, s->pc++) | 0x100;
        goto reswitch;

        /**************************/
        /* arith & logic */
    case 0x00 ... 0x05:
    case 0x08 ... 0x0d:
    case 0x10 ... 0x15:
    case 0x18 ... 0x1d:
    case 0x20 ... 0x25:
    case 0x28 ... 0x2d:
    case 0x30 ... 0x35:
    case 0x38 ... 0x3d:
        {
            int op, f, val;
            op = (b >> 3) & 7;
            f = (b >> 1) & 3;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;

            switch (f) {
            case 0: /* OP Ev, Gv */
                modrm = cpu_ldub_code(env, s->pc++);
                reg = ((modrm >> 3) & 7) | rex_r;
                mod = (modrm >> 6) & 3;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    opreg = OR_TMP0;
                } else if (op == OP_XORL && rm == reg) {
                xor_zero:
                    /* xor reg, reg optimisation */
                    gen_op_movl_T0_0();
                    s->cc_op = CC_OP_LOGICB + ot;
                    gen_op_mov_reg_T0(ot, reg);
                    gen_op_update1_cc();
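                    /* No read of the old register value is needed here:
                       xor r,r always yields zero and the flags depend only
                       on that result. */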
                    break;
                } else {
                    opreg = rm;
                }
                gen_op_mov_TN_reg(ot, 1, reg);
                gen_op(s, op, ot, opreg);
                break;
            case 1: /* OP Gv, Ev */
                modrm = cpu_ldub_code(env, s->pc++);
                mod = (modrm >> 6) & 3;
                reg = ((modrm >> 3) & 7) | rex_r;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    gen_op_ld_T1_A0(ot + s->mem_index);
                } else if (op == OP_XORL && rm == reg) {
                    goto xor_zero;
                } else {
                    gen_op_mov_TN_reg(ot, 1, rm);
                }
                gen_op(s, op, ot, reg);
                break;
            case 2: /* OP A, Iv */
                val = insn_get(env, s, ot);
                gen_op_movl_T1_im(val);
                gen_op(s, op, ot, OR_EAX);
                break;
            }
        }
        break;
    case 0x80: /* GRP1 */
    case 0x81:
    case 0x82:
    case 0x83:
        {
            int val;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;

            modrm = cpu_ldub_code(env, s->pc++);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            op = (modrm >> 3) & 7;

            if (mod != 3) {
                if (b == 0x83)
                    s->rip_offset = 1;
                else
                    s->rip_offset = insn_const_size(ot);
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                opreg = OR_TMP0;
            } else {
                opreg = rm;
            }

            switch (b) {
            default:
            case 0x80:
            case 0x81:
            case 0x82:
                val = insn_get(env, s, ot);
                break;
            case 0x83:
                val = (int8_t)insn_get(env, s, OT_BYTE);
                break;
            }
            gen_op_movl_T1_im(val);
            gen_op(s, op, ot, opreg);
        }
        break;

        /**************************/
        /* inc, dec, and other misc arith */
    case 0x40 ... 0x47: /* inc Gv */
        ot = dflag ? OT_LONG : OT_WORD;
        gen_inc(s, ot, OR_EAX + (b & 7), 1);
        break;
    case 0x48 ... 0x4f: /* dec Gv */
        ot = dflag ? OT_LONG : OT_WORD;
        gen_inc(s, ot, OR_EAX + (b & 7), -1);
        break;
4414 ot
= dflag
+ OT_WORD
;
4416 modrm
= cpu_ldub_code(env
, s
->pc
++);
4417 mod
= (modrm
>> 6) & 3;
4418 rm
= (modrm
& 7) | REX_B(s
);
4419 op
= (modrm
>> 3) & 7;
4422 s
->rip_offset
= insn_const_size(ot
);
4423 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4424 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
4426 gen_op_mov_TN_reg(ot
, 0, rm
);
4431 val
= insn_get(env
, s
, ot
);
4432 gen_op_movl_T1_im(val
);
4433 gen_op_testl_T0_T1_cc();
4434 s
->cc_op
= CC_OP_LOGICB
+ ot
;
4437 tcg_gen_not_tl(cpu_T
[0], cpu_T
[0]);
4439 gen_op_st_T0_A0(ot
+ s
->mem_index
);
4441 gen_op_mov_reg_T0(ot
, rm
);
4445 tcg_gen_neg_tl(cpu_T
[0], cpu_T
[0]);
4447 gen_op_st_T0_A0(ot
+ s
->mem_index
);
4449 gen_op_mov_reg_T0(ot
, rm
);
4451 gen_op_update_neg_cc();
4452 s
->cc_op
= CC_OP_SUBB
+ ot
;
4457 gen_op_mov_TN_reg(OT_BYTE
, 1, R_EAX
);
4458 tcg_gen_ext8u_tl(cpu_T
[0], cpu_T
[0]);
4459 tcg_gen_ext8u_tl(cpu_T
[1], cpu_T
[1]);
4460 /* XXX: use 32 bit mul which could be faster */
4461 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4462 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4463 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4464 tcg_gen_andi_tl(cpu_cc_src
, cpu_T
[0], 0xff00);
4465 s
->cc_op
= CC_OP_MULB
;
4468 gen_op_mov_TN_reg(OT_WORD
, 1, R_EAX
);
4469 tcg_gen_ext16u_tl(cpu_T
[0], cpu_T
[0]);
4470 tcg_gen_ext16u_tl(cpu_T
[1], cpu_T
[1]);
4471 /* XXX: use 32 bit mul which could be faster */
4472 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4473 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4474 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4475 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 16);
4476 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
4477 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4478 s
->cc_op
= CC_OP_MULW
;
4482 #ifdef TARGET_X86_64
4483 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4484 tcg_gen_ext32u_tl(cpu_T
[0], cpu_T
[0]);
4485 tcg_gen_ext32u_tl(cpu_T
[1], cpu_T
[1]);
4486 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4487 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4488 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4489 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 32);
4490 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4491 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4495 t0
= tcg_temp_new_i64();
4496 t1
= tcg_temp_new_i64();
4497 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4498 tcg_gen_extu_i32_i64(t0
, cpu_T
[0]);
4499 tcg_gen_extu_i32_i64(t1
, cpu_T
[1]);
4500 tcg_gen_mul_i64(t0
, t0
, t1
);
4501 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4502 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4503 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4504 tcg_gen_shri_i64(t0
, t0
, 32);
4505 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4506 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4507 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4510 s
->cc_op
= CC_OP_MULL
;
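                /* CC_OP_MUL*: CF/OF are derived lazily from cc_src (which
                   receives the high part of the product) when the flags are
                   actually consumed. */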
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_helper_mulq_EAX_T0(cpu_env, cpu_T[0]);
                s->cc_op = CC_OP_MULQ;
                break;
#endif
            }
            break;
        case 5: /* imul */
            switch (ot) {
            case OT_BYTE:
                gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
                tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                s->cc_op = CC_OP_MULB;
                break;
            case OT_WORD:
                gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
                tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
                gen_op_mov_reg_T0(OT_WORD, R_EDX);
                s->cc_op = CC_OP_MULW;
                break;
            default:
            case OT_LONG:
#ifdef TARGET_X86_64
                gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_LONG, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
                gen_op_mov_reg_T0(OT_LONG, R_EDX);
#else
                {
                    TCGv_i64 t0, t1;
                    t0 = tcg_temp_new_i64();
                    t1 = tcg_temp_new_i64();
                    gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                    tcg_gen_ext_i32_i64(t0, cpu_T[0]);
                    tcg_gen_ext_i32_i64(t1, cpu_T[1]);
                    tcg_gen_mul_i64(t0, t0, t1);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EAX);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                    tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
                    tcg_gen_shri_i64(t0, t0, 32);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EDX);
                    tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                }
#endif
                s->cc_op = CC_OP_MULL;
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_helper_imulq_EAX_T0(cpu_env, cpu_T[0]);
                s->cc_op = CC_OP_MULQ;
                break;
#endif
            }
            break;
        case 6: /* div */
            switch (ot) {
            case OT_BYTE:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divb_AL(cpu_env, cpu_T[0]);
                break;
            case OT_WORD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divw_AX(cpu_env, cpu_T[0]);
                break;
            default:
            case OT_LONG:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divl_EAX(cpu_env, cpu_T[0]);
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divq_EAX(cpu_env, cpu_T[0]);
                break;
#endif
            }
            break;
        case 7: /* idiv */
            switch (ot) {
            case OT_BYTE:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivb_AL(cpu_env, cpu_T[0]);
                break;
            case OT_WORD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivw_AX(cpu_env, cpu_T[0]);
                break;
            default:
            case OT_LONG:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
                break;
#endif
            }
            break;
        default:
            goto illegal_op;
        }
        break;
    case 0xfe: /* GRP4 */
    case 0xff: /* GRP5 */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (op >= 2 && b == 0xfe) {
            goto illegal_op;
        }
        if (CODE64(s)) {
            if (op == 2 || op == 4) {
                /* operand size for jumps is 64 bit */
                ot = OT_QUAD;
            } else if (op == 3 || op == 5) {
                ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
            } else if (op == 6) {
                /* default push size is 64 bit */
                ot = dflag ? OT_QUAD : OT_WORD;
            }
        }
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            if (op >= 2 && op != 3 && op != 5)
                gen_op_ld_T0_A0(ot + s->mem_index);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }

        switch (op) {
        case 0: /* inc Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, 1);
            break;
        case 1: /* dec Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, -1);
            break;
        case 2: /* call Ev */
            /* XXX: optimize if memory (no 'and' is necessary) */
            if (s->dflag == 0)
                gen_op_andl_T0_ffff();
            next_eip = s->pc - s->cs_base;
            gen_movtl_T1_im(next_eip);
            gen_push_T1(s);
            gen_op_jmp_T0();
            gen_eob(s);
            break;
        case 3: /* lcall Ev */
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
            gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
        do_lcall:
            if (s->pe && !s->vm86) {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
                                           tcg_const_i32(dflag),
                                           tcg_const_i32(s->pc - pc_start));
            } else {
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
                                      tcg_const_i32(dflag),
                                      tcg_const_i32(s->pc - s->cs_base));
            }
            gen_eob(s);
            break;
        case 4: /* jmp Ev */
            if (s->dflag == 0)
                gen_op_andl_T0_ffff();
            gen_op_jmp_T0();
            gen_eob(s);
            break;
        case 5: /* ljmp Ev */
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
            gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
        do_ljmp:
            if (s->pe && !s->vm86) {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
                                          tcg_const_i32(s->pc - pc_start));
            } else {
                gen_op_movl_seg_T0_vm(R_CS);
                gen_op_movl_T0_T1();
                gen_op_jmp_T0();
            }
            gen_eob(s);
            break;
        case 6: /* push Ev */
            gen_push_T0(s);
            break;
        default:
            goto illegal_op;
        }
        break;
4761 reg
= ((modrm
>> 3) & 7) | rex_r
;
4763 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4764 gen_op_mov_TN_reg(ot
, 1, reg
);
4765 gen_op_testl_T0_T1_cc();
4766 s
->cc_op
= CC_OP_LOGICB
+ ot
;
4769 case 0xa8: /* test eAX, Iv */
4774 ot
= dflag
+ OT_WORD
;
4775 val
= insn_get(env
, s
, ot
);
4777 gen_op_mov_TN_reg(ot
, 0, OR_EAX
);
4778 gen_op_movl_T1_im(val
);
4779 gen_op_testl_T0_T1_cc();
4780 s
->cc_op
= CC_OP_LOGICB
+ ot
;
4783 case 0x98: /* CWDE/CBW */
4784 #ifdef TARGET_X86_64
4786 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
4787 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4788 gen_op_mov_reg_T0(OT_QUAD
, R_EAX
);
4792 gen_op_mov_TN_reg(OT_WORD
, 0, R_EAX
);
4793 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
4794 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4796 gen_op_mov_TN_reg(OT_BYTE
, 0, R_EAX
);
4797 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
4798 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4801 case 0x99: /* CDQ/CWD */
4802 #ifdef TARGET_X86_64
4804 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EAX
);
4805 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 63);
4806 gen_op_mov_reg_T0(OT_QUAD
, R_EDX
);
4810 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
4811 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4812 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 31);
4813 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4815 gen_op_mov_TN_reg(OT_WORD
, 0, R_EAX
);
4816 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
4817 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 15);
4818 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
    case 0x1af: /* imul Gv, Ev */
    case 0x69: /* imul Gv, Ev, I */
    case 0x6b:
        ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        if (b == 0x69)
            s->rip_offset = insn_const_size(ot);
        else if (b == 0x6b)
            s->rip_offset = 1;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        if (b == 0x69) {
            val = insn_get(env, s, ot);
            gen_op_movl_T1_im(val);
        } else if (b == 0x6b) {
            val = (int8_t)insn_get(env, s, OT_BYTE);
            gen_op_movl_T1_im(val);
        } else {
            gen_op_mov_TN_reg(ot, 1, reg);
        }

#ifdef TARGET_X86_64
        if (ot == OT_QUAD) {
            gen_helper_imulq_T0_T1(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
        } else
#endif
        if (ot == OT_LONG) {
#ifdef TARGET_X86_64
            tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
            tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
            tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
            tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
#else
            {
                TCGv_i64 t0, t1;
                t0 = tcg_temp_new_i64();
                t1 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(t0, cpu_T[0]);
                tcg_gen_ext_i32_i64(t1, cpu_T[1]);
                tcg_gen_mul_i64(t0, t0, t1);
                tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
                tcg_gen_shri_i64(t0, t0, 32);
                tcg_gen_trunc_i64_i32(cpu_T[1], t0);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
            }
#endif
        } else {
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
            /* XXX: use 32 bit mul which could be faster */
            tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
            tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
            tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
        }
        gen_op_mov_reg_T0(ot, reg);
        s->cc_op = CC_OP_MULB + ot;
        break;
    case 0x1c0:
    case 0x1c1: /* xadd Ev, Gv */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_TN_reg(ot, 1, rm);
            gen_op_addl_T0_T1();
            gen_op_mov_reg_T1(ot, reg);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_op_addl_T0_T1();
            gen_op_st_T0_A0(ot + s->mem_index);
            gen_op_mov_reg_T1(ot, reg);
        }
        gen_op_update2_cc();
        s->cc_op = CC_OP_ADDB + ot;
        break;
    case 0x1b0:
    case 0x1b1: /* cmpxchg Ev, Gv */
        {
            int label1, label2;
            TCGv t0, t1, t2, a0;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            a0 = tcg_temp_local_new();
            gen_op_mov_v_reg(ot, t1, reg);
            if (mod == 3) {
                rm = (modrm & 7) | REX_B(s);
                gen_op_mov_v_reg(ot, t0, rm);
            } else {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                tcg_gen_mov_tl(a0, cpu_A0);
                gen_op_ld_v(ot + s->mem_index, t0, a0);
                rm = 0; /* avoid warning */
            }
            label1 = gen_new_label();
            tcg_gen_sub_tl(t2, cpu_regs[R_EAX], t0);
            gen_extu(ot, t2);
            tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
            label2 = gen_new_label();
            if (mod == 3) {
                gen_op_mov_reg_v(ot, R_EAX, t0);
                tcg_gen_br(label2);
                gen_set_label(label1);
                gen_op_mov_reg_v(ot, rm, t1);
            } else {
                /* perform no-op store cycle like physical cpu; must be
                   before changing accumulator to ensure idempotency if
                   the store faults and the instruction is restarted */
                gen_op_st_v(ot + s->mem_index, t0, a0);
                gen_op_mov_reg_v(ot, R_EAX, t0);
                tcg_gen_br(label2);
                gen_set_label(label1);
                gen_op_st_v(ot + s->mem_index, t1, a0);
            }
            gen_set_label(label2);
            tcg_gen_mov_tl(cpu_cc_src, t0);
            tcg_gen_mov_tl(cpu_cc_dst, t2);
            s->cc_op = CC_OP_SUBB + ot;
            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
            tcg_temp_free(a0);
        }
        break;
    case 0x1c7: /* cmpxchg8b */
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if ((mod == 3) || ((modrm & 0x38) != 0x8))
            goto illegal_op;
#ifdef TARGET_X86_64
        if (dflag == 2) {
            if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
                goto illegal_op;
            gen_jmp_im(pc_start - s->cs_base);
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_helper_cmpxchg16b(cpu_env, cpu_A0);
        } else
#endif
        {
            if (!(s->cpuid_features & CPUID_CX8))
                goto illegal_op;
            gen_jmp_im(pc_start - s->cs_base);
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_helper_cmpxchg8b(cpu_env, cpu_A0);
        }
        s->cc_op = CC_OP_EFLAGS;
        break;
4995 /**************************/
4997 case 0x50 ... 0x57: /* push */
4998 gen_op_mov_TN_reg(OT_LONG
, 0, (b
& 7) | REX_B(s
));
5001 case 0x58 ... 0x5f: /* pop */
5003 ot
= dflag
? OT_QUAD
: OT_WORD
;
5005 ot
= dflag
+ OT_WORD
;
5008 /* NOTE: order is important for pop %sp */
5010 gen_op_mov_reg_T0(ot
, (b
& 7) | REX_B(s
));
5012 case 0x60: /* pusha */
5017 case 0x61: /* popa */
5022 case 0x68: /* push Iv */
5025 ot
= dflag
? OT_QUAD
: OT_WORD
;
5027 ot
= dflag
+ OT_WORD
;
5030 val
= insn_get(env
, s
, ot
);
5032 val
= (int8_t)insn_get(env
, s
, OT_BYTE
);
5033 gen_op_movl_T0_im(val
);
    case 0x8f: /* pop Ev */
        if (CODE64(s)) {
            ot = dflag ? OT_QUAD : OT_WORD;
        } else {
            ot = dflag + OT_WORD;
        }
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        gen_pop_T0(s);
        if (mod == 3) {
            /* NOTE: order is important for pop %sp */
            gen_pop_update(s);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            /* NOTE: order is important too for MMU exceptions */
            s->popl_esp_hack = 1 << ot;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            s->popl_esp_hack = 0;
            gen_pop_update(s);
        }
        break;
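        /* Roughly, popl_esp_hack communicates to the effective-address code
           that the pop has already adjusted ESP: on real hardware a memory
           destination such as "pop (%esp)" is evaluated with the incremented
           stack pointer, and gen_lea_modrm adds this value to ESP-based
           addresses to reproduce that. */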
    case 0xc8: /* enter */
        {
            int level;
            val = cpu_lduw_code(env, s->pc);
            s->pc += 2;
            level = cpu_ldub_code(env, s->pc++);
            gen_enter(s, val, level);
        }
        break;
    case 0xc9: /* leave */
        /* XXX: exception not precise (ESP is updated before potential exception) */
        if (CODE64(s)) {
            gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
            gen_op_mov_reg_T0(OT_QUAD, R_ESP);
        } else if (s->ss32) {
            gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
            gen_op_mov_reg_T0(OT_LONG, R_ESP);
        } else {
            gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
            gen_op_mov_reg_T0(OT_WORD, R_ESP);
        }
        gen_pop_T0(s);
        if (CODE64(s)) {
            ot = dflag ? OT_QUAD : OT_WORD;
        } else {
            ot = dflag + OT_WORD;
        }
        gen_op_mov_reg_T0(ot, R_EBP);
        gen_pop_update(s);
        break;
    case 0x06: /* push es */
    case 0x0e: /* push cs */
    case 0x16: /* push ss */
    case 0x1e: /* push ds */
        if (CODE64(s))
            goto illegal_op;
        gen_op_movl_T0_seg(b >> 3);
        gen_push_T0(s);
        break;
    case 0x1a0: /* push fs */
    case 0x1a8: /* push gs */
        gen_op_movl_T0_seg((b >> 3) & 7);
        gen_push_T0(s);
        break;
    case 0x07: /* pop es */
    case 0x17: /* pop ss */
    case 0x1f: /* pop ds */
        if (CODE64(s))
            goto illegal_op;
        reg = b >> 3;
        gen_pop_T0(s);
        gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
        gen_pop_update(s);
        if (reg == R_SS) {
            /* if reg == SS, inhibit interrupts/trace. */
            /* If several instructions disable interrupts, only the
               _first_ does it */
            if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                gen_helper_set_inhibit_irq(cpu_env);
            s->tf = 0;
        }
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    case 0x1a1: /* pop fs */
    case 0x1a9: /* pop gs */
        gen_pop_T0(s);
        gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
        gen_pop_update(s);
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
        /**************************/
        /* mov */
    case 0x88:
    case 0x89: /* mov Gv, Ev */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0xc6:
    case 0xc7: /* mov Ev, Iv */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod != 3) {
            s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        }
        val = insn_get(env, s, ot);
        gen_op_movl_T0_im(val);
        if (mod != 3)
            gen_op_st_T0_A0(ot + s->mem_index);
        else
            gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
        break;
    case 0x8a:
    case 0x8b: /* mov Ev, Gv */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = OT_WORD + dflag;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_reg_T0(ot, reg);
        break;
    case 0x8e: /* mov seg, Gv */
        modrm = cpu_ldub_code(env, s->pc++);
        reg = (modrm >> 3) & 7;
        if (reg >= 6 || reg == R_CS)
            goto illegal_op;
        gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
        gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
        if (reg == R_SS) {
            /* if reg == SS, inhibit interrupts/trace */
            /* If several instructions disable interrupts, only the
               _first_ does it */
            if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                gen_helper_set_inhibit_irq(cpu_env);
            s->tf = 0;
        }
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    case 0x8c: /* mov Gv, seg */
        modrm = cpu_ldub_code(env, s->pc++);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (reg >= 6)
            goto illegal_op;
        gen_op_movl_T0_seg(reg);
        if (mod == 3)
            ot = OT_WORD + dflag;
        else
            ot = OT_WORD;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
        break;
    case 0x1b6: /* movzbS Gv, Eb */
    case 0x1b7: /* movzwS Gv, Eb */
    case 0x1be: /* movsbS Gv, Eb */
    case 0x1bf: /* movswS Gv, Eb */
        {
            int d_ot;
            /* d_ot is the size of destination */
            d_ot = dflag + OT_WORD;
            /* ot is the size of source */
            ot = (b & 1) + OT_BYTE;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            if (mod == 3) {
                gen_op_mov_TN_reg(ot, 0, rm);
                switch(ot | (b & 8)) {
                case OT_BYTE:
                    tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
                    break;
                case OT_BYTE | 8:
                    tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
                    break;
                case OT_WORD:
                    tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
                    break;
                default:
                case OT_WORD | 8:
                    tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
                    break;
                }
                gen_op_mov_reg_T0(d_ot, reg);
            } else {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                if (b & 8) {
                    gen_op_lds_T0_A0(ot + s->mem_index);
                } else {
                    gen_op_ldu_T0_A0(ot + s->mem_index);
                }
                gen_op_mov_reg_T0(d_ot, reg);
            }
        }
        break;
    case 0x8d: /* lea */
        ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* we must ensure that no segment is added */
        s->override = -1;
        val = s->addseg;
        s->addseg = 0;
        gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        s->addseg = val;
        gen_op_mov_reg_A0(ot - OT_WORD, reg);
        break;
    case 0xa0: /* mov EAX, Ov */
    case 0xa1:
    case 0xa2: /* mov Ov, EAX */
    case 0xa3:
        {
            target_ulong offset_addr;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                offset_addr = cpu_ldq_code(env, s->pc);
                s->pc += 8;
                gen_op_movq_A0_im(offset_addr);
            } else
#endif
            {
                if (s->aflag) {
                    offset_addr = insn_get(env, s, OT_LONG);
                } else {
                    offset_addr = insn_get(env, s, OT_WORD);
                }
                gen_op_movl_A0_im(offset_addr);
            }
            gen_add_A0_ds_seg(s);
            if ((b & 2) == 0) {
                gen_op_ld_T0_A0(ot + s->mem_index);
                gen_op_mov_reg_T0(ot, R_EAX);
            } else {
                gen_op_mov_TN_reg(ot, 0, R_EAX);
                gen_op_st_T0_A0(ot + s->mem_index);
            }
        }
        break;
    case 0xd7: /* xlat */
#ifdef TARGET_X86_64
        if (s->aflag == 2) {
            gen_op_movq_A0_reg(R_EBX);
            gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
            tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
            tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
        } else
#endif
        {
            gen_op_movl_A0_reg(R_EBX);
            gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
            tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
            tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
            if (s->aflag == 0)
                gen_op_andl_A0_ffff();
            else
                tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        }
        gen_add_A0_ds_seg(s);
        gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
        gen_op_mov_reg_T0(OT_BYTE, R_EAX);
        break;
    case 0xb0 ... 0xb7: /* mov R, Ib */
        val = insn_get(env, s, OT_BYTE);
        gen_op_movl_T0_im(val);
        gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
        break;
    case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
        if (dflag == 2) {
            uint64_t tmp;
            /* 64 bit case */
            tmp = cpu_ldq_code(env, s->pc);
            s->pc += 8;
            reg = (b & 7) | REX_B(s);
            gen_movtl_T0_im(tmp);
            gen_op_mov_reg_T0(OT_QUAD, reg);
        } else
#endif
        {
            ot = dflag ? OT_LONG : OT_WORD;
            val = insn_get(env, s, ot);
            reg = (b & 7) | REX_B(s);
            gen_op_movl_T0_im(val);
            gen_op_mov_reg_T0(ot, reg);
        }
        break;
    case 0x91 ... 0x97: /* xchg R, EAX */
    do_xchg_reg_eax:
        ot = dflag + OT_WORD;
        reg = (b & 7) | REX_B(s);
        rm = R_EAX;
        goto do_xchg_reg;
    case 0x86:
    case 0x87: /* xchg Ev, Gv */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
        do_xchg_reg:
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_TN_reg(ot, 1, rm);
            gen_op_mov_reg_T0(ot, rm);
            gen_op_mov_reg_T1(ot, reg);
        } else {
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_op_mov_TN_reg(ot, 0, reg);
            /* for xchg, lock is implicit */
            if (!(prefixes & PREFIX_LOCK))
                gen_helper_lock();
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_op_st_T0_A0(ot + s->mem_index);
            if (!(prefixes & PREFIX_LOCK))
                gen_helper_unlock();
            gen_op_mov_reg_T1(ot, reg);
        }
        break;
    case 0xc4: /* les Gv */
        if (CODE64(s))
            goto illegal_op;
        op = R_ES;
        goto do_lxx;
    case 0xc5: /* lds Gv */
        if (CODE64(s))
            goto illegal_op;
        op = R_DS;
        goto do_lxx;
    case 0x1b2: /* lss Gv */
        op = R_SS;
        goto do_lxx;
    case 0x1b4: /* lfs Gv */
        op = R_FS;
        goto do_lxx;
    case 0x1b5: /* lgs Gv */
        op = R_GS;
    do_lxx:
        ot = dflag ? OT_LONG : OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        gen_op_ld_T1_A0(ot + s->mem_index);
        gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
        /* load the segment first to handle exceptions properly */
        gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
        gen_movl_seg_T0(s, op, pc_start - s->cs_base);
        /* then put the data */
        gen_op_mov_reg_T1(ot, reg);
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
        /************************/
        /* shifts */
    case 0xc0:
    case 0xc1:
        /* shift Ev,Ib */
        shift = 2;
    grp2:
        {
            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;
            modrm = cpu_ldub_code(env, s->pc++);
            mod = (modrm >> 6) & 3;
            op = (modrm >> 3) & 7;
            if (mod != 3) {
                if (shift == 2) {
                    s->rip_offset = 1;
                }
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                opreg = OR_TMP0;
            } else {
                opreg = (modrm & 7) | REX_B(s);
            }
            /* simpler op */
            if (shift == 0) {
                gen_shift(s, op, ot, opreg, OR_ECX);
            } else {
                if (shift == 2) {
                    shift = cpu_ldub_code(env, s->pc++);
                }
                gen_shifti(s, op, ot, opreg, shift);
            }
        }
        break;
    case 0xd0:
    case 0xd1:
        /* shift Ev,1 */
        shift = 1;
        goto grp2;
    case 0xd2:
    case 0xd3:
        /* shift Ev,cl */
        shift = 0;
        goto grp2;
    case 0x1a4: /* shld imm */
        op = 0;
        shift = 1;
        goto do_shiftd;
    case 0x1a5: /* shld cl */
        op = 0;
        shift = 0;
        goto do_shiftd;
    case 0x1ac: /* shrd imm */
        op = 1;
        shift = 1;
        goto do_shiftd;
    case 0x1ad: /* shrd cl */
        op = 1;
        shift = 0;
    do_shiftd:
        ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | rex_r;
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            opreg = OR_TMP0;
        } else {
            opreg = rm;
        }
        gen_op_mov_TN_reg(ot, 1, reg);
        if (shift) {
            val = cpu_ldub_code(env, s->pc++);
            tcg_gen_movi_tl(cpu_T3, val);
        } else {
            tcg_gen_mov_tl(cpu_T3, cpu_regs[R_ECX]);
        }
        gen_shiftd_rm_T1_T3(s, ot, opreg, op);
        break;
        /************************/
        /* floating point */
    case 0xd8 ... 0xdf:
        if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
            /* if CR0.EM or CR0.TS are set, generate an FPU exception */
            /* XXX: what to do if illegal op ? */
            gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
            break;
        }
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        rm = modrm & 7;
        op = ((b & 7) << 3) | ((modrm >> 3) & 7);
        if (mod != 3) {
            /* memory op */
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            switch(op) {
            case 0x00 ... 0x07: /* fxxxs */
            case 0x10 ... 0x17: /* fixxxl */
            case 0x20 ... 0x27: /* fxxxl */
            case 0x30 ... 0x37: /* fixxx */
                {
                    int op1;
                    op1 = op & 7;

                    switch(op >> 4) {
                    case 0:
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
                        break;
                    case 1:
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
                        break;
                    case 2:
                        tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
                        break;
                    case 3:
                    default:
                        gen_op_lds_T0_A0(OT_WORD + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
                        break;
                    }

                    gen_helper_fp_arith_ST0_FT0(op1);
                    if (op1 == 3) {
                        /* fcomp needs pop */
                        gen_helper_fpop(cpu_env);
                    }
                }
                break;
            case 0x08: /* flds */
            case 0x0a: /* fsts */
            case 0x0b: /* fstps */
            case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
            case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
            case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
                switch(op & 7) {
                case 0:
                    switch(op >> 4) {
                    case 0:
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
                        break;
                    case 1:
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
                        break;
                    case 2:
                        tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
                        break;
                    case 3:
                    default:
                        gen_op_lds_T0_A0(OT_WORD + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
                        break;
                    }
                    break;
                case 1:
                    /* XXX: the corresponding CPUID bit must be tested ! */
                    switch(op >> 4) {
                    case 1:
                        gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_st_T0_A0(OT_LONG + s->mem_index);
                        break;
                    case 2:
                        gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
                        tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        break;
                    case 3:
                    default:
                        gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_st_T0_A0(OT_WORD + s->mem_index);
                        break;
                    }
                    gen_helper_fpop(cpu_env);
                    break;
                default:
                    switch(op >> 4) {
                    case 0:
                        gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_st_T0_A0(OT_LONG + s->mem_index);
                        break;
                    case 1:
                        gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_st_T0_A0(OT_LONG + s->mem_index);
                        break;
                    case 2:
                        gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
                        tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        break;
                    case 3:
                    default:
                        gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_st_T0_A0(OT_WORD + s->mem_index);
                        break;
                    }
                    if ((op & 7) == 3)
                        gen_helper_fpop(cpu_env);
                    break;
                }
                break;
            case 0x0c: /* fldenv mem */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
                break;
            case 0x0d: /* fldcw mem */
                gen_op_ld_T0_A0(OT_WORD + s->mem_index);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
                break;
            case 0x0e: /* fnstenv mem */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
                break;
            case 0x0f: /* fnstcw mem */
                gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
                tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                gen_op_st_T0_A0(OT_WORD + s->mem_index);
                break;
            case 0x1d: /* fldt mem */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fldt_ST0(cpu_env, cpu_A0);
                break;
            case 0x1f: /* fstpt mem */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fstt_ST0(cpu_env, cpu_A0);
                gen_helper_fpop(cpu_env);
                break;
            case 0x2c: /* frstor mem */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
                break;
            case 0x2e: /* fnsave mem */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
                break;
            case 0x2f: /* fnstsw mem */
                gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
                tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                gen_op_st_T0_A0(OT_WORD + s->mem_index);
                break;
            case 0x3c: /* fbld */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fbld_ST0(cpu_env, cpu_A0);
                break;
            case 0x3e: /* fbstp */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fbst_ST0(cpu_env, cpu_A0);
                gen_helper_fpop(cpu_env);
                break;
            case 0x3d: /* fildll */
                tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                  (s->mem_index >> 2) - 1);
                gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
                break;
            case 0x3f: /* fistpll */
                gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
                tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                  (s->mem_index >> 2) - 1);
                gen_helper_fpop(cpu_env);
                break;
            default:
                goto illegal_op;
            }
        } else {
            /* register float ops */
            opreg = rm;

            switch(op) {
            case 0x08: /* fld sti */
                gen_helper_fpush(cpu_env);
                gen_helper_fmov_ST0_STN(cpu_env,
                                        tcg_const_i32((opreg + 1) & 7));
                break;
            case 0x09: /* fxchg sti */
            case 0x29: /* fxchg4 sti, undocumented op */
            case 0x39: /* fxchg7 sti, undocumented op */
                gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
                break;
            case 0x0a: /* grp d9/2 */
                switch(rm) {
                case 0: /* fnop */
                    /* check exceptions (FreeBSD FPU probe) */
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_helper_fwait(cpu_env);
                    break;
                default:
                    goto illegal_op;
                }
                break;
            case 0x0c: /* grp d9/4 */
                switch(rm) {
                case 0: /* fchs */
                    gen_helper_fchs_ST0(cpu_env);
                    break;
                case 1: /* fabs */
                    gen_helper_fabs_ST0(cpu_env);
                    break;
                case 4: /* ftst */
                    gen_helper_fldz_FT0(cpu_env);
                    gen_helper_fcom_ST0_FT0(cpu_env);
                    break;
                case 5: /* fxam */
                    gen_helper_fxam_ST0(cpu_env);
                    break;
                default:
                    goto illegal_op;
                }
                break;
            case 0x0d: /* grp d9/5 */
                switch(rm) {
                case 0:
                    gen_helper_fpush(cpu_env);
                    gen_helper_fld1_ST0(cpu_env);
                    break;
                case 1:
                    gen_helper_fpush(cpu_env);
                    gen_helper_fldl2t_ST0(cpu_env);
                    break;
                case 2:
                    gen_helper_fpush(cpu_env);
                    gen_helper_fldl2e_ST0(cpu_env);
                    break;
                case 3:
                    gen_helper_fpush(cpu_env);
                    gen_helper_fldpi_ST0(cpu_env);
                    break;
                case 4:
                    gen_helper_fpush(cpu_env);
                    gen_helper_fldlg2_ST0(cpu_env);
                    break;
                case 5:
                    gen_helper_fpush(cpu_env);
                    gen_helper_fldln2_ST0(cpu_env);
                    break;
                case 6:
                    gen_helper_fpush(cpu_env);
                    gen_helper_fldz_ST0(cpu_env);
                    break;
                default:
                    goto illegal_op;
                }
                break;
            case 0x0e: /* grp d9/6 */
                switch(rm) {
                case 0: /* f2xm1 */
                    gen_helper_f2xm1(cpu_env);
                    break;
                case 1: /* fyl2x */
                    gen_helper_fyl2x(cpu_env);
                    break;
                case 2: /* fptan */
                    gen_helper_fptan(cpu_env);
                    break;
                case 3: /* fpatan */
                    gen_helper_fpatan(cpu_env);
                    break;
                case 4: /* fxtract */
                    gen_helper_fxtract(cpu_env);
                    break;
                case 5: /* fprem1 */
                    gen_helper_fprem1(cpu_env);
                    break;
                case 6: /* fdecstp */
                    gen_helper_fdecstp(cpu_env);
                    break;
                default:
                case 7: /* fincstp */
                    gen_helper_fincstp(cpu_env);
                    break;
                }
                break;
            case 0x0f: /* grp d9/7 */
                switch(rm) {
                case 0: /* fprem */
                    gen_helper_fprem(cpu_env);
                    break;
                case 1: /* fyl2xp1 */
                    gen_helper_fyl2xp1(cpu_env);
                    break;
                case 2: /* fsqrt */
                    gen_helper_fsqrt(cpu_env);
                    break;
                case 3: /* fsincos */
                    gen_helper_fsincos(cpu_env);
                    break;
                case 5: /* fscale */
                    gen_helper_fscale(cpu_env);
                    break;
                case 4: /* frndint */
                    gen_helper_frndint(cpu_env);
                    break;
                case 6: /* fsin */
                    gen_helper_fsin(cpu_env);
                    break;
                default:
                case 7: /* fcos */
                    gen_helper_fcos(cpu_env);
                    break;
                }
                break;
            case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
            case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
            case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
                {
                    int op1;

                    op1 = op & 7;
                    if (op >= 0x20) {
                        gen_helper_fp_arith_STN_ST0(op1, opreg);
                        if (op >= 0x30)
                            gen_helper_fpop(cpu_env);
                    } else {
                        gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                        gen_helper_fp_arith_ST0_FT0(op1);
                    }
                }
                break;
            case 0x02: /* fcom */
            case 0x22: /* fcom2, undocumented op */
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fcom_ST0_FT0(cpu_env);
                break;
            case 0x03: /* fcomp */
            case 0x23: /* fcomp3, undocumented op */
            case 0x32: /* fcomp5, undocumented op */
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fcom_ST0_FT0(cpu_env);
                gen_helper_fpop(cpu_env);
                break;
            case 0x15: /* da/5 */
                switch(rm) {
                case 1: /* fucompp */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
                    gen_helper_fucom_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    gen_helper_fpop(cpu_env);
                    break;
                default:
                    goto illegal_op;
                }
                break;
            case 0x1c:
                switch(rm) {
                case 0: /* feni (287 only, just do nop here) */
                    break;
                case 1: /* fdisi (287 only, just do nop here) */
                    break;
                case 2: /* fclex */
                    gen_helper_fclex(cpu_env);
                    break;
                case 3: /* fninit */
                    gen_helper_fninit(cpu_env);
                    break;
                case 4: /* fsetpm (287 only, just do nop here) */
                    break;
                default:
                    goto illegal_op;
                }
                break;
            case 0x1d: /* fucomi */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fucomi_ST0_FT0(cpu_env);
                s->cc_op = CC_OP_EFLAGS;
                break;
            case 0x1e: /* fcomi */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fcomi_ST0_FT0(cpu_env);
                s->cc_op = CC_OP_EFLAGS;
                break;
            case 0x28: /* ffree sti */
                gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
                break;
            case 0x2a: /* fst sti */
                gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
                break;
            case 0x2b: /* fstp sti */
            case 0x0b: /* fstp1 sti, undocumented op */
            case 0x3a: /* fstp8 sti, undocumented op */
            case 0x3b: /* fstp9 sti, undocumented op */
                gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
                gen_helper_fpop(cpu_env);
                break;
            case 0x2c: /* fucom st(i) */
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fucom_ST0_FT0(cpu_env);
                break;
            case 0x2d: /* fucomp st(i) */
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fucom_ST0_FT0(cpu_env);
                gen_helper_fpop(cpu_env);
                break;
            case 0x33: /* de/3 */
                switch(rm) {
                case 1: /* fcompp */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
                    gen_helper_fcom_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    gen_helper_fpop(cpu_env);
                    break;
                default:
                    goto illegal_op;
                }
                break;
            case 0x38: /* ffreep sti, undocumented op */
                gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fpop(cpu_env);
                break;
            case 0x3c: /* df/4 */
                switch(rm) {
                case 0:
                    gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
                    tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                    gen_op_mov_reg_T0(OT_WORD, R_EAX);
                    break;
                default:
                    goto illegal_op;
                }
                break;
            case 0x3d: /* fucomip */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fucomi_ST0_FT0(cpu_env);
                gen_helper_fpop(cpu_env);
                s->cc_op = CC_OP_EFLAGS;
                break;
            case 0x3e: /* fcomip */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fcomi_ST0_FT0(cpu_env);
                gen_helper_fpop(cpu_env);
                s->cc_op = CC_OP_EFLAGS;
                break;
            case 0x10 ... 0x13: /* fcmovxx */
            case 0x18 ... 0x1b:
                {
                    int op1, l1;
                    static const uint8_t fcmov_cc[8] = {
                        (JCC_B << 1),
                        (JCC_Z << 1),
                        (JCC_BE << 1),
                        (JCC_P << 1),
                    };
                    op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
                    l1 = gen_new_label();
                    gen_jcc1(s, op1, l1);
                    gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
                    gen_set_label(l1);
                }
                break;
            default:
                goto illegal_op;
            }
        }
        break;
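            /* fcmov_cc maps the two low opcode bits to the base conditions
               (below, equal, below-or-equal, parity); bit 3 of the opcode
               selects the negated variant, hence the "^ 1" when building the
               inverted jcc that skips the register move. */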
        /************************/
        /* string ops */
    case 0xa4: /* movsS */
    case 0xa5:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_movs(s, ot);
        }
        break;
    case 0xaa: /* stosS */
    case 0xab:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_stos(s, ot);
        }
        break;
    case 0xac: /* lodsS */
    case 0xad:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_lods(s, ot);
        }
        break;
    case 0xae: /* scasS */
    case 0xaf:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
        } else {
            gen_scas(s, ot);
        }
        break;
    case 0xa6: /* cmpsS */
    case 0xa7:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
        } else {
            gen_cmps(s, ot);
        }
        break;
    case 0x6c: /* insS */
    case 0x6d:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag ? OT_LONG : OT_WORD;
        gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
        gen_op_andl_T0_ffff();
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_ins(s, ot);
            if (use_icount) {
                gen_jmp(s, s->pc - s->cs_base);
            }
        }
        break;
    case 0x6e: /* outsS */
    case 0x6f:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag ? OT_LONG : OT_WORD;
        gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
        gen_op_andl_T0_ffff();
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes) | 4);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_outs(s, ot);
            if (use_icount) {
                gen_jmp(s, s->pc - s->cs_base);
            }
        }
        break;
        /************************/
        /* port I/O */
    case 0xe4:
    case 0xe5:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag ? OT_LONG : OT_WORD;
        val = cpu_ldub_code(env, s->pc++);
        gen_op_movl_T0_im(val);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
        if (use_icount)
            gen_io_start();
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
        gen_op_mov_reg_T1(ot, R_EAX);
        if (use_icount) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
    case 0xe6:
    case 0xe7:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag ? OT_LONG : OT_WORD;
        val = cpu_ldub_code(env, s->pc++);
        gen_op_movl_T0_im(val);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes));
        gen_op_mov_TN_reg(ot, 1, R_EAX);
        if (use_icount)
            gen_io_start();
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
        gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
        if (use_icount) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
    case 0xec:
    case 0xed:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag ? OT_LONG : OT_WORD;
        gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
        gen_op_andl_T0_ffff();
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
        if (use_icount)
            gen_io_start();
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
        gen_op_mov_reg_T1(ot, R_EAX);
        if (use_icount) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
    case 0xee:
    case 0xef:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag ? OT_LONG : OT_WORD;
        gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
        gen_op_andl_T0_ffff();
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes));
        gen_op_mov_TN_reg(ot, 1, R_EAX);
        if (use_icount)
            gen_io_start();
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
        gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
        if (use_icount) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
        /************************/
        /* control */
    case 0xc2: /* ret im */
        val = cpu_ldsw_code(env, s->pc);
        s->pc += 2;
        gen_pop_T0(s);
        if (CODE64(s) && s->dflag)
            s->dflag = 2;
        gen_stack_update(s, val + (2 << s->dflag));
        if (s->dflag == 0)
            gen_op_andl_T0_ffff();
        gen_op_jmp_T0();
        gen_eob(s);
        break;
    case 0xc3: /* ret */
        gen_pop_T0(s);
        gen_pop_update(s);
        if (s->dflag == 0)
            gen_op_andl_T0_ffff();
        gen_op_jmp_T0();
        gen_eob(s);
        break;
    case 0xca: /* lret im */
        val = cpu_ldsw_code(env, s->pc);
        s->pc += 2;
    do_lret:
        if (s->pe && !s->vm86) {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_lret_protected(cpu_env, tcg_const_i32(s->dflag),
                                      tcg_const_i32(val));
        } else {
            gen_stack_A0(s);
            /* pop offset */
            gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
            if (s->dflag == 0)
                gen_op_andl_T0_ffff();
            /* NOTE: keeping EIP updated is not a problem in case of
               exception */
            gen_op_jmp_T0();
            /* pop selector */
            gen_op_addl_A0_im(2 << s->dflag);
            gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
            gen_op_movl_seg_T0_vm(R_CS);
            /* add stack offset */
            gen_stack_update(s, val + (4 << s->dflag));
        }
        gen_eob(s);
        break;
    case 0xcb: /* lret */
        val = 0;
        goto do_lret;
    case 0xcf: /* iret */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
        if (!s->pe) {
            /* real mode */
            gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
            s->cc_op = CC_OP_EFLAGS;
        } else if (s->vm86) {
            if (s->iopl != 3) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
                s->cc_op = CC_OP_EFLAGS;
            }
        } else {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_iret_protected(cpu_env, tcg_const_i32(s->dflag),
                                      tcg_const_i32(s->pc - s->cs_base));
            s->cc_op = CC_OP_EFLAGS;
        }
        gen_eob(s);
        break;
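        /* iret has three translation paths: real mode and vm86 with IOPL 3
           use the simple iret_real helper, vm86 with IOPL != 3 raises #GP,
           and protected mode goes through the full privilege and task-switch
           checks in the iret_protected helper. */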
    case 0xe8: /* call im */
        {
            if (dflag)
                tval = (int32_t)insn_get(env, s, OT_LONG);
            else
                tval = (int16_t)insn_get(env, s, OT_WORD);
            next_eip = s->pc - s->cs_base;
            tval += next_eip;
            if (s->dflag == 0)
                tval &= 0xffff;
            else if (!CODE64(s))
                tval &= 0xffffffff;
            gen_movtl_T0_im(next_eip);
            gen_push_T0(s);
            gen_jmp(s, tval);
        }
        break;
    case 0x9a: /* lcall im */
        {
            unsigned int selector, offset;

            if (CODE64(s))
                goto illegal_op;
            ot = dflag ? OT_LONG : OT_WORD;
            offset = insn_get(env, s, ot);
            selector = insn_get(env, s, OT_WORD);

            gen_op_movl_T0_im(selector);
            gen_op_movl_T1_imu(offset);
        }
        goto do_lcall;
    case 0xe9: /* jmp im */
        if (dflag)
            tval = (int32_t)insn_get(env, s, OT_LONG);
        else
            tval = (int16_t)insn_get(env, s, OT_WORD);
        tval += s->pc - s->cs_base;
        if (s->dflag == 0)
            tval &= 0xffff;
        else if (!CODE64(s))
            tval &= 0xffffffff;
        gen_jmp(s, tval);
        break;
    case 0xea: /* ljmp im */
        {
            unsigned int selector, offset;

            if (CODE64(s))
                goto illegal_op;
            ot = dflag ? OT_LONG : OT_WORD;
            offset = insn_get(env, s, ot);
            selector = insn_get(env, s, OT_WORD);

            gen_op_movl_T0_im(selector);
            gen_op_movl_T1_imu(offset);
        }
        goto do_ljmp;
    case 0xeb: /* jmp Jb */
        tval = (int8_t)insn_get(env, s, OT_BYTE);
        tval += s->pc - s->cs_base;
        if (s->dflag == 0)
            tval &= 0xffff;
        gen_jmp(s, tval);
        break;
    case 0x70 ... 0x7f: /* jcc Jb */
        tval = (int8_t)insn_get(env, s, OT_BYTE);
        goto do_jcc;
    case 0x180 ... 0x18f: /* jcc Jv */
        if (dflag) {
            tval = (int32_t)insn_get(env, s, OT_LONG);
        } else {
            tval = (int16_t)insn_get(env, s, OT_WORD);
        }
    do_jcc:
        next_eip = s->pc - s->cs_base;
        tval += next_eip;
        if (s->dflag == 0)
            tval &= 0xffff;
        gen_jcc(s, b, tval, next_eip);
        break;
    case 0x190 ... 0x19f: /* setcc Gv */
        modrm = cpu_ldub_code(env, s->pc++);
        gen_setcc(s, b);
        gen_ldst_modrm(env, s, modrm, OT_BYTE, OR_TMP0, 1);
        break;
    case 0x140 ... 0x14f: /* cmov Gv, Ev */
        {
            int l1;
            TCGv t0;

            ot = dflag + OT_WORD;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            t0 = tcg_temp_local_new();
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_mov_v_reg(ot, t0, rm);
            }
#ifdef TARGET_X86_64
            if (ot == OT_LONG) {
                /* XXX: specific Intel behaviour ? */
                l1 = gen_new_label();
                gen_jcc1(s, b ^ 1, l1);
                tcg_gen_mov_tl(cpu_regs[reg], t0);
                gen_set_label(l1);
                tcg_gen_ext32u_tl(cpu_regs[reg], cpu_regs[reg]);
            } else
#endif
            {
                l1 = gen_new_label();
                gen_jcc1(s, b ^ 1, l1);
                gen_op_mov_reg_v(ot, reg, t0);
                gen_set_label(l1);
            }
            tcg_temp_free(t0);
        }
        break;
        /************************/
        /* flags */
    case 0x9c: /* pushf */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_helper_read_eflags(cpu_T[0], cpu_env);
            gen_push_T0(s);
        }
        break;
    case 0x9d: /* popf */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_pop_T0(s);
            if (s->cpl == 0) {
                if (s->dflag) {
                    gen_helper_write_eflags(cpu_env, cpu_T[0],
                                            tcg_const_i32((TF_MASK | AC_MASK |
                                                           ID_MASK | NT_MASK |
                                                           IF_MASK |
                                                           IOPL_MASK)));
                } else {
                    gen_helper_write_eflags(cpu_env, cpu_T[0],
                                            tcg_const_i32((TF_MASK | AC_MASK |
                                                           ID_MASK | NT_MASK |
                                                           IF_MASK | IOPL_MASK)
                                                          & 0xffff));
                }
            } else {
                if (s->cpl <= s->iopl) {
                    if (s->dflag) {
                        gen_helper_write_eflags(cpu_env, cpu_T[0],
                                                tcg_const_i32((TF_MASK |
                                                               AC_MASK |
                                                               ID_MASK |
                                                               NT_MASK |
                                                               IF_MASK)));
                    } else {
                        gen_helper_write_eflags(cpu_env, cpu_T[0],
                                                tcg_const_i32((TF_MASK |
                                                               AC_MASK |
                                                               ID_MASK |
                                                               NT_MASK |
                                                               IF_MASK)
                                                              & 0xffff));
                    }
                } else {
                    if (s->dflag) {
                        gen_helper_write_eflags(cpu_env, cpu_T[0],
                                                tcg_const_i32((TF_MASK | AC_MASK |
                                                               ID_MASK | NT_MASK)));
                    } else {
                        gen_helper_write_eflags(cpu_env, cpu_T[0],
                                                tcg_const_i32((TF_MASK | AC_MASK |
                                                               ID_MASK | NT_MASK)
                                                              & 0xffff));
                    }
                }
            }
            gen_pop_update(s);
            s->cc_op = CC_OP_EFLAGS;
            /* abort translation because TF/AC flag may change */
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
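        /* The mask passed to write_eflags encodes which bits popf may change
           at the current privilege level: CPL 0 may update IOPL and IF,
           CPL <= IOPL may update IF but not IOPL, and anything else may
           update neither; 16-bit operands additionally restrict the update
           to the low word via the "& 0xffff". */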
    case 0x9e: /* sahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0x9f: /* lahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_T[0]);
        /* Note: gen_compute_eflags() only gives the condition codes */
        tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02);
        gen_op_mov_reg_T0(OT_BYTE, R_AH);
        break;
    case 0xf5: /* cmc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0xf8: /* clc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0xf9: /* stc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0xfc: /* cld */
        tcg_gen_movi_i32(cpu_tmp2_i32, 1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
        break;
    case 0xfd: /* std */
        tcg_gen_movi_i32(cpu_tmp2_i32, -1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
        break;
        /************************/
        /* bit operations */
    case 0x1ba: /* bt/bts/btr/btc Gv, im */
        ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        op = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        if (mod != 3) {
            s->rip_offset = 1;
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_op_ld_T0_A0(ot + s->mem_index);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }
        /* load shift */
        val = cpu_ldub_code(env, s->pc++);
        gen_op_movl_T1_im(val);
        if (op < 4)
            goto illegal_op;
        op -= 4;
        goto bt_op;
    case 0x1a3: /* bt Gv, Ev */
        op = 0;
        goto do_btx;
    case 0x1ab: /* bts */
        op = 1;
        goto do_btx;
    case 0x1b3: /* btr */
        op = 2;
        goto do_btx;
    case 0x1bb: /* btc */
        op = 3;
    do_btx:
        ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_TN_reg(OT_LONG, 1, reg);
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            /* specific case: we need to add a displacement */
            gen_exts(ot, cpu_T[1]);
            tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
            tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
            tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
            gen_op_ld_T0_A0(ot + s->mem_index);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }
    bt_op:
        tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
        switch(op) {
        case 0:
            tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
            tcg_gen_movi_tl(cpu_cc_dst, 0);
            break;
        case 1:
            tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
            tcg_gen_movi_tl(cpu_tmp0, 1);
            tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
            tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        case 2:
            tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
            tcg_gen_movi_tl(cpu_tmp0, 1);
            tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
            tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
            tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        default:
        case 3:
            tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
            tcg_gen_movi_tl(cpu_tmp0, 1);
            tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
            tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        }
        s->cc_op = CC_OP_SARB + ot;
        if (op != 0) {
            if (mod != 3)
                gen_op_st_T0_A0(ot + s->mem_index);
            else
                gen_op_mov_reg_T0(ot, rm);
            tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
            tcg_gen_movi_tl(cpu_cc_dst, 0);
        }
        break;
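        /* For the memory forms the bit offset may reach beyond the operand,
           so the address is adjusted first. As a worked example with
           ot == OT_LONG and a bit offset of 100 in T1: (100 >> 5) << 2 = 12
           is added to A0, and the remaining in-word bit index is
           100 & 31 = 4. */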
    case 0x1bc: /* bsf */
    case 0x1bd: /* bsr */
        {
            int label1;
            TCGv t0;

            ot = dflag + OT_WORD;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
            gen_extu(ot, cpu_T[0]);
            t0 = tcg_temp_local_new();
            tcg_gen_mov_tl(t0, cpu_T[0]);
            if ((b & 1) && (prefixes & PREFIX_REPZ) &&
                (s->cpuid_ext3_features & CPUID_EXT3_ABM)) {
                switch(ot) {
                case OT_WORD: gen_helper_lzcnt(cpu_T[0], t0,
                    tcg_const_i32(16)); break;
                case OT_LONG: gen_helper_lzcnt(cpu_T[0], t0,
                    tcg_const_i32(32)); break;
                case OT_QUAD: gen_helper_lzcnt(cpu_T[0], t0,
                    tcg_const_i32(64)); break;
                }
                gen_op_mov_reg_T0(ot, reg);
            } else {
                label1 = gen_new_label();
                tcg_gen_movi_tl(cpu_cc_dst, 0);
                tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
                if (b & 1) {
                    gen_helper_bsr(cpu_T[0], t0);
                } else {
                    gen_helper_bsf(cpu_T[0], t0);
                }
                gen_op_mov_reg_T0(ot, reg);
                tcg_gen_movi_tl(cpu_cc_dst, 1);
                gen_set_label(label1);
                tcg_gen_discard_tl(cpu_cc_src);
                s->cc_op = CC_OP_LOGICB + ot;
            }
            tcg_temp_free(t0);
        }
        break;
        /************************/
        /* bcd */
    case 0x27: /* daa */
        if (CODE64(s))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_helper_daa(cpu_env);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0x2f: /* das */
        if (CODE64(s))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_helper_das(cpu_env);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0x37: /* aaa */
        if (CODE64(s))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_helper_aaa(cpu_env);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0x3f: /* aas */
        if (CODE64(s))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_helper_aas(cpu_env);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0xd4: /* aam */
        if (CODE64(s))
            goto illegal_op;
        val = cpu_ldub_code(env, s->pc++);
        if (val == 0) {
            gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
        } else {
            gen_helper_aam(cpu_env, tcg_const_i32(val));
            s->cc_op = CC_OP_LOGICB;
        }
        break;
    case 0xd5: /* aad */
        if (CODE64(s))
            goto illegal_op;
        val = cpu_ldub_code(env, s->pc++);
        gen_helper_aad(cpu_env, tcg_const_i32(val));
        s->cc_op = CC_OP_LOGICB;
        break;
        /************************/
        /* misc */
    case 0x90: /* nop */
        /* XXX: correct lock test for all insn */
        if (prefixes & PREFIX_LOCK) {
            goto illegal_op;
        }
        /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
        if (REX_B(s)) {
            goto do_xchg_reg_eax;
        }
        if (prefixes & PREFIX_REPZ) {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
        }
        break;
    case 0x9b: /* fwait */
        if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
            (HF_MP_MASK | HF_TS_MASK)) {
            gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        } else {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fwait(cpu_env);
        }
        break;
    case 0xcc: /* int3 */
        gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
        break;
    case 0xcd: /* int N */
        val = cpu_ldub_code(env, s->pc++);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
        }
        break;
    case 0xce: /* into */
        if (CODE64(s))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
        break;
#ifdef WANT_ICEBP
    case 0xf1: /* icebp (undocumented, exits to external debugger) */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
#if 1
        gen_debug(s, pc_start - s->cs_base);
#else
        /* start debug */
        tb_flush(env);
        qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
#endif
        break;
#endif
    case 0xfa: /* cli */
        if (!s->vm86) {
            if (s->cpl <= s->iopl) {
                gen_helper_cli(cpu_env);
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        } else {
            if (s->iopl == 3) {
                gen_helper_cli(cpu_env);
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        }
        break;
    case 0xfb: /* sti */
        if (!s->vm86) {
            if (s->cpl <= s->iopl) {
            gen_sti:
                gen_helper_sti(cpu_env);
                /* interruptions are enabled only the first insn after sti */
                /* If several instructions disable interrupts, only the
                   _first_ does it */
                if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                    gen_helper_set_inhibit_irq(cpu_env);
                /* give a chance to handle pending irqs */
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        } else {
            if (s->iopl == 3) {
                goto gen_sti;
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        }
        break;
    case 0x62: /* bound */
        if (CODE64(s))
            goto illegal_op;
        ot = dflag ? OT_LONG : OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_op_mov_TN_reg(ot, 0, reg);
        gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        gen_jmp_im(pc_start - s->cs_base);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        if (ot == OT_WORD) {
            gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
        } else {
            gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
        }
        break;
    case 0x1c8 ... 0x1cf: /* bswap reg */
        reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
        if (dflag == 2) {
            gen_op_mov_TN_reg(OT_QUAD, 0, reg);
            tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_QUAD, reg);
        } else
#endif
        {
            gen_op_mov_TN_reg(OT_LONG, 0, reg);
            tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_LONG, reg);
        }
        break;
    case 0xd6: /* salc */
        if (CODE64(s))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags_c(cpu_T[0]);
        tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
        gen_op_mov_reg_T0(OT_BYTE, R_EAX);
        break;
    case 0xe0: /* loopnz */
    case 0xe1: /* loopz */
    case 0xe2: /* loop */
    case 0xe3: /* jecxz */
        {
            int l1, l2, l3;

            tval = (int8_t)insn_get(env, s, OT_BYTE);
            next_eip = s->pc - s->cs_base;
            tval += next_eip;
            if (s->dflag == 0)
                tval &= 0xffff;

            l1 = gen_new_label();
            l2 = gen_new_label();
            l3 = gen_new_label();
            b &= 3;
            switch(b) {
            case 0: /* loopnz */
            case 1: /* loopz */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_op_add_reg_im(s->aflag, R_ECX, -1);
                gen_op_jz_ecx(s->aflag, l3);
                gen_compute_eflags(cpu_tmp0);
                tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_Z);
                if (b == 0) {
                    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1);
                } else {
                    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, l1);
                }
                break;
            case 2: /* loop */
                gen_op_add_reg_im(s->aflag, R_ECX, -1);
                gen_op_jnz_ecx(s->aflag, l1);
                break;
            default:
            case 3: /* jecxz */
                gen_op_jz_ecx(s->aflag, l1);
                break;
            }

            gen_set_label(l3);
            gen_jmp_im(next_eip);
            tcg_gen_br(l2);

            gen_set_label(l1);
            gen_jmp_im(tval);
            gen_set_label(l2);
            gen_eob(s);
        }
        break;
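        /* Label roles in the loop/jecxz translation above: l1 is the
           branch-taken target (jump to tval), l2 is the common exit, and l3
           is the early exit used by loopnz/loopz when ECX reaches zero
           before the ZF condition is even tested. */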
    case 0x130: /* wrmsr */
    case 0x132: /* rdmsr */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            if (b & 2) {
                gen_helper_rdmsr(cpu_env);
            } else {
                gen_helper_wrmsr(cpu_env);
            }
        }
        break;
    case 0x131: /* rdtsc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(pc_start - s->cs_base);
        if (use_icount)
            gen_io_start();
        gen_helper_rdtsc(cpu_env);
        if (use_icount) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
    case 0x133: /* rdpmc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_rdpmc(cpu_env);
        break;
    case 0x134: /* sysenter */
        /* For Intel SYSENTER is valid on 64-bit */
        if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
            goto illegal_op;
        if (!s->pe) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_sysenter(cpu_env);
            gen_eob(s);
        }
        break;
    case 0x135: /* sysexit */
        /* For Intel SYSEXIT is valid on 64-bit */
        if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
            goto illegal_op;
        if (!s->pe) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_sysexit(cpu_env, tcg_const_i32(dflag));
            gen_eob(s);
        }
        break;
#ifdef TARGET_X86_64
    case 0x105: /* syscall */
        /* XXX: is it usable in real mode ? */
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
        gen_eob(s);
        break;
    case 0x107: /* sysret */
        if (!s->pe) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_sysret(cpu_env, tcg_const_i32(s->dflag));
            /* condition codes are modified only in long mode */
            if (s->lma)
                s->cc_op = CC_OP_EFLAGS;
            gen_eob(s);
        }
        break;
#endif
    case 0x1a2: /* cpuid */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_cpuid(cpu_env);
        break;
    case 0xf4: /* hlt */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
            s->is_jmp = DISAS_TB_JUMP;
        }
        break;
    case 0x100:
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* sldt */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
            ot = OT_WORD;
            if (mod == 3)
                ot += s->dflag;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 2: /* lldt */
            if (!s->pe || s->vm86)
                goto illegal_op;
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
                gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lldt(cpu_env, cpu_tmp2_i32);
            }
            break;
        case 1: /* str */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
            ot = OT_WORD;
            if (mod == 3)
                ot += s->dflag;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 3: /* ltr */
            if (!s->pe || s->vm86)
                goto illegal_op;
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
                gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_ltr(cpu_env, cpu_tmp2_i32);
            }
            break;
        case 4: /* verr */
        case 5: /* verw */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            if (op == 4) {
                gen_helper_verr(cpu_env, cpu_T[0]);
            } else {
                gen_helper_verw(cpu_env, cpu_T[0]);
            }
            s->cc_op = CC_OP_EFLAGS;
            break;
        default:
            goto illegal_op;
        }
        break;
    case 0x101:
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        rm = modrm & 7;
        switch(op) {
        case 0: /* sgdt */
            if (mod == 3)
                goto illegal_op;
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
            if (!s->dflag)
                gen_op_andl_T0_im(0xffffff);
            gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
            break;
        case 1:
            if (mod == 3) {
                switch (rm) {
                case 0: /* monitor */
                    if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
                        s->cpl != 0)
                        goto illegal_op;
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_jmp_im(pc_start - s->cs_base);
#ifdef TARGET_X86_64
                    if (s->aflag == 2) {
                        gen_op_movq_A0_reg(R_EAX);
                    } else
#endif
                    {
                        gen_op_movl_A0_reg(R_EAX);
                        if (s->aflag == 0)
                            gen_op_andl_A0_ffff();
                    }
                    gen_add_A0_ds_seg(s);
                    gen_helper_monitor(cpu_env, cpu_A0);
                    break;
                case 1: /* mwait */
                    if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
                        s->cpl != 0)
                        goto illegal_op;
                    gen_update_cc_op(s);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
                    gen_eob(s);
                    break;
                case 2: /* clac */
                    if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
                        s->cpl != 0) {
                        goto illegal_op;
                    }
                    gen_helper_clac(cpu_env);
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                    break;
                case 3: /* stac */
                    if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
                        s->cpl != 0) {
                        goto illegal_op;
                    }
                    gen_helper_stac(cpu_env);
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                    break;
                default:
                    goto illegal_op;
                }
            } else { /* sidt */
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
                gen_op_st_T0_A0(OT_WORD + s->mem_index);
                gen_add_A0_im(s, 2);
                tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
                if (!s->dflag)
                    gen_op_andl_T0_im(0xffffff);
                gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
            }
            break;
        case 2: /* lgdt */
        case 3: /* lidt */
            if (mod == 3) {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                switch(rm) {
                case 0: /* VMRUN */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag),
                                         tcg_const_i32(s->pc - pc_start));
                        tcg_gen_exit_tb(0);
                        s->is_jmp = DISAS_TB_JUMP;
                    }
                    break;
                case 1: /* VMMCALL */
                    if (!(s->flags & HF_SVME_MASK))
                        goto illegal_op;
                    gen_helper_vmmcall(cpu_env);
                    break;
                case 2: /* VMLOAD */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag));
                    }
                    break;
                case 3: /* VMSAVE */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag));
                    }
                    break;
                case 4: /* STGI */
                    if ((!(s->flags & HF_SVME_MASK) &&
                         !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
                        !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_stgi(cpu_env);
                    }
                    break;
                case 5: /* CLGI */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_clgi(cpu_env);
                    }
                    break;
                case 6: /* SKINIT */
                    if ((!(s->flags & HF_SVME_MASK) &&
                         !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
                        !s->pe)
                        goto illegal_op;
                    gen_helper_skinit(cpu_env);
                    break;
                case 7: /* INVLPGA */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag));
                    }
                    break;
                default:
                    goto illegal_op;
                }
            } else if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start,
                                        op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_T1_A0(OT_WORD + s->mem_index);
                gen_add_A0_im(s, 2);
                gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
                if (!s->dflag)
                    gen_op_andl_T0_im(0xffffff);
                if (op == 2) {
                    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
                    tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
                } else {
                    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
                    tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
                }
            }
            break;
        case 4: /* smsw */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
#else
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
#endif
            gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 1);
            break;
        case 6: /* lmsw */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
                gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
                gen_helper_lmsw(cpu_env, cpu_T[0]);
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            }
            break;
        case 7:
            if (mod != 3) { /* invlpg */
                if (s->cpl != 0) {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                } else {
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    gen_helper_invlpg(cpu_env, cpu_A0);
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                }
            } else {
                switch (rm) {
                case 0: /* swapgs */
#ifdef TARGET_X86_64
                    if (CODE64(s)) {
                        if (s->cpl != 0) {
                            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        } else {
                            tcg_gen_ld_tl(cpu_T[0], cpu_env,
                                          offsetof(CPUX86State,segs[R_GS].base));
                            tcg_gen_ld_tl(cpu_T[1], cpu_env,
                                          offsetof(CPUX86State,kernelgsbase));
                            tcg_gen_st_tl(cpu_T[1], cpu_env,
                                          offsetof(CPUX86State,segs[R_GS].base));
                            tcg_gen_st_tl(cpu_T[0], cpu_env,
                                          offsetof(CPUX86State,kernelgsbase));
                        }
                    } else
#endif
                    {
                        goto illegal_op;
                    }
                    break;
                case 1: /* rdtscp */
                    if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
                        goto illegal_op;
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_jmp_im(pc_start - s->cs_base);
                    if (use_icount)
                        gen_io_start();
                    gen_helper_rdtscp(cpu_env);
                    if (use_icount) {
                        gen_io_end();
                        gen_jmp(s, s->pc - s->cs_base);
                    }
                    break;
                default:
                    goto illegal_op;
                }
            }
            break;
        default:
            goto illegal_op;
        }
        break;
    case 0x108: /* invd */
    case 0x109: /* wbinvd */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
            /* nothing to do */
        }
        break;
    case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            int d_ot;
            /* d_ot is the size of destination */
            d_ot = dflag + OT_WORD;

            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_TN_reg(OT_LONG, 0, rm);
                /* sign extend */
                if (d_ot == OT_QUAD)
                    tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
                gen_op_mov_reg_T0(d_ot, reg);
            } else {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                if (d_ot == OT_QUAD) {
                    gen_op_lds_T0_A0(OT_LONG + s->mem_index);
                } else {
                    gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                }
                gen_op_mov_reg_T0(d_ot, reg);
            }
        } else
#endif
        {
            int label1;
            TCGv t0, t1, t2, a0;

            if (!s->pe || s->vm86)
                goto illegal_op;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            ot = OT_WORD;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = (modrm >> 3) & 7;
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
                a0 = tcg_temp_local_new();
                tcg_gen_mov_tl(a0, cpu_A0);
            } else {
                gen_op_mov_v_reg(ot, t0, rm);
                TCGV_UNUSED(a0);
            }
            gen_op_mov_v_reg(ot, t1, reg);
            tcg_gen_andi_tl(cpu_tmp0, t0, 3);
            tcg_gen_andi_tl(t1, t1, 3);
            tcg_gen_movi_tl(t2, 0);
            label1 = gen_new_label();
            tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
            tcg_gen_andi_tl(t0, t0, ~3);
            tcg_gen_or_tl(t0, t0, t1);
            tcg_gen_movi_tl(t2, CC_Z);
            gen_set_label(label1);
            if (mod != 3) {
                gen_op_st_v(ot + s->mem_index, t0, a0);
                tcg_temp_free(a0);
            } else {
                gen_op_mov_reg_v(ot, rm, t0);
            }
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_compute_eflags(cpu_cc_src);
            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
            tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
            s->cc_op = CC_OP_EFLAGS;
            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        break;
    case 0x102: /* lar */
    case 0x103: /* lsl */
        {
            int label1;
            TCGv t0;

            if (!s->pe || s->vm86)
                goto illegal_op;
            ot = dflag ? OT_LONG : OT_WORD;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
            t0 = tcg_temp_local_new();
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            if (b == 0x102) {
                gen_helper_lar(t0, cpu_env, cpu_T[0]);
            } else {
                gen_helper_lsl(t0, cpu_env, cpu_T[0]);
            }
            tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
            label1 = gen_new_label();
            tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
            gen_op_mov_reg_v(ot, reg, t0);
            gen_set_label(label1);
            s->cc_op = CC_OP_EFLAGS;
            tcg_temp_free(t0);
        }
        break;
    case 0x118:
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* prefetchnta */
        case 1: /* prefetchnt0 */
        case 2: /* prefetchnt0 */
        case 3: /* prefetchnt0 */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            /* nothing more to do */
            break;
        default: /* nop (multi byte) */
            gen_nop_modrm(env, s, modrm);
            break;
        }
        break;
    case 0x119 ... 0x11f: /* nop (multi byte) */
        modrm = cpu_ldub_code(env, s->pc++);
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            modrm = cpu_ldub_code(env, s->pc++);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            if (CODE64(s))
                ot = OT_QUAD;
            else
                ot = OT_LONG;
            if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
                (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
                reg = 8;
            }
            switch(reg) {
            case 0:
            case 2:
            case 3:
            case 4:
            case 8:
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                if (b & 2) {
                    gen_op_mov_TN_reg(ot, 0, rm);
                    gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
                                         cpu_T[0]);
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                } else {
                    gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
                    gen_op_mov_reg_T0(ot, rm);
                }
                break;
            default:
                goto illegal_op;
            }
        }
        break;
    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            modrm = cpu_ldub_code(env, s->pc++);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            if (CODE64(s))
                ot = OT_QUAD;
            else
                ot = OT_LONG;
            /* XXX: do it dynamically with CR4.DE bit */
            if (reg == 4 || reg == 5 || reg >= 8)
                goto illegal_op;
            if (b & 2) {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
                gen_op_mov_TN_reg(ot, 0, rm);
                gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
                tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
                gen_op_mov_reg_T0(ot, rm);
            }
        }
        break;
    case 0x106: /* clts */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
            gen_helper_clts(cpu_env);
            /* abort block because static cpu state changed */
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
        /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
            goto illegal_op;
        ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
7615 modrm
= cpu_ldub_code(env
, s
->pc
++);
7616 mod
= (modrm
>> 6) & 3;
7617 op
= (modrm
>> 3) & 7;
7619 case 0: /* fxsave */
7620 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
) ||
7621 (s
->prefix
& PREFIX_LOCK
))
7623 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
7624 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7627 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7628 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7629 gen_op_set_cc_op(s
->cc_op
);
7630 gen_jmp_im(pc_start
- s
->cs_base
);
7631 gen_helper_fxsave(cpu_env
, cpu_A0
, tcg_const_i32((s
->dflag
== 2)));
7633 case 1: /* fxrstor */
7634 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
) ||
7635 (s
->prefix
& PREFIX_LOCK
))
7637 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
7638 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7641 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7642 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7643 gen_op_set_cc_op(s
->cc_op
);
7644 gen_jmp_im(pc_start
- s
->cs_base
);
7645 gen_helper_fxrstor(cpu_env
, cpu_A0
,
7646 tcg_const_i32((s
->dflag
== 2)));
        case 2: /* ldmxcsr */
        case 3: /* stmxcsr */
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
                mod == 3)
                goto illegal_op;
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            if (op == 2) {
                gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
            } else {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
            }
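        /* The fence sub-ops below only validate the encoding and CPUID bits:
         * TCG executes this vCPU's memory accesses in program order, so no
         * barrier op is emitted, and clflush likewise generates nothing
         * beyond the effective-address computation. */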
        case 5: /* lfence */
        case 6: /* mfence */
            if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
                goto illegal_op;
            break;
        case 7: /* sfence / clflush */
            if ((modrm & 0xc7) == 0xc0) {
                /* sfence */
                /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
                if (!(s->cpuid_features & CPUID_SSE))
                    goto illegal_op;
            } else {
                /* clflush */
                if (!(s->cpuid_features & CPUID_CLFLUSH))
                    goto illegal_op;
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            }
    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        /* ignore for now */
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_jmp_im(s->pc - s->cs_base);
        gen_helper_rsm(cpu_env);
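    /* POPCNT is encoded as F3 0F B8 /r, so the REPZ prefix is mandatory and
     * LOCK/REPNZ must be absent; availability is signalled by the CPUID
     * POPCNT extension bit.  The flag computation is delegated to the
     * helper, which is why cc_op is switched to CC_OP_EFLAGS afterwards. */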
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ)
            goto illegal_op;
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
            goto illegal_op;

        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        if (s->prefix & PREFIX_DATA)
            ot = OT_WORD;
        else if (s->dflag != 2)
            ot = OT_LONG;
        else
            ot = OT_QUAD;

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
        gen_op_mov_reg_T0(ot, reg);

        s->cc_op = CC_OP_EFLAGS;
    case 0x10e ... 0x10f:
        /* 3DNow! instructions, ignore prefixes */
        s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
    case 0x110 ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x179:
    case 0x17c ... 0x17f:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        gen_sse(env, s, b, pc_start, rex_r);
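    /* Everything in the opcode ranges above (MMX, 3DNow! and the SSE
     * families) is decoded by gen_sse(); the 3DNow! case deliberately falls
     * through after clearing the REPZ/REPNZ/DATA prefix bits, which 3DNow!
     * does not use as opcode selectors. */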
    /* lock generation */
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    return s->pc;
 illegal_op:
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    /* XXX: ensure that no lock was generated */
    gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
    return s->pc;
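/* optimize_flags_init() is called at CPU-initialization time: it registers
 * the fixed TCG globals that back the guest CPU state (the env pointer, the
 * lazy condition-code variables and the general-purpose registers), so every
 * translated block can refer to them directly, e.g.
 * tcg_gen_mov_tl(cpu_T[0], cpu_regs[R_EAX]) reads the guest accumulator. */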
void optimize_flags_init(void)
{
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_tmp),
                                    "cc_tmp");
#ifdef TARGET_X86_64
    cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EAX]), "rax");
    cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ECX]), "rcx");
    cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDX]), "rdx");
    cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBX]), "rbx");
    cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESP]), "rsp");
    cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBP]), "rbp");
    cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESI]), "rsi");
    cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDI]), "rdi");
    cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
                                         offsetof(CPUX86State, regs[8]), "r8");
    cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
                                         offsetof(CPUX86State, regs[9]), "r9");
    cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[10]), "r10");
    cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[11]), "r11");
    cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[12]), "r12");
    cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[13]), "r13");
    cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[14]), "r14");
    cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[15]), "r15");
#else
    cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EAX]), "eax");
    cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ECX]), "ecx");
    cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDX]), "edx");
    cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBX]), "ebx");
    cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESP]), "esp");
    cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBP]), "ebp");
    cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESI]), "esi");
    cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDI]), "edi");
#endif
    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
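/* Including helper.h with GEN_HELPER defined to 2 (above) expands each
 * DEF_HELPER_* entry into a tcg_register_helper() call, so every helper
 * used by the generator functions is registered with the TCG runtime. */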
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUX86State *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_ptr;
    uint16_t *gen_opc_end;
    target_ulong pc_start;
    target_ulong cs_base;
    /* generate intermediate code */
    cs_base = tb->cs_base;

    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cs_base = cs_base;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    if (flags & HF_SOFTMMU_MASK) {
        dc->mem_index = (cpu_mmu_index(env) + 1) << 2;
    }
    dc->cpuid_features = env->cpuid_features;
    dc->cpuid_ext_features = env->cpuid_ext_features;
    dc->cpuid_ext2_features = env->cpuid_ext2_features;
    dc->cpuid_ext3_features = env->cpuid_ext3_features;
    dc->cpuid_7_0_ebx_features = env->cpuid_7_0_ebx_features;
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK)
#ifndef CONFIG_SOFTMMU
                    || (flags & HF_SOFTMMU_MASK)
#endif
                    );
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
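    /* Allocate the TCG temporaries that the per-instruction generator
     * functions use as scratch registers; they are created afresh for each
     * translation block. */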
    cpu_T[0] = tcg_temp_new();
    cpu_T[1] = tcg_temp_new();
    cpu_A0 = tcg_temp_new();
    cpu_T3 = tcg_temp_new();

    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_tmp5 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
    dc->is_jmp = DISAS_NEXT;

    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
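    /* Main translation loop: translate one guest instruction per iteration
     * until disas_insn() requests a stop, the op buffer is nearly full, the
     * block would cross a page boundary, or the icount budget is reached. */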
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == pc_ptr &&
                    !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
                    gen_debug(dc, pc_ptr - dc->cs_base);
                    break;
                }
            }
        }
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = pc_ptr;
            gen_opc_cc_op[lj] = dc->cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        pc_ptr = disas_insn(env, dc, pc_ptr);
        /* stop translation if indicated */
        if (dc->is_jmp)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen */
        if (dc->tf || dc->singlestep_enabled ||
            (flags & HF_INHIBIT_IRQ_MASK)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* if too long translation, stop generation too */
        if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
            num_insns >= max_insns) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        if (singlestep) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    /* don't forget to fill the last values */
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    }
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        int disas_flags;
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
        if (dc->code64)
            disas_flags = 2;
        else
#endif
            disas_flags = !dc->code32;
        log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
    }
    tb->size = pc_ptr - pc_start;
    tb->icount = num_insns;
}
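/* The two public entry points below only differ in whether per-op PC
 * information is recorded: the _pc variant is used when a TB has to be
 * retranslated to recover guest state for a faulting host PC (see
 * restore_state_to_opc() further down). */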
void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
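/* restore_state_to_opc() maps an op index (found from the host PC of a
 * faulting translated instruction) back to guest state: eip is recovered
 * from gen_opc_pc[] and the lazily-tracked condition-code operation from
 * gen_opc_cc_op[], both filled in by the search_pc pass above. */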
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
{
    int cc_op;

    if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
        int i;
        qemu_log("RESTORE:\n");
        for (i = 0; i <= pc_pos; i++) {
            if (tcg_ctx.gen_opc_instr_start[i]) {
                qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
                         tcg_ctx.gen_opc_pc[i]);
            }
        }
        qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
                 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
                 (uint32_t)tb->cs_base);
    }

    env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
    cc_op = gen_opc_cc_op[pc_pos];
    if (cc_op != CC_OP_DYNAMIC)
        env->cc_op = cc_op;
}