4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
27 #include "disas/disas.h"
34 #define PREFIX_REPZ 0x01
35 #define PREFIX_REPNZ 0x02
36 #define PREFIX_LOCK 0x04
37 #define PREFIX_DATA 0x08
38 #define PREFIX_ADR 0x10
41 #define CODE64(s) ((s)->code64)
42 #define REX_X(s) ((s)->rex_x)
43 #define REX_B(s) ((s)->rex_b)
50 //#define MACRO_TEST 1
52 /* global register indexes */
53 static TCGv_ptr cpu_env
;
54 static TCGv cpu_A0
, cpu_cc_src
, cpu_cc_dst
;
55 static TCGv_i32 cpu_cc_op
;
56 static TCGv cpu_regs
[CPU_NB_REGS
];
58 static TCGv cpu_T
[2], cpu_T3
;
59 /* local register indexes (only used inside old micro ops) */
60 static TCGv cpu_tmp0
, cpu_tmp4
;
61 static TCGv_ptr cpu_ptr0
, cpu_ptr1
;
62 static TCGv_i32 cpu_tmp2_i32
, cpu_tmp3_i32
;
63 static TCGv_i64 cpu_tmp1_i64
;
66 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
68 #include "exec/gen-icount.h"
71 static int x86_64_hregs
;
74 typedef struct DisasContext
{
75 /* current insn context */
76 int override
; /* -1 if no override */
79 target_ulong pc
; /* pc = eip + cs_base */
80 int is_jmp
; /* 1 = means jump (stop translation), 2 means CPU
81 static state change (stop translation) */
82 /* current block context */
83 target_ulong cs_base
; /* base of CS segment */
84 int pe
; /* protected mode */
85 int code32
; /* 32 bit code segment */
87 int lma
; /* long mode active */
88 int code64
; /* 64 bit code segment */
91 int ss32
; /* 32 bit stack segment */
92 CCOp cc_op
; /* current CC operation */
94 int addseg
; /* non zero if either DS/ES/SS have a non zero base */
95 int f_st
; /* currently unused */
96 int vm86
; /* vm86 mode */
99 int tf
; /* TF cpu flag */
100 int singlestep_enabled
; /* "hardware" single step enabled */
101 int jmp_opt
; /* use direct block chaining for direct jumps */
102 int mem_index
; /* select memory access functions */
103 uint64_t flags
; /* all execution flags */
104 struct TranslationBlock
*tb
;
105 int popl_esp_hack
; /* for correct popl with esp base handling */
106 int rip_offset
; /* only used in x86_64, but left for simplicity */
108 int cpuid_ext_features
;
109 int cpuid_ext2_features
;
110 int cpuid_ext3_features
;
111 int cpuid_7_0_ebx_features
;
114 static void gen_eob(DisasContext
*s
);
115 static void gen_jmp(DisasContext
*s
, target_ulong eip
);
116 static void gen_jmp_tb(DisasContext
*s
, target_ulong eip
, int tb_num
);
118 /* i386 arith/logic operations */
138 OP_SHL1
, /* undocumented */
162 /* I386 int registers */
163 OR_EAX
, /* MUST be even numbered */
172 OR_TMP0
= 16, /* temporary operand register */
174 OR_A0
, /* temporary register used when doing address evaluation */
182 /* Bit set if the global variable is live after setting CC_OP to X. */
183 static const uint8_t cc_op_live
[CC_OP_NB
] = {
184 [CC_OP_DYNAMIC
] = USES_CC_DST
| USES_CC_SRC
,
185 [CC_OP_EFLAGS
] = USES_CC_SRC
,
186 [CC_OP_MULB
... CC_OP_MULQ
] = USES_CC_DST
| USES_CC_SRC
,
187 [CC_OP_ADDB
... CC_OP_ADDQ
] = USES_CC_DST
| USES_CC_SRC
,
188 [CC_OP_ADCB
... CC_OP_ADCQ
] = USES_CC_DST
| USES_CC_SRC
,
189 [CC_OP_SUBB
... CC_OP_SUBQ
] = USES_CC_DST
| USES_CC_SRC
,
190 [CC_OP_SBBB
... CC_OP_SBBQ
] = USES_CC_DST
| USES_CC_SRC
,
191 [CC_OP_LOGICB
... CC_OP_LOGICQ
] = USES_CC_DST
,
192 [CC_OP_INCB
... CC_OP_INCQ
] = USES_CC_DST
| USES_CC_SRC
,
193 [CC_OP_DECB
... CC_OP_DECQ
] = USES_CC_DST
| USES_CC_SRC
,
194 [CC_OP_SHLB
... CC_OP_SHLQ
] = USES_CC_DST
| USES_CC_SRC
,
195 [CC_OP_SARB
... CC_OP_SARQ
] = USES_CC_DST
| USES_CC_SRC
,
/* Record the translator's lazily-tracked condition-code state.
 * If the new op equals the current one there is nothing to do;
 * otherwise the cc_op_live[] table is consulted and whichever of the
 * cpu_cc_dst / cpu_cc_src TCG globals the new op no longer reads is
 * discarded, letting TCG dead-code-eliminate the stale flag inputs.
 * NOTE(review): closing braces / early return dropped by extraction. */
198 static void set_cc_op(DisasContext
*s
, CCOp op
)
202 if (s
->cc_op
== op
) {
206 /* Discard CC computation that will no longer be used. */
207 dead
= cc_op_live
[s
->cc_op
] & ~cc_op_live
[op
];
208 if (dead
& USES_CC_DST
) {
209 tcg_gen_discard_tl(cpu_cc_dst
);
211 if (dead
& USES_CC_SRC
) {
212 tcg_gen_discard_tl(cpu_cc_src
);
216 /* The DYNAMIC setting is translator only, and should never be
217 stored. Thus we always consider it clean. */
218 s
->cc_op_dirty
= (op
!= CC_OP_DYNAMIC
);
/* Flush the translator's cached cc_op into the cpu_cc_op TCG global,
 * but only when it has changed since the last flush (cc_op_dirty).
 * Needed before anything that reads the stored CC state at runtime. */
221 static void gen_update_cc_op(DisasContext
*s
)
223 if (s
->cc_op_dirty
) {
224 tcg_gen_movi_i32(cpu_cc_op
, s
->cc_op
);
225 s
->cc_op_dirty
= false;
229 static inline void gen_op_movl_T0_0(void)
231 tcg_gen_movi_tl(cpu_T
[0], 0);
234 static inline void gen_op_movl_T0_im(int32_t val
)
236 tcg_gen_movi_tl(cpu_T
[0], val
);
239 static inline void gen_op_movl_T0_imu(uint32_t val
)
241 tcg_gen_movi_tl(cpu_T
[0], val
);
244 static inline void gen_op_movl_T1_im(int32_t val
)
246 tcg_gen_movi_tl(cpu_T
[1], val
);
249 static inline void gen_op_movl_T1_imu(uint32_t val
)
251 tcg_gen_movi_tl(cpu_T
[1], val
);
254 static inline void gen_op_movl_A0_im(uint32_t val
)
256 tcg_gen_movi_tl(cpu_A0
, val
);
260 static inline void gen_op_movq_A0_im(int64_t val
)
262 tcg_gen_movi_tl(cpu_A0
, val
);
266 static inline void gen_movtl_T0_im(target_ulong val
)
268 tcg_gen_movi_tl(cpu_T
[0], val
);
271 static inline void gen_movtl_T1_im(target_ulong val
)
273 tcg_gen_movi_tl(cpu_T
[1], val
);
276 static inline void gen_op_andl_T0_ffff(void)
278 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xffff);
281 static inline void gen_op_andl_T0_im(uint32_t val
)
283 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], val
);
286 static inline void gen_op_movl_T0_T1(void)
288 tcg_gen_mov_tl(cpu_T
[0], cpu_T
[1]);
291 static inline void gen_op_andl_A0_ffff(void)
293 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffff);
298 #define NB_OP_SIZES 4
300 #else /* !TARGET_X86_64 */
302 #define NB_OP_SIZES 3
304 #endif /* !TARGET_X86_64 */
306 #if defined(HOST_WORDS_BIGENDIAN)
307 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
308 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
309 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
310 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
311 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
313 #define REG_B_OFFSET 0
314 #define REG_H_OFFSET 1
315 #define REG_W_OFFSET 0
316 #define REG_L_OFFSET 0
317 #define REG_LH_OFFSET 4
320 /* In instruction encodings for byte register accesses the
321 * register number usually indicates "low 8 bits of register N";
322 * however there are some special cases where N 4..7 indicates
323 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
324 * true for this special case, false otherwise.
/* NOTE(review): the return statements were dropped by extraction.
 * From the visible condition: registers >= 8, or any register when
 * REX-prefix byte-register semantics are active (x86_64_hregs), take
 * one path; presumably only regs 4..7 otherwise map to AH/CH/DH/BH
 * (see the comment block above) — confirm against the full source. */
326 static inline bool byte_reg_is_xH(int reg
)
332 if (reg
>= 8 || x86_64_hregs
) {
/* Store TCG value t0 into guest register 'reg' at operand size 'ot':
 * - byte: deposit into bits 0..7, or into bits 8..15 of reg-4 for the
 *   AH/CH/DH/BH encodings (byte_reg_is_xH);
 * - word: deposit into bits 0..15, preserving the upper bits;
 * - long: zero-extend (clears the high half on x86_64);
 * - quad: plain move.
 * NOTE(review): the switch/case/break lines were dropped by extraction;
 * the deposit/extend/move statements below are the per-size bodies. */
339 static inline void gen_op_mov_reg_v(int ot
, int reg
, TCGv t0
)
343 if (!byte_reg_is_xH(reg
)) {
344 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], t0
, 0, 8);
346 tcg_gen_deposit_tl(cpu_regs
[reg
- 4], cpu_regs
[reg
- 4], t0
, 8, 8);
350 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], t0
, 0, 16);
352 default: /* XXX this shouldn't be reached; abort? */
354 /* For x86_64, this sets the higher half of register to zero.
355 For i386, this is equivalent to a mov. */
356 tcg_gen_ext32u_tl(cpu_regs
[reg
], t0
);
360 tcg_gen_mov_tl(cpu_regs
[reg
], t0
);
366 static inline void gen_op_mov_reg_T0(int ot
, int reg
)
368 gen_op_mov_reg_v(ot
, reg
, cpu_T
[0]);
371 static inline void gen_op_mov_reg_T1(int ot
, int reg
)
373 gen_op_mov_reg_v(ot
, reg
, cpu_T
[1]);
376 static inline void gen_op_mov_reg_A0(int size
, int reg
)
380 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], cpu_A0
, 0, 16);
382 default: /* XXX this shouldn't be reached; abort? */
384 /* For x86_64, this sets the higher half of register to zero.
385 For i386, this is equivalent to a mov. */
386 tcg_gen_ext32u_tl(cpu_regs
[reg
], cpu_A0
);
390 tcg_gen_mov_tl(cpu_regs
[reg
], cpu_A0
);
/* Load guest register 'reg' into TCG value t0.  For the byte-size
 * AH/CH/DH/BH encodings, shift bits 15..8 of reg-4 down and
 * zero-extend to 8 bits; otherwise copy the whole register (callers
 * truncate/extend according to 'ot' as needed).
 * NOTE(review): else-branch braces dropped by extraction. */
396 static inline void gen_op_mov_v_reg(int ot
, TCGv t0
, int reg
)
398 if (ot
== OT_BYTE
&& byte_reg_is_xH(reg
)) {
399 tcg_gen_shri_tl(t0
, cpu_regs
[reg
- 4], 8);
400 tcg_gen_ext8u_tl(t0
, t0
);
402 tcg_gen_mov_tl(t0
, cpu_regs
[reg
]);
406 static inline void gen_op_mov_TN_reg(int ot
, int t_index
, int reg
)
408 gen_op_mov_v_reg(ot
, cpu_T
[t_index
], reg
);
411 static inline void gen_op_movl_A0_reg(int reg
)
413 tcg_gen_mov_tl(cpu_A0
, cpu_regs
[reg
]);
416 static inline void gen_op_addl_A0_im(int32_t val
)
418 tcg_gen_addi_tl(cpu_A0
, cpu_A0
, val
);
420 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
425 static inline void gen_op_addq_A0_im(int64_t val
)
427 tcg_gen_addi_tl(cpu_A0
, cpu_A0
, val
);
431 static void gen_add_A0_im(DisasContext
*s
, int val
)
435 gen_op_addq_A0_im(val
);
438 gen_op_addl_A0_im(val
);
441 static inline void gen_op_addl_T0_T1(void)
443 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
446 static inline void gen_op_jmp_T0(void)
448 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, eip
));
451 static inline void gen_op_add_reg_im(int size
, int reg
, int32_t val
)
455 tcg_gen_addi_tl(cpu_tmp0
, cpu_regs
[reg
], val
);
456 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], cpu_tmp0
, 0, 16);
459 tcg_gen_addi_tl(cpu_tmp0
, cpu_regs
[reg
], val
);
460 /* For x86_64, this sets the higher half of register to zero.
461 For i386, this is equivalent to a nop. */
462 tcg_gen_ext32u_tl(cpu_tmp0
, cpu_tmp0
);
463 tcg_gen_mov_tl(cpu_regs
[reg
], cpu_tmp0
);
467 tcg_gen_addi_tl(cpu_regs
[reg
], cpu_regs
[reg
], val
);
473 static inline void gen_op_add_reg_T0(int size
, int reg
)
477 tcg_gen_add_tl(cpu_tmp0
, cpu_regs
[reg
], cpu_T
[0]);
478 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], cpu_tmp0
, 0, 16);
481 tcg_gen_add_tl(cpu_tmp0
, cpu_regs
[reg
], cpu_T
[0]);
482 /* For x86_64, this sets the higher half of register to zero.
483 For i386, this is equivalent to a nop. */
484 tcg_gen_ext32u_tl(cpu_tmp0
, cpu_tmp0
);
485 tcg_gen_mov_tl(cpu_regs
[reg
], cpu_tmp0
);
489 tcg_gen_add_tl(cpu_regs
[reg
], cpu_regs
[reg
], cpu_T
[0]);
495 static inline void gen_op_addl_A0_reg_sN(int shift
, int reg
)
497 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[reg
]);
499 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, shift
);
500 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
501 /* For x86_64, this sets the higher half of register to zero.
502 For i386, this is equivalent to a nop. */
503 tcg_gen_ext32u_tl(cpu_A0
, cpu_A0
);
506 static inline void gen_op_movl_A0_seg(int reg
)
508 tcg_gen_ld32u_tl(cpu_A0
, cpu_env
, offsetof(CPUX86State
, segs
[reg
].base
) + REG_L_OFFSET
);
511 static inline void gen_op_addl_A0_seg(DisasContext
*s
, int reg
)
513 tcg_gen_ld_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
, segs
[reg
].base
));
516 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
517 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
519 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
520 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
523 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
528 static inline void gen_op_movq_A0_seg(int reg
)
530 tcg_gen_ld_tl(cpu_A0
, cpu_env
, offsetof(CPUX86State
, segs
[reg
].base
));
533 static inline void gen_op_addq_A0_seg(int reg
)
535 tcg_gen_ld_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
, segs
[reg
].base
));
536 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
539 static inline void gen_op_movq_A0_reg(int reg
)
541 tcg_gen_mov_tl(cpu_A0
, cpu_regs
[reg
]);
544 static inline void gen_op_addq_A0_reg_sN(int shift
, int reg
)
546 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[reg
]);
548 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, shift
);
549 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
553 static inline void gen_op_lds_T0_A0(int idx
)
555 int mem_index
= (idx
>> 2) - 1;
558 tcg_gen_qemu_ld8s(cpu_T
[0], cpu_A0
, mem_index
);
561 tcg_gen_qemu_ld16s(cpu_T
[0], cpu_A0
, mem_index
);
565 tcg_gen_qemu_ld32s(cpu_T
[0], cpu_A0
, mem_index
);
570 static inline void gen_op_ld_v(int idx
, TCGv t0
, TCGv a0
)
572 int mem_index
= (idx
>> 2) - 1;
575 tcg_gen_qemu_ld8u(t0
, a0
, mem_index
);
578 tcg_gen_qemu_ld16u(t0
, a0
, mem_index
);
581 tcg_gen_qemu_ld32u(t0
, a0
, mem_index
);
585 /* Should never happen on 32-bit targets. */
587 tcg_gen_qemu_ld64(t0
, a0
, mem_index
);
593 /* XXX: always use ldu or lds */
594 static inline void gen_op_ld_T0_A0(int idx
)
596 gen_op_ld_v(idx
, cpu_T
[0], cpu_A0
);
599 static inline void gen_op_ldu_T0_A0(int idx
)
601 gen_op_ld_v(idx
, cpu_T
[0], cpu_A0
);
604 static inline void gen_op_ld_T1_A0(int idx
)
606 gen_op_ld_v(idx
, cpu_T
[1], cpu_A0
);
609 static inline void gen_op_st_v(int idx
, TCGv t0
, TCGv a0
)
611 int mem_index
= (idx
>> 2) - 1;
614 tcg_gen_qemu_st8(t0
, a0
, mem_index
);
617 tcg_gen_qemu_st16(t0
, a0
, mem_index
);
620 tcg_gen_qemu_st32(t0
, a0
, mem_index
);
624 /* Should never happen on 32-bit targets. */
626 tcg_gen_qemu_st64(t0
, a0
, mem_index
);
632 static inline void gen_op_st_T0_A0(int idx
)
634 gen_op_st_v(idx
, cpu_T
[0], cpu_A0
);
637 static inline void gen_op_st_T1_A0(int idx
)
639 gen_op_st_v(idx
, cpu_T
[1], cpu_A0
);
642 static inline void gen_jmp_im(target_ulong pc
)
644 tcg_gen_movi_tl(cpu_tmp0
, pc
);
645 tcg_gen_st_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
, eip
));
/* Compute into cpu_A0 the linear address of the string-op source,
 * DS:ESI (honoring any segment-override prefix in s->override), for
 * 64-bit, 32-bit and 16-bit address sizes respectively.  In 16-bit
 * mode the segment base is always added after masking ESI to 16 bits.
 * NOTE(review): the address-size switch and several branch lines were
 * dropped by extraction. */
648 static inline void gen_string_movl_A0_ESI(DisasContext
*s
)
652 override
= s
->override
;
656 gen_op_movq_A0_seg(override
);
657 gen_op_addq_A0_reg_sN(0, R_ESI
);
659 gen_op_movq_A0_reg(R_ESI
);
665 if (s
->addseg
&& override
< 0)
668 gen_op_movl_A0_seg(override
);
669 gen_op_addl_A0_reg_sN(0, R_ESI
);
671 gen_op_movl_A0_reg(R_ESI
);
674 /* 16 address, always override */
677 gen_op_movl_A0_reg(R_ESI
);
678 gen_op_andl_A0_ffff();
679 gen_op_addl_A0_seg(s
, override
);
/* Compute into cpu_A0 the linear address of the string-op destination,
 * ES:EDI.  Unlike the source side there is no segment-override case:
 * x86 string destinations always use ES.
 * NOTE(review): the address-size switch lines were dropped by
 * extraction; the three groups below are the 64/32/16-bit paths. */
683 static inline void gen_string_movl_A0_EDI(DisasContext
*s
)
687 gen_op_movq_A0_reg(R_EDI
);
692 gen_op_movl_A0_seg(R_ES
);
693 gen_op_addl_A0_reg_sN(0, R_EDI
);
695 gen_op_movl_A0_reg(R_EDI
);
698 gen_op_movl_A0_reg(R_EDI
);
699 gen_op_andl_A0_ffff();
700 gen_op_addl_A0_seg(s
, R_ES
);
704 static inline void gen_op_movl_T0_Dshift(int ot
)
706 tcg_gen_ld32s_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, df
));
707 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], ot
);
710 static TCGv
gen_ext_tl(TCGv dst
, TCGv src
, int size
, bool sign
)
715 tcg_gen_ext8s_tl(dst
, src
);
717 tcg_gen_ext8u_tl(dst
, src
);
722 tcg_gen_ext16s_tl(dst
, src
);
724 tcg_gen_ext16u_tl(dst
, src
);
730 tcg_gen_ext32s_tl(dst
, src
);
732 tcg_gen_ext32u_tl(dst
, src
);
741 static void gen_extu(int ot
, TCGv reg
)
743 gen_ext_tl(reg
, reg
, ot
, false);
746 static void gen_exts(int ot
, TCGv reg
)
748 gen_ext_tl(reg
, reg
, ot
, true);
751 static inline void gen_op_jnz_ecx(int size
, int label1
)
753 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[R_ECX
]);
754 gen_extu(size
+ 1, cpu_tmp0
);
755 tcg_gen_brcondi_tl(TCG_COND_NE
, cpu_tmp0
, 0, label1
);
758 static inline void gen_op_jz_ecx(int size
, int label1
)
760 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[R_ECX
]);
761 gen_extu(size
+ 1, cpu_tmp0
);
762 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_tmp0
, 0, label1
);
765 static void gen_helper_in_func(int ot
, TCGv v
, TCGv_i32 n
)
769 gen_helper_inb(v
, n
);
772 gen_helper_inw(v
, n
);
775 gen_helper_inl(v
, n
);
780 static void gen_helper_out_func(int ot
, TCGv_i32 v
, TCGv_i32 n
)
784 gen_helper_outb(v
, n
);
787 gen_helper_outw(v
, n
);
790 gen_helper_outl(v
, n
);
795 static void gen_check_io(DisasContext
*s
, int ot
, target_ulong cur_eip
,
799 target_ulong next_eip
;
802 if (s
->pe
&& (s
->cpl
> s
->iopl
|| s
->vm86
)) {
806 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
809 gen_helper_check_iob(cpu_env
, cpu_tmp2_i32
);
812 gen_helper_check_iow(cpu_env
, cpu_tmp2_i32
);
815 gen_helper_check_iol(cpu_env
, cpu_tmp2_i32
);
819 if(s
->flags
& HF_SVMI_MASK
) {
824 svm_flags
|= (1 << (4 + ot
));
825 next_eip
= s
->pc
- s
->cs_base
;
826 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
827 gen_helper_svm_check_io(cpu_env
, cpu_tmp2_i32
,
828 tcg_const_i32(svm_flags
),
829 tcg_const_i32(next_eip
- cur_eip
));
/* Emit one MOVS iteration: load T0 from the DS:ESI source address,
 * store it to the ES:EDI destination, then advance both ESI and EDI
 * by the direction-flag-scaled operand size (gen_op_movl_T0_Dshift
 * loads df shifted by 'ot' into T0). */
833 static inline void gen_movs(DisasContext
*s
, int ot
)
835 gen_string_movl_A0_ESI(s
);
836 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
837 gen_string_movl_A0_EDI(s
);
838 gen_op_st_T0_A0(ot
+ s
->mem_index
);
839 gen_op_movl_T0_Dshift(ot
);
840 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
841 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
844 static void gen_op_update1_cc(void)
846 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
849 static void gen_op_update2_cc(void)
851 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
852 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
855 static inline void gen_op_cmpl_T0_T1_cc(void)
857 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
858 tcg_gen_sub_tl(cpu_cc_dst
, cpu_T
[0], cpu_T
[1]);
861 static inline void gen_op_testl_T0_T1_cc(void)
863 tcg_gen_and_tl(cpu_cc_dst
, cpu_T
[0], cpu_T
[1]);
866 static void gen_op_update_neg_cc(void)
868 tcg_gen_neg_tl(cpu_cc_src
, cpu_T
[0]);
869 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
872 /* compute all eflags to cc_src */
/* Materialize the full EFLAGS value into cpu_cc_src by calling the
 * cc_compute_all helper (which dispatches on cpu_cc_op), then switch
 * the tracked state to CC_OP_EFLAGS.  Early-out when the flags are
 * already materialized.
 * NOTE(review): braces / a presumed gen_update_cc_op call were dropped
 * by extraction — the helper reads cpu_cc_op, which must be current. */
873 static void gen_compute_eflags(DisasContext
*s
)
875 if (s
->cc_op
== CC_OP_EFLAGS
) {
879 gen_helper_cc_compute_all(cpu_tmp2_i32
, cpu_env
, cpu_cc_op
);
880 set_cc_op(s
, CC_OP_EFLAGS
);
881 tcg_gen_extu_i32_tl(cpu_cc_src
, cpu_tmp2_i32
);
884 /* compute eflags.C to reg */
885 static void gen_compute_eflags_c(DisasContext
*s
, TCGv reg
, bool inv
)
891 case CC_OP_SUBB
... CC_OP_SUBQ
:
892 /* (DATA_TYPE)(CC_DST + CC_SRC) < (DATA_TYPE)CC_SRC */
893 size
= s
->cc_op
- CC_OP_SUBB
;
894 t1
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
895 /* If no temporary was used, be careful not to alias t1 and t0. */
896 t0
= TCGV_EQUAL(t1
, cpu_cc_src
) ? cpu_tmp0
: reg
;
897 tcg_gen_add_tl(t0
, cpu_cc_dst
, cpu_cc_src
);
901 case CC_OP_ADDB
... CC_OP_ADDQ
:
902 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
903 size
= s
->cc_op
- CC_OP_ADDB
;
904 t1
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
905 t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, false);
907 tcg_gen_setcond_tl(inv
? TCG_COND_GEU
: TCG_COND_LTU
, reg
, t0
, t1
);
911 case CC_OP_SBBB
... CC_OP_SBBQ
:
912 /* (DATA_TYPE)(CC_DST + CC_SRC + 1) <= (DATA_TYPE)CC_SRC */
913 size
= s
->cc_op
- CC_OP_SBBB
;
914 t1
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
915 if (TCGV_EQUAL(t1
, reg
) && TCGV_EQUAL(reg
, cpu_cc_src
)) {
916 tcg_gen_mov_tl(cpu_tmp0
, cpu_cc_src
);
920 tcg_gen_add_tl(reg
, cpu_cc_dst
, cpu_cc_src
);
921 tcg_gen_addi_tl(reg
, reg
, 1);
926 case CC_OP_ADCB
... CC_OP_ADCQ
:
927 /* (DATA_TYPE)CC_DST <= (DATA_TYPE)CC_SRC */
928 size
= s
->cc_op
- CC_OP_ADCB
;
929 t1
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
930 t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, false);
932 tcg_gen_setcond_tl(inv
? TCG_COND_GTU
: TCG_COND_LEU
, reg
, t0
, t1
);
936 case CC_OP_LOGICB
... CC_OP_LOGICQ
:
937 tcg_gen_movi_tl(reg
, 0);
940 case CC_OP_INCB
... CC_OP_INCQ
:
941 case CC_OP_DECB
... CC_OP_DECQ
:
943 tcg_gen_xori_tl(reg
, cpu_cc_src
, 1);
945 tcg_gen_mov_tl(reg
, cpu_cc_src
);
950 case CC_OP_SHLB
... CC_OP_SHLQ
:
951 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
952 size
= s
->cc_op
- CC_OP_SHLB
;
953 tcg_gen_shri_tl(reg
, cpu_cc_src
, (8 << size
) - 1);
954 tcg_gen_andi_tl(reg
, reg
, 1);
957 case CC_OP_MULB
... CC_OP_MULQ
:
958 tcg_gen_setcondi_tl(inv
? TCG_COND_EQ
: TCG_COND_NE
,
964 case CC_OP_SARB
... CC_OP_SARQ
:
966 tcg_gen_andi_tl(reg
, cpu_cc_src
, 1);
970 /* The need to compute only C from CC_OP_DYNAMIC is important
971 in efficiently implementing e.g. INC at the start of a TB. */
973 gen_helper_cc_compute_c(cpu_tmp2_i32
, cpu_env
, cpu_cc_op
);
974 tcg_gen_extu_i32_tl(reg
, cpu_tmp2_i32
);
978 tcg_gen_xori_tl(reg
, reg
, 1);
982 /* compute eflags.P to reg */
983 static void gen_compute_eflags_p(DisasContext
*s
, TCGv reg
)
985 gen_compute_eflags(s
);
986 tcg_gen_shri_tl(reg
, cpu_cc_src
, 2);
987 tcg_gen_andi_tl(reg
, reg
, 1);
990 /* compute eflags.S to reg */
991 static void gen_compute_eflags_s(DisasContext
*s
, TCGv reg
, bool inv
)
995 gen_compute_eflags(s
);
998 tcg_gen_shri_tl(reg
, cpu_cc_src
, 7);
999 tcg_gen_andi_tl(reg
, reg
, 1);
1001 tcg_gen_xori_tl(reg
, reg
, 1);
1006 int size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1007 TCGv t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, true);
1008 tcg_gen_setcondi_tl(inv
? TCG_COND_GE
: TCG_COND_LT
, reg
, t0
, 0);
1014 /* compute eflags.O to reg */
1015 static void gen_compute_eflags_o(DisasContext
*s
, TCGv reg
)
1017 gen_compute_eflags(s
);
1018 tcg_gen_shri_tl(reg
, cpu_cc_src
, 11);
1019 tcg_gen_andi_tl(reg
, reg
, 1);
1022 /* compute eflags.Z to reg */
1023 static void gen_compute_eflags_z(DisasContext
*s
, TCGv reg
, bool inv
)
1027 gen_compute_eflags(s
);
1030 tcg_gen_shri_tl(reg
, cpu_cc_src
, 6);
1031 tcg_gen_andi_tl(reg
, reg
, 1);
1033 tcg_gen_xori_tl(reg
, reg
, 1);
1038 int size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1039 TCGv t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, false);
1040 tcg_gen_setcondi_tl(inv
? TCG_COND_NE
: TCG_COND_EQ
, reg
, t0
, 0);
1046 static void gen_setcc_slow(DisasContext
*s
, int jcc_op
, TCGv reg
, bool inv
)
1050 gen_compute_eflags_o(s
, reg
);
1053 gen_compute_eflags_c(s
, reg
, inv
);
1057 gen_compute_eflags_z(s
, reg
, inv
);
1061 gen_compute_eflags(s
);
1062 tcg_gen_andi_tl(reg
, cpu_cc_src
, CC_Z
| CC_C
);
1063 tcg_gen_setcondi_tl(inv
? TCG_COND_EQ
: TCG_COND_NE
, reg
, reg
, 0);
1066 gen_compute_eflags_s(s
, reg
, inv
);
1070 gen_compute_eflags_p(s
, reg
);
1073 gen_compute_eflags(s
);
1074 tcg_gen_shri_tl(cpu_tmp0
, cpu_cc_src
, 11); /* CC_O */
1075 tcg_gen_shri_tl(reg
, cpu_cc_src
, 7); /* CC_S */
1076 tcg_gen_xor_tl(reg
, reg
, cpu_tmp0
);
1077 tcg_gen_andi_tl(reg
, reg
, 1);
1081 gen_compute_eflags(s
);
1082 tcg_gen_shri_tl(cpu_tmp0
, cpu_cc_src
, 4); /* CC_O -> CC_S */
1083 tcg_gen_xor_tl(reg
, cpu_tmp0
, cpu_cc_src
);
1084 tcg_gen_andi_tl(reg
, reg
, CC_S
| CC_Z
);
1085 tcg_gen_setcondi_tl(inv
? TCG_COND_EQ
: TCG_COND_NE
, reg
, reg
, 0);
1089 tcg_gen_xori_tl(reg
, reg
, 1);
1093 /* return true if setcc_slow is not needed (WARNING: must be kept in
1094 sync with gen_jcc1) */
1095 static int is_fast_jcc_case(DisasContext
*s
, int b
)
1098 jcc_op
= (b
>> 1) & 7;
1100 /* we optimize the cmp/jcc case */
1105 if (jcc_op
== JCC_O
|| jcc_op
== JCC_P
)
1109 /* some jumps are easy to compute */
1134 if (jcc_op
!= JCC_Z
&& jcc_op
!= JCC_S
)
1144 /* generate a conditional jump to label 'l1' according to jump opcode
1145 value 'b'. In the fast case, T0 is guaranted not to be used. */
1146 static inline void gen_jcc1(DisasContext
*s
, int b
, int l1
)
1148 int inv
, jcc_op
, size
, cond
;
1152 jcc_op
= (b
>> 1) & 7;
1155 /* we optimize the cmp/jcc case */
1161 size
= s
->cc_op
- CC_OP_SUBB
;
1165 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_dst
, size
, false);
1166 tcg_gen_brcondi_tl(inv
? TCG_COND_NE
: TCG_COND_EQ
, t0
, 0, l1
);
1170 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_dst
, size
, true);
1171 tcg_gen_brcondi_tl(inv
? TCG_COND_GE
: TCG_COND_LT
, t0
, 0, l1
);
1175 cond
= inv
? TCG_COND_GEU
: TCG_COND_LTU
;
1178 cond
= inv
? TCG_COND_GTU
: TCG_COND_LEU
;
1180 tcg_gen_add_tl(cpu_tmp4
, cpu_cc_dst
, cpu_cc_src
);
1181 gen_extu(size
, cpu_tmp4
);
1182 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
1183 tcg_gen_brcond_tl(cond
, cpu_tmp4
, t0
, l1
);
1187 cond
= inv
? TCG_COND_GE
: TCG_COND_LT
;
1190 cond
= inv
? TCG_COND_GT
: TCG_COND_LE
;
1192 tcg_gen_add_tl(cpu_tmp4
, cpu_cc_dst
, cpu_cc_src
);
1193 gen_exts(size
, cpu_tmp4
);
1194 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, true);
1195 tcg_gen_brcond_tl(cond
, cpu_tmp4
, t0
, l1
);
1203 /* some jumps are easy to compute */
1245 size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1248 size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1256 gen_setcc_slow(s
, jcc_op
, cpu_T
[0], false);
1257 tcg_gen_brcondi_tl(inv
? TCG_COND_EQ
: TCG_COND_NE
,
1263 /* XXX: does not work with gdbstub "ice" single step - not a
1265 static int gen_jz_ecx_string(DisasContext
*s
, target_ulong next_eip
)
1269 l1
= gen_new_label();
1270 l2
= gen_new_label();
1271 gen_op_jnz_ecx(s
->aflag
, l1
);
1273 gen_jmp_tb(s
, next_eip
, 1);
/* Emit one STOS iteration: fetch EAX into T0, store it (at size 'ot')
 * to the ES:EDI destination address, then advance EDI by the
 * direction-flag-scaled operand size. */
1278 static inline void gen_stos(DisasContext
*s
, int ot
)
1280 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
1281 gen_string_movl_A0_EDI(s
);
1282 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1283 gen_op_movl_T0_Dshift(ot
);
1284 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1287 static inline void gen_lods(DisasContext
*s
, int ot
)
1289 gen_string_movl_A0_ESI(s
);
1290 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1291 gen_op_mov_reg_T0(ot
, R_EAX
);
1292 gen_op_movl_T0_Dshift(ot
);
1293 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1296 static inline void gen_scas(DisasContext
*s
, int ot
)
1298 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
1299 gen_string_movl_A0_EDI(s
);
1300 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
1301 gen_op_cmpl_T0_T1_cc();
1302 gen_op_movl_T0_Dshift(ot
);
1303 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1304 set_cc_op(s
, CC_OP_SUBB
+ ot
);
1307 static inline void gen_cmps(DisasContext
*s
, int ot
)
1309 gen_string_movl_A0_ESI(s
);
1310 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1311 gen_string_movl_A0_EDI(s
);
1312 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
1313 gen_op_cmpl_T0_T1_cc();
1314 gen_op_movl_T0_Dshift(ot
);
1315 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1316 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1317 set_cc_op(s
, CC_OP_SUBB
+ ot
);
1320 static inline void gen_ins(DisasContext
*s
, int ot
)
1324 gen_string_movl_A0_EDI(s
);
1325 /* Note: we must do this dummy write first to be restartable in
1326 case of page fault. */
1328 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1329 gen_op_mov_TN_reg(OT_WORD
, 1, R_EDX
);
1330 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[1]);
1331 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
1332 gen_helper_in_func(ot
, cpu_T
[0], cpu_tmp2_i32
);
1333 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1334 gen_op_movl_T0_Dshift(ot
);
1335 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1340 static inline void gen_outs(DisasContext
*s
, int ot
)
1344 gen_string_movl_A0_ESI(s
);
1345 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1347 gen_op_mov_TN_reg(OT_WORD
, 1, R_EDX
);
1348 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[1]);
1349 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
1350 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[0]);
1351 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1353 gen_op_movl_T0_Dshift(ot
);
1354 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1359 /* same method as Valgrind : we generate jumps to current or next
1361 #define GEN_REPZ(op) \
1362 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1363 target_ulong cur_eip, target_ulong next_eip) \
1366 gen_update_cc_op(s); \
1367 l2 = gen_jz_ecx_string(s, next_eip); \
1368 gen_ ## op(s, ot); \
1369 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1370 /* a loop would cause two single step exceptions if ECX = 1 \
1371 before rep string_insn */ \
1373 gen_op_jz_ecx(s->aflag, l2); \
1374 gen_jmp(s, cur_eip); \
1377 #define GEN_REPZ2(op) \
1378 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1379 target_ulong cur_eip, \
1380 target_ulong next_eip, \
1384 gen_update_cc_op(s); \
1385 l2 = gen_jz_ecx_string(s, next_eip); \
1386 gen_ ## op(s, ot); \
1387 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1388 gen_update_cc_op(s); \
1389 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
1391 gen_op_jz_ecx(s->aflag, l2); \
1392 gen_jmp(s, cur_eip); \
1393 set_cc_op(s, CC_OP_DYNAMIC); \
1404 static void gen_helper_fp_arith_ST0_FT0(int op
)
1408 gen_helper_fadd_ST0_FT0(cpu_env
);
1411 gen_helper_fmul_ST0_FT0(cpu_env
);
1414 gen_helper_fcom_ST0_FT0(cpu_env
);
1417 gen_helper_fcom_ST0_FT0(cpu_env
);
1420 gen_helper_fsub_ST0_FT0(cpu_env
);
1423 gen_helper_fsubr_ST0_FT0(cpu_env
);
1426 gen_helper_fdiv_ST0_FT0(cpu_env
);
1429 gen_helper_fdivr_ST0_FT0(cpu_env
);
1434 /* NOTE the exception in "r" op ordering */
1435 static void gen_helper_fp_arith_STN_ST0(int op
, int opreg
)
1437 TCGv_i32 tmp
= tcg_const_i32(opreg
);
1440 gen_helper_fadd_STN_ST0(cpu_env
, tmp
);
1443 gen_helper_fmul_STN_ST0(cpu_env
, tmp
);
1446 gen_helper_fsubr_STN_ST0(cpu_env
, tmp
);
1449 gen_helper_fsub_STN_ST0(cpu_env
, tmp
);
1452 gen_helper_fdivr_STN_ST0(cpu_env
, tmp
);
1455 gen_helper_fdiv_STN_ST0(cpu_env
, tmp
);
1460 /* if d == OR_TMP0, it means memory operand (address in A0) */
1461 static void gen_op(DisasContext
*s1
, int op
, int ot
, int d
)
1464 gen_op_mov_TN_reg(ot
, 0, d
);
1466 gen_op_ld_T0_A0(ot
+ s1
->mem_index
);
1470 gen_compute_eflags_c(s1
, cpu_tmp4
, false);
1471 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1472 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_tmp4
);
1474 gen_op_mov_reg_T0(ot
, d
);
1476 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1477 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
1478 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1479 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp4
);
1480 tcg_gen_shli_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 2);
1481 tcg_gen_addi_i32(cpu_cc_op
, cpu_tmp2_i32
, CC_OP_ADDB
+ ot
);
1482 set_cc_op(s1
, CC_OP_DYNAMIC
);
1485 gen_compute_eflags_c(s1
, cpu_tmp4
, false);
1486 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1487 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_tmp4
);
1489 gen_op_mov_reg_T0(ot
, d
);
1491 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1492 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
1493 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1494 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp4
);
1495 tcg_gen_shli_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 2);
1496 tcg_gen_addi_i32(cpu_cc_op
, cpu_tmp2_i32
, CC_OP_SUBB
+ ot
);
1497 set_cc_op(s1
, CC_OP_DYNAMIC
);
1500 gen_op_addl_T0_T1();
1502 gen_op_mov_reg_T0(ot
, d
);
1504 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1505 gen_op_update2_cc();
1506 set_cc_op(s1
, CC_OP_ADDB
+ ot
);
1509 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1511 gen_op_mov_reg_T0(ot
, d
);
1513 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1514 gen_op_update2_cc();
1515 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1519 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1521 gen_op_mov_reg_T0(ot
, d
);
1523 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1524 gen_op_update1_cc();
1525 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1528 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1530 gen_op_mov_reg_T0(ot
, d
);
1532 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1533 gen_op_update1_cc();
1534 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1537 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1539 gen_op_mov_reg_T0(ot
, d
);
1541 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1542 gen_op_update1_cc();
1543 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1546 gen_op_cmpl_T0_T1_cc();
1547 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1552 /* if d == OR_TMP0, it means memory operand (address in A0) */
/* Emit INC (c > 0) or DEC: load the operand from register 'd' or from
 * memory at A0 (d == OR_TMP0), save the current carry into cpu_cc_src
 * first (INC/DEC preserve CF, so it must be captured before the flags
 * state changes), add +1 or -1, write the result back, and record the
 * result in cpu_cc_dst with cc_op set to CC_OP_INCB/CC_OP_DECB + ot.
 * NOTE(review): if/else braces dropped by extraction. */
1553 static void gen_inc(DisasContext
*s1
, int ot
, int d
, int c
)
1556 gen_op_mov_TN_reg(ot
, 0, d
);
1558 gen_op_ld_T0_A0(ot
+ s1
->mem_index
);
1559 gen_compute_eflags_c(s1
, cpu_cc_src
, false);
1561 tcg_gen_addi_tl(cpu_T
[0], cpu_T
[0], 1);
1562 set_cc_op(s1
, CC_OP_INCB
+ ot
);
1564 tcg_gen_addi_tl(cpu_T
[0], cpu_T
[0], -1);
1565 set_cc_op(s1
, CC_OP_DECB
+ ot
);
1568 gen_op_mov_reg_T0(ot
, d
);
1570 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1571 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1574 static void gen_shift_rm_T1(DisasContext
*s
, int ot
, int op1
,
1575 int is_right
, int is_arith
)
1581 if (ot
== OT_QUAD
) {
1588 if (op1
== OR_TMP0
) {
1589 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1591 gen_op_mov_TN_reg(ot
, 0, op1
);
1594 t0
= tcg_temp_local_new();
1595 t1
= tcg_temp_local_new();
1596 t2
= tcg_temp_local_new();
1598 tcg_gen_andi_tl(t2
, cpu_T
[1], mask
);
1602 gen_exts(ot
, cpu_T
[0]);
1603 tcg_gen_mov_tl(t0
, cpu_T
[0]);
1604 tcg_gen_sar_tl(cpu_T
[0], cpu_T
[0], t2
);
1606 gen_extu(ot
, cpu_T
[0]);
1607 tcg_gen_mov_tl(t0
, cpu_T
[0]);
1608 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], t2
);
1611 tcg_gen_mov_tl(t0
, cpu_T
[0]);
1612 tcg_gen_shl_tl(cpu_T
[0], cpu_T
[0], t2
);
1616 if (op1
== OR_TMP0
) {
1617 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1619 gen_op_mov_reg_T0(ot
, op1
);
1623 gen_update_cc_op(s
);
1625 tcg_gen_mov_tl(t1
, cpu_T
[0]);
1627 shift_label
= gen_new_label();
1628 tcg_gen_brcondi_tl(TCG_COND_EQ
, t2
, 0, shift_label
);
1630 tcg_gen_addi_tl(t2
, t2
, -1);
1631 tcg_gen_mov_tl(cpu_cc_dst
, t1
);
1635 tcg_gen_sar_tl(cpu_cc_src
, t0
, t2
);
1637 tcg_gen_shr_tl(cpu_cc_src
, t0
, t2
);
1640 tcg_gen_shl_tl(cpu_cc_src
, t0
, t2
);
1644 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SARB
+ ot
);
1646 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SHLB
+ ot
);
1649 gen_set_label(shift_label
);
1650 set_cc_op(s
, CC_OP_DYNAMIC
); /* cannot predict flags after */
1657 static void gen_shift_rm_im(DisasContext
*s
, int ot
, int op1
, int op2
,
1658 int is_right
, int is_arith
)
1669 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1671 gen_op_mov_TN_reg(ot
, 0, op1
);
1677 gen_exts(ot
, cpu_T
[0]);
1678 tcg_gen_sari_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1679 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], op2
);
1681 gen_extu(ot
, cpu_T
[0]);
1682 tcg_gen_shri_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1683 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], op2
);
1686 tcg_gen_shli_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1687 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], op2
);
1693 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1695 gen_op_mov_reg_T0(ot
, op1
);
1697 /* update eflags if non zero shift */
1699 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
1700 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1701 set_cc_op(s
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1705 static inline void tcg_gen_lshift(TCGv ret
, TCGv arg1
, target_long arg2
)
1708 tcg_gen_shli_tl(ret
, arg1
, arg2
);
1710 tcg_gen_shri_tl(ret
, arg1
, -arg2
);
1713 static void gen_rot_rm_T1(DisasContext
*s
, int ot
, int op1
,
1717 int label1
, label2
, data_bits
;
1718 TCGv t0
, t1
, t2
, a0
;
1720 /* XXX: inefficient, but we must use local temps */
1721 t0
= tcg_temp_local_new();
1722 t1
= tcg_temp_local_new();
1723 t2
= tcg_temp_local_new();
1724 a0
= tcg_temp_local_new();
1732 if (op1
== OR_TMP0
) {
1733 tcg_gen_mov_tl(a0
, cpu_A0
);
1734 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
1736 gen_op_mov_v_reg(ot
, t0
, op1
);
1739 tcg_gen_mov_tl(t1
, cpu_T
[1]);
1741 tcg_gen_andi_tl(t1
, t1
, mask
);
1743 /* Must test zero case to avoid using undefined behaviour in TCG
1745 label1
= gen_new_label();
1746 tcg_gen_brcondi_tl(TCG_COND_EQ
, t1
, 0, label1
);
1749 tcg_gen_andi_tl(cpu_tmp0
, t1
, (1 << (3 + ot
)) - 1);
1751 tcg_gen_mov_tl(cpu_tmp0
, t1
);
1754 tcg_gen_mov_tl(t2
, t0
);
1756 data_bits
= 8 << ot
;
1757 /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
1758 fix TCG definition) */
1760 tcg_gen_shr_tl(cpu_tmp4
, t0
, cpu_tmp0
);
1761 tcg_gen_subfi_tl(cpu_tmp0
, data_bits
, cpu_tmp0
);
1762 tcg_gen_shl_tl(t0
, t0
, cpu_tmp0
);
1764 tcg_gen_shl_tl(cpu_tmp4
, t0
, cpu_tmp0
);
1765 tcg_gen_subfi_tl(cpu_tmp0
, data_bits
, cpu_tmp0
);
1766 tcg_gen_shr_tl(t0
, t0
, cpu_tmp0
);
1768 tcg_gen_or_tl(t0
, t0
, cpu_tmp4
);
1770 gen_set_label(label1
);
1772 if (op1
== OR_TMP0
) {
1773 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
1775 gen_op_mov_reg_v(ot
, op1
, t0
);
1778 /* update eflags. It is needed anyway most of the time, do it always. */
1779 gen_compute_eflags(s
);
1780 assert(s
->cc_op
== CC_OP_EFLAGS
);
1782 label2
= gen_new_label();
1783 tcg_gen_brcondi_tl(TCG_COND_EQ
, t1
, 0, label2
);
1785 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~(CC_O
| CC_C
));
1786 tcg_gen_xor_tl(cpu_tmp0
, t2
, t0
);
1787 tcg_gen_lshift(cpu_tmp0
, cpu_tmp0
, 11 - (data_bits
- 1));
1788 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, CC_O
);
1789 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_tmp0
);
1791 tcg_gen_shri_tl(t0
, t0
, data_bits
- 1);
1793 tcg_gen_andi_tl(t0
, t0
, CC_C
);
1794 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t0
);
1796 gen_set_label(label2
);
1804 static void gen_rot_rm_im(DisasContext
*s
, int ot
, int op1
, int op2
,
1811 /* XXX: inefficient, but we must use local temps */
1812 t0
= tcg_temp_local_new();
1813 t1
= tcg_temp_local_new();
1814 a0
= tcg_temp_local_new();
1822 if (op1
== OR_TMP0
) {
1823 tcg_gen_mov_tl(a0
, cpu_A0
);
1824 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
1826 gen_op_mov_v_reg(ot
, t0
, op1
);
1830 tcg_gen_mov_tl(t1
, t0
);
1833 data_bits
= 8 << ot
;
1835 int shift
= op2
& ((1 << (3 + ot
)) - 1);
1837 tcg_gen_shri_tl(cpu_tmp4
, t0
, shift
);
1838 tcg_gen_shli_tl(t0
, t0
, data_bits
- shift
);
1841 tcg_gen_shli_tl(cpu_tmp4
, t0
, shift
);
1842 tcg_gen_shri_tl(t0
, t0
, data_bits
- shift
);
1844 tcg_gen_or_tl(t0
, t0
, cpu_tmp4
);
1848 if (op1
== OR_TMP0
) {
1849 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
1851 gen_op_mov_reg_v(ot
, op1
, t0
);
1856 gen_compute_eflags(s
);
1857 assert(s
->cc_op
== CC_OP_EFLAGS
);
1859 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~(CC_O
| CC_C
));
1860 tcg_gen_xor_tl(cpu_tmp0
, t1
, t0
);
1861 tcg_gen_lshift(cpu_tmp0
, cpu_tmp0
, 11 - (data_bits
- 1));
1862 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, CC_O
);
1863 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_tmp0
);
1865 tcg_gen_shri_tl(t0
, t0
, data_bits
- 1);
1867 tcg_gen_andi_tl(t0
, t0
, CC_C
);
1868 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t0
);
1876 /* XXX: add faster immediate = 1 case */
1877 static void gen_rotc_rm_T1(DisasContext
*s
, int ot
, int op1
,
1880 gen_compute_eflags(s
);
1881 assert(s
->cc_op
== CC_OP_EFLAGS
);
1885 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1887 gen_op_mov_TN_reg(ot
, 0, op1
);
1892 gen_helper_rcrb(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1895 gen_helper_rcrw(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1898 gen_helper_rcrl(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1900 #ifdef TARGET_X86_64
1902 gen_helper_rcrq(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1909 gen_helper_rclb(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1912 gen_helper_rclw(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1915 gen_helper_rcll(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1917 #ifdef TARGET_X86_64
1919 gen_helper_rclq(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1926 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1928 gen_op_mov_reg_T0(ot
, op1
);
1931 /* XXX: add faster immediate case */
1932 static void gen_shiftd_rm_T1_T3(DisasContext
*s
, int ot
, int op1
,
1935 int label1
, label2
, data_bits
;
1937 TCGv t0
, t1
, t2
, a0
;
1939 t0
= tcg_temp_local_new();
1940 t1
= tcg_temp_local_new();
1941 t2
= tcg_temp_local_new();
1942 a0
= tcg_temp_local_new();
1950 if (op1
== OR_TMP0
) {
1951 tcg_gen_mov_tl(a0
, cpu_A0
);
1952 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
1954 gen_op_mov_v_reg(ot
, t0
, op1
);
1957 tcg_gen_andi_tl(cpu_T3
, cpu_T3
, mask
);
1959 tcg_gen_mov_tl(t1
, cpu_T
[1]);
1960 tcg_gen_mov_tl(t2
, cpu_T3
);
1962 /* Must test zero case to avoid using undefined behaviour in TCG
1964 label1
= gen_new_label();
1965 tcg_gen_brcondi_tl(TCG_COND_EQ
, t2
, 0, label1
);
1967 tcg_gen_addi_tl(cpu_tmp5
, t2
, -1);
1968 if (ot
== OT_WORD
) {
1969 /* Note: we implement the Intel behaviour for shift count > 16 */
1971 tcg_gen_andi_tl(t0
, t0
, 0xffff);
1972 tcg_gen_shli_tl(cpu_tmp0
, t1
, 16);
1973 tcg_gen_or_tl(t0
, t0
, cpu_tmp0
);
1974 tcg_gen_ext32u_tl(t0
, t0
);
1976 tcg_gen_shr_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1978 /* only needed if count > 16, but a test would complicate */
1979 tcg_gen_subfi_tl(cpu_tmp5
, 32, t2
);
1980 tcg_gen_shl_tl(cpu_tmp0
, t0
, cpu_tmp5
);
1982 tcg_gen_shr_tl(t0
, t0
, t2
);
1984 tcg_gen_or_tl(t0
, t0
, cpu_tmp0
);
1986 /* XXX: not optimal */
1987 tcg_gen_andi_tl(t0
, t0
, 0xffff);
1988 tcg_gen_shli_tl(t1
, t1
, 16);
1989 tcg_gen_or_tl(t1
, t1
, t0
);
1990 tcg_gen_ext32u_tl(t1
, t1
);
1992 tcg_gen_shl_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1993 tcg_gen_subfi_tl(cpu_tmp0
, 32, cpu_tmp5
);
1994 tcg_gen_shr_tl(cpu_tmp5
, t1
, cpu_tmp0
);
1995 tcg_gen_or_tl(cpu_tmp4
, cpu_tmp4
, cpu_tmp5
);
1997 tcg_gen_shl_tl(t0
, t0
, t2
);
1998 tcg_gen_subfi_tl(cpu_tmp5
, 32, t2
);
1999 tcg_gen_shr_tl(t1
, t1
, cpu_tmp5
);
2000 tcg_gen_or_tl(t0
, t0
, t1
);
2003 data_bits
= 8 << ot
;
2006 tcg_gen_ext32u_tl(t0
, t0
);
2008 tcg_gen_shr_tl(cpu_tmp4
, t0
, cpu_tmp5
);
2010 tcg_gen_shr_tl(t0
, t0
, t2
);
2011 tcg_gen_subfi_tl(cpu_tmp5
, data_bits
, t2
);
2012 tcg_gen_shl_tl(t1
, t1
, cpu_tmp5
);
2013 tcg_gen_or_tl(t0
, t0
, t1
);
2017 tcg_gen_ext32u_tl(t1
, t1
);
2019 tcg_gen_shl_tl(cpu_tmp4
, t0
, cpu_tmp5
);
2021 tcg_gen_shl_tl(t0
, t0
, t2
);
2022 tcg_gen_subfi_tl(cpu_tmp5
, data_bits
, t2
);
2023 tcg_gen_shr_tl(t1
, t1
, cpu_tmp5
);
2024 tcg_gen_or_tl(t0
, t0
, t1
);
2027 tcg_gen_mov_tl(t1
, cpu_tmp4
);
2029 gen_set_label(label1
);
2031 if (op1
== OR_TMP0
) {
2032 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
2034 gen_op_mov_reg_v(ot
, op1
, t0
);
2038 gen_update_cc_op(s
);
2040 label2
= gen_new_label();
2041 tcg_gen_brcondi_tl(TCG_COND_EQ
, t2
, 0, label2
);
2043 tcg_gen_mov_tl(cpu_cc_src
, t1
);
2044 tcg_gen_mov_tl(cpu_cc_dst
, t0
);
2046 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SARB
+ ot
);
2048 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SHLB
+ ot
);
2050 gen_set_label(label2
);
2051 set_cc_op(s
, CC_OP_DYNAMIC
); /* cannot predict flags after */
2059 static void gen_shift(DisasContext
*s1
, int op
, int ot
, int d
, int s
)
2062 gen_op_mov_TN_reg(ot
, 1, s
);
2065 gen_rot_rm_T1(s1
, ot
, d
, 0);
2068 gen_rot_rm_T1(s1
, ot
, d
, 1);
2072 gen_shift_rm_T1(s1
, ot
, d
, 0, 0);
2075 gen_shift_rm_T1(s1
, ot
, d
, 1, 0);
2078 gen_shift_rm_T1(s1
, ot
, d
, 1, 1);
2081 gen_rotc_rm_T1(s1
, ot
, d
, 0);
2084 gen_rotc_rm_T1(s1
, ot
, d
, 1);
2089 static void gen_shifti(DisasContext
*s1
, int op
, int ot
, int d
, int c
)
2093 gen_rot_rm_im(s1
, ot
, d
, c
, 0);
2096 gen_rot_rm_im(s1
, ot
, d
, c
, 1);
2100 gen_shift_rm_im(s1
, ot
, d
, c
, 0, 0);
2103 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 0);
2106 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 1);
2109 /* currently not optimized */
2110 gen_op_movl_T1_im(c
);
2111 gen_shift(s1
, op
, ot
, d
, OR_TMP1
);
2116 static void gen_lea_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2117 int *reg_ptr
, int *offset_ptr
)
2125 int mod
, rm
, code
, override
, must_add_seg
;
2127 override
= s
->override
;
2128 must_add_seg
= s
->addseg
;
2131 mod
= (modrm
>> 6) & 3;
2143 code
= cpu_ldub_code(env
, s
->pc
++);
2144 scale
= (code
>> 6) & 3;
2145 index
= ((code
>> 3) & 7) | REX_X(s
);
2152 if ((base
& 7) == 5) {
2154 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
2156 if (CODE64(s
) && !havesib
) {
2157 disp
+= s
->pc
+ s
->rip_offset
;
2164 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
2168 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
2174 /* for correct popl handling with esp */
2175 if (base
== 4 && s
->popl_esp_hack
)
2176 disp
+= s
->popl_esp_hack
;
2177 #ifdef TARGET_X86_64
2178 if (s
->aflag
== 2) {
2179 gen_op_movq_A0_reg(base
);
2181 gen_op_addq_A0_im(disp
);
2186 gen_op_movl_A0_reg(base
);
2188 gen_op_addl_A0_im(disp
);
2191 #ifdef TARGET_X86_64
2192 if (s
->aflag
== 2) {
2193 gen_op_movq_A0_im(disp
);
2197 gen_op_movl_A0_im(disp
);
2200 /* index == 4 means no index */
2201 if (havesib
&& (index
!= 4)) {
2202 #ifdef TARGET_X86_64
2203 if (s
->aflag
== 2) {
2204 gen_op_addq_A0_reg_sN(scale
, index
);
2208 gen_op_addl_A0_reg_sN(scale
, index
);
2213 if (base
== R_EBP
|| base
== R_ESP
)
2218 #ifdef TARGET_X86_64
2219 if (s
->aflag
== 2) {
2220 gen_op_addq_A0_seg(override
);
2224 gen_op_addl_A0_seg(s
, override
);
2231 disp
= cpu_lduw_code(env
, s
->pc
);
2233 gen_op_movl_A0_im(disp
);
2234 rm
= 0; /* avoid SS override */
2241 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
2245 disp
= cpu_lduw_code(env
, s
->pc
);
2251 gen_op_movl_A0_reg(R_EBX
);
2252 gen_op_addl_A0_reg_sN(0, R_ESI
);
2255 gen_op_movl_A0_reg(R_EBX
);
2256 gen_op_addl_A0_reg_sN(0, R_EDI
);
2259 gen_op_movl_A0_reg(R_EBP
);
2260 gen_op_addl_A0_reg_sN(0, R_ESI
);
2263 gen_op_movl_A0_reg(R_EBP
);
2264 gen_op_addl_A0_reg_sN(0, R_EDI
);
2267 gen_op_movl_A0_reg(R_ESI
);
2270 gen_op_movl_A0_reg(R_EDI
);
2273 gen_op_movl_A0_reg(R_EBP
);
2277 gen_op_movl_A0_reg(R_EBX
);
2281 gen_op_addl_A0_im(disp
);
2282 gen_op_andl_A0_ffff();
2286 if (rm
== 2 || rm
== 3 || rm
== 6)
2291 gen_op_addl_A0_seg(s
, override
);
2301 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2303 int mod
, rm
, base
, code
;
2305 mod
= (modrm
>> 6) & 3;
2315 code
= cpu_ldub_code(env
, s
->pc
++);
2351 /* used for LEA and MOV AX, mem */
2352 static void gen_add_A0_ds_seg(DisasContext
*s
)
2354 int override
, must_add_seg
;
2355 must_add_seg
= s
->addseg
;
2357 if (s
->override
>= 0) {
2358 override
= s
->override
;
2362 #ifdef TARGET_X86_64
2364 gen_op_addq_A0_seg(override
);
2368 gen_op_addl_A0_seg(s
, override
);
2373 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2375 static void gen_ldst_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2376 int ot
, int reg
, int is_store
)
2378 int mod
, rm
, opreg
, disp
;
2380 mod
= (modrm
>> 6) & 3;
2381 rm
= (modrm
& 7) | REX_B(s
);
2385 gen_op_mov_TN_reg(ot
, 0, reg
);
2386 gen_op_mov_reg_T0(ot
, rm
);
2388 gen_op_mov_TN_reg(ot
, 0, rm
);
2390 gen_op_mov_reg_T0(ot
, reg
);
2393 gen_lea_modrm(env
, s
, modrm
, &opreg
, &disp
);
2396 gen_op_mov_TN_reg(ot
, 0, reg
);
2397 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2399 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
2401 gen_op_mov_reg_T0(ot
, reg
);
2406 static inline uint32_t insn_get(CPUX86State
*env
, DisasContext
*s
, int ot
)
2412 ret
= cpu_ldub_code(env
, s
->pc
);
2416 ret
= cpu_lduw_code(env
, s
->pc
);
2421 ret
= cpu_ldl_code(env
, s
->pc
);
2428 static inline int insn_const_size(unsigned int ot
)
2436 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong eip
)
2438 TranslationBlock
*tb
;
2441 pc
= s
->cs_base
+ eip
;
2443 /* NOTE: we handle the case where the TB spans two pages here */
2444 if ((pc
& TARGET_PAGE_MASK
) == (tb
->pc
& TARGET_PAGE_MASK
) ||
2445 (pc
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
)) {
2446 /* jump to same page: we can use a direct jump */
2447 tcg_gen_goto_tb(tb_num
);
2449 tcg_gen_exit_tb((tcg_target_long
)tb
+ tb_num
);
2451 /* jump to another page: currently not optimized */
2457 static inline void gen_jcc(DisasContext
*s
, int b
,
2458 target_ulong val
, target_ulong next_eip
)
2463 gen_update_cc_op(s
);
2464 l1
= gen_new_label();
2466 set_cc_op(s
, CC_OP_DYNAMIC
);
2468 gen_goto_tb(s
, 0, next_eip
);
2471 gen_goto_tb(s
, 1, val
);
2472 s
->is_jmp
= DISAS_TB_JUMP
;
2474 l1
= gen_new_label();
2475 l2
= gen_new_label();
2478 gen_jmp_im(next_eip
);
2488 static void gen_setcc(DisasContext
*s
, int b
)
2490 int inv
, jcc_op
, l1
;
2493 if (is_fast_jcc_case(s
, b
)) {
2494 /* nominal case: we use a jump */
2495 /* XXX: make it faster by adding new instructions in TCG */
2496 t0
= tcg_temp_local_new();
2497 tcg_gen_movi_tl(t0
, 0);
2498 l1
= gen_new_label();
2499 gen_jcc1(s
, b
^ 1, l1
);
2500 tcg_gen_movi_tl(t0
, 1);
2502 tcg_gen_mov_tl(cpu_T
[0], t0
);
2505 /* slow case: it is more efficient not to generate a jump,
2506 although it is questionnable whether this optimization is
2509 jcc_op
= (b
>> 1) & 7;
2510 gen_setcc_slow(s
, jcc_op
, cpu_T
[0], inv
);
2514 static inline void gen_op_movl_T0_seg(int seg_reg
)
2516 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
2517 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2520 static inline void gen_op_movl_seg_T0_vm(int seg_reg
)
2522 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xffff);
2523 tcg_gen_st32_tl(cpu_T
[0], cpu_env
,
2524 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2525 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], 4);
2526 tcg_gen_st_tl(cpu_T
[0], cpu_env
,
2527 offsetof(CPUX86State
,segs
[seg_reg
].base
));
2530 /* move T0 to seg_reg and compute if the CPU state may change. Never
2531 call this function with seg_reg == R_CS */
2532 static void gen_movl_seg_T0(DisasContext
*s
, int seg_reg
, target_ulong cur_eip
)
2534 if (s
->pe
&& !s
->vm86
) {
2535 /* XXX: optimize by finding processor state dynamically */
2536 gen_update_cc_op(s
);
2537 gen_jmp_im(cur_eip
);
2538 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
2539 gen_helper_load_seg(cpu_env
, tcg_const_i32(seg_reg
), cpu_tmp2_i32
);
2540 /* abort translation because the addseg value may change or
2541 because ss32 may change. For R_SS, translation must always
2542 stop as a special handling must be done to disable hardware
2543 interrupts for the next instruction */
2544 if (seg_reg
== R_SS
|| (s
->code32
&& seg_reg
< R_FS
))
2545 s
->is_jmp
= DISAS_TB_JUMP
;
2547 gen_op_movl_seg_T0_vm(seg_reg
);
2548 if (seg_reg
== R_SS
)
2549 s
->is_jmp
= DISAS_TB_JUMP
;
2553 static inline int svm_is_rep(int prefixes
)
2555 return ((prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) ? 8 : 0);
2559 gen_svm_check_intercept_param(DisasContext
*s
, target_ulong pc_start
,
2560 uint32_t type
, uint64_t param
)
2562 /* no SVM activated; fast case */
2563 if (likely(!(s
->flags
& HF_SVMI_MASK
)))
2565 gen_update_cc_op(s
);
2566 gen_jmp_im(pc_start
- s
->cs_base
);
2567 gen_helper_svm_check_intercept_param(cpu_env
, tcg_const_i32(type
),
2568 tcg_const_i64(param
));
2572 gen_svm_check_intercept(DisasContext
*s
, target_ulong pc_start
, uint64_t type
)
2574 gen_svm_check_intercept_param(s
, pc_start
, type
, 0);
2577 static inline void gen_stack_update(DisasContext
*s
, int addend
)
2579 #ifdef TARGET_X86_64
2581 gen_op_add_reg_im(2, R_ESP
, addend
);
2585 gen_op_add_reg_im(1, R_ESP
, addend
);
2587 gen_op_add_reg_im(0, R_ESP
, addend
);
2591 /* generate a push. It depends on ss32, addseg and dflag */
2592 static void gen_push_T0(DisasContext
*s
)
2594 #ifdef TARGET_X86_64
2596 gen_op_movq_A0_reg(R_ESP
);
2598 gen_op_addq_A0_im(-8);
2599 gen_op_st_T0_A0(OT_QUAD
+ s
->mem_index
);
2601 gen_op_addq_A0_im(-2);
2602 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
2604 gen_op_mov_reg_A0(2, R_ESP
);
2608 gen_op_movl_A0_reg(R_ESP
);
2610 gen_op_addl_A0_im(-2);
2612 gen_op_addl_A0_im(-4);
2615 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2616 gen_op_addl_A0_seg(s
, R_SS
);
2619 gen_op_andl_A0_ffff();
2620 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2621 gen_op_addl_A0_seg(s
, R_SS
);
2623 gen_op_st_T0_A0(s
->dflag
+ 1 + s
->mem_index
);
2624 if (s
->ss32
&& !s
->addseg
)
2625 gen_op_mov_reg_A0(1, R_ESP
);
2627 gen_op_mov_reg_T1(s
->ss32
+ 1, R_ESP
);
2631 /* generate a push. It depends on ss32, addseg and dflag */
2632 /* slower version for T1, only used for call Ev */
2633 static void gen_push_T1(DisasContext
*s
)
2635 #ifdef TARGET_X86_64
2637 gen_op_movq_A0_reg(R_ESP
);
2639 gen_op_addq_A0_im(-8);
2640 gen_op_st_T1_A0(OT_QUAD
+ s
->mem_index
);
2642 gen_op_addq_A0_im(-2);
2643 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
2645 gen_op_mov_reg_A0(2, R_ESP
);
2649 gen_op_movl_A0_reg(R_ESP
);
2651 gen_op_addl_A0_im(-2);
2653 gen_op_addl_A0_im(-4);
2656 gen_op_addl_A0_seg(s
, R_SS
);
2659 gen_op_andl_A0_ffff();
2660 gen_op_addl_A0_seg(s
, R_SS
);
2662 gen_op_st_T1_A0(s
->dflag
+ 1 + s
->mem_index
);
2664 if (s
->ss32
&& !s
->addseg
)
2665 gen_op_mov_reg_A0(1, R_ESP
);
2667 gen_stack_update(s
, (-2) << s
->dflag
);
2671 /* two step pop is necessary for precise exceptions */
2672 static void gen_pop_T0(DisasContext
*s
)
2674 #ifdef TARGET_X86_64
2676 gen_op_movq_A0_reg(R_ESP
);
2677 gen_op_ld_T0_A0((s
->dflag
? OT_QUAD
: OT_WORD
) + s
->mem_index
);
2681 gen_op_movl_A0_reg(R_ESP
);
2684 gen_op_addl_A0_seg(s
, R_SS
);
2686 gen_op_andl_A0_ffff();
2687 gen_op_addl_A0_seg(s
, R_SS
);
2689 gen_op_ld_T0_A0(s
->dflag
+ 1 + s
->mem_index
);
2693 static void gen_pop_update(DisasContext
*s
)
2695 #ifdef TARGET_X86_64
2696 if (CODE64(s
) && s
->dflag
) {
2697 gen_stack_update(s
, 8);
2701 gen_stack_update(s
, 2 << s
->dflag
);
2705 static void gen_stack_A0(DisasContext
*s
)
2707 gen_op_movl_A0_reg(R_ESP
);
2709 gen_op_andl_A0_ffff();
2710 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2712 gen_op_addl_A0_seg(s
, R_SS
);
2715 /* NOTE: wrap around in 16 bit not fully handled */
2716 static void gen_pusha(DisasContext
*s
)
2719 gen_op_movl_A0_reg(R_ESP
);
2720 gen_op_addl_A0_im(-16 << s
->dflag
);
2722 gen_op_andl_A0_ffff();
2723 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2725 gen_op_addl_A0_seg(s
, R_SS
);
2726 for(i
= 0;i
< 8; i
++) {
2727 gen_op_mov_TN_reg(OT_LONG
, 0, 7 - i
);
2728 gen_op_st_T0_A0(OT_WORD
+ s
->dflag
+ s
->mem_index
);
2729 gen_op_addl_A0_im(2 << s
->dflag
);
2731 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2734 /* NOTE: wrap around in 16 bit not fully handled */
2735 static void gen_popa(DisasContext
*s
)
2738 gen_op_movl_A0_reg(R_ESP
);
2740 gen_op_andl_A0_ffff();
2741 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2742 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], 16 << s
->dflag
);
2744 gen_op_addl_A0_seg(s
, R_SS
);
2745 for(i
= 0;i
< 8; i
++) {
2746 /* ESP is not reloaded */
2748 gen_op_ld_T0_A0(OT_WORD
+ s
->dflag
+ s
->mem_index
);
2749 gen_op_mov_reg_T0(OT_WORD
+ s
->dflag
, 7 - i
);
2751 gen_op_addl_A0_im(2 << s
->dflag
);
2753 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2756 static void gen_enter(DisasContext
*s
, int esp_addend
, int level
)
2761 #ifdef TARGET_X86_64
2763 ot
= s
->dflag
? OT_QUAD
: OT_WORD
;
2766 gen_op_movl_A0_reg(R_ESP
);
2767 gen_op_addq_A0_im(-opsize
);
2768 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2771 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
2772 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2774 /* XXX: must save state */
2775 gen_helper_enter64_level(cpu_env
, tcg_const_i32(level
),
2776 tcg_const_i32((ot
== OT_QUAD
)),
2779 gen_op_mov_reg_T1(ot
, R_EBP
);
2780 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], -esp_addend
+ (-opsize
* level
));
2781 gen_op_mov_reg_T1(OT_QUAD
, R_ESP
);
2785 ot
= s
->dflag
+ OT_WORD
;
2786 opsize
= 2 << s
->dflag
;
2788 gen_op_movl_A0_reg(R_ESP
);
2789 gen_op_addl_A0_im(-opsize
);
2791 gen_op_andl_A0_ffff();
2792 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2794 gen_op_addl_A0_seg(s
, R_SS
);
2796 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
2797 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2799 /* XXX: must save state */
2800 gen_helper_enter_level(cpu_env
, tcg_const_i32(level
),
2801 tcg_const_i32(s
->dflag
),
2804 gen_op_mov_reg_T1(ot
, R_EBP
);
2805 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], -esp_addend
+ (-opsize
* level
));
2806 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2810 static void gen_exception(DisasContext
*s
, int trapno
, target_ulong cur_eip
)
2812 gen_update_cc_op(s
);
2813 gen_jmp_im(cur_eip
);
2814 gen_helper_raise_exception(cpu_env
, tcg_const_i32(trapno
));
2815 s
->is_jmp
= DISAS_TB_JUMP
;
2818 /* an interrupt is different from an exception because of the
2820 static void gen_interrupt(DisasContext
*s
, int intno
,
2821 target_ulong cur_eip
, target_ulong next_eip
)
2823 gen_update_cc_op(s
);
2824 gen_jmp_im(cur_eip
);
2825 gen_helper_raise_interrupt(cpu_env
, tcg_const_i32(intno
),
2826 tcg_const_i32(next_eip
- cur_eip
));
2827 s
->is_jmp
= DISAS_TB_JUMP
;
2830 static void gen_debug(DisasContext
*s
, target_ulong cur_eip
)
2832 gen_update_cc_op(s
);
2833 gen_jmp_im(cur_eip
);
2834 gen_helper_debug(cpu_env
);
2835 s
->is_jmp
= DISAS_TB_JUMP
;
2838 /* generate a generic end of block. Trace exception is also generated
2840 static void gen_eob(DisasContext
*s
)
2842 gen_update_cc_op(s
);
2843 if (s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
) {
2844 gen_helper_reset_inhibit_irq(cpu_env
);
2846 if (s
->tb
->flags
& HF_RF_MASK
) {
2847 gen_helper_reset_rf(cpu_env
);
2849 if (s
->singlestep_enabled
) {
2850 gen_helper_debug(cpu_env
);
2852 gen_helper_single_step(cpu_env
);
2856 s
->is_jmp
= DISAS_TB_JUMP
;
2859 /* generate a jump to eip. No segment change must happen before as a
2860 direct call to the next block may occur */
2861 static void gen_jmp_tb(DisasContext
*s
, target_ulong eip
, int tb_num
)
2864 gen_update_cc_op(s
);
2865 gen_goto_tb(s
, tb_num
, eip
);
2866 s
->is_jmp
= DISAS_TB_JUMP
;
2873 static void gen_jmp(DisasContext
*s
, target_ulong eip
)
2875 gen_jmp_tb(s
, eip
, 0);
2878 static inline void gen_ldq_env_A0(int idx
, int offset
)
2880 int mem_index
= (idx
>> 2) - 1;
2881 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2882 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2885 static inline void gen_stq_env_A0(int idx
, int offset
)
2887 int mem_index
= (idx
>> 2) - 1;
2888 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2889 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2892 static inline void gen_ldo_env_A0(int idx
, int offset
)
2894 int mem_index
= (idx
>> 2) - 1;
2895 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2896 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2897 tcg_gen_addi_tl(cpu_tmp0
, cpu_A0
, 8);
2898 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_tmp0
, mem_index
);
2899 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2902 static inline void gen_sto_env_A0(int idx
, int offset
)
2904 int mem_index
= (idx
>> 2) - 1;
2905 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2906 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2907 tcg_gen_addi_tl(cpu_tmp0
, cpu_A0
, 8);
2908 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2909 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_tmp0
, mem_index
);
2912 static inline void gen_op_movo(int d_offset
, int s_offset
)
2914 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
);
2915 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2916 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
+ 8);
2917 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
+ 8);
2920 static inline void gen_op_movq(int d_offset
, int s_offset
)
2922 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
);
2923 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2926 static inline void gen_op_movl(int d_offset
, int s_offset
)
2928 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
, s_offset
);
2929 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, d_offset
);
2932 static inline void gen_op_movq_env_0(int d_offset
)
2934 tcg_gen_movi_i64(cpu_tmp1_i64
, 0);
2935 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2938 typedef void (*SSEFunc_i_ep
)(TCGv_i32 val
, TCGv_ptr env
, TCGv_ptr reg
);
2939 typedef void (*SSEFunc_l_ep
)(TCGv_i64 val
, TCGv_ptr env
, TCGv_ptr reg
);
2940 typedef void (*SSEFunc_0_epi
)(TCGv_ptr env
, TCGv_ptr reg
, TCGv_i32 val
);
2941 typedef void (*SSEFunc_0_epl
)(TCGv_ptr env
, TCGv_ptr reg
, TCGv_i64 val
);
2942 typedef void (*SSEFunc_0_epp
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
);
2943 typedef void (*SSEFunc_0_eppi
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
,
2945 typedef void (*SSEFunc_0_ppi
)(TCGv_ptr reg_a
, TCGv_ptr reg_b
, TCGv_i32 val
);
2946 typedef void (*SSEFunc_0_eppt
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
,
2949 #define SSE_SPECIAL ((void *)1)
2950 #define SSE_DUMMY ((void *)2)
2952 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2953 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2954 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
2956 static const SSEFunc_0_epp sse_op_table1
[256][4] = {
2957 /* 3DNow! extensions */
2958 [0x0e] = { SSE_DUMMY
}, /* femms */
2959 [0x0f] = { SSE_DUMMY
}, /* pf... */
2960 /* pure SSE operations */
2961 [0x10] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movups, movupd, movss, movsd */
2962 [0x11] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movups, movupd, movss, movsd */
2963 [0x12] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movlps, movlpd, movsldup, movddup */
2964 [0x13] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movlps, movlpd */
2965 [0x14] = { gen_helper_punpckldq_xmm
, gen_helper_punpcklqdq_xmm
},
2966 [0x15] = { gen_helper_punpckhdq_xmm
, gen_helper_punpckhqdq_xmm
},
2967 [0x16] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movhps, movhpd, movshdup */
2968 [0x17] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movhps, movhpd */
2970 [0x28] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movaps, movapd */
2971 [0x29] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movaps, movapd */
2972 [0x2a] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2973 [0x2b] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movntps, movntpd, movntss, movntsd */
2974 [0x2c] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2975 [0x2d] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
2976 [0x2e] = { gen_helper_ucomiss
, gen_helper_ucomisd
},
2977 [0x2f] = { gen_helper_comiss
, gen_helper_comisd
},
2978 [0x50] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movmskps, movmskpd */
2979 [0x51] = SSE_FOP(sqrt
),
2980 [0x52] = { gen_helper_rsqrtps
, NULL
, gen_helper_rsqrtss
, NULL
},
2981 [0x53] = { gen_helper_rcpps
, NULL
, gen_helper_rcpss
, NULL
},
2982 [0x54] = { gen_helper_pand_xmm
, gen_helper_pand_xmm
}, /* andps, andpd */
2983 [0x55] = { gen_helper_pandn_xmm
, gen_helper_pandn_xmm
}, /* andnps, andnpd */
2984 [0x56] = { gen_helper_por_xmm
, gen_helper_por_xmm
}, /* orps, orpd */
2985 [0x57] = { gen_helper_pxor_xmm
, gen_helper_pxor_xmm
}, /* xorps, xorpd */
2986 [0x58] = SSE_FOP(add
),
2987 [0x59] = SSE_FOP(mul
),
2988 [0x5a] = { gen_helper_cvtps2pd
, gen_helper_cvtpd2ps
,
2989 gen_helper_cvtss2sd
, gen_helper_cvtsd2ss
},
2990 [0x5b] = { gen_helper_cvtdq2ps
, gen_helper_cvtps2dq
, gen_helper_cvttps2dq
},
2991 [0x5c] = SSE_FOP(sub
),
2992 [0x5d] = SSE_FOP(min
),
2993 [0x5e] = SSE_FOP(div
),
2994 [0x5f] = SSE_FOP(max
),
2996 [0xc2] = SSE_FOP(cmpeq
),
2997 [0xc6] = { (SSEFunc_0_epp
)gen_helper_shufps
,
2998 (SSEFunc_0_epp
)gen_helper_shufpd
}, /* XXX: casts */
3000 [0x38] = { SSE_SPECIAL
, SSE_SPECIAL
, NULL
, SSE_SPECIAL
}, /* SSSE3/SSE4 */
3001 [0x3a] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* SSSE3/SSE4 */
3003 /* MMX ops and their SSE extensions */
3004 [0x60] = MMX_OP2(punpcklbw
),
3005 [0x61] = MMX_OP2(punpcklwd
),
3006 [0x62] = MMX_OP2(punpckldq
),
3007 [0x63] = MMX_OP2(packsswb
),
3008 [0x64] = MMX_OP2(pcmpgtb
),
3009 [0x65] = MMX_OP2(pcmpgtw
),
3010 [0x66] = MMX_OP2(pcmpgtl
),
3011 [0x67] = MMX_OP2(packuswb
),
3012 [0x68] = MMX_OP2(punpckhbw
),
3013 [0x69] = MMX_OP2(punpckhwd
),
3014 [0x6a] = MMX_OP2(punpckhdq
),
3015 [0x6b] = MMX_OP2(packssdw
),
3016 [0x6c] = { NULL
, gen_helper_punpcklqdq_xmm
},
3017 [0x6d] = { NULL
, gen_helper_punpckhqdq_xmm
},
3018 [0x6e] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movd mm, ea */
3019 [0x6f] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movq, movdqa, , movqdu */
3020 [0x70] = { (SSEFunc_0_epp
)gen_helper_pshufw_mmx
,
3021 (SSEFunc_0_epp
)gen_helper_pshufd_xmm
,
3022 (SSEFunc_0_epp
)gen_helper_pshufhw_xmm
,
3023 (SSEFunc_0_epp
)gen_helper_pshuflw_xmm
}, /* XXX: casts */
3024 [0x71] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* shiftw */
3025 [0x72] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* shiftd */
3026 [0x73] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* shiftq */
3027 [0x74] = MMX_OP2(pcmpeqb
),
3028 [0x75] = MMX_OP2(pcmpeqw
),
3029 [0x76] = MMX_OP2(pcmpeql
),
3030 [0x77] = { SSE_DUMMY
}, /* emms */
3031 [0x78] = { NULL
, SSE_SPECIAL
, NULL
, SSE_SPECIAL
}, /* extrq_i, insertq_i */
3032 [0x79] = { NULL
, gen_helper_extrq_r
, NULL
, gen_helper_insertq_r
},
3033 [0x7c] = { NULL
, gen_helper_haddpd
, NULL
, gen_helper_haddps
},
3034 [0x7d] = { NULL
, gen_helper_hsubpd
, NULL
, gen_helper_hsubps
},
3035 [0x7e] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movd, movd, , movq */
3036 [0x7f] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movq, movdqa, movdqu */
3037 [0xc4] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* pinsrw */
3038 [0xc5] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* pextrw */
3039 [0xd0] = { NULL
, gen_helper_addsubpd
, NULL
, gen_helper_addsubps
},
3040 [0xd1] = MMX_OP2(psrlw
),
3041 [0xd2] = MMX_OP2(psrld
),
3042 [0xd3] = MMX_OP2(psrlq
),
3043 [0xd4] = MMX_OP2(paddq
),
3044 [0xd5] = MMX_OP2(pmullw
),
3045 [0xd6] = { NULL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
},
3046 [0xd7] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* pmovmskb */
3047 [0xd8] = MMX_OP2(psubusb
),
3048 [0xd9] = MMX_OP2(psubusw
),
3049 [0xda] = MMX_OP2(pminub
),
3050 [0xdb] = MMX_OP2(pand
),
3051 [0xdc] = MMX_OP2(paddusb
),
3052 [0xdd] = MMX_OP2(paddusw
),
3053 [0xde] = MMX_OP2(pmaxub
),
3054 [0xdf] = MMX_OP2(pandn
),
3055 [0xe0] = MMX_OP2(pavgb
),
3056 [0xe1] = MMX_OP2(psraw
),
3057 [0xe2] = MMX_OP2(psrad
),
3058 [0xe3] = MMX_OP2(pavgw
),
3059 [0xe4] = MMX_OP2(pmulhuw
),
3060 [0xe5] = MMX_OP2(pmulhw
),
3061 [0xe6] = { NULL
, gen_helper_cvttpd2dq
, gen_helper_cvtdq2pd
, gen_helper_cvtpd2dq
},
3062 [0xe7] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movntq, movntq */
3063 [0xe8] = MMX_OP2(psubsb
),
3064 [0xe9] = MMX_OP2(psubsw
),
3065 [0xea] = MMX_OP2(pminsw
),
3066 [0xeb] = MMX_OP2(por
),
3067 [0xec] = MMX_OP2(paddsb
),
3068 [0xed] = MMX_OP2(paddsw
),
3069 [0xee] = MMX_OP2(pmaxsw
),
3070 [0xef] = MMX_OP2(pxor
),
3071 [0xf0] = { NULL
, NULL
, NULL
, SSE_SPECIAL
}, /* lddqu */
3072 [0xf1] = MMX_OP2(psllw
),
3073 [0xf2] = MMX_OP2(pslld
),
3074 [0xf3] = MMX_OP2(psllq
),
3075 [0xf4] = MMX_OP2(pmuludq
),
3076 [0xf5] = MMX_OP2(pmaddwd
),
3077 [0xf6] = MMX_OP2(psadbw
),
3078 [0xf7] = { (SSEFunc_0_epp
)gen_helper_maskmov_mmx
,
3079 (SSEFunc_0_epp
)gen_helper_maskmov_xmm
}, /* XXX: casts */
3080 [0xf8] = MMX_OP2(psubb
),
3081 [0xf9] = MMX_OP2(psubw
),
3082 [0xfa] = MMX_OP2(psubl
),
3083 [0xfb] = MMX_OP2(psubq
),
3084 [0xfc] = MMX_OP2(paddb
),
3085 [0xfd] = MMX_OP2(paddw
),
3086 [0xfe] = MMX_OP2(paddl
),
3089 static const SSEFunc_0_epp sse_op_table2
[3 * 8][2] = {
3090 [0 + 2] = MMX_OP2(psrlw
),
3091 [0 + 4] = MMX_OP2(psraw
),
3092 [0 + 6] = MMX_OP2(psllw
),
3093 [8 + 2] = MMX_OP2(psrld
),
3094 [8 + 4] = MMX_OP2(psrad
),
3095 [8 + 6] = MMX_OP2(pslld
),
3096 [16 + 2] = MMX_OP2(psrlq
),
3097 [16 + 3] = { NULL
, gen_helper_psrldq_xmm
},
3098 [16 + 6] = MMX_OP2(psllq
),
3099 [16 + 7] = { NULL
, gen_helper_pslldq_xmm
},
3102 static const SSEFunc_0_epi sse_op_table3ai
[] = {
3103 gen_helper_cvtsi2ss
,
3107 #ifdef TARGET_X86_64
3108 static const SSEFunc_0_epl sse_op_table3aq
[] = {
3109 gen_helper_cvtsq2ss
,
3114 static const SSEFunc_i_ep sse_op_table3bi
[] = {
3115 gen_helper_cvttss2si
,
3116 gen_helper_cvtss2si
,
3117 gen_helper_cvttsd2si
,
3121 #ifdef TARGET_X86_64
3122 static const SSEFunc_l_ep sse_op_table3bq
[] = {
3123 gen_helper_cvttss2sq
,
3124 gen_helper_cvtss2sq
,
3125 gen_helper_cvttsd2sq
,
3130 static const SSEFunc_0_epp sse_op_table4
[8][4] = {
3141 static const SSEFunc_0_epp sse_op_table5
[256] = {
3142 [0x0c] = gen_helper_pi2fw
,
3143 [0x0d] = gen_helper_pi2fd
,
3144 [0x1c] = gen_helper_pf2iw
,
3145 [0x1d] = gen_helper_pf2id
,
3146 [0x8a] = gen_helper_pfnacc
,
3147 [0x8e] = gen_helper_pfpnacc
,
3148 [0x90] = gen_helper_pfcmpge
,
3149 [0x94] = gen_helper_pfmin
,
3150 [0x96] = gen_helper_pfrcp
,
3151 [0x97] = gen_helper_pfrsqrt
,
3152 [0x9a] = gen_helper_pfsub
,
3153 [0x9e] = gen_helper_pfadd
,
3154 [0xa0] = gen_helper_pfcmpgt
,
3155 [0xa4] = gen_helper_pfmax
,
3156 [0xa6] = gen_helper_movq
, /* pfrcpit1; no need to actually increase precision */
3157 [0xa7] = gen_helper_movq
, /* pfrsqit1 */
3158 [0xaa] = gen_helper_pfsubr
,
3159 [0xae] = gen_helper_pfacc
,
3160 [0xb0] = gen_helper_pfcmpeq
,
3161 [0xb4] = gen_helper_pfmul
,
3162 [0xb6] = gen_helper_movq
, /* pfrcpit2 */
3163 [0xb7] = gen_helper_pmulhrw_mmx
,
3164 [0xbb] = gen_helper_pswapd
,
3165 [0xbf] = gen_helper_pavgb_mmx
/* pavgusb */
3168 struct SSEOpHelper_epp
{
3169 SSEFunc_0_epp op
[2];
3173 struct SSEOpHelper_eppi
{
3174 SSEFunc_0_eppi op
[2];
/* Builders for sse_op_table6/sse_op_table7 entries: each expands to a
 * { op[2], ext_mask } initializer pairing the helper function pointers
 * (MMX form, XMM form) with the CPUID feature flag that gates the insn.
 * SSSE3 insns have both MMX and XMM forms (via MMX_OP2); SSE4.1/4.2
 * insns are XMM-only, so the MMX slot is NULL. SSE41_SPECIAL marks
 * opcodes decoded by hand in gen_sse() rather than via a helper. */
3178 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
3179 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3180 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
3181 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
3183 static const struct SSEOpHelper_epp sse_op_table6
[256] = {
3184 [0x00] = SSSE3_OP(pshufb
),
3185 [0x01] = SSSE3_OP(phaddw
),
3186 [0x02] = SSSE3_OP(phaddd
),
3187 [0x03] = SSSE3_OP(phaddsw
),
3188 [0x04] = SSSE3_OP(pmaddubsw
),
3189 [0x05] = SSSE3_OP(phsubw
),
3190 [0x06] = SSSE3_OP(phsubd
),
3191 [0x07] = SSSE3_OP(phsubsw
),
3192 [0x08] = SSSE3_OP(psignb
),
3193 [0x09] = SSSE3_OP(psignw
),
3194 [0x0a] = SSSE3_OP(psignd
),
3195 [0x0b] = SSSE3_OP(pmulhrsw
),
3196 [0x10] = SSE41_OP(pblendvb
),
3197 [0x14] = SSE41_OP(blendvps
),
3198 [0x15] = SSE41_OP(blendvpd
),
3199 [0x17] = SSE41_OP(ptest
),
3200 [0x1c] = SSSE3_OP(pabsb
),
3201 [0x1d] = SSSE3_OP(pabsw
),
3202 [0x1e] = SSSE3_OP(pabsd
),
3203 [0x20] = SSE41_OP(pmovsxbw
),
3204 [0x21] = SSE41_OP(pmovsxbd
),
3205 [0x22] = SSE41_OP(pmovsxbq
),
3206 [0x23] = SSE41_OP(pmovsxwd
),
3207 [0x24] = SSE41_OP(pmovsxwq
),
3208 [0x25] = SSE41_OP(pmovsxdq
),
3209 [0x28] = SSE41_OP(pmuldq
),
3210 [0x29] = SSE41_OP(pcmpeqq
),
3211 [0x2a] = SSE41_SPECIAL
, /* movntqda */
3212 [0x2b] = SSE41_OP(packusdw
),
3213 [0x30] = SSE41_OP(pmovzxbw
),
3214 [0x31] = SSE41_OP(pmovzxbd
),
3215 [0x32] = SSE41_OP(pmovzxbq
),
3216 [0x33] = SSE41_OP(pmovzxwd
),
3217 [0x34] = SSE41_OP(pmovzxwq
),
3218 [0x35] = SSE41_OP(pmovzxdq
),
3219 [0x37] = SSE42_OP(pcmpgtq
),
3220 [0x38] = SSE41_OP(pminsb
),
3221 [0x39] = SSE41_OP(pminsd
),
3222 [0x3a] = SSE41_OP(pminuw
),
3223 [0x3b] = SSE41_OP(pminud
),
3224 [0x3c] = SSE41_OP(pmaxsb
),
3225 [0x3d] = SSE41_OP(pmaxsd
),
3226 [0x3e] = SSE41_OP(pmaxuw
),
3227 [0x3f] = SSE41_OP(pmaxud
),
3228 [0x40] = SSE41_OP(pmulld
),
3229 [0x41] = SSE41_OP(phminposuw
),
3232 static const struct SSEOpHelper_eppi sse_op_table7
[256] = {
3233 [0x08] = SSE41_OP(roundps
),
3234 [0x09] = SSE41_OP(roundpd
),
3235 [0x0a] = SSE41_OP(roundss
),
3236 [0x0b] = SSE41_OP(roundsd
),
3237 [0x0c] = SSE41_OP(blendps
),
3238 [0x0d] = SSE41_OP(blendpd
),
3239 [0x0e] = SSE41_OP(pblendw
),
3240 [0x0f] = SSSE3_OP(palignr
),
3241 [0x14] = SSE41_SPECIAL
, /* pextrb */
3242 [0x15] = SSE41_SPECIAL
, /* pextrw */
3243 [0x16] = SSE41_SPECIAL
, /* pextrd/pextrq */
3244 [0x17] = SSE41_SPECIAL
, /* extractps */
3245 [0x20] = SSE41_SPECIAL
, /* pinsrb */
3246 [0x21] = SSE41_SPECIAL
, /* insertps */
3247 [0x22] = SSE41_SPECIAL
, /* pinsrd/pinsrq */
3248 [0x40] = SSE41_OP(dpps
),
3249 [0x41] = SSE41_OP(dppd
),
3250 [0x42] = SSE41_OP(mpsadbw
),
3251 [0x60] = SSE42_OP(pcmpestrm
),
3252 [0x61] = SSE42_OP(pcmpestri
),
3253 [0x62] = SSE42_OP(pcmpistrm
),
3254 [0x63] = SSE42_OP(pcmpistri
),
3257 static void gen_sse(CPUX86State
*env
, DisasContext
*s
, int b
,
3258 target_ulong pc_start
, int rex_r
)
3260 int b1
, op1_offset
, op2_offset
, is_xmm
, val
, ot
;
3261 int modrm
, mod
, rm
, reg
, reg_addr
, offset_addr
;
3262 SSEFunc_0_epp sse_fn_epp
;
3263 SSEFunc_0_eppi sse_fn_eppi
;
3264 SSEFunc_0_ppi sse_fn_ppi
;
3265 SSEFunc_0_eppt sse_fn_eppt
;
3268 if (s
->prefix
& PREFIX_DATA
)
3270 else if (s
->prefix
& PREFIX_REPZ
)
3272 else if (s
->prefix
& PREFIX_REPNZ
)
3276 sse_fn_epp
= sse_op_table1
[b
][b1
];
3280 if ((b
<= 0x5f && b
>= 0x10) || b
== 0xc6 || b
== 0xc2) {
3290 /* simple MMX/SSE operation */
3291 if (s
->flags
& HF_TS_MASK
) {
3292 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
3295 if (s
->flags
& HF_EM_MASK
) {
3297 gen_exception(s
, EXCP06_ILLOP
, pc_start
- s
->cs_base
);
3300 if (is_xmm
&& !(s
->flags
& HF_OSFXSR_MASK
))
3301 if ((b
!= 0x38 && b
!= 0x3a) || (s
->prefix
& PREFIX_DATA
))
3304 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
))
3307 gen_helper_emms(cpu_env
);
3312 gen_helper_emms(cpu_env
);
3315 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3316 the static cpu state) */
3318 gen_helper_enter_mmx(cpu_env
);
3321 modrm
= cpu_ldub_code(env
, s
->pc
++);
3322 reg
= ((modrm
>> 3) & 7);
3325 mod
= (modrm
>> 6) & 3;
3326 if (sse_fn_epp
== SSE_SPECIAL
) {
3329 case 0x0e7: /* movntq */
3332 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3333 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3335 case 0x1e7: /* movntdq */
3336 case 0x02b: /* movntps */
3337 case 0x12b: /* movntps */
3340 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3341 gen_sto_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3343 case 0x3f0: /* lddqu */
3346 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3347 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3349 case 0x22b: /* movntss */
3350 case 0x32b: /* movntsd */
3353 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3355 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,
3358 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
3359 xmm_regs
[reg
].XMM_L(0)));
3360 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
3363 case 0x6e: /* movd mm, ea */
3364 #ifdef TARGET_X86_64
3365 if (s
->dflag
== 2) {
3366 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 0);
3367 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3371 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 0);
3372 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3373 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3374 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3375 gen_helper_movl_mm_T0_mmx(cpu_ptr0
, cpu_tmp2_i32
);
3378 case 0x16e: /* movd xmm, ea */
3379 #ifdef TARGET_X86_64
3380 if (s
->dflag
== 2) {
3381 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 0);
3382 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3383 offsetof(CPUX86State
,xmm_regs
[reg
]));
3384 gen_helper_movq_mm_T0_xmm(cpu_ptr0
, cpu_T
[0]);
3388 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 0);
3389 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3390 offsetof(CPUX86State
,xmm_regs
[reg
]));
3391 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3392 gen_helper_movl_mm_T0_xmm(cpu_ptr0
, cpu_tmp2_i32
);
3395 case 0x6f: /* movq mm, ea */
3397 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3398 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3401 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
3402 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3403 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
3404 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3407 case 0x010: /* movups */
3408 case 0x110: /* movupd */
3409 case 0x028: /* movaps */
3410 case 0x128: /* movapd */
3411 case 0x16f: /* movdqa xmm, ea */
3412 case 0x26f: /* movdqu xmm, ea */
3414 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3415 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3417 rm
= (modrm
& 7) | REX_B(s
);
3418 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[reg
]),
3419 offsetof(CPUX86State
,xmm_regs
[rm
]));
3422 case 0x210: /* movss xmm, ea */
3424 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3425 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
3426 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3428 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)));
3429 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3430 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3432 rm
= (modrm
& 7) | REX_B(s
);
3433 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3434 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)));
3437 case 0x310: /* movsd xmm, ea */
3439 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3440 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3442 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3443 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3445 rm
= (modrm
& 7) | REX_B(s
);
3446 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3447 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3450 case 0x012: /* movlps */
3451 case 0x112: /* movlpd */
3453 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3454 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3457 rm
= (modrm
& 7) | REX_B(s
);
3458 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3459 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(1)));
3462 case 0x212: /* movsldup */
3464 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3465 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3467 rm
= (modrm
& 7) | REX_B(s
);
3468 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3469 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)));
3470 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)),
3471 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(2)));
3473 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)),
3474 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3475 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)),
3476 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3478 case 0x312: /* movddup */
3480 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3481 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3483 rm
= (modrm
& 7) | REX_B(s
);
3484 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3485 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3487 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)),
3488 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3490 case 0x016: /* movhps */
3491 case 0x116: /* movhpd */
3493 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3494 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3497 rm
= (modrm
& 7) | REX_B(s
);
3498 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)),
3499 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3502 case 0x216: /* movshdup */
3504 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3505 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3507 rm
= (modrm
& 7) | REX_B(s
);
3508 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)),
3509 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(1)));
3510 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)),
3511 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(3)));
3513 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3514 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)));
3515 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)),
3516 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3521 int bit_index
, field_length
;
3523 if (b1
== 1 && reg
!= 0)
3525 field_length
= cpu_ldub_code(env
, s
->pc
++) & 0x3F;
3526 bit_index
= cpu_ldub_code(env
, s
->pc
++) & 0x3F;
3527 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3528 offsetof(CPUX86State
,xmm_regs
[reg
]));
3530 gen_helper_extrq_i(cpu_env
, cpu_ptr0
,
3531 tcg_const_i32(bit_index
),
3532 tcg_const_i32(field_length
));
3534 gen_helper_insertq_i(cpu_env
, cpu_ptr0
,
3535 tcg_const_i32(bit_index
),
3536 tcg_const_i32(field_length
));
3539 case 0x7e: /* movd ea, mm */
3540 #ifdef TARGET_X86_64
3541 if (s
->dflag
== 2) {
3542 tcg_gen_ld_i64(cpu_T
[0], cpu_env
,
3543 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3544 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 1);
3548 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
3549 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_L(0)));
3550 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 1);
3553 case 0x17e: /* movd ea, xmm */
3554 #ifdef TARGET_X86_64
3555 if (s
->dflag
== 2) {
3556 tcg_gen_ld_i64(cpu_T
[0], cpu_env
,
3557 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3558 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 1);
3562 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
3563 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3564 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 1);
3567 case 0x27e: /* movq xmm, ea */
3569 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3570 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3572 rm
= (modrm
& 7) | REX_B(s
);
3573 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3574 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3576 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3578 case 0x7f: /* movq ea, mm */
3580 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3581 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3584 gen_op_movq(offsetof(CPUX86State
,fpregs
[rm
].mmx
),
3585 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3588 case 0x011: /* movups */
3589 case 0x111: /* movupd */
3590 case 0x029: /* movaps */
3591 case 0x129: /* movapd */
3592 case 0x17f: /* movdqa ea, xmm */
3593 case 0x27f: /* movdqu ea, xmm */
3595 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3596 gen_sto_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3598 rm
= (modrm
& 7) | REX_B(s
);
3599 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[rm
]),
3600 offsetof(CPUX86State
,xmm_regs
[reg
]));
3603 case 0x211: /* movss ea, xmm */
3605 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3606 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3607 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
3609 rm
= (modrm
& 7) | REX_B(s
);
3610 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)),
3611 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3614 case 0x311: /* movsd ea, xmm */
3616 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3617 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3619 rm
= (modrm
& 7) | REX_B(s
);
3620 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)),
3621 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3624 case 0x013: /* movlps */
3625 case 0x113: /* movlpd */
3627 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3628 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3633 case 0x017: /* movhps */
3634 case 0x117: /* movhpd */
3636 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3637 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3642 case 0x71: /* shift mm, im */
3645 case 0x171: /* shift xmm, im */
3651 val
= cpu_ldub_code(env
, s
->pc
++);
3653 gen_op_movl_T0_im(val
);
3654 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
3656 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(1)));
3657 op1_offset
= offsetof(CPUX86State
,xmm_t0
);
3659 gen_op_movl_T0_im(val
);
3660 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(0)));
3662 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(1)));
3663 op1_offset
= offsetof(CPUX86State
,mmx_t0
);
3665 sse_fn_epp
= sse_op_table2
[((b
- 1) & 3) * 8 +
3666 (((modrm
>> 3)) & 7)][b1
];
3671 rm
= (modrm
& 7) | REX_B(s
);
3672 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3675 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3677 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3678 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op1_offset
);
3679 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3681 case 0x050: /* movmskps */
3682 rm
= (modrm
& 7) | REX_B(s
);
3683 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3684 offsetof(CPUX86State
,xmm_regs
[rm
]));
3685 gen_helper_movmskps(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3686 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3687 gen_op_mov_reg_T0(OT_LONG
, reg
);
3689 case 0x150: /* movmskpd */
3690 rm
= (modrm
& 7) | REX_B(s
);
3691 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3692 offsetof(CPUX86State
,xmm_regs
[rm
]));
3693 gen_helper_movmskpd(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3694 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3695 gen_op_mov_reg_T0(OT_LONG
, reg
);
3697 case 0x02a: /* cvtpi2ps */
3698 case 0x12a: /* cvtpi2pd */
3699 gen_helper_enter_mmx(cpu_env
);
3701 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3702 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3703 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
3706 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3708 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3709 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3710 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3713 gen_helper_cvtpi2ps(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3717 gen_helper_cvtpi2pd(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3721 case 0x22a: /* cvtsi2ss */
3722 case 0x32a: /* cvtsi2sd */
3723 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3724 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3725 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3726 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3727 if (ot
== OT_LONG
) {
3728 SSEFunc_0_epi sse_fn_epi
= sse_op_table3ai
[(b
>> 8) & 1];
3729 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3730 sse_fn_epi(cpu_env
, cpu_ptr0
, cpu_tmp2_i32
);
3732 #ifdef TARGET_X86_64
3733 SSEFunc_0_epl sse_fn_epl
= sse_op_table3aq
[(b
>> 8) & 1];
3734 sse_fn_epl(cpu_env
, cpu_ptr0
, cpu_T
[0]);
3740 case 0x02c: /* cvttps2pi */
3741 case 0x12c: /* cvttpd2pi */
3742 case 0x02d: /* cvtps2pi */
3743 case 0x12d: /* cvtpd2pi */
3744 gen_helper_enter_mmx(cpu_env
);
3746 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3747 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3748 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
3750 rm
= (modrm
& 7) | REX_B(s
);
3751 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3753 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
);
3754 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3755 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3758 gen_helper_cvttps2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3761 gen_helper_cvttpd2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3764 gen_helper_cvtps2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3767 gen_helper_cvtpd2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3771 case 0x22c: /* cvttss2si */
3772 case 0x32c: /* cvttsd2si */
3773 case 0x22d: /* cvtss2si */
3774 case 0x32d: /* cvtsd2si */
3775 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3777 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3779 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_t0
.XMM_Q(0)));
3781 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
3782 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
3784 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3786 rm
= (modrm
& 7) | REX_B(s
);
3787 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3789 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3790 if (ot
== OT_LONG
) {
3791 SSEFunc_i_ep sse_fn_i_ep
=
3792 sse_op_table3bi
[((b
>> 7) & 2) | (b
& 1)];
3793 sse_fn_i_ep(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3794 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3796 #ifdef TARGET_X86_64
3797 SSEFunc_l_ep sse_fn_l_ep
=
3798 sse_op_table3bq
[((b
>> 7) & 2) | (b
& 1)];
3799 sse_fn_l_ep(cpu_T
[0], cpu_env
, cpu_ptr0
);
3804 gen_op_mov_reg_T0(ot
, reg
);
3806 case 0xc4: /* pinsrw */
3809 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
3810 val
= cpu_ldub_code(env
, s
->pc
++);
3813 tcg_gen_st16_tl(cpu_T
[0], cpu_env
,
3814 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_W(val
)));
3817 tcg_gen_st16_tl(cpu_T
[0], cpu_env
,
3818 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_W(val
)));
3821 case 0xc5: /* pextrw */
3825 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3826 val
= cpu_ldub_code(env
, s
->pc
++);
3829 rm
= (modrm
& 7) | REX_B(s
);
3830 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
,
3831 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_W(val
)));
3835 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
,
3836 offsetof(CPUX86State
,fpregs
[rm
].mmx
.MMX_W(val
)));
3838 reg
= ((modrm
>> 3) & 7) | rex_r
;
3839 gen_op_mov_reg_T0(ot
, reg
);
3841 case 0x1d6: /* movq ea, xmm */
3843 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3844 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3846 rm
= (modrm
& 7) | REX_B(s
);
3847 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)),
3848 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3849 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(1)));
3852 case 0x2d6: /* movq2dq */
3853 gen_helper_enter_mmx(cpu_env
);
3855 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3856 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3857 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3859 case 0x3d6: /* movdq2q */
3860 gen_helper_enter_mmx(cpu_env
);
3861 rm
= (modrm
& 7) | REX_B(s
);
3862 gen_op_movq(offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
),
3863 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3865 case 0xd7: /* pmovmskb */
3870 rm
= (modrm
& 7) | REX_B(s
);
3871 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[rm
]));
3872 gen_helper_pmovmskb_xmm(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3875 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3876 gen_helper_pmovmskb_mmx(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3878 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3879 reg
= ((modrm
>> 3) & 7) | rex_r
;
3880 gen_op_mov_reg_T0(OT_LONG
, reg
);
3883 if (s
->prefix
& PREFIX_REPNZ
)
3887 modrm
= cpu_ldub_code(env
, s
->pc
++);
3889 reg
= ((modrm
>> 3) & 7) | rex_r
;
3890 mod
= (modrm
>> 6) & 3;
3895 sse_fn_epp
= sse_op_table6
[b
].op
[b1
];
3899 if (!(s
->cpuid_ext_features
& sse_op_table6
[b
].ext_mask
))
3903 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3905 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
3907 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3908 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3910 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3911 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3912 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3913 gen_ldq_env_A0(s
->mem_index
, op2_offset
+
3914 offsetof(XMMReg
, XMM_Q(0)));
3916 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3917 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3918 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
3919 (s
->mem_index
>> 2) - 1);
3920 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
3921 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, op2_offset
+
3922 offsetof(XMMReg
, XMM_L(0)));
3924 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3925 tcg_gen_qemu_ld16u(cpu_tmp0
, cpu_A0
,
3926 (s
->mem_index
>> 2) - 1);
3927 tcg_gen_st16_tl(cpu_tmp0
, cpu_env
, op2_offset
+
3928 offsetof(XMMReg
, XMM_W(0)));
3930 case 0x2a: /* movntqda */
3931 gen_ldo_env_A0(s
->mem_index
, op1_offset
);
3934 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
3938 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
3940 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3942 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3943 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3944 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
3947 if (sse_fn_epp
== SSE_SPECIAL
) {
3951 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3952 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3953 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3956 set_cc_op(s
, CC_OP_EFLAGS
);
3959 case 0x338: /* crc32 */
3962 modrm
= cpu_ldub_code(env
, s
->pc
++);
3963 reg
= ((modrm
>> 3) & 7) | rex_r
;
3965 if (b
!= 0xf0 && b
!= 0xf1)
3967 if (!(s
->cpuid_ext_features
& CPUID_EXT_SSE42
))
3972 else if (b
== 0xf1 && s
->dflag
!= 2)
3973 if (s
->prefix
& PREFIX_DATA
)
3980 gen_op_mov_TN_reg(OT_LONG
, 0, reg
);
3981 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3982 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3983 gen_helper_crc32(cpu_T
[0], cpu_tmp2_i32
,
3984 cpu_T
[0], tcg_const_i32(8 << ot
));
3986 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3987 gen_op_mov_reg_T0(ot
, reg
);
3992 modrm
= cpu_ldub_code(env
, s
->pc
++);
3994 reg
= ((modrm
>> 3) & 7) | rex_r
;
3995 mod
= (modrm
>> 6) & 3;
4000 sse_fn_eppi
= sse_op_table7
[b
].op
[b1
];
4004 if (!(s
->cpuid_ext_features
& sse_op_table7
[b
].ext_mask
))
4007 if (sse_fn_eppi
== SSE_SPECIAL
) {
4008 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
4009 rm
= (modrm
& 7) | REX_B(s
);
4011 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4012 reg
= ((modrm
>> 3) & 7) | rex_r
;
4013 val
= cpu_ldub_code(env
, s
->pc
++);
4015 case 0x14: /* pextrb */
4016 tcg_gen_ld8u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4017 xmm_regs
[reg
].XMM_B(val
& 15)));
4019 gen_op_mov_reg_T0(ot
, rm
);
4021 tcg_gen_qemu_st8(cpu_T
[0], cpu_A0
,
4022 (s
->mem_index
>> 2) - 1);
4024 case 0x15: /* pextrw */
4025 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4026 xmm_regs
[reg
].XMM_W(val
& 7)));
4028 gen_op_mov_reg_T0(ot
, rm
);
4030 tcg_gen_qemu_st16(cpu_T
[0], cpu_A0
,
4031 (s
->mem_index
>> 2) - 1);
4034 if (ot
== OT_LONG
) { /* pextrd */
4035 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
4036 offsetof(CPUX86State
,
4037 xmm_regs
[reg
].XMM_L(val
& 3)));
4038 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
4040 gen_op_mov_reg_v(ot
, rm
, cpu_T
[0]);
4042 tcg_gen_qemu_st32(cpu_T
[0], cpu_A0
,
4043 (s
->mem_index
>> 2) - 1);
4044 } else { /* pextrq */
4045 #ifdef TARGET_X86_64
4046 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
4047 offsetof(CPUX86State
,
4048 xmm_regs
[reg
].XMM_Q(val
& 1)));
4050 gen_op_mov_reg_v(ot
, rm
, cpu_tmp1_i64
);
4052 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
4053 (s
->mem_index
>> 2) - 1);
4059 case 0x17: /* extractps */
4060 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4061 xmm_regs
[reg
].XMM_L(val
& 3)));
4063 gen_op_mov_reg_T0(ot
, rm
);
4065 tcg_gen_qemu_st32(cpu_T
[0], cpu_A0
,
4066 (s
->mem_index
>> 2) - 1);
4068 case 0x20: /* pinsrb */
4070 gen_op_mov_TN_reg(OT_LONG
, 0, rm
);
4072 tcg_gen_qemu_ld8u(cpu_tmp0
, cpu_A0
,
4073 (s
->mem_index
>> 2) - 1);
4074 tcg_gen_st8_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
,
4075 xmm_regs
[reg
].XMM_B(val
& 15)));
4077 case 0x21: /* insertps */
4079 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
4080 offsetof(CPUX86State
,xmm_regs
[rm
]
4081 .XMM_L((val
>> 6) & 3)));
4083 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
4084 (s
->mem_index
>> 2) - 1);
4085 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
4087 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
4088 offsetof(CPUX86State
,xmm_regs
[reg
]
4089 .XMM_L((val
>> 4) & 3)));
4091 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4092 cpu_env
, offsetof(CPUX86State
,
4093 xmm_regs
[reg
].XMM_L(0)));
4095 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4096 cpu_env
, offsetof(CPUX86State
,
4097 xmm_regs
[reg
].XMM_L(1)));
4099 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4100 cpu_env
, offsetof(CPUX86State
,
4101 xmm_regs
[reg
].XMM_L(2)));
4103 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4104 cpu_env
, offsetof(CPUX86State
,
4105 xmm_regs
[reg
].XMM_L(3)));
4108 if (ot
== OT_LONG
) { /* pinsrd */
4110 gen_op_mov_v_reg(ot
, cpu_tmp0
, rm
);
4112 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
4113 (s
->mem_index
>> 2) - 1);
4114 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
4115 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
4116 offsetof(CPUX86State
,
4117 xmm_regs
[reg
].XMM_L(val
& 3)));
4118 } else { /* pinsrq */
4119 #ifdef TARGET_X86_64
4121 gen_op_mov_v_reg(ot
, cpu_tmp1_i64
, rm
);
4123 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
4124 (s
->mem_index
>> 2) - 1);
4125 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
4126 offsetof(CPUX86State
,
4127 xmm_regs
[reg
].XMM_Q(val
& 1)));
4138 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4140 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
4142 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4143 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4144 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
4147 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4149 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4151 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4152 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4153 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
4156 val
= cpu_ldub_code(env
, s
->pc
++);
4158 if ((b
& 0xfc) == 0x60) { /* pcmpXstrX */
4159 set_cc_op(s
, CC_OP_EFLAGS
);
4162 /* The helper must use entire 64-bit gp registers */
4166 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4167 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4168 sse_fn_eppi(cpu_env
, cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4174 /* generic MMX or SSE operation */
4176 case 0x70: /* pshufx insn */
4177 case 0xc6: /* pshufx insn */
4178 case 0xc2: /* compare insns */
4185 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4187 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4188 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4189 if (b1
>= 2 && ((b
>= 0x50 && b
<= 0x5f && b
!= 0x5b) ||
4191 /* specific case for SSE single instructions */
4194 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
4195 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
4198 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_t0
.XMM_D(0)));
4201 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
4204 rm
= (modrm
& 7) | REX_B(s
);
4205 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
4208 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4210 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4211 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4212 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
4215 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4219 case 0x0f: /* 3DNow! data insns */
4220 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
))
4222 val
= cpu_ldub_code(env
, s
->pc
++);
4223 sse_fn_epp
= sse_op_table5
[val
];
4227 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4228 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4229 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4231 case 0x70: /* pshufx insn */
4232 case 0xc6: /* pshufx insn */
4233 val
= cpu_ldub_code(env
, s
->pc
++);
4234 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4235 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4236 /* XXX: introduce a new table? */
4237 sse_fn_ppi
= (SSEFunc_0_ppi
)sse_fn_epp
;
4238 sse_fn_ppi(cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4242 val
= cpu_ldub_code(env
, s
->pc
++);
4245 sse_fn_epp
= sse_op_table4
[val
][b1
];
4247 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4248 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4249 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4252 /* maskmov : we must prepare A0 */
4255 #ifdef TARGET_X86_64
4256 if (s
->aflag
== 2) {
4257 gen_op_movq_A0_reg(R_EDI
);
4261 gen_op_movl_A0_reg(R_EDI
);
4263 gen_op_andl_A0_ffff();
4265 gen_add_A0_ds_seg(s
);
4267 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4268 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4269 /* XXX: introduce a new table? */
4270 sse_fn_eppt
= (SSEFunc_0_eppt
)sse_fn_epp
;
4271 sse_fn_eppt(cpu_env
, cpu_ptr0
, cpu_ptr1
, cpu_A0
);
4274 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4275 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4276 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4279 if (b
== 0x2e || b
== 0x2f) {
4280 set_cc_op(s
, CC_OP_EFLAGS
);
4285 /* convert one instruction. s->is_jmp is set if the translation must
4286 be stopped. Return the next pc value */
4287 static target_ulong
disas_insn(CPUX86State
*env
, DisasContext
*s
,
4288 target_ulong pc_start
)
4290 int b
, prefixes
, aflag
, dflag
;
4292 int modrm
, reg
, rm
, mod
, reg_addr
, op
, opreg
, offset_addr
, val
;
4293 target_ulong next_eip
, tval
;
4296 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4297 tcg_gen_debug_insn_start(pc_start
);
4306 #ifdef TARGET_X86_64
4311 s
->rip_offset
= 0; /* for relative ip address */
4313 b
= cpu_ldub_code(env
, s
->pc
);
4315 /* check prefixes */
4316 #ifdef TARGET_X86_64
4320 prefixes
|= PREFIX_REPZ
;
4323 prefixes
|= PREFIX_REPNZ
;
4326 prefixes
|= PREFIX_LOCK
;
4347 prefixes
|= PREFIX_DATA
;
4350 prefixes
|= PREFIX_ADR
;
4354 rex_w
= (b
>> 3) & 1;
4355 rex_r
= (b
& 0x4) << 1;
4356 s
->rex_x
= (b
& 0x2) << 2;
4357 REX_B(s
) = (b
& 0x1) << 3;
4358 x86_64_hregs
= 1; /* select uniform byte register addressing */
4362 /* 0x66 is ignored if rex.w is set */
4365 if (prefixes
& PREFIX_DATA
)
4368 if (!(prefixes
& PREFIX_ADR
))
4375 prefixes
|= PREFIX_REPZ
;
4378 prefixes
|= PREFIX_REPNZ
;
4381 prefixes
|= PREFIX_LOCK
;
4402 prefixes
|= PREFIX_DATA
;
4405 prefixes
|= PREFIX_ADR
;
4408 if (prefixes
& PREFIX_DATA
)
4410 if (prefixes
& PREFIX_ADR
)
4414 s
->prefix
= prefixes
;
4418 /* lock generation */
4419 if (prefixes
& PREFIX_LOCK
)
4422 /* now check op code */
4426 /**************************/
4427 /* extended op code */
4428 b
= cpu_ldub_code(env
, s
->pc
++) | 0x100;
4431 /**************************/
4449 ot
= dflag
+ OT_WORD
;
4452 case 0: /* OP Ev, Gv */
4453 modrm
= cpu_ldub_code(env
, s
->pc
++);
4454 reg
= ((modrm
>> 3) & 7) | rex_r
;
4455 mod
= (modrm
>> 6) & 3;
4456 rm
= (modrm
& 7) | REX_B(s
);
4458 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4460 } else if (op
== OP_XORL
&& rm
== reg
) {
4462 /* xor reg, reg optimisation */
4464 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4465 gen_op_mov_reg_T0(ot
, reg
);
4466 gen_op_update1_cc();
4471 gen_op_mov_TN_reg(ot
, 1, reg
);
4472 gen_op(s
, op
, ot
, opreg
);
4474 case 1: /* OP Gv, Ev */
4475 modrm
= cpu_ldub_code(env
, s
->pc
++);
4476 mod
= (modrm
>> 6) & 3;
4477 reg
= ((modrm
>> 3) & 7) | rex_r
;
4478 rm
= (modrm
& 7) | REX_B(s
);
4480 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4481 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4482 } else if (op
== OP_XORL
&& rm
== reg
) {
4485 gen_op_mov_TN_reg(ot
, 1, rm
);
4487 gen_op(s
, op
, ot
, reg
);
4489 case 2: /* OP A, Iv */
4490 val
= insn_get(env
, s
, ot
);
4491 gen_op_movl_T1_im(val
);
4492 gen_op(s
, op
, ot
, OR_EAX
);
4501 case 0x80: /* GRP1 */
4510 ot
= dflag
+ OT_WORD
;
4512 modrm
= cpu_ldub_code(env
, s
->pc
++);
4513 mod
= (modrm
>> 6) & 3;
4514 rm
= (modrm
& 7) | REX_B(s
);
4515 op
= (modrm
>> 3) & 7;
4521 s
->rip_offset
= insn_const_size(ot
);
4522 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4533 val
= insn_get(env
, s
, ot
);
4536 val
= (int8_t)insn_get(env
, s
, OT_BYTE
);
4539 gen_op_movl_T1_im(val
);
4540 gen_op(s
, op
, ot
, opreg
);
4544 /**************************/
4545 /* inc, dec, and other misc arith */
4546 case 0x40 ... 0x47: /* inc Gv */
4547 ot
= dflag
? OT_LONG
: OT_WORD
;
4548 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), 1);
4550 case 0x48 ... 0x4f: /* dec Gv */
4551 ot
= dflag
? OT_LONG
: OT_WORD
;
4552 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), -1);
4554 case 0xf6: /* GRP3 */
4559 ot
= dflag
+ OT_WORD
;
4561 modrm
= cpu_ldub_code(env
, s
->pc
++);
4562 mod
= (modrm
>> 6) & 3;
4563 rm
= (modrm
& 7) | REX_B(s
);
4564 op
= (modrm
>> 3) & 7;
4567 s
->rip_offset
= insn_const_size(ot
);
4568 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4569 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
4571 gen_op_mov_TN_reg(ot
, 0, rm
);
4576 val
= insn_get(env
, s
, ot
);
4577 gen_op_movl_T1_im(val
);
4578 gen_op_testl_T0_T1_cc();
4579 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4582 tcg_gen_not_tl(cpu_T
[0], cpu_T
[0]);
4584 gen_op_st_T0_A0(ot
+ s
->mem_index
);
4586 gen_op_mov_reg_T0(ot
, rm
);
4590 tcg_gen_neg_tl(cpu_T
[0], cpu_T
[0]);
4592 gen_op_st_T0_A0(ot
+ s
->mem_index
);
4594 gen_op_mov_reg_T0(ot
, rm
);
4596 gen_op_update_neg_cc();
4597 set_cc_op(s
, CC_OP_SUBB
+ ot
);
4602 gen_op_mov_TN_reg(OT_BYTE
, 1, R_EAX
);
4603 tcg_gen_ext8u_tl(cpu_T
[0], cpu_T
[0]);
4604 tcg_gen_ext8u_tl(cpu_T
[1], cpu_T
[1]);
4605 /* XXX: use 32 bit mul which could be faster */
4606 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4607 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4608 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4609 tcg_gen_andi_tl(cpu_cc_src
, cpu_T
[0], 0xff00);
4610 set_cc_op(s
, CC_OP_MULB
);
4613 gen_op_mov_TN_reg(OT_WORD
, 1, R_EAX
);
4614 tcg_gen_ext16u_tl(cpu_T
[0], cpu_T
[0]);
4615 tcg_gen_ext16u_tl(cpu_T
[1], cpu_T
[1]);
4616 /* XXX: use 32 bit mul which could be faster */
4617 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4618 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4619 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4620 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 16);
4621 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
4622 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4623 set_cc_op(s
, CC_OP_MULW
);
4627 #ifdef TARGET_X86_64
4628 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4629 tcg_gen_ext32u_tl(cpu_T
[0], cpu_T
[0]);
4630 tcg_gen_ext32u_tl(cpu_T
[1], cpu_T
[1]);
4631 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4632 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4633 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4634 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 32);
4635 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4636 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4640 t0
= tcg_temp_new_i64();
4641 t1
= tcg_temp_new_i64();
4642 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4643 tcg_gen_extu_i32_i64(t0
, cpu_T
[0]);
4644 tcg_gen_extu_i32_i64(t1
, cpu_T
[1]);
4645 tcg_gen_mul_i64(t0
, t0
, t1
);
4646 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4647 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4648 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4649 tcg_gen_shri_i64(t0
, t0
, 32);
4650 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4651 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4652 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4655 set_cc_op(s
, CC_OP_MULL
);
4657 #ifdef TARGET_X86_64
4659 gen_helper_mulq_EAX_T0(cpu_env
, cpu_T
[0]);
4660 set_cc_op(s
, CC_OP_MULQ
);
4668 gen_op_mov_TN_reg(OT_BYTE
, 1, R_EAX
);
4669 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
4670 tcg_gen_ext8s_tl(cpu_T
[1], cpu_T
[1]);
4671 /* XXX: use 32 bit mul which could be faster */
4672 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4673 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4674 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4675 tcg_gen_ext8s_tl(cpu_tmp0
, cpu_T
[0]);
4676 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4677 set_cc_op(s
, CC_OP_MULB
);
4680 gen_op_mov_TN_reg(OT_WORD
, 1, R_EAX
);
4681 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
4682 tcg_gen_ext16s_tl(cpu_T
[1], cpu_T
[1]);
4683 /* XXX: use 32 bit mul which could be faster */
4684 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4685 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4686 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4687 tcg_gen_ext16s_tl(cpu_tmp0
, cpu_T
[0]);
4688 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4689 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 16);
4690 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
4691 set_cc_op(s
, CC_OP_MULW
);
4695 #ifdef TARGET_X86_64
4696 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4697 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4698 tcg_gen_ext32s_tl(cpu_T
[1], cpu_T
[1]);
4699 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4700 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4701 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4702 tcg_gen_ext32s_tl(cpu_tmp0
, cpu_T
[0]);
4703 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4704 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 32);
4705 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4709 t0
= tcg_temp_new_i64();
4710 t1
= tcg_temp_new_i64();
4711 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4712 tcg_gen_ext_i32_i64(t0
, cpu_T
[0]);
4713 tcg_gen_ext_i32_i64(t1
, cpu_T
[1]);
4714 tcg_gen_mul_i64(t0
, t0
, t1
);
4715 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4716 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4717 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4718 tcg_gen_sari_tl(cpu_tmp0
, cpu_T
[0], 31);
4719 tcg_gen_shri_i64(t0
, t0
, 32);
4720 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4721 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4722 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4725 set_cc_op(s
, CC_OP_MULL
);
4727 #ifdef TARGET_X86_64
4729 gen_helper_imulq_EAX_T0(cpu_env
, cpu_T
[0]);
4730 set_cc_op(s
, CC_OP_MULQ
);
4738 gen_jmp_im(pc_start
- s
->cs_base
);
4739 gen_helper_divb_AL(cpu_env
, cpu_T
[0]);
4742 gen_jmp_im(pc_start
- s
->cs_base
);
4743 gen_helper_divw_AX(cpu_env
, cpu_T
[0]);
4747 gen_jmp_im(pc_start
- s
->cs_base
);
4748 gen_helper_divl_EAX(cpu_env
, cpu_T
[0]);
4750 #ifdef TARGET_X86_64
4752 gen_jmp_im(pc_start
- s
->cs_base
);
4753 gen_helper_divq_EAX(cpu_env
, cpu_T
[0]);
4761 gen_jmp_im(pc_start
- s
->cs_base
);
4762 gen_helper_idivb_AL(cpu_env
, cpu_T
[0]);
4765 gen_jmp_im(pc_start
- s
->cs_base
);
4766 gen_helper_idivw_AX(cpu_env
, cpu_T
[0]);
4770 gen_jmp_im(pc_start
- s
->cs_base
);
4771 gen_helper_idivl_EAX(cpu_env
, cpu_T
[0]);
4773 #ifdef TARGET_X86_64
4775 gen_jmp_im(pc_start
- s
->cs_base
);
4776 gen_helper_idivq_EAX(cpu_env
, cpu_T
[0]);
4786 case 0xfe: /* GRP4 */
4787 case 0xff: /* GRP5 */
4791 ot
= dflag
+ OT_WORD
;
4793 modrm
= cpu_ldub_code(env
, s
->pc
++);
4794 mod
= (modrm
>> 6) & 3;
4795 rm
= (modrm
& 7) | REX_B(s
);
4796 op
= (modrm
>> 3) & 7;
4797 if (op
>= 2 && b
== 0xfe) {
4801 if (op
== 2 || op
== 4) {
4802 /* operand size for jumps is 64 bit */
4804 } else if (op
== 3 || op
== 5) {
4805 ot
= dflag
? OT_LONG
+ (rex_w
== 1) : OT_WORD
;
4806 } else if (op
== 6) {
4807 /* default push size is 64 bit */
4808 ot
= dflag
? OT_QUAD
: OT_WORD
;
4812 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4813 if (op
>= 2 && op
!= 3 && op
!= 5)
4814 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
4816 gen_op_mov_TN_reg(ot
, 0, rm
);
4820 case 0: /* inc Ev */
4825 gen_inc(s
, ot
, opreg
, 1);
4827 case 1: /* dec Ev */
4832 gen_inc(s
, ot
, opreg
, -1);
4834 case 2: /* call Ev */
4835 /* XXX: optimize if memory (no 'and' is necessary) */
4837 gen_op_andl_T0_ffff();
4838 next_eip
= s
->pc
- s
->cs_base
;
4839 gen_movtl_T1_im(next_eip
);
4844 case 3: /* lcall Ev */
4845 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4846 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
4847 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
4849 if (s
->pe
&& !s
->vm86
) {
4850 gen_update_cc_op(s
);
4851 gen_jmp_im(pc_start
- s
->cs_base
);
4852 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4853 gen_helper_lcall_protected(cpu_env
, cpu_tmp2_i32
, cpu_T
[1],
4854 tcg_const_i32(dflag
),
4855 tcg_const_i32(s
->pc
- pc_start
));
4857 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4858 gen_helper_lcall_real(cpu_env
, cpu_tmp2_i32
, cpu_T
[1],
4859 tcg_const_i32(dflag
),
4860 tcg_const_i32(s
->pc
- s
->cs_base
));
4864 case 4: /* jmp Ev */
4866 gen_op_andl_T0_ffff();
4870 case 5: /* ljmp Ev */
4871 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4872 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
4873 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
4875 if (s
->pe
&& !s
->vm86
) {
4876 gen_update_cc_op(s
);
4877 gen_jmp_im(pc_start
- s
->cs_base
);
4878 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4879 gen_helper_ljmp_protected(cpu_env
, cpu_tmp2_i32
, cpu_T
[1],
4880 tcg_const_i32(s
->pc
- pc_start
));
4882 gen_op_movl_seg_T0_vm(R_CS
);
4883 gen_op_movl_T0_T1();
4888 case 6: /* push Ev */
4896 case 0x84: /* test Ev, Gv */
4901 ot
= dflag
+ OT_WORD
;
4903 modrm
= cpu_ldub_code(env
, s
->pc
++);
4904 reg
= ((modrm
>> 3) & 7) | rex_r
;
4906 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4907 gen_op_mov_TN_reg(ot
, 1, reg
);
4908 gen_op_testl_T0_T1_cc();
4909 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4912 case 0xa8: /* test eAX, Iv */
4917 ot
= dflag
+ OT_WORD
;
4918 val
= insn_get(env
, s
, ot
);
4920 gen_op_mov_TN_reg(ot
, 0, OR_EAX
);
4921 gen_op_movl_T1_im(val
);
4922 gen_op_testl_T0_T1_cc();
4923 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4926 case 0x98: /* CWDE/CBW */
4927 #ifdef TARGET_X86_64
4929 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
4930 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4931 gen_op_mov_reg_T0(OT_QUAD
, R_EAX
);
4935 gen_op_mov_TN_reg(OT_WORD
, 0, R_EAX
);
4936 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
4937 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4939 gen_op_mov_TN_reg(OT_BYTE
, 0, R_EAX
);
4940 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
4941 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4944 case 0x99: /* CDQ/CWD */
4945 #ifdef TARGET_X86_64
4947 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EAX
);
4948 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 63);
4949 gen_op_mov_reg_T0(OT_QUAD
, R_EDX
);
4953 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
4954 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4955 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 31);
4956 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4958 gen_op_mov_TN_reg(OT_WORD
, 0, R_EAX
);
4959 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
4960 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 15);
4961 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
4964 case 0x1af: /* imul Gv, Ev */
4965 case 0x69: /* imul Gv, Ev, I */
4967 ot
= dflag
+ OT_WORD
;
4968 modrm
= cpu_ldub_code(env
, s
->pc
++);
4969 reg
= ((modrm
>> 3) & 7) | rex_r
;
4971 s
->rip_offset
= insn_const_size(ot
);
4974 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4976 val
= insn_get(env
, s
, ot
);
4977 gen_op_movl_T1_im(val
);
4978 } else if (b
== 0x6b) {
4979 val
= (int8_t)insn_get(env
, s
, OT_BYTE
);
4980 gen_op_movl_T1_im(val
);
4982 gen_op_mov_TN_reg(ot
, 1, reg
);
4985 #ifdef TARGET_X86_64
4986 if (ot
== OT_QUAD
) {
4987 gen_helper_imulq_T0_T1(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
4990 if (ot
== OT_LONG
) {
4991 #ifdef TARGET_X86_64
4992 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4993 tcg_gen_ext32s_tl(cpu_T
[1], cpu_T
[1]);
4994 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4995 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4996 tcg_gen_ext32s_tl(cpu_tmp0
, cpu_T
[0]);
4997 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
5001 t0
= tcg_temp_new_i64();
5002 t1
= tcg_temp_new_i64();
5003 tcg_gen_ext_i32_i64(t0
, cpu_T
[0]);
5004 tcg_gen_ext_i32_i64(t1
, cpu_T
[1]);
5005 tcg_gen_mul_i64(t0
, t0
, t1
);
5006 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
5007 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5008 tcg_gen_sari_tl(cpu_tmp0
, cpu_T
[0], 31);
5009 tcg_gen_shri_i64(t0
, t0
, 32);
5010 tcg_gen_trunc_i64_i32(cpu_T
[1], t0
);
5011 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[1], cpu_tmp0
);
5015 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5016 tcg_gen_ext16s_tl(cpu_T
[1], cpu_T
[1]);
5017 /* XXX: use 32 bit mul which could be faster */
5018 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5019 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5020 tcg_gen_ext16s_tl(cpu_tmp0
, cpu_T
[0]);
5021 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
5023 gen_op_mov_reg_T0(ot
, reg
);
5024 set_cc_op(s
, CC_OP_MULB
+ ot
);
5027 case 0x1c1: /* xadd Ev, Gv */
5031 ot
= dflag
+ OT_WORD
;
5032 modrm
= cpu_ldub_code(env
, s
->pc
++);
5033 reg
= ((modrm
>> 3) & 7) | rex_r
;
5034 mod
= (modrm
>> 6) & 3;
5036 rm
= (modrm
& 7) | REX_B(s
);
5037 gen_op_mov_TN_reg(ot
, 0, reg
);
5038 gen_op_mov_TN_reg(ot
, 1, rm
);
5039 gen_op_addl_T0_T1();
5040 gen_op_mov_reg_T1(ot
, reg
);
5041 gen_op_mov_reg_T0(ot
, rm
);
5043 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5044 gen_op_mov_TN_reg(ot
, 0, reg
);
5045 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5046 gen_op_addl_T0_T1();
5047 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5048 gen_op_mov_reg_T1(ot
, reg
);
5050 gen_op_update2_cc();
5051 set_cc_op(s
, CC_OP_ADDB
+ ot
);
5054 case 0x1b1: /* cmpxchg Ev, Gv */
5057 TCGv t0
, t1
, t2
, a0
;
5062 ot
= dflag
+ OT_WORD
;
5063 modrm
= cpu_ldub_code(env
, s
->pc
++);
5064 reg
= ((modrm
>> 3) & 7) | rex_r
;
5065 mod
= (modrm
>> 6) & 3;
5066 t0
= tcg_temp_local_new();
5067 t1
= tcg_temp_local_new();
5068 t2
= tcg_temp_local_new();
5069 a0
= tcg_temp_local_new();
5070 gen_op_mov_v_reg(ot
, t1
, reg
);
5072 rm
= (modrm
& 7) | REX_B(s
);
5073 gen_op_mov_v_reg(ot
, t0
, rm
);
5075 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5076 tcg_gen_mov_tl(a0
, cpu_A0
);
5077 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
5078 rm
= 0; /* avoid warning */
5080 label1
= gen_new_label();
5081 tcg_gen_sub_tl(t2
, cpu_regs
[R_EAX
], t0
);
5083 tcg_gen_brcondi_tl(TCG_COND_EQ
, t2
, 0, label1
);
5084 label2
= gen_new_label();
5086 gen_op_mov_reg_v(ot
, R_EAX
, t0
);
5088 gen_set_label(label1
);
5089 gen_op_mov_reg_v(ot
, rm
, t1
);
5091 /* perform no-op store cycle like physical cpu; must be
5092 before changing accumulator to ensure idempotency if
5093 the store faults and the instruction is restarted */
5094 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
5095 gen_op_mov_reg_v(ot
, R_EAX
, t0
);
5097 gen_set_label(label1
);
5098 gen_op_st_v(ot
+ s
->mem_index
, t1
, a0
);
5100 gen_set_label(label2
);
5101 tcg_gen_mov_tl(cpu_cc_src
, t0
);
5102 tcg_gen_mov_tl(cpu_cc_dst
, t2
);
5103 set_cc_op(s
, CC_OP_SUBB
+ ot
);
5110 case 0x1c7: /* cmpxchg8b */
5111 modrm
= cpu_ldub_code(env
, s
->pc
++);
5112 mod
= (modrm
>> 6) & 3;
5113 if ((mod
== 3) || ((modrm
& 0x38) != 0x8))
5115 #ifdef TARGET_X86_64
5117 if (!(s
->cpuid_ext_features
& CPUID_EXT_CX16
))
5119 gen_jmp_im(pc_start
- s
->cs_base
);
5120 gen_update_cc_op(s
);
5121 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5122 gen_helper_cmpxchg16b(cpu_env
, cpu_A0
);
5126 if (!(s
->cpuid_features
& CPUID_CX8
))
5128 gen_jmp_im(pc_start
- s
->cs_base
);
5129 gen_update_cc_op(s
);
5130 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5131 gen_helper_cmpxchg8b(cpu_env
, cpu_A0
);
5133 set_cc_op(s
, CC_OP_EFLAGS
);
5136 /**************************/
5138 case 0x50 ... 0x57: /* push */
5139 gen_op_mov_TN_reg(OT_LONG
, 0, (b
& 7) | REX_B(s
));
5142 case 0x58 ... 0x5f: /* pop */
5144 ot
= dflag
? OT_QUAD
: OT_WORD
;
5146 ot
= dflag
+ OT_WORD
;
5149 /* NOTE: order is important for pop %sp */
5151 gen_op_mov_reg_T0(ot
, (b
& 7) | REX_B(s
));
5153 case 0x60: /* pusha */
5158 case 0x61: /* popa */
5163 case 0x68: /* push Iv */
5166 ot
= dflag
? OT_QUAD
: OT_WORD
;
5168 ot
= dflag
+ OT_WORD
;
5171 val
= insn_get(env
, s
, ot
);
5173 val
= (int8_t)insn_get(env
, s
, OT_BYTE
);
5174 gen_op_movl_T0_im(val
);
5177 case 0x8f: /* pop Ev */
5179 ot
= dflag
? OT_QUAD
: OT_WORD
;
5181 ot
= dflag
+ OT_WORD
;
5183 modrm
= cpu_ldub_code(env
, s
->pc
++);
5184 mod
= (modrm
>> 6) & 3;
5187 /* NOTE: order is important for pop %sp */
5189 rm
= (modrm
& 7) | REX_B(s
);
5190 gen_op_mov_reg_T0(ot
, rm
);
5192 /* NOTE: order is important too for MMU exceptions */
5193 s
->popl_esp_hack
= 1 << ot
;
5194 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5195 s
->popl_esp_hack
= 0;
5199 case 0xc8: /* enter */
5202 val
= cpu_lduw_code(env
, s
->pc
);
5204 level
= cpu_ldub_code(env
, s
->pc
++);
5205 gen_enter(s
, val
, level
);
5208 case 0xc9: /* leave */
5209 /* XXX: exception not precise (ESP is updated before potential exception) */
5211 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EBP
);
5212 gen_op_mov_reg_T0(OT_QUAD
, R_ESP
);
5213 } else if (s
->ss32
) {
5214 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
5215 gen_op_mov_reg_T0(OT_LONG
, R_ESP
);
5217 gen_op_mov_TN_reg(OT_WORD
, 0, R_EBP
);
5218 gen_op_mov_reg_T0(OT_WORD
, R_ESP
);
5222 ot
= dflag
? OT_QUAD
: OT_WORD
;
5224 ot
= dflag
+ OT_WORD
;
5226 gen_op_mov_reg_T0(ot
, R_EBP
);
5229 case 0x06: /* push es */
5230 case 0x0e: /* push cs */
5231 case 0x16: /* push ss */
5232 case 0x1e: /* push ds */
5235 gen_op_movl_T0_seg(b
>> 3);
5238 case 0x1a0: /* push fs */
5239 case 0x1a8: /* push gs */
5240 gen_op_movl_T0_seg((b
>> 3) & 7);
5243 case 0x07: /* pop es */
5244 case 0x17: /* pop ss */
5245 case 0x1f: /* pop ds */
5250 gen_movl_seg_T0(s
, reg
, pc_start
- s
->cs_base
);
5253 /* if reg == SS, inhibit interrupts/trace. */
5254 /* If several instructions disable interrupts, only the
5256 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
5257 gen_helper_set_inhibit_irq(cpu_env
);
5261 gen_jmp_im(s
->pc
- s
->cs_base
);
5265 case 0x1a1: /* pop fs */
5266 case 0x1a9: /* pop gs */
5268 gen_movl_seg_T0(s
, (b
>> 3) & 7, pc_start
- s
->cs_base
);
5271 gen_jmp_im(s
->pc
- s
->cs_base
);
5276 /**************************/
5279 case 0x89: /* mov Gv, Ev */
5283 ot
= dflag
+ OT_WORD
;
5284 modrm
= cpu_ldub_code(env
, s
->pc
++);
5285 reg
= ((modrm
>> 3) & 7) | rex_r
;
5287 /* generate a generic store */
5288 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
5291 case 0xc7: /* mov Ev, Iv */
5295 ot
= dflag
+ OT_WORD
;
5296 modrm
= cpu_ldub_code(env
, s
->pc
++);
5297 mod
= (modrm
>> 6) & 3;
5299 s
->rip_offset
= insn_const_size(ot
);
5300 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5302 val
= insn_get(env
, s
, ot
);
5303 gen_op_movl_T0_im(val
);
5305 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5307 gen_op_mov_reg_T0(ot
, (modrm
& 7) | REX_B(s
));
5310 case 0x8b: /* mov Ev, Gv */
5314 ot
= OT_WORD
+ dflag
;
5315 modrm
= cpu_ldub_code(env
, s
->pc
++);
5316 reg
= ((modrm
>> 3) & 7) | rex_r
;
5318 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5319 gen_op_mov_reg_T0(ot
, reg
);
5321 case 0x8e: /* mov seg, Gv */
5322 modrm
= cpu_ldub_code(env
, s
->pc
++);
5323 reg
= (modrm
>> 3) & 7;
5324 if (reg
>= 6 || reg
== R_CS
)
5326 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
5327 gen_movl_seg_T0(s
, reg
, pc_start
- s
->cs_base
);
5329 /* if reg == SS, inhibit interrupts/trace */
5330 /* If several instructions disable interrupts, only the
5332 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
5333 gen_helper_set_inhibit_irq(cpu_env
);
5337 gen_jmp_im(s
->pc
- s
->cs_base
);
5341 case 0x8c: /* mov Gv, seg */
5342 modrm
= cpu_ldub_code(env
, s
->pc
++);
5343 reg
= (modrm
>> 3) & 7;
5344 mod
= (modrm
>> 6) & 3;
5347 gen_op_movl_T0_seg(reg
);
5349 ot
= OT_WORD
+ dflag
;
5352 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5355 case 0x1b6: /* movzbS Gv, Eb */
5356 case 0x1b7: /* movzwS Gv, Eb */
5357 case 0x1be: /* movsbS Gv, Eb */
5358 case 0x1bf: /* movswS Gv, Eb */
5361 /* d_ot is the size of destination */
5362 d_ot
= dflag
+ OT_WORD
;
5363 /* ot is the size of source */
5364 ot
= (b
& 1) + OT_BYTE
;
5365 modrm
= cpu_ldub_code(env
, s
->pc
++);
5366 reg
= ((modrm
>> 3) & 7) | rex_r
;
5367 mod
= (modrm
>> 6) & 3;
5368 rm
= (modrm
& 7) | REX_B(s
);
5371 gen_op_mov_TN_reg(ot
, 0, rm
);
5372 switch(ot
| (b
& 8)) {
5374 tcg_gen_ext8u_tl(cpu_T
[0], cpu_T
[0]);
5377 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
5380 tcg_gen_ext16u_tl(cpu_T
[0], cpu_T
[0]);
5384 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5387 gen_op_mov_reg_T0(d_ot
, reg
);
5389 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5391 gen_op_lds_T0_A0(ot
+ s
->mem_index
);
5393 gen_op_ldu_T0_A0(ot
+ s
->mem_index
);
5395 gen_op_mov_reg_T0(d_ot
, reg
);
5400 case 0x8d: /* lea */
5401 ot
= dflag
+ OT_WORD
;
5402 modrm
= cpu_ldub_code(env
, s
->pc
++);
5403 mod
= (modrm
>> 6) & 3;
5406 reg
= ((modrm
>> 3) & 7) | rex_r
;
5407 /* we must ensure that no segment is added */
5411 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5413 gen_op_mov_reg_A0(ot
- OT_WORD
, reg
);
5416 case 0xa0: /* mov EAX, Ov */
5418 case 0xa2: /* mov Ov, EAX */
5421 target_ulong offset_addr
;
5426 ot
= dflag
+ OT_WORD
;
5427 #ifdef TARGET_X86_64
5428 if (s
->aflag
== 2) {
5429 offset_addr
= cpu_ldq_code(env
, s
->pc
);
5431 gen_op_movq_A0_im(offset_addr
);
5436 offset_addr
= insn_get(env
, s
, OT_LONG
);
5438 offset_addr
= insn_get(env
, s
, OT_WORD
);
5440 gen_op_movl_A0_im(offset_addr
);
5442 gen_add_A0_ds_seg(s
);
5444 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
5445 gen_op_mov_reg_T0(ot
, R_EAX
);
5447 gen_op_mov_TN_reg(ot
, 0, R_EAX
);
5448 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5452 case 0xd7: /* xlat */
5453 #ifdef TARGET_X86_64
5454 if (s
->aflag
== 2) {
5455 gen_op_movq_A0_reg(R_EBX
);
5456 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EAX
);
5457 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xff);
5458 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_T
[0]);
5462 gen_op_movl_A0_reg(R_EBX
);
5463 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
5464 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xff);
5465 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_T
[0]);
5467 gen_op_andl_A0_ffff();
5469 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
5471 gen_add_A0_ds_seg(s
);
5472 gen_op_ldu_T0_A0(OT_BYTE
+ s
->mem_index
);
5473 gen_op_mov_reg_T0(OT_BYTE
, R_EAX
);
5475 case 0xb0 ... 0xb7: /* mov R, Ib */
5476 val
= insn_get(env
, s
, OT_BYTE
);
5477 gen_op_movl_T0_im(val
);
5478 gen_op_mov_reg_T0(OT_BYTE
, (b
& 7) | REX_B(s
));
5480 case 0xb8 ... 0xbf: /* mov R, Iv */
5481 #ifdef TARGET_X86_64
5485 tmp
= cpu_ldq_code(env
, s
->pc
);
5487 reg
= (b
& 7) | REX_B(s
);
5488 gen_movtl_T0_im(tmp
);
5489 gen_op_mov_reg_T0(OT_QUAD
, reg
);
5493 ot
= dflag
? OT_LONG
: OT_WORD
;
5494 val
= insn_get(env
, s
, ot
);
5495 reg
= (b
& 7) | REX_B(s
);
5496 gen_op_movl_T0_im(val
);
5497 gen_op_mov_reg_T0(ot
, reg
);
5501 case 0x91 ... 0x97: /* xchg R, EAX */
5503 ot
= dflag
+ OT_WORD
;
5504 reg
= (b
& 7) | REX_B(s
);
5508 case 0x87: /* xchg Ev, Gv */
5512 ot
= dflag
+ OT_WORD
;
5513 modrm
= cpu_ldub_code(env
, s
->pc
++);
5514 reg
= ((modrm
>> 3) & 7) | rex_r
;
5515 mod
= (modrm
>> 6) & 3;
5517 rm
= (modrm
& 7) | REX_B(s
);
5519 gen_op_mov_TN_reg(ot
, 0, reg
);
5520 gen_op_mov_TN_reg(ot
, 1, rm
);
5521 gen_op_mov_reg_T0(ot
, rm
);
5522 gen_op_mov_reg_T1(ot
, reg
);
5524 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5525 gen_op_mov_TN_reg(ot
, 0, reg
);
5526 /* for xchg, lock is implicit */
5527 if (!(prefixes
& PREFIX_LOCK
))
5529 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5530 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5531 if (!(prefixes
& PREFIX_LOCK
))
5532 gen_helper_unlock();
5533 gen_op_mov_reg_T1(ot
, reg
);
5536 case 0xc4: /* les Gv */
5541 case 0xc5: /* lds Gv */
5546 case 0x1b2: /* lss Gv */
5549 case 0x1b4: /* lfs Gv */
5552 case 0x1b5: /* lgs Gv */
5555 ot
= dflag
? OT_LONG
: OT_WORD
;
5556 modrm
= cpu_ldub_code(env
, s
->pc
++);
5557 reg
= ((modrm
>> 3) & 7) | rex_r
;
5558 mod
= (modrm
>> 6) & 3;
5561 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5562 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5563 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
5564 /* load the segment first to handle exceptions properly */
5565 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
5566 gen_movl_seg_T0(s
, op
, pc_start
- s
->cs_base
);
5567 /* then put the data */
5568 gen_op_mov_reg_T1(ot
, reg
);
5570 gen_jmp_im(s
->pc
- s
->cs_base
);
5575 /************************/
5586 ot
= dflag
+ OT_WORD
;
5588 modrm
= cpu_ldub_code(env
, s
->pc
++);
5589 mod
= (modrm
>> 6) & 3;
5590 op
= (modrm
>> 3) & 7;
5596 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5599 opreg
= (modrm
& 7) | REX_B(s
);
5604 gen_shift(s
, op
, ot
, opreg
, OR_ECX
);
5607 shift
= cpu_ldub_code(env
, s
->pc
++);
5609 gen_shifti(s
, op
, ot
, opreg
, shift
);
5624 case 0x1a4: /* shld imm */
5628 case 0x1a5: /* shld cl */
5632 case 0x1ac: /* shrd imm */
5636 case 0x1ad: /* shrd cl */
5640 ot
= dflag
+ OT_WORD
;
5641 modrm
= cpu_ldub_code(env
, s
->pc
++);
5642 mod
= (modrm
>> 6) & 3;
5643 rm
= (modrm
& 7) | REX_B(s
);
5644 reg
= ((modrm
>> 3) & 7) | rex_r
;
5646 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5651 gen_op_mov_TN_reg(ot
, 1, reg
);
5654 val
= cpu_ldub_code(env
, s
->pc
++);
5655 tcg_gen_movi_tl(cpu_T3
, val
);
5657 tcg_gen_mov_tl(cpu_T3
, cpu_regs
[R_ECX
]);
5659 gen_shiftd_rm_T1_T3(s
, ot
, opreg
, op
);
5662 /************************/
5665 if (s
->flags
& (HF_EM_MASK
| HF_TS_MASK
)) {
5666 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5667 /* XXX: what to do if illegal op ? */
5668 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
5671 modrm
= cpu_ldub_code(env
, s
->pc
++);
5672 mod
= (modrm
>> 6) & 3;
5674 op
= ((b
& 7) << 3) | ((modrm
>> 3) & 7);
5677 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5679 case 0x00 ... 0x07: /* fxxxs */
5680 case 0x10 ... 0x17: /* fixxxl */
5681 case 0x20 ... 0x27: /* fxxxl */
5682 case 0x30 ... 0x37: /* fixxx */
5689 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
5690 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5691 gen_helper_flds_FT0(cpu_env
, cpu_tmp2_i32
);
5694 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
5695 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5696 gen_helper_fildl_FT0(cpu_env
, cpu_tmp2_i32
);
5699 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
5700 (s
->mem_index
>> 2) - 1);
5701 gen_helper_fldl_FT0(cpu_env
, cpu_tmp1_i64
);
5705 gen_op_lds_T0_A0(OT_WORD
+ s
->mem_index
);
5706 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5707 gen_helper_fildl_FT0(cpu_env
, cpu_tmp2_i32
);
5711 gen_helper_fp_arith_ST0_FT0(op1
);
5713 /* fcomp needs pop */
5714 gen_helper_fpop(cpu_env
);
5718 case 0x08: /* flds */
5719 case 0x0a: /* fsts */
5720 case 0x0b: /* fstps */
5721 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5722 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5723 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5728 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
5729 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5730 gen_helper_flds_ST0(cpu_env
, cpu_tmp2_i32
);
5733 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
5734 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5735 gen_helper_fildl_ST0(cpu_env
, cpu_tmp2_i32
);
5738 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
5739 (s
->mem_index
>> 2) - 1);
5740 gen_helper_fldl_ST0(cpu_env
, cpu_tmp1_i64
);
5744 gen_op_lds_T0_A0(OT_WORD
+ s
->mem_index
);
5745 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5746 gen_helper_fildl_ST0(cpu_env
, cpu_tmp2_i32
);
5751 /* XXX: the corresponding CPUID bit must be tested ! */
5754 gen_helper_fisttl_ST0(cpu_tmp2_i32
, cpu_env
);
5755 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5756 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
5759 gen_helper_fisttll_ST0(cpu_tmp1_i64
, cpu_env
);
5760 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
5761 (s
->mem_index
>> 2) - 1);
5765 gen_helper_fistt_ST0(cpu_tmp2_i32
, cpu_env
);
5766 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5767 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
5770 gen_helper_fpop(cpu_env
);
5775 gen_helper_fsts_ST0(cpu_tmp2_i32
, cpu_env
);
5776 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5777 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
5780 gen_helper_fistl_ST0(cpu_tmp2_i32
, cpu_env
);
5781 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5782 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
5785 gen_helper_fstl_ST0(cpu_tmp1_i64
, cpu_env
);
5786 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
5787 (s
->mem_index
>> 2) - 1);
5791 gen_helper_fist_ST0(cpu_tmp2_i32
, cpu_env
);
5792 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5793 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
5797 gen_helper_fpop(cpu_env
);
5801 case 0x0c: /* fldenv mem */
5802 gen_update_cc_op(s
);
5803 gen_jmp_im(pc_start
- s
->cs_base
);
5804 gen_helper_fldenv(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
5806 case 0x0d: /* fldcw mem */
5807 gen_op_ld_T0_A0(OT_WORD
+ s
->mem_index
);
5808 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5809 gen_helper_fldcw(cpu_env
, cpu_tmp2_i32
);
5811 case 0x0e: /* fnstenv mem */
5812 gen_update_cc_op(s
);
5813 gen_jmp_im(pc_start
- s
->cs_base
);
5814 gen_helper_fstenv(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
5816 case 0x0f: /* fnstcw mem */
5817 gen_helper_fnstcw(cpu_tmp2_i32
, cpu_env
);
5818 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5819 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
5821 case 0x1d: /* fldt mem */
5822 gen_update_cc_op(s
);
5823 gen_jmp_im(pc_start
- s
->cs_base
);
5824 gen_helper_fldt_ST0(cpu_env
, cpu_A0
);
5826 case 0x1f: /* fstpt mem */
5827 gen_update_cc_op(s
);
5828 gen_jmp_im(pc_start
- s
->cs_base
);
5829 gen_helper_fstt_ST0(cpu_env
, cpu_A0
);
5830 gen_helper_fpop(cpu_env
);
5832 case 0x2c: /* frstor mem */
5833 gen_update_cc_op(s
);
5834 gen_jmp_im(pc_start
- s
->cs_base
);
5835 gen_helper_frstor(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
5837 case 0x2e: /* fnsave mem */
5838 gen_update_cc_op(s
);
5839 gen_jmp_im(pc_start
- s
->cs_base
);
5840 gen_helper_fsave(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
5842 case 0x2f: /* fnstsw mem */
5843 gen_helper_fnstsw(cpu_tmp2_i32
, cpu_env
);
5844 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5845 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
5847 case 0x3c: /* fbld */
5848 gen_update_cc_op(s
);
5849 gen_jmp_im(pc_start
- s
->cs_base
);
5850 gen_helper_fbld_ST0(cpu_env
, cpu_A0
);
5852 case 0x3e: /* fbstp */
5853 gen_update_cc_op(s
);
5854 gen_jmp_im(pc_start
- s
->cs_base
);
5855 gen_helper_fbst_ST0(cpu_env
, cpu_A0
);
5856 gen_helper_fpop(cpu_env
);
5858 case 0x3d: /* fildll */
5859 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
5860 (s
->mem_index
>> 2) - 1);
5861 gen_helper_fildll_ST0(cpu_env
, cpu_tmp1_i64
);
5863 case 0x3f: /* fistpll */
5864 gen_helper_fistll_ST0(cpu_tmp1_i64
, cpu_env
);
5865 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
5866 (s
->mem_index
>> 2) - 1);
5867 gen_helper_fpop(cpu_env
);
5873 /* register float ops */
5877 case 0x08: /* fld sti */
5878 gen_helper_fpush(cpu_env
);
5879 gen_helper_fmov_ST0_STN(cpu_env
,
5880 tcg_const_i32((opreg
+ 1) & 7));
5882 case 0x09: /* fxchg sti */
5883 case 0x29: /* fxchg4 sti, undocumented op */
5884 case 0x39: /* fxchg7 sti, undocumented op */
5885 gen_helper_fxchg_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
5887 case 0x0a: /* grp d9/2 */
5890 /* check exceptions (FreeBSD FPU probe) */
5891 gen_update_cc_op(s
);
5892 gen_jmp_im(pc_start
- s
->cs_base
);
5893 gen_helper_fwait(cpu_env
);
5899 case 0x0c: /* grp d9/4 */
5902 gen_helper_fchs_ST0(cpu_env
);
5905 gen_helper_fabs_ST0(cpu_env
);
5908 gen_helper_fldz_FT0(cpu_env
);
5909 gen_helper_fcom_ST0_FT0(cpu_env
);
5912 gen_helper_fxam_ST0(cpu_env
);
5918 case 0x0d: /* grp d9/5 */
5922 gen_helper_fpush(cpu_env
);
5923 gen_helper_fld1_ST0(cpu_env
);
5926 gen_helper_fpush(cpu_env
);
5927 gen_helper_fldl2t_ST0(cpu_env
);
5930 gen_helper_fpush(cpu_env
);
5931 gen_helper_fldl2e_ST0(cpu_env
);
5934 gen_helper_fpush(cpu_env
);
5935 gen_helper_fldpi_ST0(cpu_env
);
5938 gen_helper_fpush(cpu_env
);
5939 gen_helper_fldlg2_ST0(cpu_env
);
5942 gen_helper_fpush(cpu_env
);
5943 gen_helper_fldln2_ST0(cpu_env
);
5946 gen_helper_fpush(cpu_env
);
5947 gen_helper_fldz_ST0(cpu_env
);
5954 case 0x0e: /* grp d9/6 */
5957 gen_helper_f2xm1(cpu_env
);
5960 gen_helper_fyl2x(cpu_env
);
5963 gen_helper_fptan(cpu_env
);
5965 case 3: /* fpatan */
5966 gen_helper_fpatan(cpu_env
);
5968 case 4: /* fxtract */
5969 gen_helper_fxtract(cpu_env
);
5971 case 5: /* fprem1 */
5972 gen_helper_fprem1(cpu_env
);
5974 case 6: /* fdecstp */
5975 gen_helper_fdecstp(cpu_env
);
5978 case 7: /* fincstp */
5979 gen_helper_fincstp(cpu_env
);
5983 case 0x0f: /* grp d9/7 */
5986 gen_helper_fprem(cpu_env
);
5988 case 1: /* fyl2xp1 */
5989 gen_helper_fyl2xp1(cpu_env
);
5992 gen_helper_fsqrt(cpu_env
);
5994 case 3: /* fsincos */
5995 gen_helper_fsincos(cpu_env
);
5997 case 5: /* fscale */
5998 gen_helper_fscale(cpu_env
);
6000 case 4: /* frndint */
6001 gen_helper_frndint(cpu_env
);
6004 gen_helper_fsin(cpu_env
);
6008 gen_helper_fcos(cpu_env
);
6012 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6013 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6014 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6020 gen_helper_fp_arith_STN_ST0(op1
, opreg
);
6022 gen_helper_fpop(cpu_env
);
6024 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6025 gen_helper_fp_arith_ST0_FT0(op1
);
6029 case 0x02: /* fcom */
6030 case 0x22: /* fcom2, undocumented op */
6031 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6032 gen_helper_fcom_ST0_FT0(cpu_env
);
6034 case 0x03: /* fcomp */
6035 case 0x23: /* fcomp3, undocumented op */
6036 case 0x32: /* fcomp5, undocumented op */
6037 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6038 gen_helper_fcom_ST0_FT0(cpu_env
);
6039 gen_helper_fpop(cpu_env
);
6041 case 0x15: /* da/5 */
6043 case 1: /* fucompp */
6044 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6045 gen_helper_fucom_ST0_FT0(cpu_env
);
6046 gen_helper_fpop(cpu_env
);
6047 gen_helper_fpop(cpu_env
);
6055 case 0: /* feni (287 only, just do nop here) */
6057 case 1: /* fdisi (287 only, just do nop here) */
6060 gen_helper_fclex(cpu_env
);
6062 case 3: /* fninit */
6063 gen_helper_fninit(cpu_env
);
6065 case 4: /* fsetpm (287 only, just do nop here) */
6071 case 0x1d: /* fucomi */
6072 gen_update_cc_op(s
);
6073 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6074 gen_helper_fucomi_ST0_FT0(cpu_env
);
6075 set_cc_op(s
, CC_OP_EFLAGS
);
6077 case 0x1e: /* fcomi */
6078 gen_update_cc_op(s
);
6079 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6080 gen_helper_fcomi_ST0_FT0(cpu_env
);
6081 set_cc_op(s
, CC_OP_EFLAGS
);
6083 case 0x28: /* ffree sti */
6084 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6086 case 0x2a: /* fst sti */
6087 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6089 case 0x2b: /* fstp sti */
6090 case 0x0b: /* fstp1 sti, undocumented op */
6091 case 0x3a: /* fstp8 sti, undocumented op */
6092 case 0x3b: /* fstp9 sti, undocumented op */
6093 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6094 gen_helper_fpop(cpu_env
);
6096 case 0x2c: /* fucom st(i) */
6097 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6098 gen_helper_fucom_ST0_FT0(cpu_env
);
6100 case 0x2d: /* fucomp st(i) */
6101 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6102 gen_helper_fucom_ST0_FT0(cpu_env
);
6103 gen_helper_fpop(cpu_env
);
6105 case 0x33: /* de/3 */
6107 case 1: /* fcompp */
6108 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6109 gen_helper_fcom_ST0_FT0(cpu_env
);
6110 gen_helper_fpop(cpu_env
);
6111 gen_helper_fpop(cpu_env
);
6117 case 0x38: /* ffreep sti, undocumented op */
6118 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6119 gen_helper_fpop(cpu_env
);
6121 case 0x3c: /* df/4 */
6124 gen_helper_fnstsw(cpu_tmp2_i32
, cpu_env
);
6125 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6126 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
6132 case 0x3d: /* fucomip */
6133 gen_update_cc_op(s
);
6134 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6135 gen_helper_fucomi_ST0_FT0(cpu_env
);
6136 gen_helper_fpop(cpu_env
);
6137 set_cc_op(s
, CC_OP_EFLAGS
);
6139 case 0x3e: /* fcomip */
6140 gen_update_cc_op(s
);
6141 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6142 gen_helper_fcomi_ST0_FT0(cpu_env
);
6143 gen_helper_fpop(cpu_env
);
6144 set_cc_op(s
, CC_OP_EFLAGS
);
6146 case 0x10 ... 0x13: /* fcmovxx */
6150 static const uint8_t fcmov_cc
[8] = {
6156 op1
= fcmov_cc
[op
& 3] | (((op
>> 3) & 1) ^ 1);
6157 l1
= gen_new_label();
6158 gen_jcc1(s
, op1
, l1
);
6159 gen_helper_fmov_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
6168 /************************/
6171 case 0xa4: /* movsS */
6176 ot
= dflag
+ OT_WORD
;
6178 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6179 gen_repz_movs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6185 case 0xaa: /* stosS */
6190 ot
= dflag
+ OT_WORD
;
6192 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6193 gen_repz_stos(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6198 case 0xac: /* lodsS */
6203 ot
= dflag
+ OT_WORD
;
6204 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6205 gen_repz_lods(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6210 case 0xae: /* scasS */
6215 ot
= dflag
+ OT_WORD
;
6216 if (prefixes
& PREFIX_REPNZ
) {
6217 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6218 } else if (prefixes
& PREFIX_REPZ
) {
6219 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6225 case 0xa6: /* cmpsS */
6230 ot
= dflag
+ OT_WORD
;
6231 if (prefixes
& PREFIX_REPNZ
) {
6232 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6233 } else if (prefixes
& PREFIX_REPZ
) {
6234 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6239 case 0x6c: /* insS */
6244 ot
= dflag
? OT_LONG
: OT_WORD
;
6245 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6246 gen_op_andl_T0_ffff();
6247 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6248 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
) | 4);
6249 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6250 gen_repz_ins(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6254 gen_jmp(s
, s
->pc
- s
->cs_base
);
6258 case 0x6e: /* outsS */
6263 ot
= dflag
? OT_LONG
: OT_WORD
;
6264 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6265 gen_op_andl_T0_ffff();
6266 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6267 svm_is_rep(prefixes
) | 4);
6268 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6269 gen_repz_outs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6273 gen_jmp(s
, s
->pc
- s
->cs_base
);
6278 /************************/
6286 ot
= dflag
? OT_LONG
: OT_WORD
;
6287 val
= cpu_ldub_code(env
, s
->pc
++);
6288 gen_op_movl_T0_im(val
);
6289 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6290 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6293 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6294 gen_helper_in_func(ot
, cpu_T
[1], cpu_tmp2_i32
);
6295 gen_op_mov_reg_T1(ot
, R_EAX
);
6298 gen_jmp(s
, s
->pc
- s
->cs_base
);
6306 ot
= dflag
? OT_LONG
: OT_WORD
;
6307 val
= cpu_ldub_code(env
, s
->pc
++);
6308 gen_op_movl_T0_im(val
);
6309 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6310 svm_is_rep(prefixes
));
6311 gen_op_mov_TN_reg(ot
, 1, R_EAX
);
6315 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6316 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
6317 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6320 gen_jmp(s
, s
->pc
- s
->cs_base
);
6328 ot
= dflag
? OT_LONG
: OT_WORD
;
6329 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6330 gen_op_andl_T0_ffff();
6331 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6332 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6335 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6336 gen_helper_in_func(ot
, cpu_T
[1], cpu_tmp2_i32
);
6337 gen_op_mov_reg_T1(ot
, R_EAX
);
6340 gen_jmp(s
, s
->pc
- s
->cs_base
);
6348 ot
= dflag
? OT_LONG
: OT_WORD
;
6349 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6350 gen_op_andl_T0_ffff();
6351 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6352 svm_is_rep(prefixes
));
6353 gen_op_mov_TN_reg(ot
, 1, R_EAX
);
6357 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6358 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
6359 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6362 gen_jmp(s
, s
->pc
- s
->cs_base
);
6366 /************************/
6368 case 0xc2: /* ret im */
6369 val
= cpu_ldsw_code(env
, s
->pc
);
6372 if (CODE64(s
) && s
->dflag
)
6374 gen_stack_update(s
, val
+ (2 << s
->dflag
));
6376 gen_op_andl_T0_ffff();
6380 case 0xc3: /* ret */
6384 gen_op_andl_T0_ffff();
6388 case 0xca: /* lret im */
6389 val
= cpu_ldsw_code(env
, s
->pc
);
6392 if (s
->pe
&& !s
->vm86
) {
6393 gen_update_cc_op(s
);
6394 gen_jmp_im(pc_start
- s
->cs_base
);
6395 gen_helper_lret_protected(cpu_env
, tcg_const_i32(s
->dflag
),
6396 tcg_const_i32(val
));
6400 gen_op_ld_T0_A0(1 + s
->dflag
+ s
->mem_index
);
6402 gen_op_andl_T0_ffff();
6403 /* NOTE: keeping EIP updated is not a problem in case of
6407 gen_op_addl_A0_im(2 << s
->dflag
);
6408 gen_op_ld_T0_A0(1 + s
->dflag
+ s
->mem_index
);
6409 gen_op_movl_seg_T0_vm(R_CS
);
6410 /* add stack offset */
6411 gen_stack_update(s
, val
+ (4 << s
->dflag
));
6415 case 0xcb: /* lret */
6418 case 0xcf: /* iret */
6419 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IRET
);
6422 gen_helper_iret_real(cpu_env
, tcg_const_i32(s
->dflag
));
6423 set_cc_op(s
, CC_OP_EFLAGS
);
6424 } else if (s
->vm86
) {
6426 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6428 gen_helper_iret_real(cpu_env
, tcg_const_i32(s
->dflag
));
6429 set_cc_op(s
, CC_OP_EFLAGS
);
6432 gen_update_cc_op(s
);
6433 gen_jmp_im(pc_start
- s
->cs_base
);
6434 gen_helper_iret_protected(cpu_env
, tcg_const_i32(s
->dflag
),
6435 tcg_const_i32(s
->pc
- s
->cs_base
));
6436 set_cc_op(s
, CC_OP_EFLAGS
);
6440 case 0xe8: /* call im */
6443 tval
= (int32_t)insn_get(env
, s
, OT_LONG
);
6445 tval
= (int16_t)insn_get(env
, s
, OT_WORD
);
6446 next_eip
= s
->pc
- s
->cs_base
;
6452 gen_movtl_T0_im(next_eip
);
6457 case 0x9a: /* lcall im */
6459 unsigned int selector
, offset
;
6463 ot
= dflag
? OT_LONG
: OT_WORD
;
6464 offset
= insn_get(env
, s
, ot
);
6465 selector
= insn_get(env
, s
, OT_WORD
);
6467 gen_op_movl_T0_im(selector
);
6468 gen_op_movl_T1_imu(offset
);
6471 case 0xe9: /* jmp im */
6473 tval
= (int32_t)insn_get(env
, s
, OT_LONG
);
6475 tval
= (int16_t)insn_get(env
, s
, OT_WORD
);
6476 tval
+= s
->pc
- s
->cs_base
;
6483 case 0xea: /* ljmp im */
6485 unsigned int selector
, offset
;
6489 ot
= dflag
? OT_LONG
: OT_WORD
;
6490 offset
= insn_get(env
, s
, ot
);
6491 selector
= insn_get(env
, s
, OT_WORD
);
6493 gen_op_movl_T0_im(selector
);
6494 gen_op_movl_T1_imu(offset
);
6497 case 0xeb: /* jmp Jb */
6498 tval
= (int8_t)insn_get(env
, s
, OT_BYTE
);
6499 tval
+= s
->pc
- s
->cs_base
;
6504 case 0x70 ... 0x7f: /* jcc Jb */
6505 tval
= (int8_t)insn_get(env
, s
, OT_BYTE
);
6507 case 0x180 ... 0x18f: /* jcc Jv */
6509 tval
= (int32_t)insn_get(env
, s
, OT_LONG
);
6511 tval
= (int16_t)insn_get(env
, s
, OT_WORD
);
6514 next_eip
= s
->pc
- s
->cs_base
;
6518 gen_jcc(s
, b
, tval
, next_eip
);
6521 case 0x190 ... 0x19f: /* setcc Gv */
6522 modrm
= cpu_ldub_code(env
, s
->pc
++);
6524 gen_ldst_modrm(env
, s
, modrm
, OT_BYTE
, OR_TMP0
, 1);
6526 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6531 ot
= dflag
+ OT_WORD
;
6532 modrm
= cpu_ldub_code(env
, s
->pc
++);
6533 reg
= ((modrm
>> 3) & 7) | rex_r
;
6534 mod
= (modrm
>> 6) & 3;
6535 t0
= tcg_temp_local_new();
6537 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
6538 gen_op_ld_v(ot
+ s
->mem_index
, t0
, cpu_A0
);
6540 rm
= (modrm
& 7) | REX_B(s
);
6541 gen_op_mov_v_reg(ot
, t0
, rm
);
6543 #ifdef TARGET_X86_64
6544 if (ot
== OT_LONG
) {
6545 /* XXX: specific Intel behaviour ? */
6546 l1
= gen_new_label();
6547 gen_jcc1(s
, b
^ 1, l1
);
6548 tcg_gen_mov_tl(cpu_regs
[reg
], t0
);
6550 tcg_gen_ext32u_tl(cpu_regs
[reg
], cpu_regs
[reg
]);
6554 l1
= gen_new_label();
6555 gen_jcc1(s
, b
^ 1, l1
);
6556 gen_op_mov_reg_v(ot
, reg
, t0
);
6563 /************************/
6565 case 0x9c: /* pushf */
6566 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PUSHF
);
6567 if (s
->vm86
&& s
->iopl
!= 3) {
6568 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6570 gen_update_cc_op(s
);
6571 gen_helper_read_eflags(cpu_T
[0], cpu_env
);
6575 case 0x9d: /* popf */
6576 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_POPF
);
6577 if (s
->vm86
&& s
->iopl
!= 3) {
6578 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6583 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6584 tcg_const_i32((TF_MASK
| AC_MASK
|
6589 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6590 tcg_const_i32((TF_MASK
| AC_MASK
|
6592 IF_MASK
| IOPL_MASK
)
6596 if (s
->cpl
<= s
->iopl
) {
6598 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6599 tcg_const_i32((TF_MASK
|
6605 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6606 tcg_const_i32((TF_MASK
|
6615 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6616 tcg_const_i32((TF_MASK
| AC_MASK
|
6617 ID_MASK
| NT_MASK
)));
6619 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6620 tcg_const_i32((TF_MASK
| AC_MASK
|
6627 set_cc_op(s
, CC_OP_EFLAGS
);
6628 /* abort translation because TF/AC flag may change */
6629 gen_jmp_im(s
->pc
- s
->cs_base
);
6633 case 0x9e: /* sahf */
6634 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
6636 gen_op_mov_TN_reg(OT_BYTE
, 0, R_AH
);
6637 gen_compute_eflags(s
);
6638 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, CC_O
);
6639 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
6640 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_T
[0]);
6642 case 0x9f: /* lahf */
6643 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
6645 gen_compute_eflags(s
);
6646 /* Note: gen_compute_eflags() only gives the condition codes */
6647 tcg_gen_ori_tl(cpu_T
[0], cpu_cc_src
, 0x02);
6648 gen_op_mov_reg_T0(OT_BYTE
, R_AH
);
6650 case 0xf5: /* cmc */
6651 gen_compute_eflags(s
);
6652 tcg_gen_xori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
6654 case 0xf8: /* clc */
6655 gen_compute_eflags(s
);
6656 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_C
);
6658 case 0xf9: /* stc */
6659 gen_compute_eflags(s
);
6660 tcg_gen_ori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
6662 case 0xfc: /* cld */
6663 tcg_gen_movi_i32(cpu_tmp2_i32
, 1);
6664 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
6666 case 0xfd: /* std */
6667 tcg_gen_movi_i32(cpu_tmp2_i32
, -1);
6668 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
6671 /************************/
6672 /* bit operations */
6673 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6674 ot
= dflag
+ OT_WORD
;
6675 modrm
= cpu_ldub_code(env
, s
->pc
++);
6676 op
= (modrm
>> 3) & 7;
6677 mod
= (modrm
>> 6) & 3;
6678 rm
= (modrm
& 7) | REX_B(s
);
6681 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
6682 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
6684 gen_op_mov_TN_reg(ot
, 0, rm
);
6687 val
= cpu_ldub_code(env
, s
->pc
++);
6688 gen_op_movl_T1_im(val
);
6693 case 0x1a3: /* bt Gv, Ev */
6696 case 0x1ab: /* bts */
6699 case 0x1b3: /* btr */
6702 case 0x1bb: /* btc */
6705 ot
= dflag
+ OT_WORD
;
6706 modrm
= cpu_ldub_code(env
, s
->pc
++);
6707 reg
= ((modrm
>> 3) & 7) | rex_r
;
6708 mod
= (modrm
>> 6) & 3;
6709 rm
= (modrm
& 7) | REX_B(s
);
6710 gen_op_mov_TN_reg(OT_LONG
, 1, reg
);
6712 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
6713 /* specific case: we need to add a displacement */
6714 gen_exts(ot
, cpu_T
[1]);
6715 tcg_gen_sari_tl(cpu_tmp0
, cpu_T
[1], 3 + ot
);
6716 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, ot
);
6717 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
6718 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
6720 gen_op_mov_TN_reg(ot
, 0, rm
);
6723 tcg_gen_andi_tl(cpu_T
[1], cpu_T
[1], (1 << (3 + ot
)) - 1);
6726 tcg_gen_shr_tl(cpu_cc_src
, cpu_T
[0], cpu_T
[1]);
6727 tcg_gen_movi_tl(cpu_cc_dst
, 0);
6730 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
6731 tcg_gen_movi_tl(cpu_tmp0
, 1);
6732 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
6733 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
6736 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
6737 tcg_gen_movi_tl(cpu_tmp0
, 1);
6738 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
6739 tcg_gen_not_tl(cpu_tmp0
, cpu_tmp0
);
6740 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
6744 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
6745 tcg_gen_movi_tl(cpu_tmp0
, 1);
6746 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
6747 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
6750 set_cc_op(s
, CC_OP_SARB
+ ot
);
6753 gen_op_st_T0_A0(ot
+ s
->mem_index
);
6755 gen_op_mov_reg_T0(ot
, rm
);
6756 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
6757 tcg_gen_movi_tl(cpu_cc_dst
, 0);
6760 case 0x1bc: /* bsf */
6761 case 0x1bd: /* bsr */
6766 ot
= dflag
+ OT_WORD
;
6767 modrm
= cpu_ldub_code(env
, s
->pc
++);
6768 reg
= ((modrm
>> 3) & 7) | rex_r
;
6769 gen_ldst_modrm(env
, s
,modrm
, ot
, OR_TMP0
, 0);
6770 gen_extu(ot
, cpu_T
[0]);
6771 t0
= tcg_temp_local_new();
6772 tcg_gen_mov_tl(t0
, cpu_T
[0]);
6773 if ((b
& 1) && (prefixes
& PREFIX_REPZ
) &&
6774 (s
->cpuid_ext3_features
& CPUID_EXT3_ABM
)) {
6776 case OT_WORD
: gen_helper_lzcnt(cpu_T
[0], t0
,
6777 tcg_const_i32(16)); break;
6778 case OT_LONG
: gen_helper_lzcnt(cpu_T
[0], t0
,
6779 tcg_const_i32(32)); break;
6780 case OT_QUAD
: gen_helper_lzcnt(cpu_T
[0], t0
,
6781 tcg_const_i32(64)); break;
6783 gen_op_mov_reg_T0(ot
, reg
);
6785 label1
= gen_new_label();
6786 tcg_gen_movi_tl(cpu_cc_dst
, 0);
6787 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, 0, label1
);
6789 gen_helper_bsr(cpu_T
[0], t0
);
6791 gen_helper_bsf(cpu_T
[0], t0
);
6793 gen_op_mov_reg_T0(ot
, reg
);
6794 tcg_gen_movi_tl(cpu_cc_dst
, 1);
6795 gen_set_label(label1
);
6796 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
6801 /************************/
6803 case 0x27: /* daa */
6806 gen_update_cc_op(s
);
6807 gen_helper_daa(cpu_env
);
6808 set_cc_op(s
, CC_OP_EFLAGS
);
6810 case 0x2f: /* das */
6813 gen_update_cc_op(s
);
6814 gen_helper_das(cpu_env
);
6815 set_cc_op(s
, CC_OP_EFLAGS
);
6817 case 0x37: /* aaa */
6820 gen_update_cc_op(s
);
6821 gen_helper_aaa(cpu_env
);
6822 set_cc_op(s
, CC_OP_EFLAGS
);
6824 case 0x3f: /* aas */
6827 gen_update_cc_op(s
);
6828 gen_helper_aas(cpu_env
);
6829 set_cc_op(s
, CC_OP_EFLAGS
);
6831 case 0xd4: /* aam */
6834 val
= cpu_ldub_code(env
, s
->pc
++);
6836 gen_exception(s
, EXCP00_DIVZ
, pc_start
- s
->cs_base
);
6838 gen_helper_aam(cpu_env
, tcg_const_i32(val
));
6839 set_cc_op(s
, CC_OP_LOGICB
);
6842 case 0xd5: /* aad */
6845 val
= cpu_ldub_code(env
, s
->pc
++);
6846 gen_helper_aad(cpu_env
, tcg_const_i32(val
));
6847 set_cc_op(s
, CC_OP_LOGICB
);
6849 /************************/
6851 case 0x90: /* nop */
6852 /* XXX: correct lock test for all insn */
6853 if (prefixes
& PREFIX_LOCK
) {
6856 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6858 goto do_xchg_reg_eax
;
6860 if (prefixes
& PREFIX_REPZ
) {
6861 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PAUSE
);
6864 case 0x9b: /* fwait */
6865 if ((s
->flags
& (HF_MP_MASK
| HF_TS_MASK
)) ==
6866 (HF_MP_MASK
| HF_TS_MASK
)) {
6867 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
6869 gen_update_cc_op(s
);
6870 gen_jmp_im(pc_start
- s
->cs_base
);
6871 gen_helper_fwait(cpu_env
);
6874 case 0xcc: /* int3 */
6875 gen_interrupt(s
, EXCP03_INT3
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6877 case 0xcd: /* int N */
6878 val
= cpu_ldub_code(env
, s
->pc
++);
6879 if (s
->vm86
&& s
->iopl
!= 3) {
6880 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6882 gen_interrupt(s
, val
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6885 case 0xce: /* into */
6888 gen_update_cc_op(s
);
6889 gen_jmp_im(pc_start
- s
->cs_base
);
6890 gen_helper_into(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
6893 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6894 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_ICEBP
);
6896 gen_debug(s
, pc_start
- s
->cs_base
);
6900 qemu_set_log(CPU_LOG_INT
| CPU_LOG_TB_IN_ASM
);
6904 case 0xfa: /* cli */
6906 if (s
->cpl
<= s
->iopl
) {
6907 gen_helper_cli(cpu_env
);
6909 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6913 gen_helper_cli(cpu_env
);
6915 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6919 case 0xfb: /* sti */
6921 if (s
->cpl
<= s
->iopl
) {
6923 gen_helper_sti(cpu_env
);
6924 /* interruptions are enabled only the first insn after sti */
6925 /* If several instructions disable interrupts, only the
6927 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
6928 gen_helper_set_inhibit_irq(cpu_env
);
6929 /* give a chance to handle pending irqs */
6930 gen_jmp_im(s
->pc
- s
->cs_base
);
6933 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6939 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6943 case 0x62: /* bound */
6946 ot
= dflag
? OT_LONG
: OT_WORD
;
6947 modrm
= cpu_ldub_code(env
, s
->pc
++);
6948 reg
= (modrm
>> 3) & 7;
6949 mod
= (modrm
>> 6) & 3;
6952 gen_op_mov_TN_reg(ot
, 0, reg
);
6953 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
6954 gen_jmp_im(pc_start
- s
->cs_base
);
6955 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6956 if (ot
== OT_WORD
) {
6957 gen_helper_boundw(cpu_env
, cpu_A0
, cpu_tmp2_i32
);
6959 gen_helper_boundl(cpu_env
, cpu_A0
, cpu_tmp2_i32
);
6962 case 0x1c8 ... 0x1cf: /* bswap reg */
6963 reg
= (b
& 7) | REX_B(s
);
6964 #ifdef TARGET_X86_64
6966 gen_op_mov_TN_reg(OT_QUAD
, 0, reg
);
6967 tcg_gen_bswap64_i64(cpu_T
[0], cpu_T
[0]);
6968 gen_op_mov_reg_T0(OT_QUAD
, reg
);
6972 gen_op_mov_TN_reg(OT_LONG
, 0, reg
);
6973 tcg_gen_ext32u_tl(cpu_T
[0], cpu_T
[0]);
6974 tcg_gen_bswap32_tl(cpu_T
[0], cpu_T
[0]);
6975 gen_op_mov_reg_T0(OT_LONG
, reg
);
6978 case 0xd6: /* salc */
6981 gen_compute_eflags_c(s
, cpu_T
[0], false);
6982 tcg_gen_neg_tl(cpu_T
[0], cpu_T
[0]);
6983 gen_op_mov_reg_T0(OT_BYTE
, R_EAX
);
6985 case 0xe0: /* loopnz */
6986 case 0xe1: /* loopz */
6987 case 0xe2: /* loop */
6988 case 0xe3: /* jecxz */
6992 tval
= (int8_t)insn_get(env
, s
, OT_BYTE
);
6993 next_eip
= s
->pc
- s
->cs_base
;
6998 l1
= gen_new_label();
6999 l2
= gen_new_label();
7000 l3
= gen_new_label();
7003 case 0: /* loopnz */
7005 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
7006 gen_op_jz_ecx(s
->aflag
, l3
);
7007 gen_jcc1(s
, (JCC_Z
<< 1) | (b
^ 1), l1
);
7010 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
7011 gen_op_jnz_ecx(s
->aflag
, l1
);
7015 gen_op_jz_ecx(s
->aflag
, l1
);
7020 gen_jmp_im(next_eip
);
7029 case 0x130: /* wrmsr */
7030 case 0x132: /* rdmsr */
7032 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7034 gen_update_cc_op(s
);
7035 gen_jmp_im(pc_start
- s
->cs_base
);
7037 gen_helper_rdmsr(cpu_env
);
7039 gen_helper_wrmsr(cpu_env
);
7043 case 0x131: /* rdtsc */
7044 gen_update_cc_op(s
);
7045 gen_jmp_im(pc_start
- s
->cs_base
);
7048 gen_helper_rdtsc(cpu_env
);
7051 gen_jmp(s
, s
->pc
- s
->cs_base
);
7054 case 0x133: /* rdpmc */
7055 gen_update_cc_op(s
);
7056 gen_jmp_im(pc_start
- s
->cs_base
);
7057 gen_helper_rdpmc(cpu_env
);
7059 case 0x134: /* sysenter */
7060 /* For Intel SYSENTER is valid on 64-bit */
7061 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
7064 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7066 gen_update_cc_op(s
);
7067 gen_jmp_im(pc_start
- s
->cs_base
);
7068 gen_helper_sysenter(cpu_env
);
7072 case 0x135: /* sysexit */
7073 /* For Intel SYSEXIT is valid on 64-bit */
7074 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
7077 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7079 gen_update_cc_op(s
);
7080 gen_jmp_im(pc_start
- s
->cs_base
);
7081 gen_helper_sysexit(cpu_env
, tcg_const_i32(dflag
));
7085 #ifdef TARGET_X86_64
7086 case 0x105: /* syscall */
7087 /* XXX: is it usable in real mode ? */
7088 gen_update_cc_op(s
);
7089 gen_jmp_im(pc_start
- s
->cs_base
);
7090 gen_helper_syscall(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7093 case 0x107: /* sysret */
7095 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7097 gen_update_cc_op(s
);
7098 gen_jmp_im(pc_start
- s
->cs_base
);
7099 gen_helper_sysret(cpu_env
, tcg_const_i32(s
->dflag
));
7100 /* condition codes are modified only in long mode */
7102 set_cc_op(s
, CC_OP_EFLAGS
);
7108 case 0x1a2: /* cpuid */
7109 gen_update_cc_op(s
);
7110 gen_jmp_im(pc_start
- s
->cs_base
);
7111 gen_helper_cpuid(cpu_env
);
7113 case 0xf4: /* hlt */
7115 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7117 gen_update_cc_op(s
);
7118 gen_jmp_im(pc_start
- s
->cs_base
);
7119 gen_helper_hlt(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7120 s
->is_jmp
= DISAS_TB_JUMP
;
7124 modrm
= cpu_ldub_code(env
, s
->pc
++);
7125 mod
= (modrm
>> 6) & 3;
7126 op
= (modrm
>> 3) & 7;
7129 if (!s
->pe
|| s
->vm86
)
7131 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_READ
);
7132 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,ldt
.selector
));
7136 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7139 if (!s
->pe
|| s
->vm86
)
7142 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7144 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_WRITE
);
7145 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7146 gen_jmp_im(pc_start
- s
->cs_base
);
7147 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7148 gen_helper_lldt(cpu_env
, cpu_tmp2_i32
);
7152 if (!s
->pe
|| s
->vm86
)
7154 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_READ
);
7155 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,tr
.selector
));
7159 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7162 if (!s
->pe
|| s
->vm86
)
7165 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7167 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_WRITE
);
7168 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7169 gen_jmp_im(pc_start
- s
->cs_base
);
7170 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7171 gen_helper_ltr(cpu_env
, cpu_tmp2_i32
);
7176 if (!s
->pe
|| s
->vm86
)
7178 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7179 gen_update_cc_op(s
);
7181 gen_helper_verr(cpu_env
, cpu_T
[0]);
7183 gen_helper_verw(cpu_env
, cpu_T
[0]);
7185 set_cc_op(s
, CC_OP_EFLAGS
);
7192 modrm
= cpu_ldub_code(env
, s
->pc
++);
7193 mod
= (modrm
>> 6) & 3;
7194 op
= (modrm
>> 3) & 7;
7200 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_GDTR_READ
);
7201 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7202 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, gdt
.limit
));
7203 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
7204 gen_add_A0_im(s
, 2);
7205 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, gdt
.base
));
7207 gen_op_andl_T0_im(0xffffff);
7208 gen_op_st_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7213 case 0: /* monitor */
7214 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) ||
7217 gen_update_cc_op(s
);
7218 gen_jmp_im(pc_start
- s
->cs_base
);
7219 #ifdef TARGET_X86_64
7220 if (s
->aflag
== 2) {
7221 gen_op_movq_A0_reg(R_EAX
);
7225 gen_op_movl_A0_reg(R_EAX
);
7227 gen_op_andl_A0_ffff();
7229 gen_add_A0_ds_seg(s
);
7230 gen_helper_monitor(cpu_env
, cpu_A0
);
7233 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) ||
7236 gen_update_cc_op(s
);
7237 gen_jmp_im(pc_start
- s
->cs_base
);
7238 gen_helper_mwait(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7242 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
) ||
7246 gen_helper_clac(cpu_env
);
7247 gen_jmp_im(s
->pc
- s
->cs_base
);
7251 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
) ||
7255 gen_helper_stac(cpu_env
);
7256 gen_jmp_im(s
->pc
- s
->cs_base
);
7263 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IDTR_READ
);
7264 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7265 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, idt
.limit
));
7266 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
7267 gen_add_A0_im(s
, 2);
7268 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, idt
.base
));
7270 gen_op_andl_T0_im(0xffffff);
7271 gen_op_st_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7277 gen_update_cc_op(s
);
7278 gen_jmp_im(pc_start
- s
->cs_base
);
7281 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7284 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7287 gen_helper_vmrun(cpu_env
, tcg_const_i32(s
->aflag
),
7288 tcg_const_i32(s
->pc
- pc_start
));
7290 s
->is_jmp
= DISAS_TB_JUMP
;
7293 case 1: /* VMMCALL */
7294 if (!(s
->flags
& HF_SVME_MASK
))
7296 gen_helper_vmmcall(cpu_env
);
7298 case 2: /* VMLOAD */
7299 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7302 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7305 gen_helper_vmload(cpu_env
, tcg_const_i32(s
->aflag
));
7308 case 3: /* VMSAVE */
7309 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7312 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7315 gen_helper_vmsave(cpu_env
, tcg_const_i32(s
->aflag
));
7319 if ((!(s
->flags
& HF_SVME_MASK
) &&
7320 !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
)) ||
7324 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7327 gen_helper_stgi(cpu_env
);
7331 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7334 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7337 gen_helper_clgi(cpu_env
);
7340 case 6: /* SKINIT */
7341 if ((!(s
->flags
& HF_SVME_MASK
) &&
7342 !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
)) ||
7345 gen_helper_skinit(cpu_env
);
7347 case 7: /* INVLPGA */
7348 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7351 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7354 gen_helper_invlpga(cpu_env
, tcg_const_i32(s
->aflag
));
7360 } else if (s
->cpl
!= 0) {
7361 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7363 gen_svm_check_intercept(s
, pc_start
,
7364 op
==2 ? SVM_EXIT_GDTR_WRITE
: SVM_EXIT_IDTR_WRITE
);
7365 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7366 gen_op_ld_T1_A0(OT_WORD
+ s
->mem_index
);
7367 gen_add_A0_im(s
, 2);
7368 gen_op_ld_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7370 gen_op_andl_T0_im(0xffffff);
7372 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,gdt
.base
));
7373 tcg_gen_st32_tl(cpu_T
[1], cpu_env
, offsetof(CPUX86State
,gdt
.limit
));
7375 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,idt
.base
));
7376 tcg_gen_st32_tl(cpu_T
[1], cpu_env
, offsetof(CPUX86State
,idt
.limit
));
7381 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_READ_CR0
);
7382 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7383 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,cr
[0]) + 4);
7385 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,cr
[0]));
7387 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 1);
7391 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7393 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_CR0
);
7394 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7395 gen_helper_lmsw(cpu_env
, cpu_T
[0]);
7396 gen_jmp_im(s
->pc
- s
->cs_base
);
7401 if (mod
!= 3) { /* invlpg */
7403 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7405 gen_update_cc_op(s
);
7406 gen_jmp_im(pc_start
- s
->cs_base
);
7407 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7408 gen_helper_invlpg(cpu_env
, cpu_A0
);
7409 gen_jmp_im(s
->pc
- s
->cs_base
);
7414 case 0: /* swapgs */
7415 #ifdef TARGET_X86_64
7418 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7420 tcg_gen_ld_tl(cpu_T
[0], cpu_env
,
7421 offsetof(CPUX86State
,segs
[R_GS
].base
));
7422 tcg_gen_ld_tl(cpu_T
[1], cpu_env
,
7423 offsetof(CPUX86State
,kernelgsbase
));
7424 tcg_gen_st_tl(cpu_T
[1], cpu_env
,
7425 offsetof(CPUX86State
,segs
[R_GS
].base
));
7426 tcg_gen_st_tl(cpu_T
[0], cpu_env
,
7427 offsetof(CPUX86State
,kernelgsbase
));
7435 case 1: /* rdtscp */
7436 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_RDTSCP
))
7438 gen_update_cc_op(s
);
7439 gen_jmp_im(pc_start
- s
->cs_base
);
7442 gen_helper_rdtscp(cpu_env
);
7445 gen_jmp(s
, s
->pc
- s
->cs_base
);
7457 case 0x108: /* invd */
7458 case 0x109: /* wbinvd */
7460 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7462 gen_svm_check_intercept(s
, pc_start
, (b
& 2) ? SVM_EXIT_INVD
: SVM_EXIT_WBINVD
);
7466 case 0x63: /* arpl or movslS (x86_64) */
7467 #ifdef TARGET_X86_64
7470 /* d_ot is the size of destination */
7471 d_ot
= dflag
+ OT_WORD
;
7473 modrm
= cpu_ldub_code(env
, s
->pc
++);
7474 reg
= ((modrm
>> 3) & 7) | rex_r
;
7475 mod
= (modrm
>> 6) & 3;
7476 rm
= (modrm
& 7) | REX_B(s
);
7479 gen_op_mov_TN_reg(OT_LONG
, 0, rm
);
7481 if (d_ot
== OT_QUAD
)
7482 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
7483 gen_op_mov_reg_T0(d_ot
, reg
);
7485 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7486 if (d_ot
== OT_QUAD
) {
7487 gen_op_lds_T0_A0(OT_LONG
+ s
->mem_index
);
7489 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
7491 gen_op_mov_reg_T0(d_ot
, reg
);
7497 TCGv t0
, t1
, t2
, a0
;
7499 if (!s
->pe
|| s
->vm86
)
7501 t0
= tcg_temp_local_new();
7502 t1
= tcg_temp_local_new();
7503 t2
= tcg_temp_local_new();
7505 modrm
= cpu_ldub_code(env
, s
->pc
++);
7506 reg
= (modrm
>> 3) & 7;
7507 mod
= (modrm
>> 6) & 3;
7510 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7511 gen_op_ld_v(ot
+ s
->mem_index
, t0
, cpu_A0
);
7512 a0
= tcg_temp_local_new();
7513 tcg_gen_mov_tl(a0
, cpu_A0
);
7515 gen_op_mov_v_reg(ot
, t0
, rm
);
7518 gen_op_mov_v_reg(ot
, t1
, reg
);
7519 tcg_gen_andi_tl(cpu_tmp0
, t0
, 3);
7520 tcg_gen_andi_tl(t1
, t1
, 3);
7521 tcg_gen_movi_tl(t2
, 0);
7522 label1
= gen_new_label();
7523 tcg_gen_brcond_tl(TCG_COND_GE
, cpu_tmp0
, t1
, label1
);
7524 tcg_gen_andi_tl(t0
, t0
, ~3);
7525 tcg_gen_or_tl(t0
, t0
, t1
);
7526 tcg_gen_movi_tl(t2
, CC_Z
);
7527 gen_set_label(label1
);
7529 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
7532 gen_op_mov_reg_v(ot
, rm
, t0
);
7534 gen_compute_eflags(s
);
7535 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_Z
);
7536 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t2
);
7542 case 0x102: /* lar */
7543 case 0x103: /* lsl */
7547 if (!s
->pe
|| s
->vm86
)
7549 ot
= dflag
? OT_LONG
: OT_WORD
;
7550 modrm
= cpu_ldub_code(env
, s
->pc
++);
7551 reg
= ((modrm
>> 3) & 7) | rex_r
;
7552 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7553 t0
= tcg_temp_local_new();
7554 gen_update_cc_op(s
);
7556 gen_helper_lar(t0
, cpu_env
, cpu_T
[0]);
7558 gen_helper_lsl(t0
, cpu_env
, cpu_T
[0]);
7560 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_src
, CC_Z
);
7561 label1
= gen_new_label();
7562 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_tmp0
, 0, label1
);
7563 gen_op_mov_reg_v(ot
, reg
, t0
);
7564 gen_set_label(label1
);
7565 set_cc_op(s
, CC_OP_EFLAGS
);
7570 modrm
= cpu_ldub_code(env
, s
->pc
++);
7571 mod
= (modrm
>> 6) & 3;
7572 op
= (modrm
>> 3) & 7;
7574 case 0: /* prefetchnta */
7575 case 1: /* prefetchnt0 */
7576 case 2: /* prefetchnt0 */
7577 case 3: /* prefetchnt0 */
7580 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7581 /* nothing more to do */
7583 default: /* nop (multi byte) */
7584 gen_nop_modrm(env
, s
, modrm
);
7588 case 0x119 ... 0x11f: /* nop (multi byte) */
7589 modrm
= cpu_ldub_code(env
, s
->pc
++);
7590 gen_nop_modrm(env
, s
, modrm
);
7592 case 0x120: /* mov reg, crN */
7593 case 0x122: /* mov crN, reg */
7595 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7597 modrm
= cpu_ldub_code(env
, s
->pc
++);
7598 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7599 * AMD documentation (24594.pdf) and testing of
7600 * intel 386 and 486 processors all show that the mod bits
7601 * are assumed to be 1's, regardless of actual values.
7603 rm
= (modrm
& 7) | REX_B(s
);
7604 reg
= ((modrm
>> 3) & 7) | rex_r
;
7609 if ((prefixes
& PREFIX_LOCK
) && (reg
== 0) &&
7610 (s
->cpuid_ext3_features
& CPUID_EXT3_CR8LEG
)) {
7619 gen_update_cc_op(s
);
7620 gen_jmp_im(pc_start
- s
->cs_base
);
7622 gen_op_mov_TN_reg(ot
, 0, rm
);
7623 gen_helper_write_crN(cpu_env
, tcg_const_i32(reg
),
7625 gen_jmp_im(s
->pc
- s
->cs_base
);
7628 gen_helper_read_crN(cpu_T
[0], cpu_env
, tcg_const_i32(reg
));
7629 gen_op_mov_reg_T0(ot
, rm
);
7637 case 0x121: /* mov reg, drN */
7638 case 0x123: /* mov drN, reg */
7640 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7642 modrm
= cpu_ldub_code(env
, s
->pc
++);
7643 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7644 * AMD documentation (24594.pdf) and testing of
7645 * intel 386 and 486 processors all show that the mod bits
7646 * are assumed to be 1's, regardless of actual values.
7648 rm
= (modrm
& 7) | REX_B(s
);
7649 reg
= ((modrm
>> 3) & 7) | rex_r
;
7654 /* XXX: do it dynamically with CR4.DE bit */
7655 if (reg
== 4 || reg
== 5 || reg
>= 8)
7658 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_DR0
+ reg
);
7659 gen_op_mov_TN_reg(ot
, 0, rm
);
7660 gen_helper_movl_drN_T0(cpu_env
, tcg_const_i32(reg
), cpu_T
[0]);
7661 gen_jmp_im(s
->pc
- s
->cs_base
);
7664 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_READ_DR0
+ reg
);
7665 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,dr
[reg
]));
7666 gen_op_mov_reg_T0(ot
, rm
);
7670 case 0x106: /* clts */
7672 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7674 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_CR0
);
7675 gen_helper_clts(cpu_env
);
7676 /* abort block because static cpu state changed */
7677 gen_jmp_im(s
->pc
- s
->cs_base
);
7681 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7682 case 0x1c3: /* MOVNTI reg, mem */
7683 if (!(s
->cpuid_features
& CPUID_SSE2
))
7685 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
7686 modrm
= cpu_ldub_code(env
, s
->pc
++);
7687 mod
= (modrm
>> 6) & 3;
7690 reg
= ((modrm
>> 3) & 7) | rex_r
;
7691 /* generate a generic store */
7692 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
7695 modrm
= cpu_ldub_code(env
, s
->pc
++);
7696 mod
= (modrm
>> 6) & 3;
7697 op
= (modrm
>> 3) & 7;
7699 case 0: /* fxsave */
7700 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
) ||
7701 (s
->prefix
& PREFIX_LOCK
))
7703 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
7704 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7707 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7708 gen_update_cc_op(s
);
7709 gen_jmp_im(pc_start
- s
->cs_base
);
7710 gen_helper_fxsave(cpu_env
, cpu_A0
, tcg_const_i32((s
->dflag
== 2)));
7712 case 1: /* fxrstor */
7713 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
) ||
7714 (s
->prefix
& PREFIX_LOCK
))
7716 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
7717 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7720 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7721 gen_update_cc_op(s
);
7722 gen_jmp_im(pc_start
- s
->cs_base
);
7723 gen_helper_fxrstor(cpu_env
, cpu_A0
,
7724 tcg_const_i32((s
->dflag
== 2)));
7726 case 2: /* ldmxcsr */
7727 case 3: /* stmxcsr */
7728 if (s
->flags
& HF_TS_MASK
) {
7729 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7732 if ((s
->flags
& HF_EM_MASK
) || !(s
->flags
& HF_OSFXSR_MASK
) ||
7735 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7737 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
7738 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7739 gen_helper_ldmxcsr(cpu_env
, cpu_tmp2_i32
);
7741 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, mxcsr
));
7742 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
7745 case 5: /* lfence */
7746 case 6: /* mfence */
7747 if ((modrm
& 0xc7) != 0xc0 || !(s
->cpuid_features
& CPUID_SSE2
))
7750 case 7: /* sfence / clflush */
7751 if ((modrm
& 0xc7) == 0xc0) {
7753 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7754 if (!(s
->cpuid_features
& CPUID_SSE
))
7758 if (!(s
->cpuid_features
& CPUID_CLFLUSH
))
7760 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7767 case 0x10d: /* 3DNow! prefetch(w) */
7768 modrm
= cpu_ldub_code(env
, s
->pc
++);
7769 mod
= (modrm
>> 6) & 3;
7772 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7773 /* ignore for now */
7775 case 0x1aa: /* rsm */
7776 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_RSM
);
7777 if (!(s
->flags
& HF_SMM_MASK
))
7779 gen_update_cc_op(s
);
7780 gen_jmp_im(s
->pc
- s
->cs_base
);
7781 gen_helper_rsm(cpu_env
);
7784 case 0x1b8: /* SSE4.2 popcnt */
7785 if ((prefixes
& (PREFIX_REPZ
| PREFIX_LOCK
| PREFIX_REPNZ
)) !=
7788 if (!(s
->cpuid_ext_features
& CPUID_EXT_POPCNT
))
7791 modrm
= cpu_ldub_code(env
, s
->pc
++);
7792 reg
= ((modrm
>> 3) & 7) | rex_r
;
7794 if (s
->prefix
& PREFIX_DATA
)
7796 else if (s
->dflag
!= 2)
7801 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
7802 gen_helper_popcnt(cpu_T
[0], cpu_env
, cpu_T
[0], tcg_const_i32(ot
));
7803 gen_op_mov_reg_T0(ot
, reg
);
7805 set_cc_op(s
, CC_OP_EFLAGS
);
7807 case 0x10e ... 0x10f:
7808 /* 3DNow! instructions, ignore prefixes */
7809 s
->prefix
&= ~(PREFIX_REPZ
| PREFIX_REPNZ
| PREFIX_DATA
);
7810 case 0x110 ... 0x117:
7811 case 0x128 ... 0x12f:
7812 case 0x138 ... 0x13a:
7813 case 0x150 ... 0x179:
7814 case 0x17c ... 0x17f:
7816 case 0x1c4 ... 0x1c6:
7817 case 0x1d0 ... 0x1fe:
7818 gen_sse(env
, s
, b
, pc_start
, rex_r
);
7823 /* lock generation */
7824 if (s
->prefix
& PREFIX_LOCK
)
7825 gen_helper_unlock();
7828 if (s
->prefix
& PREFIX_LOCK
)
7829 gen_helper_unlock();
7830 /* XXX: ensure that no lock was generated */
7831 gen_exception(s
, EXCP06_ILLOP
, pc_start
- s
->cs_base
);
7835 void optimize_flags_init(void)
7837 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
7838 cpu_cc_op
= tcg_global_mem_new_i32(TCG_AREG0
,
7839 offsetof(CPUX86State
, cc_op
), "cc_op");
7840 cpu_cc_src
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUX86State
, cc_src
),
7842 cpu_cc_dst
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUX86State
, cc_dst
),
7845 #ifdef TARGET_X86_64
7846 cpu_regs
[R_EAX
] = tcg_global_mem_new_i64(TCG_AREG0
,
7847 offsetof(CPUX86State
, regs
[R_EAX
]), "rax");
7848 cpu_regs
[R_ECX
] = tcg_global_mem_new_i64(TCG_AREG0
,
7849 offsetof(CPUX86State
, regs
[R_ECX
]), "rcx");
7850 cpu_regs
[R_EDX
] = tcg_global_mem_new_i64(TCG_AREG0
,
7851 offsetof(CPUX86State
, regs
[R_EDX
]), "rdx");
7852 cpu_regs
[R_EBX
] = tcg_global_mem_new_i64(TCG_AREG0
,
7853 offsetof(CPUX86State
, regs
[R_EBX
]), "rbx");
7854 cpu_regs
[R_ESP
] = tcg_global_mem_new_i64(TCG_AREG0
,
7855 offsetof(CPUX86State
, regs
[R_ESP
]), "rsp");
7856 cpu_regs
[R_EBP
] = tcg_global_mem_new_i64(TCG_AREG0
,
7857 offsetof(CPUX86State
, regs
[R_EBP
]), "rbp");
7858 cpu_regs
[R_ESI
] = tcg_global_mem_new_i64(TCG_AREG0
,
7859 offsetof(CPUX86State
, regs
[R_ESI
]), "rsi");
7860 cpu_regs
[R_EDI
] = tcg_global_mem_new_i64(TCG_AREG0
,
7861 offsetof(CPUX86State
, regs
[R_EDI
]), "rdi");
7862 cpu_regs
[8] = tcg_global_mem_new_i64(TCG_AREG0
,
7863 offsetof(CPUX86State
, regs
[8]), "r8");
7864 cpu_regs
[9] = tcg_global_mem_new_i64(TCG_AREG0
,
7865 offsetof(CPUX86State
, regs
[9]), "r9");
7866 cpu_regs
[10] = tcg_global_mem_new_i64(TCG_AREG0
,
7867 offsetof(CPUX86State
, regs
[10]), "r10");
7868 cpu_regs
[11] = tcg_global_mem_new_i64(TCG_AREG0
,
7869 offsetof(CPUX86State
, regs
[11]), "r11");
7870 cpu_regs
[12] = tcg_global_mem_new_i64(TCG_AREG0
,
7871 offsetof(CPUX86State
, regs
[12]), "r12");
7872 cpu_regs
[13] = tcg_global_mem_new_i64(TCG_AREG0
,
7873 offsetof(CPUX86State
, regs
[13]), "r13");
7874 cpu_regs
[14] = tcg_global_mem_new_i64(TCG_AREG0
,
7875 offsetof(CPUX86State
, regs
[14]), "r14");
7876 cpu_regs
[15] = tcg_global_mem_new_i64(TCG_AREG0
,
7877 offsetof(CPUX86State
, regs
[15]), "r15");
7879 cpu_regs
[R_EAX
] = tcg_global_mem_new_i32(TCG_AREG0
,
7880 offsetof(CPUX86State
, regs
[R_EAX
]), "eax");
7881 cpu_regs
[R_ECX
] = tcg_global_mem_new_i32(TCG_AREG0
,
7882 offsetof(CPUX86State
, regs
[R_ECX
]), "ecx");
7883 cpu_regs
[R_EDX
] = tcg_global_mem_new_i32(TCG_AREG0
,
7884 offsetof(CPUX86State
, regs
[R_EDX
]), "edx");
7885 cpu_regs
[R_EBX
] = tcg_global_mem_new_i32(TCG_AREG0
,
7886 offsetof(CPUX86State
, regs
[R_EBX
]), "ebx");
7887 cpu_regs
[R_ESP
] = tcg_global_mem_new_i32(TCG_AREG0
,
7888 offsetof(CPUX86State
, regs
[R_ESP
]), "esp");
7889 cpu_regs
[R_EBP
] = tcg_global_mem_new_i32(TCG_AREG0
,
7890 offsetof(CPUX86State
, regs
[R_EBP
]), "ebp");
7891 cpu_regs
[R_ESI
] = tcg_global_mem_new_i32(TCG_AREG0
,
7892 offsetof(CPUX86State
, regs
[R_ESI
]), "esi");
7893 cpu_regs
[R_EDI
] = tcg_global_mem_new_i32(TCG_AREG0
,
7894 offsetof(CPUX86State
, regs
[R_EDI
]), "edi");
7897 /* register helpers */
7898 #define GEN_HELPER 2
7902 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7903 basic block 'tb'. If search_pc is TRUE, also generate PC
7904 information for each intermediate instruction. */
7905 static inline void gen_intermediate_code_internal(CPUX86State
*env
,
7906 TranslationBlock
*tb
,
7909 DisasContext dc1
, *dc
= &dc1
;
7910 target_ulong pc_ptr
;
7911 uint16_t *gen_opc_end
;
7915 target_ulong pc_start
;
7916 target_ulong cs_base
;
7920 /* generate intermediate code */
7922 cs_base
= tb
->cs_base
;
7925 dc
->pe
= (flags
>> HF_PE_SHIFT
) & 1;
7926 dc
->code32
= (flags
>> HF_CS32_SHIFT
) & 1;
7927 dc
->ss32
= (flags
>> HF_SS32_SHIFT
) & 1;
7928 dc
->addseg
= (flags
>> HF_ADDSEG_SHIFT
) & 1;
7930 dc
->vm86
= (flags
>> VM_SHIFT
) & 1;
7931 dc
->cpl
= (flags
>> HF_CPL_SHIFT
) & 3;
7932 dc
->iopl
= (flags
>> IOPL_SHIFT
) & 3;
7933 dc
->tf
= (flags
>> TF_SHIFT
) & 1;
7934 dc
->singlestep_enabled
= env
->singlestep_enabled
;
7935 dc
->cc_op
= CC_OP_DYNAMIC
;
7936 dc
->cc_op_dirty
= false;
7937 dc
->cs_base
= cs_base
;
7939 dc
->popl_esp_hack
= 0;
7940 /* select memory access functions */
7942 if (flags
& HF_SOFTMMU_MASK
) {
7943 dc
->mem_index
= (cpu_mmu_index(env
) + 1) << 2;
7945 dc
->cpuid_features
= env
->cpuid_features
;
7946 dc
->cpuid_ext_features
= env
->cpuid_ext_features
;
7947 dc
->cpuid_ext2_features
= env
->cpuid_ext2_features
;
7948 dc
->cpuid_ext3_features
= env
->cpuid_ext3_features
;
7949 dc
->cpuid_7_0_ebx_features
= env
->cpuid_7_0_ebx_features
;
7950 #ifdef TARGET_X86_64
7951 dc
->lma
= (flags
>> HF_LMA_SHIFT
) & 1;
7952 dc
->code64
= (flags
>> HF_CS64_SHIFT
) & 1;
7955 dc
->jmp_opt
= !(dc
->tf
|| env
->singlestep_enabled
||
7956 (flags
& HF_INHIBIT_IRQ_MASK
)
7957 #ifndef CONFIG_SOFTMMU
7958 || (flags
& HF_SOFTMMU_MASK
)
7962 /* check addseg logic */
7963 if (!dc
->addseg
&& (dc
->vm86
|| !dc
->pe
|| !dc
->code32
))
7964 printf("ERROR addseg\n");
7967 cpu_T
[0] = tcg_temp_new();
7968 cpu_T
[1] = tcg_temp_new();
7969 cpu_A0
= tcg_temp_new();
7970 cpu_T3
= tcg_temp_new();
7972 cpu_tmp0
= tcg_temp_new();
7973 cpu_tmp1_i64
= tcg_temp_new_i64();
7974 cpu_tmp2_i32
= tcg_temp_new_i32();
7975 cpu_tmp3_i32
= tcg_temp_new_i32();
7976 cpu_tmp4
= tcg_temp_new();
7977 cpu_tmp5
= tcg_temp_new();
7978 cpu_ptr0
= tcg_temp_new_ptr();
7979 cpu_ptr1
= tcg_temp_new_ptr();
7981 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
7983 dc
->is_jmp
= DISAS_NEXT
;
7987 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
7989 max_insns
= CF_COUNT_MASK
;
7993 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
7994 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
7995 if (bp
->pc
== pc_ptr
&&
7996 !((bp
->flags
& BP_CPU
) && (tb
->flags
& HF_RF_MASK
))) {
7997 gen_debug(dc
, pc_ptr
- dc
->cs_base
);
8003 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
8007 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
8009 tcg_ctx
.gen_opc_pc
[lj
] = pc_ptr
;
8010 gen_opc_cc_op
[lj
] = dc
->cc_op
;
8011 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
8012 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
8014 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
8017 pc_ptr
= disas_insn(env
, dc
, pc_ptr
);
8019 /* stop translation if indicated */
8022 /* if single step mode, we generate only one instruction and
8023 generate an exception */
8024 /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8025 the flag and abort the translation to give the irqs a
8026 change to be happen */
8027 if (dc
->tf
|| dc
->singlestep_enabled
||
8028 (flags
& HF_INHIBIT_IRQ_MASK
)) {
8029 gen_jmp_im(pc_ptr
- dc
->cs_base
);
8033 /* if too long translation, stop generation too */
8034 if (tcg_ctx
.gen_opc_ptr
>= gen_opc_end
||
8035 (pc_ptr
- pc_start
) >= (TARGET_PAGE_SIZE
- 32) ||
8036 num_insns
>= max_insns
) {
8037 gen_jmp_im(pc_ptr
- dc
->cs_base
);
8042 gen_jmp_im(pc_ptr
- dc
->cs_base
);
8047 if (tb
->cflags
& CF_LAST_IO
)
8049 gen_icount_end(tb
, num_insns
);
8050 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
8051 /* we don't forget to fill the last values */
8053 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
8056 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
8060 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
8062 qemu_log("----------------\n");
8063 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
8064 #ifdef TARGET_X86_64
8069 disas_flags
= !dc
->code32
;
8070 log_target_disas(env
, pc_start
, pc_ptr
- pc_start
, disas_flags
);
8076 tb
->size
= pc_ptr
- pc_start
;
8077 tb
->icount
= num_insns
;
8081 void gen_intermediate_code(CPUX86State
*env
, TranslationBlock
*tb
)
8083 gen_intermediate_code_internal(env
, tb
, 0);
8086 void gen_intermediate_code_pc(CPUX86State
*env
, TranslationBlock
*tb
)
8088 gen_intermediate_code_internal(env
, tb
, 1);
8091 void restore_state_to_opc(CPUX86State
*env
, TranslationBlock
*tb
, int pc_pos
)
8095 if (qemu_loglevel_mask(CPU_LOG_TB_OP
)) {
8097 qemu_log("RESTORE:\n");
8098 for(i
= 0;i
<= pc_pos
; i
++) {
8099 if (tcg_ctx
.gen_opc_instr_start
[i
]) {
8100 qemu_log("0x%04x: " TARGET_FMT_lx
"\n", i
,
8101 tcg_ctx
.gen_opc_pc
[i
]);
8104 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx
" cs_base=%x\n",
8105 pc_pos
, tcg_ctx
.gen_opc_pc
[pc_pos
] - tb
->cs_base
,
8106 (uint32_t)tb
->cs_base
);
8109 env
->eip
= tcg_ctx
.gen_opc_pc
[pc_pos
] - tb
->cs_base
;
8110 cc_op
= gen_opc_cc_op
[pc_pos
];
8111 if (cc_op
!= CC_OP_DYNAMIC
)