4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
26 #include "qemu/host-utils.h"
28 #include "disas/disas.h"
/* Instruction-prefix flags accumulated while decoding one insn. */
#define PREFIX_REPZ 0x01
#define PREFIX_REPNZ 0x02
#define PREFIX_LOCK 0x04
#define PREFIX_DATA 0x08   /* 0x66 operand-size override */
#define PREFIX_ADR 0x10    /* 0x67 address-size override */
#define PREFIX_VEX 0x20
/* Accessors for per-insn mode/REX state kept in the DisasContext. */
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
//#define MACRO_TEST 1
/* global register indexes */
static TCGv_ptr cpu_env;                /* pointer to CPUX86State */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;  /* lazy-flags operands */
static TCGv_i32 cpu_cc_op;              /* current CC_OP value when dynamic */
static TCGv cpu_regs[CPU_NB_REGS];      /* guest integer registers */
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

/* CC_OP recorded per generated opcode, for restore-state. */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

#include "exec/gen-icount.h"

/* Nonzero while decoding an insn with a REX prefix: byte regs 4..7 are
   SPL/BPL/SIL/DIL rather than AH/CH/DH/BH (see byte_reg_is_xH). */
static int x86_64_hregs;
84 typedef struct DisasContext
{
85 /* current insn context */
86 int override
; /* -1 if no override */
89 target_ulong pc
; /* pc = eip + cs_base */
90 int is_jmp
; /* 1 = means jump (stop translation), 2 means CPU
91 static state change (stop translation) */
92 /* current block context */
93 target_ulong cs_base
; /* base of CS segment */
94 int pe
; /* protected mode */
95 int code32
; /* 32 bit code segment */
97 int lma
; /* long mode active */
98 int code64
; /* 64 bit code segment */
101 int vex_l
; /* vex vector length */
102 int vex_v
; /* vex vvvv register, without 1's compliment. */
103 int ss32
; /* 32 bit stack segment */
104 CCOp cc_op
; /* current CC operation */
106 int addseg
; /* non zero if either DS/ES/SS have a non zero base */
107 int f_st
; /* currently unused */
108 int vm86
; /* vm86 mode */
111 int tf
; /* TF cpu flag */
112 int singlestep_enabled
; /* "hardware" single step enabled */
113 int jmp_opt
; /* use direct block chaining for direct jumps */
114 int mem_index
; /* select memory access functions */
115 uint64_t flags
; /* all execution flags */
116 struct TranslationBlock
*tb
;
117 int popl_esp_hack
; /* for correct popl with esp base handling */
118 int rip_offset
; /* only used in x86_64, but left for simplicity */
120 int cpuid_ext_features
;
121 int cpuid_ext2_features
;
122 int cpuid_ext3_features
;
123 int cpuid_7_0_ebx_features
;
/* Forward declarations for helpers defined later in this file. */
static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, int ot, int d);
/* i386 arith/logic operations */
/* NOTE(review): the surrounding enum members were lost in extraction;
   only these entries are visible — restore the full enums from the
   original source before building. */
    OP_SHL1, /* undocumented */

/* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_TMP0 = 16,    /* temporary operand register */
    OR_A0, /* temporary register used when doing address evaluation */
197 /* Bit set if the global variable is live after setting CC_OP to X. */
198 static const uint8_t cc_op_live
[CC_OP_NB
] = {
199 [CC_OP_DYNAMIC
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
200 [CC_OP_EFLAGS
] = USES_CC_SRC
,
201 [CC_OP_MULB
... CC_OP_MULQ
] = USES_CC_DST
| USES_CC_SRC
,
202 [CC_OP_ADDB
... CC_OP_ADDQ
] = USES_CC_DST
| USES_CC_SRC
,
203 [CC_OP_ADCB
... CC_OP_ADCQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
204 [CC_OP_SUBB
... CC_OP_SUBQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRCT
,
205 [CC_OP_SBBB
... CC_OP_SBBQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
206 [CC_OP_LOGICB
... CC_OP_LOGICQ
] = USES_CC_DST
,
207 [CC_OP_INCB
... CC_OP_INCQ
] = USES_CC_DST
| USES_CC_SRC
,
208 [CC_OP_DECB
... CC_OP_DECQ
] = USES_CC_DST
| USES_CC_SRC
,
209 [CC_OP_SHLB
... CC_OP_SHLQ
] = USES_CC_DST
| USES_CC_SRC
,
210 [CC_OP_SARB
... CC_OP_SARQ
] = USES_CC_DST
| USES_CC_SRC
,
211 [CC_OP_BMILGB
... CC_OP_BMILGQ
] = USES_CC_DST
| USES_CC_SRC
,
212 [CC_OP_ADCX
] = USES_CC_DST
| USES_CC_SRC
,
213 [CC_OP_ADOX
] = USES_CC_SRC
| USES_CC_SRC2
,
214 [CC_OP_ADCOX
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
218 static void set_cc_op(DisasContext
*s
, CCOp op
)
222 if (s
->cc_op
== op
) {
226 /* Discard CC computation that will no longer be used. */
227 dead
= cc_op_live
[s
->cc_op
] & ~cc_op_live
[op
];
228 if (dead
& USES_CC_DST
) {
229 tcg_gen_discard_tl(cpu_cc_dst
);
231 if (dead
& USES_CC_SRC
) {
232 tcg_gen_discard_tl(cpu_cc_src
);
234 if (dead
& USES_CC_SRC2
) {
235 tcg_gen_discard_tl(cpu_cc_src2
);
237 if (dead
& USES_CC_SRCT
) {
238 tcg_gen_discard_tl(cpu_cc_srcT
);
241 if (op
== CC_OP_DYNAMIC
) {
242 /* The DYNAMIC setting is translator only, and should never be
243 stored. Thus we always consider it clean. */
244 s
->cc_op_dirty
= false;
246 /* Discard any computed CC_OP value (see shifts). */
247 if (s
->cc_op
== CC_OP_DYNAMIC
) {
248 tcg_gen_discard_i32(cpu_cc_op
);
250 s
->cc_op_dirty
= true;
255 static void gen_update_cc_op(DisasContext
*s
)
257 if (s
->cc_op_dirty
) {
258 tcg_gen_movi_i32(cpu_cc_op
, s
->cc_op
);
259 s
->cc_op_dirty
= false;
263 static inline void gen_op_movl_T0_0(void)
265 tcg_gen_movi_tl(cpu_T
[0], 0);
268 static inline void gen_op_movl_T0_im(int32_t val
)
270 tcg_gen_movi_tl(cpu_T
[0], val
);
273 static inline void gen_op_movl_T0_imu(uint32_t val
)
275 tcg_gen_movi_tl(cpu_T
[0], val
);
278 static inline void gen_op_movl_T1_im(int32_t val
)
280 tcg_gen_movi_tl(cpu_T
[1], val
);
283 static inline void gen_op_movl_T1_imu(uint32_t val
)
285 tcg_gen_movi_tl(cpu_T
[1], val
);
288 static inline void gen_op_movl_A0_im(uint32_t val
)
290 tcg_gen_movi_tl(cpu_A0
, val
);
294 static inline void gen_op_movq_A0_im(int64_t val
)
296 tcg_gen_movi_tl(cpu_A0
, val
);
300 static inline void gen_movtl_T0_im(target_ulong val
)
302 tcg_gen_movi_tl(cpu_T
[0], val
);
305 static inline void gen_movtl_T1_im(target_ulong val
)
307 tcg_gen_movi_tl(cpu_T
[1], val
);
310 static inline void gen_op_andl_T0_ffff(void)
312 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xffff);
315 static inline void gen_op_andl_T0_im(uint32_t val
)
317 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], val
);
320 static inline void gen_op_movl_T0_T1(void)
322 tcg_gen_mov_tl(cpu_T
[0], cpu_T
[1]);
325 static inline void gen_op_andl_A0_ffff(void)
327 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffff);
/* Number of operand sizes supported (byte/word/long[/quad]). */
#ifdef TARGET_X86_64
#define NB_OP_SIZES 4
#else /* !TARGET_X86_64 */
#define NB_OP_SIZES 3
#endif /* !TARGET_X86_64 */

/* Byte offsets of the 8/16/32-bit subregisters inside a target_ulong
   guest register slot, depending on host endianness. */
#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
354 /* In instruction encodings for byte register accesses the
355 * register number usually indicates "low 8 bits of register N";
356 * however there are some special cases where N 4..7 indicates
357 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
358 * true for this special case, false otherwise.
360 static inline bool byte_reg_is_xH(int reg
)
366 if (reg
>= 8 || x86_64_hregs
) {
373 static inline void gen_op_mov_reg_v(int ot
, int reg
, TCGv t0
)
377 if (!byte_reg_is_xH(reg
)) {
378 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], t0
, 0, 8);
380 tcg_gen_deposit_tl(cpu_regs
[reg
- 4], cpu_regs
[reg
- 4], t0
, 8, 8);
384 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], t0
, 0, 16);
386 default: /* XXX this shouldn't be reached; abort? */
388 /* For x86_64, this sets the higher half of register to zero.
389 For i386, this is equivalent to a mov. */
390 tcg_gen_ext32u_tl(cpu_regs
[reg
], t0
);
394 tcg_gen_mov_tl(cpu_regs
[reg
], t0
);
400 static inline void gen_op_mov_reg_T0(int ot
, int reg
)
402 gen_op_mov_reg_v(ot
, reg
, cpu_T
[0]);
405 static inline void gen_op_mov_reg_T1(int ot
, int reg
)
407 gen_op_mov_reg_v(ot
, reg
, cpu_T
[1]);
410 static inline void gen_op_mov_reg_A0(int size
, int reg
)
414 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], cpu_A0
, 0, 16);
416 default: /* XXX this shouldn't be reached; abort? */
418 /* For x86_64, this sets the higher half of register to zero.
419 For i386, this is equivalent to a mov. */
420 tcg_gen_ext32u_tl(cpu_regs
[reg
], cpu_A0
);
424 tcg_gen_mov_tl(cpu_regs
[reg
], cpu_A0
);
430 static inline void gen_op_mov_v_reg(int ot
, TCGv t0
, int reg
)
432 if (ot
== OT_BYTE
&& byte_reg_is_xH(reg
)) {
433 tcg_gen_shri_tl(t0
, cpu_regs
[reg
- 4], 8);
434 tcg_gen_ext8u_tl(t0
, t0
);
436 tcg_gen_mov_tl(t0
, cpu_regs
[reg
]);
440 static inline void gen_op_mov_TN_reg(int ot
, int t_index
, int reg
)
442 gen_op_mov_v_reg(ot
, cpu_T
[t_index
], reg
);
445 static inline void gen_op_movl_A0_reg(int reg
)
447 tcg_gen_mov_tl(cpu_A0
, cpu_regs
[reg
]);
450 static inline void gen_op_addl_A0_im(int32_t val
)
452 tcg_gen_addi_tl(cpu_A0
, cpu_A0
, val
);
454 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
459 static inline void gen_op_addq_A0_im(int64_t val
)
461 tcg_gen_addi_tl(cpu_A0
, cpu_A0
, val
);
465 static void gen_add_A0_im(DisasContext
*s
, int val
)
469 gen_op_addq_A0_im(val
);
472 gen_op_addl_A0_im(val
);
475 static inline void gen_op_addl_T0_T1(void)
477 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
480 static inline void gen_op_jmp_T0(void)
482 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, eip
));
485 static inline void gen_op_add_reg_im(int size
, int reg
, int32_t val
)
489 tcg_gen_addi_tl(cpu_tmp0
, cpu_regs
[reg
], val
);
490 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], cpu_tmp0
, 0, 16);
493 tcg_gen_addi_tl(cpu_tmp0
, cpu_regs
[reg
], val
);
494 /* For x86_64, this sets the higher half of register to zero.
495 For i386, this is equivalent to a nop. */
496 tcg_gen_ext32u_tl(cpu_tmp0
, cpu_tmp0
);
497 tcg_gen_mov_tl(cpu_regs
[reg
], cpu_tmp0
);
501 tcg_gen_addi_tl(cpu_regs
[reg
], cpu_regs
[reg
], val
);
507 static inline void gen_op_add_reg_T0(int size
, int reg
)
511 tcg_gen_add_tl(cpu_tmp0
, cpu_regs
[reg
], cpu_T
[0]);
512 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], cpu_tmp0
, 0, 16);
515 tcg_gen_add_tl(cpu_tmp0
, cpu_regs
[reg
], cpu_T
[0]);
516 /* For x86_64, this sets the higher half of register to zero.
517 For i386, this is equivalent to a nop. */
518 tcg_gen_ext32u_tl(cpu_tmp0
, cpu_tmp0
);
519 tcg_gen_mov_tl(cpu_regs
[reg
], cpu_tmp0
);
523 tcg_gen_add_tl(cpu_regs
[reg
], cpu_regs
[reg
], cpu_T
[0]);
529 static inline void gen_op_addl_A0_reg_sN(int shift
, int reg
)
531 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[reg
]);
533 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, shift
);
534 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
535 /* For x86_64, this sets the higher half of register to zero.
536 For i386, this is equivalent to a nop. */
537 tcg_gen_ext32u_tl(cpu_A0
, cpu_A0
);
540 static inline void gen_op_movl_A0_seg(int reg
)
542 tcg_gen_ld32u_tl(cpu_A0
, cpu_env
, offsetof(CPUX86State
, segs
[reg
].base
) + REG_L_OFFSET
);
545 static inline void gen_op_addl_A0_seg(DisasContext
*s
, int reg
)
547 tcg_gen_ld_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
, segs
[reg
].base
));
550 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
551 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
553 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
554 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
557 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
562 static inline void gen_op_movq_A0_seg(int reg
)
564 tcg_gen_ld_tl(cpu_A0
, cpu_env
, offsetof(CPUX86State
, segs
[reg
].base
));
567 static inline void gen_op_addq_A0_seg(int reg
)
569 tcg_gen_ld_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
, segs
[reg
].base
));
570 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
573 static inline void gen_op_movq_A0_reg(int reg
)
575 tcg_gen_mov_tl(cpu_A0
, cpu_regs
[reg
]);
578 static inline void gen_op_addq_A0_reg_sN(int shift
, int reg
)
580 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[reg
]);
582 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, shift
);
583 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
587 static inline void gen_op_lds_T0_A0(int idx
)
589 int mem_index
= (idx
>> 2) - 1;
592 tcg_gen_qemu_ld8s(cpu_T
[0], cpu_A0
, mem_index
);
595 tcg_gen_qemu_ld16s(cpu_T
[0], cpu_A0
, mem_index
);
599 tcg_gen_qemu_ld32s(cpu_T
[0], cpu_A0
, mem_index
);
604 static inline void gen_op_ld_v(int idx
, TCGv t0
, TCGv a0
)
606 int mem_index
= (idx
>> 2) - 1;
609 tcg_gen_qemu_ld8u(t0
, a0
, mem_index
);
612 tcg_gen_qemu_ld16u(t0
, a0
, mem_index
);
615 tcg_gen_qemu_ld32u(t0
, a0
, mem_index
);
619 /* Should never happen on 32-bit targets. */
621 tcg_gen_qemu_ld64(t0
, a0
, mem_index
);
627 /* XXX: always use ldu or lds */
628 static inline void gen_op_ld_T0_A0(int idx
)
630 gen_op_ld_v(idx
, cpu_T
[0], cpu_A0
);
633 static inline void gen_op_ldu_T0_A0(int idx
)
635 gen_op_ld_v(idx
, cpu_T
[0], cpu_A0
);
638 static inline void gen_op_ld_T1_A0(int idx
)
640 gen_op_ld_v(idx
, cpu_T
[1], cpu_A0
);
643 static inline void gen_op_st_v(int idx
, TCGv t0
, TCGv a0
)
645 int mem_index
= (idx
>> 2) - 1;
648 tcg_gen_qemu_st8(t0
, a0
, mem_index
);
651 tcg_gen_qemu_st16(t0
, a0
, mem_index
);
654 tcg_gen_qemu_st32(t0
, a0
, mem_index
);
658 /* Should never happen on 32-bit targets. */
660 tcg_gen_qemu_st64(t0
, a0
, mem_index
);
666 static inline void gen_op_st_T0_A0(int idx
)
668 gen_op_st_v(idx
, cpu_T
[0], cpu_A0
);
671 static inline void gen_op_st_T1_A0(int idx
)
673 gen_op_st_v(idx
, cpu_T
[1], cpu_A0
);
676 static inline void gen_jmp_im(target_ulong pc
)
678 tcg_gen_movi_tl(cpu_tmp0
, pc
);
679 tcg_gen_st_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
, eip
));
682 static inline void gen_string_movl_A0_ESI(DisasContext
*s
)
686 override
= s
->override
;
690 gen_op_movq_A0_seg(override
);
691 gen_op_addq_A0_reg_sN(0, R_ESI
);
693 gen_op_movq_A0_reg(R_ESI
);
699 if (s
->addseg
&& override
< 0)
702 gen_op_movl_A0_seg(override
);
703 gen_op_addl_A0_reg_sN(0, R_ESI
);
705 gen_op_movl_A0_reg(R_ESI
);
708 /* 16 address, always override */
711 gen_op_movl_A0_reg(R_ESI
);
712 gen_op_andl_A0_ffff();
713 gen_op_addl_A0_seg(s
, override
);
717 static inline void gen_string_movl_A0_EDI(DisasContext
*s
)
721 gen_op_movq_A0_reg(R_EDI
);
726 gen_op_movl_A0_seg(R_ES
);
727 gen_op_addl_A0_reg_sN(0, R_EDI
);
729 gen_op_movl_A0_reg(R_EDI
);
732 gen_op_movl_A0_reg(R_EDI
);
733 gen_op_andl_A0_ffff();
734 gen_op_addl_A0_seg(s
, R_ES
);
738 static inline void gen_op_movl_T0_Dshift(int ot
)
740 tcg_gen_ld32s_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, df
));
741 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], ot
);
744 static TCGv
gen_ext_tl(TCGv dst
, TCGv src
, int size
, bool sign
)
749 tcg_gen_ext8s_tl(dst
, src
);
751 tcg_gen_ext8u_tl(dst
, src
);
756 tcg_gen_ext16s_tl(dst
, src
);
758 tcg_gen_ext16u_tl(dst
, src
);
764 tcg_gen_ext32s_tl(dst
, src
);
766 tcg_gen_ext32u_tl(dst
, src
);
775 static void gen_extu(int ot
, TCGv reg
)
777 gen_ext_tl(reg
, reg
, ot
, false);
780 static void gen_exts(int ot
, TCGv reg
)
782 gen_ext_tl(reg
, reg
, ot
, true);
785 static inline void gen_op_jnz_ecx(int size
, int label1
)
787 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[R_ECX
]);
788 gen_extu(size
+ 1, cpu_tmp0
);
789 tcg_gen_brcondi_tl(TCG_COND_NE
, cpu_tmp0
, 0, label1
);
792 static inline void gen_op_jz_ecx(int size
, int label1
)
794 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[R_ECX
]);
795 gen_extu(size
+ 1, cpu_tmp0
);
796 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_tmp0
, 0, label1
);
799 static void gen_helper_in_func(int ot
, TCGv v
, TCGv_i32 n
)
803 gen_helper_inb(v
, n
);
806 gen_helper_inw(v
, n
);
809 gen_helper_inl(v
, n
);
814 static void gen_helper_out_func(int ot
, TCGv_i32 v
, TCGv_i32 n
)
818 gen_helper_outb(v
, n
);
821 gen_helper_outw(v
, n
);
824 gen_helper_outl(v
, n
);
829 static void gen_check_io(DisasContext
*s
, int ot
, target_ulong cur_eip
,
833 target_ulong next_eip
;
836 if (s
->pe
&& (s
->cpl
> s
->iopl
|| s
->vm86
)) {
840 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
843 gen_helper_check_iob(cpu_env
, cpu_tmp2_i32
);
846 gen_helper_check_iow(cpu_env
, cpu_tmp2_i32
);
849 gen_helper_check_iol(cpu_env
, cpu_tmp2_i32
);
853 if(s
->flags
& HF_SVMI_MASK
) {
858 svm_flags
|= (1 << (4 + ot
));
859 next_eip
= s
->pc
- s
->cs_base
;
860 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
861 gen_helper_svm_check_io(cpu_env
, cpu_tmp2_i32
,
862 tcg_const_i32(svm_flags
),
863 tcg_const_i32(next_eip
- cur_eip
));
867 static inline void gen_movs(DisasContext
*s
, int ot
)
869 gen_string_movl_A0_ESI(s
);
870 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
871 gen_string_movl_A0_EDI(s
);
872 gen_op_st_T0_A0(ot
+ s
->mem_index
);
873 gen_op_movl_T0_Dshift(ot
);
874 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
875 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
878 static void gen_op_update1_cc(void)
880 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
883 static void gen_op_update2_cc(void)
885 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
886 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
889 static void gen_op_update3_cc(TCGv reg
)
891 tcg_gen_mov_tl(cpu_cc_src2
, reg
);
892 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
893 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
896 static inline void gen_op_testl_T0_T1_cc(void)
898 tcg_gen_and_tl(cpu_cc_dst
, cpu_T
[0], cpu_T
[1]);
901 static void gen_op_update_neg_cc(void)
903 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
904 tcg_gen_neg_tl(cpu_cc_src
, cpu_T
[0]);
905 tcg_gen_movi_tl(cpu_cc_srcT
, 0);
908 /* compute all eflags to cc_src */
909 static void gen_compute_eflags(DisasContext
*s
)
911 TCGv zero
, dst
, src1
, src2
;
914 if (s
->cc_op
== CC_OP_EFLAGS
) {
917 if (s
->cc_op
== CC_OP_CLR
) {
918 tcg_gen_movi_tl(cpu_cc_src
, CC_Z
);
919 set_cc_op(s
, CC_OP_EFLAGS
);
928 /* Take care to not read values that are not live. */
929 live
= cc_op_live
[s
->cc_op
] & ~USES_CC_SRCT
;
930 dead
= live
^ (USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
);
932 zero
= tcg_const_tl(0);
933 if (dead
& USES_CC_DST
) {
936 if (dead
& USES_CC_SRC
) {
939 if (dead
& USES_CC_SRC2
) {
945 gen_helper_cc_compute_all(cpu_cc_src
, dst
, src1
, src2
, cpu_cc_op
);
946 set_cc_op(s
, CC_OP_EFLAGS
);
953 typedef struct CCPrepare
{
963 /* compute eflags.C to reg */
964 static CCPrepare
gen_prepare_eflags_c(DisasContext
*s
, TCGv reg
)
970 case CC_OP_SUBB
... CC_OP_SUBQ
:
971 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
972 size
= s
->cc_op
- CC_OP_SUBB
;
973 t1
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
974 /* If no temporary was used, be careful not to alias t1 and t0. */
975 t0
= TCGV_EQUAL(t1
, cpu_cc_src
) ? cpu_tmp0
: reg
;
976 tcg_gen_mov_tl(t0
, cpu_cc_srcT
);
980 case CC_OP_ADDB
... CC_OP_ADDQ
:
981 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
982 size
= s
->cc_op
- CC_OP_ADDB
;
983 t1
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
984 t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, false);
986 return (CCPrepare
) { .cond
= TCG_COND_LTU
, .reg
= t0
,
987 .reg2
= t1
, .mask
= -1, .use_reg2
= true };
989 case CC_OP_LOGICB
... CC_OP_LOGICQ
:
991 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
993 case CC_OP_INCB
... CC_OP_INCQ
:
994 case CC_OP_DECB
... CC_OP_DECQ
:
995 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
996 .mask
= -1, .no_setcond
= true };
998 case CC_OP_SHLB
... CC_OP_SHLQ
:
999 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
1000 size
= s
->cc_op
- CC_OP_SHLB
;
1001 shift
= (8 << size
) - 1;
1002 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1003 .mask
= (target_ulong
)1 << shift
};
1005 case CC_OP_MULB
... CC_OP_MULQ
:
1006 return (CCPrepare
) { .cond
= TCG_COND_NE
,
1007 .reg
= cpu_cc_src
, .mask
= -1 };
1009 case CC_OP_BMILGB
... CC_OP_BMILGQ
:
1010 size
= s
->cc_op
- CC_OP_BMILGB
;
1011 t0
= gen_ext_tl(reg
, cpu_cc_src
, size
, false);
1012 return (CCPrepare
) { .cond
= TCG_COND_EQ
, .reg
= t0
, .mask
= -1 };
1016 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_dst
,
1017 .mask
= -1, .no_setcond
= true };
1020 case CC_OP_SARB
... CC_OP_SARQ
:
1022 return (CCPrepare
) { .cond
= TCG_COND_NE
,
1023 .reg
= cpu_cc_src
, .mask
= CC_C
};
1026 /* The need to compute only C from CC_OP_DYNAMIC is important
1027 in efficiently implementing e.g. INC at the start of a TB. */
1028 gen_update_cc_op(s
);
1029 gen_helper_cc_compute_c(reg
, cpu_cc_dst
, cpu_cc_src
,
1030 cpu_cc_src2
, cpu_cc_op
);
1031 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
1032 .mask
= -1, .no_setcond
= true };
1036 /* compute eflags.P to reg */
1037 static CCPrepare
gen_prepare_eflags_p(DisasContext
*s
, TCGv reg
)
1039 gen_compute_eflags(s
);
1040 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1044 /* compute eflags.S to reg */
1045 static CCPrepare
gen_prepare_eflags_s(DisasContext
*s
, TCGv reg
)
1049 gen_compute_eflags(s
);
1055 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1058 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
1061 int size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1062 TCGv t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, true);
1063 return (CCPrepare
) { .cond
= TCG_COND_LT
, .reg
= t0
, .mask
= -1 };
1068 /* compute eflags.O to reg */
1069 static CCPrepare
gen_prepare_eflags_o(DisasContext
*s
, TCGv reg
)
1074 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src2
,
1075 .mask
= -1, .no_setcond
= true };
1077 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
1079 gen_compute_eflags(s
);
1080 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1085 /* compute eflags.Z to reg */
1086 static CCPrepare
gen_prepare_eflags_z(DisasContext
*s
, TCGv reg
)
1090 gen_compute_eflags(s
);
1096 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1099 return (CCPrepare
) { .cond
= TCG_COND_ALWAYS
, .mask
= -1 };
1102 int size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1103 TCGv t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, false);
1104 return (CCPrepare
) { .cond
= TCG_COND_EQ
, .reg
= t0
, .mask
= -1 };
1109 /* perform a conditional store into register 'reg' according to jump opcode
1110 value 'b'. In the fast case, T0 is guaranted not to be used. */
1111 static CCPrepare
gen_prepare_cc(DisasContext
*s
, int b
, TCGv reg
)
1113 int inv
, jcc_op
, size
, cond
;
1118 jcc_op
= (b
>> 1) & 7;
1121 case CC_OP_SUBB
... CC_OP_SUBQ
:
1122 /* We optimize relational operators for the cmp/jcc case. */
1123 size
= s
->cc_op
- CC_OP_SUBB
;
1126 tcg_gen_mov_tl(cpu_tmp4
, cpu_cc_srcT
);
1127 gen_extu(size
, cpu_tmp4
);
1128 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
1129 cc
= (CCPrepare
) { .cond
= TCG_COND_LEU
, .reg
= cpu_tmp4
,
1130 .reg2
= t0
, .mask
= -1, .use_reg2
= true };
1139 tcg_gen_mov_tl(cpu_tmp4
, cpu_cc_srcT
);
1140 gen_exts(size
, cpu_tmp4
);
1141 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, true);
1142 cc
= (CCPrepare
) { .cond
= cond
, .reg
= cpu_tmp4
,
1143 .reg2
= t0
, .mask
= -1, .use_reg2
= true };
1153 /* This actually generates good code for JC, JZ and JS. */
1156 cc
= gen_prepare_eflags_o(s
, reg
);
1159 cc
= gen_prepare_eflags_c(s
, reg
);
1162 cc
= gen_prepare_eflags_z(s
, reg
);
1165 gen_compute_eflags(s
);
1166 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1167 .mask
= CC_Z
| CC_C
};
1170 cc
= gen_prepare_eflags_s(s
, reg
);
1173 cc
= gen_prepare_eflags_p(s
, reg
);
1176 gen_compute_eflags(s
);
1177 if (TCGV_EQUAL(reg
, cpu_cc_src
)) {
1180 tcg_gen_shri_tl(reg
, cpu_cc_src
, 4); /* CC_O -> CC_S */
1181 tcg_gen_xor_tl(reg
, reg
, cpu_cc_src
);
1182 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
1187 gen_compute_eflags(s
);
1188 if (TCGV_EQUAL(reg
, cpu_cc_src
)) {
1191 tcg_gen_shri_tl(reg
, cpu_cc_src
, 4); /* CC_O -> CC_S */
1192 tcg_gen_xor_tl(reg
, reg
, cpu_cc_src
);
1193 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
1194 .mask
= CC_S
| CC_Z
};
1201 cc
.cond
= tcg_invert_cond(cc
.cond
);
1206 static void gen_setcc1(DisasContext
*s
, int b
, TCGv reg
)
1208 CCPrepare cc
= gen_prepare_cc(s
, b
, reg
);
1210 if (cc
.no_setcond
) {
1211 if (cc
.cond
== TCG_COND_EQ
) {
1212 tcg_gen_xori_tl(reg
, cc
.reg
, 1);
1214 tcg_gen_mov_tl(reg
, cc
.reg
);
1219 if (cc
.cond
== TCG_COND_NE
&& !cc
.use_reg2
&& cc
.imm
== 0 &&
1220 cc
.mask
!= 0 && (cc
.mask
& (cc
.mask
- 1)) == 0) {
1221 tcg_gen_shri_tl(reg
, cc
.reg
, ctztl(cc
.mask
));
1222 tcg_gen_andi_tl(reg
, reg
, 1);
1225 if (cc
.mask
!= -1) {
1226 tcg_gen_andi_tl(reg
, cc
.reg
, cc
.mask
);
1230 tcg_gen_setcond_tl(cc
.cond
, reg
, cc
.reg
, cc
.reg2
);
1232 tcg_gen_setcondi_tl(cc
.cond
, reg
, cc
.reg
, cc
.imm
);
1236 static inline void gen_compute_eflags_c(DisasContext
*s
, TCGv reg
)
1238 gen_setcc1(s
, JCC_B
<< 1, reg
);
1241 /* generate a conditional jump to label 'l1' according to jump opcode
1242 value 'b'. In the fast case, T0 is guaranted not to be used. */
1243 static inline void gen_jcc1_noeob(DisasContext
*s
, int b
, int l1
)
1245 CCPrepare cc
= gen_prepare_cc(s
, b
, cpu_T
[0]);
1247 if (cc
.mask
!= -1) {
1248 tcg_gen_andi_tl(cpu_T
[0], cc
.reg
, cc
.mask
);
1252 tcg_gen_brcond_tl(cc
.cond
, cc
.reg
, cc
.reg2
, l1
);
1254 tcg_gen_brcondi_tl(cc
.cond
, cc
.reg
, cc
.imm
, l1
);
1258 /* Generate a conditional jump to label 'l1' according to jump opcode
1259 value 'b'. In the fast case, T0 is guaranted not to be used.
1260 A translation block must end soon. */
1261 static inline void gen_jcc1(DisasContext
*s
, int b
, int l1
)
1263 CCPrepare cc
= gen_prepare_cc(s
, b
, cpu_T
[0]);
1265 gen_update_cc_op(s
);
1266 if (cc
.mask
!= -1) {
1267 tcg_gen_andi_tl(cpu_T
[0], cc
.reg
, cc
.mask
);
1270 set_cc_op(s
, CC_OP_DYNAMIC
);
1272 tcg_gen_brcond_tl(cc
.cond
, cc
.reg
, cc
.reg2
, l1
);
1274 tcg_gen_brcondi_tl(cc
.cond
, cc
.reg
, cc
.imm
, l1
);
1278 /* XXX: does not work with gdbstub "ice" single step - not a
1280 static int gen_jz_ecx_string(DisasContext
*s
, target_ulong next_eip
)
1284 l1
= gen_new_label();
1285 l2
= gen_new_label();
1286 gen_op_jnz_ecx(s
->aflag
, l1
);
1288 gen_jmp_tb(s
, next_eip
, 1);
1293 static inline void gen_stos(DisasContext
*s
, int ot
)
1295 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
1296 gen_string_movl_A0_EDI(s
);
1297 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1298 gen_op_movl_T0_Dshift(ot
);
1299 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1302 static inline void gen_lods(DisasContext
*s
, int ot
)
1304 gen_string_movl_A0_ESI(s
);
1305 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1306 gen_op_mov_reg_T0(ot
, R_EAX
);
1307 gen_op_movl_T0_Dshift(ot
);
1308 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1311 static inline void gen_scas(DisasContext
*s
, int ot
)
1313 gen_string_movl_A0_EDI(s
);
1314 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
1315 gen_op(s
, OP_CMPL
, ot
, R_EAX
);
1316 gen_op_movl_T0_Dshift(ot
);
1317 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1320 static inline void gen_cmps(DisasContext
*s
, int ot
)
1322 gen_string_movl_A0_EDI(s
);
1323 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
1324 gen_string_movl_A0_ESI(s
);
1325 gen_op(s
, OP_CMPL
, ot
, OR_TMP0
);
1326 gen_op_movl_T0_Dshift(ot
);
1327 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1328 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1331 static inline void gen_ins(DisasContext
*s
, int ot
)
1335 gen_string_movl_A0_EDI(s
);
1336 /* Note: we must do this dummy write first to be restartable in
1337 case of page fault. */
1339 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1340 gen_op_mov_TN_reg(OT_WORD
, 1, R_EDX
);
1341 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[1]);
1342 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
1343 gen_helper_in_func(ot
, cpu_T
[0], cpu_tmp2_i32
);
1344 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1345 gen_op_movl_T0_Dshift(ot
);
1346 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1351 static inline void gen_outs(DisasContext
*s
, int ot
)
1355 gen_string_movl_A0_ESI(s
);
1356 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1358 gen_op_mov_TN_reg(OT_WORD
, 1, R_EDX
);
1359 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[1]);
1360 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
1361 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[0]);
1362 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1364 gen_op_movl_T0_Dshift(ot
);
1365 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
/* same method as Valgrind : we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
                                 target_ulong cur_eip, target_ulong next_eip) \
{                                                                             \
    int l2;                                                                   \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    /* a loop would cause two single step exceptions if ECX = 1               \
       before rep string_insn */                                              \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

/* Variant for SCAS/CMPS: also exit the loop on the Z condition (REPZ/REPNZ,
   selected by NZ). */
#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
                                   int nz)                                    \
{                                                                             \
    int l2;                                                                   \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    gen_update_cc_op(s);                                                      \
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);                                 \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}
1414 static void gen_helper_fp_arith_ST0_FT0(int op
)
1418 gen_helper_fadd_ST0_FT0(cpu_env
);
1421 gen_helper_fmul_ST0_FT0(cpu_env
);
1424 gen_helper_fcom_ST0_FT0(cpu_env
);
1427 gen_helper_fcom_ST0_FT0(cpu_env
);
1430 gen_helper_fsub_ST0_FT0(cpu_env
);
1433 gen_helper_fsubr_ST0_FT0(cpu_env
);
1436 gen_helper_fdiv_ST0_FT0(cpu_env
);
1439 gen_helper_fdivr_ST0_FT0(cpu_env
);
1444 /* NOTE the exception in "r" op ordering */
1445 static void gen_helper_fp_arith_STN_ST0(int op
, int opreg
)
1447 TCGv_i32 tmp
= tcg_const_i32(opreg
);
1450 gen_helper_fadd_STN_ST0(cpu_env
, tmp
);
1453 gen_helper_fmul_STN_ST0(cpu_env
, tmp
);
1456 gen_helper_fsubr_STN_ST0(cpu_env
, tmp
);
1459 gen_helper_fsub_STN_ST0(cpu_env
, tmp
);
1462 gen_helper_fdivr_STN_ST0(cpu_env
, tmp
);
1465 gen_helper_fdiv_STN_ST0(cpu_env
, tmp
);
1470 /* if d == OR_TMP0, it means memory operand (address in A0) */
1471 static void gen_op(DisasContext
*s1
, int op
, int ot
, int d
)
1474 gen_op_mov_TN_reg(ot
, 0, d
);
1476 gen_op_ld_T0_A0(ot
+ s1
->mem_index
);
1480 gen_compute_eflags_c(s1
, cpu_tmp4
);
1481 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1482 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_tmp4
);
1484 gen_op_mov_reg_T0(ot
, d
);
1486 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1487 gen_op_update3_cc(cpu_tmp4
);
1488 set_cc_op(s1
, CC_OP_ADCB
+ ot
);
1491 gen_compute_eflags_c(s1
, cpu_tmp4
);
1492 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1493 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_tmp4
);
1495 gen_op_mov_reg_T0(ot
, d
);
1497 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1498 gen_op_update3_cc(cpu_tmp4
);
1499 set_cc_op(s1
, CC_OP_SBBB
+ ot
);
1502 gen_op_addl_T0_T1();
1504 gen_op_mov_reg_T0(ot
, d
);
1506 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1507 gen_op_update2_cc();
1508 set_cc_op(s1
, CC_OP_ADDB
+ ot
);
1511 tcg_gen_mov_tl(cpu_cc_srcT
, cpu_T
[0]);
1512 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1514 gen_op_mov_reg_T0(ot
, d
);
1516 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1517 gen_op_update2_cc();
1518 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1522 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1524 gen_op_mov_reg_T0(ot
, d
);
1526 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1527 gen_op_update1_cc();
1528 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1531 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1533 gen_op_mov_reg_T0(ot
, d
);
1535 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1536 gen_op_update1_cc();
1537 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1540 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1542 gen_op_mov_reg_T0(ot
, d
);
1544 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1545 gen_op_update1_cc();
1546 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1549 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
1550 tcg_gen_mov_tl(cpu_cc_srcT
, cpu_T
[0]);
1551 tcg_gen_sub_tl(cpu_cc_dst
, cpu_T
[0], cpu_T
[1]);
1552 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1557 /* if d == OR_TMP0, it means memory operand (address in A0) */
1558 static void gen_inc(DisasContext
*s1
, int ot
, int d
, int c
)
1561 gen_op_mov_TN_reg(ot
, 0, d
);
1563 gen_op_ld_T0_A0(ot
+ s1
->mem_index
);
1564 gen_compute_eflags_c(s1
, cpu_cc_src
);
1566 tcg_gen_addi_tl(cpu_T
[0], cpu_T
[0], 1);
1567 set_cc_op(s1
, CC_OP_INCB
+ ot
);
1569 tcg_gen_addi_tl(cpu_T
[0], cpu_T
[0], -1);
1570 set_cc_op(s1
, CC_OP_DECB
+ ot
);
1573 gen_op_mov_reg_T0(ot
, d
);
1575 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1576 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1579 static void gen_shift_flags(DisasContext
*s
, int ot
, TCGv result
, TCGv shm1
,
1580 TCGv count
, bool is_right
)
1582 TCGv_i32 z32
, s32
, oldop
;
1585 /* Store the results into the CC variables. If we know that the
1586 variable must be dead, store unconditionally. Otherwise we'll
1587 need to not disrupt the current contents. */
1588 z_tl
= tcg_const_tl(0);
1589 if (cc_op_live
[s
->cc_op
] & USES_CC_DST
) {
1590 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_dst
, count
, z_tl
,
1591 result
, cpu_cc_dst
);
1593 tcg_gen_mov_tl(cpu_cc_dst
, result
);
1595 if (cc_op_live
[s
->cc_op
] & USES_CC_SRC
) {
1596 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_src
, count
, z_tl
,
1599 tcg_gen_mov_tl(cpu_cc_src
, shm1
);
1601 tcg_temp_free(z_tl
);
1603 /* Get the two potential CC_OP values into temporaries. */
1604 tcg_gen_movi_i32(cpu_tmp2_i32
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1605 if (s
->cc_op
== CC_OP_DYNAMIC
) {
1608 tcg_gen_movi_i32(cpu_tmp3_i32
, s
->cc_op
);
1609 oldop
= cpu_tmp3_i32
;
1612 /* Conditionally store the CC_OP value. */
1613 z32
= tcg_const_i32(0);
1614 s32
= tcg_temp_new_i32();
1615 tcg_gen_trunc_tl_i32(s32
, count
);
1616 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, s32
, z32
, cpu_tmp2_i32
, oldop
);
1617 tcg_temp_free_i32(z32
);
1618 tcg_temp_free_i32(s32
);
1620 /* The CC_OP value is no longer predictable. */
1621 set_cc_op(s
, CC_OP_DYNAMIC
);
1624 static void gen_shift_rm_T1(DisasContext
*s
, int ot
, int op1
,
1625 int is_right
, int is_arith
)
1627 target_ulong mask
= (ot
== OT_QUAD
? 0x3f : 0x1f);
1630 if (op1
== OR_TMP0
) {
1631 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1633 gen_op_mov_TN_reg(ot
, 0, op1
);
1636 tcg_gen_andi_tl(cpu_T
[1], cpu_T
[1], mask
);
1637 tcg_gen_subi_tl(cpu_tmp0
, cpu_T
[1], 1);
1641 gen_exts(ot
, cpu_T
[0]);
1642 tcg_gen_sar_tl(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1643 tcg_gen_sar_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1645 gen_extu(ot
, cpu_T
[0]);
1646 tcg_gen_shr_tl(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1647 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1650 tcg_gen_shl_tl(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1651 tcg_gen_shl_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1655 if (op1
== OR_TMP0
) {
1656 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1658 gen_op_mov_reg_T0(ot
, op1
);
1661 gen_shift_flags(s
, ot
, cpu_T
[0], cpu_tmp0
, cpu_T
[1], is_right
);
1664 static void gen_shift_rm_im(DisasContext
*s
, int ot
, int op1
, int op2
,
1665 int is_right
, int is_arith
)
1667 int mask
= (ot
== OT_QUAD
? 0x3f : 0x1f);
1671 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1673 gen_op_mov_TN_reg(ot
, 0, op1
);
1679 gen_exts(ot
, cpu_T
[0]);
1680 tcg_gen_sari_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1681 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], op2
);
1683 gen_extu(ot
, cpu_T
[0]);
1684 tcg_gen_shri_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1685 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], op2
);
1688 tcg_gen_shli_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1689 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], op2
);
1695 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1697 gen_op_mov_reg_T0(ot
, op1
);
1699 /* update eflags if non zero shift */
1701 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
1702 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1703 set_cc_op(s
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1707 static inline void tcg_gen_lshift(TCGv ret
, TCGv arg1
, target_long arg2
)
1710 tcg_gen_shli_tl(ret
, arg1
, arg2
);
1712 tcg_gen_shri_tl(ret
, arg1
, -arg2
);
1715 static void gen_rot_rm_T1(DisasContext
*s
, int ot
, int op1
, int is_right
)
1717 target_ulong mask
= (ot
== OT_QUAD
? 0x3f : 0x1f);
1721 if (op1
== OR_TMP0
) {
1722 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1724 gen_op_mov_TN_reg(ot
, 0, op1
);
1727 tcg_gen_andi_tl(cpu_T
[1], cpu_T
[1], mask
);
1731 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1732 tcg_gen_ext8u_tl(cpu_T
[0], cpu_T
[0]);
1733 tcg_gen_muli_tl(cpu_T
[0], cpu_T
[0], 0x01010101);
1736 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1737 tcg_gen_deposit_tl(cpu_T
[0], cpu_T
[0], cpu_T
[0], 16, 16);
1740 #ifdef TARGET_X86_64
1742 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
1743 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
1745 tcg_gen_rotr_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1747 tcg_gen_rotl_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1749 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
1754 tcg_gen_rotr_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1756 tcg_gen_rotl_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1762 if (op1
== OR_TMP0
) {
1763 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1765 gen_op_mov_reg_T0(ot
, op1
);
1768 /* We'll need the flags computed into CC_SRC. */
1769 gen_compute_eflags(s
);
1771 /* The value that was "rotated out" is now present at the other end
1772 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1773 since we've computed the flags into CC_SRC, these variables are
1776 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T
[0], mask
- 1);
1777 tcg_gen_shri_tl(cpu_cc_dst
, cpu_T
[0], mask
);
1779 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T
[0], mask
);
1780 tcg_gen_andi_tl(cpu_cc_dst
, cpu_T
[0], 1);
1782 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1783 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1785 /* Now conditionally store the new CC_OP value. If the shift count
1786 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1787 Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
1788 exactly as we computed above. */
1789 t0
= tcg_const_i32(0);
1790 t1
= tcg_temp_new_i32();
1791 tcg_gen_trunc_tl_i32(t1
, cpu_T
[1]);
1792 tcg_gen_movi_i32(cpu_tmp2_i32
, CC_OP_ADCOX
);
1793 tcg_gen_movi_i32(cpu_tmp3_i32
, CC_OP_EFLAGS
);
1794 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, t1
, t0
,
1795 cpu_tmp2_i32
, cpu_tmp3_i32
);
1796 tcg_temp_free_i32(t0
);
1797 tcg_temp_free_i32(t1
);
1799 /* The CC_OP value is no longer predictable. */
1800 set_cc_op(s
, CC_OP_DYNAMIC
);
1803 static void gen_rot_rm_im(DisasContext
*s
, int ot
, int op1
, int op2
,
1806 int mask
= (ot
== OT_QUAD
? 0x3f : 0x1f);
1810 if (op1
== OR_TMP0
) {
1811 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1813 gen_op_mov_TN_reg(ot
, 0, op1
);
1819 #ifdef TARGET_X86_64
1821 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
1823 tcg_gen_rotri_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, op2
);
1825 tcg_gen_rotli_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, op2
);
1827 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
1832 tcg_gen_rotri_tl(cpu_T
[0], cpu_T
[0], op2
);
1834 tcg_gen_rotli_tl(cpu_T
[0], cpu_T
[0], op2
);
1845 shift
= mask
+ 1 - shift
;
1847 gen_extu(ot
, cpu_T
[0]);
1848 tcg_gen_shli_tl(cpu_tmp0
, cpu_T
[0], shift
);
1849 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], mask
+ 1 - shift
);
1850 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
1856 if (op1
== OR_TMP0
) {
1857 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1859 gen_op_mov_reg_T0(ot
, op1
);
1863 /* Compute the flags into CC_SRC. */
1864 gen_compute_eflags(s
);
1866 /* The value that was "rotated out" is now present at the other end
1867 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1868 since we've computed the flags into CC_SRC, these variables are
1871 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T
[0], mask
- 1);
1872 tcg_gen_shri_tl(cpu_cc_dst
, cpu_T
[0], mask
);
1874 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T
[0], mask
);
1875 tcg_gen_andi_tl(cpu_cc_dst
, cpu_T
[0], 1);
1877 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1878 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1879 set_cc_op(s
, CC_OP_ADCOX
);
1883 /* XXX: add faster immediate = 1 case */
1884 static void gen_rotc_rm_T1(DisasContext
*s
, int ot
, int op1
,
1887 gen_compute_eflags(s
);
1888 assert(s
->cc_op
== CC_OP_EFLAGS
);
1892 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1894 gen_op_mov_TN_reg(ot
, 0, op1
);
1899 gen_helper_rcrb(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1902 gen_helper_rcrw(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1905 gen_helper_rcrl(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1907 #ifdef TARGET_X86_64
1909 gen_helper_rcrq(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1916 gen_helper_rclb(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1919 gen_helper_rclw(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1922 gen_helper_rcll(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1924 #ifdef TARGET_X86_64
1926 gen_helper_rclq(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1933 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1935 gen_op_mov_reg_T0(ot
, op1
);
1938 /* XXX: add faster immediate case */
1939 static void gen_shiftd_rm_T1(DisasContext
*s
, int ot
, int op1
,
1940 bool is_right
, TCGv count_in
)
1942 target_ulong mask
= (ot
== OT_QUAD
? 63 : 31);
1946 if (op1
== OR_TMP0
) {
1947 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1949 gen_op_mov_TN_reg(ot
, 0, op1
);
1952 count
= tcg_temp_new();
1953 tcg_gen_andi_tl(count
, count_in
, mask
);
1957 /* Note: we implement the Intel behaviour for shift count > 16.
1958 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1959 portion by constructing it as a 32-bit value. */
1961 tcg_gen_deposit_tl(cpu_tmp0
, cpu_T
[0], cpu_T
[1], 16, 16);
1962 tcg_gen_mov_tl(cpu_T
[1], cpu_T
[0]);
1963 tcg_gen_mov_tl(cpu_T
[0], cpu_tmp0
);
1965 tcg_gen_deposit_tl(cpu_T
[1], cpu_T
[0], cpu_T
[1], 16, 16);
1968 #ifdef TARGET_X86_64
1970 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1971 tcg_gen_subi_tl(cpu_tmp0
, count
, 1);
1973 tcg_gen_concat_tl_i64(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1974 tcg_gen_shr_i64(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1975 tcg_gen_shr_i64(cpu_T
[0], cpu_T
[0], count
);
1977 tcg_gen_concat_tl_i64(cpu_T
[0], cpu_T
[1], cpu_T
[0]);
1978 tcg_gen_shl_i64(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1979 tcg_gen_shl_i64(cpu_T
[0], cpu_T
[0], count
);
1980 tcg_gen_shri_i64(cpu_tmp0
, cpu_tmp0
, 32);
1981 tcg_gen_shri_i64(cpu_T
[0], cpu_T
[0], 32);
1986 tcg_gen_subi_tl(cpu_tmp0
, count
, 1);
1988 tcg_gen_shr_tl(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1990 tcg_gen_subfi_tl(cpu_tmp4
, mask
+ 1, count
);
1991 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], count
);
1992 tcg_gen_shl_tl(cpu_T
[1], cpu_T
[1], cpu_tmp4
);
1994 tcg_gen_shl_tl(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1995 if (ot
== OT_WORD
) {
1996 /* Only needed if count > 16, for Intel behaviour. */
1997 tcg_gen_subfi_tl(cpu_tmp4
, 33, count
);
1998 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[1], cpu_tmp4
);
1999 tcg_gen_or_tl(cpu_tmp0
, cpu_tmp0
, cpu_tmp4
);
2002 tcg_gen_subfi_tl(cpu_tmp4
, mask
+ 1, count
);
2003 tcg_gen_shl_tl(cpu_T
[0], cpu_T
[0], count
);
2004 tcg_gen_shr_tl(cpu_T
[1], cpu_T
[1], cpu_tmp4
);
2006 tcg_gen_movi_tl(cpu_tmp4
, 0);
2007 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_T
[1], count
, cpu_tmp4
,
2008 cpu_tmp4
, cpu_T
[1]);
2009 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
2014 if (op1
== OR_TMP0
) {
2015 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2017 gen_op_mov_reg_T0(ot
, op1
);
2020 gen_shift_flags(s
, ot
, cpu_T
[0], cpu_tmp0
, count
, is_right
);
2021 tcg_temp_free(count
);
2024 static void gen_shift(DisasContext
*s1
, int op
, int ot
, int d
, int s
)
2027 gen_op_mov_TN_reg(ot
, 1, s
);
2030 gen_rot_rm_T1(s1
, ot
, d
, 0);
2033 gen_rot_rm_T1(s1
, ot
, d
, 1);
2037 gen_shift_rm_T1(s1
, ot
, d
, 0, 0);
2040 gen_shift_rm_T1(s1
, ot
, d
, 1, 0);
2043 gen_shift_rm_T1(s1
, ot
, d
, 1, 1);
2046 gen_rotc_rm_T1(s1
, ot
, d
, 0);
2049 gen_rotc_rm_T1(s1
, ot
, d
, 1);
2054 static void gen_shifti(DisasContext
*s1
, int op
, int ot
, int d
, int c
)
2058 gen_rot_rm_im(s1
, ot
, d
, c
, 0);
2061 gen_rot_rm_im(s1
, ot
, d
, c
, 1);
2065 gen_shift_rm_im(s1
, ot
, d
, c
, 0, 0);
2068 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 0);
2071 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 1);
2074 /* currently not optimized */
2075 gen_op_movl_T1_im(c
);
2076 gen_shift(s1
, op
, ot
, d
, OR_TMP1
);
2081 static void gen_lea_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2082 int *reg_ptr
, int *offset_ptr
)
2090 int mod
, rm
, code
, override
, must_add_seg
;
2092 override
= s
->override
;
2093 must_add_seg
= s
->addseg
;
2096 mod
= (modrm
>> 6) & 3;
2108 code
= cpu_ldub_code(env
, s
->pc
++);
2109 scale
= (code
>> 6) & 3;
2110 index
= ((code
>> 3) & 7) | REX_X(s
);
2117 if ((base
& 7) == 5) {
2119 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
2121 if (CODE64(s
) && !havesib
) {
2122 disp
+= s
->pc
+ s
->rip_offset
;
2129 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
2133 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
2139 /* for correct popl handling with esp */
2140 if (base
== 4 && s
->popl_esp_hack
)
2141 disp
+= s
->popl_esp_hack
;
2142 #ifdef TARGET_X86_64
2143 if (s
->aflag
== 2) {
2144 gen_op_movq_A0_reg(base
);
2146 gen_op_addq_A0_im(disp
);
2151 gen_op_movl_A0_reg(base
);
2153 gen_op_addl_A0_im(disp
);
2156 #ifdef TARGET_X86_64
2157 if (s
->aflag
== 2) {
2158 gen_op_movq_A0_im(disp
);
2162 gen_op_movl_A0_im(disp
);
2165 /* index == 4 means no index */
2166 if (havesib
&& (index
!= 4)) {
2167 #ifdef TARGET_X86_64
2168 if (s
->aflag
== 2) {
2169 gen_op_addq_A0_reg_sN(scale
, index
);
2173 gen_op_addl_A0_reg_sN(scale
, index
);
2178 if (base
== R_EBP
|| base
== R_ESP
)
2183 #ifdef TARGET_X86_64
2184 if (s
->aflag
== 2) {
2185 gen_op_addq_A0_seg(override
);
2189 gen_op_addl_A0_seg(s
, override
);
2196 disp
= cpu_lduw_code(env
, s
->pc
);
2198 gen_op_movl_A0_im(disp
);
2199 rm
= 0; /* avoid SS override */
2206 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
2210 disp
= cpu_lduw_code(env
, s
->pc
);
2216 gen_op_movl_A0_reg(R_EBX
);
2217 gen_op_addl_A0_reg_sN(0, R_ESI
);
2220 gen_op_movl_A0_reg(R_EBX
);
2221 gen_op_addl_A0_reg_sN(0, R_EDI
);
2224 gen_op_movl_A0_reg(R_EBP
);
2225 gen_op_addl_A0_reg_sN(0, R_ESI
);
2228 gen_op_movl_A0_reg(R_EBP
);
2229 gen_op_addl_A0_reg_sN(0, R_EDI
);
2232 gen_op_movl_A0_reg(R_ESI
);
2235 gen_op_movl_A0_reg(R_EDI
);
2238 gen_op_movl_A0_reg(R_EBP
);
2242 gen_op_movl_A0_reg(R_EBX
);
2246 gen_op_addl_A0_im(disp
);
2247 gen_op_andl_A0_ffff();
2251 if (rm
== 2 || rm
== 3 || rm
== 6)
2256 gen_op_addl_A0_seg(s
, override
);
2266 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2268 int mod
, rm
, base
, code
;
2270 mod
= (modrm
>> 6) & 3;
2280 code
= cpu_ldub_code(env
, s
->pc
++);
2316 /* used for LEA and MOV AX, mem */
2317 static void gen_add_A0_ds_seg(DisasContext
*s
)
2319 int override
, must_add_seg
;
2320 must_add_seg
= s
->addseg
;
2322 if (s
->override
>= 0) {
2323 override
= s
->override
;
2327 #ifdef TARGET_X86_64
2329 gen_op_addq_A0_seg(override
);
2333 gen_op_addl_A0_seg(s
, override
);
2338 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2340 static void gen_ldst_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2341 int ot
, int reg
, int is_store
)
2343 int mod
, rm
, opreg
, disp
;
2345 mod
= (modrm
>> 6) & 3;
2346 rm
= (modrm
& 7) | REX_B(s
);
2350 gen_op_mov_TN_reg(ot
, 0, reg
);
2351 gen_op_mov_reg_T0(ot
, rm
);
2353 gen_op_mov_TN_reg(ot
, 0, rm
);
2355 gen_op_mov_reg_T0(ot
, reg
);
2358 gen_lea_modrm(env
, s
, modrm
, &opreg
, &disp
);
2361 gen_op_mov_TN_reg(ot
, 0, reg
);
2362 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2364 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
2366 gen_op_mov_reg_T0(ot
, reg
);
2371 static inline uint32_t insn_get(CPUX86State
*env
, DisasContext
*s
, int ot
)
2377 ret
= cpu_ldub_code(env
, s
->pc
);
2381 ret
= cpu_lduw_code(env
, s
->pc
);
2386 ret
= cpu_ldl_code(env
, s
->pc
);
2393 static inline int insn_const_size(unsigned int ot
)
2401 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong eip
)
2403 TranslationBlock
*tb
;
2406 pc
= s
->cs_base
+ eip
;
2408 /* NOTE: we handle the case where the TB spans two pages here */
2409 if ((pc
& TARGET_PAGE_MASK
) == (tb
->pc
& TARGET_PAGE_MASK
) ||
2410 (pc
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
)) {
2411 /* jump to same page: we can use a direct jump */
2412 tcg_gen_goto_tb(tb_num
);
2414 tcg_gen_exit_tb((tcg_target_long
)tb
+ tb_num
);
2416 /* jump to another page: currently not optimized */
2422 static inline void gen_jcc(DisasContext
*s
, int b
,
2423 target_ulong val
, target_ulong next_eip
)
2428 l1
= gen_new_label();
2431 gen_goto_tb(s
, 0, next_eip
);
2434 gen_goto_tb(s
, 1, val
);
2435 s
->is_jmp
= DISAS_TB_JUMP
;
2437 l1
= gen_new_label();
2438 l2
= gen_new_label();
2441 gen_jmp_im(next_eip
);
2451 static void gen_cmovcc1(CPUX86State
*env
, DisasContext
*s
, int ot
, int b
,
2456 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
2458 cc
= gen_prepare_cc(s
, b
, cpu_T
[1]);
2459 if (cc
.mask
!= -1) {
2460 TCGv t0
= tcg_temp_new();
2461 tcg_gen_andi_tl(t0
, cc
.reg
, cc
.mask
);
2465 cc
.reg2
= tcg_const_tl(cc
.imm
);
2468 tcg_gen_movcond_tl(cc
.cond
, cpu_T
[0], cc
.reg
, cc
.reg2
,
2469 cpu_T
[0], cpu_regs
[reg
]);
2470 gen_op_mov_reg_T0(ot
, reg
);
2472 if (cc
.mask
!= -1) {
2473 tcg_temp_free(cc
.reg
);
2476 tcg_temp_free(cc
.reg2
);
2480 static inline void gen_op_movl_T0_seg(int seg_reg
)
2482 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
2483 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2486 static inline void gen_op_movl_seg_T0_vm(int seg_reg
)
2488 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xffff);
2489 tcg_gen_st32_tl(cpu_T
[0], cpu_env
,
2490 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2491 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], 4);
2492 tcg_gen_st_tl(cpu_T
[0], cpu_env
,
2493 offsetof(CPUX86State
,segs
[seg_reg
].base
));
2496 /* move T0 to seg_reg and compute if the CPU state may change. Never
2497 call this function with seg_reg == R_CS */
2498 static void gen_movl_seg_T0(DisasContext
*s
, int seg_reg
, target_ulong cur_eip
)
2500 if (s
->pe
&& !s
->vm86
) {
2501 /* XXX: optimize by finding processor state dynamically */
2502 gen_update_cc_op(s
);
2503 gen_jmp_im(cur_eip
);
2504 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
2505 gen_helper_load_seg(cpu_env
, tcg_const_i32(seg_reg
), cpu_tmp2_i32
);
2506 /* abort translation because the addseg value may change or
2507 because ss32 may change. For R_SS, translation must always
2508 stop as a special handling must be done to disable hardware
2509 interrupts for the next instruction */
2510 if (seg_reg
== R_SS
|| (s
->code32
&& seg_reg
< R_FS
))
2511 s
->is_jmp
= DISAS_TB_JUMP
;
2513 gen_op_movl_seg_T0_vm(seg_reg
);
2514 if (seg_reg
== R_SS
)
2515 s
->is_jmp
= DISAS_TB_JUMP
;
2519 static inline int svm_is_rep(int prefixes
)
2521 return ((prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) ? 8 : 0);
2525 gen_svm_check_intercept_param(DisasContext
*s
, target_ulong pc_start
,
2526 uint32_t type
, uint64_t param
)
2528 /* no SVM activated; fast case */
2529 if (likely(!(s
->flags
& HF_SVMI_MASK
)))
2531 gen_update_cc_op(s
);
2532 gen_jmp_im(pc_start
- s
->cs_base
);
2533 gen_helper_svm_check_intercept_param(cpu_env
, tcg_const_i32(type
),
2534 tcg_const_i64(param
));
2538 gen_svm_check_intercept(DisasContext
*s
, target_ulong pc_start
, uint64_t type
)
2540 gen_svm_check_intercept_param(s
, pc_start
, type
, 0);
2543 static inline void gen_stack_update(DisasContext
*s
, int addend
)
2545 #ifdef TARGET_X86_64
2547 gen_op_add_reg_im(2, R_ESP
, addend
);
2551 gen_op_add_reg_im(1, R_ESP
, addend
);
2553 gen_op_add_reg_im(0, R_ESP
, addend
);
2557 /* generate a push. It depends on ss32, addseg and dflag */
2558 static void gen_push_T0(DisasContext
*s
)
2560 #ifdef TARGET_X86_64
2562 gen_op_movq_A0_reg(R_ESP
);
2564 gen_op_addq_A0_im(-8);
2565 gen_op_st_T0_A0(OT_QUAD
+ s
->mem_index
);
2567 gen_op_addq_A0_im(-2);
2568 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
2570 gen_op_mov_reg_A0(2, R_ESP
);
2574 gen_op_movl_A0_reg(R_ESP
);
2576 gen_op_addl_A0_im(-2);
2578 gen_op_addl_A0_im(-4);
2581 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2582 gen_op_addl_A0_seg(s
, R_SS
);
2585 gen_op_andl_A0_ffff();
2586 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2587 gen_op_addl_A0_seg(s
, R_SS
);
2589 gen_op_st_T0_A0(s
->dflag
+ 1 + s
->mem_index
);
2590 if (s
->ss32
&& !s
->addseg
)
2591 gen_op_mov_reg_A0(1, R_ESP
);
2593 gen_op_mov_reg_T1(s
->ss32
+ 1, R_ESP
);
2597 /* generate a push. It depends on ss32, addseg and dflag */
2598 /* slower version for T1, only used for call Ev */
2599 static void gen_push_T1(DisasContext
*s
)
2601 #ifdef TARGET_X86_64
2603 gen_op_movq_A0_reg(R_ESP
);
2605 gen_op_addq_A0_im(-8);
2606 gen_op_st_T1_A0(OT_QUAD
+ s
->mem_index
);
2608 gen_op_addq_A0_im(-2);
2609 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
2611 gen_op_mov_reg_A0(2, R_ESP
);
2615 gen_op_movl_A0_reg(R_ESP
);
2617 gen_op_addl_A0_im(-2);
2619 gen_op_addl_A0_im(-4);
2622 gen_op_addl_A0_seg(s
, R_SS
);
2625 gen_op_andl_A0_ffff();
2626 gen_op_addl_A0_seg(s
, R_SS
);
2628 gen_op_st_T1_A0(s
->dflag
+ 1 + s
->mem_index
);
2630 if (s
->ss32
&& !s
->addseg
)
2631 gen_op_mov_reg_A0(1, R_ESP
);
2633 gen_stack_update(s
, (-2) << s
->dflag
);
2637 /* two step pop is necessary for precise exceptions */
2638 static void gen_pop_T0(DisasContext
*s
)
2640 #ifdef TARGET_X86_64
2642 gen_op_movq_A0_reg(R_ESP
);
2643 gen_op_ld_T0_A0((s
->dflag
? OT_QUAD
: OT_WORD
) + s
->mem_index
);
2647 gen_op_movl_A0_reg(R_ESP
);
2650 gen_op_addl_A0_seg(s
, R_SS
);
2652 gen_op_andl_A0_ffff();
2653 gen_op_addl_A0_seg(s
, R_SS
);
2655 gen_op_ld_T0_A0(s
->dflag
+ 1 + s
->mem_index
);
2659 static void gen_pop_update(DisasContext
*s
)
2661 #ifdef TARGET_X86_64
2662 if (CODE64(s
) && s
->dflag
) {
2663 gen_stack_update(s
, 8);
2667 gen_stack_update(s
, 2 << s
->dflag
);
2671 static void gen_stack_A0(DisasContext
*s
)
2673 gen_op_movl_A0_reg(R_ESP
);
2675 gen_op_andl_A0_ffff();
2676 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2678 gen_op_addl_A0_seg(s
, R_SS
);
2681 /* NOTE: wrap around in 16 bit not fully handled */
2682 static void gen_pusha(DisasContext
*s
)
2685 gen_op_movl_A0_reg(R_ESP
);
2686 gen_op_addl_A0_im(-16 << s
->dflag
);
2688 gen_op_andl_A0_ffff();
2689 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2691 gen_op_addl_A0_seg(s
, R_SS
);
2692 for(i
= 0;i
< 8; i
++) {
2693 gen_op_mov_TN_reg(OT_LONG
, 0, 7 - i
);
2694 gen_op_st_T0_A0(OT_WORD
+ s
->dflag
+ s
->mem_index
);
2695 gen_op_addl_A0_im(2 << s
->dflag
);
2697 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2700 /* NOTE: wrap around in 16 bit not fully handled */
2701 static void gen_popa(DisasContext
*s
)
2704 gen_op_movl_A0_reg(R_ESP
);
2706 gen_op_andl_A0_ffff();
2707 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2708 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], 16 << s
->dflag
);
2710 gen_op_addl_A0_seg(s
, R_SS
);
2711 for(i
= 0;i
< 8; i
++) {
2712 /* ESP is not reloaded */
2714 gen_op_ld_T0_A0(OT_WORD
+ s
->dflag
+ s
->mem_index
);
2715 gen_op_mov_reg_T0(OT_WORD
+ s
->dflag
, 7 - i
);
2717 gen_op_addl_A0_im(2 << s
->dflag
);
2719 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2722 static void gen_enter(DisasContext
*s
, int esp_addend
, int level
)
2727 #ifdef TARGET_X86_64
2729 ot
= s
->dflag
? OT_QUAD
: OT_WORD
;
2732 gen_op_movl_A0_reg(R_ESP
);
2733 gen_op_addq_A0_im(-opsize
);
2734 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2737 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
2738 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2740 /* XXX: must save state */
2741 gen_helper_enter64_level(cpu_env
, tcg_const_i32(level
),
2742 tcg_const_i32((ot
== OT_QUAD
)),
2745 gen_op_mov_reg_T1(ot
, R_EBP
);
2746 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], -esp_addend
+ (-opsize
* level
));
2747 gen_op_mov_reg_T1(OT_QUAD
, R_ESP
);
2751 ot
= s
->dflag
+ OT_WORD
;
2752 opsize
= 2 << s
->dflag
;
2754 gen_op_movl_A0_reg(R_ESP
);
2755 gen_op_addl_A0_im(-opsize
);
2757 gen_op_andl_A0_ffff();
2758 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2760 gen_op_addl_A0_seg(s
, R_SS
);
2762 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
2763 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2765 /* XXX: must save state */
2766 gen_helper_enter_level(cpu_env
, tcg_const_i32(level
),
2767 tcg_const_i32(s
->dflag
),
2770 gen_op_mov_reg_T1(ot
, R_EBP
);
2771 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], -esp_addend
+ (-opsize
* level
));
2772 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2776 static void gen_exception(DisasContext
*s
, int trapno
, target_ulong cur_eip
)
2778 gen_update_cc_op(s
);
2779 gen_jmp_im(cur_eip
);
2780 gen_helper_raise_exception(cpu_env
, tcg_const_i32(trapno
));
2781 s
->is_jmp
= DISAS_TB_JUMP
;
2784 /* an interrupt is different from an exception because of the
2786 static void gen_interrupt(DisasContext
*s
, int intno
,
2787 target_ulong cur_eip
, target_ulong next_eip
)
2789 gen_update_cc_op(s
);
2790 gen_jmp_im(cur_eip
);
2791 gen_helper_raise_interrupt(cpu_env
, tcg_const_i32(intno
),
2792 tcg_const_i32(next_eip
- cur_eip
));
2793 s
->is_jmp
= DISAS_TB_JUMP
;
2796 static void gen_debug(DisasContext
*s
, target_ulong cur_eip
)
2798 gen_update_cc_op(s
);
2799 gen_jmp_im(cur_eip
);
2800 gen_helper_debug(cpu_env
);
2801 s
->is_jmp
= DISAS_TB_JUMP
;
2804 /* generate a generic end of block. Trace exception is also generated
2806 static void gen_eob(DisasContext
*s
)
2808 gen_update_cc_op(s
);
2809 if (s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
) {
2810 gen_helper_reset_inhibit_irq(cpu_env
);
2812 if (s
->tb
->flags
& HF_RF_MASK
) {
2813 gen_helper_reset_rf(cpu_env
);
2815 if (s
->singlestep_enabled
) {
2816 gen_helper_debug(cpu_env
);
2818 gen_helper_single_step(cpu_env
);
2822 s
->is_jmp
= DISAS_TB_JUMP
;
2825 /* generate a jump to eip. No segment change must happen before as a
2826 direct call to the next block may occur */
2827 static void gen_jmp_tb(DisasContext
*s
, target_ulong eip
, int tb_num
)
2829 gen_update_cc_op(s
);
2830 set_cc_op(s
, CC_OP_DYNAMIC
);
2832 gen_goto_tb(s
, tb_num
, eip
);
2833 s
->is_jmp
= DISAS_TB_JUMP
;
2840 static void gen_jmp(DisasContext
*s
, target_ulong eip
)
2842 gen_jmp_tb(s
, eip
, 0);
2845 static inline void gen_ldq_env_A0(int idx
, int offset
)
2847 int mem_index
= (idx
>> 2) - 1;
2848 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2849 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2852 static inline void gen_stq_env_A0(int idx
, int offset
)
2854 int mem_index
= (idx
>> 2) - 1;
2855 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2856 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2859 static inline void gen_ldo_env_A0(int idx
, int offset
)
2861 int mem_index
= (idx
>> 2) - 1;
2862 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2863 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2864 tcg_gen_addi_tl(cpu_tmp0
, cpu_A0
, 8);
2865 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_tmp0
, mem_index
);
2866 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2869 static inline void gen_sto_env_A0(int idx
, int offset
)
2871 int mem_index
= (idx
>> 2) - 1;
2872 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2873 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2874 tcg_gen_addi_tl(cpu_tmp0
, cpu_A0
, 8);
2875 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2876 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_tmp0
, mem_index
);
2879 static inline void gen_op_movo(int d_offset
, int s_offset
)
2881 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
);
2882 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2883 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
+ 8);
2884 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
+ 8);
2887 static inline void gen_op_movq(int d_offset
, int s_offset
)
2889 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
);
2890 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2893 static inline void gen_op_movl(int d_offset
, int s_offset
)
2895 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
, s_offset
);
2896 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, d_offset
);
2899 static inline void gen_op_movq_env_0(int d_offset
)
2901 tcg_gen_movi_i64(cpu_tmp1_i64
, 0);
2902 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2905 typedef void (*SSEFunc_i_ep
)(TCGv_i32 val
, TCGv_ptr env
, TCGv_ptr reg
);
2906 typedef void (*SSEFunc_l_ep
)(TCGv_i64 val
, TCGv_ptr env
, TCGv_ptr reg
);
2907 typedef void (*SSEFunc_0_epi
)(TCGv_ptr env
, TCGv_ptr reg
, TCGv_i32 val
);
2908 typedef void (*SSEFunc_0_epl
)(TCGv_ptr env
, TCGv_ptr reg
, TCGv_i64 val
);
2909 typedef void (*SSEFunc_0_epp
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
);
2910 typedef void (*SSEFunc_0_eppi
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
,
2912 typedef void (*SSEFunc_0_ppi
)(TCGv_ptr reg_a
, TCGv_ptr reg_b
, TCGv_i32 val
);
2913 typedef void (*SSEFunc_0_eppt
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
,
2916 #define SSE_SPECIAL ((void *)1)
2917 #define SSE_DUMMY ((void *)2)
2919 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2920 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2921 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
2923 static const SSEFunc_0_epp sse_op_table1
[256][4] = {
2924 /* 3DNow! extensions */
2925 [0x0e] = { SSE_DUMMY
}, /* femms */
2926 [0x0f] = { SSE_DUMMY
}, /* pf... */
2927 /* pure SSE operations */
2928 [0x10] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movups, movupd, movss, movsd */
2929 [0x11] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movups, movupd, movss, movsd */
2930 [0x12] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movlps, movlpd, movsldup, movddup */
2931 [0x13] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movlps, movlpd */
2932 [0x14] = { gen_helper_punpckldq_xmm
, gen_helper_punpcklqdq_xmm
},
2933 [0x15] = { gen_helper_punpckhdq_xmm
, gen_helper_punpckhqdq_xmm
},
2934 [0x16] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movhps, movhpd, movshdup */
2935 [0x17] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movhps, movhpd */
2937 [0x28] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movaps, movapd */
2938 [0x29] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movaps, movapd */
2939 [0x2a] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2940 [0x2b] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movntps, movntpd, movntss, movntsd */
2941 [0x2c] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2942 [0x2d] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
2943 [0x2e] = { gen_helper_ucomiss
, gen_helper_ucomisd
},
2944 [0x2f] = { gen_helper_comiss
, gen_helper_comisd
},
2945 [0x50] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movmskps, movmskpd */
2946 [0x51] = SSE_FOP(sqrt
),
2947 [0x52] = { gen_helper_rsqrtps
, NULL
, gen_helper_rsqrtss
, NULL
},
2948 [0x53] = { gen_helper_rcpps
, NULL
, gen_helper_rcpss
, NULL
},
2949 [0x54] = { gen_helper_pand_xmm
, gen_helper_pand_xmm
}, /* andps, andpd */
2950 [0x55] = { gen_helper_pandn_xmm
, gen_helper_pandn_xmm
}, /* andnps, andnpd */
2951 [0x56] = { gen_helper_por_xmm
, gen_helper_por_xmm
}, /* orps, orpd */
2952 [0x57] = { gen_helper_pxor_xmm
, gen_helper_pxor_xmm
}, /* xorps, xorpd */
2953 [0x58] = SSE_FOP(add
),
2954 [0x59] = SSE_FOP(mul
),
2955 [0x5a] = { gen_helper_cvtps2pd
, gen_helper_cvtpd2ps
,
2956 gen_helper_cvtss2sd
, gen_helper_cvtsd2ss
},
2957 [0x5b] = { gen_helper_cvtdq2ps
, gen_helper_cvtps2dq
, gen_helper_cvttps2dq
},
2958 [0x5c] = SSE_FOP(sub
),
2959 [0x5d] = SSE_FOP(min
),
2960 [0x5e] = SSE_FOP(div
),
2961 [0x5f] = SSE_FOP(max
),
2963 [0xc2] = SSE_FOP(cmpeq
),
2964 [0xc6] = { (SSEFunc_0_epp
)gen_helper_shufps
,
2965 (SSEFunc_0_epp
)gen_helper_shufpd
}, /* XXX: casts */
2967 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2968 [0x38] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
},
2969 [0x3a] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
},
2971 /* MMX ops and their SSE extensions */
2972 [0x60] = MMX_OP2(punpcklbw
),
2973 [0x61] = MMX_OP2(punpcklwd
),
2974 [0x62] = MMX_OP2(punpckldq
),
2975 [0x63] = MMX_OP2(packsswb
),
2976 [0x64] = MMX_OP2(pcmpgtb
),
2977 [0x65] = MMX_OP2(pcmpgtw
),
2978 [0x66] = MMX_OP2(pcmpgtl
),
2979 [0x67] = MMX_OP2(packuswb
),
2980 [0x68] = MMX_OP2(punpckhbw
),
2981 [0x69] = MMX_OP2(punpckhwd
),
2982 [0x6a] = MMX_OP2(punpckhdq
),
2983 [0x6b] = MMX_OP2(packssdw
),
2984 [0x6c] = { NULL
, gen_helper_punpcklqdq_xmm
},
2985 [0x6d] = { NULL
, gen_helper_punpckhqdq_xmm
},
2986 [0x6e] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movd mm, ea */
2987 [0x6f] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movq, movdqa, , movqdu */
2988 [0x70] = { (SSEFunc_0_epp
)gen_helper_pshufw_mmx
,
2989 (SSEFunc_0_epp
)gen_helper_pshufd_xmm
,
2990 (SSEFunc_0_epp
)gen_helper_pshufhw_xmm
,
2991 (SSEFunc_0_epp
)gen_helper_pshuflw_xmm
}, /* XXX: casts */
2992 [0x71] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* shiftw */
2993 [0x72] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* shiftd */
2994 [0x73] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* shiftq */
2995 [0x74] = MMX_OP2(pcmpeqb
),
2996 [0x75] = MMX_OP2(pcmpeqw
),
2997 [0x76] = MMX_OP2(pcmpeql
),
2998 [0x77] = { SSE_DUMMY
}, /* emms */
2999 [0x78] = { NULL
, SSE_SPECIAL
, NULL
, SSE_SPECIAL
}, /* extrq_i, insertq_i */
3000 [0x79] = { NULL
, gen_helper_extrq_r
, NULL
, gen_helper_insertq_r
},
3001 [0x7c] = { NULL
, gen_helper_haddpd
, NULL
, gen_helper_haddps
},
3002 [0x7d] = { NULL
, gen_helper_hsubpd
, NULL
, gen_helper_hsubps
},
3003 [0x7e] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movd, movd, , movq */
3004 [0x7f] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movq, movdqa, movdqu */
3005 [0xc4] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* pinsrw */
3006 [0xc5] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* pextrw */
3007 [0xd0] = { NULL
, gen_helper_addsubpd
, NULL
, gen_helper_addsubps
},
3008 [0xd1] = MMX_OP2(psrlw
),
3009 [0xd2] = MMX_OP2(psrld
),
3010 [0xd3] = MMX_OP2(psrlq
),
3011 [0xd4] = MMX_OP2(paddq
),
3012 [0xd5] = MMX_OP2(pmullw
),
3013 [0xd6] = { NULL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
},
3014 [0xd7] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* pmovmskb */
3015 [0xd8] = MMX_OP2(psubusb
),
3016 [0xd9] = MMX_OP2(psubusw
),
3017 [0xda] = MMX_OP2(pminub
),
3018 [0xdb] = MMX_OP2(pand
),
3019 [0xdc] = MMX_OP2(paddusb
),
3020 [0xdd] = MMX_OP2(paddusw
),
3021 [0xde] = MMX_OP2(pmaxub
),
3022 [0xdf] = MMX_OP2(pandn
),
3023 [0xe0] = MMX_OP2(pavgb
),
3024 [0xe1] = MMX_OP2(psraw
),
3025 [0xe2] = MMX_OP2(psrad
),
3026 [0xe3] = MMX_OP2(pavgw
),
3027 [0xe4] = MMX_OP2(pmulhuw
),
3028 [0xe5] = MMX_OP2(pmulhw
),
3029 [0xe6] = { NULL
, gen_helper_cvttpd2dq
, gen_helper_cvtdq2pd
, gen_helper_cvtpd2dq
},
3030 [0xe7] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movntq, movntq */
3031 [0xe8] = MMX_OP2(psubsb
),
3032 [0xe9] = MMX_OP2(psubsw
),
3033 [0xea] = MMX_OP2(pminsw
),
3034 [0xeb] = MMX_OP2(por
),
3035 [0xec] = MMX_OP2(paddsb
),
3036 [0xed] = MMX_OP2(paddsw
),
3037 [0xee] = MMX_OP2(pmaxsw
),
3038 [0xef] = MMX_OP2(pxor
),
3039 [0xf0] = { NULL
, NULL
, NULL
, SSE_SPECIAL
}, /* lddqu */
3040 [0xf1] = MMX_OP2(psllw
),
3041 [0xf2] = MMX_OP2(pslld
),
3042 [0xf3] = MMX_OP2(psllq
),
3043 [0xf4] = MMX_OP2(pmuludq
),
3044 [0xf5] = MMX_OP2(pmaddwd
),
3045 [0xf6] = MMX_OP2(psadbw
),
3046 [0xf7] = { (SSEFunc_0_epp
)gen_helper_maskmov_mmx
,
3047 (SSEFunc_0_epp
)gen_helper_maskmov_xmm
}, /* XXX: casts */
3048 [0xf8] = MMX_OP2(psubb
),
3049 [0xf9] = MMX_OP2(psubw
),
3050 [0xfa] = MMX_OP2(psubl
),
3051 [0xfb] = MMX_OP2(psubq
),
3052 [0xfc] = MMX_OP2(paddb
),
3053 [0xfd] = MMX_OP2(paddw
),
3054 [0xfe] = MMX_OP2(paddl
),
/*
 * NOTE(review): this chunk is a line-mangled extraction of a larger
 * translator source (QEMU-style i386 TCG front end); original line
 * numbers ("3057", ...) are fused into the text, and structural lines
 * such as this table's closing "};" appear to have been dropped by the
 * extraction.  Code is left byte-identical; comments only.
 *
 * Dispatch table for the immediate-form MMX/SSE shift instructions
 * (opcodes 0x71/0x72/0x73, shift-by-immediate group).  It is read in
 * gen_sse (later in this chunk) as:
 *     sse_op_table2[((b - 1) & 3) * 8 + ((modrm >> 3) & 7)][b1]
 * i.e. the first index is opcode-group * 8 plus the ModRM reg field,
 * and the second index b1 selects the variant by mandatory prefix.
 * Slot [1] holds the SSE/xmm helper (as the psrldq/pslldq entries
 * show); slot [0] is presumably the MMX form via MMX_OP2 -- that
 * macro's definition is not visible in this chunk, TODO confirm.
 * Unlisted [group][reg] combinations stay NULL (invalid encodings);
 * psrldq/pslldq exist only in the SSE slot.
 */
3057 static const SSEFunc_0_epp sse_op_table2
[3 * 8][2] = {
3058 [0 + 2] = MMX_OP2(psrlw
),
3059 [0 + 4] = MMX_OP2(psraw
),
3060 [0 + 6] = MMX_OP2(psllw
),
3061 [8 + 2] = MMX_OP2(psrld
),
3062 [8 + 4] = MMX_OP2(psrad
),
3063 [8 + 6] = MMX_OP2(pslld
),
3064 [16 + 2] = MMX_OP2(psrlq
),
3065 [16 + 3] = { NULL
, gen_helper_psrldq_xmm
},
3066 [16 + 6] = MMX_OP2(psllq
),
3067 [16 + 7] = { NULL
, gen_helper_pslldq_xmm
},
/*
 * cvtsi2ss/cvtsi2sd dispatch for a 32-bit integer source.  Indexed in
 * gen_sse below as sse_op_table3ai[(b >> 8) & 1] (prefix byte selects
 * the ss vs. sd form).  NOTE(review): the extraction preserves only
 * the cvtsi2ss entry; the cvtsi2sd entry and the closing "};" appear
 * to have been dropped -- restore from the upstream source.
 */
3070 static const SSEFunc_0_epi sse_op_table3ai
[] = {
3071 gen_helper_cvtsi2ss
,
/*
 * 64-bit-source counterpart of sse_op_table3ai (cvtsq2ss/cvtsq2sd),
 * only compiled for x86-64 targets.  Indexed in gen_sse below as
 * sse_op_table3aq[(b >> 8) & 1].  NOTE(review): the extraction kept
 * only the cvtsq2ss entry; the second entry, the closing "};" and the
 * matching "#endif" were dropped -- restore from upstream.
 */
3075 #ifdef TARGET_X86_64
3076 static const SSEFunc_0_epl sse_op_table3aq
[] = {
3077 gen_helper_cvtsq2ss
,
/*
 * cvt(t)ss2si / cvt(t)sd2si dispatch producing a 32-bit integer.
 * Indexed in gen_sse below as
 *     sse_op_table3bi[((b >> 7) & 2) | (b & 1)]
 * so it is logically a 4-entry table: {cvttss2si, cvtss2si,
 * cvttsd2si, cvtsd2si}.  NOTE(review): the extraction shows only the
 * first three entries; the cvtsd2si entry and the closing "};" were
 * dropped -- restore from upstream.
 */
3082 static const SSEFunc_i_ep sse_op_table3bi
[] = {
3083 gen_helper_cvttss2si
,
3084 gen_helper_cvtss2si
,
3085 gen_helper_cvttsd2si
,
/*
 * 64-bit-result counterpart of sse_op_table3bi (cvt(t)ss2sq /
 * cvt(t)sd2sq), x86-64 only.  Indexed in gen_sse below with the same
 * ((b >> 7) & 2) | (b & 1) formula.  NOTE(review): the fourth entry
 * (cvtsd2sq), the closing "};" and the matching "#endif" were dropped
 * by the extraction -- restore from upstream.
 */
3089 #ifdef TARGET_X86_64
3090 static const SSEFunc_l_ep sse_op_table3bq
[] = {
3091 gen_helper_cvttss2sq
,
3092 gen_helper_cvtss2sq
,
3093 gen_helper_cvttsd2sq
,
/*
 * NOTE(review): only the declaration line of sse_op_table4 survives in
 * this extraction -- all [8][4] initializer entries and the closing
 * "};" were dropped, and no use of the table is visible in this chunk.
 * Restore the initializer from the upstream source before relying on
 * this definition.
 */
3098 static const SSEFunc_0_epp sse_op_table4
[8][4] = {
/*
 * AMD 3DNow! helper table, 256 entries.  The inline entry comments
 * (pfrcpit1, pfrsqit1, pavgusb, ...) show it maps a one-byte 3DNow!
 * opcode suffix to its TCG helper; several suffixes deliberately reuse
 * gen_helper_movq where the extra precision of the real insn is not
 * modelled.  The indexing site is not visible in this chunk --
 * presumably the imm8 suffix byte of 0F 0F encodings, TODO confirm
 * against gen_sse.  Unlisted suffixes stay NULL (invalid).
 * NOTE(review): the closing "};" was dropped by the extraction.
 */
3109 static const SSEFunc_0_epp sse_op_table5
[256] = {
3110 [0x0c] = gen_helper_pi2fw
,
3111 [0x0d] = gen_helper_pi2fd
,
3112 [0x1c] = gen_helper_pf2iw
,
3113 [0x1d] = gen_helper_pf2id
,
3114 [0x8a] = gen_helper_pfnacc
,
3115 [0x8e] = gen_helper_pfpnacc
,
3116 [0x90] = gen_helper_pfcmpge
,
3117 [0x94] = gen_helper_pfmin
,
3118 [0x96] = gen_helper_pfrcp
,
3119 [0x97] = gen_helper_pfrsqrt
,
3120 [0x9a] = gen_helper_pfsub
,
3121 [0x9e] = gen_helper_pfadd
,
3122 [0xa0] = gen_helper_pfcmpgt
,
3123 [0xa4] = gen_helper_pfmax
,
3124 [0xa6] = gen_helper_movq
, /* pfrcpit1; no need to actually increase precision */
3125 [0xa7] = gen_helper_movq
, /* pfrsqit1 */
3126 [0xaa] = gen_helper_pfsubr
,
3127 [0xae] = gen_helper_pfacc
,
3128 [0xb0] = gen_helper_pfcmpeq
,
3129 [0xb4] = gen_helper_pfmul
,
3130 [0xb6] = gen_helper_movq
, /* pfrcpit2 */
3131 [0xb7] = gen_helper_pmulhrw_mmx
,
3132 [0xbb] = gen_helper_pswapd
,
3133 [0xbf] = gen_helper_pavgb_mmx
/* pavgusb */
/*
 * Element type of the 0F 38 dispatch table below: op[0]/op[1] are the
 * two prefix-selected helper variants (gen_sse reads
 * sse_op_table6[b].op[b1]).  NOTE(review): gen_sse also reads
 * sse_op_table6[b].ext_mask (a CPUID feature mask, cf. the second
 * member produced by the SSSE3_OP/SSE41_OP macros below), so a mask
 * field belongs in this struct -- the extraction appears to have
 * dropped it along with the closing "};".  Restore from upstream.
 */
3136 struct SSEOpHelper_epp
{
3137 SSEFunc_0_epp op
[2];
/*
 * Element type of the 0F 3A dispatch table below; like SSEOpHelper_epp
 * but the helpers are SSEFunc_0_eppi (the trailing 'i' in the typedef
 * name suggests an extra immediate operand -- typedef not visible in
 * this chunk, TODO confirm).  NOTE(review): as with SSEOpHelper_epp,
 * the extraction appears to have dropped the CPUID feature-mask member
 * and the closing "};" -- restore from upstream.
 */
3141 struct SSEOpHelper_eppi
{
3142 SSEFunc_0_eppi op
[2];
/*
 * Table-entry builders for sse_op_table6/7 below: each pairs a helper
 * slot pair with the CPUID feature bit that gates the instruction
 * (checked against s->cpuid_ext_features via ext_mask in gen_sse).
 */
/* SSSE3 op: both MMX and SSE forms exist (via MMX_OP2). */
3146 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
/* SSE4.1 op: SSE/xmm form only, op[0] (MMX slot) stays NULL. */
3147 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
/* SSE4.2 op: likewise xmm-only, gated by the SSE4.2 feature bit. */
3148 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
/* SSE4.1 insn decoded by hand in gen_sse (SSE_SPECIAL marker). */
3149 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
/*
 * Three-byte-opcode 0F 38 dispatch table: SSSE3 / SSE4.1 / SSE4.2 ops,
 * indexed by the third opcode byte.  gen_sse selects
 * sse_op_table6[b].op[b1] and rejects the insn unless
 * s->cpuid_ext_features contains the entry's ext_mask.  Entries built
 * with SSE41_OP/SSE42_OP have no MMX form (op[0] == NULL);
 * SSE41_SPECIAL entries (e.g. 0x2a movntqda) are decoded by hand.
 * NOTE(review): the closing "};" was dropped by the extraction.
 */
3151 static const struct SSEOpHelper_epp sse_op_table6
[256] = {
3152 [0x00] = SSSE3_OP(pshufb
),
3153 [0x01] = SSSE3_OP(phaddw
),
3154 [0x02] = SSSE3_OP(phaddd
),
3155 [0x03] = SSSE3_OP(phaddsw
),
3156 [0x04] = SSSE3_OP(pmaddubsw
),
3157 [0x05] = SSSE3_OP(phsubw
),
3158 [0x06] = SSSE3_OP(phsubd
),
3159 [0x07] = SSSE3_OP(phsubsw
),
3160 [0x08] = SSSE3_OP(psignb
),
3161 [0x09] = SSSE3_OP(psignw
),
3162 [0x0a] = SSSE3_OP(psignd
),
3163 [0x0b] = SSSE3_OP(pmulhrsw
),
3164 [0x10] = SSE41_OP(pblendvb
),
3165 [0x14] = SSE41_OP(blendvps
),
3166 [0x15] = SSE41_OP(blendvpd
),
3167 [0x17] = SSE41_OP(ptest
),
3168 [0x1c] = SSSE3_OP(pabsb
),
3169 [0x1d] = SSSE3_OP(pabsw
),
3170 [0x1e] = SSSE3_OP(pabsd
),
3171 [0x20] = SSE41_OP(pmovsxbw
),
3172 [0x21] = SSE41_OP(pmovsxbd
),
3173 [0x22] = SSE41_OP(pmovsxbq
),
3174 [0x23] = SSE41_OP(pmovsxwd
),
3175 [0x24] = SSE41_OP(pmovsxwq
),
3176 [0x25] = SSE41_OP(pmovsxdq
),
3177 [0x28] = SSE41_OP(pmuldq
),
3178 [0x29] = SSE41_OP(pcmpeqq
),
3179 [0x2a] = SSE41_SPECIAL
, /* movntqda */
3180 [0x2b] = SSE41_OP(packusdw
),
3181 [0x30] = SSE41_OP(pmovzxbw
),
3182 [0x31] = SSE41_OP(pmovzxbd
),
3183 [0x32] = SSE41_OP(pmovzxbq
),
3184 [0x33] = SSE41_OP(pmovzxwd
),
3185 [0x34] = SSE41_OP(pmovzxwq
),
3186 [0x35] = SSE41_OP(pmovzxdq
),
3187 [0x37] = SSE42_OP(pcmpgtq
),
3188 [0x38] = SSE41_OP(pminsb
),
3189 [0x39] = SSE41_OP(pminsd
),
3190 [0x3a] = SSE41_OP(pminuw
),
3191 [0x3b] = SSE41_OP(pminud
),
3192 [0x3c] = SSE41_OP(pmaxsb
),
3193 [0x3d] = SSE41_OP(pmaxsd
),
3194 [0x3e] = SSE41_OP(pmaxuw
),
3195 [0x3f] = SSE41_OP(pmaxud
),
3196 [0x40] = SSE41_OP(pmulld
),
3197 [0x41] = SSE41_OP(phminposuw
),
/*
 * Three-byte-opcode 0F 3A dispatch table, indexed by the third opcode
 * byte; element type SSEOpHelper_eppi (helpers taking an immediate,
 * per the typedef name): SSE4.1 round/blend/dp ops, SSSE3 palignr and
 * the SSE4.2 string-compare insns.  SSE41_SPECIAL slots
 * (pextr*/pinsr*/insertps/extractps) are decoded by hand in gen_sse.
 * Each entry's CPUID feature bit gates the instruction, as with
 * sse_op_table6.  NOTE(review): the closing "};" was dropped by the
 * extraction.
 */
3200 static const struct SSEOpHelper_eppi sse_op_table7
[256] = {
3201 [0x08] = SSE41_OP(roundps
),
3202 [0x09] = SSE41_OP(roundpd
),
3203 [0x0a] = SSE41_OP(roundss
),
3204 [0x0b] = SSE41_OP(roundsd
),
3205 [0x0c] = SSE41_OP(blendps
),
3206 [0x0d] = SSE41_OP(blendpd
),
3207 [0x0e] = SSE41_OP(pblendw
),
3208 [0x0f] = SSSE3_OP(palignr
),
3209 [0x14] = SSE41_SPECIAL
, /* pextrb */
3210 [0x15] = SSE41_SPECIAL
, /* pextrw */
3211 [0x16] = SSE41_SPECIAL
, /* pextrd/pextrq */
3212 [0x17] = SSE41_SPECIAL
, /* extractps */
3213 [0x20] = SSE41_SPECIAL
, /* pinsrb */
3214 [0x21] = SSE41_SPECIAL
, /* insertps */
3215 [0x22] = SSE41_SPECIAL
, /* pinsrd/pinsrq */
3216 [0x40] = SSE41_OP(dpps
),
3217 [0x41] = SSE41_OP(dppd
),
3218 [0x42] = SSE41_OP(mpsadbw
),
3219 [0x60] = SSE42_OP(pcmpestrm
),
3220 [0x61] = SSE42_OP(pcmpestri
),
3221 [0x62] = SSE42_OP(pcmpistrm
),
3222 [0x63] = SSE42_OP(pcmpistri
),
3225 static void gen_sse(CPUX86State
*env
, DisasContext
*s
, int b
,
3226 target_ulong pc_start
, int rex_r
)
3228 int b1
, op1_offset
, op2_offset
, is_xmm
, val
, ot
;
3229 int modrm
, mod
, rm
, reg
, reg_addr
, offset_addr
;
3230 SSEFunc_0_epp sse_fn_epp
;
3231 SSEFunc_0_eppi sse_fn_eppi
;
3232 SSEFunc_0_ppi sse_fn_ppi
;
3233 SSEFunc_0_eppt sse_fn_eppt
;
3236 if (s
->prefix
& PREFIX_DATA
)
3238 else if (s
->prefix
& PREFIX_REPZ
)
3240 else if (s
->prefix
& PREFIX_REPNZ
)
3244 sse_fn_epp
= sse_op_table1
[b
][b1
];
3248 if ((b
<= 0x5f && b
>= 0x10) || b
== 0xc6 || b
== 0xc2) {
3258 /* simple MMX/SSE operation */
3259 if (s
->flags
& HF_TS_MASK
) {
3260 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
3263 if (s
->flags
& HF_EM_MASK
) {
3265 gen_exception(s
, EXCP06_ILLOP
, pc_start
- s
->cs_base
);
3268 if (is_xmm
&& !(s
->flags
& HF_OSFXSR_MASK
))
3269 if ((b
!= 0x38 && b
!= 0x3a) || (s
->prefix
& PREFIX_DATA
))
3272 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
))
3275 gen_helper_emms(cpu_env
);
3280 gen_helper_emms(cpu_env
);
3283 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3284 the static cpu state) */
3286 gen_helper_enter_mmx(cpu_env
);
3289 modrm
= cpu_ldub_code(env
, s
->pc
++);
3290 reg
= ((modrm
>> 3) & 7);
3293 mod
= (modrm
>> 6) & 3;
3294 if (sse_fn_epp
== SSE_SPECIAL
) {
3297 case 0x0e7: /* movntq */
3300 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3301 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3303 case 0x1e7: /* movntdq */
3304 case 0x02b: /* movntps */
3305 case 0x12b: /* movntps */
3308 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3309 gen_sto_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3311 case 0x3f0: /* lddqu */
3314 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3315 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3317 case 0x22b: /* movntss */
3318 case 0x32b: /* movntsd */
3321 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3323 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,
3326 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
3327 xmm_regs
[reg
].XMM_L(0)));
3328 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
3331 case 0x6e: /* movd mm, ea */
3332 #ifdef TARGET_X86_64
3333 if (s
->dflag
== 2) {
3334 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 0);
3335 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3339 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 0);
3340 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3341 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3342 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3343 gen_helper_movl_mm_T0_mmx(cpu_ptr0
, cpu_tmp2_i32
);
3346 case 0x16e: /* movd xmm, ea */
3347 #ifdef TARGET_X86_64
3348 if (s
->dflag
== 2) {
3349 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 0);
3350 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3351 offsetof(CPUX86State
,xmm_regs
[reg
]));
3352 gen_helper_movq_mm_T0_xmm(cpu_ptr0
, cpu_T
[0]);
3356 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 0);
3357 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3358 offsetof(CPUX86State
,xmm_regs
[reg
]));
3359 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3360 gen_helper_movl_mm_T0_xmm(cpu_ptr0
, cpu_tmp2_i32
);
3363 case 0x6f: /* movq mm, ea */
3365 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3366 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3369 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
3370 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3371 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
3372 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3375 case 0x010: /* movups */
3376 case 0x110: /* movupd */
3377 case 0x028: /* movaps */
3378 case 0x128: /* movapd */
3379 case 0x16f: /* movdqa xmm, ea */
3380 case 0x26f: /* movdqu xmm, ea */
3382 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3383 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3385 rm
= (modrm
& 7) | REX_B(s
);
3386 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[reg
]),
3387 offsetof(CPUX86State
,xmm_regs
[rm
]));
3390 case 0x210: /* movss xmm, ea */
3392 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3393 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
3394 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3396 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)));
3397 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3398 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3400 rm
= (modrm
& 7) | REX_B(s
);
3401 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3402 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)));
3405 case 0x310: /* movsd xmm, ea */
3407 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3408 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3410 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3411 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3413 rm
= (modrm
& 7) | REX_B(s
);
3414 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3415 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3418 case 0x012: /* movlps */
3419 case 0x112: /* movlpd */
3421 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3422 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3425 rm
= (modrm
& 7) | REX_B(s
);
3426 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3427 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(1)));
3430 case 0x212: /* movsldup */
3432 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3433 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3435 rm
= (modrm
& 7) | REX_B(s
);
3436 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3437 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)));
3438 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)),
3439 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(2)));
3441 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)),
3442 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3443 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)),
3444 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3446 case 0x312: /* movddup */
3448 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3449 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3451 rm
= (modrm
& 7) | REX_B(s
);
3452 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3453 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3455 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)),
3456 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3458 case 0x016: /* movhps */
3459 case 0x116: /* movhpd */
3461 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3462 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3465 rm
= (modrm
& 7) | REX_B(s
);
3466 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)),
3467 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3470 case 0x216: /* movshdup */
3472 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3473 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3475 rm
= (modrm
& 7) | REX_B(s
);
3476 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)),
3477 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(1)));
3478 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)),
3479 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(3)));
3481 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3482 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)));
3483 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)),
3484 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3489 int bit_index
, field_length
;
3491 if (b1
== 1 && reg
!= 0)
3493 field_length
= cpu_ldub_code(env
, s
->pc
++) & 0x3F;
3494 bit_index
= cpu_ldub_code(env
, s
->pc
++) & 0x3F;
3495 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3496 offsetof(CPUX86State
,xmm_regs
[reg
]));
3498 gen_helper_extrq_i(cpu_env
, cpu_ptr0
,
3499 tcg_const_i32(bit_index
),
3500 tcg_const_i32(field_length
));
3502 gen_helper_insertq_i(cpu_env
, cpu_ptr0
,
3503 tcg_const_i32(bit_index
),
3504 tcg_const_i32(field_length
));
3507 case 0x7e: /* movd ea, mm */
3508 #ifdef TARGET_X86_64
3509 if (s
->dflag
== 2) {
3510 tcg_gen_ld_i64(cpu_T
[0], cpu_env
,
3511 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3512 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 1);
3516 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
3517 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_L(0)));
3518 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 1);
3521 case 0x17e: /* movd ea, xmm */
3522 #ifdef TARGET_X86_64
3523 if (s
->dflag
== 2) {
3524 tcg_gen_ld_i64(cpu_T
[0], cpu_env
,
3525 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3526 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 1);
3530 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
3531 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3532 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 1);
3535 case 0x27e: /* movq xmm, ea */
3537 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3538 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3540 rm
= (modrm
& 7) | REX_B(s
);
3541 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3542 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3544 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3546 case 0x7f: /* movq ea, mm */
3548 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3549 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3552 gen_op_movq(offsetof(CPUX86State
,fpregs
[rm
].mmx
),
3553 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3556 case 0x011: /* movups */
3557 case 0x111: /* movupd */
3558 case 0x029: /* movaps */
3559 case 0x129: /* movapd */
3560 case 0x17f: /* movdqa ea, xmm */
3561 case 0x27f: /* movdqu ea, xmm */
3563 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3564 gen_sto_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3566 rm
= (modrm
& 7) | REX_B(s
);
3567 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[rm
]),
3568 offsetof(CPUX86State
,xmm_regs
[reg
]));
3571 case 0x211: /* movss ea, xmm */
3573 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3574 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3575 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
3577 rm
= (modrm
& 7) | REX_B(s
);
3578 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)),
3579 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3582 case 0x311: /* movsd ea, xmm */
3584 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3585 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3587 rm
= (modrm
& 7) | REX_B(s
);
3588 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)),
3589 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3592 case 0x013: /* movlps */
3593 case 0x113: /* movlpd */
3595 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3596 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3601 case 0x017: /* movhps */
3602 case 0x117: /* movhpd */
3604 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3605 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3610 case 0x71: /* shift mm, im */
3613 case 0x171: /* shift xmm, im */
3619 val
= cpu_ldub_code(env
, s
->pc
++);
3621 gen_op_movl_T0_im(val
);
3622 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
3624 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(1)));
3625 op1_offset
= offsetof(CPUX86State
,xmm_t0
);
3627 gen_op_movl_T0_im(val
);
3628 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(0)));
3630 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(1)));
3631 op1_offset
= offsetof(CPUX86State
,mmx_t0
);
3633 sse_fn_epp
= sse_op_table2
[((b
- 1) & 3) * 8 +
3634 (((modrm
>> 3)) & 7)][b1
];
3639 rm
= (modrm
& 7) | REX_B(s
);
3640 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3643 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3645 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3646 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op1_offset
);
3647 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3649 case 0x050: /* movmskps */
3650 rm
= (modrm
& 7) | REX_B(s
);
3651 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3652 offsetof(CPUX86State
,xmm_regs
[rm
]));
3653 gen_helper_movmskps(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3654 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3655 gen_op_mov_reg_T0(OT_LONG
, reg
);
3657 case 0x150: /* movmskpd */
3658 rm
= (modrm
& 7) | REX_B(s
);
3659 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3660 offsetof(CPUX86State
,xmm_regs
[rm
]));
3661 gen_helper_movmskpd(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3662 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3663 gen_op_mov_reg_T0(OT_LONG
, reg
);
3665 case 0x02a: /* cvtpi2ps */
3666 case 0x12a: /* cvtpi2pd */
3667 gen_helper_enter_mmx(cpu_env
);
3669 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3670 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3671 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
3674 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3676 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3677 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3678 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3681 gen_helper_cvtpi2ps(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3685 gen_helper_cvtpi2pd(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3689 case 0x22a: /* cvtsi2ss */
3690 case 0x32a: /* cvtsi2sd */
3691 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3692 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3693 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3694 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3695 if (ot
== OT_LONG
) {
3696 SSEFunc_0_epi sse_fn_epi
= sse_op_table3ai
[(b
>> 8) & 1];
3697 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3698 sse_fn_epi(cpu_env
, cpu_ptr0
, cpu_tmp2_i32
);
3700 #ifdef TARGET_X86_64
3701 SSEFunc_0_epl sse_fn_epl
= sse_op_table3aq
[(b
>> 8) & 1];
3702 sse_fn_epl(cpu_env
, cpu_ptr0
, cpu_T
[0]);
3708 case 0x02c: /* cvttps2pi */
3709 case 0x12c: /* cvttpd2pi */
3710 case 0x02d: /* cvtps2pi */
3711 case 0x12d: /* cvtpd2pi */
3712 gen_helper_enter_mmx(cpu_env
);
3714 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3715 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3716 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
3718 rm
= (modrm
& 7) | REX_B(s
);
3719 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3721 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
);
3722 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3723 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3726 gen_helper_cvttps2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3729 gen_helper_cvttpd2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3732 gen_helper_cvtps2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3735 gen_helper_cvtpd2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3739 case 0x22c: /* cvttss2si */
3740 case 0x32c: /* cvttsd2si */
3741 case 0x22d: /* cvtss2si */
3742 case 0x32d: /* cvtsd2si */
3743 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3745 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3747 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_t0
.XMM_Q(0)));
3749 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
3750 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
3752 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3754 rm
= (modrm
& 7) | REX_B(s
);
3755 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3757 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3758 if (ot
== OT_LONG
) {
3759 SSEFunc_i_ep sse_fn_i_ep
=
3760 sse_op_table3bi
[((b
>> 7) & 2) | (b
& 1)];
3761 sse_fn_i_ep(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3762 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3764 #ifdef TARGET_X86_64
3765 SSEFunc_l_ep sse_fn_l_ep
=
3766 sse_op_table3bq
[((b
>> 7) & 2) | (b
& 1)];
3767 sse_fn_l_ep(cpu_T
[0], cpu_env
, cpu_ptr0
);
3772 gen_op_mov_reg_T0(ot
, reg
);
3774 case 0xc4: /* pinsrw */
3777 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
3778 val
= cpu_ldub_code(env
, s
->pc
++);
3781 tcg_gen_st16_tl(cpu_T
[0], cpu_env
,
3782 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_W(val
)));
3785 tcg_gen_st16_tl(cpu_T
[0], cpu_env
,
3786 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_W(val
)));
3789 case 0xc5: /* pextrw */
3793 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3794 val
= cpu_ldub_code(env
, s
->pc
++);
3797 rm
= (modrm
& 7) | REX_B(s
);
3798 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
,
3799 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_W(val
)));
3803 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
,
3804 offsetof(CPUX86State
,fpregs
[rm
].mmx
.MMX_W(val
)));
3806 reg
= ((modrm
>> 3) & 7) | rex_r
;
3807 gen_op_mov_reg_T0(ot
, reg
);
3809 case 0x1d6: /* movq ea, xmm */
3811 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3812 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3814 rm
= (modrm
& 7) | REX_B(s
);
3815 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)),
3816 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3817 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(1)));
3820 case 0x2d6: /* movq2dq */
3821 gen_helper_enter_mmx(cpu_env
);
3823 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3824 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3825 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3827 case 0x3d6: /* movdq2q */
3828 gen_helper_enter_mmx(cpu_env
);
3829 rm
= (modrm
& 7) | REX_B(s
);
3830 gen_op_movq(offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
),
3831 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3833 case 0xd7: /* pmovmskb */
3838 rm
= (modrm
& 7) | REX_B(s
);
3839 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[rm
]));
3840 gen_helper_pmovmskb_xmm(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3843 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3844 gen_helper_pmovmskb_mmx(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3846 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3847 reg
= ((modrm
>> 3) & 7) | rex_r
;
3848 gen_op_mov_reg_T0(OT_LONG
, reg
);
3854 if ((b
& 0xf0) == 0xf0) {
3857 modrm
= cpu_ldub_code(env
, s
->pc
++);
3859 reg
= ((modrm
>> 3) & 7) | rex_r
;
3860 mod
= (modrm
>> 6) & 3;
3865 sse_fn_epp
= sse_op_table6
[b
].op
[b1
];
3869 if (!(s
->cpuid_ext_features
& sse_op_table6
[b
].ext_mask
))
3873 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3875 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
3877 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3878 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3880 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3881 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3882 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3883 gen_ldq_env_A0(s
->mem_index
, op2_offset
+
3884 offsetof(XMMReg
, XMM_Q(0)));
3886 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3887 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3888 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
3889 (s
->mem_index
>> 2) - 1);
3890 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
3891 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, op2_offset
+
3892 offsetof(XMMReg
, XMM_L(0)));
3894 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3895 tcg_gen_qemu_ld16u(cpu_tmp0
, cpu_A0
,
3896 (s
->mem_index
>> 2) - 1);
3897 tcg_gen_st16_tl(cpu_tmp0
, cpu_env
, op2_offset
+
3898 offsetof(XMMReg
, XMM_W(0)));
3900 case 0x2a: /* movntqda */
3901 gen_ldo_env_A0(s
->mem_index
, op1_offset
);
3904 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
3908 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
3910 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3912 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3913 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3914 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
3917 if (sse_fn_epp
== SSE_SPECIAL
) {
3921 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3922 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3923 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3926 set_cc_op(s
, CC_OP_EFLAGS
);
3933 /* Various integer extensions at 0f 38 f[0-f]. */
3934 b
= modrm
| (b1
<< 8);
3935 modrm
= cpu_ldub_code(env
, s
->pc
++);
3936 reg
= ((modrm
>> 3) & 7) | rex_r
;
3939 case 0x3f0: /* crc32 Gd,Eb */
3940 case 0x3f1: /* crc32 Gd,Ey */
3942 if (!(s
->cpuid_ext_features
& CPUID_EXT_SSE42
)) {
3945 if ((b
& 0xff) == 0xf0) {
3947 } else if (s
->dflag
!= 2) {
3948 ot
= (s
->prefix
& PREFIX_DATA
? OT_WORD
: OT_LONG
);
3953 gen_op_mov_TN_reg(OT_LONG
, 0, reg
);
3954 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3955 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3956 gen_helper_crc32(cpu_T
[0], cpu_tmp2_i32
,
3957 cpu_T
[0], tcg_const_i32(8 << ot
));
3959 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3960 gen_op_mov_reg_T0(ot
, reg
);
3963 case 0x1f0: /* crc32 or movbe */
3965 /* For these insns, the f3 prefix is supposed to have priority
3966 over the 66 prefix, but that's not what we implement above
3968 if (s
->prefix
& PREFIX_REPNZ
) {
3972 case 0x0f0: /* movbe Gy,My */
3973 case 0x0f1: /* movbe My,Gy */
3974 if (!(s
->cpuid_ext_features
& CPUID_EXT_MOVBE
)) {
3977 if (s
->dflag
!= 2) {
3978 ot
= (s
->prefix
& PREFIX_DATA
? OT_WORD
: OT_LONG
);
3983 /* Load the data incoming to the bswap. Note that the TCG
3984 implementation of bswap requires the input be zero
3985 extended. In the case of the loads, we simply know that
3986 gen_op_ld_v via gen_ldst_modrm does that already. */
3988 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3992 tcg_gen_ext16u_tl(cpu_T
[0], cpu_regs
[reg
]);
3995 tcg_gen_ext32u_tl(cpu_T
[0], cpu_regs
[reg
]);
3998 tcg_gen_mov_tl(cpu_T
[0], cpu_regs
[reg
]);
4005 tcg_gen_bswap16_tl(cpu_T
[0], cpu_T
[0]);
4008 tcg_gen_bswap32_tl(cpu_T
[0], cpu_T
[0]);
4010 #ifdef TARGET_X86_64
4012 tcg_gen_bswap64_tl(cpu_T
[0], cpu_T
[0]);
4018 gen_op_mov_reg_T0(ot
, reg
);
4020 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
4024 case 0x0f2: /* andn Gy, By, Ey */
4025 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
4026 || !(s
->prefix
& PREFIX_VEX
)
4030 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4031 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4032 tcg_gen_andc_tl(cpu_T
[0], cpu_regs
[s
->vex_v
], cpu_T
[0]);
4033 gen_op_mov_reg_T0(ot
, reg
);
4034 gen_op_update1_cc();
4035 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4038 case 0x0f7: /* bextr Gy, Ey, By */
4039 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
4040 || !(s
->prefix
& PREFIX_VEX
)
4044 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4048 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4049 /* Extract START, and shift the operand.
4050 Shifts larger than operand size get zeros. */
4051 tcg_gen_ext8u_tl(cpu_A0
, cpu_regs
[s
->vex_v
]);
4052 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], cpu_A0
);
4054 bound
= tcg_const_tl(ot
== OT_QUAD
? 63 : 31);
4055 zero
= tcg_const_tl(0);
4056 tcg_gen_movcond_tl(TCG_COND_LEU
, cpu_T
[0], cpu_A0
, bound
,
4058 tcg_temp_free(zero
);
4060 /* Extract the LEN into a mask. Lengths larger than
4061 operand size get all ones. */
4062 tcg_gen_shri_tl(cpu_A0
, cpu_regs
[s
->vex_v
], 8);
4063 tcg_gen_ext8u_tl(cpu_A0
, cpu_A0
);
4064 tcg_gen_movcond_tl(TCG_COND_LEU
, cpu_A0
, cpu_A0
, bound
,
4066 tcg_temp_free(bound
);
4067 tcg_gen_movi_tl(cpu_T
[1], 1);
4068 tcg_gen_shl_tl(cpu_T
[1], cpu_T
[1], cpu_A0
);
4069 tcg_gen_subi_tl(cpu_T
[1], cpu_T
[1], 1);
4070 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4072 gen_op_mov_reg_T0(ot
, reg
);
4073 gen_op_update1_cc();
4074 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4078 case 0x0f5: /* bzhi Gy, Ey, By */
4079 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4080 || !(s
->prefix
& PREFIX_VEX
)
4084 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4085 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4086 tcg_gen_ext8u_tl(cpu_T
[1], cpu_regs
[s
->vex_v
]);
4088 TCGv bound
= tcg_const_tl(ot
== OT_QUAD
? 63 : 31);
4089 /* Note that since we're using BMILG (in order to get O
4090 cleared) we need to store the inverse into C. */
4091 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_cc_src
,
4093 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_T
[1], cpu_T
[1],
4094 bound
, bound
, cpu_T
[1]);
4095 tcg_temp_free(bound
);
4097 tcg_gen_movi_tl(cpu_A0
, -1);
4098 tcg_gen_shl_tl(cpu_A0
, cpu_A0
, cpu_T
[1]);
4099 tcg_gen_andc_tl(cpu_T
[0], cpu_T
[0], cpu_A0
);
4100 gen_op_mov_reg_T0(ot
, reg
);
4101 gen_op_update1_cc();
4102 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
4105 case 0x3f6: /* mulx By, Gy, rdx, Ey */
4106 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4107 || !(s
->prefix
& PREFIX_VEX
)
4111 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4112 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4116 t0
= tcg_temp_new_i64();
4117 t1
= tcg_temp_new_i64();
4118 #ifdef TARGET_X86_64
4119 tcg_gen_ext32u_i64(t0
, cpu_T
[0]);
4120 tcg_gen_ext32u_i64(t1
, cpu_regs
[R_EDX
]);
4122 tcg_gen_extu_i32_i64(t0
, cpu_T
[0]);
4123 tcg_gen_extu_i32_i64(t0
, cpu_regs
[R_EDX
]);
4125 tcg_gen_mul_i64(t0
, t0
, t1
);
4126 tcg_gen_trunc_i64_tl(cpu_T
[0], t0
);
4127 tcg_gen_shri_i64(t0
, t0
, 32);
4128 tcg_gen_trunc_i64_tl(cpu_T
[1], t0
);
4129 tcg_temp_free_i64(t0
);
4130 tcg_temp_free_i64(t1
);
4131 gen_op_mov_reg_T0(OT_LONG
, s
->vex_v
);
4132 gen_op_mov_reg_T1(OT_LONG
, reg
);
4134 #ifdef TARGET_X86_64
4136 tcg_gen_mov_tl(cpu_T
[1], cpu_regs
[R_EDX
]);
4137 tcg_gen_mul_tl(cpu_regs
[s
->vex_v
], cpu_T
[0], cpu_T
[1]);
4138 gen_helper_umulh(cpu_regs
[reg
], cpu_T
[0], cpu_T
[1]);
4144 case 0x3f5: /* pdep Gy, By, Ey */
4145 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4146 || !(s
->prefix
& PREFIX_VEX
)
4150 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4151 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4152 /* Note that by zero-extending the mask operand, we
4153 automatically handle zero-extending the result. */
4154 if (s
->dflag
== 2) {
4155 tcg_gen_mov_tl(cpu_T
[1], cpu_regs
[s
->vex_v
]);
4157 tcg_gen_ext32u_tl(cpu_T
[1], cpu_regs
[s
->vex_v
]);
4159 gen_helper_pdep(cpu_regs
[reg
], cpu_T
[0], cpu_T
[1]);
4162 case 0x2f5: /* pext Gy, By, Ey */
4163 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4164 || !(s
->prefix
& PREFIX_VEX
)
4168 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4169 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4170 /* Note that by zero-extending the mask operand, we
4171 automatically handle zero-extending the result. */
4172 if (s
->dflag
== 2) {
4173 tcg_gen_mov_tl(cpu_T
[1], cpu_regs
[s
->vex_v
]);
4175 tcg_gen_ext32u_tl(cpu_T
[1], cpu_regs
[s
->vex_v
]);
4177 gen_helper_pext(cpu_regs
[reg
], cpu_T
[0], cpu_T
[1]);
4180 case 0x1f6: /* adcx Gy, Ey */
4181 case 0x2f6: /* adox Gy, Ey */
4182 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_ADX
)) {
4185 TCGv carry_in
, carry_out
, zero
;
4188 ot
= (s
->dflag
== 2 ? OT_QUAD
: OT_LONG
);
4189 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4191 /* Re-use the carry-out from a previous round. */
4192 TCGV_UNUSED(carry_in
);
4193 carry_out
= (b
== 0x1f6 ? cpu_cc_dst
: cpu_cc_src2
);
4197 carry_in
= cpu_cc_dst
;
4198 end_op
= CC_OP_ADCX
;
4200 end_op
= CC_OP_ADCOX
;
4205 end_op
= CC_OP_ADCOX
;
4207 carry_in
= cpu_cc_src2
;
4208 end_op
= CC_OP_ADOX
;
4212 end_op
= CC_OP_ADCOX
;
4213 carry_in
= carry_out
;
4216 end_op
= (b
== 0x1f6 ? CC_OP_ADCX
: CC_OP_ADCOX
);
4219 /* If we can't reuse carry-out, get it out of EFLAGS. */
4220 if (TCGV_IS_UNUSED(carry_in
)) {
4221 if (s
->cc_op
!= CC_OP_ADCX
&& s
->cc_op
!= CC_OP_ADOX
) {
4222 gen_compute_eflags(s
);
4224 carry_in
= cpu_tmp0
;
4225 tcg_gen_shri_tl(carry_in
, cpu_cc_src
,
4226 ctz32(b
== 0x1f6 ? CC_C
: CC_O
));
4227 tcg_gen_andi_tl(carry_in
, carry_in
, 1);
4231 #ifdef TARGET_X86_64
4233 /* If we know TL is 64-bit, and we want a 32-bit
4234 result, just do everything in 64-bit arithmetic. */
4235 tcg_gen_ext32u_i64(cpu_regs
[reg
], cpu_regs
[reg
]);
4236 tcg_gen_ext32u_i64(cpu_T
[0], cpu_T
[0]);
4237 tcg_gen_add_i64(cpu_T
[0], cpu_T
[0], cpu_regs
[reg
]);
4238 tcg_gen_add_i64(cpu_T
[0], cpu_T
[0], carry_in
);
4239 tcg_gen_ext32u_i64(cpu_regs
[reg
], cpu_T
[0]);
4240 tcg_gen_shri_i64(carry_out
, cpu_T
[0], 32);
4244 /* Otherwise compute the carry-out in two steps. */
4245 zero
= tcg_const_tl(0);
4246 tcg_gen_add2_tl(cpu_T
[0], carry_out
,
4249 tcg_gen_add2_tl(cpu_regs
[reg
], carry_out
,
4250 cpu_regs
[reg
], carry_out
,
4252 tcg_temp_free(zero
);
4255 set_cc_op(s
, end_op
);
4259 case 0x1f7: /* shlx Gy, Ey, By */
4260 case 0x2f7: /* sarx Gy, Ey, By */
4261 case 0x3f7: /* shrx Gy, Ey, By */
4262 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4263 || !(s
->prefix
& PREFIX_VEX
)
4267 ot
= (s
->dflag
== 2 ? OT_QUAD
: OT_LONG
);
4268 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4269 if (ot
== OT_QUAD
) {
4270 tcg_gen_andi_tl(cpu_T
[1], cpu_regs
[s
->vex_v
], 63);
4272 tcg_gen_andi_tl(cpu_T
[1], cpu_regs
[s
->vex_v
], 31);
4275 tcg_gen_shl_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4276 } else if (b
== 0x2f7) {
4277 if (ot
!= OT_QUAD
) {
4278 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4280 tcg_gen_sar_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4282 if (ot
!= OT_QUAD
) {
4283 tcg_gen_ext32u_tl(cpu_T
[0], cpu_T
[0]);
4285 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4287 gen_op_mov_reg_T0(ot
, reg
);
4293 case 0x3f3: /* Group 17 */
4294 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
4295 || !(s
->prefix
& PREFIX_VEX
)
4299 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4300 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4303 case 1: /* blsr By,Ey */
4304 tcg_gen_neg_tl(cpu_T
[1], cpu_T
[0]);
4305 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4306 gen_op_mov_reg_T0(ot
, s
->vex_v
);
4307 gen_op_update2_cc();
4308 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
4311 case 2: /* blsmsk By,Ey */
4312 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4313 tcg_gen_subi_tl(cpu_T
[0], cpu_T
[0], 1);
4314 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_cc_src
);
4315 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4316 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
4319 case 3: /* blsi By, Ey */
4320 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4321 tcg_gen_subi_tl(cpu_T
[0], cpu_T
[0], 1);
4322 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_cc_src
);
4323 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4324 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
4340 modrm
= cpu_ldub_code(env
, s
->pc
++);
4342 reg
= ((modrm
>> 3) & 7) | rex_r
;
4343 mod
= (modrm
>> 6) & 3;
4348 sse_fn_eppi
= sse_op_table7
[b
].op
[b1
];
4352 if (!(s
->cpuid_ext_features
& sse_op_table7
[b
].ext_mask
))
4355 if (sse_fn_eppi
== SSE_SPECIAL
) {
4356 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
4357 rm
= (modrm
& 7) | REX_B(s
);
4359 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4360 reg
= ((modrm
>> 3) & 7) | rex_r
;
4361 val
= cpu_ldub_code(env
, s
->pc
++);
4363 case 0x14: /* pextrb */
4364 tcg_gen_ld8u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4365 xmm_regs
[reg
].XMM_B(val
& 15)));
4367 gen_op_mov_reg_T0(ot
, rm
);
4369 tcg_gen_qemu_st8(cpu_T
[0], cpu_A0
,
4370 (s
->mem_index
>> 2) - 1);
4372 case 0x15: /* pextrw */
4373 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4374 xmm_regs
[reg
].XMM_W(val
& 7)));
4376 gen_op_mov_reg_T0(ot
, rm
);
4378 tcg_gen_qemu_st16(cpu_T
[0], cpu_A0
,
4379 (s
->mem_index
>> 2) - 1);
4382 if (ot
== OT_LONG
) { /* pextrd */
4383 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
4384 offsetof(CPUX86State
,
4385 xmm_regs
[reg
].XMM_L(val
& 3)));
4386 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
4388 gen_op_mov_reg_v(ot
, rm
, cpu_T
[0]);
4390 tcg_gen_qemu_st32(cpu_T
[0], cpu_A0
,
4391 (s
->mem_index
>> 2) - 1);
4392 } else { /* pextrq */
4393 #ifdef TARGET_X86_64
4394 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
4395 offsetof(CPUX86State
,
4396 xmm_regs
[reg
].XMM_Q(val
& 1)));
4398 gen_op_mov_reg_v(ot
, rm
, cpu_tmp1_i64
);
4400 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
4401 (s
->mem_index
>> 2) - 1);
4407 case 0x17: /* extractps */
4408 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4409 xmm_regs
[reg
].XMM_L(val
& 3)));
4411 gen_op_mov_reg_T0(ot
, rm
);
4413 tcg_gen_qemu_st32(cpu_T
[0], cpu_A0
,
4414 (s
->mem_index
>> 2) - 1);
4416 case 0x20: /* pinsrb */
4418 gen_op_mov_TN_reg(OT_LONG
, 0, rm
);
4420 tcg_gen_qemu_ld8u(cpu_tmp0
, cpu_A0
,
4421 (s
->mem_index
>> 2) - 1);
4422 tcg_gen_st8_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
,
4423 xmm_regs
[reg
].XMM_B(val
& 15)));
4425 case 0x21: /* insertps */
4427 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
4428 offsetof(CPUX86State
,xmm_regs
[rm
]
4429 .XMM_L((val
>> 6) & 3)));
4431 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
4432 (s
->mem_index
>> 2) - 1);
4433 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
4435 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
4436 offsetof(CPUX86State
,xmm_regs
[reg
]
4437 .XMM_L((val
>> 4) & 3)));
4439 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4440 cpu_env
, offsetof(CPUX86State
,
4441 xmm_regs
[reg
].XMM_L(0)));
4443 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4444 cpu_env
, offsetof(CPUX86State
,
4445 xmm_regs
[reg
].XMM_L(1)));
4447 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4448 cpu_env
, offsetof(CPUX86State
,
4449 xmm_regs
[reg
].XMM_L(2)));
4451 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4452 cpu_env
, offsetof(CPUX86State
,
4453 xmm_regs
[reg
].XMM_L(3)));
4456 if (ot
== OT_LONG
) { /* pinsrd */
4458 gen_op_mov_v_reg(ot
, cpu_tmp0
, rm
);
4460 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
4461 (s
->mem_index
>> 2) - 1);
4462 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
4463 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
4464 offsetof(CPUX86State
,
4465 xmm_regs
[reg
].XMM_L(val
& 3)));
4466 } else { /* pinsrq */
4467 #ifdef TARGET_X86_64
4469 gen_op_mov_v_reg(ot
, cpu_tmp1_i64
, rm
);
4471 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
4472 (s
->mem_index
>> 2) - 1);
4473 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
4474 offsetof(CPUX86State
,
4475 xmm_regs
[reg
].XMM_Q(val
& 1)));
4486 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4488 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
4490 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4491 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4492 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
4495 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4497 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4499 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4500 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4501 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
4504 val
= cpu_ldub_code(env
, s
->pc
++);
4506 if ((b
& 0xfc) == 0x60) { /* pcmpXstrX */
4507 set_cc_op(s
, CC_OP_EFLAGS
);
4510 /* The helper must use entire 64-bit gp registers */
4514 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4515 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4516 sse_fn_eppi(cpu_env
, cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4520 /* Various integer extensions at 0f 3a f[0-f]. */
4521 b
= modrm
| (b1
<< 8);
4522 modrm
= cpu_ldub_code(env
, s
->pc
++);
4523 reg
= ((modrm
>> 3) & 7) | rex_r
;
4526 case 0x3f0: /* rorx Gy,Ey, Ib */
4527 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4528 || !(s
->prefix
& PREFIX_VEX
)
4532 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4533 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4534 b
= cpu_ldub_code(env
, s
->pc
++);
4535 if (ot
== OT_QUAD
) {
4536 tcg_gen_rotri_tl(cpu_T
[0], cpu_T
[0], b
& 63);
4538 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4539 tcg_gen_rotri_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, b
& 31);
4540 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
4542 gen_op_mov_reg_T0(ot
, reg
);
4554 /* generic MMX or SSE operation */
4556 case 0x70: /* pshufx insn */
4557 case 0xc6: /* pshufx insn */
4558 case 0xc2: /* compare insns */
4565 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4567 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4568 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4569 if (b1
>= 2 && ((b
>= 0x50 && b
<= 0x5f && b
!= 0x5b) ||
4571 /* specific case for SSE single instructions */
4574 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
4575 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
4578 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_t0
.XMM_D(0)));
4581 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
4584 rm
= (modrm
& 7) | REX_B(s
);
4585 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
4588 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4590 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4591 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4592 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
4595 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4599 case 0x0f: /* 3DNow! data insns */
4600 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
))
4602 val
= cpu_ldub_code(env
, s
->pc
++);
4603 sse_fn_epp
= sse_op_table5
[val
];
4607 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4608 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4609 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4611 case 0x70: /* pshufx insn */
4612 case 0xc6: /* pshufx insn */
4613 val
= cpu_ldub_code(env
, s
->pc
++);
4614 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4615 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4616 /* XXX: introduce a new table? */
4617 sse_fn_ppi
= (SSEFunc_0_ppi
)sse_fn_epp
;
4618 sse_fn_ppi(cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4622 val
= cpu_ldub_code(env
, s
->pc
++);
4625 sse_fn_epp
= sse_op_table4
[val
][b1
];
4627 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4628 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4629 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4632 /* maskmov : we must prepare A0 */
4635 #ifdef TARGET_X86_64
4636 if (s
->aflag
== 2) {
4637 gen_op_movq_A0_reg(R_EDI
);
4641 gen_op_movl_A0_reg(R_EDI
);
4643 gen_op_andl_A0_ffff();
4645 gen_add_A0_ds_seg(s
);
4647 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4648 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4649 /* XXX: introduce a new table? */
4650 sse_fn_eppt
= (SSEFunc_0_eppt
)sse_fn_epp
;
4651 sse_fn_eppt(cpu_env
, cpu_ptr0
, cpu_ptr1
, cpu_A0
);
4654 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4655 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4656 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4659 if (b
== 0x2e || b
== 0x2f) {
4660 set_cc_op(s
, CC_OP_EFLAGS
);
4665 /* convert one instruction. s->is_jmp is set if the translation must
4666 be stopped. Return the next pc value */
4667 static target_ulong
disas_insn(CPUX86State
*env
, DisasContext
*s
,
4668 target_ulong pc_start
)
4670 int b
, prefixes
, aflag
, dflag
;
4672 int modrm
, reg
, rm
, mod
, reg_addr
, op
, opreg
, offset_addr
, val
;
4673 target_ulong next_eip
, tval
;
4676 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4677 tcg_gen_debug_insn_start(pc_start
);
4686 #ifdef TARGET_X86_64
4691 s
->rip_offset
= 0; /* for relative ip address */
4695 b
= cpu_ldub_code(env
, s
->pc
);
4697 /* Collect prefixes. */
4700 prefixes
|= PREFIX_REPZ
;
4703 prefixes
|= PREFIX_REPNZ
;
4706 prefixes
|= PREFIX_LOCK
;
4727 prefixes
|= PREFIX_DATA
;
4730 prefixes
|= PREFIX_ADR
;
4732 #ifdef TARGET_X86_64
4736 rex_w
= (b
>> 3) & 1;
4737 rex_r
= (b
& 0x4) << 1;
4738 s
->rex_x
= (b
& 0x2) << 2;
4739 REX_B(s
) = (b
& 0x1) << 3;
4740 x86_64_hregs
= 1; /* select uniform byte register addressing */
4745 case 0xc5: /* 2-byte VEX */
4746 case 0xc4: /* 3-byte VEX */
4747 /* VEX prefixes cannot be used except in 32-bit mode.
4748 Otherwise the instruction is LES or LDS. */
4749 if (s
->code32
&& !s
->vm86
) {
4750 static const int pp_prefix
[4] = {
4751 0, PREFIX_DATA
, PREFIX_REPZ
, PREFIX_REPNZ
4753 int vex3
, vex2
= cpu_ldub_code(env
, s
->pc
);
4755 if (!CODE64(s
) && (vex2
& 0xc0) != 0xc0) {
4756 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4757 otherwise the instruction is LES or LDS. */
4762 /* 4.1.1-4.1.3: No preceeding lock, 66, f2, f3, or rex prefixes. */
4763 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
4764 | PREFIX_LOCK
| PREFIX_DATA
)) {
4767 #ifdef TARGET_X86_64
4772 rex_r
= (~vex2
>> 4) & 8;
4775 b
= cpu_ldub_code(env
, s
->pc
++);
4777 #ifdef TARGET_X86_64
4778 s
->rex_x
= (~vex2
>> 3) & 8;
4779 s
->rex_b
= (~vex2
>> 2) & 8;
4781 vex3
= cpu_ldub_code(env
, s
->pc
++);
4782 rex_w
= (vex3
>> 7) & 1;
4783 switch (vex2
& 0x1f) {
4784 case 0x01: /* Implied 0f leading opcode bytes. */
4785 b
= cpu_ldub_code(env
, s
->pc
++) | 0x100;
4787 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4790 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4793 default: /* Reserved for future use. */
4797 s
->vex_v
= (~vex3
>> 3) & 0xf;
4798 s
->vex_l
= (vex3
>> 2) & 1;
4799 prefixes
|= pp_prefix
[vex3
& 3] | PREFIX_VEX
;
4804 /* Post-process prefixes. */
4805 if (prefixes
& PREFIX_DATA
) {
4808 if (prefixes
& PREFIX_ADR
) {
4811 #ifdef TARGET_X86_64
4814 /* 0x66 is ignored if rex.w is set */
4817 if (!(prefixes
& PREFIX_ADR
)) {
4823 s
->prefix
= prefixes
;
4827 /* lock generation */
4828 if (prefixes
& PREFIX_LOCK
)
4831 /* now check op code */
4835 /**************************/
4836 /* extended op code */
4837 b
= cpu_ldub_code(env
, s
->pc
++) | 0x100;
4840 /**************************/
4858 ot
= dflag
+ OT_WORD
;
4861 case 0: /* OP Ev, Gv */
4862 modrm
= cpu_ldub_code(env
, s
->pc
++);
4863 reg
= ((modrm
>> 3) & 7) | rex_r
;
4864 mod
= (modrm
>> 6) & 3;
4865 rm
= (modrm
& 7) | REX_B(s
);
4867 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4869 } else if (op
== OP_XORL
&& rm
== reg
) {
4871 /* xor reg, reg optimisation */
4872 set_cc_op(s
, CC_OP_CLR
);
4874 gen_op_mov_reg_T0(ot
, reg
);
4879 gen_op_mov_TN_reg(ot
, 1, reg
);
4880 gen_op(s
, op
, ot
, opreg
);
4882 case 1: /* OP Gv, Ev */
4883 modrm
= cpu_ldub_code(env
, s
->pc
++);
4884 mod
= (modrm
>> 6) & 3;
4885 reg
= ((modrm
>> 3) & 7) | rex_r
;
4886 rm
= (modrm
& 7) | REX_B(s
);
4888 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4889 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4890 } else if (op
== OP_XORL
&& rm
== reg
) {
4893 gen_op_mov_TN_reg(ot
, 1, rm
);
4895 gen_op(s
, op
, ot
, reg
);
4897 case 2: /* OP A, Iv */
4898 val
= insn_get(env
, s
, ot
);
4899 gen_op_movl_T1_im(val
);
4900 gen_op(s
, op
, ot
, OR_EAX
);
4909 case 0x80: /* GRP1 */
4918 ot
= dflag
+ OT_WORD
;
4920 modrm
= cpu_ldub_code(env
, s
->pc
++);
4921 mod
= (modrm
>> 6) & 3;
4922 rm
= (modrm
& 7) | REX_B(s
);
4923 op
= (modrm
>> 3) & 7;
4929 s
->rip_offset
= insn_const_size(ot
);
4930 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4941 val
= insn_get(env
, s
, ot
);
4944 val
= (int8_t)insn_get(env
, s
, OT_BYTE
);
4947 gen_op_movl_T1_im(val
);
4948 gen_op(s
, op
, ot
, opreg
);
4952 /**************************/
4953 /* inc, dec, and other misc arith */
4954 case 0x40 ... 0x47: /* inc Gv */
4955 ot
= dflag
? OT_LONG
: OT_WORD
;
4956 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), 1);
4958 case 0x48 ... 0x4f: /* dec Gv */
4959 ot
= dflag
? OT_LONG
: OT_WORD
;
4960 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), -1);
4962 case 0xf6: /* GRP3 */
4967 ot
= dflag
+ OT_WORD
;
4969 modrm
= cpu_ldub_code(env
, s
->pc
++);
4970 mod
= (modrm
>> 6) & 3;
4971 rm
= (modrm
& 7) | REX_B(s
);
4972 op
= (modrm
>> 3) & 7;
4975 s
->rip_offset
= insn_const_size(ot
);
4976 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4977 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
4979 gen_op_mov_TN_reg(ot
, 0, rm
);
4984 val
= insn_get(env
, s
, ot
);
4985 gen_op_movl_T1_im(val
);
4986 gen_op_testl_T0_T1_cc();
4987 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4990 tcg_gen_not_tl(cpu_T
[0], cpu_T
[0]);
4992 gen_op_st_T0_A0(ot
+ s
->mem_index
);
4994 gen_op_mov_reg_T0(ot
, rm
);
4998 tcg_gen_neg_tl(cpu_T
[0], cpu_T
[0]);
5000 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5002 gen_op_mov_reg_T0(ot
, rm
);
5004 gen_op_update_neg_cc();
5005 set_cc_op(s
, CC_OP_SUBB
+ ot
);
5010 gen_op_mov_TN_reg(OT_BYTE
, 1, R_EAX
);
5011 tcg_gen_ext8u_tl(cpu_T
[0], cpu_T
[0]);
5012 tcg_gen_ext8u_tl(cpu_T
[1], cpu_T
[1]);
5013 /* XXX: use 32 bit mul which could be faster */
5014 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5015 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
5016 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5017 tcg_gen_andi_tl(cpu_cc_src
, cpu_T
[0], 0xff00);
5018 set_cc_op(s
, CC_OP_MULB
);
5021 gen_op_mov_TN_reg(OT_WORD
, 1, R_EAX
);
5022 tcg_gen_ext16u_tl(cpu_T
[0], cpu_T
[0]);
5023 tcg_gen_ext16u_tl(cpu_T
[1], cpu_T
[1]);
5024 /* XXX: use 32 bit mul which could be faster */
5025 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5026 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
5027 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5028 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 16);
5029 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
5030 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
5031 set_cc_op(s
, CC_OP_MULW
);
5035 #ifdef TARGET_X86_64
5036 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
5037 tcg_gen_ext32u_tl(cpu_T
[0], cpu_T
[0]);
5038 tcg_gen_ext32u_tl(cpu_T
[1], cpu_T
[1]);
5039 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5040 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
5041 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5042 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 32);
5043 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
5044 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
5048 t0
= tcg_temp_new_i64();
5049 t1
= tcg_temp_new_i64();
5050 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
5051 tcg_gen_extu_i32_i64(t0
, cpu_T
[0]);
5052 tcg_gen_extu_i32_i64(t1
, cpu_T
[1]);
5053 tcg_gen_mul_i64(t0
, t0
, t1
);
5054 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
5055 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
5056 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5057 tcg_gen_shri_i64(t0
, t0
, 32);
5058 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
5059 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
5060 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
5063 set_cc_op(s
, CC_OP_MULL
);
5065 #ifdef TARGET_X86_64
5067 gen_helper_mulq_EAX_T0(cpu_env
, cpu_T
[0]);
5068 set_cc_op(s
, CC_OP_MULQ
);
5076 gen_op_mov_TN_reg(OT_BYTE
, 1, R_EAX
);
5077 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
5078 tcg_gen_ext8s_tl(cpu_T
[1], cpu_T
[1]);
5079 /* XXX: use 32 bit mul which could be faster */
5080 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5081 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
5082 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5083 tcg_gen_ext8s_tl(cpu_tmp0
, cpu_T
[0]);
5084 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
5085 set_cc_op(s
, CC_OP_MULB
);
5088 gen_op_mov_TN_reg(OT_WORD
, 1, R_EAX
);
5089 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5090 tcg_gen_ext16s_tl(cpu_T
[1], cpu_T
[1]);
5091 /* XXX: use 32 bit mul which could be faster */
5092 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5093 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
5094 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5095 tcg_gen_ext16s_tl(cpu_tmp0
, cpu_T
[0]);
5096 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
5097 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 16);
5098 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
5099 set_cc_op(s
, CC_OP_MULW
);
5103 #ifdef TARGET_X86_64
5104 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
5105 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
5106 tcg_gen_ext32s_tl(cpu_T
[1], cpu_T
[1]);
5107 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5108 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
5109 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5110 tcg_gen_ext32s_tl(cpu_tmp0
, cpu_T
[0]);
5111 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
5112 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 32);
5113 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
5117 t0
= tcg_temp_new_i64();
5118 t1
= tcg_temp_new_i64();
5119 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
5120 tcg_gen_ext_i32_i64(t0
, cpu_T
[0]);
5121 tcg_gen_ext_i32_i64(t1
, cpu_T
[1]);
5122 tcg_gen_mul_i64(t0
, t0
, t1
);
5123 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
5124 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
5125 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5126 tcg_gen_sari_tl(cpu_tmp0
, cpu_T
[0], 31);
5127 tcg_gen_shri_i64(t0
, t0
, 32);
5128 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
5129 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
5130 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
5133 set_cc_op(s
, CC_OP_MULL
);
5135 #ifdef TARGET_X86_64
5137 gen_helper_imulq_EAX_T0(cpu_env
, cpu_T
[0]);
5138 set_cc_op(s
, CC_OP_MULQ
);
5146 gen_jmp_im(pc_start
- s
->cs_base
);
5147 gen_helper_divb_AL(cpu_env
, cpu_T
[0]);
5150 gen_jmp_im(pc_start
- s
->cs_base
);
5151 gen_helper_divw_AX(cpu_env
, cpu_T
[0]);
5155 gen_jmp_im(pc_start
- s
->cs_base
);
5156 gen_helper_divl_EAX(cpu_env
, cpu_T
[0]);
5158 #ifdef TARGET_X86_64
5160 gen_jmp_im(pc_start
- s
->cs_base
);
5161 gen_helper_divq_EAX(cpu_env
, cpu_T
[0]);
5169 gen_jmp_im(pc_start
- s
->cs_base
);
5170 gen_helper_idivb_AL(cpu_env
, cpu_T
[0]);
5173 gen_jmp_im(pc_start
- s
->cs_base
);
5174 gen_helper_idivw_AX(cpu_env
, cpu_T
[0]);
5178 gen_jmp_im(pc_start
- s
->cs_base
);
5179 gen_helper_idivl_EAX(cpu_env
, cpu_T
[0]);
5181 #ifdef TARGET_X86_64
5183 gen_jmp_im(pc_start
- s
->cs_base
);
5184 gen_helper_idivq_EAX(cpu_env
, cpu_T
[0]);
5194 case 0xfe: /* GRP4 */
5195 case 0xff: /* GRP5 */
5199 ot
= dflag
+ OT_WORD
;
5201 modrm
= cpu_ldub_code(env
, s
->pc
++);
5202 mod
= (modrm
>> 6) & 3;
5203 rm
= (modrm
& 7) | REX_B(s
);
5204 op
= (modrm
>> 3) & 7;
5205 if (op
>= 2 && b
== 0xfe) {
5209 if (op
== 2 || op
== 4) {
5210 /* operand size for jumps is 64 bit */
5212 } else if (op
== 3 || op
== 5) {
5213 ot
= dflag
? OT_LONG
+ (rex_w
== 1) : OT_WORD
;
5214 } else if (op
== 6) {
5215 /* default push size is 64 bit */
5216 ot
= dflag
? OT_QUAD
: OT_WORD
;
5220 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5221 if (op
>= 2 && op
!= 3 && op
!= 5)
5222 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
5224 gen_op_mov_TN_reg(ot
, 0, rm
);
5228 case 0: /* inc Ev */
5233 gen_inc(s
, ot
, opreg
, 1);
5235 case 1: /* dec Ev */
5240 gen_inc(s
, ot
, opreg
, -1);
5242 case 2: /* call Ev */
5243 /* XXX: optimize if memory (no 'and' is necessary) */
5245 gen_op_andl_T0_ffff();
5246 next_eip
= s
->pc
- s
->cs_base
;
5247 gen_movtl_T1_im(next_eip
);
5252 case 3: /* lcall Ev */
5253 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5254 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
5255 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
5257 if (s
->pe
&& !s
->vm86
) {
5258 gen_update_cc_op(s
);
5259 gen_jmp_im(pc_start
- s
->cs_base
);
5260 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5261 gen_helper_lcall_protected(cpu_env
, cpu_tmp2_i32
, cpu_T
[1],
5262 tcg_const_i32(dflag
),
5263 tcg_const_i32(s
->pc
- pc_start
));
5265 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5266 gen_helper_lcall_real(cpu_env
, cpu_tmp2_i32
, cpu_T
[1],
5267 tcg_const_i32(dflag
),
5268 tcg_const_i32(s
->pc
- s
->cs_base
));
5272 case 4: /* jmp Ev */
5274 gen_op_andl_T0_ffff();
5278 case 5: /* ljmp Ev */
5279 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5280 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
5281 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
5283 if (s
->pe
&& !s
->vm86
) {
5284 gen_update_cc_op(s
);
5285 gen_jmp_im(pc_start
- s
->cs_base
);
5286 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5287 gen_helper_ljmp_protected(cpu_env
, cpu_tmp2_i32
, cpu_T
[1],
5288 tcg_const_i32(s
->pc
- pc_start
));
5290 gen_op_movl_seg_T0_vm(R_CS
);
5291 gen_op_movl_T0_T1();
5296 case 6: /* push Ev */
5304 case 0x84: /* test Ev, Gv */
5309 ot
= dflag
+ OT_WORD
;
5311 modrm
= cpu_ldub_code(env
, s
->pc
++);
5312 reg
= ((modrm
>> 3) & 7) | rex_r
;
5314 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5315 gen_op_mov_TN_reg(ot
, 1, reg
);
5316 gen_op_testl_T0_T1_cc();
5317 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
5320 case 0xa8: /* test eAX, Iv */
5325 ot
= dflag
+ OT_WORD
;
5326 val
= insn_get(env
, s
, ot
);
5328 gen_op_mov_TN_reg(ot
, 0, OR_EAX
);
5329 gen_op_movl_T1_im(val
);
5330 gen_op_testl_T0_T1_cc();
5331 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
5334 case 0x98: /* CWDE/CBW */
5335 #ifdef TARGET_X86_64
5337 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
5338 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
5339 gen_op_mov_reg_T0(OT_QUAD
, R_EAX
);
5343 gen_op_mov_TN_reg(OT_WORD
, 0, R_EAX
);
5344 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5345 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
5347 gen_op_mov_TN_reg(OT_BYTE
, 0, R_EAX
);
5348 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
5349 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
5352 case 0x99: /* CDQ/CWD */
5353 #ifdef TARGET_X86_64
5355 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EAX
);
5356 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 63);
5357 gen_op_mov_reg_T0(OT_QUAD
, R_EDX
);
5361 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
5362 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
5363 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 31);
5364 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
5366 gen_op_mov_TN_reg(OT_WORD
, 0, R_EAX
);
5367 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5368 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 15);
5369 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
5372 case 0x1af: /* imul Gv, Ev */
5373 case 0x69: /* imul Gv, Ev, I */
5375 ot
= dflag
+ OT_WORD
;
5376 modrm
= cpu_ldub_code(env
, s
->pc
++);
5377 reg
= ((modrm
>> 3) & 7) | rex_r
;
5379 s
->rip_offset
= insn_const_size(ot
);
5382 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5384 val
= insn_get(env
, s
, ot
);
5385 gen_op_movl_T1_im(val
);
5386 } else if (b
== 0x6b) {
5387 val
= (int8_t)insn_get(env
, s
, OT_BYTE
);
5388 gen_op_movl_T1_im(val
);
5390 gen_op_mov_TN_reg(ot
, 1, reg
);
5393 #ifdef TARGET_X86_64
5394 if (ot
== OT_QUAD
) {
5395 gen_helper_imulq_T0_T1(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
5398 if (ot
== OT_LONG
) {
5399 #ifdef TARGET_X86_64
5400 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
5401 tcg_gen_ext32s_tl(cpu_T
[1], cpu_T
[1]);
5402 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5403 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5404 tcg_gen_ext32s_tl(cpu_tmp0
, cpu_T
[0]);
5405 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
5409 t0
= tcg_temp_new_i64();
5410 t1
= tcg_temp_new_i64();
5411 tcg_gen_ext_i32_i64(t0
, cpu_T
[0]);
5412 tcg_gen_ext_i32_i64(t1
, cpu_T
[1]);
5413 tcg_gen_mul_i64(t0
, t0
, t1
);
5414 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
5415 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5416 tcg_gen_sari_tl(cpu_tmp0
, cpu_T
[0], 31);
5417 tcg_gen_shri_i64(t0
, t0
, 32);
5418 tcg_gen_trunc_i64_i32(cpu_T
[1], t0
);
5419 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[1], cpu_tmp0
);
5423 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5424 tcg_gen_ext16s_tl(cpu_T
[1], cpu_T
[1]);
5425 /* XXX: use 32 bit mul which could be faster */
5426 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5427 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5428 tcg_gen_ext16s_tl(cpu_tmp0
, cpu_T
[0]);
5429 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
5431 gen_op_mov_reg_T0(ot
, reg
);
5432 set_cc_op(s
, CC_OP_MULB
+ ot
);
5435 case 0x1c1: /* xadd Ev, Gv */
5439 ot
= dflag
+ OT_WORD
;
5440 modrm
= cpu_ldub_code(env
, s
->pc
++);
5441 reg
= ((modrm
>> 3) & 7) | rex_r
;
5442 mod
= (modrm
>> 6) & 3;
5444 rm
= (modrm
& 7) | REX_B(s
);
5445 gen_op_mov_TN_reg(ot
, 0, reg
);
5446 gen_op_mov_TN_reg(ot
, 1, rm
);
5447 gen_op_addl_T0_T1();
5448 gen_op_mov_reg_T1(ot
, reg
);
5449 gen_op_mov_reg_T0(ot
, rm
);
5451 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5452 gen_op_mov_TN_reg(ot
, 0, reg
);
5453 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5454 gen_op_addl_T0_T1();
5455 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5456 gen_op_mov_reg_T1(ot
, reg
);
5458 gen_op_update2_cc();
5459 set_cc_op(s
, CC_OP_ADDB
+ ot
);
5462 case 0x1b1: /* cmpxchg Ev, Gv */
5465 TCGv t0
, t1
, t2
, a0
;
5470 ot
= dflag
+ OT_WORD
;
5471 modrm
= cpu_ldub_code(env
, s
->pc
++);
5472 reg
= ((modrm
>> 3) & 7) | rex_r
;
5473 mod
= (modrm
>> 6) & 3;
5474 t0
= tcg_temp_local_new();
5475 t1
= tcg_temp_local_new();
5476 t2
= tcg_temp_local_new();
5477 a0
= tcg_temp_local_new();
5478 gen_op_mov_v_reg(ot
, t1
, reg
);
5480 rm
= (modrm
& 7) | REX_B(s
);
5481 gen_op_mov_v_reg(ot
, t0
, rm
);
5483 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5484 tcg_gen_mov_tl(a0
, cpu_A0
);
5485 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
5486 rm
= 0; /* avoid warning */
5488 label1
= gen_new_label();
5489 tcg_gen_mov_tl(t2
, cpu_regs
[R_EAX
]);
5492 tcg_gen_brcond_tl(TCG_COND_EQ
, t2
, t0
, label1
);
5493 label2
= gen_new_label();
5495 gen_op_mov_reg_v(ot
, R_EAX
, t0
);
5497 gen_set_label(label1
);
5498 gen_op_mov_reg_v(ot
, rm
, t1
);
5500 /* perform no-op store cycle like physical cpu; must be
5501 before changing accumulator to ensure idempotency if
5502 the store faults and the instruction is restarted */
5503 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
5504 gen_op_mov_reg_v(ot
, R_EAX
, t0
);
5506 gen_set_label(label1
);
5507 gen_op_st_v(ot
+ s
->mem_index
, t1
, a0
);
5509 gen_set_label(label2
);
5510 tcg_gen_mov_tl(cpu_cc_src
, t0
);
5511 tcg_gen_mov_tl(cpu_cc_srcT
, t2
);
5512 tcg_gen_sub_tl(cpu_cc_dst
, t2
, t0
);
5513 set_cc_op(s
, CC_OP_SUBB
+ ot
);
5520 case 0x1c7: /* cmpxchg8b */
5521 modrm
= cpu_ldub_code(env
, s
->pc
++);
5522 mod
= (modrm
>> 6) & 3;
5523 if ((mod
== 3) || ((modrm
& 0x38) != 0x8))
5525 #ifdef TARGET_X86_64
5527 if (!(s
->cpuid_ext_features
& CPUID_EXT_CX16
))
5529 gen_jmp_im(pc_start
- s
->cs_base
);
5530 gen_update_cc_op(s
);
5531 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5532 gen_helper_cmpxchg16b(cpu_env
, cpu_A0
);
5536 if (!(s
->cpuid_features
& CPUID_CX8
))
5538 gen_jmp_im(pc_start
- s
->cs_base
);
5539 gen_update_cc_op(s
);
5540 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5541 gen_helper_cmpxchg8b(cpu_env
, cpu_A0
);
5543 set_cc_op(s
, CC_OP_EFLAGS
);
5546 /**************************/
5548 case 0x50 ... 0x57: /* push */
5549 gen_op_mov_TN_reg(OT_LONG
, 0, (b
& 7) | REX_B(s
));
5552 case 0x58 ... 0x5f: /* pop */
5554 ot
= dflag
? OT_QUAD
: OT_WORD
;
5556 ot
= dflag
+ OT_WORD
;
5559 /* NOTE: order is important for pop %sp */
5561 gen_op_mov_reg_T0(ot
, (b
& 7) | REX_B(s
));
5563 case 0x60: /* pusha */
5568 case 0x61: /* popa */
5573 case 0x68: /* push Iv */
5576 ot
= dflag
? OT_QUAD
: OT_WORD
;
5578 ot
= dflag
+ OT_WORD
;
5581 val
= insn_get(env
, s
, ot
);
5583 val
= (int8_t)insn_get(env
, s
, OT_BYTE
);
5584 gen_op_movl_T0_im(val
);
5587 case 0x8f: /* pop Ev */
5589 ot
= dflag
? OT_QUAD
: OT_WORD
;
5591 ot
= dflag
+ OT_WORD
;
5593 modrm
= cpu_ldub_code(env
, s
->pc
++);
5594 mod
= (modrm
>> 6) & 3;
5597 /* NOTE: order is important for pop %sp */
5599 rm
= (modrm
& 7) | REX_B(s
);
5600 gen_op_mov_reg_T0(ot
, rm
);
5602 /* NOTE: order is important too for MMU exceptions */
5603 s
->popl_esp_hack
= 1 << ot
;
5604 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5605 s
->popl_esp_hack
= 0;
5609 case 0xc8: /* enter */
5612 val
= cpu_lduw_code(env
, s
->pc
);
5614 level
= cpu_ldub_code(env
, s
->pc
++);
5615 gen_enter(s
, val
, level
);
5618 case 0xc9: /* leave */
5619 /* XXX: exception not precise (ESP is updated before potential exception) */
5621 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EBP
);
5622 gen_op_mov_reg_T0(OT_QUAD
, R_ESP
);
5623 } else if (s
->ss32
) {
5624 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
5625 gen_op_mov_reg_T0(OT_LONG
, R_ESP
);
5627 gen_op_mov_TN_reg(OT_WORD
, 0, R_EBP
);
5628 gen_op_mov_reg_T0(OT_WORD
, R_ESP
);
5632 ot
= dflag
? OT_QUAD
: OT_WORD
;
5634 ot
= dflag
+ OT_WORD
;
5636 gen_op_mov_reg_T0(ot
, R_EBP
);
5639 case 0x06: /* push es */
5640 case 0x0e: /* push cs */
5641 case 0x16: /* push ss */
5642 case 0x1e: /* push ds */
5645 gen_op_movl_T0_seg(b
>> 3);
5648 case 0x1a0: /* push fs */
5649 case 0x1a8: /* push gs */
5650 gen_op_movl_T0_seg((b
>> 3) & 7);
5653 case 0x07: /* pop es */
5654 case 0x17: /* pop ss */
5655 case 0x1f: /* pop ds */
5660 gen_movl_seg_T0(s
, reg
, pc_start
- s
->cs_base
);
5663 /* if reg == SS, inhibit interrupts/trace. */
5664 /* If several instructions disable interrupts, only the
5666 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
5667 gen_helper_set_inhibit_irq(cpu_env
);
5671 gen_jmp_im(s
->pc
- s
->cs_base
);
5675 case 0x1a1: /* pop fs */
5676 case 0x1a9: /* pop gs */
5678 gen_movl_seg_T0(s
, (b
>> 3) & 7, pc_start
- s
->cs_base
);
5681 gen_jmp_im(s
->pc
- s
->cs_base
);
5686 /**************************/
5689 case 0x89: /* mov Gv, Ev */
5693 ot
= dflag
+ OT_WORD
;
5694 modrm
= cpu_ldub_code(env
, s
->pc
++);
5695 reg
= ((modrm
>> 3) & 7) | rex_r
;
5697 /* generate a generic store */
5698 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
5701 case 0xc7: /* mov Ev, Iv */
5705 ot
= dflag
+ OT_WORD
;
5706 modrm
= cpu_ldub_code(env
, s
->pc
++);
5707 mod
= (modrm
>> 6) & 3;
5709 s
->rip_offset
= insn_const_size(ot
);
5710 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5712 val
= insn_get(env
, s
, ot
);
5713 gen_op_movl_T0_im(val
);
5715 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5717 gen_op_mov_reg_T0(ot
, (modrm
& 7) | REX_B(s
));
5720 case 0x8b: /* mov Ev, Gv */
5724 ot
= OT_WORD
+ dflag
;
5725 modrm
= cpu_ldub_code(env
, s
->pc
++);
5726 reg
= ((modrm
>> 3) & 7) | rex_r
;
5728 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5729 gen_op_mov_reg_T0(ot
, reg
);
5731 case 0x8e: /* mov seg, Gv */
5732 modrm
= cpu_ldub_code(env
, s
->pc
++);
5733 reg
= (modrm
>> 3) & 7;
5734 if (reg
>= 6 || reg
== R_CS
)
5736 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
5737 gen_movl_seg_T0(s
, reg
, pc_start
- s
->cs_base
);
5739 /* if reg == SS, inhibit interrupts/trace */
5740 /* If several instructions disable interrupts, only the
5742 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
5743 gen_helper_set_inhibit_irq(cpu_env
);
5747 gen_jmp_im(s
->pc
- s
->cs_base
);
5751 case 0x8c: /* mov Gv, seg */
5752 modrm
= cpu_ldub_code(env
, s
->pc
++);
5753 reg
= (modrm
>> 3) & 7;
5754 mod
= (modrm
>> 6) & 3;
5757 gen_op_movl_T0_seg(reg
);
5759 ot
= OT_WORD
+ dflag
;
5762 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5765 case 0x1b6: /* movzbS Gv, Eb */
5766 case 0x1b7: /* movzwS Gv, Eb */
5767 case 0x1be: /* movsbS Gv, Eb */
5768 case 0x1bf: /* movswS Gv, Eb */
5771 /* d_ot is the size of destination */
5772 d_ot
= dflag
+ OT_WORD
;
5773 /* ot is the size of source */
5774 ot
= (b
& 1) + OT_BYTE
;
5775 modrm
= cpu_ldub_code(env
, s
->pc
++);
5776 reg
= ((modrm
>> 3) & 7) | rex_r
;
5777 mod
= (modrm
>> 6) & 3;
5778 rm
= (modrm
& 7) | REX_B(s
);
5781 gen_op_mov_TN_reg(ot
, 0, rm
);
5782 switch(ot
| (b
& 8)) {
5784 tcg_gen_ext8u_tl(cpu_T
[0], cpu_T
[0]);
5787 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
5790 tcg_gen_ext16u_tl(cpu_T
[0], cpu_T
[0]);
5794 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5797 gen_op_mov_reg_T0(d_ot
, reg
);
5799 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5801 gen_op_lds_T0_A0(ot
+ s
->mem_index
);
5803 gen_op_ldu_T0_A0(ot
+ s
->mem_index
);
5805 gen_op_mov_reg_T0(d_ot
, reg
);
5810 case 0x8d: /* lea */
5811 ot
= dflag
+ OT_WORD
;
5812 modrm
= cpu_ldub_code(env
, s
->pc
++);
5813 mod
= (modrm
>> 6) & 3;
5816 reg
= ((modrm
>> 3) & 7) | rex_r
;
5817 /* we must ensure that no segment is added */
5821 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5823 gen_op_mov_reg_A0(ot
- OT_WORD
, reg
);
5826 case 0xa0: /* mov EAX, Ov */
5828 case 0xa2: /* mov Ov, EAX */
5831 target_ulong offset_addr
;
5836 ot
= dflag
+ OT_WORD
;
5837 #ifdef TARGET_X86_64
5838 if (s
->aflag
== 2) {
5839 offset_addr
= cpu_ldq_code(env
, s
->pc
);
5841 gen_op_movq_A0_im(offset_addr
);
5846 offset_addr
= insn_get(env
, s
, OT_LONG
);
5848 offset_addr
= insn_get(env
, s
, OT_WORD
);
5850 gen_op_movl_A0_im(offset_addr
);
5852 gen_add_A0_ds_seg(s
);
5854 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
5855 gen_op_mov_reg_T0(ot
, R_EAX
);
5857 gen_op_mov_TN_reg(ot
, 0, R_EAX
);
5858 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5862 case 0xd7: /* xlat */
5863 #ifdef TARGET_X86_64
5864 if (s
->aflag
== 2) {
5865 gen_op_movq_A0_reg(R_EBX
);
5866 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EAX
);
5867 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xff);
5868 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_T
[0]);
5872 gen_op_movl_A0_reg(R_EBX
);
5873 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
5874 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xff);
5875 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_T
[0]);
5877 gen_op_andl_A0_ffff();
5879 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
5881 gen_add_A0_ds_seg(s
);
5882 gen_op_ldu_T0_A0(OT_BYTE
+ s
->mem_index
);
5883 gen_op_mov_reg_T0(OT_BYTE
, R_EAX
);
5885 case 0xb0 ... 0xb7: /* mov R, Ib */
5886 val
= insn_get(env
, s
, OT_BYTE
);
5887 gen_op_movl_T0_im(val
);
5888 gen_op_mov_reg_T0(OT_BYTE
, (b
& 7) | REX_B(s
));
5890 case 0xb8 ... 0xbf: /* mov R, Iv */
5891 #ifdef TARGET_X86_64
5895 tmp
= cpu_ldq_code(env
, s
->pc
);
5897 reg
= (b
& 7) | REX_B(s
);
5898 gen_movtl_T0_im(tmp
);
5899 gen_op_mov_reg_T0(OT_QUAD
, reg
);
5903 ot
= dflag
? OT_LONG
: OT_WORD
;
5904 val
= insn_get(env
, s
, ot
);
5905 reg
= (b
& 7) | REX_B(s
);
5906 gen_op_movl_T0_im(val
);
5907 gen_op_mov_reg_T0(ot
, reg
);
5911 case 0x91 ... 0x97: /* xchg R, EAX */
5913 ot
= dflag
+ OT_WORD
;
5914 reg
= (b
& 7) | REX_B(s
);
5918 case 0x87: /* xchg Ev, Gv */
5922 ot
= dflag
+ OT_WORD
;
5923 modrm
= cpu_ldub_code(env
, s
->pc
++);
5924 reg
= ((modrm
>> 3) & 7) | rex_r
;
5925 mod
= (modrm
>> 6) & 3;
5927 rm
= (modrm
& 7) | REX_B(s
);
5929 gen_op_mov_TN_reg(ot
, 0, reg
);
5930 gen_op_mov_TN_reg(ot
, 1, rm
);
5931 gen_op_mov_reg_T0(ot
, rm
);
5932 gen_op_mov_reg_T1(ot
, reg
);
5934 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5935 gen_op_mov_TN_reg(ot
, 0, reg
);
5936 /* for xchg, lock is implicit */
5937 if (!(prefixes
& PREFIX_LOCK
))
5939 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5940 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5941 if (!(prefixes
& PREFIX_LOCK
))
5942 gen_helper_unlock();
5943 gen_op_mov_reg_T1(ot
, reg
);
5946 case 0xc4: /* les Gv */
5947 /* In CODE64 this is VEX3; see above. */
5950 case 0xc5: /* lds Gv */
5951 /* In CODE64 this is VEX2; see above. */
5954 case 0x1b2: /* lss Gv */
5957 case 0x1b4: /* lfs Gv */
5960 case 0x1b5: /* lgs Gv */
5963 ot
= dflag
? OT_LONG
: OT_WORD
;
5964 modrm
= cpu_ldub_code(env
, s
->pc
++);
5965 reg
= ((modrm
>> 3) & 7) | rex_r
;
5966 mod
= (modrm
>> 6) & 3;
5969 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5970 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5971 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
5972 /* load the segment first to handle exceptions properly */
5973 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
5974 gen_movl_seg_T0(s
, op
, pc_start
- s
->cs_base
);
5975 /* then put the data */
5976 gen_op_mov_reg_T1(ot
, reg
);
5978 gen_jmp_im(s
->pc
- s
->cs_base
);
5983 /************************/
5994 ot
= dflag
+ OT_WORD
;
5996 modrm
= cpu_ldub_code(env
, s
->pc
++);
5997 mod
= (modrm
>> 6) & 3;
5998 op
= (modrm
>> 3) & 7;
6004 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
6007 opreg
= (modrm
& 7) | REX_B(s
);
6012 gen_shift(s
, op
, ot
, opreg
, OR_ECX
);
6015 shift
= cpu_ldub_code(env
, s
->pc
++);
6017 gen_shifti(s
, op
, ot
, opreg
, shift
);
6032 case 0x1a4: /* shld imm */
6036 case 0x1a5: /* shld cl */
6040 case 0x1ac: /* shrd imm */
6044 case 0x1ad: /* shrd cl */
6048 ot
= dflag
+ OT_WORD
;
6049 modrm
= cpu_ldub_code(env
, s
->pc
++);
6050 mod
= (modrm
>> 6) & 3;
6051 rm
= (modrm
& 7) | REX_B(s
);
6052 reg
= ((modrm
>> 3) & 7) | rex_r
;
6054 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
6059 gen_op_mov_TN_reg(ot
, 1, reg
);
6062 TCGv imm
= tcg_const_tl(cpu_ldub_code(env
, s
->pc
++));
6063 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, imm
);
6066 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, cpu_regs
[R_ECX
]);
6070 /************************/
6073 if (s
->flags
& (HF_EM_MASK
| HF_TS_MASK
)) {
6074 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
6075 /* XXX: what to do if illegal op ? */
6076 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
6079 modrm
= cpu_ldub_code(env
, s
->pc
++);
6080 mod
= (modrm
>> 6) & 3;
6082 op
= ((b
& 7) << 3) | ((modrm
>> 3) & 7);
6085 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
6087 case 0x00 ... 0x07: /* fxxxs */
6088 case 0x10 ... 0x17: /* fixxxl */
6089 case 0x20 ... 0x27: /* fxxxl */
6090 case 0x30 ... 0x37: /* fixxx */
6097 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
6098 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6099 gen_helper_flds_FT0(cpu_env
, cpu_tmp2_i32
);
6102 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
6103 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6104 gen_helper_fildl_FT0(cpu_env
, cpu_tmp2_i32
);
6107 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
6108 (s
->mem_index
>> 2) - 1);
6109 gen_helper_fldl_FT0(cpu_env
, cpu_tmp1_i64
);
6113 gen_op_lds_T0_A0(OT_WORD
+ s
->mem_index
);
6114 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6115 gen_helper_fildl_FT0(cpu_env
, cpu_tmp2_i32
);
6119 gen_helper_fp_arith_ST0_FT0(op1
);
6121 /* fcomp needs pop */
6122 gen_helper_fpop(cpu_env
);
6126 case 0x08: /* flds */
6127 case 0x0a: /* fsts */
6128 case 0x0b: /* fstps */
6129 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
6130 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
6131 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
6136 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
6137 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6138 gen_helper_flds_ST0(cpu_env
, cpu_tmp2_i32
);
6141 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
6142 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6143 gen_helper_fildl_ST0(cpu_env
, cpu_tmp2_i32
);
6146 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
6147 (s
->mem_index
>> 2) - 1);
6148 gen_helper_fldl_ST0(cpu_env
, cpu_tmp1_i64
);
6152 gen_op_lds_T0_A0(OT_WORD
+ s
->mem_index
);
6153 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6154 gen_helper_fildl_ST0(cpu_env
, cpu_tmp2_i32
);
6159 /* XXX: the corresponding CPUID bit must be tested ! */
6162 gen_helper_fisttl_ST0(cpu_tmp2_i32
, cpu_env
);
6163 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6164 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
6167 gen_helper_fisttll_ST0(cpu_tmp1_i64
, cpu_env
);
6168 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
6169 (s
->mem_index
>> 2) - 1);
6173 gen_helper_fistt_ST0(cpu_tmp2_i32
, cpu_env
);
6174 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6175 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
6178 gen_helper_fpop(cpu_env
);
6183 gen_helper_fsts_ST0(cpu_tmp2_i32
, cpu_env
);
6184 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6185 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
6188 gen_helper_fistl_ST0(cpu_tmp2_i32
, cpu_env
);
6189 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6190 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
6193 gen_helper_fstl_ST0(cpu_tmp1_i64
, cpu_env
);
6194 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
6195 (s
->mem_index
>> 2) - 1);
6199 gen_helper_fist_ST0(cpu_tmp2_i32
, cpu_env
);
6200 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6201 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
6205 gen_helper_fpop(cpu_env
);
6209 case 0x0c: /* fldenv mem */
6210 gen_update_cc_op(s
);
6211 gen_jmp_im(pc_start
- s
->cs_base
);
6212 gen_helper_fldenv(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
6214 case 0x0d: /* fldcw mem */
6215 gen_op_ld_T0_A0(OT_WORD
+ s
->mem_index
);
6216 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6217 gen_helper_fldcw(cpu_env
, cpu_tmp2_i32
);
6219 case 0x0e: /* fnstenv mem */
6220 gen_update_cc_op(s
);
6221 gen_jmp_im(pc_start
- s
->cs_base
);
6222 gen_helper_fstenv(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
6224 case 0x0f: /* fnstcw mem */
6225 gen_helper_fnstcw(cpu_tmp2_i32
, cpu_env
);
6226 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6227 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
6229 case 0x1d: /* fldt mem */
6230 gen_update_cc_op(s
);
6231 gen_jmp_im(pc_start
- s
->cs_base
);
6232 gen_helper_fldt_ST0(cpu_env
, cpu_A0
);
6234 case 0x1f: /* fstpt mem */
6235 gen_update_cc_op(s
);
6236 gen_jmp_im(pc_start
- s
->cs_base
);
6237 gen_helper_fstt_ST0(cpu_env
, cpu_A0
);
6238 gen_helper_fpop(cpu_env
);
6240 case 0x2c: /* frstor mem */
6241 gen_update_cc_op(s
);
6242 gen_jmp_im(pc_start
- s
->cs_base
);
6243 gen_helper_frstor(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
6245 case 0x2e: /* fnsave mem */
6246 gen_update_cc_op(s
);
6247 gen_jmp_im(pc_start
- s
->cs_base
);
6248 gen_helper_fsave(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
6250 case 0x2f: /* fnstsw mem */
6251 gen_helper_fnstsw(cpu_tmp2_i32
, cpu_env
);
6252 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6253 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
6255 case 0x3c: /* fbld */
6256 gen_update_cc_op(s
);
6257 gen_jmp_im(pc_start
- s
->cs_base
);
6258 gen_helper_fbld_ST0(cpu_env
, cpu_A0
);
6260 case 0x3e: /* fbstp */
6261 gen_update_cc_op(s
);
6262 gen_jmp_im(pc_start
- s
->cs_base
);
6263 gen_helper_fbst_ST0(cpu_env
, cpu_A0
);
6264 gen_helper_fpop(cpu_env
);
6266 case 0x3d: /* fildll */
6267 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
6268 (s
->mem_index
>> 2) - 1);
6269 gen_helper_fildll_ST0(cpu_env
, cpu_tmp1_i64
);
6271 case 0x3f: /* fistpll */
6272 gen_helper_fistll_ST0(cpu_tmp1_i64
, cpu_env
);
6273 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
6274 (s
->mem_index
>> 2) - 1);
6275 gen_helper_fpop(cpu_env
);
6281 /* register float ops */
6285 case 0x08: /* fld sti */
6286 gen_helper_fpush(cpu_env
);
6287 gen_helper_fmov_ST0_STN(cpu_env
,
6288 tcg_const_i32((opreg
+ 1) & 7));
6290 case 0x09: /* fxchg sti */
6291 case 0x29: /* fxchg4 sti, undocumented op */
6292 case 0x39: /* fxchg7 sti, undocumented op */
6293 gen_helper_fxchg_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
6295 case 0x0a: /* grp d9/2 */
6298 /* check exceptions (FreeBSD FPU probe) */
6299 gen_update_cc_op(s
);
6300 gen_jmp_im(pc_start
- s
->cs_base
);
6301 gen_helper_fwait(cpu_env
);
6307 case 0x0c: /* grp d9/4 */
6310 gen_helper_fchs_ST0(cpu_env
);
6313 gen_helper_fabs_ST0(cpu_env
);
6316 gen_helper_fldz_FT0(cpu_env
);
6317 gen_helper_fcom_ST0_FT0(cpu_env
);
6320 gen_helper_fxam_ST0(cpu_env
);
6326 case 0x0d: /* grp d9/5 */
6330 gen_helper_fpush(cpu_env
);
6331 gen_helper_fld1_ST0(cpu_env
);
6334 gen_helper_fpush(cpu_env
);
6335 gen_helper_fldl2t_ST0(cpu_env
);
6338 gen_helper_fpush(cpu_env
);
6339 gen_helper_fldl2e_ST0(cpu_env
);
6342 gen_helper_fpush(cpu_env
);
6343 gen_helper_fldpi_ST0(cpu_env
);
6346 gen_helper_fpush(cpu_env
);
6347 gen_helper_fldlg2_ST0(cpu_env
);
6350 gen_helper_fpush(cpu_env
);
6351 gen_helper_fldln2_ST0(cpu_env
);
6354 gen_helper_fpush(cpu_env
);
6355 gen_helper_fldz_ST0(cpu_env
);
6362 case 0x0e: /* grp d9/6 */
6365 gen_helper_f2xm1(cpu_env
);
6368 gen_helper_fyl2x(cpu_env
);
6371 gen_helper_fptan(cpu_env
);
6373 case 3: /* fpatan */
6374 gen_helper_fpatan(cpu_env
);
6376 case 4: /* fxtract */
6377 gen_helper_fxtract(cpu_env
);
6379 case 5: /* fprem1 */
6380 gen_helper_fprem1(cpu_env
);
6382 case 6: /* fdecstp */
6383 gen_helper_fdecstp(cpu_env
);
6386 case 7: /* fincstp */
6387 gen_helper_fincstp(cpu_env
);
6391 case 0x0f: /* grp d9/7 */
6394 gen_helper_fprem(cpu_env
);
6396 case 1: /* fyl2xp1 */
6397 gen_helper_fyl2xp1(cpu_env
);
6400 gen_helper_fsqrt(cpu_env
);
6402 case 3: /* fsincos */
6403 gen_helper_fsincos(cpu_env
);
6405 case 5: /* fscale */
6406 gen_helper_fscale(cpu_env
);
6408 case 4: /* frndint */
6409 gen_helper_frndint(cpu_env
);
6412 gen_helper_fsin(cpu_env
);
6416 gen_helper_fcos(cpu_env
);
6420 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6421 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6422 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6428 gen_helper_fp_arith_STN_ST0(op1
, opreg
);
6430 gen_helper_fpop(cpu_env
);
6432 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6433 gen_helper_fp_arith_ST0_FT0(op1
);
6437 case 0x02: /* fcom */
6438 case 0x22: /* fcom2, undocumented op */
6439 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6440 gen_helper_fcom_ST0_FT0(cpu_env
);
6442 case 0x03: /* fcomp */
6443 case 0x23: /* fcomp3, undocumented op */
6444 case 0x32: /* fcomp5, undocumented op */
6445 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6446 gen_helper_fcom_ST0_FT0(cpu_env
);
6447 gen_helper_fpop(cpu_env
);
6449 case 0x15: /* da/5 */
6451 case 1: /* fucompp */
6452 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6453 gen_helper_fucom_ST0_FT0(cpu_env
);
6454 gen_helper_fpop(cpu_env
);
6455 gen_helper_fpop(cpu_env
);
6463 case 0: /* feni (287 only, just do nop here) */
6465 case 1: /* fdisi (287 only, just do nop here) */
6468 gen_helper_fclex(cpu_env
);
6470 case 3: /* fninit */
6471 gen_helper_fninit(cpu_env
);
6473 case 4: /* fsetpm (287 only, just do nop here) */
6479 case 0x1d: /* fucomi */
6480 gen_update_cc_op(s
);
6481 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6482 gen_helper_fucomi_ST0_FT0(cpu_env
);
6483 set_cc_op(s
, CC_OP_EFLAGS
);
6485 case 0x1e: /* fcomi */
6486 gen_update_cc_op(s
);
6487 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6488 gen_helper_fcomi_ST0_FT0(cpu_env
);
6489 set_cc_op(s
, CC_OP_EFLAGS
);
6491 case 0x28: /* ffree sti */
6492 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6494 case 0x2a: /* fst sti */
6495 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6497 case 0x2b: /* fstp sti */
6498 case 0x0b: /* fstp1 sti, undocumented op */
6499 case 0x3a: /* fstp8 sti, undocumented op */
6500 case 0x3b: /* fstp9 sti, undocumented op */
6501 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6502 gen_helper_fpop(cpu_env
);
6504 case 0x2c: /* fucom st(i) */
6505 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6506 gen_helper_fucom_ST0_FT0(cpu_env
);
6508 case 0x2d: /* fucomp st(i) */
6509 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6510 gen_helper_fucom_ST0_FT0(cpu_env
);
6511 gen_helper_fpop(cpu_env
);
6513 case 0x33: /* de/3 */
6515 case 1: /* fcompp */
6516 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6517 gen_helper_fcom_ST0_FT0(cpu_env
);
6518 gen_helper_fpop(cpu_env
);
6519 gen_helper_fpop(cpu_env
);
6525 case 0x38: /* ffreep sti, undocumented op */
6526 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6527 gen_helper_fpop(cpu_env
);
6529 case 0x3c: /* df/4 */
6532 gen_helper_fnstsw(cpu_tmp2_i32
, cpu_env
);
6533 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6534 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
6540 case 0x3d: /* fucomip */
6541 gen_update_cc_op(s
);
6542 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6543 gen_helper_fucomi_ST0_FT0(cpu_env
);
6544 gen_helper_fpop(cpu_env
);
6545 set_cc_op(s
, CC_OP_EFLAGS
);
6547 case 0x3e: /* fcomip */
6548 gen_update_cc_op(s
);
6549 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6550 gen_helper_fcomi_ST0_FT0(cpu_env
);
6551 gen_helper_fpop(cpu_env
);
6552 set_cc_op(s
, CC_OP_EFLAGS
);
6554 case 0x10 ... 0x13: /* fcmovxx */
6558 static const uint8_t fcmov_cc
[8] = {
6564 op1
= fcmov_cc
[op
& 3] | (((op
>> 3) & 1) ^ 1);
6565 l1
= gen_new_label();
6566 gen_jcc1_noeob(s
, op1
, l1
);
6567 gen_helper_fmov_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
6576 /************************/
6579 case 0xa4: /* movsS */
6584 ot
= dflag
+ OT_WORD
;
6586 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6587 gen_repz_movs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6593 case 0xaa: /* stosS */
6598 ot
= dflag
+ OT_WORD
;
6600 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6601 gen_repz_stos(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6606 case 0xac: /* lodsS */
6611 ot
= dflag
+ OT_WORD
;
6612 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6613 gen_repz_lods(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6618 case 0xae: /* scasS */
6623 ot
= dflag
+ OT_WORD
;
6624 if (prefixes
& PREFIX_REPNZ
) {
6625 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6626 } else if (prefixes
& PREFIX_REPZ
) {
6627 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6633 case 0xa6: /* cmpsS */
6638 ot
= dflag
+ OT_WORD
;
6639 if (prefixes
& PREFIX_REPNZ
) {
6640 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6641 } else if (prefixes
& PREFIX_REPZ
) {
6642 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6647 case 0x6c: /* insS */
6652 ot
= dflag
? OT_LONG
: OT_WORD
;
6653 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6654 gen_op_andl_T0_ffff();
6655 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6656 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
) | 4);
6657 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6658 gen_repz_ins(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6662 gen_jmp(s
, s
->pc
- s
->cs_base
);
6666 case 0x6e: /* outsS */
6671 ot
= dflag
? OT_LONG
: OT_WORD
;
6672 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6673 gen_op_andl_T0_ffff();
6674 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6675 svm_is_rep(prefixes
) | 4);
6676 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6677 gen_repz_outs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6681 gen_jmp(s
, s
->pc
- s
->cs_base
);
6686 /************************/
6694 ot
= dflag
? OT_LONG
: OT_WORD
;
6695 val
= cpu_ldub_code(env
, s
->pc
++);
6696 gen_op_movl_T0_im(val
);
6697 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6698 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6701 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6702 gen_helper_in_func(ot
, cpu_T
[1], cpu_tmp2_i32
);
6703 gen_op_mov_reg_T1(ot
, R_EAX
);
6706 gen_jmp(s
, s
->pc
- s
->cs_base
);
6714 ot
= dflag
? OT_LONG
: OT_WORD
;
6715 val
= cpu_ldub_code(env
, s
->pc
++);
6716 gen_op_movl_T0_im(val
);
6717 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6718 svm_is_rep(prefixes
));
6719 gen_op_mov_TN_reg(ot
, 1, R_EAX
);
6723 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6724 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
6725 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6728 gen_jmp(s
, s
->pc
- s
->cs_base
);
6736 ot
= dflag
? OT_LONG
: OT_WORD
;
6737 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6738 gen_op_andl_T0_ffff();
6739 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6740 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6743 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6744 gen_helper_in_func(ot
, cpu_T
[1], cpu_tmp2_i32
);
6745 gen_op_mov_reg_T1(ot
, R_EAX
);
6748 gen_jmp(s
, s
->pc
- s
->cs_base
);
6756 ot
= dflag
? OT_LONG
: OT_WORD
;
6757 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6758 gen_op_andl_T0_ffff();
6759 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6760 svm_is_rep(prefixes
));
6761 gen_op_mov_TN_reg(ot
, 1, R_EAX
);
6765 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6766 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
6767 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6770 gen_jmp(s
, s
->pc
- s
->cs_base
);
6774 /************************/
6776 case 0xc2: /* ret im */
6777 val
= cpu_ldsw_code(env
, s
->pc
);
6780 if (CODE64(s
) && s
->dflag
)
6782 gen_stack_update(s
, val
+ (2 << s
->dflag
));
6784 gen_op_andl_T0_ffff();
6788 case 0xc3: /* ret */
6792 gen_op_andl_T0_ffff();
6796 case 0xca: /* lret im */
6797 val
= cpu_ldsw_code(env
, s
->pc
);
6800 if (s
->pe
&& !s
->vm86
) {
6801 gen_update_cc_op(s
);
6802 gen_jmp_im(pc_start
- s
->cs_base
);
6803 gen_helper_lret_protected(cpu_env
, tcg_const_i32(s
->dflag
),
6804 tcg_const_i32(val
));
6808 gen_op_ld_T0_A0(1 + s
->dflag
+ s
->mem_index
);
6810 gen_op_andl_T0_ffff();
6811 /* NOTE: keeping EIP updated is not a problem in case of
6815 gen_op_addl_A0_im(2 << s
->dflag
);
6816 gen_op_ld_T0_A0(1 + s
->dflag
+ s
->mem_index
);
6817 gen_op_movl_seg_T0_vm(R_CS
);
6818 /* add stack offset */
6819 gen_stack_update(s
, val
+ (4 << s
->dflag
));
6823 case 0xcb: /* lret */
6826 case 0xcf: /* iret */
6827 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IRET
);
6830 gen_helper_iret_real(cpu_env
, tcg_const_i32(s
->dflag
));
6831 set_cc_op(s
, CC_OP_EFLAGS
);
6832 } else if (s
->vm86
) {
6834 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6836 gen_helper_iret_real(cpu_env
, tcg_const_i32(s
->dflag
));
6837 set_cc_op(s
, CC_OP_EFLAGS
);
6840 gen_update_cc_op(s
);
6841 gen_jmp_im(pc_start
- s
->cs_base
);
6842 gen_helper_iret_protected(cpu_env
, tcg_const_i32(s
->dflag
),
6843 tcg_const_i32(s
->pc
- s
->cs_base
));
6844 set_cc_op(s
, CC_OP_EFLAGS
);
6848 case 0xe8: /* call im */
6851 tval
= (int32_t)insn_get(env
, s
, OT_LONG
);
6853 tval
= (int16_t)insn_get(env
, s
, OT_WORD
);
6854 next_eip
= s
->pc
- s
->cs_base
;
6860 gen_movtl_T0_im(next_eip
);
6865 case 0x9a: /* lcall im */
6867 unsigned int selector
, offset
;
6871 ot
= dflag
? OT_LONG
: OT_WORD
;
6872 offset
= insn_get(env
, s
, ot
);
6873 selector
= insn_get(env
, s
, OT_WORD
);
6875 gen_op_movl_T0_im(selector
);
6876 gen_op_movl_T1_imu(offset
);
6879 case 0xe9: /* jmp im */
6881 tval
= (int32_t)insn_get(env
, s
, OT_LONG
);
6883 tval
= (int16_t)insn_get(env
, s
, OT_WORD
);
6884 tval
+= s
->pc
- s
->cs_base
;
6891 case 0xea: /* ljmp im */
6893 unsigned int selector
, offset
;
6897 ot
= dflag
? OT_LONG
: OT_WORD
;
6898 offset
= insn_get(env
, s
, ot
);
6899 selector
= insn_get(env
, s
, OT_WORD
);
6901 gen_op_movl_T0_im(selector
);
6902 gen_op_movl_T1_imu(offset
);
6905 case 0xeb: /* jmp Jb */
6906 tval
= (int8_t)insn_get(env
, s
, OT_BYTE
);
6907 tval
+= s
->pc
- s
->cs_base
;
6912 case 0x70 ... 0x7f: /* jcc Jb */
6913 tval
= (int8_t)insn_get(env
, s
, OT_BYTE
);
6915 case 0x180 ... 0x18f: /* jcc Jv */
6917 tval
= (int32_t)insn_get(env
, s
, OT_LONG
);
6919 tval
= (int16_t)insn_get(env
, s
, OT_WORD
);
6922 next_eip
= s
->pc
- s
->cs_base
;
6926 gen_jcc(s
, b
, tval
, next_eip
);
6929 case 0x190 ... 0x19f: /* setcc Gv */
6930 modrm
= cpu_ldub_code(env
, s
->pc
++);
6931 gen_setcc1(s
, b
, cpu_T
[0]);
6932 gen_ldst_modrm(env
, s
, modrm
, OT_BYTE
, OR_TMP0
, 1);
6934 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6935 ot
= dflag
+ OT_WORD
;
6936 modrm
= cpu_ldub_code(env
, s
->pc
++);
6937 reg
= ((modrm
>> 3) & 7) | rex_r
;
6938 gen_cmovcc1(env
, s
, ot
, b
, modrm
, reg
);
6941 /************************/
6943 case 0x9c: /* pushf */
6944 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PUSHF
);
6945 if (s
->vm86
&& s
->iopl
!= 3) {
6946 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6948 gen_update_cc_op(s
);
6949 gen_helper_read_eflags(cpu_T
[0], cpu_env
);
6953 case 0x9d: /* popf */
6954 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_POPF
);
6955 if (s
->vm86
&& s
->iopl
!= 3) {
6956 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6961 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6962 tcg_const_i32((TF_MASK
| AC_MASK
|
6967 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6968 tcg_const_i32((TF_MASK
| AC_MASK
|
6970 IF_MASK
| IOPL_MASK
)
6974 if (s
->cpl
<= s
->iopl
) {
6976 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6977 tcg_const_i32((TF_MASK
|
6983 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6984 tcg_const_i32((TF_MASK
|
6993 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6994 tcg_const_i32((TF_MASK
| AC_MASK
|
6995 ID_MASK
| NT_MASK
)));
6997 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6998 tcg_const_i32((TF_MASK
| AC_MASK
|
7005 set_cc_op(s
, CC_OP_EFLAGS
);
7006 /* abort translation because TF/AC flag may change */
7007 gen_jmp_im(s
->pc
- s
->cs_base
);
7011 case 0x9e: /* sahf */
7012 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
7014 gen_op_mov_TN_reg(OT_BYTE
, 0, R_AH
);
7015 gen_compute_eflags(s
);
7016 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, CC_O
);
7017 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
7018 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_T
[0]);
7020 case 0x9f: /* lahf */
7021 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
7023 gen_compute_eflags(s
);
7024 /* Note: gen_compute_eflags() only gives the condition codes */
7025 tcg_gen_ori_tl(cpu_T
[0], cpu_cc_src
, 0x02);
7026 gen_op_mov_reg_T0(OT_BYTE
, R_AH
);
7028 case 0xf5: /* cmc */
7029 gen_compute_eflags(s
);
7030 tcg_gen_xori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
7032 case 0xf8: /* clc */
7033 gen_compute_eflags(s
);
7034 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_C
);
7036 case 0xf9: /* stc */
7037 gen_compute_eflags(s
);
7038 tcg_gen_ori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
7040 case 0xfc: /* cld */
7041 tcg_gen_movi_i32(cpu_tmp2_i32
, 1);
7042 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
7044 case 0xfd: /* std */
7045 tcg_gen_movi_i32(cpu_tmp2_i32
, -1);
7046 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
7049 /************************/
7050 /* bit operations */
7051 case 0x1ba: /* bt/bts/btr/btc Gv, im */
7052 ot
= dflag
+ OT_WORD
;
7053 modrm
= cpu_ldub_code(env
, s
->pc
++);
7054 op
= (modrm
>> 3) & 7;
7055 mod
= (modrm
>> 6) & 3;
7056 rm
= (modrm
& 7) | REX_B(s
);
7059 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7060 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
7062 gen_op_mov_TN_reg(ot
, 0, rm
);
7065 val
= cpu_ldub_code(env
, s
->pc
++);
7066 gen_op_movl_T1_im(val
);
7071 case 0x1a3: /* bt Gv, Ev */
7074 case 0x1ab: /* bts */
7077 case 0x1b3: /* btr */
7080 case 0x1bb: /* btc */
7083 ot
= dflag
+ OT_WORD
;
7084 modrm
= cpu_ldub_code(env
, s
->pc
++);
7085 reg
= ((modrm
>> 3) & 7) | rex_r
;
7086 mod
= (modrm
>> 6) & 3;
7087 rm
= (modrm
& 7) | REX_B(s
);
7088 gen_op_mov_TN_reg(OT_LONG
, 1, reg
);
7090 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7091 /* specific case: we need to add a displacement */
7092 gen_exts(ot
, cpu_T
[1]);
7093 tcg_gen_sari_tl(cpu_tmp0
, cpu_T
[1], 3 + ot
);
7094 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, ot
);
7095 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
7096 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
7098 gen_op_mov_TN_reg(ot
, 0, rm
);
7101 tcg_gen_andi_tl(cpu_T
[1], cpu_T
[1], (1 << (3 + ot
)) - 1);
7104 tcg_gen_shr_tl(cpu_cc_src
, cpu_T
[0], cpu_T
[1]);
7105 tcg_gen_movi_tl(cpu_cc_dst
, 0);
7108 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
7109 tcg_gen_movi_tl(cpu_tmp0
, 1);
7110 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
7111 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
7114 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
7115 tcg_gen_movi_tl(cpu_tmp0
, 1);
7116 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
7117 tcg_gen_not_tl(cpu_tmp0
, cpu_tmp0
);
7118 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
7122 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
7123 tcg_gen_movi_tl(cpu_tmp0
, 1);
7124 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
7125 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
7128 set_cc_op(s
, CC_OP_SARB
+ ot
);
7131 gen_op_st_T0_A0(ot
+ s
->mem_index
);
7133 gen_op_mov_reg_T0(ot
, rm
);
7134 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
7135 tcg_gen_movi_tl(cpu_cc_dst
, 0);
7138 case 0x1bc: /* bsf / tzcnt */
7139 case 0x1bd: /* bsr / lzcnt */
7140 ot
= dflag
+ OT_WORD
;
7141 modrm
= cpu_ldub_code(env
, s
->pc
++);
7142 reg
= ((modrm
>> 3) & 7) | rex_r
;
7143 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
7144 gen_extu(ot
, cpu_T
[0]);
7146 /* Note that lzcnt and tzcnt are in different extensions. */
7147 if ((prefixes
& PREFIX_REPZ
)
7149 ? s
->cpuid_ext3_features
& CPUID_EXT3_ABM
7150 : s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)) {
7152 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
7154 /* For lzcnt, reduce the target_ulong result by the
7155 number of zeros that we expect to find at the top. */
7156 gen_helper_clz(cpu_T
[0], cpu_T
[0]);
7157 tcg_gen_subi_tl(cpu_T
[0], cpu_T
[0], TARGET_LONG_BITS
- size
);
7159 /* For tzcnt, a zero input must return the operand size:
7160 force all bits outside the operand size to 1. */
7161 target_ulong mask
= (target_ulong
)-2 << (size
- 1);
7162 tcg_gen_ori_tl(cpu_T
[0], cpu_T
[0], mask
);
7163 gen_helper_ctz(cpu_T
[0], cpu_T
[0]);
7165 /* For lzcnt/tzcnt, C and Z bits are defined and are
7166 related to the result. */
7167 gen_op_update1_cc();
7168 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
7170 /* For bsr/bsf, only the Z bit is defined and it is related
7171 to the input and not the result. */
7172 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
7173 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
7175 /* For bsr, return the bit index of the first 1 bit,
7176 not the count of leading zeros. */
7177 gen_helper_clz(cpu_T
[0], cpu_T
[0]);
7178 tcg_gen_xori_tl(cpu_T
[0], cpu_T
[0], TARGET_LONG_BITS
- 1);
7180 gen_helper_ctz(cpu_T
[0], cpu_T
[0]);
7182 /* ??? The manual says that the output is undefined when the
7183 input is zero, but real hardware leaves it unchanged, and
7184 real programs appear to depend on that. */
7185 tcg_gen_movi_tl(cpu_tmp0
, 0);
7186 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_T
[0], cpu_cc_dst
, cpu_tmp0
,
7187 cpu_regs
[reg
], cpu_T
[0]);
7189 gen_op_mov_reg_T0(ot
, reg
);
7191 /************************/
7193 case 0x27: /* daa */
7196 gen_update_cc_op(s
);
7197 gen_helper_daa(cpu_env
);
7198 set_cc_op(s
, CC_OP_EFLAGS
);
7200 case 0x2f: /* das */
7203 gen_update_cc_op(s
);
7204 gen_helper_das(cpu_env
);
7205 set_cc_op(s
, CC_OP_EFLAGS
);
7207 case 0x37: /* aaa */
7210 gen_update_cc_op(s
);
7211 gen_helper_aaa(cpu_env
);
7212 set_cc_op(s
, CC_OP_EFLAGS
);
7214 case 0x3f: /* aas */
7217 gen_update_cc_op(s
);
7218 gen_helper_aas(cpu_env
);
7219 set_cc_op(s
, CC_OP_EFLAGS
);
7221 case 0xd4: /* aam */
7224 val
= cpu_ldub_code(env
, s
->pc
++);
7226 gen_exception(s
, EXCP00_DIVZ
, pc_start
- s
->cs_base
);
7228 gen_helper_aam(cpu_env
, tcg_const_i32(val
));
7229 set_cc_op(s
, CC_OP_LOGICB
);
7232 case 0xd5: /* aad */
7235 val
= cpu_ldub_code(env
, s
->pc
++);
7236 gen_helper_aad(cpu_env
, tcg_const_i32(val
));
7237 set_cc_op(s
, CC_OP_LOGICB
);
7239 /************************/
7241 case 0x90: /* nop */
7242 /* XXX: correct lock test for all insn */
7243 if (prefixes
& PREFIX_LOCK
) {
7246 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
7248 goto do_xchg_reg_eax
;
7250 if (prefixes
& PREFIX_REPZ
) {
7251 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PAUSE
);
7254 case 0x9b: /* fwait */
7255 if ((s
->flags
& (HF_MP_MASK
| HF_TS_MASK
)) ==
7256 (HF_MP_MASK
| HF_TS_MASK
)) {
7257 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7259 gen_update_cc_op(s
);
7260 gen_jmp_im(pc_start
- s
->cs_base
);
7261 gen_helper_fwait(cpu_env
);
7264 case 0xcc: /* int3 */
7265 gen_interrupt(s
, EXCP03_INT3
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
7267 case 0xcd: /* int N */
7268 val
= cpu_ldub_code(env
, s
->pc
++);
7269 if (s
->vm86
&& s
->iopl
!= 3) {
7270 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7272 gen_interrupt(s
, val
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
7275 case 0xce: /* into */
7278 gen_update_cc_op(s
);
7279 gen_jmp_im(pc_start
- s
->cs_base
);
7280 gen_helper_into(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7283 case 0xf1: /* icebp (undocumented, exits to external debugger) */
7284 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_ICEBP
);
7286 gen_debug(s
, pc_start
- s
->cs_base
);
7290 qemu_set_log(CPU_LOG_INT
| CPU_LOG_TB_IN_ASM
);
7294 case 0xfa: /* cli */
7296 if (s
->cpl
<= s
->iopl
) {
7297 gen_helper_cli(cpu_env
);
7299 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7303 gen_helper_cli(cpu_env
);
7305 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7309 case 0xfb: /* sti */
7311 if (s
->cpl
<= s
->iopl
) {
7313 gen_helper_sti(cpu_env
);
7314 /* interruptions are enabled only the first insn after sti */
7315 /* If several instructions disable interrupts, only the
7317 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
7318 gen_helper_set_inhibit_irq(cpu_env
);
7319 /* give a chance to handle pending irqs */
7320 gen_jmp_im(s
->pc
- s
->cs_base
);
7323 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7329 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7333 case 0x62: /* bound */
7336 ot
= dflag
? OT_LONG
: OT_WORD
;
7337 modrm
= cpu_ldub_code(env
, s
->pc
++);
7338 reg
= (modrm
>> 3) & 7;
7339 mod
= (modrm
>> 6) & 3;
7342 gen_op_mov_TN_reg(ot
, 0, reg
);
7343 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7344 gen_jmp_im(pc_start
- s
->cs_base
);
7345 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7346 if (ot
== OT_WORD
) {
7347 gen_helper_boundw(cpu_env
, cpu_A0
, cpu_tmp2_i32
);
7349 gen_helper_boundl(cpu_env
, cpu_A0
, cpu_tmp2_i32
);
7352 case 0x1c8 ... 0x1cf: /* bswap reg */
7353 reg
= (b
& 7) | REX_B(s
);
7354 #ifdef TARGET_X86_64
7356 gen_op_mov_TN_reg(OT_QUAD
, 0, reg
);
7357 tcg_gen_bswap64_i64(cpu_T
[0], cpu_T
[0]);
7358 gen_op_mov_reg_T0(OT_QUAD
, reg
);
7362 gen_op_mov_TN_reg(OT_LONG
, 0, reg
);
7363 tcg_gen_ext32u_tl(cpu_T
[0], cpu_T
[0]);
7364 tcg_gen_bswap32_tl(cpu_T
[0], cpu_T
[0]);
7365 gen_op_mov_reg_T0(OT_LONG
, reg
);
7368 case 0xd6: /* salc */
7371 gen_compute_eflags_c(s
, cpu_T
[0]);
7372 tcg_gen_neg_tl(cpu_T
[0], cpu_T
[0]);
7373 gen_op_mov_reg_T0(OT_BYTE
, R_EAX
);
7375 case 0xe0: /* loopnz */
7376 case 0xe1: /* loopz */
7377 case 0xe2: /* loop */
7378 case 0xe3: /* jecxz */
7382 tval
= (int8_t)insn_get(env
, s
, OT_BYTE
);
7383 next_eip
= s
->pc
- s
->cs_base
;
7388 l1
= gen_new_label();
7389 l2
= gen_new_label();
7390 l3
= gen_new_label();
7393 case 0: /* loopnz */
7395 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
7396 gen_op_jz_ecx(s
->aflag
, l3
);
7397 gen_jcc1(s
, (JCC_Z
<< 1) | (b
^ 1), l1
);
7400 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
7401 gen_op_jnz_ecx(s
->aflag
, l1
);
7405 gen_op_jz_ecx(s
->aflag
, l1
);
7410 gen_jmp_im(next_eip
);
7419 case 0x130: /* wrmsr */
7420 case 0x132: /* rdmsr */
7422 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7424 gen_update_cc_op(s
);
7425 gen_jmp_im(pc_start
- s
->cs_base
);
7427 gen_helper_rdmsr(cpu_env
);
7429 gen_helper_wrmsr(cpu_env
);
7433 case 0x131: /* rdtsc */
7434 gen_update_cc_op(s
);
7435 gen_jmp_im(pc_start
- s
->cs_base
);
7438 gen_helper_rdtsc(cpu_env
);
7441 gen_jmp(s
, s
->pc
- s
->cs_base
);
7444 case 0x133: /* rdpmc */
7445 gen_update_cc_op(s
);
7446 gen_jmp_im(pc_start
- s
->cs_base
);
7447 gen_helper_rdpmc(cpu_env
);
7449 case 0x134: /* sysenter */
7450 /* For Intel SYSENTER is valid on 64-bit */
7451 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
7454 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7456 gen_update_cc_op(s
);
7457 gen_jmp_im(pc_start
- s
->cs_base
);
7458 gen_helper_sysenter(cpu_env
);
7462 case 0x135: /* sysexit */
7463 /* For Intel SYSEXIT is valid on 64-bit */
7464 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
7467 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7469 gen_update_cc_op(s
);
7470 gen_jmp_im(pc_start
- s
->cs_base
);
7471 gen_helper_sysexit(cpu_env
, tcg_const_i32(dflag
));
7475 #ifdef TARGET_X86_64
7476 case 0x105: /* syscall */
7477 /* XXX: is it usable in real mode ? */
7478 gen_update_cc_op(s
);
7479 gen_jmp_im(pc_start
- s
->cs_base
);
7480 gen_helper_syscall(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7483 case 0x107: /* sysret */
7485 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7487 gen_update_cc_op(s
);
7488 gen_jmp_im(pc_start
- s
->cs_base
);
7489 gen_helper_sysret(cpu_env
, tcg_const_i32(s
->dflag
));
7490 /* condition codes are modified only in long mode */
7492 set_cc_op(s
, CC_OP_EFLAGS
);
7498 case 0x1a2: /* cpuid */
7499 gen_update_cc_op(s
);
7500 gen_jmp_im(pc_start
- s
->cs_base
);
7501 gen_helper_cpuid(cpu_env
);
7503 case 0xf4: /* hlt */
7505 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7507 gen_update_cc_op(s
);
7508 gen_jmp_im(pc_start
- s
->cs_base
);
7509 gen_helper_hlt(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7510 s
->is_jmp
= DISAS_TB_JUMP
;
7514 modrm
= cpu_ldub_code(env
, s
->pc
++);
7515 mod
= (modrm
>> 6) & 3;
7516 op
= (modrm
>> 3) & 7;
7519 if (!s
->pe
|| s
->vm86
)
7521 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_READ
);
7522 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,ldt
.selector
));
7526 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7529 if (!s
->pe
|| s
->vm86
)
7532 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7534 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_WRITE
);
7535 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7536 gen_jmp_im(pc_start
- s
->cs_base
);
7537 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7538 gen_helper_lldt(cpu_env
, cpu_tmp2_i32
);
7542 if (!s
->pe
|| s
->vm86
)
7544 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_READ
);
7545 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,tr
.selector
));
7549 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7552 if (!s
->pe
|| s
->vm86
)
7555 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7557 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_WRITE
);
7558 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7559 gen_jmp_im(pc_start
- s
->cs_base
);
7560 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7561 gen_helper_ltr(cpu_env
, cpu_tmp2_i32
);
7566 if (!s
->pe
|| s
->vm86
)
7568 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7569 gen_update_cc_op(s
);
7571 gen_helper_verr(cpu_env
, cpu_T
[0]);
7573 gen_helper_verw(cpu_env
, cpu_T
[0]);
7575 set_cc_op(s
, CC_OP_EFLAGS
);
7582 modrm
= cpu_ldub_code(env
, s
->pc
++);
7583 mod
= (modrm
>> 6) & 3;
7584 op
= (modrm
>> 3) & 7;
7590 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_GDTR_READ
);
7591 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7592 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, gdt
.limit
));
7593 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
7594 gen_add_A0_im(s
, 2);
7595 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, gdt
.base
));
7597 gen_op_andl_T0_im(0xffffff);
7598 gen_op_st_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7603 case 0: /* monitor */
7604 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) ||
7607 gen_update_cc_op(s
);
7608 gen_jmp_im(pc_start
- s
->cs_base
);
7609 #ifdef TARGET_X86_64
7610 if (s
->aflag
== 2) {
7611 gen_op_movq_A0_reg(R_EAX
);
7615 gen_op_movl_A0_reg(R_EAX
);
7617 gen_op_andl_A0_ffff();
7619 gen_add_A0_ds_seg(s
);
7620 gen_helper_monitor(cpu_env
, cpu_A0
);
7623 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) ||
7626 gen_update_cc_op(s
);
7627 gen_jmp_im(pc_start
- s
->cs_base
);
7628 gen_helper_mwait(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7632 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
) ||
7636 gen_helper_clac(cpu_env
);
7637 gen_jmp_im(s
->pc
- s
->cs_base
);
7641 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
) ||
7645 gen_helper_stac(cpu_env
);
7646 gen_jmp_im(s
->pc
- s
->cs_base
);
7653 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IDTR_READ
);
7654 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7655 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, idt
.limit
));
7656 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
7657 gen_add_A0_im(s
, 2);
7658 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, idt
.base
));
7660 gen_op_andl_T0_im(0xffffff);
7661 gen_op_st_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7667 gen_update_cc_op(s
);
7668 gen_jmp_im(pc_start
- s
->cs_base
);
7671 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7674 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7677 gen_helper_vmrun(cpu_env
, tcg_const_i32(s
->aflag
),
7678 tcg_const_i32(s
->pc
- pc_start
));
7680 s
->is_jmp
= DISAS_TB_JUMP
;
7683 case 1: /* VMMCALL */
7684 if (!(s
->flags
& HF_SVME_MASK
))
7686 gen_helper_vmmcall(cpu_env
);
7688 case 2: /* VMLOAD */
7689 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7692 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7695 gen_helper_vmload(cpu_env
, tcg_const_i32(s
->aflag
));
7698 case 3: /* VMSAVE */
7699 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7702 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7705 gen_helper_vmsave(cpu_env
, tcg_const_i32(s
->aflag
));
7709 if ((!(s
->flags
& HF_SVME_MASK
) &&
7710 !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
)) ||
7714 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7717 gen_helper_stgi(cpu_env
);
7721 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7724 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7727 gen_helper_clgi(cpu_env
);
7730 case 6: /* SKINIT */
7731 if ((!(s
->flags
& HF_SVME_MASK
) &&
7732 !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
)) ||
7735 gen_helper_skinit(cpu_env
);
7737 case 7: /* INVLPGA */
7738 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7741 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7744 gen_helper_invlpga(cpu_env
, tcg_const_i32(s
->aflag
));
7750 } else if (s
->cpl
!= 0) {
7751 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7753 gen_svm_check_intercept(s
, pc_start
,
7754 op
==2 ? SVM_EXIT_GDTR_WRITE
: SVM_EXIT_IDTR_WRITE
);
7755 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7756 gen_op_ld_T1_A0(OT_WORD
+ s
->mem_index
);
7757 gen_add_A0_im(s
, 2);
7758 gen_op_ld_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7760 gen_op_andl_T0_im(0xffffff);
7762 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,gdt
.base
));
7763 tcg_gen_st32_tl(cpu_T
[1], cpu_env
, offsetof(CPUX86State
,gdt
.limit
));
7765 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,idt
.base
));
7766 tcg_gen_st32_tl(cpu_T
[1], cpu_env
, offsetof(CPUX86State
,idt
.limit
));
7771 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_READ_CR0
);
7772 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7773 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,cr
[0]) + 4);
7775 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,cr
[0]));
7777 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 1);
7781 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7783 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_CR0
);
7784 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7785 gen_helper_lmsw(cpu_env
, cpu_T
[0]);
7786 gen_jmp_im(s
->pc
- s
->cs_base
);
7791 if (mod
!= 3) { /* invlpg */
7793 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7795 gen_update_cc_op(s
);
7796 gen_jmp_im(pc_start
- s
->cs_base
);
7797 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7798 gen_helper_invlpg(cpu_env
, cpu_A0
);
7799 gen_jmp_im(s
->pc
- s
->cs_base
);
7804 case 0: /* swapgs */
7805 #ifdef TARGET_X86_64
7808 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7810 tcg_gen_ld_tl(cpu_T
[0], cpu_env
,
7811 offsetof(CPUX86State
,segs
[R_GS
].base
));
7812 tcg_gen_ld_tl(cpu_T
[1], cpu_env
,
7813 offsetof(CPUX86State
,kernelgsbase
));
7814 tcg_gen_st_tl(cpu_T
[1], cpu_env
,
7815 offsetof(CPUX86State
,segs
[R_GS
].base
));
7816 tcg_gen_st_tl(cpu_T
[0], cpu_env
,
7817 offsetof(CPUX86State
,kernelgsbase
));
7825 case 1: /* rdtscp */
7826 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_RDTSCP
))
7828 gen_update_cc_op(s
);
7829 gen_jmp_im(pc_start
- s
->cs_base
);
7832 gen_helper_rdtscp(cpu_env
);
7835 gen_jmp(s
, s
->pc
- s
->cs_base
);
7847 case 0x108: /* invd */
7848 case 0x109: /* wbinvd */
7850 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7852 gen_svm_check_intercept(s
, pc_start
, (b
& 2) ? SVM_EXIT_INVD
: SVM_EXIT_WBINVD
);
7856 case 0x63: /* arpl or movslS (x86_64) */
7857 #ifdef TARGET_X86_64
7860 /* d_ot is the size of destination */
7861 d_ot
= dflag
+ OT_WORD
;
7863 modrm
= cpu_ldub_code(env
, s
->pc
++);
7864 reg
= ((modrm
>> 3) & 7) | rex_r
;
7865 mod
= (modrm
>> 6) & 3;
7866 rm
= (modrm
& 7) | REX_B(s
);
7869 gen_op_mov_TN_reg(OT_LONG
, 0, rm
);
7871 if (d_ot
== OT_QUAD
)
7872 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
7873 gen_op_mov_reg_T0(d_ot
, reg
);
7875 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7876 if (d_ot
== OT_QUAD
) {
7877 gen_op_lds_T0_A0(OT_LONG
+ s
->mem_index
);
7879 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
7881 gen_op_mov_reg_T0(d_ot
, reg
);
7887 TCGv t0
, t1
, t2
, a0
;
7889 if (!s
->pe
|| s
->vm86
)
7891 t0
= tcg_temp_local_new();
7892 t1
= tcg_temp_local_new();
7893 t2
= tcg_temp_local_new();
7895 modrm
= cpu_ldub_code(env
, s
->pc
++);
7896 reg
= (modrm
>> 3) & 7;
7897 mod
= (modrm
>> 6) & 3;
7900 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7901 gen_op_ld_v(ot
+ s
->mem_index
, t0
, cpu_A0
);
7902 a0
= tcg_temp_local_new();
7903 tcg_gen_mov_tl(a0
, cpu_A0
);
7905 gen_op_mov_v_reg(ot
, t0
, rm
);
7908 gen_op_mov_v_reg(ot
, t1
, reg
);
7909 tcg_gen_andi_tl(cpu_tmp0
, t0
, 3);
7910 tcg_gen_andi_tl(t1
, t1
, 3);
7911 tcg_gen_movi_tl(t2
, 0);
7912 label1
= gen_new_label();
7913 tcg_gen_brcond_tl(TCG_COND_GE
, cpu_tmp0
, t1
, label1
);
7914 tcg_gen_andi_tl(t0
, t0
, ~3);
7915 tcg_gen_or_tl(t0
, t0
, t1
);
7916 tcg_gen_movi_tl(t2
, CC_Z
);
7917 gen_set_label(label1
);
7919 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
7922 gen_op_mov_reg_v(ot
, rm
, t0
);
7924 gen_compute_eflags(s
);
7925 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_Z
);
7926 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t2
);
7932 case 0x102: /* lar */
7933 case 0x103: /* lsl */
7937 if (!s
->pe
|| s
->vm86
)
7939 ot
= dflag
? OT_LONG
: OT_WORD
;
7940 modrm
= cpu_ldub_code(env
, s
->pc
++);
7941 reg
= ((modrm
>> 3) & 7) | rex_r
;
7942 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7943 t0
= tcg_temp_local_new();
7944 gen_update_cc_op(s
);
7946 gen_helper_lar(t0
, cpu_env
, cpu_T
[0]);
7948 gen_helper_lsl(t0
, cpu_env
, cpu_T
[0]);
7950 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_src
, CC_Z
);
7951 label1
= gen_new_label();
7952 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_tmp0
, 0, label1
);
7953 gen_op_mov_reg_v(ot
, reg
, t0
);
7954 gen_set_label(label1
);
7955 set_cc_op(s
, CC_OP_EFLAGS
);
7960 modrm
= cpu_ldub_code(env
, s
->pc
++);
7961 mod
= (modrm
>> 6) & 3;
7962 op
= (modrm
>> 3) & 7;
7964 case 0: /* prefetchnta */
7965 case 1: /* prefetchnt0 */
7966 case 2: /* prefetchnt0 */
7967 case 3: /* prefetchnt0 */
7970 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7971 /* nothing more to do */
7973 default: /* nop (multi byte) */
7974 gen_nop_modrm(env
, s
, modrm
);
7978 case 0x119 ... 0x11f: /* nop (multi byte) */
7979 modrm
= cpu_ldub_code(env
, s
->pc
++);
7980 gen_nop_modrm(env
, s
, modrm
);
7982 case 0x120: /* mov reg, crN */
7983 case 0x122: /* mov crN, reg */
7985 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7987 modrm
= cpu_ldub_code(env
, s
->pc
++);
7988 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7989 * AMD documentation (24594.pdf) and testing of
7990 * intel 386 and 486 processors all show that the mod bits
7991 * are assumed to be 1's, regardless of actual values.
7993 rm
= (modrm
& 7) | REX_B(s
);
7994 reg
= ((modrm
>> 3) & 7) | rex_r
;
7999 if ((prefixes
& PREFIX_LOCK
) && (reg
== 0) &&
8000 (s
->cpuid_ext3_features
& CPUID_EXT3_CR8LEG
)) {
8009 gen_update_cc_op(s
);
8010 gen_jmp_im(pc_start
- s
->cs_base
);
8012 gen_op_mov_TN_reg(ot
, 0, rm
);
8013 gen_helper_write_crN(cpu_env
, tcg_const_i32(reg
),
8015 gen_jmp_im(s
->pc
- s
->cs_base
);
8018 gen_helper_read_crN(cpu_T
[0], cpu_env
, tcg_const_i32(reg
));
8019 gen_op_mov_reg_T0(ot
, rm
);
8027 case 0x121: /* mov reg, drN */
8028 case 0x123: /* mov drN, reg */
8030 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
8032 modrm
= cpu_ldub_code(env
, s
->pc
++);
8033 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
8034 * AMD documentation (24594.pdf) and testing of
8035 * intel 386 and 486 processors all show that the mod bits
8036 * are assumed to be 1's, regardless of actual values.
8038 rm
= (modrm
& 7) | REX_B(s
);
8039 reg
= ((modrm
>> 3) & 7) | rex_r
;
8044 /* XXX: do it dynamically with CR4.DE bit */
8045 if (reg
== 4 || reg
== 5 || reg
>= 8)
8048 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_DR0
+ reg
);
8049 gen_op_mov_TN_reg(ot
, 0, rm
);
8050 gen_helper_movl_drN_T0(cpu_env
, tcg_const_i32(reg
), cpu_T
[0]);
8051 gen_jmp_im(s
->pc
- s
->cs_base
);
8054 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_READ_DR0
+ reg
);
8055 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,dr
[reg
]));
8056 gen_op_mov_reg_T0(ot
, rm
);
8060 case 0x106: /* clts */
8062 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
8064 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_CR0
);
8065 gen_helper_clts(cpu_env
);
8066 /* abort block because static cpu state changed */
8067 gen_jmp_im(s
->pc
- s
->cs_base
);
8071 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
8072 case 0x1c3: /* MOVNTI reg, mem */
8073 if (!(s
->cpuid_features
& CPUID_SSE2
))
8075 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
8076 modrm
= cpu_ldub_code(env
, s
->pc
++);
8077 mod
= (modrm
>> 6) & 3;
8080 reg
= ((modrm
>> 3) & 7) | rex_r
;
8081 /* generate a generic store */
8082 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
8085 modrm
= cpu_ldub_code(env
, s
->pc
++);
8086 mod
= (modrm
>> 6) & 3;
8087 op
= (modrm
>> 3) & 7;
8089 case 0: /* fxsave */
8090 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
) ||
8091 (s
->prefix
& PREFIX_LOCK
))
8093 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
8094 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
8097 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
8098 gen_update_cc_op(s
);
8099 gen_jmp_im(pc_start
- s
->cs_base
);
8100 gen_helper_fxsave(cpu_env
, cpu_A0
, tcg_const_i32((s
->dflag
== 2)));
8102 case 1: /* fxrstor */
8103 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
) ||
8104 (s
->prefix
& PREFIX_LOCK
))
8106 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
8107 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
8110 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
8111 gen_update_cc_op(s
);
8112 gen_jmp_im(pc_start
- s
->cs_base
);
8113 gen_helper_fxrstor(cpu_env
, cpu_A0
,
8114 tcg_const_i32((s
->dflag
== 2)));
8116 case 2: /* ldmxcsr */
8117 case 3: /* stmxcsr */
8118 if (s
->flags
& HF_TS_MASK
) {
8119 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
8122 if ((s
->flags
& HF_EM_MASK
) || !(s
->flags
& HF_OSFXSR_MASK
) ||
8125 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
8127 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
8128 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
8129 gen_helper_ldmxcsr(cpu_env
, cpu_tmp2_i32
);
8131 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, mxcsr
));
8132 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
8135 case 5: /* lfence */
8136 case 6: /* mfence */
8137 if ((modrm
& 0xc7) != 0xc0 || !(s
->cpuid_features
& CPUID_SSE2
))
8140 case 7: /* sfence / clflush */
8141 if ((modrm
& 0xc7) == 0xc0) {
8143 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
8144 if (!(s
->cpuid_features
& CPUID_SSE
))
8148 if (!(s
->cpuid_features
& CPUID_CLFLUSH
))
8150 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
8157 case 0x10d: /* 3DNow! prefetch(w) */
8158 modrm
= cpu_ldub_code(env
, s
->pc
++);
8159 mod
= (modrm
>> 6) & 3;
8162 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
8163 /* ignore for now */
8165 case 0x1aa: /* rsm */
8166 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_RSM
);
8167 if (!(s
->flags
& HF_SMM_MASK
))
8169 gen_update_cc_op(s
);
8170 gen_jmp_im(s
->pc
- s
->cs_base
);
8171 gen_helper_rsm(cpu_env
);
8174 case 0x1b8: /* SSE4.2 popcnt */
8175 if ((prefixes
& (PREFIX_REPZ
| PREFIX_LOCK
| PREFIX_REPNZ
)) !=
8178 if (!(s
->cpuid_ext_features
& CPUID_EXT_POPCNT
))
8181 modrm
= cpu_ldub_code(env
, s
->pc
++);
8182 reg
= ((modrm
>> 3) & 7) | rex_r
;
8184 if (s
->prefix
& PREFIX_DATA
)
8186 else if (s
->dflag
!= 2)
8191 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
8192 gen_helper_popcnt(cpu_T
[0], cpu_env
, cpu_T
[0], tcg_const_i32(ot
));
8193 gen_op_mov_reg_T0(ot
, reg
);
8195 set_cc_op(s
, CC_OP_EFLAGS
);
8197 case 0x10e ... 0x10f:
8198 /* 3DNow! instructions, ignore prefixes */
8199 s
->prefix
&= ~(PREFIX_REPZ
| PREFIX_REPNZ
| PREFIX_DATA
);
8200 case 0x110 ... 0x117:
8201 case 0x128 ... 0x12f:
8202 case 0x138 ... 0x13a:
8203 case 0x150 ... 0x179:
8204 case 0x17c ... 0x17f:
8206 case 0x1c4 ... 0x1c6:
8207 case 0x1d0 ... 0x1fe:
8208 gen_sse(env
, s
, b
, pc_start
, rex_r
);
8213 /* lock generation */
8214 if (s
->prefix
& PREFIX_LOCK
)
8215 gen_helper_unlock();
8218 if (s
->prefix
& PREFIX_LOCK
)
8219 gen_helper_unlock();
8220 /* XXX: ensure that no lock was generated */
8221 gen_exception(s
, EXCP06_ILLOP
, pc_start
- s
->cs_base
);
8225 void optimize_flags_init(void)
8227 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
8228 cpu_cc_op
= tcg_global_mem_new_i32(TCG_AREG0
,
8229 offsetof(CPUX86State
, cc_op
), "cc_op");
8230 cpu_cc_dst
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUX86State
, cc_dst
),
8232 cpu_cc_src
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUX86State
, cc_src
),
8234 cpu_cc_src2
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUX86State
, cc_src2
),
8237 #ifdef TARGET_X86_64
8238 cpu_regs
[R_EAX
] = tcg_global_mem_new_i64(TCG_AREG0
,
8239 offsetof(CPUX86State
, regs
[R_EAX
]), "rax");
8240 cpu_regs
[R_ECX
] = tcg_global_mem_new_i64(TCG_AREG0
,
8241 offsetof(CPUX86State
, regs
[R_ECX
]), "rcx");
8242 cpu_regs
[R_EDX
] = tcg_global_mem_new_i64(TCG_AREG0
,
8243 offsetof(CPUX86State
, regs
[R_EDX
]), "rdx");
8244 cpu_regs
[R_EBX
] = tcg_global_mem_new_i64(TCG_AREG0
,
8245 offsetof(CPUX86State
, regs
[R_EBX
]), "rbx");
8246 cpu_regs
[R_ESP
] = tcg_global_mem_new_i64(TCG_AREG0
,
8247 offsetof(CPUX86State
, regs
[R_ESP
]), "rsp");
8248 cpu_regs
[R_EBP
] = tcg_global_mem_new_i64(TCG_AREG0
,
8249 offsetof(CPUX86State
, regs
[R_EBP
]), "rbp");
8250 cpu_regs
[R_ESI
] = tcg_global_mem_new_i64(TCG_AREG0
,
8251 offsetof(CPUX86State
, regs
[R_ESI
]), "rsi");
8252 cpu_regs
[R_EDI
] = tcg_global_mem_new_i64(TCG_AREG0
,
8253 offsetof(CPUX86State
, regs
[R_EDI
]), "rdi");
8254 cpu_regs
[8] = tcg_global_mem_new_i64(TCG_AREG0
,
8255 offsetof(CPUX86State
, regs
[8]), "r8");
8256 cpu_regs
[9] = tcg_global_mem_new_i64(TCG_AREG0
,
8257 offsetof(CPUX86State
, regs
[9]), "r9");
8258 cpu_regs
[10] = tcg_global_mem_new_i64(TCG_AREG0
,
8259 offsetof(CPUX86State
, regs
[10]), "r10");
8260 cpu_regs
[11] = tcg_global_mem_new_i64(TCG_AREG0
,
8261 offsetof(CPUX86State
, regs
[11]), "r11");
8262 cpu_regs
[12] = tcg_global_mem_new_i64(TCG_AREG0
,
8263 offsetof(CPUX86State
, regs
[12]), "r12");
8264 cpu_regs
[13] = tcg_global_mem_new_i64(TCG_AREG0
,
8265 offsetof(CPUX86State
, regs
[13]), "r13");
8266 cpu_regs
[14] = tcg_global_mem_new_i64(TCG_AREG0
,
8267 offsetof(CPUX86State
, regs
[14]), "r14");
8268 cpu_regs
[15] = tcg_global_mem_new_i64(TCG_AREG0
,
8269 offsetof(CPUX86State
, regs
[15]), "r15");
8271 cpu_regs
[R_EAX
] = tcg_global_mem_new_i32(TCG_AREG0
,
8272 offsetof(CPUX86State
, regs
[R_EAX
]), "eax");
8273 cpu_regs
[R_ECX
] = tcg_global_mem_new_i32(TCG_AREG0
,
8274 offsetof(CPUX86State
, regs
[R_ECX
]), "ecx");
8275 cpu_regs
[R_EDX
] = tcg_global_mem_new_i32(TCG_AREG0
,
8276 offsetof(CPUX86State
, regs
[R_EDX
]), "edx");
8277 cpu_regs
[R_EBX
] = tcg_global_mem_new_i32(TCG_AREG0
,
8278 offsetof(CPUX86State
, regs
[R_EBX
]), "ebx");
8279 cpu_regs
[R_ESP
] = tcg_global_mem_new_i32(TCG_AREG0
,
8280 offsetof(CPUX86State
, regs
[R_ESP
]), "esp");
8281 cpu_regs
[R_EBP
] = tcg_global_mem_new_i32(TCG_AREG0
,
8282 offsetof(CPUX86State
, regs
[R_EBP
]), "ebp");
8283 cpu_regs
[R_ESI
] = tcg_global_mem_new_i32(TCG_AREG0
,
8284 offsetof(CPUX86State
, regs
[R_ESI
]), "esi");
8285 cpu_regs
[R_EDI
] = tcg_global_mem_new_i32(TCG_AREG0
,
8286 offsetof(CPUX86State
, regs
[R_EDI
]), "edi");
8289 /* register helpers */
8290 #define GEN_HELPER 2
8294 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8295 basic block 'tb'. If search_pc is TRUE, also generate PC
8296 information for each intermediate instruction. */
/* NOTE(review): this chunk is a garbled extraction -- each original
   source line is split across several physical lines, original line
   numbers are fused into the text, and many lines (braces, the main
   decode loop header, some statements) are missing entirely.  The
   comments below annotate only the fragments that are visible;
   restore against the pristine file before compiling. */
8297 static inline void gen_intermediate_code_internal(CPUX86State
*env
,
8298 TranslationBlock
*tb
,
/* The DisasContext lives on this function's stack; 'dc' aliases it
   for the duration of the translation of one basic block. */
8301 DisasContext dc1
, *dc
= &dc1
;
8302 target_ulong pc_ptr
;
8303 uint16_t *gen_opc_end
;
8307 target_ulong pc_start
;
8308 target_ulong cs_base
;
8312 /* generate intermediate code */
8314 cs_base
= tb
->cs_base
;
/* Decode the TB 'flags' word into individual DisasContext fields.
   Shift/mask constants come from the HF_* / VM_ / IOPL_ / TF_
   definitions in cpu.h (not visible here). */
8317 dc
->pe
= (flags
>> HF_PE_SHIFT
) & 1;
8318 dc
->code32
= (flags
>> HF_CS32_SHIFT
) & 1;
8319 dc
->ss32
= (flags
>> HF_SS32_SHIFT
) & 1;
8320 dc
->addseg
= (flags
>> HF_ADDSEG_SHIFT
) & 1;
8322 dc
->vm86
= (flags
>> VM_SHIFT
) & 1;
8323 dc
->cpl
= (flags
>> HF_CPL_SHIFT
) & 3;
8324 dc
->iopl
= (flags
>> IOPL_SHIFT
) & 3;
8325 dc
->tf
= (flags
>> TF_SHIFT
) & 1;
8326 dc
->singlestep_enabled
= env
->singlestep_enabled
;
/* Condition codes start dynamic: the previous TB's CC state is
   unknown at TB entry. */
8327 dc
->cc_op
= CC_OP_DYNAMIC
;
8328 dc
->cc_op_dirty
= false;
8329 dc
->cs_base
= cs_base
;
8331 dc
->popl_esp_hack
= 0;
8332 /* select memory access functions */
8334 if (flags
& HF_SOFTMMU_MASK
) {
/* softmmu: mem_index encodes the MMU index; the "+1 << 2" packing
   matches how the ldst helpers decode it -- TODO confirm against the
   pristine file, the else-branch is missing from this extraction. */
8335 dc
->mem_index
= (cpu_mmu_index(env
) + 1) << 2;
/* Snapshot the CPUID feature words so the decoder can test them
   without dereferencing env on every instruction. */
8337 dc
->cpuid_features
= env
->cpuid_features
;
8338 dc
->cpuid_ext_features
= env
->cpuid_ext_features
;
8339 dc
->cpuid_ext2_features
= env
->cpuid_ext2_features
;
8340 dc
->cpuid_ext3_features
= env
->cpuid_ext3_features
;
8341 dc
->cpuid_7_0_ebx_features
= env
->cpuid_7_0_ebx_features
;
8342 #ifdef TARGET_X86_64
8343 dc
->lma
= (flags
>> HF_LMA_SHIFT
) & 1;
8344 dc
->code64
= (flags
>> HF_CS64_SHIFT
) & 1;
/* Direct block chaining is only possible when no single-step /
   irq-inhibit condition forces us to leave the TB after one insn
   (and, without softmmu, when softmmu flags are clear). */
8347 dc
->jmp_opt
= !(dc
->tf
|| env
->singlestep_enabled
||
8348 (flags
& HF_INHIBIT_IRQ_MASK
)
8349 #ifndef CONFIG_SOFTMMU
8350 || (flags
& HF_SOFTMMU_MASK
)
8354 /* check addseg logic */
/* Sanity check: addseg must be set whenever we are in vm86, real
   mode, or a 16-bit code segment. */
8355 if (!dc
->addseg
&& (dc
->vm86
|| !dc
->pe
|| !dc
->code32
))
8356 printf("ERROR addseg\n");
/* Allocate the per-TB TCG temporaries used by the decoder. */
8359 cpu_T
[0] = tcg_temp_new();
8360 cpu_T
[1] = tcg_temp_new();
8361 cpu_A0
= tcg_temp_new();
8363 cpu_tmp0
= tcg_temp_new();
8364 cpu_tmp1_i64
= tcg_temp_new_i64();
8365 cpu_tmp2_i32
= tcg_temp_new_i32();
8366 cpu_tmp3_i32
= tcg_temp_new_i32();
8367 cpu_tmp4
= tcg_temp_new();
8368 cpu_ptr0
= tcg_temp_new_ptr();
8369 cpu_ptr1
= tcg_temp_new_ptr();
/* cc_srcT must survive across branches inside the TB, hence a
   local (stack-backed) temp rather than a plain one. */
8370 cpu_cc_srcT
= tcg_temp_local_new();
8372 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
8374 dc
->is_jmp
= DISAS_NEXT
;
/* icount: cap the number of guest insns per TB; 0 means "no cap",
   replaced by the mask's maximum. */
8378 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
8380 max_insns
= CF_COUNT_MASK
;
/* --- main decode loop (its 'for(;;)' header is elided here) --- */
/* Stop and emit a debug exception if a breakpoint sits on this PC,
   unless it is a CPU breakpoint suppressed by the RF flag. */
8384 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
8385 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
8386 if (bp
->pc
== pc_ptr
&&
8387 !((bp
->flags
& BP_CPU
) && (tb
->flags
& HF_RF_MASK
))) {
8388 gen_debug(dc
, pc_ptr
- dc
->cs_base
);
/* search_pc bookkeeping: record pc / cc_op / icount for each
   generated op so the CPU state can be restored from any op index. */
8394 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
8398 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
8400 tcg_ctx
.gen_opc_pc
[lj
] = pc_ptr
;
8401 gen_opc_cc_op
[lj
] = dc
->cc_op
;
8402 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
8403 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
/* Last insn of an icount-limited TB that does I/O must start the
   io window (gen_io_start call elided in this extraction). */
8405 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
/* Decode and translate exactly one guest instruction; returns the
   PC of the next instruction. */
8408 pc_ptr
= disas_insn(env
, dc
, pc_ptr
);
8410 /* stop translation if indicated */
8413 /* if single step mode, we generate only one instruction and
8414 generate an exception */
8415 /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8416 the flag and abort the translation to give the irqs a
8417 chance to happen */
8418 if (dc
->tf
|| dc
->singlestep_enabled
||
8419 (flags
& HF_INHIBIT_IRQ_MASK
)) {
8420 gen_jmp_im(pc_ptr
- dc
->cs_base
);
8424 /* if too long translation, stop generation too */
/* Three independent limits: opcode buffer nearly full, guest code
   close to a page boundary, or per-TB insn budget exhausted. */
8425 if (tcg_ctx
.gen_opc_ptr
>= gen_opc_end
||
8426 (pc_ptr
- pc_start
) >= (TARGET_PAGE_SIZE
- 32) ||
8427 num_insns
>= max_insns
) {
8428 gen_jmp_im(pc_ptr
- dc
->cs_base
);
8433 gen_jmp_im(pc_ptr
- dc
->cs_base
);
/* --- end of decode loop; epilogue --- */
8438 if (tb
->cflags
& CF_LAST_IO
)
8440 gen_icount_end(tb
, num_insns
);
8441 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
8442 /* we don't forget to fill the last values */
8444 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
8447 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
/* Optional in-asm logging of the guest code that was translated. */
8451 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
8453 qemu_log("----------------\n");
8454 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
8455 #ifdef TARGET_X86_64
/* disas_flags selects 16/32(/64)-bit disassembly; here: non-code32
   means 16-bit -- the x86_64 branch above this line is elided. */
8460 disas_flags
= !dc
->code32
;
8461 log_target_disas(env
, pc_start
, pc_ptr
- pc_start
, disas_flags
);
/* Record the translated extent and insn count in the TB. */
8467 tb
->size
= pc_ptr
- pc_start
;
8468 tb
->icount
= num_insns
;
8472 void gen_intermediate_code(CPUX86State
*env
, TranslationBlock
*tb
)
8474 gen_intermediate_code_internal(env
, tb
, 0);
8477 void gen_intermediate_code_pc(CPUX86State
*env
, TranslationBlock
*tb
)
8479 gen_intermediate_code_internal(env
, tb
, 1);
8482 void restore_state_to_opc(CPUX86State
*env
, TranslationBlock
*tb
, int pc_pos
)
8486 if (qemu_loglevel_mask(CPU_LOG_TB_OP
)) {
8488 qemu_log("RESTORE:\n");
8489 for(i
= 0;i
<= pc_pos
; i
++) {
8490 if (tcg_ctx
.gen_opc_instr_start
[i
]) {
8491 qemu_log("0x%04x: " TARGET_FMT_lx
"\n", i
,
8492 tcg_ctx
.gen_opc_pc
[i
]);
8495 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx
" cs_base=%x\n",
8496 pc_pos
, tcg_ctx
.gen_opc_pc
[pc_pos
] - tb
->cs_base
,
8497 (uint32_t)tb
->cs_base
);
8500 env
->eip
= tcg_ctx
.gen_opc_pc
[pc_pos
] - tb
->cs_base
;
8501 cc_op
= gen_opc_cc_op
[pc_pos
];
8502 if (cc_op
!= CC_OP_DYNAMIC
)