4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
26 #include "qemu/host-utils.h"
28 #include "disas/disas.h"
/* Instruction-prefix flags accumulated while decoding one insn (bitmask). */
#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20

/* Accessors for per-insn mode/REX state kept in DisasContext.
   NOTE(review): upstream wraps these in #ifdef TARGET_X86_64 with
   constant-0 fallbacks for 32-bit targets — confirm against upstream. */
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
62 /* global register indexes */
63 static TCGv_ptr cpu_env
;
65 static TCGv cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
, cpu_cc_srcT
;
66 static TCGv_i32 cpu_cc_op
;
67 static TCGv cpu_regs
[CPU_NB_REGS
];
70 /* local register indexes (only used inside old micro ops) */
71 static TCGv cpu_tmp0
, cpu_tmp4
;
72 static TCGv_ptr cpu_ptr0
, cpu_ptr1
;
73 static TCGv_i32 cpu_tmp2_i32
, cpu_tmp3_i32
;
74 static TCGv_i64 cpu_tmp1_i64
;
76 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
78 #include "exec/gen-icount.h"
81 static int x86_64_hregs
;
84 typedef struct DisasContext
{
85 /* current insn context */
86 int override
; /* -1 if no override */
89 target_ulong pc
; /* pc = eip + cs_base */
90 int is_jmp
; /* 1 = means jump (stop translation), 2 means CPU
91 static state change (stop translation) */
92 /* current block context */
93 target_ulong cs_base
; /* base of CS segment */
94 int pe
; /* protected mode */
95 int code32
; /* 32 bit code segment */
97 int lma
; /* long mode active */
98 int code64
; /* 64 bit code segment */
101 int vex_l
; /* vex vector length */
102 int vex_v
; /* vex vvvv register, without 1's compliment. */
103 int ss32
; /* 32 bit stack segment */
104 CCOp cc_op
; /* current CC operation */
106 int addseg
; /* non zero if either DS/ES/SS have a non zero base */
107 int f_st
; /* currently unused */
108 int vm86
; /* vm86 mode */
111 int tf
; /* TF cpu flag */
112 int singlestep_enabled
; /* "hardware" single step enabled */
113 int jmp_opt
; /* use direct block chaining for direct jumps */
114 int mem_index
; /* select memory access functions */
115 uint64_t flags
; /* all execution flags */
116 struct TranslationBlock
*tb
;
117 int popl_esp_hack
; /* for correct popl with esp base handling */
118 int rip_offset
; /* only used in x86_64, but left for simplicity */
120 int cpuid_ext_features
;
121 int cpuid_ext2_features
;
122 int cpuid_ext3_features
;
123 int cpuid_7_0_ebx_features
;
126 static void gen_eob(DisasContext
*s
);
127 static void gen_jmp(DisasContext
*s
, target_ulong eip
);
128 static void gen_jmp_tb(DisasContext
*s
, target_ulong eip
, int tb_num
);
129 static void gen_op(DisasContext
*s1
, int op
, int ot
, int d
);
131 /* i386 arith/logic operations */
151 OP_SHL1
, /* undocumented */
175 /* I386 int registers */
176 OR_EAX
, /* MUST be even numbered */
185 OR_TMP0
= 16, /* temporary operand register */
187 OR_A0
, /* temporary register used when doing address evaluation */
197 /* Bit set if the global variable is live after setting CC_OP to X. */
198 static const uint8_t cc_op_live
[CC_OP_NB
] = {
199 [CC_OP_DYNAMIC
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
200 [CC_OP_EFLAGS
] = USES_CC_SRC
,
201 [CC_OP_MULB
... CC_OP_MULQ
] = USES_CC_DST
| USES_CC_SRC
,
202 [CC_OP_ADDB
... CC_OP_ADDQ
] = USES_CC_DST
| USES_CC_SRC
,
203 [CC_OP_ADCB
... CC_OP_ADCQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
204 [CC_OP_SUBB
... CC_OP_SUBQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRCT
,
205 [CC_OP_SBBB
... CC_OP_SBBQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
206 [CC_OP_LOGICB
... CC_OP_LOGICQ
] = USES_CC_DST
,
207 [CC_OP_INCB
... CC_OP_INCQ
] = USES_CC_DST
| USES_CC_SRC
,
208 [CC_OP_DECB
... CC_OP_DECQ
] = USES_CC_DST
| USES_CC_SRC
,
209 [CC_OP_SHLB
... CC_OP_SHLQ
] = USES_CC_DST
| USES_CC_SRC
,
210 [CC_OP_SARB
... CC_OP_SARQ
] = USES_CC_DST
| USES_CC_SRC
,
211 [CC_OP_BMILGB
... CC_OP_BMILGQ
] = USES_CC_DST
| USES_CC_SRC
,
212 [CC_OP_ADCX
] = USES_CC_DST
| USES_CC_SRC
,
213 [CC_OP_ADOX
] = USES_CC_SRC
| USES_CC_SRC2
,
214 [CC_OP_ADCOX
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
218 static void set_cc_op(DisasContext
*s
, CCOp op
)
222 if (s
->cc_op
== op
) {
226 /* Discard CC computation that will no longer be used. */
227 dead
= cc_op_live
[s
->cc_op
] & ~cc_op_live
[op
];
228 if (dead
& USES_CC_DST
) {
229 tcg_gen_discard_tl(cpu_cc_dst
);
231 if (dead
& USES_CC_SRC
) {
232 tcg_gen_discard_tl(cpu_cc_src
);
234 if (dead
& USES_CC_SRC2
) {
235 tcg_gen_discard_tl(cpu_cc_src2
);
237 if (dead
& USES_CC_SRCT
) {
238 tcg_gen_discard_tl(cpu_cc_srcT
);
241 if (op
== CC_OP_DYNAMIC
) {
242 /* The DYNAMIC setting is translator only, and should never be
243 stored. Thus we always consider it clean. */
244 s
->cc_op_dirty
= false;
246 /* Discard any computed CC_OP value (see shifts). */
247 if (s
->cc_op
== CC_OP_DYNAMIC
) {
248 tcg_gen_discard_i32(cpu_cc_op
);
250 s
->cc_op_dirty
= true;
255 static void gen_update_cc_op(DisasContext
*s
)
257 if (s
->cc_op_dirty
) {
258 tcg_gen_movi_i32(cpu_cc_op
, s
->cc_op
);
259 s
->cc_op_dirty
= false;
263 static inline void gen_op_movl_T0_0(void)
265 tcg_gen_movi_tl(cpu_T
[0], 0);
268 static inline void gen_op_movl_T0_im(int32_t val
)
270 tcg_gen_movi_tl(cpu_T
[0], val
);
273 static inline void gen_op_movl_T0_imu(uint32_t val
)
275 tcg_gen_movi_tl(cpu_T
[0], val
);
278 static inline void gen_op_movl_T1_im(int32_t val
)
280 tcg_gen_movi_tl(cpu_T
[1], val
);
283 static inline void gen_op_movl_T1_imu(uint32_t val
)
285 tcg_gen_movi_tl(cpu_T
[1], val
);
288 static inline void gen_op_movl_A0_im(uint32_t val
)
290 tcg_gen_movi_tl(cpu_A0
, val
);
294 static inline void gen_op_movq_A0_im(int64_t val
)
296 tcg_gen_movi_tl(cpu_A0
, val
);
300 static inline void gen_movtl_T0_im(target_ulong val
)
302 tcg_gen_movi_tl(cpu_T
[0], val
);
305 static inline void gen_movtl_T1_im(target_ulong val
)
307 tcg_gen_movi_tl(cpu_T
[1], val
);
310 static inline void gen_op_andl_T0_ffff(void)
312 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xffff);
315 static inline void gen_op_andl_T0_im(uint32_t val
)
317 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], val
);
320 static inline void gen_op_movl_T0_T1(void)
322 tcg_gen_mov_tl(cpu_T
[0], cpu_T
[1]);
325 static inline void gen_op_andl_A0_ffff(void)
327 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffff);
/* Number of operand sizes the decoder handles (b/w/l, plus q on x86-64).
   The restored #ifdef is grounded by the surviving "#else !TARGET_X86_64". */
#ifdef TARGET_X86_64
#define NB_OP_SIZES 4
#else /* !TARGET_X86_64 */
#define NB_OP_SIZES 3
#endif /* !TARGET_X86_64 */

/* Byte offsets of the 8/16/32-bit sub-registers within a target_ulong
   slot of cpu_regs[], for both host endiannesses. */
#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4".  Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
{
    /* NOTE(review): the "reg < 4" guard was lost in extraction and is
       restored from upstream — regs 0..3 are always AL/CL/DL/BL. */
    if (reg < 4) {
        return false;
    }
#ifdef TARGET_X86_64
    /* With a REX prefix, 4..7 encode SPL/BPL/SIL/DIL, not the xH regs. */
    if (reg >= 8 || x86_64_hregs) {
        return false;
    }
#endif
    return true;
}
373 static inline void gen_op_mov_reg_v(int ot
, int reg
, TCGv t0
)
377 if (!byte_reg_is_xH(reg
)) {
378 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], t0
, 0, 8);
380 tcg_gen_deposit_tl(cpu_regs
[reg
- 4], cpu_regs
[reg
- 4], t0
, 8, 8);
384 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], t0
, 0, 16);
386 default: /* XXX this shouldn't be reached; abort? */
388 /* For x86_64, this sets the higher half of register to zero.
389 For i386, this is equivalent to a mov. */
390 tcg_gen_ext32u_tl(cpu_regs
[reg
], t0
);
394 tcg_gen_mov_tl(cpu_regs
[reg
], t0
);
400 static inline void gen_op_mov_reg_T0(int ot
, int reg
)
402 gen_op_mov_reg_v(ot
, reg
, cpu_T
[0]);
405 static inline void gen_op_mov_reg_T1(int ot
, int reg
)
407 gen_op_mov_reg_v(ot
, reg
, cpu_T
[1]);
410 static inline void gen_op_mov_reg_A0(int size
, int reg
)
414 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], cpu_A0
, 0, 16);
416 default: /* XXX this shouldn't be reached; abort? */
418 /* For x86_64, this sets the higher half of register to zero.
419 For i386, this is equivalent to a mov. */
420 tcg_gen_ext32u_tl(cpu_regs
[reg
], cpu_A0
);
424 tcg_gen_mov_tl(cpu_regs
[reg
], cpu_A0
);
430 static inline void gen_op_mov_v_reg(int ot
, TCGv t0
, int reg
)
432 if (ot
== OT_BYTE
&& byte_reg_is_xH(reg
)) {
433 tcg_gen_shri_tl(t0
, cpu_regs
[reg
- 4], 8);
434 tcg_gen_ext8u_tl(t0
, t0
);
436 tcg_gen_mov_tl(t0
, cpu_regs
[reg
]);
440 static inline void gen_op_mov_TN_reg(int ot
, int t_index
, int reg
)
442 gen_op_mov_v_reg(ot
, cpu_T
[t_index
], reg
);
445 static inline void gen_op_movl_A0_reg(int reg
)
447 tcg_gen_mov_tl(cpu_A0
, cpu_regs
[reg
]);
450 static inline void gen_op_addl_A0_im(int32_t val
)
452 tcg_gen_addi_tl(cpu_A0
, cpu_A0
, val
);
454 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
459 static inline void gen_op_addq_A0_im(int64_t val
)
461 tcg_gen_addi_tl(cpu_A0
, cpu_A0
, val
);
465 static void gen_add_A0_im(DisasContext
*s
, int val
)
469 gen_op_addq_A0_im(val
);
472 gen_op_addl_A0_im(val
);
475 static inline void gen_op_addl_T0_T1(void)
477 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
480 static inline void gen_op_jmp_T0(void)
482 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, eip
));
485 static inline void gen_op_add_reg_im(int size
, int reg
, int32_t val
)
489 tcg_gen_addi_tl(cpu_tmp0
, cpu_regs
[reg
], val
);
490 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], cpu_tmp0
, 0, 16);
493 tcg_gen_addi_tl(cpu_tmp0
, cpu_regs
[reg
], val
);
494 /* For x86_64, this sets the higher half of register to zero.
495 For i386, this is equivalent to a nop. */
496 tcg_gen_ext32u_tl(cpu_tmp0
, cpu_tmp0
);
497 tcg_gen_mov_tl(cpu_regs
[reg
], cpu_tmp0
);
501 tcg_gen_addi_tl(cpu_regs
[reg
], cpu_regs
[reg
], val
);
507 static inline void gen_op_add_reg_T0(int size
, int reg
)
511 tcg_gen_add_tl(cpu_tmp0
, cpu_regs
[reg
], cpu_T
[0]);
512 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], cpu_tmp0
, 0, 16);
515 tcg_gen_add_tl(cpu_tmp0
, cpu_regs
[reg
], cpu_T
[0]);
516 /* For x86_64, this sets the higher half of register to zero.
517 For i386, this is equivalent to a nop. */
518 tcg_gen_ext32u_tl(cpu_tmp0
, cpu_tmp0
);
519 tcg_gen_mov_tl(cpu_regs
[reg
], cpu_tmp0
);
523 tcg_gen_add_tl(cpu_regs
[reg
], cpu_regs
[reg
], cpu_T
[0]);
529 static inline void gen_op_addl_A0_reg_sN(int shift
, int reg
)
531 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[reg
]);
533 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, shift
);
534 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
535 /* For x86_64, this sets the higher half of register to zero.
536 For i386, this is equivalent to a nop. */
537 tcg_gen_ext32u_tl(cpu_A0
, cpu_A0
);
540 static inline void gen_op_movl_A0_seg(int reg
)
542 tcg_gen_ld32u_tl(cpu_A0
, cpu_env
, offsetof(CPUX86State
, segs
[reg
].base
) + REG_L_OFFSET
);
545 static inline void gen_op_addl_A0_seg(DisasContext
*s
, int reg
)
547 tcg_gen_ld_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
, segs
[reg
].base
));
550 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
551 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
553 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
554 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
557 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
562 static inline void gen_op_movq_A0_seg(int reg
)
564 tcg_gen_ld_tl(cpu_A0
, cpu_env
, offsetof(CPUX86State
, segs
[reg
].base
));
567 static inline void gen_op_addq_A0_seg(int reg
)
569 tcg_gen_ld_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
, segs
[reg
].base
));
570 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
573 static inline void gen_op_movq_A0_reg(int reg
)
575 tcg_gen_mov_tl(cpu_A0
, cpu_regs
[reg
]);
578 static inline void gen_op_addq_A0_reg_sN(int shift
, int reg
)
580 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[reg
]);
582 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, shift
);
583 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
587 static inline void gen_op_lds_T0_A0(int idx
)
589 int mem_index
= (idx
>> 2) - 1;
592 tcg_gen_qemu_ld8s(cpu_T
[0], cpu_A0
, mem_index
);
595 tcg_gen_qemu_ld16s(cpu_T
[0], cpu_A0
, mem_index
);
599 tcg_gen_qemu_ld32s(cpu_T
[0], cpu_A0
, mem_index
);
604 static inline void gen_op_ld_v(int idx
, TCGv t0
, TCGv a0
)
606 int mem_index
= (idx
>> 2) - 1;
609 tcg_gen_qemu_ld8u(t0
, a0
, mem_index
);
612 tcg_gen_qemu_ld16u(t0
, a0
, mem_index
);
615 tcg_gen_qemu_ld32u(t0
, a0
, mem_index
);
619 /* Should never happen on 32-bit targets. */
621 tcg_gen_qemu_ld64(t0
, a0
, mem_index
);
627 /* XXX: always use ldu or lds */
628 static inline void gen_op_ld_T0_A0(int idx
)
630 gen_op_ld_v(idx
, cpu_T
[0], cpu_A0
);
633 static inline void gen_op_ldu_T0_A0(int idx
)
635 gen_op_ld_v(idx
, cpu_T
[0], cpu_A0
);
638 static inline void gen_op_ld_T1_A0(int idx
)
640 gen_op_ld_v(idx
, cpu_T
[1], cpu_A0
);
643 static inline void gen_op_st_v(int idx
, TCGv t0
, TCGv a0
)
645 int mem_index
= (idx
>> 2) - 1;
648 tcg_gen_qemu_st8(t0
, a0
, mem_index
);
651 tcg_gen_qemu_st16(t0
, a0
, mem_index
);
654 tcg_gen_qemu_st32(t0
, a0
, mem_index
);
658 /* Should never happen on 32-bit targets. */
660 tcg_gen_qemu_st64(t0
, a0
, mem_index
);
666 static inline void gen_op_st_T0_A0(int idx
)
668 gen_op_st_v(idx
, cpu_T
[0], cpu_A0
);
671 static inline void gen_op_st_T1_A0(int idx
)
673 gen_op_st_v(idx
, cpu_T
[1], cpu_A0
);
676 static inline void gen_jmp_im(target_ulong pc
)
678 tcg_gen_movi_tl(cpu_tmp0
, pc
);
679 tcg_gen_st_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
, eip
));
682 static inline void gen_string_movl_A0_ESI(DisasContext
*s
)
686 override
= s
->override
;
690 gen_op_movq_A0_seg(override
);
691 gen_op_addq_A0_reg_sN(0, R_ESI
);
693 gen_op_movq_A0_reg(R_ESI
);
699 if (s
->addseg
&& override
< 0)
702 gen_op_movl_A0_seg(override
);
703 gen_op_addl_A0_reg_sN(0, R_ESI
);
705 gen_op_movl_A0_reg(R_ESI
);
708 /* 16 address, always override */
711 gen_op_movl_A0_reg(R_ESI
);
712 gen_op_andl_A0_ffff();
713 gen_op_addl_A0_seg(s
, override
);
717 static inline void gen_string_movl_A0_EDI(DisasContext
*s
)
721 gen_op_movq_A0_reg(R_EDI
);
726 gen_op_movl_A0_seg(R_ES
);
727 gen_op_addl_A0_reg_sN(0, R_EDI
);
729 gen_op_movl_A0_reg(R_EDI
);
732 gen_op_movl_A0_reg(R_EDI
);
733 gen_op_andl_A0_ffff();
734 gen_op_addl_A0_seg(s
, R_ES
);
738 static inline void gen_op_movl_T0_Dshift(int ot
)
740 tcg_gen_ld32s_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, df
));
741 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], ot
);
744 static TCGv
gen_ext_tl(TCGv dst
, TCGv src
, int size
, bool sign
)
749 tcg_gen_ext8s_tl(dst
, src
);
751 tcg_gen_ext8u_tl(dst
, src
);
756 tcg_gen_ext16s_tl(dst
, src
);
758 tcg_gen_ext16u_tl(dst
, src
);
764 tcg_gen_ext32s_tl(dst
, src
);
766 tcg_gen_ext32u_tl(dst
, src
);
775 static void gen_extu(int ot
, TCGv reg
)
777 gen_ext_tl(reg
, reg
, ot
, false);
780 static void gen_exts(int ot
, TCGv reg
)
782 gen_ext_tl(reg
, reg
, ot
, true);
785 static inline void gen_op_jnz_ecx(int size
, int label1
)
787 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[R_ECX
]);
788 gen_extu(size
+ 1, cpu_tmp0
);
789 tcg_gen_brcondi_tl(TCG_COND_NE
, cpu_tmp0
, 0, label1
);
792 static inline void gen_op_jz_ecx(int size
, int label1
)
794 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[R_ECX
]);
795 gen_extu(size
+ 1, cpu_tmp0
);
796 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_tmp0
, 0, label1
);
799 static void gen_helper_in_func(int ot
, TCGv v
, TCGv_i32 n
)
803 gen_helper_inb(v
, n
);
806 gen_helper_inw(v
, n
);
809 gen_helper_inl(v
, n
);
814 static void gen_helper_out_func(int ot
, TCGv_i32 v
, TCGv_i32 n
)
818 gen_helper_outb(v
, n
);
821 gen_helper_outw(v
, n
);
824 gen_helper_outl(v
, n
);
829 static void gen_check_io(DisasContext
*s
, int ot
, target_ulong cur_eip
,
833 target_ulong next_eip
;
836 if (s
->pe
&& (s
->cpl
> s
->iopl
|| s
->vm86
)) {
840 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
843 gen_helper_check_iob(cpu_env
, cpu_tmp2_i32
);
846 gen_helper_check_iow(cpu_env
, cpu_tmp2_i32
);
849 gen_helper_check_iol(cpu_env
, cpu_tmp2_i32
);
853 if(s
->flags
& HF_SVMI_MASK
) {
858 svm_flags
|= (1 << (4 + ot
));
859 next_eip
= s
->pc
- s
->cs_base
;
860 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
861 gen_helper_svm_check_io(cpu_env
, cpu_tmp2_i32
,
862 tcg_const_i32(svm_flags
),
863 tcg_const_i32(next_eip
- cur_eip
));
867 static inline void gen_movs(DisasContext
*s
, int ot
)
869 gen_string_movl_A0_ESI(s
);
870 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
871 gen_string_movl_A0_EDI(s
);
872 gen_op_st_T0_A0(ot
+ s
->mem_index
);
873 gen_op_movl_T0_Dshift(ot
);
874 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
875 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
878 static void gen_op_update1_cc(void)
880 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
883 static void gen_op_update2_cc(void)
885 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
886 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
889 static void gen_op_update3_cc(TCGv reg
)
891 tcg_gen_mov_tl(cpu_cc_src2
, reg
);
892 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
893 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
896 static inline void gen_op_testl_T0_T1_cc(void)
898 tcg_gen_and_tl(cpu_cc_dst
, cpu_T
[0], cpu_T
[1]);
901 static void gen_op_update_neg_cc(void)
903 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
904 tcg_gen_neg_tl(cpu_cc_src
, cpu_T
[0]);
905 tcg_gen_movi_tl(cpu_cc_srcT
, 0);
908 /* compute all eflags to cc_src */
909 static void gen_compute_eflags(DisasContext
*s
)
911 TCGv zero
, dst
, src1
, src2
;
914 if (s
->cc_op
== CC_OP_EFLAGS
) {
917 if (s
->cc_op
== CC_OP_CLR
) {
918 tcg_gen_movi_tl(cpu_cc_src
, CC_Z
);
919 set_cc_op(s
, CC_OP_EFLAGS
);
928 /* Take care to not read values that are not live. */
929 live
= cc_op_live
[s
->cc_op
] & ~USES_CC_SRCT
;
930 dead
= live
^ (USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
);
932 zero
= tcg_const_tl(0);
933 if (dead
& USES_CC_DST
) {
936 if (dead
& USES_CC_SRC
) {
939 if (dead
& USES_CC_SRC2
) {
945 gen_helper_cc_compute_all(cpu_cc_src
, dst
, src1
, src2
, cpu_cc_op
);
946 set_cc_op(s
, CC_OP_EFLAGS
);
953 typedef struct CCPrepare
{
963 /* compute eflags.C to reg */
964 static CCPrepare
gen_prepare_eflags_c(DisasContext
*s
, TCGv reg
)
970 case CC_OP_SUBB
... CC_OP_SUBQ
:
971 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
972 size
= s
->cc_op
- CC_OP_SUBB
;
973 t1
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
974 /* If no temporary was used, be careful not to alias t1 and t0. */
975 t0
= TCGV_EQUAL(t1
, cpu_cc_src
) ? cpu_tmp0
: reg
;
976 tcg_gen_mov_tl(t0
, cpu_cc_srcT
);
980 case CC_OP_ADDB
... CC_OP_ADDQ
:
981 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
982 size
= s
->cc_op
- CC_OP_ADDB
;
983 t1
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
984 t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, false);
986 return (CCPrepare
) { .cond
= TCG_COND_LTU
, .reg
= t0
,
987 .reg2
= t1
, .mask
= -1, .use_reg2
= true };
989 case CC_OP_LOGICB
... CC_OP_LOGICQ
:
991 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
993 case CC_OP_INCB
... CC_OP_INCQ
:
994 case CC_OP_DECB
... CC_OP_DECQ
:
995 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
996 .mask
= -1, .no_setcond
= true };
998 case CC_OP_SHLB
... CC_OP_SHLQ
:
999 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
1000 size
= s
->cc_op
- CC_OP_SHLB
;
1001 shift
= (8 << size
) - 1;
1002 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1003 .mask
= (target_ulong
)1 << shift
};
1005 case CC_OP_MULB
... CC_OP_MULQ
:
1006 return (CCPrepare
) { .cond
= TCG_COND_NE
,
1007 .reg
= cpu_cc_src
, .mask
= -1 };
1009 case CC_OP_BMILGB
... CC_OP_BMILGQ
:
1010 size
= s
->cc_op
- CC_OP_BMILGB
;
1011 t0
= gen_ext_tl(reg
, cpu_cc_src
, size
, false);
1012 return (CCPrepare
) { .cond
= TCG_COND_EQ
, .reg
= t0
, .mask
= -1 };
1016 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_dst
,
1017 .mask
= -1, .no_setcond
= true };
1020 case CC_OP_SARB
... CC_OP_SARQ
:
1022 return (CCPrepare
) { .cond
= TCG_COND_NE
,
1023 .reg
= cpu_cc_src
, .mask
= CC_C
};
1026 /* The need to compute only C from CC_OP_DYNAMIC is important
1027 in efficiently implementing e.g. INC at the start of a TB. */
1028 gen_update_cc_op(s
);
1029 gen_helper_cc_compute_c(reg
, cpu_cc_dst
, cpu_cc_src
,
1030 cpu_cc_src2
, cpu_cc_op
);
1031 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
1032 .mask
= -1, .no_setcond
= true };
1036 /* compute eflags.P to reg */
1037 static CCPrepare
gen_prepare_eflags_p(DisasContext
*s
, TCGv reg
)
1039 gen_compute_eflags(s
);
1040 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1044 /* compute eflags.S to reg */
1045 static CCPrepare
gen_prepare_eflags_s(DisasContext
*s
, TCGv reg
)
1049 gen_compute_eflags(s
);
1055 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1058 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
1061 int size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1062 TCGv t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, true);
1063 return (CCPrepare
) { .cond
= TCG_COND_LT
, .reg
= t0
, .mask
= -1 };
1068 /* compute eflags.O to reg */
1069 static CCPrepare
gen_prepare_eflags_o(DisasContext
*s
, TCGv reg
)
1074 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src2
,
1075 .mask
= -1, .no_setcond
= true };
1077 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
1079 gen_compute_eflags(s
);
1080 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1085 /* compute eflags.Z to reg */
1086 static CCPrepare
gen_prepare_eflags_z(DisasContext
*s
, TCGv reg
)
1090 gen_compute_eflags(s
);
1096 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1099 return (CCPrepare
) { .cond
= TCG_COND_ALWAYS
, .mask
= -1 };
1102 int size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1103 TCGv t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, false);
1104 return (CCPrepare
) { .cond
= TCG_COND_EQ
, .reg
= t0
, .mask
= -1 };
1109 /* perform a conditional store into register 'reg' according to jump opcode
1110 value 'b'. In the fast case, T0 is guaranted not to be used. */
1111 static CCPrepare
gen_prepare_cc(DisasContext
*s
, int b
, TCGv reg
)
1113 int inv
, jcc_op
, size
, cond
;
1118 jcc_op
= (b
>> 1) & 7;
1121 case CC_OP_SUBB
... CC_OP_SUBQ
:
1122 /* We optimize relational operators for the cmp/jcc case. */
1123 size
= s
->cc_op
- CC_OP_SUBB
;
1126 tcg_gen_mov_tl(cpu_tmp4
, cpu_cc_srcT
);
1127 gen_extu(size
, cpu_tmp4
);
1128 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
1129 cc
= (CCPrepare
) { .cond
= TCG_COND_LEU
, .reg
= cpu_tmp4
,
1130 .reg2
= t0
, .mask
= -1, .use_reg2
= true };
1139 tcg_gen_mov_tl(cpu_tmp4
, cpu_cc_srcT
);
1140 gen_exts(size
, cpu_tmp4
);
1141 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, true);
1142 cc
= (CCPrepare
) { .cond
= cond
, .reg
= cpu_tmp4
,
1143 .reg2
= t0
, .mask
= -1, .use_reg2
= true };
1153 /* This actually generates good code for JC, JZ and JS. */
1156 cc
= gen_prepare_eflags_o(s
, reg
);
1159 cc
= gen_prepare_eflags_c(s
, reg
);
1162 cc
= gen_prepare_eflags_z(s
, reg
);
1165 gen_compute_eflags(s
);
1166 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1167 .mask
= CC_Z
| CC_C
};
1170 cc
= gen_prepare_eflags_s(s
, reg
);
1173 cc
= gen_prepare_eflags_p(s
, reg
);
1176 gen_compute_eflags(s
);
1177 if (TCGV_EQUAL(reg
, cpu_cc_src
)) {
1180 tcg_gen_shri_tl(reg
, cpu_cc_src
, 4); /* CC_O -> CC_S */
1181 tcg_gen_xor_tl(reg
, reg
, cpu_cc_src
);
1182 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
1187 gen_compute_eflags(s
);
1188 if (TCGV_EQUAL(reg
, cpu_cc_src
)) {
1191 tcg_gen_shri_tl(reg
, cpu_cc_src
, 4); /* CC_O -> CC_S */
1192 tcg_gen_xor_tl(reg
, reg
, cpu_cc_src
);
1193 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
1194 .mask
= CC_S
| CC_Z
};
1201 cc
.cond
= tcg_invert_cond(cc
.cond
);
1206 static void gen_setcc1(DisasContext
*s
, int b
, TCGv reg
)
1208 CCPrepare cc
= gen_prepare_cc(s
, b
, reg
);
1210 if (cc
.no_setcond
) {
1211 if (cc
.cond
== TCG_COND_EQ
) {
1212 tcg_gen_xori_tl(reg
, cc
.reg
, 1);
1214 tcg_gen_mov_tl(reg
, cc
.reg
);
1219 if (cc
.cond
== TCG_COND_NE
&& !cc
.use_reg2
&& cc
.imm
== 0 &&
1220 cc
.mask
!= 0 && (cc
.mask
& (cc
.mask
- 1)) == 0) {
1221 tcg_gen_shri_tl(reg
, cc
.reg
, ctztl(cc
.mask
));
1222 tcg_gen_andi_tl(reg
, reg
, 1);
1225 if (cc
.mask
!= -1) {
1226 tcg_gen_andi_tl(reg
, cc
.reg
, cc
.mask
);
1230 tcg_gen_setcond_tl(cc
.cond
, reg
, cc
.reg
, cc
.reg2
);
1232 tcg_gen_setcondi_tl(cc
.cond
, reg
, cc
.reg
, cc
.imm
);
1236 static inline void gen_compute_eflags_c(DisasContext
*s
, TCGv reg
)
1238 gen_setcc1(s
, JCC_B
<< 1, reg
);
1241 /* generate a conditional jump to label 'l1' according to jump opcode
1242 value 'b'. In the fast case, T0 is guaranted not to be used. */
1243 static inline void gen_jcc1_noeob(DisasContext
*s
, int b
, int l1
)
1245 CCPrepare cc
= gen_prepare_cc(s
, b
, cpu_T
[0]);
1247 if (cc
.mask
!= -1) {
1248 tcg_gen_andi_tl(cpu_T
[0], cc
.reg
, cc
.mask
);
1252 tcg_gen_brcond_tl(cc
.cond
, cc
.reg
, cc
.reg2
, l1
);
1254 tcg_gen_brcondi_tl(cc
.cond
, cc
.reg
, cc
.imm
, l1
);
1258 /* Generate a conditional jump to label 'l1' according to jump opcode
1259 value 'b'. In the fast case, T0 is guaranted not to be used.
1260 A translation block must end soon. */
1261 static inline void gen_jcc1(DisasContext
*s
, int b
, int l1
)
1263 CCPrepare cc
= gen_prepare_cc(s
, b
, cpu_T
[0]);
1265 gen_update_cc_op(s
);
1266 if (cc
.mask
!= -1) {
1267 tcg_gen_andi_tl(cpu_T
[0], cc
.reg
, cc
.mask
);
1270 set_cc_op(s
, CC_OP_DYNAMIC
);
1272 tcg_gen_brcond_tl(cc
.cond
, cc
.reg
, cc
.reg2
, l1
);
1274 tcg_gen_brcondi_tl(cc
.cond
, cc
.reg
, cc
.imm
, l1
);
1278 /* XXX: does not work with gdbstub "ice" single step - not a
1280 static int gen_jz_ecx_string(DisasContext
*s
, target_ulong next_eip
)
1284 l1
= gen_new_label();
1285 l2
= gen_new_label();
1286 gen_op_jnz_ecx(s
->aflag
, l1
);
1288 gen_jmp_tb(s
, next_eip
, 1);
1293 static inline void gen_stos(DisasContext
*s
, int ot
)
1295 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
1296 gen_string_movl_A0_EDI(s
);
1297 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1298 gen_op_movl_T0_Dshift(ot
);
1299 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1302 static inline void gen_lods(DisasContext
*s
, int ot
)
1304 gen_string_movl_A0_ESI(s
);
1305 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1306 gen_op_mov_reg_T0(ot
, R_EAX
);
1307 gen_op_movl_T0_Dshift(ot
);
1308 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1311 static inline void gen_scas(DisasContext
*s
, int ot
)
1313 gen_string_movl_A0_EDI(s
);
1314 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
1315 gen_op(s
, OP_CMPL
, ot
, R_EAX
);
1316 gen_op_movl_T0_Dshift(ot
);
1317 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1320 static inline void gen_cmps(DisasContext
*s
, int ot
)
1322 gen_string_movl_A0_EDI(s
);
1323 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
1324 gen_string_movl_A0_ESI(s
);
1325 gen_op(s
, OP_CMPL
, ot
, OR_TMP0
);
1326 gen_op_movl_T0_Dshift(ot
);
1327 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1328 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1331 static inline void gen_ins(DisasContext
*s
, int ot
)
1335 gen_string_movl_A0_EDI(s
);
1336 /* Note: we must do this dummy write first to be restartable in
1337 case of page fault. */
1339 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1340 gen_op_mov_TN_reg(OT_WORD
, 1, R_EDX
);
1341 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[1]);
1342 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
1343 gen_helper_in_func(ot
, cpu_T
[0], cpu_tmp2_i32
);
1344 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1345 gen_op_movl_T0_Dshift(ot
);
1346 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1351 static inline void gen_outs(DisasContext
*s
, int ot
)
1355 gen_string_movl_A0_ESI(s
);
1356 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1358 gen_op_mov_TN_reg(OT_WORD
, 1, R_EDX
);
1359 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[1]);
1360 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
1361 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[0]);
1362 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1364 gen_op_movl_T0_Dshift(ot
);
1365 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1370 /* same method as Valgrind : we generate jumps to current or next
1372 #define GEN_REPZ(op) \
1373 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1374 target_ulong cur_eip, target_ulong next_eip) \
1377 gen_update_cc_op(s); \
1378 l2 = gen_jz_ecx_string(s, next_eip); \
1379 gen_ ## op(s, ot); \
1380 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1381 /* a loop would cause two single step exceptions if ECX = 1 \
1382 before rep string_insn */ \
1384 gen_op_jz_ecx(s->aflag, l2); \
1385 gen_jmp(s, cur_eip); \
1388 #define GEN_REPZ2(op) \
1389 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1390 target_ulong cur_eip, \
1391 target_ulong next_eip, \
1395 gen_update_cc_op(s); \
1396 l2 = gen_jz_ecx_string(s, next_eip); \
1397 gen_ ## op(s, ot); \
1398 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1399 gen_update_cc_op(s); \
1400 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
1402 gen_op_jz_ecx(s->aflag, l2); \
1403 gen_jmp(s, cur_eip); \
1414 static void gen_helper_fp_arith_ST0_FT0(int op
)
1418 gen_helper_fadd_ST0_FT0(cpu_env
);
1421 gen_helper_fmul_ST0_FT0(cpu_env
);
1424 gen_helper_fcom_ST0_FT0(cpu_env
);
1427 gen_helper_fcom_ST0_FT0(cpu_env
);
1430 gen_helper_fsub_ST0_FT0(cpu_env
);
1433 gen_helper_fsubr_ST0_FT0(cpu_env
);
1436 gen_helper_fdiv_ST0_FT0(cpu_env
);
1439 gen_helper_fdivr_ST0_FT0(cpu_env
);
1444 /* NOTE the exception in "r" op ordering */
1445 static void gen_helper_fp_arith_STN_ST0(int op
, int opreg
)
1447 TCGv_i32 tmp
= tcg_const_i32(opreg
);
1450 gen_helper_fadd_STN_ST0(cpu_env
, tmp
);
1453 gen_helper_fmul_STN_ST0(cpu_env
, tmp
);
1456 gen_helper_fsubr_STN_ST0(cpu_env
, tmp
);
1459 gen_helper_fsub_STN_ST0(cpu_env
, tmp
);
1462 gen_helper_fdivr_STN_ST0(cpu_env
, tmp
);
1465 gen_helper_fdiv_STN_ST0(cpu_env
, tmp
);
1470 /* if d == OR_TMP0, it means memory operand (address in A0) */
1471 static void gen_op(DisasContext
*s1
, int op
, int ot
, int d
)
1474 gen_op_mov_TN_reg(ot
, 0, d
);
1476 gen_op_ld_T0_A0(ot
+ s1
->mem_index
);
1480 gen_compute_eflags_c(s1
, cpu_tmp4
);
1481 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1482 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_tmp4
);
1484 gen_op_mov_reg_T0(ot
, d
);
1486 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1487 gen_op_update3_cc(cpu_tmp4
);
1488 set_cc_op(s1
, CC_OP_ADCB
+ ot
);
1491 gen_compute_eflags_c(s1
, cpu_tmp4
);
1492 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1493 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_tmp4
);
1495 gen_op_mov_reg_T0(ot
, d
);
1497 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1498 gen_op_update3_cc(cpu_tmp4
);
1499 set_cc_op(s1
, CC_OP_SBBB
+ ot
);
1502 gen_op_addl_T0_T1();
1504 gen_op_mov_reg_T0(ot
, d
);
1506 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1507 gen_op_update2_cc();
1508 set_cc_op(s1
, CC_OP_ADDB
+ ot
);
1511 tcg_gen_mov_tl(cpu_cc_srcT
, cpu_T
[0]);
1512 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1514 gen_op_mov_reg_T0(ot
, d
);
1516 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1517 gen_op_update2_cc();
1518 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1522 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1524 gen_op_mov_reg_T0(ot
, d
);
1526 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1527 gen_op_update1_cc();
1528 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1531 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1533 gen_op_mov_reg_T0(ot
, d
);
1535 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1536 gen_op_update1_cc();
1537 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1540 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1542 gen_op_mov_reg_T0(ot
, d
);
1544 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1545 gen_op_update1_cc();
1546 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1549 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
1550 tcg_gen_mov_tl(cpu_cc_srcT
, cpu_T
[0]);
1551 tcg_gen_sub_tl(cpu_cc_dst
, cpu_T
[0], cpu_T
[1]);
1552 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1557 /* if d == OR_TMP0, it means memory operand (address in A0) */
1558 static void gen_inc(DisasContext
*s1
, int ot
, int d
, int c
)
1561 gen_op_mov_TN_reg(ot
, 0, d
);
1563 gen_op_ld_T0_A0(ot
+ s1
->mem_index
);
1564 gen_compute_eflags_c(s1
, cpu_cc_src
);
1566 tcg_gen_addi_tl(cpu_T
[0], cpu_T
[0], 1);
1567 set_cc_op(s1
, CC_OP_INCB
+ ot
);
1569 tcg_gen_addi_tl(cpu_T
[0], cpu_T
[0], -1);
1570 set_cc_op(s1
, CC_OP_DECB
+ ot
);
1573 gen_op_mov_reg_T0(ot
, d
);
1575 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1576 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1579 static void gen_shift_flags(DisasContext
*s
, int ot
, TCGv result
, TCGv shm1
,
1580 TCGv count
, bool is_right
)
1582 TCGv_i32 z32
, s32
, oldop
;
1585 /* Store the results into the CC variables. If we know that the
1586 variable must be dead, store unconditionally. Otherwise we'll
1587 need to not disrupt the current contents. */
1588 z_tl
= tcg_const_tl(0);
1589 if (cc_op_live
[s
->cc_op
] & USES_CC_DST
) {
1590 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_dst
, count
, z_tl
,
1591 result
, cpu_cc_dst
);
1593 tcg_gen_mov_tl(cpu_cc_dst
, result
);
1595 if (cc_op_live
[s
->cc_op
] & USES_CC_SRC
) {
1596 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_src
, count
, z_tl
,
1599 tcg_gen_mov_tl(cpu_cc_src
, shm1
);
1601 tcg_temp_free(z_tl
);
1603 /* Get the two potential CC_OP values into temporaries. */
1604 tcg_gen_movi_i32(cpu_tmp2_i32
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1605 if (s
->cc_op
== CC_OP_DYNAMIC
) {
1608 tcg_gen_movi_i32(cpu_tmp3_i32
, s
->cc_op
);
1609 oldop
= cpu_tmp3_i32
;
1612 /* Conditionally store the CC_OP value. */
1613 z32
= tcg_const_i32(0);
1614 s32
= tcg_temp_new_i32();
1615 tcg_gen_trunc_tl_i32(s32
, count
);
1616 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, s32
, z32
, cpu_tmp2_i32
, oldop
);
1617 tcg_temp_free_i32(z32
);
1618 tcg_temp_free_i32(s32
);
1620 /* The CC_OP value is no longer predictable. */
1621 set_cc_op(s
, CC_OP_DYNAMIC
);
1624 static void gen_shift_rm_T1(DisasContext
*s
, int ot
, int op1
,
1625 int is_right
, int is_arith
)
1627 target_ulong mask
= (ot
== OT_QUAD
? 0x3f : 0x1f);
1630 if (op1
== OR_TMP0
) {
1631 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1633 gen_op_mov_TN_reg(ot
, 0, op1
);
1636 tcg_gen_andi_tl(cpu_T
[1], cpu_T
[1], mask
);
1637 tcg_gen_subi_tl(cpu_tmp0
, cpu_T
[1], 1);
1641 gen_exts(ot
, cpu_T
[0]);
1642 tcg_gen_sar_tl(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1643 tcg_gen_sar_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1645 gen_extu(ot
, cpu_T
[0]);
1646 tcg_gen_shr_tl(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1647 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1650 tcg_gen_shl_tl(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1651 tcg_gen_shl_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1655 if (op1
== OR_TMP0
) {
1656 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1658 gen_op_mov_reg_T0(ot
, op1
);
1661 gen_shift_flags(s
, ot
, cpu_T
[0], cpu_tmp0
, cpu_T
[1], is_right
);
1664 static void gen_shift_rm_im(DisasContext
*s
, int ot
, int op1
, int op2
,
1665 int is_right
, int is_arith
)
1667 int mask
= (ot
== OT_QUAD
? 0x3f : 0x1f);
1671 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1673 gen_op_mov_TN_reg(ot
, 0, op1
);
1679 gen_exts(ot
, cpu_T
[0]);
1680 tcg_gen_sari_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1681 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], op2
);
1683 gen_extu(ot
, cpu_T
[0]);
1684 tcg_gen_shri_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1685 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], op2
);
1688 tcg_gen_shli_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1689 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], op2
);
1695 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1697 gen_op_mov_reg_T0(ot
, op1
);
1699 /* update eflags if non zero shift */
1701 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
1702 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1703 set_cc_op(s
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1707 static inline void tcg_gen_lshift(TCGv ret
, TCGv arg1
, target_long arg2
)
1710 tcg_gen_shli_tl(ret
, arg1
, arg2
);
1712 tcg_gen_shri_tl(ret
, arg1
, -arg2
);
1715 static void gen_rot_rm_T1(DisasContext
*s
, int ot
, int op1
, int is_right
)
1717 target_ulong mask
= (ot
== OT_QUAD
? 0x3f : 0x1f);
1721 if (op1
== OR_TMP0
) {
1722 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1724 gen_op_mov_TN_reg(ot
, 0, op1
);
1727 tcg_gen_andi_tl(cpu_T
[1], cpu_T
[1], mask
);
1731 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1732 tcg_gen_ext8u_tl(cpu_T
[0], cpu_T
[0]);
1733 tcg_gen_muli_tl(cpu_T
[0], cpu_T
[0], 0x01010101);
1736 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1737 tcg_gen_deposit_tl(cpu_T
[0], cpu_T
[0], cpu_T
[0], 16, 16);
1740 #ifdef TARGET_X86_64
1742 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
1743 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
1745 tcg_gen_rotr_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1747 tcg_gen_rotl_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1749 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
1754 tcg_gen_rotr_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1756 tcg_gen_rotl_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1762 if (op1
== OR_TMP0
) {
1763 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1765 gen_op_mov_reg_T0(ot
, op1
);
1768 /* We'll need the flags computed into CC_SRC. */
1769 gen_compute_eflags(s
);
1771 /* The value that was "rotated out" is now present at the other end
1772 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1773 since we've computed the flags into CC_SRC, these variables are
1776 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T
[0], mask
- 1);
1777 tcg_gen_shri_tl(cpu_cc_dst
, cpu_T
[0], mask
);
1779 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T
[0], mask
);
1780 tcg_gen_andi_tl(cpu_cc_dst
, cpu_T
[0], 1);
1782 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1783 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1785 /* Now conditionally store the new CC_OP value. If the shift count
1786 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1787 Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
1788 exactly as we computed above. */
1789 t0
= tcg_const_i32(0);
1790 t1
= tcg_temp_new_i32();
1791 tcg_gen_trunc_tl_i32(t1
, cpu_T
[1]);
1792 tcg_gen_movi_i32(cpu_tmp2_i32
, CC_OP_ADCOX
);
1793 tcg_gen_movi_i32(cpu_tmp3_i32
, CC_OP_EFLAGS
);
1794 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, t1
, t0
,
1795 cpu_tmp2_i32
, cpu_tmp3_i32
);
1796 tcg_temp_free_i32(t0
);
1797 tcg_temp_free_i32(t1
);
1799 /* The CC_OP value is no longer predictable. */
1800 set_cc_op(s
, CC_OP_DYNAMIC
);
1803 static void gen_rot_rm_im(DisasContext
*s
, int ot
, int op1
, int op2
,
1806 int mask
= (ot
== OT_QUAD
? 0x3f : 0x1f);
1810 if (op1
== OR_TMP0
) {
1811 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1813 gen_op_mov_TN_reg(ot
, 0, op1
);
1819 #ifdef TARGET_X86_64
1821 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
1823 tcg_gen_rotri_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, op2
);
1825 tcg_gen_rotli_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, op2
);
1827 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
1832 tcg_gen_rotri_tl(cpu_T
[0], cpu_T
[0], op2
);
1834 tcg_gen_rotli_tl(cpu_T
[0], cpu_T
[0], op2
);
1845 shift
= mask
+ 1 - shift
;
1847 gen_extu(ot
, cpu_T
[0]);
1848 tcg_gen_shli_tl(cpu_tmp0
, cpu_T
[0], shift
);
1849 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], mask
+ 1 - shift
);
1850 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
1856 if (op1
== OR_TMP0
) {
1857 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1859 gen_op_mov_reg_T0(ot
, op1
);
1863 /* Compute the flags into CC_SRC. */
1864 gen_compute_eflags(s
);
1866 /* The value that was "rotated out" is now present at the other end
1867 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1868 since we've computed the flags into CC_SRC, these variables are
1871 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T
[0], mask
- 1);
1872 tcg_gen_shri_tl(cpu_cc_dst
, cpu_T
[0], mask
);
1874 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T
[0], mask
);
1875 tcg_gen_andi_tl(cpu_cc_dst
, cpu_T
[0], 1);
1877 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1878 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1879 set_cc_op(s
, CC_OP_ADCOX
);
1883 /* XXX: add faster immediate = 1 case */
1884 static void gen_rotc_rm_T1(DisasContext
*s
, int ot
, int op1
,
1887 gen_compute_eflags(s
);
1888 assert(s
->cc_op
== CC_OP_EFLAGS
);
1892 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1894 gen_op_mov_TN_reg(ot
, 0, op1
);
1899 gen_helper_rcrb(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1902 gen_helper_rcrw(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1905 gen_helper_rcrl(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1907 #ifdef TARGET_X86_64
1909 gen_helper_rcrq(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1916 gen_helper_rclb(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1919 gen_helper_rclw(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1922 gen_helper_rcll(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1924 #ifdef TARGET_X86_64
1926 gen_helper_rclq(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1933 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1935 gen_op_mov_reg_T0(ot
, op1
);
1938 /* XXX: add faster immediate case */
1939 static void gen_shiftd_rm_T1(DisasContext
*s
, int ot
, int op1
,
1940 bool is_right
, TCGv count_in
)
1942 target_ulong mask
= (ot
== OT_QUAD
? 63 : 31);
1946 if (op1
== OR_TMP0
) {
1947 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1949 gen_op_mov_TN_reg(ot
, 0, op1
);
1952 count
= tcg_temp_new();
1953 tcg_gen_andi_tl(count
, count_in
, mask
);
1957 /* Note: we implement the Intel behaviour for shift count > 16.
1958 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1959 portion by constructing it as a 32-bit value. */
1961 tcg_gen_deposit_tl(cpu_tmp0
, cpu_T
[0], cpu_T
[1], 16, 16);
1962 tcg_gen_mov_tl(cpu_T
[1], cpu_T
[0]);
1963 tcg_gen_mov_tl(cpu_T
[0], cpu_tmp0
);
1965 tcg_gen_deposit_tl(cpu_T
[1], cpu_T
[0], cpu_T
[1], 16, 16);
1968 #ifdef TARGET_X86_64
1970 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1971 tcg_gen_subi_tl(cpu_tmp0
, count
, 1);
1973 tcg_gen_concat_tl_i64(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1974 tcg_gen_shr_i64(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1975 tcg_gen_shr_i64(cpu_T
[0], cpu_T
[0], count
);
1977 tcg_gen_concat_tl_i64(cpu_T
[0], cpu_T
[1], cpu_T
[0]);
1978 tcg_gen_shl_i64(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1979 tcg_gen_shl_i64(cpu_T
[0], cpu_T
[0], count
);
1980 tcg_gen_shri_i64(cpu_tmp0
, cpu_tmp0
, 32);
1981 tcg_gen_shri_i64(cpu_T
[0], cpu_T
[0], 32);
1986 tcg_gen_subi_tl(cpu_tmp0
, count
, 1);
1988 tcg_gen_shr_tl(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1990 tcg_gen_subfi_tl(cpu_tmp4
, mask
+ 1, count
);
1991 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], count
);
1992 tcg_gen_shl_tl(cpu_T
[1], cpu_T
[1], cpu_tmp4
);
1994 tcg_gen_shl_tl(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1995 if (ot
== OT_WORD
) {
1996 /* Only needed if count > 16, for Intel behaviour. */
1997 tcg_gen_subfi_tl(cpu_tmp4
, 33, count
);
1998 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[1], cpu_tmp4
);
1999 tcg_gen_or_tl(cpu_tmp0
, cpu_tmp0
, cpu_tmp4
);
2002 tcg_gen_subfi_tl(cpu_tmp4
, mask
+ 1, count
);
2003 tcg_gen_shl_tl(cpu_T
[0], cpu_T
[0], count
);
2004 tcg_gen_shr_tl(cpu_T
[1], cpu_T
[1], cpu_tmp4
);
2006 tcg_gen_movi_tl(cpu_tmp4
, 0);
2007 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_T
[1], count
, cpu_tmp4
,
2008 cpu_tmp4
, cpu_T
[1]);
2009 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
2014 if (op1
== OR_TMP0
) {
2015 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2017 gen_op_mov_reg_T0(ot
, op1
);
2020 gen_shift_flags(s
, ot
, cpu_T
[0], cpu_tmp0
, count
, is_right
);
2021 tcg_temp_free(count
);
2024 static void gen_shift(DisasContext
*s1
, int op
, int ot
, int d
, int s
)
2027 gen_op_mov_TN_reg(ot
, 1, s
);
2030 gen_rot_rm_T1(s1
, ot
, d
, 0);
2033 gen_rot_rm_T1(s1
, ot
, d
, 1);
2037 gen_shift_rm_T1(s1
, ot
, d
, 0, 0);
2040 gen_shift_rm_T1(s1
, ot
, d
, 1, 0);
2043 gen_shift_rm_T1(s1
, ot
, d
, 1, 1);
2046 gen_rotc_rm_T1(s1
, ot
, d
, 0);
2049 gen_rotc_rm_T1(s1
, ot
, d
, 1);
2054 static void gen_shifti(DisasContext
*s1
, int op
, int ot
, int d
, int c
)
2058 gen_rot_rm_im(s1
, ot
, d
, c
, 0);
2061 gen_rot_rm_im(s1
, ot
, d
, c
, 1);
2065 gen_shift_rm_im(s1
, ot
, d
, c
, 0, 0);
2068 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 0);
2071 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 1);
2074 /* currently not optimized */
2075 gen_op_movl_T1_im(c
);
2076 gen_shift(s1
, op
, ot
, d
, OR_TMP1
);
2081 static void gen_lea_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2082 int *reg_ptr
, int *offset_ptr
)
2090 int mod
, rm
, code
, override
, must_add_seg
;
2092 override
= s
->override
;
2093 must_add_seg
= s
->addseg
;
2096 mod
= (modrm
>> 6) & 3;
2108 code
= cpu_ldub_code(env
, s
->pc
++);
2109 scale
= (code
>> 6) & 3;
2110 index
= ((code
>> 3) & 7) | REX_X(s
);
2117 if ((base
& 7) == 5) {
2119 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
2121 if (CODE64(s
) && !havesib
) {
2122 disp
+= s
->pc
+ s
->rip_offset
;
2129 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
2133 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
2139 /* for correct popl handling with esp */
2140 if (base
== 4 && s
->popl_esp_hack
)
2141 disp
+= s
->popl_esp_hack
;
2142 #ifdef TARGET_X86_64
2143 if (s
->aflag
== 2) {
2144 gen_op_movq_A0_reg(base
);
2146 gen_op_addq_A0_im(disp
);
2151 gen_op_movl_A0_reg(base
);
2153 gen_op_addl_A0_im(disp
);
2156 #ifdef TARGET_X86_64
2157 if (s
->aflag
== 2) {
2158 gen_op_movq_A0_im(disp
);
2162 gen_op_movl_A0_im(disp
);
2165 /* index == 4 means no index */
2166 if (havesib
&& (index
!= 4)) {
2167 #ifdef TARGET_X86_64
2168 if (s
->aflag
== 2) {
2169 gen_op_addq_A0_reg_sN(scale
, index
);
2173 gen_op_addl_A0_reg_sN(scale
, index
);
2178 if (base
== R_EBP
|| base
== R_ESP
)
2183 #ifdef TARGET_X86_64
2184 if (s
->aflag
== 2) {
2185 gen_op_addq_A0_seg(override
);
2189 gen_op_addl_A0_seg(s
, override
);
2196 disp
= cpu_lduw_code(env
, s
->pc
);
2198 gen_op_movl_A0_im(disp
);
2199 rm
= 0; /* avoid SS override */
2206 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
2210 disp
= cpu_lduw_code(env
, s
->pc
);
2216 gen_op_movl_A0_reg(R_EBX
);
2217 gen_op_addl_A0_reg_sN(0, R_ESI
);
2220 gen_op_movl_A0_reg(R_EBX
);
2221 gen_op_addl_A0_reg_sN(0, R_EDI
);
2224 gen_op_movl_A0_reg(R_EBP
);
2225 gen_op_addl_A0_reg_sN(0, R_ESI
);
2228 gen_op_movl_A0_reg(R_EBP
);
2229 gen_op_addl_A0_reg_sN(0, R_EDI
);
2232 gen_op_movl_A0_reg(R_ESI
);
2235 gen_op_movl_A0_reg(R_EDI
);
2238 gen_op_movl_A0_reg(R_EBP
);
2242 gen_op_movl_A0_reg(R_EBX
);
2246 gen_op_addl_A0_im(disp
);
2247 gen_op_andl_A0_ffff();
2251 if (rm
== 2 || rm
== 3 || rm
== 6)
2256 gen_op_addl_A0_seg(s
, override
);
2266 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2268 int mod
, rm
, base
, code
;
2270 mod
= (modrm
>> 6) & 3;
2280 code
= cpu_ldub_code(env
, s
->pc
++);
2316 /* used for LEA and MOV AX, mem */
2317 static void gen_add_A0_ds_seg(DisasContext
*s
)
2319 int override
, must_add_seg
;
2320 must_add_seg
= s
->addseg
;
2322 if (s
->override
>= 0) {
2323 override
= s
->override
;
2327 #ifdef TARGET_X86_64
2329 gen_op_addq_A0_seg(override
);
2333 gen_op_addl_A0_seg(s
, override
);
2338 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2340 static void gen_ldst_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2341 int ot
, int reg
, int is_store
)
2343 int mod
, rm
, opreg
, disp
;
2345 mod
= (modrm
>> 6) & 3;
2346 rm
= (modrm
& 7) | REX_B(s
);
2350 gen_op_mov_TN_reg(ot
, 0, reg
);
2351 gen_op_mov_reg_T0(ot
, rm
);
2353 gen_op_mov_TN_reg(ot
, 0, rm
);
2355 gen_op_mov_reg_T0(ot
, reg
);
2358 gen_lea_modrm(env
, s
, modrm
, &opreg
, &disp
);
2361 gen_op_mov_TN_reg(ot
, 0, reg
);
2362 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2364 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
2366 gen_op_mov_reg_T0(ot
, reg
);
2371 static inline uint32_t insn_get(CPUX86State
*env
, DisasContext
*s
, int ot
)
2377 ret
= cpu_ldub_code(env
, s
->pc
);
2381 ret
= cpu_lduw_code(env
, s
->pc
);
2386 ret
= cpu_ldl_code(env
, s
->pc
);
2393 static inline int insn_const_size(unsigned int ot
)
2401 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong eip
)
2403 TranslationBlock
*tb
;
2406 pc
= s
->cs_base
+ eip
;
2408 /* NOTE: we handle the case where the TB spans two pages here */
2409 if ((pc
& TARGET_PAGE_MASK
) == (tb
->pc
& TARGET_PAGE_MASK
) ||
2410 (pc
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
)) {
2411 /* jump to same page: we can use a direct jump */
2412 tcg_gen_goto_tb(tb_num
);
2414 tcg_gen_exit_tb((tcg_target_long
)tb
+ tb_num
);
2416 /* jump to another page: currently not optimized */
2422 static inline void gen_jcc(DisasContext
*s
, int b
,
2423 target_ulong val
, target_ulong next_eip
)
2428 l1
= gen_new_label();
2431 gen_goto_tb(s
, 0, next_eip
);
2434 gen_goto_tb(s
, 1, val
);
2435 s
->is_jmp
= DISAS_TB_JUMP
;
2437 l1
= gen_new_label();
2438 l2
= gen_new_label();
2441 gen_jmp_im(next_eip
);
2451 static void gen_cmovcc1(CPUX86State
*env
, DisasContext
*s
, int ot
, int b
,
2456 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
2458 cc
= gen_prepare_cc(s
, b
, cpu_T
[1]);
2459 if (cc
.mask
!= -1) {
2460 TCGv t0
= tcg_temp_new();
2461 tcg_gen_andi_tl(t0
, cc
.reg
, cc
.mask
);
2465 cc
.reg2
= tcg_const_tl(cc
.imm
);
2468 tcg_gen_movcond_tl(cc
.cond
, cpu_T
[0], cc
.reg
, cc
.reg2
,
2469 cpu_T
[0], cpu_regs
[reg
]);
2470 gen_op_mov_reg_T0(ot
, reg
);
2472 if (cc
.mask
!= -1) {
2473 tcg_temp_free(cc
.reg
);
2476 tcg_temp_free(cc
.reg2
);
2480 static inline void gen_op_movl_T0_seg(int seg_reg
)
2482 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
2483 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2486 static inline void gen_op_movl_seg_T0_vm(int seg_reg
)
2488 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xffff);
2489 tcg_gen_st32_tl(cpu_T
[0], cpu_env
,
2490 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2491 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], 4);
2492 tcg_gen_st_tl(cpu_T
[0], cpu_env
,
2493 offsetof(CPUX86State
,segs
[seg_reg
].base
));
2496 /* move T0 to seg_reg and compute if the CPU state may change. Never
2497 call this function with seg_reg == R_CS */
2498 static void gen_movl_seg_T0(DisasContext
*s
, int seg_reg
, target_ulong cur_eip
)
2500 if (s
->pe
&& !s
->vm86
) {
2501 /* XXX: optimize by finding processor state dynamically */
2502 gen_update_cc_op(s
);
2503 gen_jmp_im(cur_eip
);
2504 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
2505 gen_helper_load_seg(cpu_env
, tcg_const_i32(seg_reg
), cpu_tmp2_i32
);
2506 /* abort translation because the addseg value may change or
2507 because ss32 may change. For R_SS, translation must always
2508 stop as a special handling must be done to disable hardware
2509 interrupts for the next instruction */
2510 if (seg_reg
== R_SS
|| (s
->code32
&& seg_reg
< R_FS
))
2511 s
->is_jmp
= DISAS_TB_JUMP
;
2513 gen_op_movl_seg_T0_vm(seg_reg
);
2514 if (seg_reg
== R_SS
)
2515 s
->is_jmp
= DISAS_TB_JUMP
;
2519 static inline int svm_is_rep(int prefixes
)
2521 return ((prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) ? 8 : 0);
2525 gen_svm_check_intercept_param(DisasContext
*s
, target_ulong pc_start
,
2526 uint32_t type
, uint64_t param
)
2528 /* no SVM activated; fast case */
2529 if (likely(!(s
->flags
& HF_SVMI_MASK
)))
2531 gen_update_cc_op(s
);
2532 gen_jmp_im(pc_start
- s
->cs_base
);
2533 gen_helper_svm_check_intercept_param(cpu_env
, tcg_const_i32(type
),
2534 tcg_const_i64(param
));
2538 gen_svm_check_intercept(DisasContext
*s
, target_ulong pc_start
, uint64_t type
)
2540 gen_svm_check_intercept_param(s
, pc_start
, type
, 0);
2543 static inline void gen_stack_update(DisasContext
*s
, int addend
)
2545 #ifdef TARGET_X86_64
2547 gen_op_add_reg_im(2, R_ESP
, addend
);
2551 gen_op_add_reg_im(1, R_ESP
, addend
);
2553 gen_op_add_reg_im(0, R_ESP
, addend
);
2557 /* generate a push. It depends on ss32, addseg and dflag */
2558 static void gen_push_T0(DisasContext
*s
)
2560 #ifdef TARGET_X86_64
2562 gen_op_movq_A0_reg(R_ESP
);
2564 gen_op_addq_A0_im(-8);
2565 gen_op_st_T0_A0(OT_QUAD
+ s
->mem_index
);
2567 gen_op_addq_A0_im(-2);
2568 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
2570 gen_op_mov_reg_A0(2, R_ESP
);
2574 gen_op_movl_A0_reg(R_ESP
);
2576 gen_op_addl_A0_im(-2);
2578 gen_op_addl_A0_im(-4);
2581 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2582 gen_op_addl_A0_seg(s
, R_SS
);
2585 gen_op_andl_A0_ffff();
2586 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2587 gen_op_addl_A0_seg(s
, R_SS
);
2589 gen_op_st_T0_A0(s
->dflag
+ 1 + s
->mem_index
);
2590 if (s
->ss32
&& !s
->addseg
)
2591 gen_op_mov_reg_A0(1, R_ESP
);
2593 gen_op_mov_reg_T1(s
->ss32
+ 1, R_ESP
);
2597 /* generate a push. It depends on ss32, addseg and dflag */
2598 /* slower version for T1, only used for call Ev */
2599 static void gen_push_T1(DisasContext
*s
)
2601 #ifdef TARGET_X86_64
2603 gen_op_movq_A0_reg(R_ESP
);
2605 gen_op_addq_A0_im(-8);
2606 gen_op_st_T1_A0(OT_QUAD
+ s
->mem_index
);
2608 gen_op_addq_A0_im(-2);
2609 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
2611 gen_op_mov_reg_A0(2, R_ESP
);
2615 gen_op_movl_A0_reg(R_ESP
);
2617 gen_op_addl_A0_im(-2);
2619 gen_op_addl_A0_im(-4);
2622 gen_op_addl_A0_seg(s
, R_SS
);
2625 gen_op_andl_A0_ffff();
2626 gen_op_addl_A0_seg(s
, R_SS
);
2628 gen_op_st_T1_A0(s
->dflag
+ 1 + s
->mem_index
);
2630 if (s
->ss32
&& !s
->addseg
)
2631 gen_op_mov_reg_A0(1, R_ESP
);
2633 gen_stack_update(s
, (-2) << s
->dflag
);
2637 /* two step pop is necessary for precise exceptions */
2638 static void gen_pop_T0(DisasContext
*s
)
2640 #ifdef TARGET_X86_64
2642 gen_op_movq_A0_reg(R_ESP
);
2643 gen_op_ld_T0_A0((s
->dflag
? OT_QUAD
: OT_WORD
) + s
->mem_index
);
2647 gen_op_movl_A0_reg(R_ESP
);
2650 gen_op_addl_A0_seg(s
, R_SS
);
2652 gen_op_andl_A0_ffff();
2653 gen_op_addl_A0_seg(s
, R_SS
);
2655 gen_op_ld_T0_A0(s
->dflag
+ 1 + s
->mem_index
);
2659 static void gen_pop_update(DisasContext
*s
)
2661 #ifdef TARGET_X86_64
2662 if (CODE64(s
) && s
->dflag
) {
2663 gen_stack_update(s
, 8);
2667 gen_stack_update(s
, 2 << s
->dflag
);
2671 static void gen_stack_A0(DisasContext
*s
)
2673 gen_op_movl_A0_reg(R_ESP
);
2675 gen_op_andl_A0_ffff();
2676 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2678 gen_op_addl_A0_seg(s
, R_SS
);
2681 /* NOTE: wrap around in 16 bit not fully handled */
2682 static void gen_pusha(DisasContext
*s
)
2685 gen_op_movl_A0_reg(R_ESP
);
2686 gen_op_addl_A0_im(-16 << s
->dflag
);
2688 gen_op_andl_A0_ffff();
2689 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2691 gen_op_addl_A0_seg(s
, R_SS
);
2692 for(i
= 0;i
< 8; i
++) {
2693 gen_op_mov_TN_reg(OT_LONG
, 0, 7 - i
);
2694 gen_op_st_T0_A0(OT_WORD
+ s
->dflag
+ s
->mem_index
);
2695 gen_op_addl_A0_im(2 << s
->dflag
);
2697 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2700 /* NOTE: wrap around in 16 bit not fully handled */
2701 static void gen_popa(DisasContext
*s
)
2704 gen_op_movl_A0_reg(R_ESP
);
2706 gen_op_andl_A0_ffff();
2707 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2708 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], 16 << s
->dflag
);
2710 gen_op_addl_A0_seg(s
, R_SS
);
2711 for(i
= 0;i
< 8; i
++) {
2712 /* ESP is not reloaded */
2714 gen_op_ld_T0_A0(OT_WORD
+ s
->dflag
+ s
->mem_index
);
2715 gen_op_mov_reg_T0(OT_WORD
+ s
->dflag
, 7 - i
);
2717 gen_op_addl_A0_im(2 << s
->dflag
);
2719 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2722 static void gen_enter(DisasContext
*s
, int esp_addend
, int level
)
2727 #ifdef TARGET_X86_64
2729 ot
= s
->dflag
? OT_QUAD
: OT_WORD
;
2732 gen_op_movl_A0_reg(R_ESP
);
2733 gen_op_addq_A0_im(-opsize
);
2734 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2737 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
2738 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2740 /* XXX: must save state */
2741 gen_helper_enter64_level(cpu_env
, tcg_const_i32(level
),
2742 tcg_const_i32((ot
== OT_QUAD
)),
2745 gen_op_mov_reg_T1(ot
, R_EBP
);
2746 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], -esp_addend
+ (-opsize
* level
));
2747 gen_op_mov_reg_T1(OT_QUAD
, R_ESP
);
2751 ot
= s
->dflag
+ OT_WORD
;
2752 opsize
= 2 << s
->dflag
;
2754 gen_op_movl_A0_reg(R_ESP
);
2755 gen_op_addl_A0_im(-opsize
);
2757 gen_op_andl_A0_ffff();
2758 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2760 gen_op_addl_A0_seg(s
, R_SS
);
2762 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
2763 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2765 /* XXX: must save state */
2766 gen_helper_enter_level(cpu_env
, tcg_const_i32(level
),
2767 tcg_const_i32(s
->dflag
),
2770 gen_op_mov_reg_T1(ot
, R_EBP
);
2771 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], -esp_addend
+ (-opsize
* level
));
2772 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2776 static void gen_exception(DisasContext
*s
, int trapno
, target_ulong cur_eip
)
2778 gen_update_cc_op(s
);
2779 gen_jmp_im(cur_eip
);
2780 gen_helper_raise_exception(cpu_env
, tcg_const_i32(trapno
));
2781 s
->is_jmp
= DISAS_TB_JUMP
;
2784 /* an interrupt is different from an exception because of the
2786 static void gen_interrupt(DisasContext
*s
, int intno
,
2787 target_ulong cur_eip
, target_ulong next_eip
)
2789 gen_update_cc_op(s
);
2790 gen_jmp_im(cur_eip
);
2791 gen_helper_raise_interrupt(cpu_env
, tcg_const_i32(intno
),
2792 tcg_const_i32(next_eip
- cur_eip
));
2793 s
->is_jmp
= DISAS_TB_JUMP
;
2796 static void gen_debug(DisasContext
*s
, target_ulong cur_eip
)
2798 gen_update_cc_op(s
);
2799 gen_jmp_im(cur_eip
);
2800 gen_helper_debug(cpu_env
);
2801 s
->is_jmp
= DISAS_TB_JUMP
;
2804 /* generate a generic end of block. Trace exception is also generated
2806 static void gen_eob(DisasContext
*s
)
2808 gen_update_cc_op(s
);
2809 if (s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
) {
2810 gen_helper_reset_inhibit_irq(cpu_env
);
2812 if (s
->tb
->flags
& HF_RF_MASK
) {
2813 gen_helper_reset_rf(cpu_env
);
2815 if (s
->singlestep_enabled
) {
2816 gen_helper_debug(cpu_env
);
2818 gen_helper_single_step(cpu_env
);
2822 s
->is_jmp
= DISAS_TB_JUMP
;
2825 /* generate a jump to eip. No segment change must happen before as a
2826 direct call to the next block may occur */
2827 static void gen_jmp_tb(DisasContext
*s
, target_ulong eip
, int tb_num
)
2829 gen_update_cc_op(s
);
2830 set_cc_op(s
, CC_OP_DYNAMIC
);
2832 gen_goto_tb(s
, tb_num
, eip
);
2833 s
->is_jmp
= DISAS_TB_JUMP
;
2840 static void gen_jmp(DisasContext
*s
, target_ulong eip
)
2842 gen_jmp_tb(s
, eip
, 0);
2845 static inline void gen_ldq_env_A0(int idx
, int offset
)
2847 int mem_index
= (idx
>> 2) - 1;
2848 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2849 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2852 static inline void gen_stq_env_A0(int idx
, int offset
)
2854 int mem_index
= (idx
>> 2) - 1;
2855 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2856 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2859 static inline void gen_ldo_env_A0(int idx
, int offset
)
2861 int mem_index
= (idx
>> 2) - 1;
2862 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2863 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2864 tcg_gen_addi_tl(cpu_tmp0
, cpu_A0
, 8);
2865 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_tmp0
, mem_index
);
2866 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2869 static inline void gen_sto_env_A0(int idx
, int offset
)
2871 int mem_index
= (idx
>> 2) - 1;
2872 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2873 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2874 tcg_gen_addi_tl(cpu_tmp0
, cpu_A0
, 8);
2875 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2876 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_tmp0
, mem_index
);
2879 static inline void gen_op_movo(int d_offset
, int s_offset
)
2881 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
);
2882 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2883 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
+ 8);
2884 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
+ 8);
2887 static inline void gen_op_movq(int d_offset
, int s_offset
)
2889 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
);
2890 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2893 static inline void gen_op_movl(int d_offset
, int s_offset
)
2895 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
, s_offset
);
2896 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, d_offset
);
2899 static inline void gen_op_movq_env_0(int d_offset
)
2901 tcg_gen_movi_i64(cpu_tmp1_i64
, 0);
2902 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2905 typedef void (*SSEFunc_i_ep
)(TCGv_i32 val
, TCGv_ptr env
, TCGv_ptr reg
);
2906 typedef void (*SSEFunc_l_ep
)(TCGv_i64 val
, TCGv_ptr env
, TCGv_ptr reg
);
2907 typedef void (*SSEFunc_0_epi
)(TCGv_ptr env
, TCGv_ptr reg
, TCGv_i32 val
);
2908 typedef void (*SSEFunc_0_epl
)(TCGv_ptr env
, TCGv_ptr reg
, TCGv_i64 val
);
2909 typedef void (*SSEFunc_0_epp
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
);
2910 typedef void (*SSEFunc_0_eppi
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
,
2912 typedef void (*SSEFunc_0_ppi
)(TCGv_ptr reg_a
, TCGv_ptr reg_b
, TCGv_i32 val
);
2913 typedef void (*SSEFunc_0_eppt
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
,
2916 #define SSE_SPECIAL ((void *)1)
2917 #define SSE_DUMMY ((void *)2)
2919 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2920 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2921 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
2923 static const SSEFunc_0_epp sse_op_table1
[256][4] = {
2924 /* 3DNow! extensions */
2925 [0x0e] = { SSE_DUMMY
}, /* femms */
2926 [0x0f] = { SSE_DUMMY
}, /* pf... */
2927 /* pure SSE operations */
2928 [0x10] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movups, movupd, movss, movsd */
2929 [0x11] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movups, movupd, movss, movsd */
2930 [0x12] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movlps, movlpd, movsldup, movddup */
2931 [0x13] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movlps, movlpd */
2932 [0x14] = { gen_helper_punpckldq_xmm
, gen_helper_punpcklqdq_xmm
},
2933 [0x15] = { gen_helper_punpckhdq_xmm
, gen_helper_punpckhqdq_xmm
},
2934 [0x16] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movhps, movhpd, movshdup */
2935 [0x17] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movhps, movhpd */
2937 [0x28] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movaps, movapd */
2938 [0x29] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movaps, movapd */
2939 [0x2a] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2940 [0x2b] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movntps, movntpd, movntss, movntsd */
2941 [0x2c] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2942 [0x2d] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
2943 [0x2e] = { gen_helper_ucomiss
, gen_helper_ucomisd
},
2944 [0x2f] = { gen_helper_comiss
, gen_helper_comisd
},
2945 [0x50] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movmskps, movmskpd */
2946 [0x51] = SSE_FOP(sqrt
),
2947 [0x52] = { gen_helper_rsqrtps
, NULL
, gen_helper_rsqrtss
, NULL
},
2948 [0x53] = { gen_helper_rcpps
, NULL
, gen_helper_rcpss
, NULL
},
2949 [0x54] = { gen_helper_pand_xmm
, gen_helper_pand_xmm
}, /* andps, andpd */
2950 [0x55] = { gen_helper_pandn_xmm
, gen_helper_pandn_xmm
}, /* andnps, andnpd */
2951 [0x56] = { gen_helper_por_xmm
, gen_helper_por_xmm
}, /* orps, orpd */
2952 [0x57] = { gen_helper_pxor_xmm
, gen_helper_pxor_xmm
}, /* xorps, xorpd */
2953 [0x58] = SSE_FOP(add
),
2954 [0x59] = SSE_FOP(mul
),
2955 [0x5a] = { gen_helper_cvtps2pd
, gen_helper_cvtpd2ps
,
2956 gen_helper_cvtss2sd
, gen_helper_cvtsd2ss
},
2957 [0x5b] = { gen_helper_cvtdq2ps
, gen_helper_cvtps2dq
, gen_helper_cvttps2dq
},
2958 [0x5c] = SSE_FOP(sub
),
2959 [0x5d] = SSE_FOP(min
),
2960 [0x5e] = SSE_FOP(div
),
2961 [0x5f] = SSE_FOP(max
),
2963 [0xc2] = SSE_FOP(cmpeq
),
2964 [0xc6] = { (SSEFunc_0_epp
)gen_helper_shufps
,
2965 (SSEFunc_0_epp
)gen_helper_shufpd
}, /* XXX: casts */
2967 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2968 [0x38] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
},
2969 [0x3a] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
},
2971 /* MMX ops and their SSE extensions */
2972 [0x60] = MMX_OP2(punpcklbw
),
2973 [0x61] = MMX_OP2(punpcklwd
),
2974 [0x62] = MMX_OP2(punpckldq
),
2975 [0x63] = MMX_OP2(packsswb
),
2976 [0x64] = MMX_OP2(pcmpgtb
),
2977 [0x65] = MMX_OP2(pcmpgtw
),
2978 [0x66] = MMX_OP2(pcmpgtl
),
2979 [0x67] = MMX_OP2(packuswb
),
2980 [0x68] = MMX_OP2(punpckhbw
),
2981 [0x69] = MMX_OP2(punpckhwd
),
2982 [0x6a] = MMX_OP2(punpckhdq
),
2983 [0x6b] = MMX_OP2(packssdw
),
2984 [0x6c] = { NULL
, gen_helper_punpcklqdq_xmm
},
2985 [0x6d] = { NULL
, gen_helper_punpckhqdq_xmm
},
2986 [0x6e] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movd mm, ea */
2987 [0x6f] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movq, movdqa, , movqdu */
2988 [0x70] = { (SSEFunc_0_epp
)gen_helper_pshufw_mmx
,
2989 (SSEFunc_0_epp
)gen_helper_pshufd_xmm
,
2990 (SSEFunc_0_epp
)gen_helper_pshufhw_xmm
,
2991 (SSEFunc_0_epp
)gen_helper_pshuflw_xmm
}, /* XXX: casts */
2992 [0x71] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* shiftw */
2993 [0x72] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* shiftd */
2994 [0x73] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* shiftq */
2995 [0x74] = MMX_OP2(pcmpeqb
),
2996 [0x75] = MMX_OP2(pcmpeqw
),
2997 [0x76] = MMX_OP2(pcmpeql
),
2998 [0x77] = { SSE_DUMMY
}, /* emms */
2999 [0x78] = { NULL
, SSE_SPECIAL
, NULL
, SSE_SPECIAL
}, /* extrq_i, insertq_i */
3000 [0x79] = { NULL
, gen_helper_extrq_r
, NULL
, gen_helper_insertq_r
},
3001 [0x7c] = { NULL
, gen_helper_haddpd
, NULL
, gen_helper_haddps
},
3002 [0x7d] = { NULL
, gen_helper_hsubpd
, NULL
, gen_helper_hsubps
},
3003 [0x7e] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movd, movd, , movq */
3004 [0x7f] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movq, movdqa, movdqu */
3005 [0xc4] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* pinsrw */
3006 [0xc5] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* pextrw */
3007 [0xd0] = { NULL
, gen_helper_addsubpd
, NULL
, gen_helper_addsubps
},
3008 [0xd1] = MMX_OP2(psrlw
),
3009 [0xd2] = MMX_OP2(psrld
),
3010 [0xd3] = MMX_OP2(psrlq
),
3011 [0xd4] = MMX_OP2(paddq
),
3012 [0xd5] = MMX_OP2(pmullw
),
3013 [0xd6] = { NULL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
},
3014 [0xd7] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* pmovmskb */
3015 [0xd8] = MMX_OP2(psubusb
),
3016 [0xd9] = MMX_OP2(psubusw
),
3017 [0xda] = MMX_OP2(pminub
),
3018 [0xdb] = MMX_OP2(pand
),
3019 [0xdc] = MMX_OP2(paddusb
),
3020 [0xdd] = MMX_OP2(paddusw
),
3021 [0xde] = MMX_OP2(pmaxub
),
3022 [0xdf] = MMX_OP2(pandn
),
3023 [0xe0] = MMX_OP2(pavgb
),
3024 [0xe1] = MMX_OP2(psraw
),
3025 [0xe2] = MMX_OP2(psrad
),
3026 [0xe3] = MMX_OP2(pavgw
),
3027 [0xe4] = MMX_OP2(pmulhuw
),
3028 [0xe5] = MMX_OP2(pmulhw
),
3029 [0xe6] = { NULL
, gen_helper_cvttpd2dq
, gen_helper_cvtdq2pd
, gen_helper_cvtpd2dq
},
3030 [0xe7] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movntq, movntq */
3031 [0xe8] = MMX_OP2(psubsb
),
3032 [0xe9] = MMX_OP2(psubsw
),
3033 [0xea] = MMX_OP2(pminsw
),
3034 [0xeb] = MMX_OP2(por
),
3035 [0xec] = MMX_OP2(paddsb
),
3036 [0xed] = MMX_OP2(paddsw
),
3037 [0xee] = MMX_OP2(pmaxsw
),
3038 [0xef] = MMX_OP2(pxor
),
3039 [0xf0] = { NULL
, NULL
, NULL
, SSE_SPECIAL
}, /* lddqu */
3040 [0xf1] = MMX_OP2(psllw
),
3041 [0xf2] = MMX_OP2(pslld
),
3042 [0xf3] = MMX_OP2(psllq
),
3043 [0xf4] = MMX_OP2(pmuludq
),
3044 [0xf5] = MMX_OP2(pmaddwd
),
3045 [0xf6] = MMX_OP2(psadbw
),
3046 [0xf7] = { (SSEFunc_0_epp
)gen_helper_maskmov_mmx
,
3047 (SSEFunc_0_epp
)gen_helper_maskmov_xmm
}, /* XXX: casts */
3048 [0xf8] = MMX_OP2(psubb
),
3049 [0xf9] = MMX_OP2(psubw
),
3050 [0xfa] = MMX_OP2(psubl
),
3051 [0xfb] = MMX_OP2(psubq
),
3052 [0xfc] = MMX_OP2(paddb
),
3053 [0xfd] = MMX_OP2(paddw
),
3054 [0xfe] = MMX_OP2(paddl
),
3057 static const SSEFunc_0_epp sse_op_table2
[3 * 8][2] = {
3058 [0 + 2] = MMX_OP2(psrlw
),
3059 [0 + 4] = MMX_OP2(psraw
),
3060 [0 + 6] = MMX_OP2(psllw
),
3061 [8 + 2] = MMX_OP2(psrld
),
3062 [8 + 4] = MMX_OP2(psrad
),
3063 [8 + 6] = MMX_OP2(pslld
),
3064 [16 + 2] = MMX_OP2(psrlq
),
3065 [16 + 3] = { NULL
, gen_helper_psrldq_xmm
},
3066 [16 + 6] = MMX_OP2(psllq
),
3067 [16 + 7] = { NULL
, gen_helper_pslldq_xmm
},
3070 static const SSEFunc_0_epi sse_op_table3ai
[] = {
3071 gen_helper_cvtsi2ss
,
3075 #ifdef TARGET_X86_64
3076 static const SSEFunc_0_epl sse_op_table3aq
[] = {
3077 gen_helper_cvtsq2ss
,
3082 static const SSEFunc_i_ep sse_op_table3bi
[] = {
3083 gen_helper_cvttss2si
,
3084 gen_helper_cvtss2si
,
3085 gen_helper_cvttsd2si
,
3089 #ifdef TARGET_X86_64
3090 static const SSEFunc_l_ep sse_op_table3bq
[] = {
3091 gen_helper_cvttss2sq
,
3092 gen_helper_cvtss2sq
,
3093 gen_helper_cvttsd2sq
,
3098 static const SSEFunc_0_epp sse_op_table4
[8][4] = {
3109 static const SSEFunc_0_epp sse_op_table5
[256] = {
3110 [0x0c] = gen_helper_pi2fw
,
3111 [0x0d] = gen_helper_pi2fd
,
3112 [0x1c] = gen_helper_pf2iw
,
3113 [0x1d] = gen_helper_pf2id
,
3114 [0x8a] = gen_helper_pfnacc
,
3115 [0x8e] = gen_helper_pfpnacc
,
3116 [0x90] = gen_helper_pfcmpge
,
3117 [0x94] = gen_helper_pfmin
,
3118 [0x96] = gen_helper_pfrcp
,
3119 [0x97] = gen_helper_pfrsqrt
,
3120 [0x9a] = gen_helper_pfsub
,
3121 [0x9e] = gen_helper_pfadd
,
3122 [0xa0] = gen_helper_pfcmpgt
,
3123 [0xa4] = gen_helper_pfmax
,
3124 [0xa6] = gen_helper_movq
, /* pfrcpit1; no need to actually increase precision */
3125 [0xa7] = gen_helper_movq
, /* pfrsqit1 */
3126 [0xaa] = gen_helper_pfsubr
,
3127 [0xae] = gen_helper_pfacc
,
3128 [0xb0] = gen_helper_pfcmpeq
,
3129 [0xb4] = gen_helper_pfmul
,
3130 [0xb6] = gen_helper_movq
, /* pfrcpit2 */
3131 [0xb7] = gen_helper_pmulhrw_mmx
,
3132 [0xbb] = gen_helper_pswapd
,
3133 [0xbf] = gen_helper_pavgb_mmx
/* pavgusb */
3136 struct SSEOpHelper_epp
{
3137 SSEFunc_0_epp op
[2];
3141 struct SSEOpHelper_eppi
{
3142 SSEFunc_0_eppi op
[2];
/* Initializer macros for SSEOpHelper_epp/_eppi entries:
 * { { mmx_helper, xmm_helper }, required CPUID feature bit }.
 * SSE4.1/4.2, PCLMULQDQ and AES-NI instructions exist only in the
 * 66-prefixed XMM form, hence the NULL in the MMX slot.  SSE41_SPECIAL
 * marks encodings that gen_sse handles out of line. */
#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
        CPUID_EXT_PCLMULQDQ }
#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
3154 static const struct SSEOpHelper_epp sse_op_table6
[256] = {
3155 [0x00] = SSSE3_OP(pshufb
),
3156 [0x01] = SSSE3_OP(phaddw
),
3157 [0x02] = SSSE3_OP(phaddd
),
3158 [0x03] = SSSE3_OP(phaddsw
),
3159 [0x04] = SSSE3_OP(pmaddubsw
),
3160 [0x05] = SSSE3_OP(phsubw
),
3161 [0x06] = SSSE3_OP(phsubd
),
3162 [0x07] = SSSE3_OP(phsubsw
),
3163 [0x08] = SSSE3_OP(psignb
),
3164 [0x09] = SSSE3_OP(psignw
),
3165 [0x0a] = SSSE3_OP(psignd
),
3166 [0x0b] = SSSE3_OP(pmulhrsw
),
3167 [0x10] = SSE41_OP(pblendvb
),
3168 [0x14] = SSE41_OP(blendvps
),
3169 [0x15] = SSE41_OP(blendvpd
),
3170 [0x17] = SSE41_OP(ptest
),
3171 [0x1c] = SSSE3_OP(pabsb
),
3172 [0x1d] = SSSE3_OP(pabsw
),
3173 [0x1e] = SSSE3_OP(pabsd
),
3174 [0x20] = SSE41_OP(pmovsxbw
),
3175 [0x21] = SSE41_OP(pmovsxbd
),
3176 [0x22] = SSE41_OP(pmovsxbq
),
3177 [0x23] = SSE41_OP(pmovsxwd
),
3178 [0x24] = SSE41_OP(pmovsxwq
),
3179 [0x25] = SSE41_OP(pmovsxdq
),
3180 [0x28] = SSE41_OP(pmuldq
),
3181 [0x29] = SSE41_OP(pcmpeqq
),
3182 [0x2a] = SSE41_SPECIAL
, /* movntqda */
3183 [0x2b] = SSE41_OP(packusdw
),
3184 [0x30] = SSE41_OP(pmovzxbw
),
3185 [0x31] = SSE41_OP(pmovzxbd
),
3186 [0x32] = SSE41_OP(pmovzxbq
),
3187 [0x33] = SSE41_OP(pmovzxwd
),
3188 [0x34] = SSE41_OP(pmovzxwq
),
3189 [0x35] = SSE41_OP(pmovzxdq
),
3190 [0x37] = SSE42_OP(pcmpgtq
),
3191 [0x38] = SSE41_OP(pminsb
),
3192 [0x39] = SSE41_OP(pminsd
),
3193 [0x3a] = SSE41_OP(pminuw
),
3194 [0x3b] = SSE41_OP(pminud
),
3195 [0x3c] = SSE41_OP(pmaxsb
),
3196 [0x3d] = SSE41_OP(pmaxsd
),
3197 [0x3e] = SSE41_OP(pmaxuw
),
3198 [0x3f] = SSE41_OP(pmaxud
),
3199 [0x40] = SSE41_OP(pmulld
),
3200 [0x41] = SSE41_OP(phminposuw
),
3201 [0xdb] = AESNI_OP(aesimc
),
3202 [0xdc] = AESNI_OP(aesenc
),
3203 [0xdd] = AESNI_OP(aesenclast
),
3204 [0xde] = AESNI_OP(aesdec
),
3205 [0xdf] = AESNI_OP(aesdeclast
),
3208 static const struct SSEOpHelper_eppi sse_op_table7
[256] = {
3209 [0x08] = SSE41_OP(roundps
),
3210 [0x09] = SSE41_OP(roundpd
),
3211 [0x0a] = SSE41_OP(roundss
),
3212 [0x0b] = SSE41_OP(roundsd
),
3213 [0x0c] = SSE41_OP(blendps
),
3214 [0x0d] = SSE41_OP(blendpd
),
3215 [0x0e] = SSE41_OP(pblendw
),
3216 [0x0f] = SSSE3_OP(palignr
),
3217 [0x14] = SSE41_SPECIAL
, /* pextrb */
3218 [0x15] = SSE41_SPECIAL
, /* pextrw */
3219 [0x16] = SSE41_SPECIAL
, /* pextrd/pextrq */
3220 [0x17] = SSE41_SPECIAL
, /* extractps */
3221 [0x20] = SSE41_SPECIAL
, /* pinsrb */
3222 [0x21] = SSE41_SPECIAL
, /* insertps */
3223 [0x22] = SSE41_SPECIAL
, /* pinsrd/pinsrq */
3224 [0x40] = SSE41_OP(dpps
),
3225 [0x41] = SSE41_OP(dppd
),
3226 [0x42] = SSE41_OP(mpsadbw
),
3227 [0x44] = PCLMULQDQ_OP(pclmulqdq
),
3228 [0x60] = SSE42_OP(pcmpestrm
),
3229 [0x61] = SSE42_OP(pcmpestri
),
3230 [0x62] = SSE42_OP(pcmpistrm
),
3231 [0x63] = SSE42_OP(pcmpistri
),
3232 [0xdf] = AESNI_OP(aeskeygenassist
),
3235 static void gen_sse(CPUX86State
*env
, DisasContext
*s
, int b
,
3236 target_ulong pc_start
, int rex_r
)
3238 int b1
, op1_offset
, op2_offset
, is_xmm
, val
, ot
;
3239 int modrm
, mod
, rm
, reg
, reg_addr
, offset_addr
;
3240 SSEFunc_0_epp sse_fn_epp
;
3241 SSEFunc_0_eppi sse_fn_eppi
;
3242 SSEFunc_0_ppi sse_fn_ppi
;
3243 SSEFunc_0_eppt sse_fn_eppt
;
3246 if (s
->prefix
& PREFIX_DATA
)
3248 else if (s
->prefix
& PREFIX_REPZ
)
3250 else if (s
->prefix
& PREFIX_REPNZ
)
3254 sse_fn_epp
= sse_op_table1
[b
][b1
];
3258 if ((b
<= 0x5f && b
>= 0x10) || b
== 0xc6 || b
== 0xc2) {
3268 /* simple MMX/SSE operation */
3269 if (s
->flags
& HF_TS_MASK
) {
3270 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
3273 if (s
->flags
& HF_EM_MASK
) {
3275 gen_exception(s
, EXCP06_ILLOP
, pc_start
- s
->cs_base
);
3278 if (is_xmm
&& !(s
->flags
& HF_OSFXSR_MASK
))
3279 if ((b
!= 0x38 && b
!= 0x3a) || (s
->prefix
& PREFIX_DATA
))
3282 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
))
3285 gen_helper_emms(cpu_env
);
3290 gen_helper_emms(cpu_env
);
3293 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3294 the static cpu state) */
3296 gen_helper_enter_mmx(cpu_env
);
3299 modrm
= cpu_ldub_code(env
, s
->pc
++);
3300 reg
= ((modrm
>> 3) & 7);
3303 mod
= (modrm
>> 6) & 3;
3304 if (sse_fn_epp
== SSE_SPECIAL
) {
3307 case 0x0e7: /* movntq */
3310 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3311 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3313 case 0x1e7: /* movntdq */
3314 case 0x02b: /* movntps */
3315 case 0x12b: /* movntps */
3318 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3319 gen_sto_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3321 case 0x3f0: /* lddqu */
3324 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3325 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3327 case 0x22b: /* movntss */
3328 case 0x32b: /* movntsd */
3331 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3333 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,
3336 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
3337 xmm_regs
[reg
].XMM_L(0)));
3338 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
3341 case 0x6e: /* movd mm, ea */
3342 #ifdef TARGET_X86_64
3343 if (s
->dflag
== 2) {
3344 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 0);
3345 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3349 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 0);
3350 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3351 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3352 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3353 gen_helper_movl_mm_T0_mmx(cpu_ptr0
, cpu_tmp2_i32
);
3356 case 0x16e: /* movd xmm, ea */
3357 #ifdef TARGET_X86_64
3358 if (s
->dflag
== 2) {
3359 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 0);
3360 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3361 offsetof(CPUX86State
,xmm_regs
[reg
]));
3362 gen_helper_movq_mm_T0_xmm(cpu_ptr0
, cpu_T
[0]);
3366 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 0);
3367 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3368 offsetof(CPUX86State
,xmm_regs
[reg
]));
3369 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3370 gen_helper_movl_mm_T0_xmm(cpu_ptr0
, cpu_tmp2_i32
);
3373 case 0x6f: /* movq mm, ea */
3375 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3376 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3379 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
3380 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3381 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
3382 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3385 case 0x010: /* movups */
3386 case 0x110: /* movupd */
3387 case 0x028: /* movaps */
3388 case 0x128: /* movapd */
3389 case 0x16f: /* movdqa xmm, ea */
3390 case 0x26f: /* movdqu xmm, ea */
3392 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3393 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3395 rm
= (modrm
& 7) | REX_B(s
);
3396 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[reg
]),
3397 offsetof(CPUX86State
,xmm_regs
[rm
]));
3400 case 0x210: /* movss xmm, ea */
3402 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3403 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
3404 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3406 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)));
3407 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3408 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3410 rm
= (modrm
& 7) | REX_B(s
);
3411 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3412 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)));
3415 case 0x310: /* movsd xmm, ea */
3417 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3418 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3420 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3421 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3423 rm
= (modrm
& 7) | REX_B(s
);
3424 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3425 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3428 case 0x012: /* movlps */
3429 case 0x112: /* movlpd */
3431 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3432 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3435 rm
= (modrm
& 7) | REX_B(s
);
3436 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3437 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(1)));
3440 case 0x212: /* movsldup */
3442 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3443 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3445 rm
= (modrm
& 7) | REX_B(s
);
3446 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3447 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)));
3448 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)),
3449 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(2)));
3451 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)),
3452 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3453 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)),
3454 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3456 case 0x312: /* movddup */
3458 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3459 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3461 rm
= (modrm
& 7) | REX_B(s
);
3462 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3463 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3465 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)),
3466 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3468 case 0x016: /* movhps */
3469 case 0x116: /* movhpd */
3471 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3472 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3475 rm
= (modrm
& 7) | REX_B(s
);
3476 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)),
3477 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3480 case 0x216: /* movshdup */
3482 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3483 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3485 rm
= (modrm
& 7) | REX_B(s
);
3486 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)),
3487 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(1)));
3488 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)),
3489 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(3)));
3491 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3492 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)));
3493 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)),
3494 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3499 int bit_index
, field_length
;
3501 if (b1
== 1 && reg
!= 0)
3503 field_length
= cpu_ldub_code(env
, s
->pc
++) & 0x3F;
3504 bit_index
= cpu_ldub_code(env
, s
->pc
++) & 0x3F;
3505 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3506 offsetof(CPUX86State
,xmm_regs
[reg
]));
3508 gen_helper_extrq_i(cpu_env
, cpu_ptr0
,
3509 tcg_const_i32(bit_index
),
3510 tcg_const_i32(field_length
));
3512 gen_helper_insertq_i(cpu_env
, cpu_ptr0
,
3513 tcg_const_i32(bit_index
),
3514 tcg_const_i32(field_length
));
3517 case 0x7e: /* movd ea, mm */
3518 #ifdef TARGET_X86_64
3519 if (s
->dflag
== 2) {
3520 tcg_gen_ld_i64(cpu_T
[0], cpu_env
,
3521 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3522 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 1);
3526 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
3527 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_L(0)));
3528 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 1);
3531 case 0x17e: /* movd ea, xmm */
3532 #ifdef TARGET_X86_64
3533 if (s
->dflag
== 2) {
3534 tcg_gen_ld_i64(cpu_T
[0], cpu_env
,
3535 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3536 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 1);
3540 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
3541 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3542 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 1);
3545 case 0x27e: /* movq xmm, ea */
3547 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3548 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3550 rm
= (modrm
& 7) | REX_B(s
);
3551 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3552 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3554 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3556 case 0x7f: /* movq ea, mm */
3558 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3559 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3562 gen_op_movq(offsetof(CPUX86State
,fpregs
[rm
].mmx
),
3563 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3566 case 0x011: /* movups */
3567 case 0x111: /* movupd */
3568 case 0x029: /* movaps */
3569 case 0x129: /* movapd */
3570 case 0x17f: /* movdqa ea, xmm */
3571 case 0x27f: /* movdqu ea, xmm */
3573 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3574 gen_sto_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3576 rm
= (modrm
& 7) | REX_B(s
);
3577 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[rm
]),
3578 offsetof(CPUX86State
,xmm_regs
[reg
]));
3581 case 0x211: /* movss ea, xmm */
3583 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3584 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3585 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
3587 rm
= (modrm
& 7) | REX_B(s
);
3588 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)),
3589 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3592 case 0x311: /* movsd ea, xmm */
3594 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3595 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3597 rm
= (modrm
& 7) | REX_B(s
);
3598 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)),
3599 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3602 case 0x013: /* movlps */
3603 case 0x113: /* movlpd */
3605 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3606 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3611 case 0x017: /* movhps */
3612 case 0x117: /* movhpd */
3614 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3615 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3620 case 0x71: /* shift mm, im */
3623 case 0x171: /* shift xmm, im */
3629 val
= cpu_ldub_code(env
, s
->pc
++);
3631 gen_op_movl_T0_im(val
);
3632 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
3634 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(1)));
3635 op1_offset
= offsetof(CPUX86State
,xmm_t0
);
3637 gen_op_movl_T0_im(val
);
3638 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(0)));
3640 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(1)));
3641 op1_offset
= offsetof(CPUX86State
,mmx_t0
);
3643 sse_fn_epp
= sse_op_table2
[((b
- 1) & 3) * 8 +
3644 (((modrm
>> 3)) & 7)][b1
];
3649 rm
= (modrm
& 7) | REX_B(s
);
3650 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3653 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3655 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3656 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op1_offset
);
3657 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3659 case 0x050: /* movmskps */
3660 rm
= (modrm
& 7) | REX_B(s
);
3661 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3662 offsetof(CPUX86State
,xmm_regs
[rm
]));
3663 gen_helper_movmskps(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3664 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3665 gen_op_mov_reg_T0(OT_LONG
, reg
);
3667 case 0x150: /* movmskpd */
3668 rm
= (modrm
& 7) | REX_B(s
);
3669 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3670 offsetof(CPUX86State
,xmm_regs
[rm
]));
3671 gen_helper_movmskpd(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3672 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3673 gen_op_mov_reg_T0(OT_LONG
, reg
);
3675 case 0x02a: /* cvtpi2ps */
3676 case 0x12a: /* cvtpi2pd */
3677 gen_helper_enter_mmx(cpu_env
);
3679 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3680 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3681 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
3684 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3686 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3687 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3688 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3691 gen_helper_cvtpi2ps(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3695 gen_helper_cvtpi2pd(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3699 case 0x22a: /* cvtsi2ss */
3700 case 0x32a: /* cvtsi2sd */
3701 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3702 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3703 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3704 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3705 if (ot
== OT_LONG
) {
3706 SSEFunc_0_epi sse_fn_epi
= sse_op_table3ai
[(b
>> 8) & 1];
3707 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3708 sse_fn_epi(cpu_env
, cpu_ptr0
, cpu_tmp2_i32
);
3710 #ifdef TARGET_X86_64
3711 SSEFunc_0_epl sse_fn_epl
= sse_op_table3aq
[(b
>> 8) & 1];
3712 sse_fn_epl(cpu_env
, cpu_ptr0
, cpu_T
[0]);
3718 case 0x02c: /* cvttps2pi */
3719 case 0x12c: /* cvttpd2pi */
3720 case 0x02d: /* cvtps2pi */
3721 case 0x12d: /* cvtpd2pi */
3722 gen_helper_enter_mmx(cpu_env
);
3724 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3725 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3726 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
3728 rm
= (modrm
& 7) | REX_B(s
);
3729 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3731 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
);
3732 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3733 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3736 gen_helper_cvttps2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3739 gen_helper_cvttpd2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3742 gen_helper_cvtps2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3745 gen_helper_cvtpd2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3749 case 0x22c: /* cvttss2si */
3750 case 0x32c: /* cvttsd2si */
3751 case 0x22d: /* cvtss2si */
3752 case 0x32d: /* cvtsd2si */
3753 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3755 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3757 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_t0
.XMM_Q(0)));
3759 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
3760 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
3762 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3764 rm
= (modrm
& 7) | REX_B(s
);
3765 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3767 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3768 if (ot
== OT_LONG
) {
3769 SSEFunc_i_ep sse_fn_i_ep
=
3770 sse_op_table3bi
[((b
>> 7) & 2) | (b
& 1)];
3771 sse_fn_i_ep(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3772 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3774 #ifdef TARGET_X86_64
3775 SSEFunc_l_ep sse_fn_l_ep
=
3776 sse_op_table3bq
[((b
>> 7) & 2) | (b
& 1)];
3777 sse_fn_l_ep(cpu_T
[0], cpu_env
, cpu_ptr0
);
3782 gen_op_mov_reg_T0(ot
, reg
);
3784 case 0xc4: /* pinsrw */
3787 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
3788 val
= cpu_ldub_code(env
, s
->pc
++);
3791 tcg_gen_st16_tl(cpu_T
[0], cpu_env
,
3792 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_W(val
)));
3795 tcg_gen_st16_tl(cpu_T
[0], cpu_env
,
3796 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_W(val
)));
3799 case 0xc5: /* pextrw */
3803 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3804 val
= cpu_ldub_code(env
, s
->pc
++);
3807 rm
= (modrm
& 7) | REX_B(s
);
3808 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
,
3809 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_W(val
)));
3813 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
,
3814 offsetof(CPUX86State
,fpregs
[rm
].mmx
.MMX_W(val
)));
3816 reg
= ((modrm
>> 3) & 7) | rex_r
;
3817 gen_op_mov_reg_T0(ot
, reg
);
3819 case 0x1d6: /* movq ea, xmm */
3821 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3822 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3824 rm
= (modrm
& 7) | REX_B(s
);
3825 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)),
3826 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3827 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(1)));
3830 case 0x2d6: /* movq2dq */
3831 gen_helper_enter_mmx(cpu_env
);
3833 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3834 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3835 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3837 case 0x3d6: /* movdq2q */
3838 gen_helper_enter_mmx(cpu_env
);
3839 rm
= (modrm
& 7) | REX_B(s
);
3840 gen_op_movq(offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
),
3841 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3843 case 0xd7: /* pmovmskb */
3848 rm
= (modrm
& 7) | REX_B(s
);
3849 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[rm
]));
3850 gen_helper_pmovmskb_xmm(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3853 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3854 gen_helper_pmovmskb_mmx(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3856 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3857 reg
= ((modrm
>> 3) & 7) | rex_r
;
3858 gen_op_mov_reg_T0(OT_LONG
, reg
);
3864 if ((b
& 0xf0) == 0xf0) {
3867 modrm
= cpu_ldub_code(env
, s
->pc
++);
3869 reg
= ((modrm
>> 3) & 7) | rex_r
;
3870 mod
= (modrm
>> 6) & 3;
3875 sse_fn_epp
= sse_op_table6
[b
].op
[b1
];
3879 if (!(s
->cpuid_ext_features
& sse_op_table6
[b
].ext_mask
))
3883 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3885 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
3887 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3888 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3890 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3891 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3892 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3893 gen_ldq_env_A0(s
->mem_index
, op2_offset
+
3894 offsetof(XMMReg
, XMM_Q(0)));
3896 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3897 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3898 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
3899 (s
->mem_index
>> 2) - 1);
3900 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
3901 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, op2_offset
+
3902 offsetof(XMMReg
, XMM_L(0)));
3904 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3905 tcg_gen_qemu_ld16u(cpu_tmp0
, cpu_A0
,
3906 (s
->mem_index
>> 2) - 1);
3907 tcg_gen_st16_tl(cpu_tmp0
, cpu_env
, op2_offset
+
3908 offsetof(XMMReg
, XMM_W(0)));
3910 case 0x2a: /* movntqda */
3911 gen_ldo_env_A0(s
->mem_index
, op1_offset
);
3914 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
3918 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
3920 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3922 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3923 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3924 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
3927 if (sse_fn_epp
== SSE_SPECIAL
) {
3931 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3932 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3933 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3936 set_cc_op(s
, CC_OP_EFLAGS
);
3943 /* Various integer extensions at 0f 38 f[0-f]. */
3944 b
= modrm
| (b1
<< 8);
3945 modrm
= cpu_ldub_code(env
, s
->pc
++);
3946 reg
= ((modrm
>> 3) & 7) | rex_r
;
3949 case 0x3f0: /* crc32 Gd,Eb */
3950 case 0x3f1: /* crc32 Gd,Ey */
3952 if (!(s
->cpuid_ext_features
& CPUID_EXT_SSE42
)) {
3955 if ((b
& 0xff) == 0xf0) {
3957 } else if (s
->dflag
!= 2) {
3958 ot
= (s
->prefix
& PREFIX_DATA
? OT_WORD
: OT_LONG
);
3963 gen_op_mov_TN_reg(OT_LONG
, 0, reg
);
3964 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3965 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3966 gen_helper_crc32(cpu_T
[0], cpu_tmp2_i32
,
3967 cpu_T
[0], tcg_const_i32(8 << ot
));
3969 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3970 gen_op_mov_reg_T0(ot
, reg
);
3973 case 0x1f0: /* crc32 or movbe */
3975 /* For these insns, the f3 prefix is supposed to have priority
3976 over the 66 prefix, but that's not what we implement above
3978 if (s
->prefix
& PREFIX_REPNZ
) {
3982 case 0x0f0: /* movbe Gy,My */
3983 case 0x0f1: /* movbe My,Gy */
3984 if (!(s
->cpuid_ext_features
& CPUID_EXT_MOVBE
)) {
3987 if (s
->dflag
!= 2) {
3988 ot
= (s
->prefix
& PREFIX_DATA
? OT_WORD
: OT_LONG
);
3993 /* Load the data incoming to the bswap. Note that the TCG
3994 implementation of bswap requires the input be zero
3995 extended. In the case of the loads, we simply know that
3996 gen_op_ld_v via gen_ldst_modrm does that already. */
3998 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4002 tcg_gen_ext16u_tl(cpu_T
[0], cpu_regs
[reg
]);
4005 tcg_gen_ext32u_tl(cpu_T
[0], cpu_regs
[reg
]);
4008 tcg_gen_mov_tl(cpu_T
[0], cpu_regs
[reg
]);
4015 tcg_gen_bswap16_tl(cpu_T
[0], cpu_T
[0]);
4018 tcg_gen_bswap32_tl(cpu_T
[0], cpu_T
[0]);
4020 #ifdef TARGET_X86_64
4022 tcg_gen_bswap64_tl(cpu_T
[0], cpu_T
[0]);
4028 gen_op_mov_reg_T0(ot
, reg
);
4030 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
4034 case 0x0f2: /* andn Gy, By, Ey */
4035 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
4036 || !(s
->prefix
& PREFIX_VEX
)
4040 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4041 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4042 tcg_gen_andc_tl(cpu_T
[0], cpu_regs
[s
->vex_v
], cpu_T
[0]);
4043 gen_op_mov_reg_T0(ot
, reg
);
4044 gen_op_update1_cc();
4045 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4048 case 0x0f7: /* bextr Gy, Ey, By */
4049 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
4050 || !(s
->prefix
& PREFIX_VEX
)
4054 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4058 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4059 /* Extract START, and shift the operand.
4060 Shifts larger than operand size get zeros. */
4061 tcg_gen_ext8u_tl(cpu_A0
, cpu_regs
[s
->vex_v
]);
4062 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], cpu_A0
);
4064 bound
= tcg_const_tl(ot
== OT_QUAD
? 63 : 31);
4065 zero
= tcg_const_tl(0);
4066 tcg_gen_movcond_tl(TCG_COND_LEU
, cpu_T
[0], cpu_A0
, bound
,
4068 tcg_temp_free(zero
);
4070 /* Extract the LEN into a mask. Lengths larger than
4071 operand size get all ones. */
4072 tcg_gen_shri_tl(cpu_A0
, cpu_regs
[s
->vex_v
], 8);
4073 tcg_gen_ext8u_tl(cpu_A0
, cpu_A0
);
4074 tcg_gen_movcond_tl(TCG_COND_LEU
, cpu_A0
, cpu_A0
, bound
,
4076 tcg_temp_free(bound
);
4077 tcg_gen_movi_tl(cpu_T
[1], 1);
4078 tcg_gen_shl_tl(cpu_T
[1], cpu_T
[1], cpu_A0
);
4079 tcg_gen_subi_tl(cpu_T
[1], cpu_T
[1], 1);
4080 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4082 gen_op_mov_reg_T0(ot
, reg
);
4083 gen_op_update1_cc();
4084 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4088 case 0x0f5: /* bzhi Gy, Ey, By */
4089 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4090 || !(s
->prefix
& PREFIX_VEX
)
4094 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4095 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4096 tcg_gen_ext8u_tl(cpu_T
[1], cpu_regs
[s
->vex_v
]);
4098 TCGv bound
= tcg_const_tl(ot
== OT_QUAD
? 63 : 31);
4099 /* Note that since we're using BMILG (in order to get O
4100 cleared) we need to store the inverse into C. */
4101 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_cc_src
,
4103 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_T
[1], cpu_T
[1],
4104 bound
, bound
, cpu_T
[1]);
4105 tcg_temp_free(bound
);
4107 tcg_gen_movi_tl(cpu_A0
, -1);
4108 tcg_gen_shl_tl(cpu_A0
, cpu_A0
, cpu_T
[1]);
4109 tcg_gen_andc_tl(cpu_T
[0], cpu_T
[0], cpu_A0
);
4110 gen_op_mov_reg_T0(ot
, reg
);
4111 gen_op_update1_cc();
4112 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
4115 case 0x3f6: /* mulx By, Gy, rdx, Ey */
4116 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4117 || !(s
->prefix
& PREFIX_VEX
)
4121 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4122 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4125 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4126 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_regs
[R_EDX
]);
4127 tcg_gen_mulu2_i32(cpu_tmp2_i32
, cpu_tmp3_i32
,
4128 cpu_tmp2_i32
, cpu_tmp3_i32
);
4129 tcg_gen_extu_i32_tl(cpu_regs
[s
->vex_v
], cpu_tmp2_i32
);
4130 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp3_i32
);
4132 #ifdef TARGET_X86_64
4134 tcg_gen_mulu2_i64(cpu_regs
[s
->vex_v
], cpu_regs
[reg
],
4135 cpu_T
[0], cpu_regs
[R_EDX
]);
4141 case 0x3f5: /* pdep Gy, By, Ey */
4142 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4143 || !(s
->prefix
& PREFIX_VEX
)
4147 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4148 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4149 /* Note that by zero-extending the mask operand, we
4150 automatically handle zero-extending the result. */
4151 if (s
->dflag
== 2) {
4152 tcg_gen_mov_tl(cpu_T
[1], cpu_regs
[s
->vex_v
]);
4154 tcg_gen_ext32u_tl(cpu_T
[1], cpu_regs
[s
->vex_v
]);
4156 gen_helper_pdep(cpu_regs
[reg
], cpu_T
[0], cpu_T
[1]);
4159 case 0x2f5: /* pext Gy, By, Ey */
4160 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4161 || !(s
->prefix
& PREFIX_VEX
)
4165 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4166 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4167 /* Note that by zero-extending the mask operand, we
4168 automatically handle zero-extending the result. */
4169 if (s
->dflag
== 2) {
4170 tcg_gen_mov_tl(cpu_T
[1], cpu_regs
[s
->vex_v
]);
4172 tcg_gen_ext32u_tl(cpu_T
[1], cpu_regs
[s
->vex_v
]);
4174 gen_helper_pext(cpu_regs
[reg
], cpu_T
[0], cpu_T
[1]);
4177 case 0x1f6: /* adcx Gy, Ey */
4178 case 0x2f6: /* adox Gy, Ey */
4179 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_ADX
)) {
4182 TCGv carry_in
, carry_out
, zero
;
4185 ot
= (s
->dflag
== 2 ? OT_QUAD
: OT_LONG
);
4186 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4188 /* Re-use the carry-out from a previous round. */
4189 TCGV_UNUSED(carry_in
);
4190 carry_out
= (b
== 0x1f6 ? cpu_cc_dst
: cpu_cc_src2
);
4194 carry_in
= cpu_cc_dst
;
4195 end_op
= CC_OP_ADCX
;
4197 end_op
= CC_OP_ADCOX
;
4202 end_op
= CC_OP_ADCOX
;
4204 carry_in
= cpu_cc_src2
;
4205 end_op
= CC_OP_ADOX
;
4209 end_op
= CC_OP_ADCOX
;
4210 carry_in
= carry_out
;
4213 end_op
= (b
== 0x1f6 ? CC_OP_ADCX
: CC_OP_ADOX
);
4216 /* If we can't reuse carry-out, get it out of EFLAGS. */
4217 if (TCGV_IS_UNUSED(carry_in
)) {
4218 if (s
->cc_op
!= CC_OP_ADCX
&& s
->cc_op
!= CC_OP_ADOX
) {
4219 gen_compute_eflags(s
);
4221 carry_in
= cpu_tmp0
;
4222 tcg_gen_shri_tl(carry_in
, cpu_cc_src
,
4223 ctz32(b
== 0x1f6 ? CC_C
: CC_O
));
4224 tcg_gen_andi_tl(carry_in
, carry_in
, 1);
4228 #ifdef TARGET_X86_64
4230 /* If we know TL is 64-bit, and we want a 32-bit
4231 result, just do everything in 64-bit arithmetic. */
4232 tcg_gen_ext32u_i64(cpu_regs
[reg
], cpu_regs
[reg
]);
4233 tcg_gen_ext32u_i64(cpu_T
[0], cpu_T
[0]);
4234 tcg_gen_add_i64(cpu_T
[0], cpu_T
[0], cpu_regs
[reg
]);
4235 tcg_gen_add_i64(cpu_T
[0], cpu_T
[0], carry_in
);
4236 tcg_gen_ext32u_i64(cpu_regs
[reg
], cpu_T
[0]);
4237 tcg_gen_shri_i64(carry_out
, cpu_T
[0], 32);
4241 /* Otherwise compute the carry-out in two steps. */
4242 zero
= tcg_const_tl(0);
4243 tcg_gen_add2_tl(cpu_T
[0], carry_out
,
4246 tcg_gen_add2_tl(cpu_regs
[reg
], carry_out
,
4247 cpu_regs
[reg
], carry_out
,
4249 tcg_temp_free(zero
);
4252 set_cc_op(s
, end_op
);
4256 case 0x1f7: /* shlx Gy, Ey, By */
4257 case 0x2f7: /* sarx Gy, Ey, By */
4258 case 0x3f7: /* shrx Gy, Ey, By */
4259 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4260 || !(s
->prefix
& PREFIX_VEX
)
4264 ot
= (s
->dflag
== 2 ? OT_QUAD
: OT_LONG
);
4265 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4266 if (ot
== OT_QUAD
) {
4267 tcg_gen_andi_tl(cpu_T
[1], cpu_regs
[s
->vex_v
], 63);
4269 tcg_gen_andi_tl(cpu_T
[1], cpu_regs
[s
->vex_v
], 31);
4272 tcg_gen_shl_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4273 } else if (b
== 0x2f7) {
4274 if (ot
!= OT_QUAD
) {
4275 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4277 tcg_gen_sar_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4279 if (ot
!= OT_QUAD
) {
4280 tcg_gen_ext32u_tl(cpu_T
[0], cpu_T
[0]);
4282 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4284 gen_op_mov_reg_T0(ot
, reg
);
4290 case 0x3f3: /* Group 17 */
4291 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
4292 || !(s
->prefix
& PREFIX_VEX
)
4296 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4297 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4300 case 1: /* blsr By,Ey */
4301 tcg_gen_neg_tl(cpu_T
[1], cpu_T
[0]);
4302 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4303 gen_op_mov_reg_T0(ot
, s
->vex_v
);
4304 gen_op_update2_cc();
4305 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
4308 case 2: /* blsmsk By,Ey */
4309 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4310 tcg_gen_subi_tl(cpu_T
[0], cpu_T
[0], 1);
4311 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_cc_src
);
4312 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4313 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
4316 case 3: /* blsi By, Ey */
4317 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4318 tcg_gen_subi_tl(cpu_T
[0], cpu_T
[0], 1);
4319 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_cc_src
);
4320 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4321 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
4337 modrm
= cpu_ldub_code(env
, s
->pc
++);
4339 reg
= ((modrm
>> 3) & 7) | rex_r
;
4340 mod
= (modrm
>> 6) & 3;
4345 sse_fn_eppi
= sse_op_table7
[b
].op
[b1
];
4349 if (!(s
->cpuid_ext_features
& sse_op_table7
[b
].ext_mask
))
4352 if (sse_fn_eppi
== SSE_SPECIAL
) {
4353 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
4354 rm
= (modrm
& 7) | REX_B(s
);
4356 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4357 reg
= ((modrm
>> 3) & 7) | rex_r
;
4358 val
= cpu_ldub_code(env
, s
->pc
++);
4360 case 0x14: /* pextrb */
4361 tcg_gen_ld8u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4362 xmm_regs
[reg
].XMM_B(val
& 15)));
4364 gen_op_mov_reg_T0(ot
, rm
);
4366 tcg_gen_qemu_st8(cpu_T
[0], cpu_A0
,
4367 (s
->mem_index
>> 2) - 1);
4369 case 0x15: /* pextrw */
4370 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4371 xmm_regs
[reg
].XMM_W(val
& 7)));
4373 gen_op_mov_reg_T0(ot
, rm
);
4375 tcg_gen_qemu_st16(cpu_T
[0], cpu_A0
,
4376 (s
->mem_index
>> 2) - 1);
4379 if (ot
== OT_LONG
) { /* pextrd */
4380 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
4381 offsetof(CPUX86State
,
4382 xmm_regs
[reg
].XMM_L(val
& 3)));
4383 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
4385 gen_op_mov_reg_v(ot
, rm
, cpu_T
[0]);
4387 tcg_gen_qemu_st32(cpu_T
[0], cpu_A0
,
4388 (s
->mem_index
>> 2) - 1);
4389 } else { /* pextrq */
4390 #ifdef TARGET_X86_64
4391 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
4392 offsetof(CPUX86State
,
4393 xmm_regs
[reg
].XMM_Q(val
& 1)));
4395 gen_op_mov_reg_v(ot
, rm
, cpu_tmp1_i64
);
4397 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
4398 (s
->mem_index
>> 2) - 1);
4404 case 0x17: /* extractps */
4405 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4406 xmm_regs
[reg
].XMM_L(val
& 3)));
4408 gen_op_mov_reg_T0(ot
, rm
);
4410 tcg_gen_qemu_st32(cpu_T
[0], cpu_A0
,
4411 (s
->mem_index
>> 2) - 1);
4413 case 0x20: /* pinsrb */
4415 gen_op_mov_TN_reg(OT_LONG
, 0, rm
);
4417 tcg_gen_qemu_ld8u(cpu_T
[0], cpu_A0
,
4418 (s
->mem_index
>> 2) - 1);
4419 tcg_gen_st8_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4420 xmm_regs
[reg
].XMM_B(val
& 15)));
4422 case 0x21: /* insertps */
4424 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
4425 offsetof(CPUX86State
,xmm_regs
[rm
]
4426 .XMM_L((val
>> 6) & 3)));
4428 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
4429 (s
->mem_index
>> 2) - 1);
4430 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
4432 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
4433 offsetof(CPUX86State
,xmm_regs
[reg
]
4434 .XMM_L((val
>> 4) & 3)));
4436 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4437 cpu_env
, offsetof(CPUX86State
,
4438 xmm_regs
[reg
].XMM_L(0)));
4440 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4441 cpu_env
, offsetof(CPUX86State
,
4442 xmm_regs
[reg
].XMM_L(1)));
4444 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4445 cpu_env
, offsetof(CPUX86State
,
4446 xmm_regs
[reg
].XMM_L(2)));
4448 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4449 cpu_env
, offsetof(CPUX86State
,
4450 xmm_regs
[reg
].XMM_L(3)));
4453 if (ot
== OT_LONG
) { /* pinsrd */
4455 gen_op_mov_v_reg(ot
, cpu_tmp0
, rm
);
4457 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
4458 (s
->mem_index
>> 2) - 1);
4459 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
4460 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
4461 offsetof(CPUX86State
,
4462 xmm_regs
[reg
].XMM_L(val
& 3)));
4463 } else { /* pinsrq */
4464 #ifdef TARGET_X86_64
4466 gen_op_mov_v_reg(ot
, cpu_tmp1_i64
, rm
);
4468 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
4469 (s
->mem_index
>> 2) - 1);
4470 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
4471 offsetof(CPUX86State
,
4472 xmm_regs
[reg
].XMM_Q(val
& 1)));
4483 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4485 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
4487 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4488 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4489 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
4492 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4494 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4496 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4497 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4498 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
4501 val
= cpu_ldub_code(env
, s
->pc
++);
4503 if ((b
& 0xfc) == 0x60) { /* pcmpXstrX */
4504 set_cc_op(s
, CC_OP_EFLAGS
);
4507 /* The helper must use entire 64-bit gp registers */
4511 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4512 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4513 sse_fn_eppi(cpu_env
, cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4517 /* Various integer extensions at 0f 3a f[0-f]. */
4518 b
= modrm
| (b1
<< 8);
4519 modrm
= cpu_ldub_code(env
, s
->pc
++);
4520 reg
= ((modrm
>> 3) & 7) | rex_r
;
4523 case 0x3f0: /* rorx Gy,Ey, Ib */
4524 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4525 || !(s
->prefix
& PREFIX_VEX
)
4529 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4530 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4531 b
= cpu_ldub_code(env
, s
->pc
++);
4532 if (ot
== OT_QUAD
) {
4533 tcg_gen_rotri_tl(cpu_T
[0], cpu_T
[0], b
& 63);
4535 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4536 tcg_gen_rotri_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, b
& 31);
4537 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
4539 gen_op_mov_reg_T0(ot
, reg
);
4551 /* generic MMX or SSE operation */
4553 case 0x70: /* pshufx insn */
4554 case 0xc6: /* pshufx insn */
4555 case 0xc2: /* compare insns */
4562 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4564 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4565 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4566 if (b1
>= 2 && ((b
>= 0x50 && b
<= 0x5f && b
!= 0x5b) ||
4568 /* specific case for SSE single instructions */
4571 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
4572 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
4575 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_t0
.XMM_D(0)));
4578 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
4581 rm
= (modrm
& 7) | REX_B(s
);
4582 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
4585 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4587 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4588 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4589 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
4592 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4596 case 0x0f: /* 3DNow! data insns */
4597 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
))
4599 val
= cpu_ldub_code(env
, s
->pc
++);
4600 sse_fn_epp
= sse_op_table5
[val
];
4604 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4605 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4606 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4608 case 0x70: /* pshufx insn */
4609 case 0xc6: /* pshufx insn */
4610 val
= cpu_ldub_code(env
, s
->pc
++);
4611 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4612 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4613 /* XXX: introduce a new table? */
4614 sse_fn_ppi
= (SSEFunc_0_ppi
)sse_fn_epp
;
4615 sse_fn_ppi(cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4619 val
= cpu_ldub_code(env
, s
->pc
++);
4622 sse_fn_epp
= sse_op_table4
[val
][b1
];
4624 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4625 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4626 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4629 /* maskmov : we must prepare A0 */
4632 #ifdef TARGET_X86_64
4633 if (s
->aflag
== 2) {
4634 gen_op_movq_A0_reg(R_EDI
);
4638 gen_op_movl_A0_reg(R_EDI
);
4640 gen_op_andl_A0_ffff();
4642 gen_add_A0_ds_seg(s
);
4644 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4645 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4646 /* XXX: introduce a new table? */
4647 sse_fn_eppt
= (SSEFunc_0_eppt
)sse_fn_epp
;
4648 sse_fn_eppt(cpu_env
, cpu_ptr0
, cpu_ptr1
, cpu_A0
);
4651 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4652 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4653 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4656 if (b
== 0x2e || b
== 0x2f) {
4657 set_cc_op(s
, CC_OP_EFLAGS
);
4662 /* convert one instruction. s->is_jmp is set if the translation must
4663 be stopped. Return the next pc value */
4664 static target_ulong
disas_insn(CPUX86State
*env
, DisasContext
*s
,
4665 target_ulong pc_start
)
4667 int b
, prefixes
, aflag
, dflag
;
4669 int modrm
, reg
, rm
, mod
, reg_addr
, op
, opreg
, offset_addr
, val
;
4670 target_ulong next_eip
, tval
;
4673 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4674 tcg_gen_debug_insn_start(pc_start
);
4683 #ifdef TARGET_X86_64
4688 s
->rip_offset
= 0; /* for relative ip address */
4692 b
= cpu_ldub_code(env
, s
->pc
);
4694 /* Collect prefixes. */
4697 prefixes
|= PREFIX_REPZ
;
4700 prefixes
|= PREFIX_REPNZ
;
4703 prefixes
|= PREFIX_LOCK
;
4724 prefixes
|= PREFIX_DATA
;
4727 prefixes
|= PREFIX_ADR
;
4729 #ifdef TARGET_X86_64
4733 rex_w
= (b
>> 3) & 1;
4734 rex_r
= (b
& 0x4) << 1;
4735 s
->rex_x
= (b
& 0x2) << 2;
4736 REX_B(s
) = (b
& 0x1) << 3;
4737 x86_64_hregs
= 1; /* select uniform byte register addressing */
4742 case 0xc5: /* 2-byte VEX */
4743 case 0xc4: /* 3-byte VEX */
4744 /* VEX prefixes cannot be used except in 32-bit mode.
4745 Otherwise the instruction is LES or LDS. */
4746 if (s
->code32
&& !s
->vm86
) {
4747 static const int pp_prefix
[4] = {
4748 0, PREFIX_DATA
, PREFIX_REPZ
, PREFIX_REPNZ
4750 int vex3
, vex2
= cpu_ldub_code(env
, s
->pc
);
4752 if (!CODE64(s
) && (vex2
& 0xc0) != 0xc0) {
4753 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4754 otherwise the instruction is LES or LDS. */
4759 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4760 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
4761 | PREFIX_LOCK
| PREFIX_DATA
)) {
4764 #ifdef TARGET_X86_64
4769 rex_r
= (~vex2
>> 4) & 8;
4772 b
= cpu_ldub_code(env
, s
->pc
++);
4774 #ifdef TARGET_X86_64
4775 s
->rex_x
= (~vex2
>> 3) & 8;
4776 s
->rex_b
= (~vex2
>> 2) & 8;
4778 vex3
= cpu_ldub_code(env
, s
->pc
++);
4779 rex_w
= (vex3
>> 7) & 1;
4780 switch (vex2
& 0x1f) {
4781 case 0x01: /* Implied 0f leading opcode bytes. */
4782 b
= cpu_ldub_code(env
, s
->pc
++) | 0x100;
4784 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4787 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4790 default: /* Reserved for future use. */
4794 s
->vex_v
= (~vex3
>> 3) & 0xf;
4795 s
->vex_l
= (vex3
>> 2) & 1;
4796 prefixes
|= pp_prefix
[vex3
& 3] | PREFIX_VEX
;
4801 /* Post-process prefixes. */
4802 if (prefixes
& PREFIX_DATA
) {
4805 if (prefixes
& PREFIX_ADR
) {
4808 #ifdef TARGET_X86_64
4811 /* 0x66 is ignored if rex.w is set */
4814 if (!(prefixes
& PREFIX_ADR
)) {
4820 s
->prefix
= prefixes
;
4824 /* lock generation */
4825 if (prefixes
& PREFIX_LOCK
)
4828 /* now check op code */
4832 /**************************/
4833 /* extended op code */
4834 b
= cpu_ldub_code(env
, s
->pc
++) | 0x100;
4837 /**************************/
4855 ot
= dflag
+ OT_WORD
;
4858 case 0: /* OP Ev, Gv */
4859 modrm
= cpu_ldub_code(env
, s
->pc
++);
4860 reg
= ((modrm
>> 3) & 7) | rex_r
;
4861 mod
= (modrm
>> 6) & 3;
4862 rm
= (modrm
& 7) | REX_B(s
);
4864 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4866 } else if (op
== OP_XORL
&& rm
== reg
) {
4868 /* xor reg, reg optimisation */
4869 set_cc_op(s
, CC_OP_CLR
);
4871 gen_op_mov_reg_T0(ot
, reg
);
4876 gen_op_mov_TN_reg(ot
, 1, reg
);
4877 gen_op(s
, op
, ot
, opreg
);
4879 case 1: /* OP Gv, Ev */
4880 modrm
= cpu_ldub_code(env
, s
->pc
++);
4881 mod
= (modrm
>> 6) & 3;
4882 reg
= ((modrm
>> 3) & 7) | rex_r
;
4883 rm
= (modrm
& 7) | REX_B(s
);
4885 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4886 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4887 } else if (op
== OP_XORL
&& rm
== reg
) {
4890 gen_op_mov_TN_reg(ot
, 1, rm
);
4892 gen_op(s
, op
, ot
, reg
);
4894 case 2: /* OP A, Iv */
4895 val
= insn_get(env
, s
, ot
);
4896 gen_op_movl_T1_im(val
);
4897 gen_op(s
, op
, ot
, OR_EAX
);
4906 case 0x80: /* GRP1 */
4915 ot
= dflag
+ OT_WORD
;
4917 modrm
= cpu_ldub_code(env
, s
->pc
++);
4918 mod
= (modrm
>> 6) & 3;
4919 rm
= (modrm
& 7) | REX_B(s
);
4920 op
= (modrm
>> 3) & 7;
4926 s
->rip_offset
= insn_const_size(ot
);
4927 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4938 val
= insn_get(env
, s
, ot
);
4941 val
= (int8_t)insn_get(env
, s
, OT_BYTE
);
4944 gen_op_movl_T1_im(val
);
4945 gen_op(s
, op
, ot
, opreg
);
4949 /**************************/
4950 /* inc, dec, and other misc arith */
4951 case 0x40 ... 0x47: /* inc Gv */
4952 ot
= dflag
? OT_LONG
: OT_WORD
;
4953 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), 1);
4955 case 0x48 ... 0x4f: /* dec Gv */
4956 ot
= dflag
? OT_LONG
: OT_WORD
;
4957 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), -1);
4959 case 0xf6: /* GRP3 */
4964 ot
= dflag
+ OT_WORD
;
4966 modrm
= cpu_ldub_code(env
, s
->pc
++);
4967 mod
= (modrm
>> 6) & 3;
4968 rm
= (modrm
& 7) | REX_B(s
);
4969 op
= (modrm
>> 3) & 7;
4972 s
->rip_offset
= insn_const_size(ot
);
4973 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4974 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
4976 gen_op_mov_TN_reg(ot
, 0, rm
);
4981 val
= insn_get(env
, s
, ot
);
4982 gen_op_movl_T1_im(val
);
4983 gen_op_testl_T0_T1_cc();
4984 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4987 tcg_gen_not_tl(cpu_T
[0], cpu_T
[0]);
4989 gen_op_st_T0_A0(ot
+ s
->mem_index
);
4991 gen_op_mov_reg_T0(ot
, rm
);
4995 tcg_gen_neg_tl(cpu_T
[0], cpu_T
[0]);
4997 gen_op_st_T0_A0(ot
+ s
->mem_index
);
4999 gen_op_mov_reg_T0(ot
, rm
);
5001 gen_op_update_neg_cc();
5002 set_cc_op(s
, CC_OP_SUBB
+ ot
);
5007 gen_op_mov_TN_reg(OT_BYTE
, 1, R_EAX
);
5008 tcg_gen_ext8u_tl(cpu_T
[0], cpu_T
[0]);
5009 tcg_gen_ext8u_tl(cpu_T
[1], cpu_T
[1]);
5010 /* XXX: use 32 bit mul which could be faster */
5011 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5012 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
5013 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5014 tcg_gen_andi_tl(cpu_cc_src
, cpu_T
[0], 0xff00);
5015 set_cc_op(s
, CC_OP_MULB
);
5018 gen_op_mov_TN_reg(OT_WORD
, 1, R_EAX
);
5019 tcg_gen_ext16u_tl(cpu_T
[0], cpu_T
[0]);
5020 tcg_gen_ext16u_tl(cpu_T
[1], cpu_T
[1]);
5021 /* XXX: use 32 bit mul which could be faster */
5022 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5023 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
5024 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5025 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 16);
5026 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
5027 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
5028 set_cc_op(s
, CC_OP_MULW
);
5032 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5033 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_regs
[R_EAX
]);
5034 tcg_gen_mulu2_i32(cpu_tmp2_i32
, cpu_tmp3_i32
,
5035 cpu_tmp2_i32
, cpu_tmp3_i32
);
5036 tcg_gen_extu_i32_tl(cpu_regs
[R_EAX
], cpu_tmp2_i32
);
5037 tcg_gen_extu_i32_tl(cpu_regs
[R_EDX
], cpu_tmp3_i32
);
5038 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
5039 tcg_gen_mov_tl(cpu_cc_src
, cpu_regs
[R_EDX
]);
5040 set_cc_op(s
, CC_OP_MULL
);
5042 #ifdef TARGET_X86_64
5044 tcg_gen_mulu2_i64(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
],
5045 cpu_T
[0], cpu_regs
[R_EAX
]);
5046 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
5047 tcg_gen_mov_tl(cpu_cc_src
, cpu_regs
[R_EDX
]);
5048 set_cc_op(s
, CC_OP_MULQ
);
5056 gen_op_mov_TN_reg(OT_BYTE
, 1, R_EAX
);
5057 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
5058 tcg_gen_ext8s_tl(cpu_T
[1], cpu_T
[1]);
5059 /* XXX: use 32 bit mul which could be faster */
5060 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5061 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
5062 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5063 tcg_gen_ext8s_tl(cpu_tmp0
, cpu_T
[0]);
5064 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
5065 set_cc_op(s
, CC_OP_MULB
);
5068 gen_op_mov_TN_reg(OT_WORD
, 1, R_EAX
);
5069 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5070 tcg_gen_ext16s_tl(cpu_T
[1], cpu_T
[1]);
5071 /* XXX: use 32 bit mul which could be faster */
5072 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5073 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
5074 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5075 tcg_gen_ext16s_tl(cpu_tmp0
, cpu_T
[0]);
5076 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
5077 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 16);
5078 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
5079 set_cc_op(s
, CC_OP_MULW
);
5083 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5084 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_regs
[R_EAX
]);
5085 tcg_gen_muls2_i32(cpu_tmp2_i32
, cpu_tmp3_i32
,
5086 cpu_tmp2_i32
, cpu_tmp3_i32
);
5087 tcg_gen_extu_i32_tl(cpu_regs
[R_EAX
], cpu_tmp2_i32
);
5088 tcg_gen_extu_i32_tl(cpu_regs
[R_EDX
], cpu_tmp3_i32
);
5089 tcg_gen_sari_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 31);
5090 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
5091 tcg_gen_sub_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, cpu_tmp3_i32
);
5092 tcg_gen_extu_i32_tl(cpu_cc_src
, cpu_tmp2_i32
);
5093 set_cc_op(s
, CC_OP_MULL
);
5095 #ifdef TARGET_X86_64
5097 tcg_gen_muls2_i64(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
],
5098 cpu_T
[0], cpu_regs
[R_EAX
]);
5099 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
5100 tcg_gen_sari_tl(cpu_cc_src
, cpu_regs
[R_EAX
], 63);
5101 tcg_gen_sub_tl(cpu_cc_src
, cpu_cc_src
, cpu_regs
[R_EDX
]);
5102 set_cc_op(s
, CC_OP_MULQ
);
5110 gen_jmp_im(pc_start
- s
->cs_base
);
5111 gen_helper_divb_AL(cpu_env
, cpu_T
[0]);
5114 gen_jmp_im(pc_start
- s
->cs_base
);
5115 gen_helper_divw_AX(cpu_env
, cpu_T
[0]);
5119 gen_jmp_im(pc_start
- s
->cs_base
);
5120 gen_helper_divl_EAX(cpu_env
, cpu_T
[0]);
5122 #ifdef TARGET_X86_64
5124 gen_jmp_im(pc_start
- s
->cs_base
);
5125 gen_helper_divq_EAX(cpu_env
, cpu_T
[0]);
5133 gen_jmp_im(pc_start
- s
->cs_base
);
5134 gen_helper_idivb_AL(cpu_env
, cpu_T
[0]);
5137 gen_jmp_im(pc_start
- s
->cs_base
);
5138 gen_helper_idivw_AX(cpu_env
, cpu_T
[0]);
5142 gen_jmp_im(pc_start
- s
->cs_base
);
5143 gen_helper_idivl_EAX(cpu_env
, cpu_T
[0]);
5145 #ifdef TARGET_X86_64
5147 gen_jmp_im(pc_start
- s
->cs_base
);
5148 gen_helper_idivq_EAX(cpu_env
, cpu_T
[0]);
5158 case 0xfe: /* GRP4 */
5159 case 0xff: /* GRP5 */
5163 ot
= dflag
+ OT_WORD
;
5165 modrm
= cpu_ldub_code(env
, s
->pc
++);
5166 mod
= (modrm
>> 6) & 3;
5167 rm
= (modrm
& 7) | REX_B(s
);
5168 op
= (modrm
>> 3) & 7;
5169 if (op
>= 2 && b
== 0xfe) {
5173 if (op
== 2 || op
== 4) {
5174 /* operand size for jumps is 64 bit */
5176 } else if (op
== 3 || op
== 5) {
5177 ot
= dflag
? OT_LONG
+ (rex_w
== 1) : OT_WORD
;
5178 } else if (op
== 6) {
5179 /* default push size is 64 bit */
5180 ot
= dflag
? OT_QUAD
: OT_WORD
;
5184 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5185 if (op
>= 2 && op
!= 3 && op
!= 5)
5186 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
5188 gen_op_mov_TN_reg(ot
, 0, rm
);
5192 case 0: /* inc Ev */
5197 gen_inc(s
, ot
, opreg
, 1);
5199 case 1: /* dec Ev */
5204 gen_inc(s
, ot
, opreg
, -1);
5206 case 2: /* call Ev */
5207 /* XXX: optimize if memory (no 'and' is necessary) */
5209 gen_op_andl_T0_ffff();
5210 next_eip
= s
->pc
- s
->cs_base
;
5211 gen_movtl_T1_im(next_eip
);
5216 case 3: /* lcall Ev */
5217 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5218 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
5219 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
5221 if (s
->pe
&& !s
->vm86
) {
5222 gen_update_cc_op(s
);
5223 gen_jmp_im(pc_start
- s
->cs_base
);
5224 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5225 gen_helper_lcall_protected(cpu_env
, cpu_tmp2_i32
, cpu_T
[1],
5226 tcg_const_i32(dflag
),
5227 tcg_const_i32(s
->pc
- pc_start
));
5229 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5230 gen_helper_lcall_real(cpu_env
, cpu_tmp2_i32
, cpu_T
[1],
5231 tcg_const_i32(dflag
),
5232 tcg_const_i32(s
->pc
- s
->cs_base
));
5236 case 4: /* jmp Ev */
5238 gen_op_andl_T0_ffff();
5242 case 5: /* ljmp Ev */
5243 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5244 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
5245 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
5247 if (s
->pe
&& !s
->vm86
) {
5248 gen_update_cc_op(s
);
5249 gen_jmp_im(pc_start
- s
->cs_base
);
5250 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5251 gen_helper_ljmp_protected(cpu_env
, cpu_tmp2_i32
, cpu_T
[1],
5252 tcg_const_i32(s
->pc
- pc_start
));
5254 gen_op_movl_seg_T0_vm(R_CS
);
5255 gen_op_movl_T0_T1();
5260 case 6: /* push Ev */
5268 case 0x84: /* test Ev, Gv */
5273 ot
= dflag
+ OT_WORD
;
5275 modrm
= cpu_ldub_code(env
, s
->pc
++);
5276 reg
= ((modrm
>> 3) & 7) | rex_r
;
5278 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5279 gen_op_mov_TN_reg(ot
, 1, reg
);
5280 gen_op_testl_T0_T1_cc();
5281 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
5284 case 0xa8: /* test eAX, Iv */
5289 ot
= dflag
+ OT_WORD
;
5290 val
= insn_get(env
, s
, ot
);
5292 gen_op_mov_TN_reg(ot
, 0, OR_EAX
);
5293 gen_op_movl_T1_im(val
);
5294 gen_op_testl_T0_T1_cc();
5295 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
5298 case 0x98: /* CWDE/CBW */
5299 #ifdef TARGET_X86_64
5301 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
5302 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
5303 gen_op_mov_reg_T0(OT_QUAD
, R_EAX
);
5307 gen_op_mov_TN_reg(OT_WORD
, 0, R_EAX
);
5308 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5309 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
5311 gen_op_mov_TN_reg(OT_BYTE
, 0, R_EAX
);
5312 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
5313 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
5316 case 0x99: /* CDQ/CWD */
5317 #ifdef TARGET_X86_64
5319 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EAX
);
5320 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 63);
5321 gen_op_mov_reg_T0(OT_QUAD
, R_EDX
);
5325 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
5326 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
5327 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 31);
5328 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
5330 gen_op_mov_TN_reg(OT_WORD
, 0, R_EAX
);
5331 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5332 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 15);
5333 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
5336 case 0x1af: /* imul Gv, Ev */
5337 case 0x69: /* imul Gv, Ev, I */
5339 ot
= dflag
+ OT_WORD
;
5340 modrm
= cpu_ldub_code(env
, s
->pc
++);
5341 reg
= ((modrm
>> 3) & 7) | rex_r
;
5343 s
->rip_offset
= insn_const_size(ot
);
5346 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5348 val
= insn_get(env
, s
, ot
);
5349 gen_op_movl_T1_im(val
);
5350 } else if (b
== 0x6b) {
5351 val
= (int8_t)insn_get(env
, s
, OT_BYTE
);
5352 gen_op_movl_T1_im(val
);
5354 gen_op_mov_TN_reg(ot
, 1, reg
);
5357 #ifdef TARGET_X86_64
5359 tcg_gen_muls2_i64(cpu_regs
[reg
], cpu_T
[1], cpu_T
[0], cpu_T
[1]);
5360 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[reg
]);
5361 tcg_gen_sari_tl(cpu_cc_src
, cpu_cc_dst
, 63);
5362 tcg_gen_sub_tl(cpu_cc_src
, cpu_cc_src
, cpu_T
[1]);
5366 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5367 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
5368 tcg_gen_muls2_i32(cpu_tmp2_i32
, cpu_tmp3_i32
,
5369 cpu_tmp2_i32
, cpu_tmp3_i32
);
5370 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp2_i32
);
5371 tcg_gen_sari_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 31);
5372 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[reg
]);
5373 tcg_gen_sub_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, cpu_tmp3_i32
);
5374 tcg_gen_extu_i32_tl(cpu_cc_src
, cpu_tmp2_i32
);
5377 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5378 tcg_gen_ext16s_tl(cpu_T
[1], cpu_T
[1]);
5379 /* XXX: use 32 bit mul which could be faster */
5380 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5381 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5382 tcg_gen_ext16s_tl(cpu_tmp0
, cpu_T
[0]);
5383 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
5384 gen_op_mov_reg_T0(ot
, reg
);
5387 set_cc_op(s
, CC_OP_MULB
+ ot
);
5390 case 0x1c1: /* xadd Ev, Gv */
5394 ot
= dflag
+ OT_WORD
;
5395 modrm
= cpu_ldub_code(env
, s
->pc
++);
5396 reg
= ((modrm
>> 3) & 7) | rex_r
;
5397 mod
= (modrm
>> 6) & 3;
5399 rm
= (modrm
& 7) | REX_B(s
);
5400 gen_op_mov_TN_reg(ot
, 0, reg
);
5401 gen_op_mov_TN_reg(ot
, 1, rm
);
5402 gen_op_addl_T0_T1();
5403 gen_op_mov_reg_T1(ot
, reg
);
5404 gen_op_mov_reg_T0(ot
, rm
);
5406 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5407 gen_op_mov_TN_reg(ot
, 0, reg
);
5408 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5409 gen_op_addl_T0_T1();
5410 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5411 gen_op_mov_reg_T1(ot
, reg
);
5413 gen_op_update2_cc();
5414 set_cc_op(s
, CC_OP_ADDB
+ ot
);
5417 case 0x1b1: /* cmpxchg Ev, Gv */
5420 TCGv t0
, t1
, t2
, a0
;
5425 ot
= dflag
+ OT_WORD
;
5426 modrm
= cpu_ldub_code(env
, s
->pc
++);
5427 reg
= ((modrm
>> 3) & 7) | rex_r
;
5428 mod
= (modrm
>> 6) & 3;
5429 t0
= tcg_temp_local_new();
5430 t1
= tcg_temp_local_new();
5431 t2
= tcg_temp_local_new();
5432 a0
= tcg_temp_local_new();
5433 gen_op_mov_v_reg(ot
, t1
, reg
);
5435 rm
= (modrm
& 7) | REX_B(s
);
5436 gen_op_mov_v_reg(ot
, t0
, rm
);
5438 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5439 tcg_gen_mov_tl(a0
, cpu_A0
);
5440 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
5441 rm
= 0; /* avoid warning */
5443 label1
= gen_new_label();
5444 tcg_gen_mov_tl(t2
, cpu_regs
[R_EAX
]);
5447 tcg_gen_brcond_tl(TCG_COND_EQ
, t2
, t0
, label1
);
5448 label2
= gen_new_label();
5450 gen_op_mov_reg_v(ot
, R_EAX
, t0
);
5452 gen_set_label(label1
);
5453 gen_op_mov_reg_v(ot
, rm
, t1
);
5455 /* perform no-op store cycle like physical cpu; must be
5456 before changing accumulator to ensure idempotency if
5457 the store faults and the instruction is restarted */
5458 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
5459 gen_op_mov_reg_v(ot
, R_EAX
, t0
);
5461 gen_set_label(label1
);
5462 gen_op_st_v(ot
+ s
->mem_index
, t1
, a0
);
5464 gen_set_label(label2
);
5465 tcg_gen_mov_tl(cpu_cc_src
, t0
);
5466 tcg_gen_mov_tl(cpu_cc_srcT
, t2
);
5467 tcg_gen_sub_tl(cpu_cc_dst
, t2
, t0
);
5468 set_cc_op(s
, CC_OP_SUBB
+ ot
);
5475 case 0x1c7: /* cmpxchg8b */
5476 modrm
= cpu_ldub_code(env
, s
->pc
++);
5477 mod
= (modrm
>> 6) & 3;
5478 if ((mod
== 3) || ((modrm
& 0x38) != 0x8))
5480 #ifdef TARGET_X86_64
5482 if (!(s
->cpuid_ext_features
& CPUID_EXT_CX16
))
5484 gen_jmp_im(pc_start
- s
->cs_base
);
5485 gen_update_cc_op(s
);
5486 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5487 gen_helper_cmpxchg16b(cpu_env
, cpu_A0
);
5491 if (!(s
->cpuid_features
& CPUID_CX8
))
5493 gen_jmp_im(pc_start
- s
->cs_base
);
5494 gen_update_cc_op(s
);
5495 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5496 gen_helper_cmpxchg8b(cpu_env
, cpu_A0
);
5498 set_cc_op(s
, CC_OP_EFLAGS
);
5501 /**************************/
5503 case 0x50 ... 0x57: /* push */
5504 gen_op_mov_TN_reg(OT_LONG
, 0, (b
& 7) | REX_B(s
));
5507 case 0x58 ... 0x5f: /* pop */
5509 ot
= dflag
? OT_QUAD
: OT_WORD
;
5511 ot
= dflag
+ OT_WORD
;
5514 /* NOTE: order is important for pop %sp */
5516 gen_op_mov_reg_T0(ot
, (b
& 7) | REX_B(s
));
5518 case 0x60: /* pusha */
5523 case 0x61: /* popa */
5528 case 0x68: /* push Iv */
5531 ot
= dflag
? OT_QUAD
: OT_WORD
;
5533 ot
= dflag
+ OT_WORD
;
5536 val
= insn_get(env
, s
, ot
);
5538 val
= (int8_t)insn_get(env
, s
, OT_BYTE
);
5539 gen_op_movl_T0_im(val
);
5542 case 0x8f: /* pop Ev */
5544 ot
= dflag
? OT_QUAD
: OT_WORD
;
5546 ot
= dflag
+ OT_WORD
;
5548 modrm
= cpu_ldub_code(env
, s
->pc
++);
5549 mod
= (modrm
>> 6) & 3;
5552 /* NOTE: order is important for pop %sp */
5554 rm
= (modrm
& 7) | REX_B(s
);
5555 gen_op_mov_reg_T0(ot
, rm
);
5557 /* NOTE: order is important too for MMU exceptions */
5558 s
->popl_esp_hack
= 1 << ot
;
5559 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5560 s
->popl_esp_hack
= 0;
5564 case 0xc8: /* enter */
5567 val
= cpu_lduw_code(env
, s
->pc
);
5569 level
= cpu_ldub_code(env
, s
->pc
++);
5570 gen_enter(s
, val
, level
);
5573 case 0xc9: /* leave */
5574 /* XXX: exception not precise (ESP is updated before potential exception) */
5576 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EBP
);
5577 gen_op_mov_reg_T0(OT_QUAD
, R_ESP
);
5578 } else if (s
->ss32
) {
5579 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
5580 gen_op_mov_reg_T0(OT_LONG
, R_ESP
);
5582 gen_op_mov_TN_reg(OT_WORD
, 0, R_EBP
);
5583 gen_op_mov_reg_T0(OT_WORD
, R_ESP
);
5587 ot
= dflag
? OT_QUAD
: OT_WORD
;
5589 ot
= dflag
+ OT_WORD
;
5591 gen_op_mov_reg_T0(ot
, R_EBP
);
5594 case 0x06: /* push es */
5595 case 0x0e: /* push cs */
5596 case 0x16: /* push ss */
5597 case 0x1e: /* push ds */
5600 gen_op_movl_T0_seg(b
>> 3);
5603 case 0x1a0: /* push fs */
5604 case 0x1a8: /* push gs */
5605 gen_op_movl_T0_seg((b
>> 3) & 7);
5608 case 0x07: /* pop es */
5609 case 0x17: /* pop ss */
5610 case 0x1f: /* pop ds */
5615 gen_movl_seg_T0(s
, reg
, pc_start
- s
->cs_base
);
5618 /* if reg == SS, inhibit interrupts/trace. */
5619 /* If several instructions disable interrupts, only the
5621 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
5622 gen_helper_set_inhibit_irq(cpu_env
);
5626 gen_jmp_im(s
->pc
- s
->cs_base
);
5630 case 0x1a1: /* pop fs */
5631 case 0x1a9: /* pop gs */
5633 gen_movl_seg_T0(s
, (b
>> 3) & 7, pc_start
- s
->cs_base
);
5636 gen_jmp_im(s
->pc
- s
->cs_base
);
5641 /**************************/
5644 case 0x89: /* mov Gv, Ev */
5648 ot
= dflag
+ OT_WORD
;
5649 modrm
= cpu_ldub_code(env
, s
->pc
++);
5650 reg
= ((modrm
>> 3) & 7) | rex_r
;
5652 /* generate a generic store */
5653 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
5656 case 0xc7: /* mov Ev, Iv */
5660 ot
= dflag
+ OT_WORD
;
5661 modrm
= cpu_ldub_code(env
, s
->pc
++);
5662 mod
= (modrm
>> 6) & 3;
5664 s
->rip_offset
= insn_const_size(ot
);
5665 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5667 val
= insn_get(env
, s
, ot
);
5668 gen_op_movl_T0_im(val
);
5670 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5672 gen_op_mov_reg_T0(ot
, (modrm
& 7) | REX_B(s
));
5675 case 0x8b: /* mov Ev, Gv */
5679 ot
= OT_WORD
+ dflag
;
5680 modrm
= cpu_ldub_code(env
, s
->pc
++);
5681 reg
= ((modrm
>> 3) & 7) | rex_r
;
5683 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5684 gen_op_mov_reg_T0(ot
, reg
);
5686 case 0x8e: /* mov seg, Gv */
5687 modrm
= cpu_ldub_code(env
, s
->pc
++);
5688 reg
= (modrm
>> 3) & 7;
5689 if (reg
>= 6 || reg
== R_CS
)
5691 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
5692 gen_movl_seg_T0(s
, reg
, pc_start
- s
->cs_base
);
5694 /* if reg == SS, inhibit interrupts/trace */
5695 /* If several instructions disable interrupts, only the
5697 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
5698 gen_helper_set_inhibit_irq(cpu_env
);
5702 gen_jmp_im(s
->pc
- s
->cs_base
);
5706 case 0x8c: /* mov Gv, seg */
5707 modrm
= cpu_ldub_code(env
, s
->pc
++);
5708 reg
= (modrm
>> 3) & 7;
5709 mod
= (modrm
>> 6) & 3;
5712 gen_op_movl_T0_seg(reg
);
5714 ot
= OT_WORD
+ dflag
;
5717 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5720 case 0x1b6: /* movzbS Gv, Eb */
5721 case 0x1b7: /* movzwS Gv, Eb */
5722 case 0x1be: /* movsbS Gv, Eb */
5723 case 0x1bf: /* movswS Gv, Eb */
5726 /* d_ot is the size of destination */
5727 d_ot
= dflag
+ OT_WORD
;
5728 /* ot is the size of source */
5729 ot
= (b
& 1) + OT_BYTE
;
5730 modrm
= cpu_ldub_code(env
, s
->pc
++);
5731 reg
= ((modrm
>> 3) & 7) | rex_r
;
5732 mod
= (modrm
>> 6) & 3;
5733 rm
= (modrm
& 7) | REX_B(s
);
5736 gen_op_mov_TN_reg(ot
, 0, rm
);
5737 switch(ot
| (b
& 8)) {
5739 tcg_gen_ext8u_tl(cpu_T
[0], cpu_T
[0]);
5742 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
5745 tcg_gen_ext16u_tl(cpu_T
[0], cpu_T
[0]);
5749 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5752 gen_op_mov_reg_T0(d_ot
, reg
);
5754 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5756 gen_op_lds_T0_A0(ot
+ s
->mem_index
);
5758 gen_op_ldu_T0_A0(ot
+ s
->mem_index
);
5760 gen_op_mov_reg_T0(d_ot
, reg
);
5765 case 0x8d: /* lea */
5766 ot
= dflag
+ OT_WORD
;
5767 modrm
= cpu_ldub_code(env
, s
->pc
++);
5768 mod
= (modrm
>> 6) & 3;
5771 reg
= ((modrm
>> 3) & 7) | rex_r
;
5772 /* we must ensure that no segment is added */
5776 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5778 gen_op_mov_reg_A0(ot
- OT_WORD
, reg
);
5781 case 0xa0: /* mov EAX, Ov */
5783 case 0xa2: /* mov Ov, EAX */
5786 target_ulong offset_addr
;
5791 ot
= dflag
+ OT_WORD
;
5792 #ifdef TARGET_X86_64
5793 if (s
->aflag
== 2) {
5794 offset_addr
= cpu_ldq_code(env
, s
->pc
);
5796 gen_op_movq_A0_im(offset_addr
);
5801 offset_addr
= insn_get(env
, s
, OT_LONG
);
5803 offset_addr
= insn_get(env
, s
, OT_WORD
);
5805 gen_op_movl_A0_im(offset_addr
);
5807 gen_add_A0_ds_seg(s
);
5809 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
5810 gen_op_mov_reg_T0(ot
, R_EAX
);
5812 gen_op_mov_TN_reg(ot
, 0, R_EAX
);
5813 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5817 case 0xd7: /* xlat */
5818 #ifdef TARGET_X86_64
5819 if (s
->aflag
== 2) {
5820 gen_op_movq_A0_reg(R_EBX
);
5821 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EAX
);
5822 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xff);
5823 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_T
[0]);
5827 gen_op_movl_A0_reg(R_EBX
);
5828 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
5829 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xff);
5830 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_T
[0]);
5832 gen_op_andl_A0_ffff();
5834 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
5836 gen_add_A0_ds_seg(s
);
5837 gen_op_ldu_T0_A0(OT_BYTE
+ s
->mem_index
);
5838 gen_op_mov_reg_T0(OT_BYTE
, R_EAX
);
5840 case 0xb0 ... 0xb7: /* mov R, Ib */
5841 val
= insn_get(env
, s
, OT_BYTE
);
5842 gen_op_movl_T0_im(val
);
5843 gen_op_mov_reg_T0(OT_BYTE
, (b
& 7) | REX_B(s
));
5845 case 0xb8 ... 0xbf: /* mov R, Iv */
5846 #ifdef TARGET_X86_64
5850 tmp
= cpu_ldq_code(env
, s
->pc
);
5852 reg
= (b
& 7) | REX_B(s
);
5853 gen_movtl_T0_im(tmp
);
5854 gen_op_mov_reg_T0(OT_QUAD
, reg
);
5858 ot
= dflag
? OT_LONG
: OT_WORD
;
5859 val
= insn_get(env
, s
, ot
);
5860 reg
= (b
& 7) | REX_B(s
);
5861 gen_op_movl_T0_im(val
);
5862 gen_op_mov_reg_T0(ot
, reg
);
5866 case 0x91 ... 0x97: /* xchg R, EAX */
5868 ot
= dflag
+ OT_WORD
;
5869 reg
= (b
& 7) | REX_B(s
);
5873 case 0x87: /* xchg Ev, Gv */
5877 ot
= dflag
+ OT_WORD
;
5878 modrm
= cpu_ldub_code(env
, s
->pc
++);
5879 reg
= ((modrm
>> 3) & 7) | rex_r
;
5880 mod
= (modrm
>> 6) & 3;
5882 rm
= (modrm
& 7) | REX_B(s
);
5884 gen_op_mov_TN_reg(ot
, 0, reg
);
5885 gen_op_mov_TN_reg(ot
, 1, rm
);
5886 gen_op_mov_reg_T0(ot
, rm
);
5887 gen_op_mov_reg_T1(ot
, reg
);
5889 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5890 gen_op_mov_TN_reg(ot
, 0, reg
);
5891 /* for xchg, lock is implicit */
5892 if (!(prefixes
& PREFIX_LOCK
))
5894 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5895 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5896 if (!(prefixes
& PREFIX_LOCK
))
5897 gen_helper_unlock();
5898 gen_op_mov_reg_T1(ot
, reg
);
5901 case 0xc4: /* les Gv */
5902 /* In CODE64 this is VEX3; see above. */
5905 case 0xc5: /* lds Gv */
5906 /* In CODE64 this is VEX2; see above. */
5909 case 0x1b2: /* lss Gv */
5912 case 0x1b4: /* lfs Gv */
5915 case 0x1b5: /* lgs Gv */
5918 ot
= dflag
? OT_LONG
: OT_WORD
;
5919 modrm
= cpu_ldub_code(env
, s
->pc
++);
5920 reg
= ((modrm
>> 3) & 7) | rex_r
;
5921 mod
= (modrm
>> 6) & 3;
5924 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5925 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5926 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
5927 /* load the segment first to handle exceptions properly */
5928 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
5929 gen_movl_seg_T0(s
, op
, pc_start
- s
->cs_base
);
5930 /* then put the data */
5931 gen_op_mov_reg_T1(ot
, reg
);
5933 gen_jmp_im(s
->pc
- s
->cs_base
);
5938 /************************/
5949 ot
= dflag
+ OT_WORD
;
5951 modrm
= cpu_ldub_code(env
, s
->pc
++);
5952 mod
= (modrm
>> 6) & 3;
5953 op
= (modrm
>> 3) & 7;
5959 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5962 opreg
= (modrm
& 7) | REX_B(s
);
5967 gen_shift(s
, op
, ot
, opreg
, OR_ECX
);
5970 shift
= cpu_ldub_code(env
, s
->pc
++);
5972 gen_shifti(s
, op
, ot
, opreg
, shift
);
5987 case 0x1a4: /* shld imm */
5991 case 0x1a5: /* shld cl */
5995 case 0x1ac: /* shrd imm */
5999 case 0x1ad: /* shrd cl */
6003 ot
= dflag
+ OT_WORD
;
6004 modrm
= cpu_ldub_code(env
, s
->pc
++);
6005 mod
= (modrm
>> 6) & 3;
6006 rm
= (modrm
& 7) | REX_B(s
);
6007 reg
= ((modrm
>> 3) & 7) | rex_r
;
6009 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
6014 gen_op_mov_TN_reg(ot
, 1, reg
);
6017 TCGv imm
= tcg_const_tl(cpu_ldub_code(env
, s
->pc
++));
6018 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, imm
);
6021 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, cpu_regs
[R_ECX
]);
6025 /************************/
6028 if (s
->flags
& (HF_EM_MASK
| HF_TS_MASK
)) {
6029 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
6030 /* XXX: what to do if illegal op ? */
6031 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
6034 modrm
= cpu_ldub_code(env
, s
->pc
++);
6035 mod
= (modrm
>> 6) & 3;
6037 op
= ((b
& 7) << 3) | ((modrm
>> 3) & 7);
6040 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
6042 case 0x00 ... 0x07: /* fxxxs */
6043 case 0x10 ... 0x17: /* fixxxl */
6044 case 0x20 ... 0x27: /* fxxxl */
6045 case 0x30 ... 0x37: /* fixxx */
6052 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
6053 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6054 gen_helper_flds_FT0(cpu_env
, cpu_tmp2_i32
);
6057 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
6058 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6059 gen_helper_fildl_FT0(cpu_env
, cpu_tmp2_i32
);
6062 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
6063 (s
->mem_index
>> 2) - 1);
6064 gen_helper_fldl_FT0(cpu_env
, cpu_tmp1_i64
);
6068 gen_op_lds_T0_A0(OT_WORD
+ s
->mem_index
);
6069 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6070 gen_helper_fildl_FT0(cpu_env
, cpu_tmp2_i32
);
6074 gen_helper_fp_arith_ST0_FT0(op1
);
6076 /* fcomp needs pop */
6077 gen_helper_fpop(cpu_env
);
6081 case 0x08: /* flds */
6082 case 0x0a: /* fsts */
6083 case 0x0b: /* fstps */
6084 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
6085 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
6086 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
6091 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
6092 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6093 gen_helper_flds_ST0(cpu_env
, cpu_tmp2_i32
);
6096 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
6097 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6098 gen_helper_fildl_ST0(cpu_env
, cpu_tmp2_i32
);
6101 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
6102 (s
->mem_index
>> 2) - 1);
6103 gen_helper_fldl_ST0(cpu_env
, cpu_tmp1_i64
);
6107 gen_op_lds_T0_A0(OT_WORD
+ s
->mem_index
);
6108 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6109 gen_helper_fildl_ST0(cpu_env
, cpu_tmp2_i32
);
6114 /* XXX: the corresponding CPUID bit must be tested ! */
6117 gen_helper_fisttl_ST0(cpu_tmp2_i32
, cpu_env
);
6118 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6119 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
6122 gen_helper_fisttll_ST0(cpu_tmp1_i64
, cpu_env
);
6123 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
6124 (s
->mem_index
>> 2) - 1);
6128 gen_helper_fistt_ST0(cpu_tmp2_i32
, cpu_env
);
6129 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6130 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
6133 gen_helper_fpop(cpu_env
);
6138 gen_helper_fsts_ST0(cpu_tmp2_i32
, cpu_env
);
6139 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6140 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
6143 gen_helper_fistl_ST0(cpu_tmp2_i32
, cpu_env
);
6144 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6145 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
6148 gen_helper_fstl_ST0(cpu_tmp1_i64
, cpu_env
);
6149 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
6150 (s
->mem_index
>> 2) - 1);
6154 gen_helper_fist_ST0(cpu_tmp2_i32
, cpu_env
);
6155 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6156 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
6160 gen_helper_fpop(cpu_env
);
6164 case 0x0c: /* fldenv mem */
6165 gen_update_cc_op(s
);
6166 gen_jmp_im(pc_start
- s
->cs_base
);
6167 gen_helper_fldenv(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
6169 case 0x0d: /* fldcw mem */
6170 gen_op_ld_T0_A0(OT_WORD
+ s
->mem_index
);
6171 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6172 gen_helper_fldcw(cpu_env
, cpu_tmp2_i32
);
6174 case 0x0e: /* fnstenv mem */
6175 gen_update_cc_op(s
);
6176 gen_jmp_im(pc_start
- s
->cs_base
);
6177 gen_helper_fstenv(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
6179 case 0x0f: /* fnstcw mem */
6180 gen_helper_fnstcw(cpu_tmp2_i32
, cpu_env
);
6181 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6182 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
6184 case 0x1d: /* fldt mem */
6185 gen_update_cc_op(s
);
6186 gen_jmp_im(pc_start
- s
->cs_base
);
6187 gen_helper_fldt_ST0(cpu_env
, cpu_A0
);
6189 case 0x1f: /* fstpt mem */
6190 gen_update_cc_op(s
);
6191 gen_jmp_im(pc_start
- s
->cs_base
);
6192 gen_helper_fstt_ST0(cpu_env
, cpu_A0
);
6193 gen_helper_fpop(cpu_env
);
6195 case 0x2c: /* frstor mem */
6196 gen_update_cc_op(s
);
6197 gen_jmp_im(pc_start
- s
->cs_base
);
6198 gen_helper_frstor(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
6200 case 0x2e: /* fnsave mem */
6201 gen_update_cc_op(s
);
6202 gen_jmp_im(pc_start
- s
->cs_base
);
6203 gen_helper_fsave(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
6205 case 0x2f: /* fnstsw mem */
6206 gen_helper_fnstsw(cpu_tmp2_i32
, cpu_env
);
6207 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6208 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
6210 case 0x3c: /* fbld */
6211 gen_update_cc_op(s
);
6212 gen_jmp_im(pc_start
- s
->cs_base
);
6213 gen_helper_fbld_ST0(cpu_env
, cpu_A0
);
6215 case 0x3e: /* fbstp */
6216 gen_update_cc_op(s
);
6217 gen_jmp_im(pc_start
- s
->cs_base
);
6218 gen_helper_fbst_ST0(cpu_env
, cpu_A0
);
6219 gen_helper_fpop(cpu_env
);
6221 case 0x3d: /* fildll */
6222 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
6223 (s
->mem_index
>> 2) - 1);
6224 gen_helper_fildll_ST0(cpu_env
, cpu_tmp1_i64
);
6226 case 0x3f: /* fistpll */
6227 gen_helper_fistll_ST0(cpu_tmp1_i64
, cpu_env
);
6228 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
6229 (s
->mem_index
>> 2) - 1);
6230 gen_helper_fpop(cpu_env
);
6236 /* register float ops */
6240 case 0x08: /* fld sti */
6241 gen_helper_fpush(cpu_env
);
6242 gen_helper_fmov_ST0_STN(cpu_env
,
6243 tcg_const_i32((opreg
+ 1) & 7));
6245 case 0x09: /* fxchg sti */
6246 case 0x29: /* fxchg4 sti, undocumented op */
6247 case 0x39: /* fxchg7 sti, undocumented op */
6248 gen_helper_fxchg_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
6250 case 0x0a: /* grp d9/2 */
6253 /* check exceptions (FreeBSD FPU probe) */
6254 gen_update_cc_op(s
);
6255 gen_jmp_im(pc_start
- s
->cs_base
);
6256 gen_helper_fwait(cpu_env
);
6262 case 0x0c: /* grp d9/4 */
6265 gen_helper_fchs_ST0(cpu_env
);
6268 gen_helper_fabs_ST0(cpu_env
);
6271 gen_helper_fldz_FT0(cpu_env
);
6272 gen_helper_fcom_ST0_FT0(cpu_env
);
6275 gen_helper_fxam_ST0(cpu_env
);
6281 case 0x0d: /* grp d9/5 */
6285 gen_helper_fpush(cpu_env
);
6286 gen_helper_fld1_ST0(cpu_env
);
6289 gen_helper_fpush(cpu_env
);
6290 gen_helper_fldl2t_ST0(cpu_env
);
6293 gen_helper_fpush(cpu_env
);
6294 gen_helper_fldl2e_ST0(cpu_env
);
6297 gen_helper_fpush(cpu_env
);
6298 gen_helper_fldpi_ST0(cpu_env
);
6301 gen_helper_fpush(cpu_env
);
6302 gen_helper_fldlg2_ST0(cpu_env
);
6305 gen_helper_fpush(cpu_env
);
6306 gen_helper_fldln2_ST0(cpu_env
);
6309 gen_helper_fpush(cpu_env
);
6310 gen_helper_fldz_ST0(cpu_env
);
6317 case 0x0e: /* grp d9/6 */
6320 gen_helper_f2xm1(cpu_env
);
6323 gen_helper_fyl2x(cpu_env
);
6326 gen_helper_fptan(cpu_env
);
6328 case 3: /* fpatan */
6329 gen_helper_fpatan(cpu_env
);
6331 case 4: /* fxtract */
6332 gen_helper_fxtract(cpu_env
);
6334 case 5: /* fprem1 */
6335 gen_helper_fprem1(cpu_env
);
6337 case 6: /* fdecstp */
6338 gen_helper_fdecstp(cpu_env
);
6341 case 7: /* fincstp */
6342 gen_helper_fincstp(cpu_env
);
6346 case 0x0f: /* grp d9/7 */
6349 gen_helper_fprem(cpu_env
);
6351 case 1: /* fyl2xp1 */
6352 gen_helper_fyl2xp1(cpu_env
);
6355 gen_helper_fsqrt(cpu_env
);
6357 case 3: /* fsincos */
6358 gen_helper_fsincos(cpu_env
);
6360 case 5: /* fscale */
6361 gen_helper_fscale(cpu_env
);
6363 case 4: /* frndint */
6364 gen_helper_frndint(cpu_env
);
6367 gen_helper_fsin(cpu_env
);
6371 gen_helper_fcos(cpu_env
);
6375 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6376 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6377 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6383 gen_helper_fp_arith_STN_ST0(op1
, opreg
);
6385 gen_helper_fpop(cpu_env
);
6387 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6388 gen_helper_fp_arith_ST0_FT0(op1
);
6392 case 0x02: /* fcom */
6393 case 0x22: /* fcom2, undocumented op */
6394 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6395 gen_helper_fcom_ST0_FT0(cpu_env
);
6397 case 0x03: /* fcomp */
6398 case 0x23: /* fcomp3, undocumented op */
6399 case 0x32: /* fcomp5, undocumented op */
6400 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6401 gen_helper_fcom_ST0_FT0(cpu_env
);
6402 gen_helper_fpop(cpu_env
);
6404 case 0x15: /* da/5 */
6406 case 1: /* fucompp */
6407 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6408 gen_helper_fucom_ST0_FT0(cpu_env
);
6409 gen_helper_fpop(cpu_env
);
6410 gen_helper_fpop(cpu_env
);
6418 case 0: /* feni (287 only, just do nop here) */
6420 case 1: /* fdisi (287 only, just do nop here) */
6423 gen_helper_fclex(cpu_env
);
6425 case 3: /* fninit */
6426 gen_helper_fninit(cpu_env
);
6428 case 4: /* fsetpm (287 only, just do nop here) */
6434 case 0x1d: /* fucomi */
6435 gen_update_cc_op(s
);
6436 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6437 gen_helper_fucomi_ST0_FT0(cpu_env
);
6438 set_cc_op(s
, CC_OP_EFLAGS
);
6440 case 0x1e: /* fcomi */
6441 gen_update_cc_op(s
);
6442 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6443 gen_helper_fcomi_ST0_FT0(cpu_env
);
6444 set_cc_op(s
, CC_OP_EFLAGS
);
6446 case 0x28: /* ffree sti */
6447 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6449 case 0x2a: /* fst sti */
6450 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6452 case 0x2b: /* fstp sti */
6453 case 0x0b: /* fstp1 sti, undocumented op */
6454 case 0x3a: /* fstp8 sti, undocumented op */
6455 case 0x3b: /* fstp9 sti, undocumented op */
6456 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6457 gen_helper_fpop(cpu_env
);
6459 case 0x2c: /* fucom st(i) */
6460 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6461 gen_helper_fucom_ST0_FT0(cpu_env
);
6463 case 0x2d: /* fucomp st(i) */
6464 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6465 gen_helper_fucom_ST0_FT0(cpu_env
);
6466 gen_helper_fpop(cpu_env
);
6468 case 0x33: /* de/3 */
6470 case 1: /* fcompp */
6471 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6472 gen_helper_fcom_ST0_FT0(cpu_env
);
6473 gen_helper_fpop(cpu_env
);
6474 gen_helper_fpop(cpu_env
);
6480 case 0x38: /* ffreep sti, undocumented op */
6481 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6482 gen_helper_fpop(cpu_env
);
6484 case 0x3c: /* df/4 */
6487 gen_helper_fnstsw(cpu_tmp2_i32
, cpu_env
);
6488 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6489 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
6495 case 0x3d: /* fucomip */
6496 gen_update_cc_op(s
);
6497 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6498 gen_helper_fucomi_ST0_FT0(cpu_env
);
6499 gen_helper_fpop(cpu_env
);
6500 set_cc_op(s
, CC_OP_EFLAGS
);
6502 case 0x3e: /* fcomip */
6503 gen_update_cc_op(s
);
6504 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6505 gen_helper_fcomi_ST0_FT0(cpu_env
);
6506 gen_helper_fpop(cpu_env
);
6507 set_cc_op(s
, CC_OP_EFLAGS
);
6509 case 0x10 ... 0x13: /* fcmovxx */
6513 static const uint8_t fcmov_cc
[8] = {
6519 op1
= fcmov_cc
[op
& 3] | (((op
>> 3) & 1) ^ 1);
6520 l1
= gen_new_label();
6521 gen_jcc1_noeob(s
, op1
, l1
);
6522 gen_helper_fmov_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
6531 /************************/
6534 case 0xa4: /* movsS */
6539 ot
= dflag
+ OT_WORD
;
6541 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6542 gen_repz_movs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6548 case 0xaa: /* stosS */
6553 ot
= dflag
+ OT_WORD
;
6555 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6556 gen_repz_stos(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6561 case 0xac: /* lodsS */
6566 ot
= dflag
+ OT_WORD
;
6567 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6568 gen_repz_lods(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6573 case 0xae: /* scasS */
6578 ot
= dflag
+ OT_WORD
;
6579 if (prefixes
& PREFIX_REPNZ
) {
6580 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6581 } else if (prefixes
& PREFIX_REPZ
) {
6582 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6588 case 0xa6: /* cmpsS */
6593 ot
= dflag
+ OT_WORD
;
6594 if (prefixes
& PREFIX_REPNZ
) {
6595 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6596 } else if (prefixes
& PREFIX_REPZ
) {
6597 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6602 case 0x6c: /* insS */
6607 ot
= dflag
? OT_LONG
: OT_WORD
;
6608 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6609 gen_op_andl_T0_ffff();
6610 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6611 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
) | 4);
6612 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6613 gen_repz_ins(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6617 gen_jmp(s
, s
->pc
- s
->cs_base
);
6621 case 0x6e: /* outsS */
6626 ot
= dflag
? OT_LONG
: OT_WORD
;
6627 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6628 gen_op_andl_T0_ffff();
6629 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6630 svm_is_rep(prefixes
) | 4);
6631 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6632 gen_repz_outs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6636 gen_jmp(s
, s
->pc
- s
->cs_base
);
6641 /************************/
6649 ot
= dflag
? OT_LONG
: OT_WORD
;
6650 val
= cpu_ldub_code(env
, s
->pc
++);
6651 gen_op_movl_T0_im(val
);
6652 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6653 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6656 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6657 gen_helper_in_func(ot
, cpu_T
[1], cpu_tmp2_i32
);
6658 gen_op_mov_reg_T1(ot
, R_EAX
);
6661 gen_jmp(s
, s
->pc
- s
->cs_base
);
6669 ot
= dflag
? OT_LONG
: OT_WORD
;
6670 val
= cpu_ldub_code(env
, s
->pc
++);
6671 gen_op_movl_T0_im(val
);
6672 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6673 svm_is_rep(prefixes
));
6674 gen_op_mov_TN_reg(ot
, 1, R_EAX
);
6678 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6679 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
6680 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6683 gen_jmp(s
, s
->pc
- s
->cs_base
);
6691 ot
= dflag
? OT_LONG
: OT_WORD
;
6692 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6693 gen_op_andl_T0_ffff();
6694 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6695 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6698 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6699 gen_helper_in_func(ot
, cpu_T
[1], cpu_tmp2_i32
);
6700 gen_op_mov_reg_T1(ot
, R_EAX
);
6703 gen_jmp(s
, s
->pc
- s
->cs_base
);
6711 ot
= dflag
? OT_LONG
: OT_WORD
;
6712 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6713 gen_op_andl_T0_ffff();
6714 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6715 svm_is_rep(prefixes
));
6716 gen_op_mov_TN_reg(ot
, 1, R_EAX
);
6720 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6721 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
6722 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6725 gen_jmp(s
, s
->pc
- s
->cs_base
);
6729 /************************/
6731 case 0xc2: /* ret im */
6732 val
= cpu_ldsw_code(env
, s
->pc
);
6735 if (CODE64(s
) && s
->dflag
)
6737 gen_stack_update(s
, val
+ (2 << s
->dflag
));
6739 gen_op_andl_T0_ffff();
6743 case 0xc3: /* ret */
6747 gen_op_andl_T0_ffff();
6751 case 0xca: /* lret im */
6752 val
= cpu_ldsw_code(env
, s
->pc
);
6755 if (s
->pe
&& !s
->vm86
) {
6756 gen_update_cc_op(s
);
6757 gen_jmp_im(pc_start
- s
->cs_base
);
6758 gen_helper_lret_protected(cpu_env
, tcg_const_i32(s
->dflag
),
6759 tcg_const_i32(val
));
6763 gen_op_ld_T0_A0(1 + s
->dflag
+ s
->mem_index
);
6765 gen_op_andl_T0_ffff();
6766 /* NOTE: keeping EIP updated is not a problem in case of
6770 gen_op_addl_A0_im(2 << s
->dflag
);
6771 gen_op_ld_T0_A0(1 + s
->dflag
+ s
->mem_index
);
6772 gen_op_movl_seg_T0_vm(R_CS
);
6773 /* add stack offset */
6774 gen_stack_update(s
, val
+ (4 << s
->dflag
));
6778 case 0xcb: /* lret */
6781 case 0xcf: /* iret */
6782 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IRET
);
6785 gen_helper_iret_real(cpu_env
, tcg_const_i32(s
->dflag
));
6786 set_cc_op(s
, CC_OP_EFLAGS
);
6787 } else if (s
->vm86
) {
6789 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6791 gen_helper_iret_real(cpu_env
, tcg_const_i32(s
->dflag
));
6792 set_cc_op(s
, CC_OP_EFLAGS
);
6795 gen_update_cc_op(s
);
6796 gen_jmp_im(pc_start
- s
->cs_base
);
6797 gen_helper_iret_protected(cpu_env
, tcg_const_i32(s
->dflag
),
6798 tcg_const_i32(s
->pc
- s
->cs_base
));
6799 set_cc_op(s
, CC_OP_EFLAGS
);
6803 case 0xe8: /* call im */
6806 tval
= (int32_t)insn_get(env
, s
, OT_LONG
);
6808 tval
= (int16_t)insn_get(env
, s
, OT_WORD
);
6809 next_eip
= s
->pc
- s
->cs_base
;
6815 gen_movtl_T0_im(next_eip
);
6820 case 0x9a: /* lcall im */
6822 unsigned int selector
, offset
;
6826 ot
= dflag
? OT_LONG
: OT_WORD
;
6827 offset
= insn_get(env
, s
, ot
);
6828 selector
= insn_get(env
, s
, OT_WORD
);
6830 gen_op_movl_T0_im(selector
);
6831 gen_op_movl_T1_imu(offset
);
6834 case 0xe9: /* jmp im */
6836 tval
= (int32_t)insn_get(env
, s
, OT_LONG
);
6838 tval
= (int16_t)insn_get(env
, s
, OT_WORD
);
6839 tval
+= s
->pc
- s
->cs_base
;
6846 case 0xea: /* ljmp im */
6848 unsigned int selector
, offset
;
6852 ot
= dflag
? OT_LONG
: OT_WORD
;
6853 offset
= insn_get(env
, s
, ot
);
6854 selector
= insn_get(env
, s
, OT_WORD
);
6856 gen_op_movl_T0_im(selector
);
6857 gen_op_movl_T1_imu(offset
);
6860 case 0xeb: /* jmp Jb */
6861 tval
= (int8_t)insn_get(env
, s
, OT_BYTE
);
6862 tval
+= s
->pc
- s
->cs_base
;
6867 case 0x70 ... 0x7f: /* jcc Jb */
6868 tval
= (int8_t)insn_get(env
, s
, OT_BYTE
);
6870 case 0x180 ... 0x18f: /* jcc Jv */
6872 tval
= (int32_t)insn_get(env
, s
, OT_LONG
);
6874 tval
= (int16_t)insn_get(env
, s
, OT_WORD
);
6877 next_eip
= s
->pc
- s
->cs_base
;
6881 gen_jcc(s
, b
, tval
, next_eip
);
6884 case 0x190 ... 0x19f: /* setcc Gv */
6885 modrm
= cpu_ldub_code(env
, s
->pc
++);
6886 gen_setcc1(s
, b
, cpu_T
[0]);
6887 gen_ldst_modrm(env
, s
, modrm
, OT_BYTE
, OR_TMP0
, 1);
6889 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6890 ot
= dflag
+ OT_WORD
;
6891 modrm
= cpu_ldub_code(env
, s
->pc
++);
6892 reg
= ((modrm
>> 3) & 7) | rex_r
;
6893 gen_cmovcc1(env
, s
, ot
, b
, modrm
, reg
);
6896 /************************/
6898 case 0x9c: /* pushf */
6899 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PUSHF
);
6900 if (s
->vm86
&& s
->iopl
!= 3) {
6901 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6903 gen_update_cc_op(s
);
6904 gen_helper_read_eflags(cpu_T
[0], cpu_env
);
6908 case 0x9d: /* popf */
6909 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_POPF
);
6910 if (s
->vm86
&& s
->iopl
!= 3) {
6911 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6916 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6917 tcg_const_i32((TF_MASK
| AC_MASK
|
6922 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6923 tcg_const_i32((TF_MASK
| AC_MASK
|
6925 IF_MASK
| IOPL_MASK
)
6929 if (s
->cpl
<= s
->iopl
) {
6931 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6932 tcg_const_i32((TF_MASK
|
6938 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6939 tcg_const_i32((TF_MASK
|
6948 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6949 tcg_const_i32((TF_MASK
| AC_MASK
|
6950 ID_MASK
| NT_MASK
)));
6952 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6953 tcg_const_i32((TF_MASK
| AC_MASK
|
6960 set_cc_op(s
, CC_OP_EFLAGS
);
6961 /* abort translation because TF/AC flag may change */
6962 gen_jmp_im(s
->pc
- s
->cs_base
);
6966 case 0x9e: /* sahf */
6967 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
6969 gen_op_mov_TN_reg(OT_BYTE
, 0, R_AH
);
6970 gen_compute_eflags(s
);
6971 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, CC_O
);
6972 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
6973 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_T
[0]);
6975 case 0x9f: /* lahf */
6976 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
6978 gen_compute_eflags(s
);
6979 /* Note: gen_compute_eflags() only gives the condition codes */
6980 tcg_gen_ori_tl(cpu_T
[0], cpu_cc_src
, 0x02);
6981 gen_op_mov_reg_T0(OT_BYTE
, R_AH
);
6983 case 0xf5: /* cmc */
6984 gen_compute_eflags(s
);
6985 tcg_gen_xori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
6987 case 0xf8: /* clc */
6988 gen_compute_eflags(s
);
6989 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_C
);
6991 case 0xf9: /* stc */
6992 gen_compute_eflags(s
);
6993 tcg_gen_ori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
6995 case 0xfc: /* cld */
6996 tcg_gen_movi_i32(cpu_tmp2_i32
, 1);
6997 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
6999 case 0xfd: /* std */
7000 tcg_gen_movi_i32(cpu_tmp2_i32
, -1);
7001 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
7004 /************************/
7005 /* bit operations */
7006 case 0x1ba: /* bt/bts/btr/btc Gv, im */
7007 ot
= dflag
+ OT_WORD
;
7008 modrm
= cpu_ldub_code(env
, s
->pc
++);
7009 op
= (modrm
>> 3) & 7;
7010 mod
= (modrm
>> 6) & 3;
7011 rm
= (modrm
& 7) | REX_B(s
);
7014 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7015 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
7017 gen_op_mov_TN_reg(ot
, 0, rm
);
7020 val
= cpu_ldub_code(env
, s
->pc
++);
7021 gen_op_movl_T1_im(val
);
7026 case 0x1a3: /* bt Gv, Ev */
7029 case 0x1ab: /* bts */
7032 case 0x1b3: /* btr */
7035 case 0x1bb: /* btc */
7038 ot
= dflag
+ OT_WORD
;
7039 modrm
= cpu_ldub_code(env
, s
->pc
++);
7040 reg
= ((modrm
>> 3) & 7) | rex_r
;
7041 mod
= (modrm
>> 6) & 3;
7042 rm
= (modrm
& 7) | REX_B(s
);
7043 gen_op_mov_TN_reg(OT_LONG
, 1, reg
);
7045 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7046 /* specific case: we need to add a displacement */
7047 gen_exts(ot
, cpu_T
[1]);
7048 tcg_gen_sari_tl(cpu_tmp0
, cpu_T
[1], 3 + ot
);
7049 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, ot
);
7050 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
7051 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
7053 gen_op_mov_TN_reg(ot
, 0, rm
);
7056 tcg_gen_andi_tl(cpu_T
[1], cpu_T
[1], (1 << (3 + ot
)) - 1);
7059 tcg_gen_shr_tl(cpu_cc_src
, cpu_T
[0], cpu_T
[1]);
7060 tcg_gen_movi_tl(cpu_cc_dst
, 0);
7063 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
7064 tcg_gen_movi_tl(cpu_tmp0
, 1);
7065 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
7066 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
7069 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
7070 tcg_gen_movi_tl(cpu_tmp0
, 1);
7071 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
7072 tcg_gen_not_tl(cpu_tmp0
, cpu_tmp0
);
7073 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
7077 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
7078 tcg_gen_movi_tl(cpu_tmp0
, 1);
7079 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
7080 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
7083 set_cc_op(s
, CC_OP_SARB
+ ot
);
7086 gen_op_st_T0_A0(ot
+ s
->mem_index
);
7088 gen_op_mov_reg_T0(ot
, rm
);
7089 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
7090 tcg_gen_movi_tl(cpu_cc_dst
, 0);
7093 case 0x1bc: /* bsf / tzcnt */
7094 case 0x1bd: /* bsr / lzcnt */
7095 ot
= dflag
+ OT_WORD
;
7096 modrm
= cpu_ldub_code(env
, s
->pc
++);
7097 reg
= ((modrm
>> 3) & 7) | rex_r
;
7098 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
7099 gen_extu(ot
, cpu_T
[0]);
7101 /* Note that lzcnt and tzcnt are in different extensions. */
7102 if ((prefixes
& PREFIX_REPZ
)
7104 ? s
->cpuid_ext3_features
& CPUID_EXT3_ABM
7105 : s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)) {
7107 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
7109 /* For lzcnt, reduce the target_ulong result by the
7110 number of zeros that we expect to find at the top. */
7111 gen_helper_clz(cpu_T
[0], cpu_T
[0]);
7112 tcg_gen_subi_tl(cpu_T
[0], cpu_T
[0], TARGET_LONG_BITS
- size
);
7114 /* For tzcnt, a zero input must return the operand size:
7115 force all bits outside the operand size to 1. */
7116 target_ulong mask
= (target_ulong
)-2 << (size
- 1);
7117 tcg_gen_ori_tl(cpu_T
[0], cpu_T
[0], mask
);
7118 gen_helper_ctz(cpu_T
[0], cpu_T
[0]);
7120 /* For lzcnt/tzcnt, C and Z bits are defined and are
7121 related to the result. */
7122 gen_op_update1_cc();
7123 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
7125 /* For bsr/bsf, only the Z bit is defined and it is related
7126 to the input and not the result. */
7127 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
7128 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
7130 /* For bsr, return the bit index of the first 1 bit,
7131 not the count of leading zeros. */
7132 gen_helper_clz(cpu_T
[0], cpu_T
[0]);
7133 tcg_gen_xori_tl(cpu_T
[0], cpu_T
[0], TARGET_LONG_BITS
- 1);
7135 gen_helper_ctz(cpu_T
[0], cpu_T
[0]);
7137 /* ??? The manual says that the output is undefined when the
7138 input is zero, but real hardware leaves it unchanged, and
7139 real programs appear to depend on that. */
7140 tcg_gen_movi_tl(cpu_tmp0
, 0);
7141 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_T
[0], cpu_cc_dst
, cpu_tmp0
,
7142 cpu_regs
[reg
], cpu_T
[0]);
7144 gen_op_mov_reg_T0(ot
, reg
);
7146 /************************/
7148 case 0x27: /* daa */
7151 gen_update_cc_op(s
);
7152 gen_helper_daa(cpu_env
);
7153 set_cc_op(s
, CC_OP_EFLAGS
);
7155 case 0x2f: /* das */
7158 gen_update_cc_op(s
);
7159 gen_helper_das(cpu_env
);
7160 set_cc_op(s
, CC_OP_EFLAGS
);
7162 case 0x37: /* aaa */
7165 gen_update_cc_op(s
);
7166 gen_helper_aaa(cpu_env
);
7167 set_cc_op(s
, CC_OP_EFLAGS
);
7169 case 0x3f: /* aas */
7172 gen_update_cc_op(s
);
7173 gen_helper_aas(cpu_env
);
7174 set_cc_op(s
, CC_OP_EFLAGS
);
7176 case 0xd4: /* aam */
7179 val
= cpu_ldub_code(env
, s
->pc
++);
7181 gen_exception(s
, EXCP00_DIVZ
, pc_start
- s
->cs_base
);
7183 gen_helper_aam(cpu_env
, tcg_const_i32(val
));
7184 set_cc_op(s
, CC_OP_LOGICB
);
7187 case 0xd5: /* aad */
7190 val
= cpu_ldub_code(env
, s
->pc
++);
7191 gen_helper_aad(cpu_env
, tcg_const_i32(val
));
7192 set_cc_op(s
, CC_OP_LOGICB
);
7194 /************************/
7196 case 0x90: /* nop */
7197 /* XXX: correct lock test for all insn */
7198 if (prefixes
& PREFIX_LOCK
) {
7201 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
7203 goto do_xchg_reg_eax
;
7205 if (prefixes
& PREFIX_REPZ
) {
7206 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PAUSE
);
7209 case 0x9b: /* fwait */
7210 if ((s
->flags
& (HF_MP_MASK
| HF_TS_MASK
)) ==
7211 (HF_MP_MASK
| HF_TS_MASK
)) {
7212 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7214 gen_update_cc_op(s
);
7215 gen_jmp_im(pc_start
- s
->cs_base
);
7216 gen_helper_fwait(cpu_env
);
7219 case 0xcc: /* int3 */
7220 gen_interrupt(s
, EXCP03_INT3
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
7222 case 0xcd: /* int N */
7223 val
= cpu_ldub_code(env
, s
->pc
++);
7224 if (s
->vm86
&& s
->iopl
!= 3) {
7225 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7227 gen_interrupt(s
, val
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
7230 case 0xce: /* into */
7233 gen_update_cc_op(s
);
7234 gen_jmp_im(pc_start
- s
->cs_base
);
7235 gen_helper_into(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7238 case 0xf1: /* icebp (undocumented, exits to external debugger) */
7239 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_ICEBP
);
7241 gen_debug(s
, pc_start
- s
->cs_base
);
7245 qemu_set_log(CPU_LOG_INT
| CPU_LOG_TB_IN_ASM
);
7249 case 0xfa: /* cli */
7251 if (s
->cpl
<= s
->iopl
) {
7252 gen_helper_cli(cpu_env
);
7254 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7258 gen_helper_cli(cpu_env
);
7260 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7264 case 0xfb: /* sti */
7266 if (s
->cpl
<= s
->iopl
) {
7268 gen_helper_sti(cpu_env
);
7269 /* interruptions are enabled only the first insn after sti */
7270 /* If several instructions disable interrupts, only the
7272 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
7273 gen_helper_set_inhibit_irq(cpu_env
);
7274 /* give a chance to handle pending irqs */
7275 gen_jmp_im(s
->pc
- s
->cs_base
);
7278 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7284 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7288 case 0x62: /* bound */
7291 ot
= dflag
? OT_LONG
: OT_WORD
;
7292 modrm
= cpu_ldub_code(env
, s
->pc
++);
7293 reg
= (modrm
>> 3) & 7;
7294 mod
= (modrm
>> 6) & 3;
7297 gen_op_mov_TN_reg(ot
, 0, reg
);
7298 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7299 gen_jmp_im(pc_start
- s
->cs_base
);
7300 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7301 if (ot
== OT_WORD
) {
7302 gen_helper_boundw(cpu_env
, cpu_A0
, cpu_tmp2_i32
);
7304 gen_helper_boundl(cpu_env
, cpu_A0
, cpu_tmp2_i32
);
7307 case 0x1c8 ... 0x1cf: /* bswap reg */
7308 reg
= (b
& 7) | REX_B(s
);
7309 #ifdef TARGET_X86_64
7311 gen_op_mov_TN_reg(OT_QUAD
, 0, reg
);
7312 tcg_gen_bswap64_i64(cpu_T
[0], cpu_T
[0]);
7313 gen_op_mov_reg_T0(OT_QUAD
, reg
);
7317 gen_op_mov_TN_reg(OT_LONG
, 0, reg
);
7318 tcg_gen_ext32u_tl(cpu_T
[0], cpu_T
[0]);
7319 tcg_gen_bswap32_tl(cpu_T
[0], cpu_T
[0]);
7320 gen_op_mov_reg_T0(OT_LONG
, reg
);
7323 case 0xd6: /* salc */
7326 gen_compute_eflags_c(s
, cpu_T
[0]);
7327 tcg_gen_neg_tl(cpu_T
[0], cpu_T
[0]);
7328 gen_op_mov_reg_T0(OT_BYTE
, R_EAX
);
7330 case 0xe0: /* loopnz */
7331 case 0xe1: /* loopz */
7332 case 0xe2: /* loop */
7333 case 0xe3: /* jecxz */
7337 tval
= (int8_t)insn_get(env
, s
, OT_BYTE
);
7338 next_eip
= s
->pc
- s
->cs_base
;
7343 l1
= gen_new_label();
7344 l2
= gen_new_label();
7345 l3
= gen_new_label();
7348 case 0: /* loopnz */
7350 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
7351 gen_op_jz_ecx(s
->aflag
, l3
);
7352 gen_jcc1(s
, (JCC_Z
<< 1) | (b
^ 1), l1
);
7355 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
7356 gen_op_jnz_ecx(s
->aflag
, l1
);
7360 gen_op_jz_ecx(s
->aflag
, l1
);
7365 gen_jmp_im(next_eip
);
7374 case 0x130: /* wrmsr */
7375 case 0x132: /* rdmsr */
7377 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7379 gen_update_cc_op(s
);
7380 gen_jmp_im(pc_start
- s
->cs_base
);
7382 gen_helper_rdmsr(cpu_env
);
7384 gen_helper_wrmsr(cpu_env
);
7388 case 0x131: /* rdtsc */
7389 gen_update_cc_op(s
);
7390 gen_jmp_im(pc_start
- s
->cs_base
);
7393 gen_helper_rdtsc(cpu_env
);
7396 gen_jmp(s
, s
->pc
- s
->cs_base
);
7399 case 0x133: /* rdpmc */
7400 gen_update_cc_op(s
);
7401 gen_jmp_im(pc_start
- s
->cs_base
);
7402 gen_helper_rdpmc(cpu_env
);
7404 case 0x134: /* sysenter */
7405 /* For Intel SYSENTER is valid on 64-bit */
7406 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
7409 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7411 gen_update_cc_op(s
);
7412 gen_jmp_im(pc_start
- s
->cs_base
);
7413 gen_helper_sysenter(cpu_env
);
7417 case 0x135: /* sysexit */
7418 /* For Intel SYSEXIT is valid on 64-bit */
7419 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
7422 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7424 gen_update_cc_op(s
);
7425 gen_jmp_im(pc_start
- s
->cs_base
);
7426 gen_helper_sysexit(cpu_env
, tcg_const_i32(dflag
));
7430 #ifdef TARGET_X86_64
7431 case 0x105: /* syscall */
7432 /* XXX: is it usable in real mode ? */
7433 gen_update_cc_op(s
);
7434 gen_jmp_im(pc_start
- s
->cs_base
);
7435 gen_helper_syscall(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7438 case 0x107: /* sysret */
7440 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7442 gen_update_cc_op(s
);
7443 gen_jmp_im(pc_start
- s
->cs_base
);
7444 gen_helper_sysret(cpu_env
, tcg_const_i32(s
->dflag
));
7445 /* condition codes are modified only in long mode */
7447 set_cc_op(s
, CC_OP_EFLAGS
);
7453 case 0x1a2: /* cpuid */
7454 gen_update_cc_op(s
);
7455 gen_jmp_im(pc_start
- s
->cs_base
);
7456 gen_helper_cpuid(cpu_env
);
7458 case 0xf4: /* hlt */
7460 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7462 gen_update_cc_op(s
);
7463 gen_jmp_im(pc_start
- s
->cs_base
);
7464 gen_helper_hlt(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7465 s
->is_jmp
= DISAS_TB_JUMP
;
7469 modrm
= cpu_ldub_code(env
, s
->pc
++);
7470 mod
= (modrm
>> 6) & 3;
7471 op
= (modrm
>> 3) & 7;
7474 if (!s
->pe
|| s
->vm86
)
7476 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_READ
);
7477 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,ldt
.selector
));
7481 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7484 if (!s
->pe
|| s
->vm86
)
7487 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7489 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_WRITE
);
7490 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7491 gen_jmp_im(pc_start
- s
->cs_base
);
7492 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7493 gen_helper_lldt(cpu_env
, cpu_tmp2_i32
);
7497 if (!s
->pe
|| s
->vm86
)
7499 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_READ
);
7500 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,tr
.selector
));
7504 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7507 if (!s
->pe
|| s
->vm86
)
7510 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7512 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_WRITE
);
7513 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7514 gen_jmp_im(pc_start
- s
->cs_base
);
7515 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7516 gen_helper_ltr(cpu_env
, cpu_tmp2_i32
);
7521 if (!s
->pe
|| s
->vm86
)
7523 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7524 gen_update_cc_op(s
);
7526 gen_helper_verr(cpu_env
, cpu_T
[0]);
7528 gen_helper_verw(cpu_env
, cpu_T
[0]);
7530 set_cc_op(s
, CC_OP_EFLAGS
);
7537 modrm
= cpu_ldub_code(env
, s
->pc
++);
7538 mod
= (modrm
>> 6) & 3;
7539 op
= (modrm
>> 3) & 7;
7545 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_GDTR_READ
);
7546 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7547 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, gdt
.limit
));
7548 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
7549 gen_add_A0_im(s
, 2);
7550 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, gdt
.base
));
7552 gen_op_andl_T0_im(0xffffff);
7553 gen_op_st_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7558 case 0: /* monitor */
7559 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) ||
7562 gen_update_cc_op(s
);
7563 gen_jmp_im(pc_start
- s
->cs_base
);
7564 #ifdef TARGET_X86_64
7565 if (s
->aflag
== 2) {
7566 gen_op_movq_A0_reg(R_EAX
);
7570 gen_op_movl_A0_reg(R_EAX
);
7572 gen_op_andl_A0_ffff();
7574 gen_add_A0_ds_seg(s
);
7575 gen_helper_monitor(cpu_env
, cpu_A0
);
7578 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) ||
7581 gen_update_cc_op(s
);
7582 gen_jmp_im(pc_start
- s
->cs_base
);
7583 gen_helper_mwait(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7587 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
) ||
7591 gen_helper_clac(cpu_env
);
7592 gen_jmp_im(s
->pc
- s
->cs_base
);
7596 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
) ||
7600 gen_helper_stac(cpu_env
);
7601 gen_jmp_im(s
->pc
- s
->cs_base
);
7608 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IDTR_READ
);
7609 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7610 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, idt
.limit
));
7611 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
7612 gen_add_A0_im(s
, 2);
7613 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, idt
.base
));
7615 gen_op_andl_T0_im(0xffffff);
7616 gen_op_st_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7622 gen_update_cc_op(s
);
7623 gen_jmp_im(pc_start
- s
->cs_base
);
7626 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7629 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7632 gen_helper_vmrun(cpu_env
, tcg_const_i32(s
->aflag
),
7633 tcg_const_i32(s
->pc
- pc_start
));
7635 s
->is_jmp
= DISAS_TB_JUMP
;
7638 case 1: /* VMMCALL */
7639 if (!(s
->flags
& HF_SVME_MASK
))
7641 gen_helper_vmmcall(cpu_env
);
7643 case 2: /* VMLOAD */
7644 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7647 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7650 gen_helper_vmload(cpu_env
, tcg_const_i32(s
->aflag
));
7653 case 3: /* VMSAVE */
7654 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7657 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7660 gen_helper_vmsave(cpu_env
, tcg_const_i32(s
->aflag
));
7664 if ((!(s
->flags
& HF_SVME_MASK
) &&
7665 !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
)) ||
7669 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7672 gen_helper_stgi(cpu_env
);
7676 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7679 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7682 gen_helper_clgi(cpu_env
);
7685 case 6: /* SKINIT */
7686 if ((!(s
->flags
& HF_SVME_MASK
) &&
7687 !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
)) ||
7690 gen_helper_skinit(cpu_env
);
7692 case 7: /* INVLPGA */
7693 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7696 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7699 gen_helper_invlpga(cpu_env
, tcg_const_i32(s
->aflag
));
7705 } else if (s
->cpl
!= 0) {
7706 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7708 gen_svm_check_intercept(s
, pc_start
,
7709 op
==2 ? SVM_EXIT_GDTR_WRITE
: SVM_EXIT_IDTR_WRITE
);
7710 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7711 gen_op_ld_T1_A0(OT_WORD
+ s
->mem_index
);
7712 gen_add_A0_im(s
, 2);
7713 gen_op_ld_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7715 gen_op_andl_T0_im(0xffffff);
7717 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,gdt
.base
));
7718 tcg_gen_st32_tl(cpu_T
[1], cpu_env
, offsetof(CPUX86State
,gdt
.limit
));
7720 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,idt
.base
));
7721 tcg_gen_st32_tl(cpu_T
[1], cpu_env
, offsetof(CPUX86State
,idt
.limit
));
7726 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_READ_CR0
);
7727 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7728 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,cr
[0]) + 4);
7730 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,cr
[0]));
7732 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 1);
7736 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7738 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_CR0
);
7739 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7740 gen_helper_lmsw(cpu_env
, cpu_T
[0]);
7741 gen_jmp_im(s
->pc
- s
->cs_base
);
7746 if (mod
!= 3) { /* invlpg */
7748 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7750 gen_update_cc_op(s
);
7751 gen_jmp_im(pc_start
- s
->cs_base
);
7752 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7753 gen_helper_invlpg(cpu_env
, cpu_A0
);
7754 gen_jmp_im(s
->pc
- s
->cs_base
);
7759 case 0: /* swapgs */
7760 #ifdef TARGET_X86_64
7763 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7765 tcg_gen_ld_tl(cpu_T
[0], cpu_env
,
7766 offsetof(CPUX86State
,segs
[R_GS
].base
));
7767 tcg_gen_ld_tl(cpu_T
[1], cpu_env
,
7768 offsetof(CPUX86State
,kernelgsbase
));
7769 tcg_gen_st_tl(cpu_T
[1], cpu_env
,
7770 offsetof(CPUX86State
,segs
[R_GS
].base
));
7771 tcg_gen_st_tl(cpu_T
[0], cpu_env
,
7772 offsetof(CPUX86State
,kernelgsbase
));
7780 case 1: /* rdtscp */
7781 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_RDTSCP
))
7783 gen_update_cc_op(s
);
7784 gen_jmp_im(pc_start
- s
->cs_base
);
7787 gen_helper_rdtscp(cpu_env
);
7790 gen_jmp(s
, s
->pc
- s
->cs_base
);
7802 case 0x108: /* invd */
7803 case 0x109: /* wbinvd */
7805 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7807 gen_svm_check_intercept(s
, pc_start
, (b
& 2) ? SVM_EXIT_INVD
: SVM_EXIT_WBINVD
);
7811 case 0x63: /* arpl or movslS (x86_64) */
7812 #ifdef TARGET_X86_64
7815 /* d_ot is the size of destination */
7816 d_ot
= dflag
+ OT_WORD
;
7818 modrm
= cpu_ldub_code(env
, s
->pc
++);
7819 reg
= ((modrm
>> 3) & 7) | rex_r
;
7820 mod
= (modrm
>> 6) & 3;
7821 rm
= (modrm
& 7) | REX_B(s
);
7824 gen_op_mov_TN_reg(OT_LONG
, 0, rm
);
7826 if (d_ot
== OT_QUAD
)
7827 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
7828 gen_op_mov_reg_T0(d_ot
, reg
);
7830 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7831 if (d_ot
== OT_QUAD
) {
7832 gen_op_lds_T0_A0(OT_LONG
+ s
->mem_index
);
7834 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
7836 gen_op_mov_reg_T0(d_ot
, reg
);
7842 TCGv t0
, t1
, t2
, a0
;
7844 if (!s
->pe
|| s
->vm86
)
7846 t0
= tcg_temp_local_new();
7847 t1
= tcg_temp_local_new();
7848 t2
= tcg_temp_local_new();
7850 modrm
= cpu_ldub_code(env
, s
->pc
++);
7851 reg
= (modrm
>> 3) & 7;
7852 mod
= (modrm
>> 6) & 3;
7855 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7856 gen_op_ld_v(ot
+ s
->mem_index
, t0
, cpu_A0
);
7857 a0
= tcg_temp_local_new();
7858 tcg_gen_mov_tl(a0
, cpu_A0
);
7860 gen_op_mov_v_reg(ot
, t0
, rm
);
7863 gen_op_mov_v_reg(ot
, t1
, reg
);
7864 tcg_gen_andi_tl(cpu_tmp0
, t0
, 3);
7865 tcg_gen_andi_tl(t1
, t1
, 3);
7866 tcg_gen_movi_tl(t2
, 0);
7867 label1
= gen_new_label();
7868 tcg_gen_brcond_tl(TCG_COND_GE
, cpu_tmp0
, t1
, label1
);
7869 tcg_gen_andi_tl(t0
, t0
, ~3);
7870 tcg_gen_or_tl(t0
, t0
, t1
);
7871 tcg_gen_movi_tl(t2
, CC_Z
);
7872 gen_set_label(label1
);
7874 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
7877 gen_op_mov_reg_v(ot
, rm
, t0
);
7879 gen_compute_eflags(s
);
7880 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_Z
);
7881 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t2
);
7887 case 0x102: /* lar */
7888 case 0x103: /* lsl */
7892 if (!s
->pe
|| s
->vm86
)
7894 ot
= dflag
? OT_LONG
: OT_WORD
;
7895 modrm
= cpu_ldub_code(env
, s
->pc
++);
7896 reg
= ((modrm
>> 3) & 7) | rex_r
;
7897 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7898 t0
= tcg_temp_local_new();
7899 gen_update_cc_op(s
);
7901 gen_helper_lar(t0
, cpu_env
, cpu_T
[0]);
7903 gen_helper_lsl(t0
, cpu_env
, cpu_T
[0]);
7905 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_src
, CC_Z
);
7906 label1
= gen_new_label();
7907 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_tmp0
, 0, label1
);
7908 gen_op_mov_reg_v(ot
, reg
, t0
);
7909 gen_set_label(label1
);
7910 set_cc_op(s
, CC_OP_EFLAGS
);
7915 modrm
= cpu_ldub_code(env
, s
->pc
++);
7916 mod
= (modrm
>> 6) & 3;
7917 op
= (modrm
>> 3) & 7;
7919 case 0: /* prefetchnta */
7920 case 1: /* prefetchnt0 */
7921 case 2: /* prefetchnt0 */
7922 case 3: /* prefetchnt0 */
7925 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7926 /* nothing more to do */
7928 default: /* nop (multi byte) */
7929 gen_nop_modrm(env
, s
, modrm
);
7933 case 0x119 ... 0x11f: /* nop (multi byte) */
7934 modrm
= cpu_ldub_code(env
, s
->pc
++);
7935 gen_nop_modrm(env
, s
, modrm
);
7937 case 0x120: /* mov reg, crN */
7938 case 0x122: /* mov crN, reg */
7940 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7942 modrm
= cpu_ldub_code(env
, s
->pc
++);
7943 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7944 * AMD documentation (24594.pdf) and testing of
7945 * intel 386 and 486 processors all show that the mod bits
7946 * are assumed to be 1's, regardless of actual values.
7948 rm
= (modrm
& 7) | REX_B(s
);
7949 reg
= ((modrm
>> 3) & 7) | rex_r
;
7954 if ((prefixes
& PREFIX_LOCK
) && (reg
== 0) &&
7955 (s
->cpuid_ext3_features
& CPUID_EXT3_CR8LEG
)) {
7964 gen_update_cc_op(s
);
7965 gen_jmp_im(pc_start
- s
->cs_base
);
7967 gen_op_mov_TN_reg(ot
, 0, rm
);
7968 gen_helper_write_crN(cpu_env
, tcg_const_i32(reg
),
7970 gen_jmp_im(s
->pc
- s
->cs_base
);
7973 gen_helper_read_crN(cpu_T
[0], cpu_env
, tcg_const_i32(reg
));
7974 gen_op_mov_reg_T0(ot
, rm
);
7982 case 0x121: /* mov reg, drN */
7983 case 0x123: /* mov drN, reg */
7985 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7987 modrm
= cpu_ldub_code(env
, s
->pc
++);
7988 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7989 * AMD documentation (24594.pdf) and testing of
7990 * intel 386 and 486 processors all show that the mod bits
7991 * are assumed to be 1's, regardless of actual values.
7993 rm
= (modrm
& 7) | REX_B(s
);
7994 reg
= ((modrm
>> 3) & 7) | rex_r
;
7999 /* XXX: do it dynamically with CR4.DE bit */
8000 if (reg
== 4 || reg
== 5 || reg
>= 8)
8003 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_DR0
+ reg
);
8004 gen_op_mov_TN_reg(ot
, 0, rm
);
8005 gen_helper_movl_drN_T0(cpu_env
, tcg_const_i32(reg
), cpu_T
[0]);
8006 gen_jmp_im(s
->pc
- s
->cs_base
);
8009 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_READ_DR0
+ reg
);
8010 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,dr
[reg
]));
8011 gen_op_mov_reg_T0(ot
, rm
);
8015 case 0x106: /* clts */
8017 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
8019 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_CR0
);
8020 gen_helper_clts(cpu_env
);
8021 /* abort block because static cpu state changed */
8022 gen_jmp_im(s
->pc
- s
->cs_base
);
8026 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
8027 case 0x1c3: /* MOVNTI reg, mem */
8028 if (!(s
->cpuid_features
& CPUID_SSE2
))
8030 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
8031 modrm
= cpu_ldub_code(env
, s
->pc
++);
8032 mod
= (modrm
>> 6) & 3;
8035 reg
= ((modrm
>> 3) & 7) | rex_r
;
8036 /* generate a generic store */
8037 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
8040 modrm
= cpu_ldub_code(env
, s
->pc
++);
8041 mod
= (modrm
>> 6) & 3;
8042 op
= (modrm
>> 3) & 7;
8044 case 0: /* fxsave */
8045 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
) ||
8046 (s
->prefix
& PREFIX_LOCK
))
8048 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
8049 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
8052 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
8053 gen_update_cc_op(s
);
8054 gen_jmp_im(pc_start
- s
->cs_base
);
8055 gen_helper_fxsave(cpu_env
, cpu_A0
, tcg_const_i32((s
->dflag
== 2)));
8057 case 1: /* fxrstor */
8058 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
) ||
8059 (s
->prefix
& PREFIX_LOCK
))
8061 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
8062 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
8065 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
8066 gen_update_cc_op(s
);
8067 gen_jmp_im(pc_start
- s
->cs_base
);
8068 gen_helper_fxrstor(cpu_env
, cpu_A0
,
8069 tcg_const_i32((s
->dflag
== 2)));
8071 case 2: /* ldmxcsr */
8072 case 3: /* stmxcsr */
8073 if (s
->flags
& HF_TS_MASK
) {
8074 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
8077 if ((s
->flags
& HF_EM_MASK
) || !(s
->flags
& HF_OSFXSR_MASK
) ||
8080 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
8082 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
8083 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
8084 gen_helper_ldmxcsr(cpu_env
, cpu_tmp2_i32
);
8086 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, mxcsr
));
8087 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
8090 case 5: /* lfence */
8091 case 6: /* mfence */
8092 if ((modrm
& 0xc7) != 0xc0 || !(s
->cpuid_features
& CPUID_SSE2
))
8095 case 7: /* sfence / clflush */
8096 if ((modrm
& 0xc7) == 0xc0) {
8098 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
8099 if (!(s
->cpuid_features
& CPUID_SSE
))
8103 if (!(s
->cpuid_features
& CPUID_CLFLUSH
))
8105 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
8112 case 0x10d: /* 3DNow! prefetch(w) */
8113 modrm
= cpu_ldub_code(env
, s
->pc
++);
8114 mod
= (modrm
>> 6) & 3;
8117 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
8118 /* ignore for now */
8120 case 0x1aa: /* rsm */
8121 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_RSM
);
8122 if (!(s
->flags
& HF_SMM_MASK
))
8124 gen_update_cc_op(s
);
8125 gen_jmp_im(s
->pc
- s
->cs_base
);
8126 gen_helper_rsm(cpu_env
);
8129 case 0x1b8: /* SSE4.2 popcnt */
8130 if ((prefixes
& (PREFIX_REPZ
| PREFIX_LOCK
| PREFIX_REPNZ
)) !=
8133 if (!(s
->cpuid_ext_features
& CPUID_EXT_POPCNT
))
8136 modrm
= cpu_ldub_code(env
, s
->pc
++);
8137 reg
= ((modrm
>> 3) & 7) | rex_r
;
8139 if (s
->prefix
& PREFIX_DATA
)
8141 else if (s
->dflag
!= 2)
8146 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
8147 gen_helper_popcnt(cpu_T
[0], cpu_env
, cpu_T
[0], tcg_const_i32(ot
));
8148 gen_op_mov_reg_T0(ot
, reg
);
8150 set_cc_op(s
, CC_OP_EFLAGS
);
8152 case 0x10e ... 0x10f:
8153 /* 3DNow! instructions, ignore prefixes */
8154 s
->prefix
&= ~(PREFIX_REPZ
| PREFIX_REPNZ
| PREFIX_DATA
);
8155 case 0x110 ... 0x117:
8156 case 0x128 ... 0x12f:
8157 case 0x138 ... 0x13a:
8158 case 0x150 ... 0x179:
8159 case 0x17c ... 0x17f:
8161 case 0x1c4 ... 0x1c6:
8162 case 0x1d0 ... 0x1fe:
8163 gen_sse(env
, s
, b
, pc_start
, rex_r
);
8168 /* lock generation */
8169 if (s
->prefix
& PREFIX_LOCK
)
8170 gen_helper_unlock();
8173 if (s
->prefix
& PREFIX_LOCK
)
8174 gen_helper_unlock();
8175 /* XXX: ensure that no lock was generated */
8176 gen_exception(s
, EXCP06_ILLOP
, pc_start
- s
->cs_base
);
8180 void optimize_flags_init(void)
8182 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
8183 cpu_cc_op
= tcg_global_mem_new_i32(TCG_AREG0
,
8184 offsetof(CPUX86State
, cc_op
), "cc_op");
8185 cpu_cc_dst
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUX86State
, cc_dst
),
8187 cpu_cc_src
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUX86State
, cc_src
),
8189 cpu_cc_src2
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUX86State
, cc_src2
),
8192 #ifdef TARGET_X86_64
8193 cpu_regs
[R_EAX
] = tcg_global_mem_new_i64(TCG_AREG0
,
8194 offsetof(CPUX86State
, regs
[R_EAX
]), "rax");
8195 cpu_regs
[R_ECX
] = tcg_global_mem_new_i64(TCG_AREG0
,
8196 offsetof(CPUX86State
, regs
[R_ECX
]), "rcx");
8197 cpu_regs
[R_EDX
] = tcg_global_mem_new_i64(TCG_AREG0
,
8198 offsetof(CPUX86State
, regs
[R_EDX
]), "rdx");
8199 cpu_regs
[R_EBX
] = tcg_global_mem_new_i64(TCG_AREG0
,
8200 offsetof(CPUX86State
, regs
[R_EBX
]), "rbx");
8201 cpu_regs
[R_ESP
] = tcg_global_mem_new_i64(TCG_AREG0
,
8202 offsetof(CPUX86State
, regs
[R_ESP
]), "rsp");
8203 cpu_regs
[R_EBP
] = tcg_global_mem_new_i64(TCG_AREG0
,
8204 offsetof(CPUX86State
, regs
[R_EBP
]), "rbp");
8205 cpu_regs
[R_ESI
] = tcg_global_mem_new_i64(TCG_AREG0
,
8206 offsetof(CPUX86State
, regs
[R_ESI
]), "rsi");
8207 cpu_regs
[R_EDI
] = tcg_global_mem_new_i64(TCG_AREG0
,
8208 offsetof(CPUX86State
, regs
[R_EDI
]), "rdi");
8209 cpu_regs
[8] = tcg_global_mem_new_i64(TCG_AREG0
,
8210 offsetof(CPUX86State
, regs
[8]), "r8");
8211 cpu_regs
[9] = tcg_global_mem_new_i64(TCG_AREG0
,
8212 offsetof(CPUX86State
, regs
[9]), "r9");
8213 cpu_regs
[10] = tcg_global_mem_new_i64(TCG_AREG0
,
8214 offsetof(CPUX86State
, regs
[10]), "r10");
8215 cpu_regs
[11] = tcg_global_mem_new_i64(TCG_AREG0
,
8216 offsetof(CPUX86State
, regs
[11]), "r11");
8217 cpu_regs
[12] = tcg_global_mem_new_i64(TCG_AREG0
,
8218 offsetof(CPUX86State
, regs
[12]), "r12");
8219 cpu_regs
[13] = tcg_global_mem_new_i64(TCG_AREG0
,
8220 offsetof(CPUX86State
, regs
[13]), "r13");
8221 cpu_regs
[14] = tcg_global_mem_new_i64(TCG_AREG0
,
8222 offsetof(CPUX86State
, regs
[14]), "r14");
8223 cpu_regs
[15] = tcg_global_mem_new_i64(TCG_AREG0
,
8224 offsetof(CPUX86State
, regs
[15]), "r15");
8226 cpu_regs
[R_EAX
] = tcg_global_mem_new_i32(TCG_AREG0
,
8227 offsetof(CPUX86State
, regs
[R_EAX
]), "eax");
8228 cpu_regs
[R_ECX
] = tcg_global_mem_new_i32(TCG_AREG0
,
8229 offsetof(CPUX86State
, regs
[R_ECX
]), "ecx");
8230 cpu_regs
[R_EDX
] = tcg_global_mem_new_i32(TCG_AREG0
,
8231 offsetof(CPUX86State
, regs
[R_EDX
]), "edx");
8232 cpu_regs
[R_EBX
] = tcg_global_mem_new_i32(TCG_AREG0
,
8233 offsetof(CPUX86State
, regs
[R_EBX
]), "ebx");
8234 cpu_regs
[R_ESP
] = tcg_global_mem_new_i32(TCG_AREG0
,
8235 offsetof(CPUX86State
, regs
[R_ESP
]), "esp");
8236 cpu_regs
[R_EBP
] = tcg_global_mem_new_i32(TCG_AREG0
,
8237 offsetof(CPUX86State
, regs
[R_EBP
]), "ebp");
8238 cpu_regs
[R_ESI
] = tcg_global_mem_new_i32(TCG_AREG0
,
8239 offsetof(CPUX86State
, regs
[R_ESI
]), "esi");
8240 cpu_regs
[R_EDI
] = tcg_global_mem_new_i32(TCG_AREG0
,
8241 offsetof(CPUX86State
, regs
[R_EDI
]), "edi");
8244 /* register helpers */
8245 #define GEN_HELPER 2
8249 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8250 basic block 'tb'. If search_pc is TRUE, also generate PC
8251 information for each intermediate instruction. */
/*
 * Core x86 -> TCG translator: decode guest instructions for basic block
 * 'tb' and emit the corresponding TCG intermediate ops.  Per the header
 * comment above, a search_pc parameter (elided in this extract) selects
 * whether per-opcode PC bookkeeping is also recorded.
 *
 * NOTE(review): this span is a mangled extract.  The stray leading
 * numbers (8252, 8253, ...) are original file line numbers fused into
 * the text, and wherever that numbering jumps (e.g. 8253 -> 8256) the
 * intervening original lines are MISSING (local declarations, the main
 * translation loop header, closing braces, ...).  Restore this function
 * from upstream QEMU target-i386/translate.c rather than editing the
 * text here directly.
 */
8252 static inline void gen_intermediate_code_internal(CPUX86State
*env
,
8253 TranslationBlock
*tb
,
/* The disassembly context lives on the stack; dc is the working pointer. */
8256 DisasContext dc1
, *dc
= &dc1
;
/* pc_ptr walks forward as instructions are decoded (pc = eip + cs_base). */
8257 target_ulong pc_ptr
;
/* Overflow bound for the TCG opcode buffer, computed below. */
8258 uint16_t *gen_opc_end
;
8262 target_ulong pc_start
;
8263 target_ulong cs_base
;
8267 /* generate intermediate code */
8269 cs_base
= tb
->cs_base
;
/* Unpack the TB's cached hflags word into individual DisasContext
 * fields: protected mode, 32-bit code/stack segments, address-size
 * override logic, vm86 mode, privilege levels and the trap flag. */
8272 dc
->pe
= (flags
>> HF_PE_SHIFT
) & 1;
8273 dc
->code32
= (flags
>> HF_CS32_SHIFT
) & 1;
8274 dc
->ss32
= (flags
>> HF_SS32_SHIFT
) & 1;
8275 dc
->addseg
= (flags
>> HF_ADDSEG_SHIFT
) & 1;
8277 dc
->vm86
= (flags
>> VM_SHIFT
) & 1;
8278 dc
->cpl
= (flags
>> HF_CPL_SHIFT
) & 3;
8279 dc
->iopl
= (flags
>> IOPL_SHIFT
) & 3;
8280 dc
->tf
= (flags
>> TF_SHIFT
) & 1;
8281 dc
->singlestep_enabled
= env
->singlestep_enabled
;
/* Condition-code state starts unknown; it is tracked lazily. */
8282 dc
->cc_op
= CC_OP_DYNAMIC
;
8283 dc
->cc_op_dirty
= false;
8284 dc
->cs_base
= cs_base
;
8286 dc
->popl_esp_hack
= 0;
8287 /* select memory access functions */
8289 if (flags
& HF_SOFTMMU_MASK
) {
/* Softmmu: derive the memory-access index from the current MMU index.
 * NOTE(review): the matching else branch is elided in this extract. */
8290 dc
->mem_index
= (cpu_mmu_index(env
) + 1) << 2;
/* Cache the CPUID feature words so decode can test them cheaply. */
8292 dc
->cpuid_features
= env
->cpuid_features
;
8293 dc
->cpuid_ext_features
= env
->cpuid_ext_features
;
8294 dc
->cpuid_ext2_features
= env
->cpuid_ext2_features
;
8295 dc
->cpuid_ext3_features
= env
->cpuid_ext3_features
;
8296 dc
->cpuid_7_0_ebx_features
= env
->cpuid_7_0_ebx_features
;
8297 #ifdef TARGET_X86_64
/* Long-mode and 64-bit code-segment flags only exist on x86_64. */
8298 dc
->lma
= (flags
>> HF_LMA_SHIFT
) & 1;
8299 dc
->code64
= (flags
>> HF_CS64_SHIFT
) & 1;
/* Direct-jump (TB chaining) optimization is unsafe when single-stepping
 * or while interrupts are inhibited; disable it in those cases. */
8302 dc
->jmp_opt
= !(dc
->tf
|| env
->singlestep_enabled
||
8303 (flags
& HF_INHIBIT_IRQ_MASK
)
8304 #ifndef CONFIG_SOFTMMU
8305 || (flags
& HF_SOFTMMU_MASK
)
8309 /* check addseg logic */
/* addseg must be set whenever segmented addressing can matter
 * (vm86, real mode, or 16-bit code); anything else is a setup bug. */
8310 if (!dc
->addseg
&& (dc
->vm86
|| !dc
->pe
|| !dc
->code32
))
8311 printf("ERROR addseg\n");
/* Allocate the per-translation TCG temporaries used by the decoder. */
8314 cpu_T
[0] = tcg_temp_new();
8315 cpu_T
[1] = tcg_temp_new();
8316 cpu_A0
= tcg_temp_new();
8318 cpu_tmp0
= tcg_temp_new();
8319 cpu_tmp1_i64
= tcg_temp_new_i64();
8320 cpu_tmp2_i32
= tcg_temp_new_i32();
8321 cpu_tmp3_i32
= tcg_temp_new_i32();
8322 cpu_tmp4
= tcg_temp_new();
8323 cpu_ptr0
= tcg_temp_new_ptr();
8324 cpu_ptr1
= tcg_temp_new_ptr();
/* cc_srcT must survive across branches, hence a local (not plain) temp. */
8325 cpu_cc_srcT
= tcg_temp_local_new();
8327 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
8329 dc
->is_jmp
= DISAS_NEXT
;
/* Instruction budget for this TB comes from cflags (icount mode);
 * NOTE(review): the zero-check guard between these two assignments is
 * elided -- the fallback applies when cflags carries no count. */
8333 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
8335 max_insns
= CF_COUNT_MASK
;
/* --- main translation loop body (loop header elided in extract) --- */
/* If a debugger breakpoint sits on this PC, emit a debug exception
 * instead of translating further (unless it is a CPU breakpoint being
 * skipped because of the resume flag RF). */
8339 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
8340 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
8341 if (bp
->pc
== pc_ptr
&&
8342 !((bp
->flags
& BP_CPU
) && (tb
->flags
& HF_RF_MASK
))) {
8343 gen_debug(dc
, pc_ptr
- dc
->cs_base
);
/* search_pc bookkeeping: record, per TCG opcode index, the guest PC,
 * the cc_op state and the icount so state can be restored later. */
8349 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
8353 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
8355 tcg_ctx
.gen_opc_pc
[lj
] = pc_ptr
;
8356 gen_opc_cc_op
[lj
] = dc
->cc_op
;
8357 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
8358 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
/* Before the final insn of an I/O-ending TB, the io_start marker must
 * be emitted (the call under this if is elided in the extract). */
8360 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
/* Decode and translate one guest instruction; returns the next PC. */
8363 pc_ptr
= disas_insn(env
, dc
, pc_ptr
);
8365 /* stop translation if indicated */
8368 /* if single step mode, we generate only one instruction and
8369 generate an exception */
8370 /* if IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8371 the flag and abort the translation to give the IRQs a
8372 chance to happen */
8373 if (dc
->tf
|| dc
->singlestep_enabled
||
8374 (flags
& HF_INHIBIT_IRQ_MASK
)) {
8375 gen_jmp_im(pc_ptr
- dc
->cs_base
);
8379 /* if too long translation, stop generation too */
/* Three limits: opcode-buffer overflow, TB spilling past a guest page
 * (minus slack for the longest insn), and the icount budget. */
8380 if (tcg_ctx
.gen_opc_ptr
>= gen_opc_end
||
8381 (pc_ptr
- pc_start
) >= (TARGET_PAGE_SIZE
- 32) ||
8382 num_insns
>= max_insns
) {
8383 gen_jmp_im(pc_ptr
- dc
->cs_base
);
8388 gen_jmp_im(pc_ptr
- dc
->cs_base
);
/* --- end of translation loop; finalize the TB --- */
8393 if (tb
->cflags
& CF_LAST_IO
)
8395 gen_tb_end(tb
, num_insns
);
8396 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
8397 /* fill in the trailing entries of the opcode -> PC mapping */
8399 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
8402 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
/* Optional debug logging: dump the guest code that was translated. */
8406 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
8408 qemu_log("----------------\n");
8409 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
8410 #ifdef TARGET_X86_64
/* Disassembler flag selects 16/32-bit decoding when not code32. */
8415 disas_flags
= !dc
->code32
;
8416 log_target_disas(env
, pc_start
, pc_ptr
- pc_start
, disas_flags
);
/* Record the TB's final guest-code size and instruction count. */
8422 tb
->size
= pc_ptr
- pc_start
;
8423 tb
->icount
= num_insns
;
8427 void gen_intermediate_code(CPUX86State
*env
, TranslationBlock
*tb
)
8429 gen_intermediate_code_internal(env
, tb
, 0);
8432 void gen_intermediate_code_pc(CPUX86State
*env
, TranslationBlock
*tb
)
8434 gen_intermediate_code_internal(env
, tb
, 1);
8437 void restore_state_to_opc(CPUX86State
*env
, TranslationBlock
*tb
, int pc_pos
)
8441 if (qemu_loglevel_mask(CPU_LOG_TB_OP
)) {
8443 qemu_log("RESTORE:\n");
8444 for(i
= 0;i
<= pc_pos
; i
++) {
8445 if (tcg_ctx
.gen_opc_instr_start
[i
]) {
8446 qemu_log("0x%04x: " TARGET_FMT_lx
"\n", i
,
8447 tcg_ctx
.gen_opc_pc
[i
]);
8450 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx
" cs_base=%x\n",
8451 pc_pos
, tcg_ctx
.gen_opc_pc
[pc_pos
] - tb
->cs_base
,
8452 (uint32_t)tb
->cs_base
);
8455 env
->eip
= tcg_ctx
.gen_opc_pc
[pc_pos
] - tb
->cs_base
;
8456 cc_op
= gen_opc_cc_op
[pc_pos
];
8457 if (cc_op
!= CC_OP_DYNAMIC
)