/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"
#include "fpu/softfloat.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "helper-tcg.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H
#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20
#define PREFIX_REX    0x40
/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
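/*
 * Worked example (editor's note, not in the original source): a ModRM
 * byte is laid out as mod(7:6) | op(5:3) | rm(2:0), so e.g.
 * CASE_MODRM_MEM_OP(1) expands to the case ranges 0x08...0x0f,
 * 0x48...0x4f and 0x88...0x8f -- every encoding with op == 1 except
 * those with mod == 3, which name a register rather than a memory
 * operand.
 */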
//#define MACRO_TEST   1

/* global register indexes */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
static TCGv cpu_eip;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];
typedef struct DisasContext {
    DisasContextBase base;

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */
    target_ulong pc_save;

    MemOp aflag;
    MemOp dflag;

    int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
    uint8_t prefix;

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement. */
    uint8_t popl_esp_hack; /* for correct popl with esp base handling */
    uint8_t rip_offset; /* only used in x86_64, but left for simplicity */

#ifdef TARGET_X86_64
    uint8_t rex_r;
    uint8_t rex_x;
    uint8_t rex_b;
#endif

    bool vex_w; /* used by AVX even on 32-bit processors */
    bool jmp_opt; /* use direct block chaining for direct jumps */
    bool repz_opt; /* optimize jumps within repz instructions */
    bool cc_op_dirty;

    CCOp cc_op;  /* current CC operation */
    int mem_index; /* select memory access functions */
    uint32_t flags; /* all execution flags */

    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_7_0_ecx_features;
    int cpuid_7_1_eax_features;
    int cpuid_xsave_features;

    /* TCG local temps */
    TCGv cc_srcT;
    TCGv A0;
    TCGv T0;
    TCGv T1;

    /* TCG local register indexes (only used inside old micro ops) */
    TCGv tmp0;
    TCGv tmp4;
    TCGv_i32 tmp2_i32;
    TCGv_i32 tmp3_i32;
    TCGv_i64 tmp1_i64;

    sigjmp_buf jmpbuf;
    TCGOp *prev_insn_end;
} DisasContext;
#define DISAS_EOB_ONLY         DISAS_TARGET_0
#define DISAS_EOB_NEXT         DISAS_TARGET_1
#define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_2
#define DISAS_JUMP             DISAS_TARGET_3
/* The environment in which user-only runs is constrained. */
#ifdef CONFIG_USER_ONLY
#define PE(S)     true
#define CPL(S)    3
#define IOPL(S)   0
#define SVME(S)   false
#define GUEST(S)  false
#else
#define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
#define CPL(S)    ((S)->cpl)
#define IOPL(S)   ((S)->iopl)
#define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
#define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
#endif

#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
#define VM86(S)   false
#define CODE32(S) true
#define SS32(S)   true
#define ADDSEG(S) false
#else
#define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
#define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
#endif

#if !defined(TARGET_X86_64)
#define CODE64(S) false
#elif defined(CONFIG_USER_ONLY)
#define CODE64(S) true
#else
#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
#endif

#if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
#define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
#else
#define LMA(S)    false
#endif
#ifdef TARGET_X86_64
#define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
#define REX_W(S)       ((S)->vex_w)
#define REX_R(S)       ((S)->rex_r + 0)
#define REX_X(S)       ((S)->rex_x + 0)
#define REX_B(S)       ((S)->rex_b + 0)
#else
#define REX_PREFIX(S)  false
#define REX_W(S)       false
#define REX_R(S)       0
#define REX_X(S)       0
#define REX_B(S)       0
#endif
/*
 * Many sysemu-only helpers are not reachable for user-only.
 * Define stub generators here, so that we need not either sprinkle
 * ifdefs through the translator, nor provide the helper function.
 */
#define STUB_HELPER(NAME, ...) \
    static inline void gen_helper_##NAME(__VA_ARGS__) \
    { qemu_build_not_reached(); }
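/*
 * Expansion example (editor's note, not in the original source):
 * STUB_HELPER(clgi, TCGv_env env) below produces
 *     static inline void gen_helper_clgi(TCGv_env env)
 *     { qemu_build_not_reached(); }
 * so a user-only build still compiles; actually reaching a stub at
 * translation time is a build-logic bug, caught by
 * qemu_build_not_reached().
 */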
#ifdef CONFIG_USER_ONLY
STUB_HELPER(clgi, TCGv_env env)
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(rdmsr, TCGv_env env)
STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(wrmsr, TCGv_env env)
#endif
static void gen_eob(DisasContext *s);
static void gen_jr(DisasContext *s);
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
static void gen_op(DisasContext *s1, int op, MemOp ot, int d);
static void gen_exception_gpf(DisasContext *s);
/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

/* I386 int registers */
enum {
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};
#define USES_CC_DST  1
#define USES_CC_SRC  2
#define USES_CC_SRC2 4
#define USES_CC_SRCT 8

/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
    [CC_OP_POPCNT] = USES_CC_SRC,
};
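/*
 * Liveness example (editor's note, not in the original source):
 * switching from CC_OP_ADCB (DST|SRC|SRC2) to CC_OP_LOGICB (DST only)
 * gives dead = (DST|SRC|SRC2) & ~DST = SRC|SRC2, so set_cc_op() below
 * discards cpu_cc_src and cpu_cc_src2 and the optimizer may then drop
 * whatever computation fed them.
 */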
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}
#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if HOST_BIG_ENDIAN
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
{
    /* Any time the REX prefix is present, byte registers are uniform */
    if (reg < 4 || REX_PREFIX(s)) {
        return false;
    }
    return true;
}
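/*
 * Example (editor's note, not in the original source): without a REX
 * prefix, byte-register number 4 names AH, i.e. bits 15..8 of EAX, so
 * byte_reg_is_xH(s, 4) is true; with any REX prefix the same encoding
 * names SPL and the function returns false.
 */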
/* Select the size of a push/pop operation.  */
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select the size of the stack pointer.  */
static inline MemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline MemOp mo_64_32(MemOp ot)
{
#ifdef TARGET_X86_64
    return ot == MO_64 ? MO_64 : MO_32;
#else
    return MO_32;
#endif
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline MemOp mo_b_d(int b, MemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline MemOp mo_b_d32(int b, MemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}
/*
 * Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register and return the
 * register's TCGv.
 *
 * If DEST is not NULL, store the result into DEST and return the
 * register's TCGv.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg,
                                 TCGv dest, TCGv t0)
{
    switch(ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}
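/*
 * Example (editor's note, not in the original source): writing AH
 * (OT = MO_8, reg = 4, no REX) deposits t0 into bits 15:8 of
 * cpu_regs[R_EAX] and leaves the rest of the register intact, whereas
 * a MO_32 write on x86_64 zero-extends into the full 64-bit register,
 * matching hardware behaviour for 32-bit destinations.
 */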
static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}

static inline
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(s->A0, s->A0);
    }
}

static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
    s->pc_save = -1;
}
static void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, s->T0, s->A0);
    } else {
        gen_op_mov_reg_v(s, idx, d, s->T0);
    }
}
static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
    }
    s->pc_save = s->base.pc_next;
}
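/*
 * Editor's note (not in the original source): with CF_PCREL only the
 * delta from the last synced point (s->pc_save) is added to cpu_eip,
 * never an absolute address, so the generated code stays correct when
 * the same translation block is executed at a different virtual
 * address.
 */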
static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->pc);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
    }
    s->pc_save = s->pc;
}

static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}

static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}
static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode). IRET only uses the value
     * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
     * why passing a 32-bit value isn't broken.  To avoid using this where
     * we shouldn't, return -1 in 64-bit mode so that execution goes into
     * the energy black hole.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}
static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->pc);
    } else {
        return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
    }
}

static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->base.pc_next);
    } else {
        return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
    }
}
/* Compute SEG:REG into DEST.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
                               int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(dest, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(dest, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        if (ovr_seg < 0) {
            ovr_seg = def_seg;
        }
        tcg_gen_ext16u_tl(dest, a0);
        /* ADDSEG will only be false in 16-bit mode for LEA.  */
        if (!ADDSEG(s)) {
            return;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(dest, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(dest, a0);
            tcg_gen_add_tl(dest, dest, seg);
        } else {
            tcg_gen_add_tl(dest, a0, seg);
            tcg_gen_ext32u_tl(dest, dest);
        }
    }
}

static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
{
    gen_lea_v_seg_dest(s, aflag, s->A0, a0, def_seg, ovr_seg);
}

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}
static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot)
{
    TCGv dshift = tcg_temp_new();
    tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(dshift, dshift, ot);
    return dshift;
}

static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    if (size == MO_TL) {
        return src;
    }
    if (!dst) {
        dst = tcg_temp_new();
    }
    tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
    return dst;
}

static void gen_extu(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}
static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);

    tcg_gen_brcondi_tl(cond, tmp, 0, label1);
}

static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}

static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}
static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, tcg_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, tcg_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, tcg_env, n);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(tcg_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(tcg_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(tcg_env, v, n);
        break;
    default:
        g_assert_not_reached();
    }
}
/*
 * Validate that access to [port, port + 1<<ot) is allowed.
 * Raise #GP, or VMM exit if not.
 */
static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
                         uint32_t svm_flags)
{
#ifdef CONFIG_USER_ONLY
    /*
     * We do not implement the ioperm(2) syscall, so the TSS check
     * will always fail.
     */
    gen_exception_gpf(s);
    return false;
#else
    if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
        gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
    }
    if (GUEST(s)) {
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
            svm_flags |= SVM_IOIO_REP_MASK;
        }
        svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
        gen_helper_svm_check_io(tcg_env, port,
                                tcg_constant_i32(svm_flags),
                                cur_insn_len_i32(s));
    }
    return true;
#endif
}
static void gen_movs(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}

static void gen_op_update1_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update2_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update3_cc(DisasContext *s, TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
{
    tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);
}

static void gen_op_update_neg_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
    tcg_gen_neg_tl(cpu_cc_src, s->T0);
    tcg_gen_movi_tl(s->cc_srcT, 0);
}
/* compute all eflags to reg */
static void gen_mov_eflags(DisasContext *s, TCGv reg)
{
    TCGv dst, src1, src2;
    TCGv_i32 cc_op;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        tcg_gen_mov_tl(reg, cpu_cc_src);
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(reg, CC_Z | CC_P);
        return;
    }

    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        TCGv zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    if (s->cc_op != CC_OP_DYNAMIC) {
        cc_op = tcg_constant_i32(s->cc_op);
    } else {
        cc_op = cpu_cc_op;
    }
    gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    gen_mov_eflags(s, cpu_cc_src);
    set_cc_op(s, CC_OP_EFLAGS);
}

typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;
/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = t1 == cpu_cc_src ? s->tmp0 : reg;
        tcg_gen_mov_tl(t0, s->cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB.  */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}
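/*
 * Editor's note (not in the original source): the CC_OP_DYNAMIC
 * fallback above matters for code such as an INC at the start of a TB.
 * INC must preserve CF, so the translator only needs C from the
 * unknown previous state; calling cc_compute_c() is much cheaper than
 * materializing all of EFLAGS first.
 */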
/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}
/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
                             .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}
/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_extu(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_exts(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_O };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_O | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}
static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static TCGLabel *gen_jz_ecx_string(DisasContext *s)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s, l1);
    gen_set_label(l2);
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    return l2;
}
static void gen_stos(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}

static void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
}

static void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}

static void gen_cmps(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}
static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}
static void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}

static void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}
/* Generate jumps to current or next instruction */
static void gen_repz(DisasContext *s, MemOp ot,
                     void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    /*
     * A loop would cause two single step exceptions if ECX = 1
     * before rep string_insn
     */
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \
    { gen_repz(s, ot, gen_##op); }

static void gen_repz2(DisasContext *s, MemOp ot, int nz,
                      void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    gen_update_cc_op(s);
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ2(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
    { gen_repz2(s, ot, nz, gen_##op); }
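/*
 * Expansion example (editor's note, not in the original source):
 * GEN_REPZ2(scas) defines gen_repz_scas(s, ot, nz), which runs one
 * gen_scas() step, decrements ECX, re-tests ZF against the REPZ/REPNZ
 * sense carried in 'nz', then tests ECX, and finally loops back to the
 * start of the instruction.
 */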
static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(tcg_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(tcg_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(tcg_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(tcg_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(tcg_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(tcg_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_constant_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(tcg_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(tcg_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(tcg_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(tcg_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(tcg_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(tcg_env, tmp);
        break;
    }
}
static void gen_exception(DisasContext *s, int trapno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
    s->base.is_jmp = DISAS_NORETURN;
}

/* Generate #UD for the current instruction.  The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode.  */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP);
}

/* Generate #GP for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
{
    gen_exception(s, EXCP0D_GPF);
}

/* Check for cpl == 0; if not, raise #GP and return false. */
static bool check_cpl0(DisasContext *s)
{
    if (CPL(s) == 0) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* If vm86, check for iopl == 3; if not, raise #GP and return false. */
static bool check_vm86_iopl(DisasContext *s)
{
    if (!VM86(s) || IOPL(s) == 3) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* Check for iopl allowing access; if not, raise #GP and return false. */
static bool check_iopl(DisasContext *s)
{
    if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, MemOp ot, int d)
{
    if (d != OR_TMP0) {
        if (s1->prefix & PREFIX_LOCK) {
            /* Lock prefix when destination is not memory.  */
            gen_illegal_opcode(s1);
            return;
        }
        gen_op_mov_v_reg(s1, ot, s1->T0, d);
    } else if (!(s1->prefix & PREFIX_LOCK)) {
        gen_op_ld_v(s1, ot, s1->T0, s1->A0);
    }
    switch(op) {
    case OP_ADCL:
        gen_compute_eflags_c(s1, s1->tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        gen_compute_eflags_c(s1, s1->tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4);
            tcg_gen_neg_tl(s1->T0, s1->T0);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_neg_tl(s1->T0, s1->T1);
            tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
            tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1);
        } else {
            tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    default:
    case OP_ANDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_and_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_ORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1,
                                       s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_or_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_XORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_CMPL:
        tcg_gen_mov_tl(cpu_cc_src, s1->T1);
        tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
        tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    }
}
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, MemOp ot, int d, int c)
{
    if (s1->prefix & PREFIX_LOCK) {
        if (d != OR_TMP0) {
            /* Lock prefix when destination is not memory */
            gen_illegal_opcode(s1);
            return;
        }
        tcg_gen_movi_tl(s1->T0, c > 0 ? 1 : -1);
        tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                    s1->mem_index, ot | MO_LE);
    } else {
        if (d != OR_TMP0) {
            gen_op_mov_v_reg(s1, ot, s1->T0, d);
        } else {
            gen_op_ld_v(s1, ot, s1->T0, s1->A0);
        }
        tcg_gen_addi_tl(s1->T0, s1->T0, (c > 0 ? 1 : -1));
        gen_op_st_rm_T0_A0(s1, ot, d);
    }

    gen_compute_eflags_c(s1, cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, s1->T0);
    set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
}
static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents.  */
    z_tl = tcg_constant_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
        oldop = s->tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_constant_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    tcg_gen_andi_tl(s->T1, s->T1, mask);
    tcg_gen_subi_tl(s->tmp0, s->T1, 1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, s->T0);
            tcg_gen_sar_tl(s->tmp0, s->T0, s->tmp0);
            tcg_gen_sar_tl(s->T0, s->T0, s->T1);
        } else {
            gen_extu(ot, s->T0);
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_tl(s->T0, s->T0, s->T1);
        }
    } else {
        tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
        tcg_gen_shl_tl(s->T0, s->T0, s->T1);
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right);
}
static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, s->T0);
                tcg_gen_sari_tl(s->tmp4, s->T0, op2 - 1);
                tcg_gen_sari_tl(s->T0, s->T0, op2);
            } else {
                gen_extu(ot, s->T0);
                tcg_gen_shri_tl(s->tmp4, s->T0, op2 - 1);
                tcg_gen_shri_tl(s->T0, s->T0, op2);
            }
        } else {
            tcg_gen_shli_tl(s->tmp4, s->T0, op2 - 1);
            tcg_gen_shli_tl(s->T0, s->T0, op2);
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, s->T0);
        set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    }
}
static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
    TCGv_i32 t0, t1;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    tcg_gen_andi_tl(s->T1, s->T1, mask);

    switch (ot) {
    case MO_8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_tl(s->T0, s->T0);
        tcg_gen_muli_tl(s->T0, s->T0, 0x01010101);
        goto do_long;
    case MO_16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_tl(s->T0, s->T0, s->T0, 16, 16);
        goto do_long;
    do_long:
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
        if (is_right) {
            tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
        } else {
            tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
        }
        tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
        break;
#endif
    default:
        if (is_right) {
            tcg_gen_rotr_tl(s->T0, s->T0, s->T1);
        } else {
            tcg_gen_rotl_tl(s->T0, s->T0, s->T1);
        }
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* We'll need the flags computed into CC_SRC.  */
    gen_compute_eflags(s);

    /* The value that was "rotated out" is now present at the other end
       of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
       since we've computed the flags into CC_SRC, these variables are
       currently dead.  */
    if (is_right) {
        tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
        tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
    } else {
        tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
        tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
    }
    tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
    tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);

    /* Now conditionally store the new CC_OP value.  If the shift count
       is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
       Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
       exactly as we computed above.  */
    t0 = tcg_constant_i32(0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, s->T1);
    tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX);
    tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
                        s->tmp2_i32, s->tmp3_i32);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
                          int is_right)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);
    int shift;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_32:
            tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
            if (is_right) {
                tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, op2);
            } else {
                tcg_gen_rotli_i32(s->tmp2_i32, s->tmp2_i32, op2);
            }
            tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
            break;
#endif
        default:
            if (is_right) {
                tcg_gen_rotri_tl(s->T0, s->T0, op2);
            } else {
                tcg_gen_rotli_tl(s->T0, s->T0, op2);
            }
            break;
        case MO_8:
            mask = 7;
            goto do_shifts;
        case MO_16:
            mask = 15;
        do_shifts:
            shift = op2 & mask;
            if (is_right) {
                shift = mask + 1 - shift;
            }
            gen_extu(ot, s->T0);
            tcg_gen_shli_tl(s->tmp0, s->T0, shift);
            tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift);
            tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
            break;
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    if (op2 != 0) {
        /* Compute the flags into CC_SRC.  */
        gen_compute_eflags(s);

        /* The value that was "rotated out" is now present at the other end
           of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
           since we've computed the flags into CC_SRC, these variables are
           currently dead.  */
        if (is_right) {
            tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
            tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
        } else {
            tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
            tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
        }
        tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
        tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
        set_cc_op(s, CC_OP_ADCOX);
    }
}
/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
                           int is_right)
{
    gen_compute_eflags(s);
    assert(s->cc_op == CC_OP_EFLAGS);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    if (is_right) {
        switch (ot) {
        case MO_8:
            gen_helper_rcrb(s->T0, tcg_env, s->T0, s->T1);
            break;
        case MO_16:
            gen_helper_rcrw(s->T0, tcg_env, s->T0, s->T1);
            break;
        case MO_32:
            gen_helper_rcrl(s->T0, tcg_env, s->T0, s->T1);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rcrq(s->T0, tcg_env, s->T0, s->T1);
            break;
#endif
        default:
            g_assert_not_reached();
        }
    } else {
        switch (ot) {
        case MO_8:
            gen_helper_rclb(s->T0, tcg_env, s->T0, s->T1);
            break;
        case MO_16:
            gen_helper_rclw(s->T0, tcg_env, s->T0, s->T1);
            break;
        case MO_32:
            gen_helper_rcll(s->T0, tcg_env, s->T0, s->T1);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rclq(s->T0, tcg_env, s->T0, s->T1);
            break;
#endif
        default:
            g_assert_not_reached();
        }
    }
    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);
}
/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
                             bool is_right, TCGv count_in)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);
    TCGv count;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
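        /*
         * Worked example (editor's note, not in the original source):
         * for shrdw with A = 0x1234, B = 0xabcd and a count of 20, the
         * 48-bit value A:B:A = 0x1234abcd1234 shifted right by 20
         * leaves 0x4abc in the low 16 bits, which is what Intel
         * hardware produces for a 16-bit double shift with count > 16.
         */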
        if (is_right) {
            tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
            tcg_gen_mov_tl(s->T1, s->T0);
            tcg_gen_mov_tl(s->T0, s->tmp0);
        } else {
            tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
        }
        /*
         * If TARGET_X86_64 defined then fall through into MO_32 case,
         * otherwise fall through default case.
         */
    case MO_32:
#ifdef TARGET_X86_64
        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
            tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_i64(s->T0, s->T0, count);
        } else {
            tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
            tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shl_i64(s->T0, s->T0, count);
            tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
            tcg_gen_shri_i64(s->T0, s->T0, 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shr_tl(s->T0, s->T0, count);
            tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
        } else {
            tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour.  */
                tcg_gen_subfi_tl(s->tmp4, 33, count);
                tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
                tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
            }

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shl_tl(s->T0, s->T0, count);
            tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
        }
        tcg_gen_movi_tl(s->tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
                           s->tmp4, s->T1);
        tcg_gen_or_tl(s->T0, s->T0, s->T1);
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
}
static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s)
{
    if (s != OR_TMP1) {
        gen_op_mov_v_reg(s1, ot, s1->T1, s);
    }
    switch(op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}

static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c)
{
    switch(op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        tcg_gen_movi_tl(s1->T1, c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}
#define X86_MAX_INSN_LENGTH 15

static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
{
    uint64_t pc = s->pc;

    /* This is a subsequent insn that crosses a page boundary.  */
    if (s->base.num_insns > 1 &&
        !is_same_page(&s->base, s->pc + num_bytes - 1)) {
        siglongjmp(s->jmpbuf, 2);
    }

    s->pc += num_bytes;
    if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
        /* If the instruction's 16th byte is on a different page than the 1st, a
         * page fault on the second page wins over the general protection fault
         * caused by the instruction being too long.
         * This can happen even if the operand is only one byte long!
         */
        if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
            volatile uint8_t unused =
                cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
            (void) unused;
        }
        siglongjmp(s->jmpbuf, 1);
    }

    return pc;
}
static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldub(env, &s->base, advance_pc(env, s, 1));
}

static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
{
    return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}

static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
{
    return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}

static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldl(env, &s->base, advance_pc(env, s, 4));
}

#ifdef TARGET_X86_64
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldq(env, &s->base, advance_pc(env, s, 8));
}
#endif
2179 typedef struct AddressParts
{
2187 static AddressParts
gen_lea_modrm_0(CPUX86State
*env
, DisasContext
*s
,
2190 int def_seg
, base
, index
, scale
, mod
, rm
;
2199 mod
= (modrm
>> 6) & 3;
2201 base
= rm
| REX_B(s
);
2204 /* Normally filtered out earlier, but including this path
2205 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
2214 int code
= x86_ldub_code(env
, s
);
2215 scale
= (code
>> 6) & 3;
2216 index
= ((code
>> 3) & 7) | REX_X(s
);
2218 index
= -1; /* no index */
2220 base
= (code
& 7) | REX_B(s
);
2226 if ((base
& 7) == 5) {
2228 disp
= (int32_t)x86_ldl_code(env
, s
);
2229 if (CODE64(s
) && !havesib
) {
2231 disp
+= s
->pc
+ s
->rip_offset
;
2236 disp
= (int8_t)x86_ldub_code(env
, s
);
2240 disp
= (int32_t)x86_ldl_code(env
, s
);
2244 /* For correct popl handling with esp. */
2245 if (base
== R_ESP
&& s
->popl_esp_hack
) {
2246 disp
+= s
->popl_esp_hack
;
2248 if (base
== R_EBP
|| base
== R_ESP
) {
2257 disp
= x86_lduw_code(env
, s
);
2260 } else if (mod
== 1) {
2261 disp
= (int8_t)x86_ldub_code(env
, s
);
2263 disp
= (int16_t)x86_lduw_code(env
, s
);
2303 g_assert_not_reached();
2307 return (AddressParts
){ def_seg
, base
, index
, scale
, disp
};
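/*
 * Decode example (editor's note, not in the original source): with
 * 32-bit addressing, modrm = 0x44 gives mod = 1, rm = 4, so a SIB byte
 * follows; SIB = 0x24 yields base = R_ESP with no index, and the
 * mod = 1 form then reads one signed displacement byte -- i.e.
 * [esp + disp8], with R_SS as the default segment.
 */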
/* Compute the address, with a minimum number of TCG ops.  */
static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
{
    TCGv ea = NULL;

    if (a.index >= 0 && !is_vsib) {
        if (a.scale == 0) {
            ea = cpu_regs[a.index];
        } else {
            tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
            ea = s->A0;
        }
        if (a.base >= 0) {
            tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
            ea = s->A0;
        }
    } else if (a.base >= 0) {
        ea = cpu_regs[a.base];
    }
    if (!ea) {
        if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
            /* With cpu_eip ~= pc_save, the expression is pc-relative. */
            tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
        } else {
            tcg_gen_movi_tl(s->A0, a.disp);
        }
        ea = s->A0;
    } else if (a.disp != 0) {
        tcg_gen_addi_tl(s->A0, ea, a.disp);
        ea = s->A0;
    }

    return ea;
}
*env
, DisasContext
*s
, int modrm
)
2347 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
2348 TCGv ea
= gen_lea_modrm_1(s
, a
, false);
2349 gen_lea_v_seg(s
, s
->aflag
, ea
, a
.def_seg
, s
->override
);
2352 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2354 (void)gen_lea_modrm_0(env
, s
, modrm
);
2357 /* Used for BNDCL, BNDCU, BNDCN. */
2358 static void gen_bndck(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2359 TCGCond cond
, TCGv_i64 bndv
)
2361 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
2362 TCGv ea
= gen_lea_modrm_1(s
, a
, false);
2364 tcg_gen_extu_tl_i64(s
->tmp1_i64
, ea
);
2366 tcg_gen_ext32u_i64(s
->tmp1_i64
, s
->tmp1_i64
);
2368 tcg_gen_setcond_i64(cond
, s
->tmp1_i64
, s
->tmp1_i64
, bndv
);
2369 tcg_gen_extrl_i64_i32(s
->tmp2_i32
, s
->tmp1_i64
);
2370 gen_helper_bndck(tcg_env
, s
->tmp2_i32
);
2373 /* used for LEA and MOV AX, mem */
2374 static void gen_add_A0_ds_seg(DisasContext
*s
)
2376 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, R_DS
, s
->override
);
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
                           MemOp ot, int reg, int is_store)
{
    int mod, rm;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0) {
                gen_op_mov_v_reg(s, ot, s->T0, reg);
            }
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
            if (reg != OR_TMP0) {
                gen_op_mov_reg_v(s, ot, reg, s->T0);
            }
        }
    } else {
        gen_lea_modrm(env, s, modrm);
        if (is_store) {
            if (reg != OR_TMP0) {
                gen_op_mov_v_reg(s, ot, s->T0, reg);
            }
            gen_op_st_v(s, ot, s->T0, s->A0);
        } else {
            gen_op_ld_v(s, ot, s->T0, s->A0);
            if (reg != OR_TMP0) {
                gen_op_mov_reg_v(s, ot, reg, s->T0);
            }
        }
    }
}
static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
{
    target_ulong ret;

    switch (ot) {
    case MO_8:
        ret = x86_ldub_code(env, s);
        break;
    case MO_16:
        ret = x86_lduw_code(env, s);
        break;
    case MO_32:
        ret = x86_ldl_code(env, s);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        ret = x86_ldq_code(env, s);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return ret;
}

static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
{
    uint32_t ret;

    switch (ot) {
    case MO_8:
        ret = x86_ldub_code(env, s);
        break;
    case MO_16:
        ret = x86_lduw_code(env, s);
        break;
    case MO_32:
#ifdef TARGET_X86_64
    case MO_64:
#endif
        ret = x86_ldl_code(env, s);
        break;
    default:
        g_assert_not_reached();
    }
    return ret;
}

static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
{
    target_long ret;

    switch (ot) {
    case MO_8:
        ret = (int8_t) x86_ldub_code(env, s);
        break;
    case MO_16:
        ret = (int16_t) x86_lduw_code(env, s);
        break;
    case MO_32:
        ret = (int32_t) x86_ldl_code(env, s);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        ret = x86_ldq_code(env, s);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return ret;
}

static inline int insn_const_size(MemOp ot)
{
    if (ot <= MO_32) {
        return 1 << ot;
    } else {
        return 4;
    }
}
static void gen_jcc(DisasContext *s, int b, int diff)
{
    TCGLabel *l1 = gen_new_label();

    gen_jcc1(s, b, l1);
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    gen_jmp_rel(s, s->dflag, diff, 0);
}

static void gen_cmovcc1(DisasContext *s, int b, TCGv dest, TCGv src)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T1);

    if (cc.mask != -1) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cc.reg, cc.mask);
        cc.reg = t0;
    }
    if (!cc.use_reg2) {
        cc.reg2 = tcg_constant_tl(cc.imm);
    }

    tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
}
static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg)
{
    tcg_gen_ld32u_tl(s->T0, tcg_env,
                     offsetof(CPUX86State,segs[seg_reg].selector));
}

static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg)
{
    tcg_gen_ext16u_tl(s->T0, s->T0);
    tcg_gen_st32_tl(s->T0, tcg_env,
                    offsetof(CPUX86State,segs[seg_reg].selector));
    tcg_gen_shli_tl(cpu_seg_base[seg_reg], s->T0, 4);
}
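
/*
 * Worked example (added comment): in real and VM86 mode a segment base
 * is simply selector << 4, which is the shift gen_op_movl_seg_T0_vm()
 * performs.  Loading 0x1234 into DS therefore yields a base of 0x12340,
 * and a subsequent DS:0x10 access touches linear address 0x12350.
 */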
/* move T0 to seg_reg and compute if the CPU state may change. Never
   call this function with seg_reg == R_CS */
static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
{
    if (PE(s) && !VM86(s)) {
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
        /* abort translation because the addseg value may change or
           because ss32 may change. For R_SS, translation must always
           stop as a special handling must be done to disable hardware
           interrupts for the next instruction */
        if (seg_reg == R_SS) {
            s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
        } else if (CODE32(s) && seg_reg < R_FS) {
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
    } else {
        gen_op_movl_seg_T0_vm(s, seg_reg);
        if (seg_reg == R_SS) {
            s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
        }
    }
}
static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
{
    /* no SVM activated; fast case */
    if (likely(!GUEST(s))) {
        return;
    }
    gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
}

static inline void gen_stack_update(DisasContext *s, int addend)
{
    gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
}
/* Generate a push. It depends on ss32, addseg and dflag. */
static void gen_push_v(DisasContext *s, TCGv val)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = mo_stacksize(s);
    int size = 1 << d_ot;
    TCGv new_esp = s->A0;

    tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);

    if (!CODE64(s)) {
        if (ADDSEG(s)) {
            new_esp = tcg_temp_new();
            tcg_gen_mov_tl(new_esp, s->A0);
        }
        gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
    }

    gen_op_st_v(s, d_ot, val, s->A0);
    gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
}
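
/*
 * Usage note (added comment): gen_push_v() stores through s->A0 before
 * writing back ESP, so a faulting store leaves ESP unchanged and the
 * instruction can be restarted.  E.g. a 32-bit "push %eax" with
 * ESP = 0x1000 stores to SS:0xffc and only then moves 0xffc into ESP.
 */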
/* two step pop is necessary for precise exceptions */
static MemOp gen_pop_T0(DisasContext *s)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);

    gen_lea_v_seg_dest(s, mo_stacksize(s), s->T0, cpu_regs[R_ESP], R_SS, -1);
    gen_op_ld_v(s, d_ot, s->T0, s->T0);

    return d_ot;
}

static inline void gen_pop_update(DisasContext *s, MemOp ot)
{
    gen_stack_update(s, 1 << ot);
}

static inline void gen_stack_A0(DisasContext *s)
{
    gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
}
static void gen_pusha(DisasContext *s)
{
    MemOp s_ot = SS32(s) ? MO_32 : MO_16;
    MemOp d_ot = s->dflag;
    int size = 1 << d_ot;
    int i;

    for (i = 0; i < 8; i++) {
        tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size);
        gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
        gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
    }

    gen_stack_update(s, -8 * size);
}

static void gen_popa(DisasContext *s)
{
    MemOp s_ot = SS32(s) ? MO_32 : MO_16;
    MemOp d_ot = s->dflag;
    int size = 1 << d_ot;
    int i;

    for (i = 0; i < 8; i++) {
        /* ESP is not reloaded */
        if (7 - i == R_ESP) {
            continue;
        }
        tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size);
        gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
        gen_op_ld_v(s, d_ot, s->T0, s->A0);
        gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
    }

    gen_stack_update(s, 8 * size);
}
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
    int size = 1 << d_ot;

    /* Push BP; compute FrameTemp into T1. */
    tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
    gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1);
    gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);

    level &= 31;
    if (level != 0) {
        int i;

        /* Copy level-1 pointers from the previous frame. */
        for (i = 1; i < level; ++i) {
            tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i);
            gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
            gen_op_ld_v(s, d_ot, s->tmp0, s->A0);

            tcg_gen_subi_tl(s->A0, s->T1, size * i);
            gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
            gen_op_st_v(s, d_ot, s->tmp0, s->A0);
        }

        /* Push the current FrameTemp as the last level. */
        tcg_gen_subi_tl(s->A0, s->T1, size * level);
        gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
        gen_op_st_v(s, d_ot, s->T1, s->A0);
    }

    /* Copy the FrameTemp value to EBP. */
    gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1);

    /* Compute the final value of ESP. */
    tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
    gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
}
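
/*
 * Illustrative sketch (added comment) of the frame ENTER builds, for
 * "enter $8, $0" on a 32-bit stack with incoming ESP_in:
 *
 *     ESP_in - 4:   saved EBP    <- new EBP (FrameTemp)
 *     ESP_in - 12:  ...          <- new ESP (esp_addend = 8, level = 0)
 *
 * Non-zero levels additionally copy level-1 display pointers from the
 * previous frame and then push FrameTemp itself as the last level.
 */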
static void gen_leave(DisasContext *s)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = mo_stacksize(s);

    gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
    gen_op_ld_v(s, d_ot, s->T0, s->A0);

    tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);

    gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
    gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
}
/* Similarly, except that the assumption here is that we don't decode
   the instruction at all -- either a missing opcode, an unimplemented
   feature, or just a bogus instruction stream. */
static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
{
    gen_illegal_opcode(s);

    if (qemu_loglevel_mask(LOG_UNIMP)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            target_ulong pc = s->base.pc_next, end = s->pc;

            fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
            for (; pc < end; ++pc) {
                fprintf(logfile, " %02x", cpu_ldub_code(env, pc));
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }
}
/* an interrupt is different from an exception because of the
   privilege checks */
static void gen_interrupt(DisasContext *s, int intno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
                               cur_insn_len_i32(s));
    s->base.is_jmp = DISAS_NORETURN;
}
static void gen_set_hflag(DisasContext *s, uint32_t mask)
{
    if ((s->flags & mask) == 0) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
        tcg_gen_ori_i32(t, t, mask);
        tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
        s->flags |= mask;
    }
}

static void gen_reset_hflag(DisasContext *s, uint32_t mask)
{
    if (s->flags & mask) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
        tcg_gen_andi_i32(t, t, ~mask);
        tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
        s->flags &= ~mask;
    }
}
static void gen_set_eflags(DisasContext *s, target_ulong mask)
{
    TCGv t = tcg_temp_new();

    tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
    tcg_gen_ori_tl(t, t, mask);
    tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
}

static void gen_reset_eflags(DisasContext *s, target_ulong mask)
{
    TCGv t = tcg_temp_new();

    tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
    tcg_gen_andi_tl(t, t, ~mask);
    tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
}
/* Clear BND registers during legacy branches. */
static void gen_bnd_jmp(DisasContext *s)
{
    /* Clear the registers only if BND prefix is missing, MPX is enabled,
       and if the BNDREGs are known to be in use (non-zero) already.
       The helper itself will check BNDPRESERVE at runtime. */
    if ((s->prefix & PREFIX_REPNZ) == 0
        && (s->flags & HF_MPX_EN_MASK) != 0
        && (s->flags & HF_MPX_IU_MASK) != 0) {
        gen_helper_bnd_jmp(tcg_env);
    }
}
/* Generate an end of block. Trace exception is also generated if needed.
   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
   If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
   S->TF.  This is used by the syscall/sysret insns.  */
static void
do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
{
    gen_update_cc_op(s);

    /* If several instructions disable interrupts, only the first does it. */
    if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
        gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
    } else {
        gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
    }

    if (s->base.tb->flags & HF_RF_MASK) {
        gen_reset_eflags(s, RF_MASK);
    }
    if (recheck_tf) {
        gen_helper_rechecking_single_step(tcg_env);
        tcg_gen_exit_tb(NULL, 0);
    } else if (s->flags & HF_TF_MASK) {
        gen_helper_single_step(tcg_env);
    } else if (jr) {
        tcg_gen_lookup_and_goto_ptr();
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}

static inline void
gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
{
    do_gen_eob_worker(s, inhibit, recheck_tf, false);
}

/* End of block.
   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. */
static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
{
    gen_eob_worker(s, inhibit, false);
}

/* End of block, resetting the inhibit irq flag. */
static void gen_eob(DisasContext *s)
{
    gen_eob_worker(s, false, false);
}

/* Jump to register */
static void gen_jr(DisasContext *s)
{
    do_gen_eob_worker(s, false, false, true);
}
/* Jump to eip+diff, truncating the result to OT. */
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
{
    bool use_goto_tb = s->jmp_opt;
    target_ulong mask = -1;
    target_ulong new_pc = s->pc + diff;
    target_ulong new_eip = new_pc - s->cs_base;

    /* In 64-bit mode, operand size is fixed at 64 bits. */
    if (!CODE64(s)) {
        if (ot == MO_16) {
            mask = 0xffff;
            if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
                use_goto_tb = false;
            }
        } else {
            mask = 0xffffffff;
        }
    }
    new_eip &= mask;

    gen_update_cc_op(s);
    set_cc_op(s, CC_OP_DYNAMIC);

    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
        /*
         * If we can prove the branch does not leave the page and we have
         * no extra masking to apply (data16 branch in code32, see above),
         * then we have also proven that the addition does not wrap.
         */
        if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
            tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
            use_goto_tb = false;
        }
    } else if (!CODE64(s)) {
        new_pc = (uint32_t)(new_eip + s->cs_base);
    }

    if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
            tcg_gen_movi_tl(cpu_eip, new_eip);
        }
        tcg_gen_exit_tb(s->base.tb, tb_num);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
            tcg_gen_movi_tl(cpu_eip, new_eip);
        }
        if (s->jmp_opt) {
            gen_jr(s);   /* jump to another page */
        } else {
            gen_eob(s);  /* exit to main loop */
        }
    }
}
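
/*
 * Worked example (added comment): for a data16 "jmp +0x10" at EIP 0xfff8
 * in 32-bit code, mask is 0xffff, so new_eip wraps to 0x0008 instead of
 * 0x10008.  The andi on cpu_eip above is exactly this IP truncation, and
 * it is also why use_goto_tb must be dropped in that CF_PCREL case.
 */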
/* Jump to eip+diff, truncating to the current code size. */
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
{
    /* CODE64 ignores the OT argument, so we need not consider it. */
    gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
}
static inline void gen_ldq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
    tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
}

static inline void gen_stq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
}

static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
{
    MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
                  ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
    MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
    int mem_index = s->mem_index;
    TCGv_i128 t = tcg_temp_new_i128();

    tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
    tcg_gen_st_i128(t, tcg_env, offset);
}

static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
{
    MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
                  ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
    MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
    int mem_index = s->mem_index;
    TCGv_i128 t = tcg_temp_new_i128();

    tcg_gen_ld_i128(t, tcg_env, offset);
    tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
}
static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
{
    MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
    int mem_index = s->mem_index;
    TCGv_i128 t0 = tcg_temp_new_i128();
    TCGv_i128 t1 = tcg_temp_new_i128();

    tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
    tcg_gen_addi_tl(s->tmp0, s->A0, 16);
    tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);

    tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
    tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
}

static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
{
    MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
    int mem_index = s->mem_index;
    TCGv_i128 t = tcg_temp_new_i128();

    tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
    tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
    tcg_gen_addi_tl(s->tmp0, s->A0, 16);
    tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
    tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
}
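
/*
 * Design note (added comment): the 256-bit transfer is issued as two
 * 16-byte accesses with MO_ATOM_IFALIGN_PAIR because only per-half
 * atomicity is provided here; the MO_ALIGN_32 flag is carried on the
 * low half alone so that an unaligned aligned-form access faults before
 * either half is transferred.
 */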
2979 #include "decode-new.h"
2980 #include "emit.c.inc"
2981 #include "decode-new.c.inc"
static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
{
    TCGv_i64 cmp, val, old;
    TCGv Z;

    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i64();
    val = tcg_temp_new_i64();
    old = tcg_temp_new_i64();

    /* Construct the comparison values from the register pair. */
    tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
    } else {
        tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
                                      s->mem_index, MO_TEUQ);
    }

    /* Set Z to match the required value of the comparison. */
    tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
    Z = tcg_temp_new();
    tcg_gen_trunc_i64_tl(Z, cmp);

    /*
     * Extract the result values for the register pair.
     * For 32-bit, we may do this unconditionally, because on success (Z=1),
     * the old value matches the previous value in EDX:EAX.  For x86_64,
     * the store must be conditional, because we must leave the source
     * registers unchanged on success, and zero-extend the writeback
     * on failure (Z=0).
     */
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
    } else {
        TCGv zero = tcg_constant_tl(0);

        tcg_gen_extr_i64_tl(s->T0, s->T1, old);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
                           s->T0, cpu_regs[R_EAX]);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
                           s->T1, cpu_regs[R_EDX]);
    }

    /* Update Z. */
    gen_compute_eflags(s);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
}
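
/*
 * Worked example (added comment): with EDX:EAX equal to the m64 operand,
 * the cmpxchg above succeeds, ECX:EBX is written to memory and Z = 1 is
 * deposited into CC_SRC at bit ctz32(CC_Z); on mismatch the old memory
 * value is folded back into EDX:EAX instead (conditionally on x86_64
 * targets, for the reasons the comment above spells out).
 */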
#ifdef TARGET_X86_64
static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
{
    MemOp mop = MO_TE | MO_128 | MO_ALIGN;
    TCGv_i64 t0, t1;
    TCGv_i128 cmp, val;

    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i128();
    val = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    } else {
        tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    }

    tcg_gen_extr_i128_i64(s->T0, s->T1, val);

    /* Determine success after the fact. */
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
    tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
    tcg_gen_or_i64(t0, t0, t1);

    /* Update Z. */
    gen_compute_eflags(s);
    tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);

    /*
     * Extract the result values for the register pair.  We may do this
     * unconditionally, because on success (Z=1), the old value matches
     * the previous value in RDX:RAX.
     */
    tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
    tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
}
#endif
/* convert one instruction. s->base.is_jmp is set if the translation must
   be stopped. Return the next pc value */
static bool disas_insn(DisasContext *s, CPUState *cpu)
{
    CPUX86State *env = cpu_env(cpu);
    int b, prefixes;
    int shift;
    MemOp ot, aflag, dflag;
    int modrm, reg, rm, mod, op, opreg, val;
    bool orig_cc_op_dirty = s->cc_op_dirty;
    CCOp orig_cc_op = s->cc_op;
    target_ulong orig_pc_save = s->pc_save;

    s->pc = s->base.pc_next;
    s->override = -1;
#ifdef TARGET_X86_64
    s->rex_r = 0;
    s->rex_x = 0;
    s->rex_b = 0;
#endif
    s->rip_offset = 0; /* for relative ip address */
    s->vex_l = 0;
    s->vex_v = 0;
    s->vex_w = false;
    switch (sigsetjmp(s->jmpbuf, 0)) {
    case 0:
        break;
    case 1:
        gen_exception_gpf(s);
        return true;
    case 2:
        /* Restore state that may affect the next instruction. */
        s->pc = s->base.pc_next;
        /*
         * TODO: These save/restore can be removed after the table-based
         * decoder is complete; we will be decoding the insn completely
         * before any code generation that might affect these variables.
         */
        s->cc_op_dirty = orig_cc_op_dirty;
        s->cc_op = orig_cc_op;
        s->pc_save = orig_pc_save;
        /* END TODO */
        s->base.num_insns--;
        tcg_remove_ops_after(s->prev_insn_end);
        s->base.is_jmp = DISAS_TOO_MANY;
        return false;
    default:
        g_assert_not_reached();
    }

    prefixes = 0;
 next_byte:
    s->prefix = prefixes;
    b = x86_ldub_code(env, s);
    /* Collect prefixes. */
    switch (b) {
    default:
        break;
    case 0x0f:
        b = x86_ldub_code(env, s) + 0x100;
        break;
    case 0xf3:
        prefixes |= PREFIX_REPZ;
        prefixes &= ~PREFIX_REPNZ;
        goto next_byte;
    case 0xf2:
        prefixes |= PREFIX_REPNZ;
        prefixes &= ~PREFIX_REPZ;
        goto next_byte;
    case 0xf0:
        prefixes |= PREFIX_LOCK;
        goto next_byte;
    case 0x2e:
        s->override = R_CS;
        goto next_byte;
    case 0x36:
        s->override = R_SS;
        goto next_byte;
    case 0x3e:
        s->override = R_DS;
        goto next_byte;
    case 0x26:
        s->override = R_ES;
        goto next_byte;
    case 0x64:
        s->override = R_FS;
        goto next_byte;
    case 0x65:
        s->override = R_GS;
        goto next_byte;
    case 0x66:
        prefixes |= PREFIX_DATA;
        goto next_byte;
    case 0x67:
        prefixes |= PREFIX_ADR;
        goto next_byte;
#ifdef TARGET_X86_64
    case 0x40 ... 0x4f:
        if (CODE64(s)) {
            /* REX prefix */
            prefixes |= PREFIX_REX;
            s->vex_w = (b >> 3) & 1;
            s->rex_r = (b & 0x4) << 1;
            s->rex_x = (b & 0x2) << 2;
            s->rex_b = (b & 0x1) << 3;
            goto next_byte;
        }
        break;
#endif
    case 0xc5: /* 2-byte VEX */
    case 0xc4: /* 3-byte VEX */
        /* VEX prefixes cannot be used except in 32-bit mode.
           Otherwise the instruction is LES or LDS. */
        if (CODE32(s) && !VM86(s)) {
            int vex2 = x86_ldub_code(env, s);
            s->pc--; /* rewind the advance_pc() x86_ldub_code() did */

            if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
                /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
                   otherwise the instruction is LES or LDS. */
                break;
            }
            disas_insn_new(s, cpu, b);
            return true;
        }
        break;
    }
    /* Post-process prefixes. */
    if (CODE64(s)) {
        /* In 64-bit mode, the default data size is 32-bit.  Select 64-bit
           data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
           over 0x66 if both are present. */
        dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
        /* In 64-bit mode, 0x67 selects 32-bit addressing. */
        aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
    } else {
        /* In 16/32-bit mode, 0x66 selects the opposite data size. */
        if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) {
            dflag = MO_32;
        } else {
            dflag = MO_16;
        }
        /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
        if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) {
            aflag = MO_32;
        } else {
            aflag = MO_16;
        }
    }

    s->prefix = prefixes;
    s->aflag = aflag;
    s->dflag = dflag;
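
    /*
     * Worked example (added comment): in 64-bit mode, 0x66 alone yields
     * dflag = MO_16, REX.W alone yields MO_64, and "66 48" still yields
     * MO_64 because rex_w wins.  In 32-bit code 0x66 flips MO_32 to
     * MO_16, and in 16-bit code it flips MO_16 to MO_32, which is what
     * the CODE32(s) ^ PREFIX_DATA test above expresses.
     */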
    /* now check op code */
    switch (b) {
    /**************************/
    /* arith & logic */
    case 0x00 ... 0x05:
    case 0x08 ... 0x0d:
    case 0x10 ... 0x15:
    case 0x18 ... 0x1d:
    case 0x20 ... 0x25:
    case 0x28 ... 0x2d:
    case 0x30 ... 0x35:
    case 0x38 ... 0x3d:
        {
            int f;
            op = (b >> 3) & 7;
            f = (b >> 1) & 3;

            ot = mo_b_d(b, dflag);

            switch (f) {
            case 0: /* OP Ev, Gv */
                modrm = x86_ldub_code(env, s);
                reg = ((modrm >> 3) & 7) | REX_R(s);
                mod = (modrm >> 6) & 3;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(env, s, modrm);
                    opreg = OR_TMP0;
                } else if (op == OP_XORL && rm == reg) {
                xor_zero:
                    /* xor reg, reg optimisation */
                    set_cc_op(s, CC_OP_CLR);
                    tcg_gen_movi_tl(s->T0, 0);
                    gen_op_mov_reg_v(s, ot, reg, s->T0);
                    break;
                } else {
                    opreg = rm;
                }
                gen_op_mov_v_reg(s, ot, s->T1, reg);
                gen_op(s, op, ot, opreg);
                break;
            case 1: /* OP Gv, Ev */
                modrm = x86_ldub_code(env, s);
                mod = (modrm >> 6) & 3;
                reg = ((modrm >> 3) & 7) | REX_R(s);
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(env, s, modrm);
                    gen_op_ld_v(s, ot, s->T1, s->A0);
                } else if (op == OP_XORL && rm == reg) {
                    goto xor_zero;
                } else {
                    gen_op_mov_v_reg(s, ot, s->T1, rm);
                }
                gen_op(s, op, ot, reg);
                break;
            case 2: /* OP A, Iv */
                val = insn_get(env, s, ot);
                tcg_gen_movi_tl(s->T1, val);
                gen_op(s, op, ot, OR_EAX);
                break;
            }
        }
        break;
    case 0x80: /* GRP1 */
    case 0x81:
    case 0x82:
    case 0x83:
        {
            ot = mo_b_d(b, dflag);

            modrm = x86_ldub_code(env, s);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            op = (modrm >> 3) & 7;

            if (mod != 3) {
                if (b == 0x83) {
                    s->rip_offset = 1;
                } else {
                    s->rip_offset = insn_const_size(ot);
                }
                gen_lea_modrm(env, s, modrm);
                opreg = OR_TMP0;
            } else {
                opreg = rm;
            }

            switch (b) {
            default:
            case 0x80:
            case 0x81:
            case 0x82:
                val = insn_get(env, s, ot);
                break;
            case 0x83:
                val = (int8_t)insn_get(env, s, MO_8);
                break;
            }
            tcg_gen_movi_tl(s->T1, val);
            gen_op(s, op, ot, opreg);
        }
        break;
    /**************************/
    /* inc, dec, and other misc arith */
    case 0x40 ... 0x47: /* inc Gv */
        ot = dflag;
        gen_inc(s, ot, OR_EAX + (b & 7), 1);
        break;
    case 0x48 ... 0x4f: /* dec Gv */
        ot = dflag;
        gen_inc(s, ot, OR_EAX + (b & 7), -1);
        break;
    case 0xf6: /* GRP3 */
    case 0xf7:
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (mod != 3) {
            if (op == 0) {
                s->rip_offset = insn_const_size(ot);
            }
            gen_lea_modrm(env, s, modrm);
            /* For those below that handle locked memory, don't load here. */
            if (!(s->prefix & PREFIX_LOCK)
                || op != 2) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
            }
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        }

        switch (op) {
        case 0: /* test */
            val = insn_get(env, s, ot);
            tcg_gen_movi_tl(s->T1, val);
            gen_op_testl_T0_T1_cc(s);
            set_cc_op(s, CC_OP_LOGICB + ot);
            break;
        case 2: /* not */
            if (s->prefix & PREFIX_LOCK) {
                if (mod == 3) {
                    goto illegal_op;
                }
                tcg_gen_movi_tl(s->T0, ~0);
                tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
                                            s->mem_index, ot | MO_LE);
            } else {
                tcg_gen_not_tl(s->T0, s->T0);
                if (mod != 3) {
                    gen_op_st_v(s, ot, s->T0, s->A0);
                } else {
                    gen_op_mov_reg_v(s, ot, rm, s->T0);
                }
            }
            break;
        case 3: /* neg */
            if (s->prefix & PREFIX_LOCK) {
                TCGLabel *label1;
                TCGv a0, t0, t1, t2;

                if (mod == 3) {
                    goto illegal_op;
                }
                a0 = s->A0;
                t0 = s->T0;
                label1 = gen_new_label();

                gen_set_label(label1);
                t1 = tcg_temp_new();
                t2 = tcg_temp_new();
                tcg_gen_mov_tl(t2, t0);
                tcg_gen_neg_tl(t1, t0);
                tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
                                          s->mem_index, ot | MO_LE);
                tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);

                tcg_gen_neg_tl(s->T0, t0);
            } else {
                tcg_gen_neg_tl(s->T0, s->T0);
                if (mod != 3) {
                    gen_op_st_v(s, ot, s->T0, s->A0);
                } else {
                    gen_op_mov_reg_v(s, ot, rm, s->T0);
                }
            }
            gen_op_update_neg_cc(s);
            set_cc_op(s, CC_OP_SUBB + ot);
            break;
        case 4: /* mul */
            switch (ot) {
            case MO_8:
                gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
                tcg_gen_ext8u_tl(s->T0, s->T0);
                tcg_gen_ext8u_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00);
                set_cc_op(s, CC_OP_MULB);
                break;
            case MO_16:
                gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
                tcg_gen_ext16u_tl(s->T0, s->T0);
                tcg_gen_ext16u_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_shri_tl(s->T0, s->T0, 16);
                gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
                tcg_gen_mov_tl(cpu_cc_src, s->T0);
                set_cc_op(s, CC_OP_MULW);
                break;
            default:
            case MO_32:
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
                                  s->tmp2_i32, s->tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULL);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  s->T0, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
                break;
#endif
            }
            break;
        case 5: /* imul */
            switch (ot) {
            case MO_8:
                gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
                tcg_gen_ext8s_tl(s->T0, s->T0);
                tcg_gen_ext8s_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_ext8s_tl(s->tmp0, s->T0);
                tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
                set_cc_op(s, CC_OP_MULB);
                break;
            case MO_16:
                gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
                tcg_gen_ext16s_tl(s->T0, s->T0);
                tcg_gen_ext16s_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_ext16s_tl(s->tmp0, s->T0);
                tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
                tcg_gen_shri_tl(s->T0, s->T0, 16);
                gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
                set_cc_op(s, CC_OP_MULW);
                break;
            default:
            case MO_32:
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
                                  s->tmp2_i32, s->tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
                tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
                set_cc_op(s, CC_OP_MULL);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  s->T0, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
                tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
                break;
#endif
            }
            break;
        case 6: /* div */
            switch (ot) {
            case MO_8:
                gen_helper_divb_AL(tcg_env, s->T0);
                break;
            case MO_16:
                gen_helper_divw_AX(tcg_env, s->T0);
                break;
            default:
            case MO_32:
                gen_helper_divl_EAX(tcg_env, s->T0);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                gen_helper_divq_EAX(tcg_env, s->T0);
                break;
#endif
            }
            break;
        case 7: /* idiv */
            switch (ot) {
            case MO_8:
                gen_helper_idivb_AL(tcg_env, s->T0);
                break;
            case MO_16:
                gen_helper_idivw_AX(tcg_env, s->T0);
                break;
            default:
            case MO_32:
                gen_helper_idivl_EAX(tcg_env, s->T0);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                gen_helper_idivq_EAX(tcg_env, s->T0);
                break;
#endif
            }
            break;
        default:
            goto unknown_op;
        }
        break;
    case 0xfe: /* GRP4 */
    case 0xff: /* GRP5 */
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (op >= 2 && b == 0xfe) {
            goto unknown_op;
        }
        if (CODE64(s)) {
            if (op == 2 || op == 4) {
                /* operand size for jumps is 64 bit */
                ot = MO_64;
            } else if (op == 3 || op == 5) {
                ot = dflag != MO_16 ? MO_32 + REX_W(s) : MO_16;
            } else if (op == 6) {
                /* default push size is 64 bit */
                ot = mo_pushpop(s, dflag);
            }
        }
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm);
            if (op >= 2 && op != 3 && op != 5)
                gen_op_ld_v(s, ot, s->T0, s->A0);
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        }

        switch (op) {
        case 0: /* inc Ev */
            if (mod != 3) {
                opreg = OR_TMP0;
            } else {
                opreg = rm;
            }
            gen_inc(s, ot, opreg, 1);
            break;
        case 1: /* dec Ev */
            if (mod != 3) {
                opreg = OR_TMP0;
            } else {
                opreg = rm;
            }
            gen_inc(s, ot, opreg, -1);
            break;
        case 2: /* call Ev */
            /* XXX: optimize if memory (no 'and' is necessary) */
            if (dflag == MO_16) {
                tcg_gen_ext16u_tl(s->T0, s->T0);
            }
            gen_push_v(s, eip_next_tl(s));
            gen_op_jmp_v(s, s->T0);
            gen_bnd_jmp(s);
            s->base.is_jmp = DISAS_JUMP;
            break;
        case 3: /* lcall Ev */
            if (mod == 3) {
                goto illegal_op;
            }
            gen_op_ld_v(s, ot, s->T1, s->A0);
            gen_add_A0_im(s, 1 << ot);
            gen_op_ld_v(s, MO_16, s->T0, s->A0);
        do_lcall:
            if (PE(s) && !VM86(s)) {
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_lcall_protected(tcg_env, s->tmp2_i32, s->T1,
                                           tcg_constant_i32(dflag - 1),
                                           eip_next_tl(s));
            } else {
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
                gen_helper_lcall_real(tcg_env, s->tmp2_i32, s->tmp3_i32,
                                      tcg_constant_i32(dflag - 1),
                                      eip_next_i32(s));
            }
            s->base.is_jmp = DISAS_JUMP;
            break;
        case 4: /* jmp Ev */
            if (dflag == MO_16) {
                tcg_gen_ext16u_tl(s->T0, s->T0);
            }
            gen_op_jmp_v(s, s->T0);
            gen_bnd_jmp(s);
            s->base.is_jmp = DISAS_JUMP;
            break;
        case 5: /* ljmp Ev */
            if (mod == 3) {
                goto illegal_op;
            }
            gen_op_ld_v(s, ot, s->T1, s->A0);
            gen_add_A0_im(s, 1 << ot);
            gen_op_ld_v(s, MO_16, s->T0, s->A0);
        do_ljmp:
            if (PE(s) && !VM86(s)) {
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_ljmp_protected(tcg_env, s->tmp2_i32, s->T1,
                                          eip_next_tl(s));
            } else {
                gen_op_movl_seg_T0_vm(s, R_CS);
                gen_op_jmp_v(s, s->T1);
            }
            s->base.is_jmp = DISAS_JUMP;
            break;
        case 6: /* push Ev */
            gen_push_v(s, s->T0);
            break;
        default:
            goto unknown_op;
        }
        break;
    case 0x84: /* test Ev, Gv */
    case 0x85:
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_v_reg(s, ot, s->T1, reg);
        gen_op_testl_T0_T1_cc(s);
        set_cc_op(s, CC_OP_LOGICB + ot);
        break;

    case 0xa8: /* test eAX, Iv */
    case 0xa9:
        ot = mo_b_d(b, dflag);
        val = insn_get(env, s, ot);

        gen_op_mov_v_reg(s, ot, s->T0, OR_EAX);
        tcg_gen_movi_tl(s->T1, val);
        gen_op_testl_T0_T1_cc(s);
        set_cc_op(s, CC_OP_LOGICB + ot);
        break;
    case 0x98: /* CWDE/CBW */
        switch (dflag) {
#ifdef TARGET_X86_64
        case MO_64:
            gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
            tcg_gen_ext32s_tl(s->T0, s->T0);
            gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0);
            break;
#endif
        case MO_32:
            gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
            tcg_gen_ext16s_tl(s->T0, s->T0);
            gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0);
            break;
        case MO_16:
            gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX);
            tcg_gen_ext8s_tl(s->T0, s->T0);
            gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 0x99: /* CDQ/CWD */
        switch (dflag) {
#ifdef TARGET_X86_64
        case MO_64:
            gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX);
            tcg_gen_sari_tl(s->T0, s->T0, 63);
            gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0);
            break;
#endif
        case MO_32:
            gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
            tcg_gen_ext32s_tl(s->T0, s->T0);
            tcg_gen_sari_tl(s->T0, s->T0, 31);
            gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0);
            break;
        case MO_16:
            gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
            tcg_gen_ext16s_tl(s->T0, s->T0);
            tcg_gen_sari_tl(s->T0, s->T0, 15);
            gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
            break;
        default:
            g_assert_not_reached();
        }
        break;
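    /*
     * Worked example (added comment): CWD with AX = 0x8000 sign-extends
     * bit 15 into DX, so DX:AX becomes 0xffff:0x8000; the arithmetic
     * shift right by 15 above produces exactly that all-ones pattern.
     */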
    case 0x1af: /* imul Gv, Ev */
    case 0x69: /* imul Gv, Ev, I */
    case 0x6b:
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        if (b == 0x69) {
            s->rip_offset = insn_const_size(ot);
        } else if (b == 0x6b) {
            s->rip_offset = 1;
        }
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        if (b == 0x69) {
            val = insn_get(env, s, ot);
            tcg_gen_movi_tl(s->T1, val);
        } else if (b == 0x6b) {
            val = (int8_t)insn_get(env, s, MO_8);
            tcg_gen_movi_tl(s->T1, val);
        } else {
            gen_op_mov_v_reg(s, ot, s->T1, reg);
        }
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_64:
            tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
            tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1);
            break;
#endif
        case MO_32:
            tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
            tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
            tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
                              s->tmp2_i32, s->tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
            tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
            break;
        default:
            tcg_gen_ext16s_tl(s->T0, s->T0);
            tcg_gen_ext16s_tl(s->T1, s->T1);
            /* XXX: use 32 bit mul which could be faster */
            tcg_gen_mul_tl(s->T0, s->T0, s->T1);
            tcg_gen_mov_tl(cpu_cc_dst, s->T0);
            tcg_gen_ext16s_tl(s->tmp0, s->T0);
            tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
            gen_op_mov_reg_v(s, ot, reg, s->T0);
            break;
        }
        set_cc_op(s, CC_OP_MULB + ot);
        break;
    case 0x1c0:
    case 0x1c1: /* xadd Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        gen_op_mov_v_reg(s, ot, s->T0, reg);
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_v_reg(s, ot, s->T1, rm);
            tcg_gen_add_tl(s->T0, s->T0, s->T1);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        } else {
            gen_lea_modrm(env, s, modrm);
            if (s->prefix & PREFIX_LOCK) {
                tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
                                            s->mem_index, ot | MO_LE);
                tcg_gen_add_tl(s->T0, s->T0, s->T1);
            } else {
                gen_op_ld_v(s, ot, s->T1, s->A0);
                tcg_gen_add_tl(s->T0, s->T0, s->T1);
                gen_op_st_v(s, ot, s->T0, s->A0);
            }
            gen_op_mov_reg_v(s, ot, reg, s->T1);
        }
        gen_op_update2_cc(s);
        set_cc_op(s, CC_OP_ADDB + ot);
        break;
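    /*
     * Usage note (added comment): XADD writes the sum to the destination
     * and the old destination value to the source register, so a
     * "lock xadd %eax, (mem)" is a fetch-and-add that returns the
     * previous memory value in EAX -- hence the atomic_fetch_add path
     * above.
     */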
    case 0x1b0:
    case 0x1b1: /* cmpxchg Ev, Gv */
        {
            TCGv oldv, newv, cmpv, dest;

            ot = mo_b_d(b, dflag);
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            mod = (modrm >> 6) & 3;
            oldv = tcg_temp_new();
            newv = tcg_temp_new();
            cmpv = tcg_temp_new();
            gen_op_mov_v_reg(s, ot, newv, reg);
            tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
            gen_extu(ot, cmpv);
            if (s->prefix & PREFIX_LOCK) {
                if (mod == 3) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
                                          s->mem_index, ot | MO_LE);
            } else {
                if (mod == 3) {
                    rm = (modrm & 7) | REX_B(s);
                    gen_op_mov_v_reg(s, ot, oldv, rm);
                    gen_extu(ot, oldv);

                    /*
                     * Unlike the memory case, where "the destination operand receives
                     * a write cycle without regard to the result of the comparison",
                     * rm must not be touched altogether if the write fails, including
                     * not zero-extending it on 64-bit processors.  So, precompute
                     * the result of a successful writeback and perform the movcond
                     * directly on cpu_regs.  Also need to write accumulator first, in
                     * case rm is part of RAX too.
                     */
                    dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
                    tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
                } else {
                    gen_lea_modrm(env, s, modrm);
                    gen_op_ld_v(s, ot, oldv, s->A0);

                    /*
                     * Perform an unconditional store cycle like physical cpu;
                     * must be before changing accumulator to ensure
                     * idempotency if the store faults and the instruction
                     * is restarted
                     */
                    tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
                    gen_op_st_v(s, ot, newv, s->A0);
                }
            }
            /*
             * Write EAX only if the cmpxchg fails; reuse newv as the destination,
             * since it's dead here.
             */
            dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
            tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
            tcg_gen_mov_tl(cpu_cc_src, oldv);
            tcg_gen_mov_tl(s->cc_srcT, cmpv);
            tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
            set_cc_op(s, CC_OP_SUBB + ot);
        }
        break;
    case 0x1c7: /* cmpxchg8b */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        switch ((modrm >> 3) & 7) {
        case 1: /* CMPXCHG8, CMPXCHG16 */
            if (mod == 3) {
                goto illegal_op;
            }
#ifdef TARGET_X86_64
            if (dflag == MO_64) {
                if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
                    goto illegal_op;
                }
                gen_cmpxchg16b(s, env, modrm);
                break;
            }
#endif
            if (!(s->cpuid_features & CPUID_CX8)) {
                goto illegal_op;
            }
            gen_cmpxchg8b(s, env, modrm);
            break;

        case 7: /* RDSEED, RDPID with f3 prefix */
            if (mod != 3 ||
                (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            if (s->prefix & PREFIX_REPZ) {
                if (!(s->cpuid_7_0_ecx_features & CPUID_7_0_ECX_RDPID)) {
                    goto illegal_op;
                }
                gen_helper_rdpid(s->T0, tcg_env);
                rm = (modrm & 7) | REX_B(s);
                gen_op_mov_reg_v(s, dflag, rm, s->T0);
                break;
            } else {
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
                    goto illegal_op;
                }
                goto do_rdrand;
            }

        case 6: /* RDRAND */
            if (mod != 3 ||
                (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
                !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
                goto illegal_op;
            }
        do_rdrand:
            translator_io_start(&s->base);
            gen_helper_rdrand(s->T0, tcg_env);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_v(s, dflag, rm, s->T0);
            set_cc_op(s, CC_OP_EFLAGS);
            break;

        default:
            goto illegal_op;
        }
        break;
    /**************************/
    /* push/pop */
    case 0x50 ... 0x57: /* push */
        gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s));
        gen_push_v(s, s->T0);
        break;
    case 0x58 ... 0x5f: /* pop */
        ot = gen_pop_T0(s);
        /* NOTE: order is important for pop %sp */
        gen_pop_update(s, ot);
        gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0);
        break;
    case 0x60: /* pusha */
        if (CODE64(s)) {
            goto illegal_op;
        }
        gen_pusha(s);
        break;
    case 0x61: /* popa */
        if (CODE64(s)) {
            goto illegal_op;
        }
        gen_popa(s);
        break;
    case 0x68: /* push Iv */
    case 0x6a:
        ot = mo_pushpop(s, dflag);
        if (b == 0x68) {
            val = insn_get(env, s, ot);
        } else {
            val = (int8_t)insn_get(env, s, MO_8);
        }
        tcg_gen_movi_tl(s->T0, val);
        gen_push_v(s, s->T0);
        break;
    case 0x8f: /* pop Ev */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        ot = gen_pop_T0(s);
        if (mod == 3) {
            /* NOTE: order is important for pop %sp */
            gen_pop_update(s, ot);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        } else {
            /* NOTE: order is important too for MMU exceptions */
            s->popl_esp_hack = 1 << ot;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            s->popl_esp_hack = 0;
            gen_pop_update(s, ot);
        }
        break;
    case 0xc8: /* enter */
        {
            int level;
            val = x86_lduw_code(env, s);
            level = x86_ldub_code(env, s);
            gen_enter(s, val, level);
        }
        break;
    case 0xc9: /* leave */
        gen_leave(s);
        break;
    case 0x06: /* push es */
    case 0x0e: /* push cs */
    case 0x16: /* push ss */
    case 0x1e: /* push ds */
        if (CODE64(s)) {
            goto illegal_op;
        }
        gen_op_movl_T0_seg(s, b >> 3);
        gen_push_v(s, s->T0);
        break;
    case 0x1a0: /* push fs */
    case 0x1a8: /* push gs */
        gen_op_movl_T0_seg(s, (b >> 3) & 7);
        gen_push_v(s, s->T0);
        break;
    case 0x07: /* pop es */
    case 0x17: /* pop ss */
    case 0x1f: /* pop ds */
        if (CODE64(s)) {
            goto illegal_op;
        }
        reg = b >> 3;
        ot = gen_pop_T0(s);
        gen_movl_seg_T0(s, reg);
        gen_pop_update(s, ot);
        break;
    case 0x1a1: /* pop fs */
    case 0x1a9: /* pop gs */
        ot = gen_pop_T0(s);
        gen_movl_seg_T0(s, (b >> 3) & 7);
        gen_pop_update(s, ot);
        break;
    /**************************/
    /* mov */
    case 0x88:
    case 0x89: /* mov Gv, Ev */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0xc6:
    case 0xc7: /* mov Ev, Iv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod != 3) {
            s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm);
        }
        val = insn_get(env, s, ot);
        tcg_gen_movi_tl(s->T0, val);
        if (mod != 3) {
            gen_op_st_v(s, ot, s->T0, s->A0);
        } else {
            gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0);
        }
        break;
    case 0x8a:
    case 0x8b: /* mov Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_reg_v(s, ot, reg, s->T0);
        break;
    case 0x8e: /* mov seg, Gv */
        modrm = x86_ldub_code(env, s);
        reg = (modrm >> 3) & 7;
        if (reg >= 6 || reg == R_CS) {
            goto illegal_op;
        }
        gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
        gen_movl_seg_T0(s, reg);
        break;
    case 0x8c: /* mov Gv, seg */
        modrm = x86_ldub_code(env, s);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (reg >= 6) {
            goto illegal_op;
        }
        gen_op_movl_T0_seg(s, reg);
        ot = mod == 3 ? dflag : MO_16;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
        break;
    case 0x1b6: /* movzbS Gv, Eb */
    case 0x1b7: /* movzwS Gv, Eb */
    case 0x1be: /* movsbS Gv, Eb */
    case 0x1bf: /* movswS Gv, Eb */
        {
            MemOp d_ot;
            MemOp s_ot;

            /* d_ot is the size of destination */
            d_ot = dflag;
            /* ot is the size of source */
            ot = (b & 1) + MO_8;
            /* s_ot is the sign+size of source */
            s_ot = b & 8 ? MO_SIGN | ot : ot;

            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) {
                    tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8);
                } else {
                    gen_op_mov_v_reg(s, ot, s->T0, rm);
                    switch (s_ot) {
                    case MO_UB:
                        tcg_gen_ext8u_tl(s->T0, s->T0);
                        break;
                    case MO_SB:
                        tcg_gen_ext8s_tl(s->T0, s->T0);
                        break;
                    case MO_UW:
                        tcg_gen_ext16u_tl(s->T0, s->T0);
                        break;
                    default:
                    case MO_SW:
                        tcg_gen_ext16s_tl(s->T0, s->T0);
                        break;
                    }
                }
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            } else {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, s_ot, s->T0, s->A0);
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            }
        }
        break;

    case 0x8d: /* lea */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            goto illegal_op;
        }
        reg = ((modrm >> 3) & 7) | REX_R(s);
        {
            AddressParts a = gen_lea_modrm_0(env, s, modrm);
            TCGv ea = gen_lea_modrm_1(s, a, false);
            gen_lea_v_seg(s, s->aflag, ea, -1, -1);
            gen_op_mov_reg_v(s, dflag, reg, s->A0);
        }
        break;
    case 0xa0: /* mov EAX, Ov */
    case 0xa1:
    case 0xa2: /* mov Ov, EAX */
    case 0xa3:
        {
            target_ulong offset_addr;

            ot = mo_b_d(b, dflag);
            offset_addr = insn_get_addr(env, s, s->aflag);
            tcg_gen_movi_tl(s->A0, offset_addr);
            gen_add_A0_ds_seg(s);
            if ((b & 2) == 0) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
                gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
            } else {
                gen_op_mov_v_reg(s, ot, s->T0, R_EAX);
                gen_op_st_v(s, ot, s->T0, s->A0);
            }
        }
        break;
    case 0xd7: /* xlat */
        tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]);
        tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]);
        tcg_gen_add_tl(s->A0, s->A0, s->T0);
        gen_add_A0_ds_seg(s);
        gen_op_ld_v(s, MO_8, s->T0, s->A0);
        gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
        break;
    case 0xb0 ... 0xb7: /* mov R, Ib */
        val = insn_get(env, s, MO_8);
        tcg_gen_movi_tl(s->T0, val);
        gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0);
        break;
    case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            uint64_t tmp;
            /* 64 bit case */
            tmp = x86_ldq_code(env, s);
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(s->T0, tmp);
            gen_op_mov_reg_v(s, MO_64, reg, s->T0);
        } else
#endif
        {
            ot = dflag;
            val = insn_get(env, s, ot);
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(s->T0, val);
            gen_op_mov_reg_v(s, ot, reg, s->T0);
        }
        break;
    case 0x91 ... 0x97: /* xchg R, EAX */
    do_xchg_reg_eax:
        ot = dflag;
        reg = (b & 7) | REX_B(s);
        rm = R_EAX;
        goto do_xchg_reg;
    case 0x86:
    case 0x87: /* xchg Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
        do_xchg_reg:
            gen_op_mov_v_reg(s, ot, s->T0, reg);
            gen_op_mov_v_reg(s, ot, s->T1, rm);
            gen_op_mov_reg_v(s, ot, rm, s->T0);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
        } else {
            gen_lea_modrm(env, s, modrm);
            gen_op_mov_v_reg(s, ot, s->T0, reg);
            /* for xchg, lock is implicit */
            tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0,
                                   s->mem_index, ot | MO_LE);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
        }
        break;
    case 0xc4: /* les Gv */
        /* In CODE64 this is VEX3; see above. */
        op = R_ES;
        goto do_lxx;
    case 0xc5: /* lds Gv */
        /* In CODE64 this is VEX2; see above. */
        op = R_DS;
        goto do_lxx;
    case 0x1b2: /* lss Gv */
        op = R_SS;
        goto do_lxx;
    case 0x1b4: /* lfs Gv */
        op = R_FS;
        goto do_lxx;
    case 0x1b5: /* lgs Gv */
        op = R_GS;
    do_lxx:
        ot = dflag != MO_16 ? MO_32 : MO_16;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            goto illegal_op;
        }
        gen_lea_modrm(env, s, modrm);
        gen_op_ld_v(s, ot, s->T1, s->A0);
        gen_add_A0_im(s, 1 << ot);
        /* load the segment first to handle exceptions properly */
        gen_op_ld_v(s, MO_16, s->T0, s->A0);
        gen_movl_seg_T0(s, op);
        /* then put the data */
        gen_op_mov_reg_v(s, ot, reg, s->T1);
        break;
    /************************/
    /* shifts */
    case 0xc0:
    case 0xc1:
        /* shift Ev,Ib */
        shift = 2;
    grp2:
        {
            ot = mo_b_d(b, dflag);
            modrm = x86_ldub_code(env, s);
            mod = (modrm >> 6) & 3;
            op = (modrm >> 3) & 7;

            if (mod != 3) {
                if (shift == 2) {
                    s->rip_offset = 1;
                }
                gen_lea_modrm(env, s, modrm);
                opreg = OR_TMP0;
            } else {
                opreg = (modrm & 7) | REX_B(s);
            }

            /* simpler op */
            if (shift == 0) {
                gen_shift(s, op, ot, opreg, OR_ECX);
            } else {
                if (shift == 2) {
                    shift = x86_ldub_code(env, s);
                }
                gen_shifti(s, op, ot, opreg, shift);
            }
        }
        break;
    case 0xd0:
    case 0xd1:
        /* shift Ev,1 */
        shift = 1;
        goto grp2;
    case 0xd2:
    case 0xd3:
        /* shift Ev,cl */
        shift = 0;
        goto grp2;

    case 0x1a4: /* shld imm */
        op = 0;
        shift = 1;
        goto do_shiftd;
    case 0x1a5: /* shld cl */
        op = 0;
        shift = 0;
        goto do_shiftd;
    case 0x1ac: /* shrd imm */
        op = 1;
        shift = 1;
        goto do_shiftd;
    case 0x1ad: /* shrd cl */
        op = 1;
        shift = 0;
    do_shiftd:
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm);
            opreg = OR_TMP0;
        } else {
            opreg = rm;
        }
        gen_op_mov_v_reg(s, ot, s->T1, reg);

        if (shift) {
            TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
            gen_shiftd_rm_T1(s, ot, opreg, op, imm);
        } else {
            gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
        }
        break;
    /************************/
    /* floats */
    case 0xd8 ... 0xdf:
        {
            bool update_fip = true;

            if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
                /* if CR0.EM or CR0.TS are set, generate an FPU exception */
                /* XXX: what to do if illegal op ? */
                gen_exception(s, EXCP07_PREX);
                break;
            }
            modrm = x86_ldub_code(env, s);
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            op = ((b & 7) << 3) | ((modrm >> 3) & 7);
            if (mod != 3) {
                /* memory op */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                TCGv ea = gen_lea_modrm_1(s, a, false);
                TCGv last_addr = tcg_temp_new();
                bool update_fdp = true;

                tcg_gen_mov_tl(last_addr, ea);
                gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);

                switch (op) {
                case 0x00 ... 0x07: /* fxxxs */
                case 0x10 ... 0x17: /* fixxxl */
                case 0x20 ... 0x27: /* fxxxl */
                case 0x30 ... 0x37: /* fixxx */
                    {
                        int op1;
                        op1 = op & 7;

                        switch (op >> 4) {
                        case 0:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
                            break;
                        case 1:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
                            break;
                        case 2:
                            tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
                            break;
                        case 3:
                        default:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LESW);
                            gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
                            break;
                        }

                        gen_helper_fp_arith_ST0_FT0(op1);
                        if (op1 == 3) {
                            /* fcomp needs pop */
                            gen_helper_fpop(tcg_env);
                        }
                    }
                    break;
                case 0x08: /* flds */
                case 0x0a: /* fsts */
                case 0x0b: /* fstps */
                case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
                case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
                case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
                    switch (op & 7) {
                    case 0:
                        switch (op >> 4) {
                        case 0:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
                            break;
                        case 1:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
                            break;
                        case 2:
                            tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
                            break;
                        case 3:
                        default:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LESW);
                            gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
                            break;
                        }
                        break;
                    case 1:
                        /* XXX: the corresponding CPUID bit must be tested ! */
                        switch (op >> 4) {
                        case 1:
                            gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            break;
                        case 2:
                            gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
                            tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            break;
                        case 3:
                        default:
                            gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUW);
                            break;
                        }
                        gen_helper_fpop(tcg_env);
                        break;
                    default:
                        switch (op >> 4) {
                        case 0:
                            gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            break;
                        case 1:
                            gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            break;
                        case 2:
                            gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
                            tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            break;
                        case 3:
                        default:
                            gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUW);
                            break;
                        }
                        if ((op & 7) == 3) {
                            gen_helper_fpop(tcg_env);
                        }
                        break;
                    }
                    break;
                case 0x0c: /* fldenv mem */
                    gen_helper_fldenv(tcg_env, s->A0,
                                      tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x0d: /* fldcw mem */
                    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    gen_helper_fldcw(tcg_env, s->tmp2_i32);
                    update_fip = update_fdp = false;
                    break;
                case 0x0e: /* fnstenv mem */
                    gen_helper_fstenv(tcg_env, s->A0,
                                      tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x0f: /* fnstcw mem */
                    gen_helper_fnstcw(s->tmp2_i32, tcg_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    update_fip = update_fdp = false;
                    break;
                case 0x1d: /* fldt mem */
                    gen_helper_fldt_ST0(tcg_env, s->A0);
                    break;
                case 0x1f: /* fstpt mem */
                    gen_helper_fstt_ST0(tcg_env, s->A0);
                    gen_helper_fpop(tcg_env);
                    break;
                case 0x2c: /* frstor mem */
                    gen_helper_frstor(tcg_env, s->A0,
                                      tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x2e: /* fnsave mem */
                    gen_helper_fsave(tcg_env, s->A0,
                                     tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x2f: /* fnstsw mem */
                    gen_helper_fnstsw(s->tmp2_i32, tcg_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    update_fip = update_fdp = false;
                    break;
                case 0x3c: /* fbld */
                    gen_helper_fbld_ST0(tcg_env, s->A0);
                    break;
                case 0x3e: /* fbstp */
                    gen_helper_fbst_ST0(tcg_env, s->A0);
                    gen_helper_fpop(tcg_env);
                    break;
                case 0x3d: /* fildll */
                    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                        s->mem_index, MO_LEUQ);
                    gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
                    break;
                case 0x3f: /* fistpll */
                    gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
                    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                        s->mem_index, MO_LEUQ);
                    gen_helper_fpop(tcg_env);
                    break;
                default:
                    goto unknown_op;
                }

                if (update_fdp) {
                    int last_seg = s->override >= 0 ? s->override : a.def_seg;

                    tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
                                   offsetof(CPUX86State,
                                            segs[last_seg].selector));
                    tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
                                     offsetof(CPUX86State, fpds));
                    tcg_gen_st_tl(last_addr, tcg_env,
                                  offsetof(CPUX86State, fpdp));
                }
            } else {
                /* register float ops */
                opreg = rm;

                switch (op) {
                case 0x08: /* fld sti */
                    gen_helper_fpush(tcg_env);
                    gen_helper_fmov_ST0_STN(tcg_env,
                                            tcg_constant_i32((opreg + 1) & 7));
                    break;
                case 0x09: /* fxchg sti */
                case 0x29: /* fxchg4 sti, undocumented op */
                case 0x39: /* fxchg7 sti, undocumented op */
                    gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
                    break;
                case 0x0a: /* grp d9/2 */
                    switch (rm) {
                    case 0: /* fnop */
                        /*
                         * check exceptions (FreeBSD FPU probe)
                         * needs to be treated as I/O because of ferr_irq
                         */
                        translator_io_start(&s->base);
                        gen_helper_fwait(tcg_env);
                        update_fip = false;
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x0c: /* grp d9/4 */
                    switch (rm) {
                    case 0: /* fchs */
                        gen_helper_fchs_ST0(tcg_env);
                        break;
                    case 1: /* fabs */
                        gen_helper_fabs_ST0(tcg_env);
                        break;
                    case 4: /* ftst */
                        gen_helper_fldz_FT0(tcg_env);
                        gen_helper_fcom_ST0_FT0(tcg_env);
                        break;
                    case 5: /* fxam */
                        gen_helper_fxam_ST0(tcg_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x0d: /* grp d9/5 */
                    switch (rm) {
                    case 0:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fld1_ST0(tcg_env);
                        break;
                    case 1:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fldl2t_ST0(tcg_env);
                        break;
                    case 2:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fldl2e_ST0(tcg_env);
                        break;
                    case 3:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fldpi_ST0(tcg_env);
                        break;
                    case 4:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fldlg2_ST0(tcg_env);
                        break;
                    case 5:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fldln2_ST0(tcg_env);
                        break;
                    case 6:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fldz_ST0(tcg_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x0e: /* grp d9/6 */
                    switch (rm) {
                    case 0: /* f2xm1 */
                        gen_helper_f2xm1(tcg_env);
                        break;
                    case 1: /* fyl2x */
                        gen_helper_fyl2x(tcg_env);
                        break;
                    case 2: /* fptan */
                        gen_helper_fptan(tcg_env);
                        break;
                    case 3: /* fpatan */
                        gen_helper_fpatan(tcg_env);
                        break;
                    case 4: /* fxtract */
                        gen_helper_fxtract(tcg_env);
                        break;
                    case 5: /* fprem1 */
                        gen_helper_fprem1(tcg_env);
                        break;
                    case 6: /* fdecstp */
                        gen_helper_fdecstp(tcg_env);
                        break;
                    default:
                    case 7: /* fincstp */
                        gen_helper_fincstp(tcg_env);
                        break;
                    }
                    break;
                case 0x0f: /* grp d9/7 */
                    switch (rm) {
                    case 0: /* fprem */
                        gen_helper_fprem(tcg_env);
                        break;
                    case 1: /* fyl2xp1 */
                        gen_helper_fyl2xp1(tcg_env);
                        break;
                    case 2: /* fsqrt */
                        gen_helper_fsqrt(tcg_env);
                        break;
                    case 3: /* fsincos */
                        gen_helper_fsincos(tcg_env);
                        break;
                    case 5: /* fscale */
                        gen_helper_fscale(tcg_env);
                        break;
                    case 4: /* frndint */
                        gen_helper_frndint(tcg_env);
                        break;
                    case 6: /* fsin */
                        gen_helper_fsin(tcg_env);
                        break;
                    default:
                    case 7: /* fcos */
                        gen_helper_fcos(tcg_env);
                        break;
                    }
                    break;
                case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
                case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
                case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
                    {
                        int op1;

                        op1 = op & 7;
                        if (op >= 0x20) {
                            gen_helper_fp_arith_STN_ST0(op1, opreg);
                            if (op >= 0x30) {
                                gen_helper_fpop(tcg_env);
                            }
                        } else {
                            gen_helper_fmov_FT0_STN(tcg_env,
                                                    tcg_constant_i32(opreg));
                            gen_helper_fp_arith_ST0_FT0(op1);
                        }
                    }
                    break;
                case 0x02: /* fcom */
                case 0x22: /* fcom2, undocumented op */
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fcom_ST0_FT0(tcg_env);
                    break;
                case 0x03: /* fcomp */
                case 0x23: /* fcomp3, undocumented op */
                case 0x32: /* fcomp5, undocumented op */
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fcom_ST0_FT0(tcg_env);
                    gen_helper_fpop(tcg_env);
                    break;
                case 0x15: /* da/5 */
                    switch (rm) {
                    case 1: /* fucompp */
                        gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
                        gen_helper_fucom_ST0_FT0(tcg_env);
                        gen_helper_fpop(tcg_env);
                        gen_helper_fpop(tcg_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x1c:
                    switch (rm) {
                    case 0: /* feni (287 only, just do nop here) */
                        break;
                    case 1: /* fdisi (287 only, just do nop here) */
                        break;
                    case 2: /* fclex */
                        gen_helper_fclex(tcg_env);
                        update_fip = false;
                        break;
                    case 3: /* fninit */
                        gen_helper_fninit(tcg_env);
                        update_fip = false;
                        break;
                    case 4: /* fsetpm (287 only, just do nop here) */
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x1d: /* fucomi */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fucomi_ST0_FT0(tcg_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x1e: /* fcomi */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fcomi_ST0_FT0(tcg_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x28: /* ffree sti */
                    gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
                    break;
                case 0x2a: /* fst sti */
                    gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
                    break;
                case 0x2b: /* fstp sti */
                case 0x0b: /* fstp1 sti, undocumented op */
                case 0x3a: /* fstp8 sti, undocumented op */
                case 0x3b: /* fstp9 sti, undocumented op */
                    gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fpop(tcg_env);
                    break;
                case 0x2c: /* fucom st(i) */
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fucom_ST0_FT0(tcg_env);
                    break;
                case 0x2d: /* fucomp st(i) */
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fucom_ST0_FT0(tcg_env);
                    gen_helper_fpop(tcg_env);
                    break;
                case 0x33: /* de/3 */
                    switch (rm) {
                    case 1: /* fcompp */
                        gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
                        gen_helper_fcom_ST0_FT0(tcg_env);
                        gen_helper_fpop(tcg_env);
                        gen_helper_fpop(tcg_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x38: /* ffreep sti, undocumented op */
                    gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fpop(tcg_env);
                    break;
                case 0x3c: /* df/4 */
                    switch (rm) {
                    case 0:
                        gen_helper_fnstsw(s->tmp2_i32, tcg_env);
                        tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
                        gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x3d: /* fucomip */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fucomi_ST0_FT0(tcg_env);
                    gen_helper_fpop(tcg_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x3e: /* fcomip */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fcomi_ST0_FT0(tcg_env);
                    gen_helper_fpop(tcg_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x10 ... 0x13: /* fcmovxx */
                case 0x18 ... 0x1b:
                    {
                        int op1;
                        TCGLabel *l1;
                        static const uint8_t fcmov_cc[8] = {
                            (JCC_B << 1),
                            (JCC_Z << 1),
                            (JCC_BE << 1),
                            (JCC_P << 1),
                        };

                        if (!(s->cpuid_features & CPUID_CMOV)) {
                            goto illegal_op;
                        }
                        op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
                        l1 = gen_new_label();
                        gen_jcc1_noeob(s, op1, l1);
                        gen_helper_fmov_ST0_STN(tcg_env,
                                                tcg_constant_i32(opreg));
                        gen_set_label(l1);
                    }
                    break;
                default:
                    goto unknown_op;
                }
            }

            if (update_fip) {
                tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
                               offsetof(CPUX86State, segs[R_CS].selector));
                tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
                                 offsetof(CPUX86State, fpcs));
                tcg_gen_st_tl(eip_cur_tl(s),
                              tcg_env, offsetof(CPUX86State, fpip));
            }
        }
        break;
        /************************/
        /* string ops */

    case 0xa4: /* movsS */
    case 0xa5:
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_movs(s, ot);
        } else {
            gen_movs(s, ot);
        }
        break;

    case 0xaa: /* stosS */
    case 0xab:
        ot = mo_b_d(b, dflag);
        gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_stos(s, ot);
        } else {
            gen_stos(s, ot);
        }
        break;
    case 0xac: /* lodsS */
    case 0xad:
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_lods(s, ot);
        } else {
            gen_lods(s, ot);
        }
        break;
    case 0xae: /* scasS */
    case 0xaf:
        ot = mo_b_d(b, dflag);
        gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_scas(s, ot, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_scas(s, ot, 0);
        } else {
            gen_scas(s, ot);
        }
        break;

    case 0xa6: /* cmpsS */
    case 0xa7:
        ot = mo_b_d(b, dflag);
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_cmps(s, ot, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_cmps(s, ot, 0);
        } else {
            gen_cmps(s, ot);
        }
        break;
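    /*
     * Note (expository): for the REP-prefixed string forms above, the
     * gen_repz_* expanders emit one iteration of the operation together
     * with the CX/ECX decrement and termination test, while the
     * unprefixed forms expand to a single gen_movs/gen_stos/etc. step.
     * The REPNZ/REPZ distinction (third argument) only matters for SCAS
     * and CMPS, which test ZF between iterations.
     */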
    case 0x6c: /* insS */
    case 0x6d:
        ot = mo_b_d32(b, dflag);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        if (!gen_check_io(s, ot, s->tmp2_i32,
                          SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_ins(s, ot);
        } else {
            gen_ins(s, ot);
        }
        break;
    case 0x6e: /* outsS */
    case 0x6f:
        ot = mo_b_d32(b, dflag);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_outs(s, ot);
        } else {
            gen_outs(s, ot);
        }
        break;

        /************************/
        /* port I/O */

    case 0xe4: /* in im */
    case 0xe5:
        ot = mo_b_d32(b, dflag);
        val = x86_ldub_code(env, s);
        tcg_gen_movi_i32(s->tmp2_i32, val);
        if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        gen_helper_in_func(ot, s->T1, s->tmp2_i32);
        gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;
    case 0xe6: /* out im */
    case 0xe7:
        ot = mo_b_d32(b, dflag);
        val = x86_ldub_code(env, s);
        tcg_gen_movi_i32(s->tmp2_i32, val);
        if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
            break;
        }
        translator_io_start(&s->base);
        gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
        gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;
    case 0xec: /* in dx */
    case 0xed:
        ot = mo_b_d32(b, dflag);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        gen_helper_in_func(ot, s->T1, s->tmp2_i32);
        gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;
    case 0xee: /* out dx */
    case 0xef:
        ot = mo_b_d32(b, dflag);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
            break;
        }
        translator_io_start(&s->base);
        gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
        gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;
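    /*
     * Note (expository): all port I/O above funnels through
     * gen_check_io(), which emits the privilege/permission checks and the
     * SVM IOIO intercept, and through translator_io_start() so that
     * icount mode can account for the access as I/O.
     */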
        /************************/
        /* control */
    case 0xc2: /* ret im */
        val = x86_ldsw_code(env, s);
        ot = gen_pop_T0(s);
        gen_stack_update(s, val + (1 << ot));
        /* Note that gen_pop_T0 uses a zero-extending load. */
        gen_op_jmp_v(s, s->T0);
        gen_bnd_jmp(s);
        s->base.is_jmp = DISAS_JUMP;
        break;
    case 0xc3: /* ret */
        ot = gen_pop_T0(s);
        gen_pop_update(s, ot);
        /* Note that gen_pop_T0 uses a zero-extending load. */
        gen_op_jmp_v(s, s->T0);
        gen_bnd_jmp(s);
        s->base.is_jmp = DISAS_JUMP;
        break;
    case 0xca: /* lret im */
        val = x86_ldsw_code(env, s);
    do_lret:
        if (PE(s) && !VM86(s)) {
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_lret_protected(tcg_env, tcg_constant_i32(dflag - 1),
                                      tcg_constant_i32(val));
        } else {
            gen_stack_A0(s);
            /* pop offset */
            gen_op_ld_v(s, dflag, s->T0, s->A0);
            /* NOTE: keeping EIP updated is not a problem in case of
               exception */
            gen_op_jmp_v(s, s->T0);
            /* pop selector */
            gen_add_A0_im(s, 1 << dflag);
            gen_op_ld_v(s, dflag, s->T0, s->A0);
            gen_op_movl_seg_T0_vm(s, R_CS);
            /* add stack offset */
            gen_stack_update(s, val + (2 << dflag));
        }
        s->base.is_jmp = DISAS_EOB_ONLY;
        break;
    case 0xcb: /* lret */
        val = 0;
        goto do_lret;
    case 0xcf: /* iret */
        gen_svm_check_intercept(s, SVM_EXIT_IRET);
        if (!PE(s) || VM86(s)) {
            /* real mode or vm86 mode */
            if (!check_vm86_iopl(s)) {
                break;
            }
            gen_helper_iret_real(tcg_env, tcg_constant_i32(dflag - 1));
        } else {
            gen_helper_iret_protected(tcg_env, tcg_constant_i32(dflag - 1),
                                      eip_next_i32(s));
        }
        set_cc_op(s, CC_OP_EFLAGS);
        s->base.is_jmp = DISAS_EOB_ONLY;
        break;
    case 0xe8: /* call im */
        {
            int diff = (dflag != MO_16
                        ? (int32_t)insn_get(env, s, MO_32)
                        : (int16_t)insn_get(env, s, MO_16));
            gen_push_v(s, eip_next_tl(s));
            gen_bnd_jmp(s);
            gen_jmp_rel(s, dflag, diff, 0);
        }
        break;
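    /*
     * Note (expository): CALL above pushes the address of the *next*
     * instruction (eip_next_tl) before the relative displacement is
     * applied by gen_jmp_rel.
     */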
    case 0x9a: /* lcall im */
        {
            unsigned int selector, offset;

            if (CODE64(s))
                goto illegal_op;
            ot = dflag;
            offset = insn_get(env, s, ot);
            selector = insn_get(env, s, MO_16);

            tcg_gen_movi_tl(s->T0, selector);
            tcg_gen_movi_tl(s->T1, offset);
        }
        goto do_lcall;
    case 0xe9: /* jmp im */
        {
            int diff = (dflag != MO_16
                        ? (int32_t)insn_get(env, s, MO_32)
                        : (int16_t)insn_get(env, s, MO_16));
            gen_bnd_jmp(s);
            gen_jmp_rel(s, dflag, diff, 0);
        }
        break;
    case 0xea: /* ljmp im */
        {
            unsigned int selector, offset;

            if (CODE64(s))
                goto illegal_op;
            ot = dflag;
            offset = insn_get(env, s, ot);
            selector = insn_get(env, s, MO_16);

            tcg_gen_movi_tl(s->T0, selector);
            tcg_gen_movi_tl(s->T1, offset);
        }
        goto do_ljmp;
    case 0xeb: /* jmp Jb */
        {
            int diff = (int8_t)insn_get(env, s, MO_8);
            gen_jmp_rel(s, dflag, diff, 0);
        }
        break;
    case 0x70 ... 0x7f: /* jcc Jb */
        {
            int diff = (int8_t)insn_get(env, s, MO_8);
            gen_bnd_jmp(s);
            gen_jcc(s, b, diff);
        }
        break;
    case 0x180 ... 0x18f: /* jcc Jv */
        {
            int diff = (dflag != MO_16
                        ? (int32_t)insn_get(env, s, MO_32)
                        : (int16_t)insn_get(env, s, MO_16));
            gen_bnd_jmp(s);
            gen_jcc(s, b, diff);
        }
        break;

    case 0x190 ... 0x19f: /* setcc Gv */
        modrm = x86_ldub_code(env, s);
        gen_setcc1(s, b, s->T0);
        gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
        break;
    case 0x140 ... 0x14f: /* cmov Gv, Ev */
        if (!(s->cpuid_features & CPUID_CMOV)) {
            goto illegal_op;
        }
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_cmovcc1(s, b ^ 1, s->T0, cpu_regs[reg]);
        gen_op_mov_reg_v(s, ot, reg, s->T0);
        break;

        /************************/
        /* flags */
    case 0x9c: /* pushf */
        gen_svm_check_intercept(s, SVM_EXIT_PUSHF);
        if (check_vm86_iopl(s)) {
            gen_update_cc_op(s);
            gen_helper_read_eflags(s->T0, tcg_env);
            gen_push_v(s, s->T0);
        }
        break;
    case 0x9d: /* popf */
        gen_svm_check_intercept(s, SVM_EXIT_POPF);
        if (check_vm86_iopl(s)) {
            int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK;

            if (CPL(s) == 0) {
                mask |= IF_MASK | IOPL_MASK;
            } else if (CPL(s) <= IOPL(s)) {
                mask |= IF_MASK;
            }
            if (dflag == MO_16) {
                mask &= 0xffff;
            }

            ot = gen_pop_T0(s);
            gen_helper_write_eflags(tcg_env, s->T0, tcg_constant_i32(mask));
            gen_pop_update(s, ot);
            set_cc_op(s, CC_OP_EFLAGS);
            /* abort translation because TF/AC flag may change */
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
        break;
    case 0x9e: /* sahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        tcg_gen_shri_tl(s->T0, cpu_regs[R_EAX], 8);
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
        tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0);
        break;
    case 0x9f: /* lahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        gen_compute_eflags(s);
        /* Note: gen_compute_eflags() only gives the condition codes */
        tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02);
        tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], s->T0, 8, 8);
        break;
    case 0xf5: /* cmc */
        gen_compute_eflags(s);
        tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        break;
    case 0xf8: /* clc */
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
        break;
    case 0xf9: /* stc */
        gen_compute_eflags(s);
        tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        break;
    case 0xfc: /* cld */
        tcg_gen_movi_i32(s->tmp2_i32, 1);
        tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df));
        break;
    case 0xfd: /* std */
        tcg_gen_movi_i32(s->tmp2_i32, -1);
        tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df));
        break;
        /************************/
        /* bit operations */
    case 0x1ba: /* bt/bts/btr/btc Gv, im */
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        op = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        if (mod != 3) {
            s->rip_offset = 1;
            gen_lea_modrm(env, s, modrm);
            if (!(s->prefix & PREFIX_LOCK)) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
            }
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        }
        /* load shift */
        val = x86_ldub_code(env, s);
        tcg_gen_movi_tl(s->T1, val);
        if (op < 4)
            goto unknown_op;
        op -= 4;
        goto bt_op;
    case 0x1a3: /* bt Gv, Ev */
        op = 0;
        goto do_btx;
    case 0x1ab: /* bts */
        op = 1;
        goto do_btx;
    case 0x1b3: /* btr */
        op = 2;
        goto do_btx;
    case 0x1bb: /* btc */
        op = 3;
    do_btx:
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_v_reg(s, MO_32, s->T1, reg);
        if (mod != 3) {
            AddressParts a = gen_lea_modrm_0(env, s, modrm);
            /* specific case: we need to add a displacement */
            gen_exts(ot, s->T1);
            tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
            tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
            tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
            gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
            if (!(s->prefix & PREFIX_LOCK)) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
            }
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        }
    bt_op:
        tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
        tcg_gen_movi_tl(s->tmp0, 1);
        tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
        if (s->prefix & PREFIX_LOCK) {
            switch (op) {
            case 0: /* bt */
                /* Needs no atomic ops; we suppressed the normal
                   memory load for LOCK above so do it now. */
                gen_op_ld_v(s, ot, s->T0, s->A0);
                break;
            case 1: /* bts */
                tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
                                           s->mem_index, ot | MO_LE);
                break;
            case 2: /* btr */
                tcg_gen_not_tl(s->tmp0, s->tmp0);
                tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
                                            s->mem_index, ot | MO_LE);
                break;
            default:
            case 3: /* btc */
                tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
                                            s->mem_index, ot | MO_LE);
                break;
            }
            tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
        } else {
            tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
            switch (op) {
            case 0: /* bt */
                /* Data already loaded; nothing to do.  */
                break;
            case 1: /* bts */
                tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
                break;
            case 2: /* btr */
                tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
                break;
            default:
            case 3: /* btc */
                tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
                break;
            }
            if (op != 0) {
                if (mod != 3) {
                    gen_op_st_v(s, ot, s->T0, s->A0);
                } else {
                    gen_op_mov_reg_v(s, ot, rm, s->T0);
                }
            }
        }
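        /*
         * Note (expository): with a LOCK prefix the read-modify-write
         * above is performed by a single tcg_gen_atomic_fetch_* on guest
         * memory; otherwise the value is modified in a temporary and
         * written back, and the store is skipped entirely for BT, which
         * only reads.
         */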
        /* Delay all CC updates until after the store above.  Note that
           C is the result of the test, Z is unchanged, and the others
           are all undefined.  */
        switch (s->cc_op) {
        case CC_OP_MULB ... CC_OP_MULQ:
        case CC_OP_ADDB ... CC_OP_ADDQ:
        case CC_OP_ADCB ... CC_OP_ADCQ:
        case CC_OP_SUBB ... CC_OP_SUBQ:
        case CC_OP_SBBB ... CC_OP_SBBQ:
        case CC_OP_LOGICB ... CC_OP_LOGICQ:
        case CC_OP_INCB ... CC_OP_INCQ:
        case CC_OP_DECB ... CC_OP_DECQ:
        case CC_OP_SHLB ... CC_OP_SHLQ:
        case CC_OP_SARB ... CC_OP_SARQ:
        case CC_OP_BMILGB ... CC_OP_BMILGQ:
            /* Z was going to be computed from the non-zero status of CC_DST.
               We can get that same Z value (and the new C value) by leaving
               CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
               same width.  */
            tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
            set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
            break;
        default:
            /* Otherwise, generate EFLAGS and replace the C bit. */
            gen_compute_eflags(s);
            tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
                               ctz32(CC_C), 1);
            break;
        }
        break;
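        /*
         * Worked example of the CC_OP arithmetic above: if the previous
         * insn left CC_OP_ADDL, then (CC_OP_ADDL - CC_OP_MULB) & 3
         * recovers the 32-bit size index and selects CC_OP_SARL, so Z is
         * still derived from CC_DST while C comes from the freshly
         * written CC_SRC.
         */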
    case 0x1bc: /* bsf / tzcnt */
    case 0x1bd: /* bsr / lzcnt */
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, s->T0);

        /* Note that lzcnt and tzcnt are in different extensions.  */
        if ((prefixes & PREFIX_REPZ)
            && (b & 1
                ? s->cpuid_ext3_features & CPUID_EXT3_ABM
                : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
            int size = 8 << ot;
            /* For lzcnt/tzcnt, C bit is defined related to the input. */
            tcg_gen_mov_tl(cpu_cc_src, s->T0);
            if (b & 1) {
                /* For lzcnt, reduce the target_ulong result by the
                   number of zeros that we expect to find at the top. */
                tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
                tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
            } else {
                /* For tzcnt, a zero input must return the operand size. */
                tcg_gen_ctzi_tl(s->T0, s->T0, size);
            }
            /* For lzcnt/tzcnt, Z bit is defined related to the result. */
            gen_op_update1_cc(s);
            set_cc_op(s, CC_OP_BMILGB + ot);
        } else {
            /* For bsr/bsf, only the Z bit is defined and it is related
               to the input and not the result.  */
            tcg_gen_mov_tl(cpu_cc_dst, s->T0);
            set_cc_op(s, CC_OP_LOGICB + ot);

            /* ??? The manual says that the output is undefined when the
               input is zero, but real hardware leaves it unchanged, and
               real programs appear to depend on that.  Accomplish this
               by passing the output as the value to return upon zero.  */
            if (b & 1) {
                /* For bsr, return the bit index of the first 1 bit,
                   not the count of leading zeros.  */
                tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
                tcg_gen_clz_tl(s->T0, s->T0, s->T1);
                tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
            } else {
                tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
            }
        }
        gen_op_mov_reg_v(s, ot, reg, s->T0);
        break;
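    /*
     * Note (expository): the split above mirrors the ISA. LZCNT/TZCNT
     * define C from the input and Z from the result, while legacy
     * BSF/BSR define only Z (from the input) and, as the comment above
     * notes, leave the destination unchanged on a zero input, which is
     * achieved by passing the old register value as the clz/ctz
     * zero-input fallback.
     */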
        /************************/
        /* bcd */
    case 0x27: /* daa */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_daa(tcg_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x2f: /* das */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_das(tcg_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x37: /* aaa */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_aaa(tcg_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x3f: /* aas */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_aas(tcg_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0xd4: /* aam */
        if (CODE64(s))
            goto illegal_op;
        val = x86_ldub_code(env, s);
        if (val == 0) {
            gen_exception(s, EXCP00_DIVZ);
        } else {
            gen_helper_aam(tcg_env, tcg_constant_i32(val));
            set_cc_op(s, CC_OP_LOGICB);
        }
        break;
    case 0xd5: /* aad */
        if (CODE64(s))
            goto illegal_op;
        val = x86_ldub_code(env, s);
        gen_helper_aad(tcg_env, tcg_constant_i32(val));
        set_cc_op(s, CC_OP_LOGICB);
        break;

        /************************/
        /* misc */
    case 0x90: /* nop */
        /* XXX: correct lock test for all insn */
        if (prefixes & PREFIX_LOCK) {
            goto illegal_op;
        }
        /* If REX_B is set, then this is xchg eax, r8d, not a nop.  */
        if (REX_B(s)) {
            goto do_xchg_reg_eax;
        }
        if (prefixes & PREFIX_REPZ) {
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_pause(tcg_env, cur_insn_len_i32(s));
            s->base.is_jmp = DISAS_NORETURN;
        }
        break;
    case 0x9b: /* fwait */
        if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
            (HF_MP_MASK | HF_TS_MASK)) {
            gen_exception(s, EXCP07_PREX);
        } else {
            /* needs to be treated as I/O because of ferr_irq */
            translator_io_start(&s->base);
            gen_helper_fwait(tcg_env);
        }
        break;
    case 0xcc: /* int3 */
        gen_interrupt(s, EXCP03_INT3);
        break;
    case 0xcd: /* int N */
        val = x86_ldub_code(env, s);
        if (check_vm86_iopl(s)) {
            gen_interrupt(s, val);
        }
        break;
    case 0xce: /* into */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_into(tcg_env, cur_insn_len_i32(s));
        break;
    case 0xf1: /* icebp (undocumented, exits to external debugger) */
        gen_svm_check_intercept(s, SVM_EXIT_ICEBP);
        gen_debug(s);
        break;
    case 0xfa: /* cli */
        if (check_iopl(s)) {
            gen_reset_eflags(s, IF_MASK);
        }
        break;
    case 0xfb: /* sti */
        if (check_iopl(s)) {
            gen_set_eflags(s, IF_MASK);
            /* interruptions are enabled only the first insn after sti */
            gen_update_eip_next(s);
            gen_eob_inhibit_irq(s, true);
        }
        break;
    case 0x62: /* bound */
        if (CODE64(s))
            goto illegal_op;
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_op_mov_v_reg(s, ot, s->T0, reg);
        gen_lea_modrm(env, s, modrm);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        if (ot == MO_16) {
            gen_helper_boundw(tcg_env, s->A0, s->tmp2_i32);
        } else {
            gen_helper_boundl(tcg_env, s->A0, s->tmp2_i32);
        }
        break;
    case 0x1c8 ... 0x1cf: /* bswap reg */
        reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            tcg_gen_bswap64_i64(cpu_regs[reg], cpu_regs[reg]);
            break;
        }
#endif
        tcg_gen_bswap32_tl(cpu_regs[reg], cpu_regs[reg], TCG_BSWAP_OZ);
        break;
    case 0xd6: /* salc */
        if (CODE64(s))
            goto illegal_op;
        gen_compute_eflags_c(s, s->T0);
        tcg_gen_neg_tl(s->T0, s->T0);
        gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
        break;
    case 0xe0: /* loopnz */
    case 0xe1: /* loopz */
    case 0xe2: /* loop */
    case 0xe3: /* jecxz */
        {
            TCGLabel *l1, *l2;
            int diff = (int8_t)insn_get(env, s, MO_8);

            l1 = gen_new_label();
            l2 = gen_new_label();
            gen_update_cc_op(s);
            b &= 3;
            switch(b) {
            case 0: /* loopnz */
            case 1: /* loopz */
                gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
                gen_op_jz_ecx(s, l2);
                gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
                break;
            case 2: /* loop */
                gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
                gen_op_jnz_ecx(s, l1);
                break;
            default:
            case 3: /* jecxz */
                gen_op_jz_ecx(s, l1);
                break;
            }

            gen_set_label(l2);
            gen_jmp_rel_csize(s, 0, 1);

            gen_set_label(l1);
            gen_jmp_rel(s, dflag, diff, 0);
        }
        break;
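    /*
     * Note (expository): for LOOP/JECXZ above, l1 is the branch-taken
     * target and l2 the fall-through; gen_op_jz_ecx/gen_op_jnz_ecx test
     * CX, ECX or RCX according to the current address size (s->aflag).
     */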
    case 0x130: /* wrmsr */
    case 0x132: /* rdmsr */
        if (check_cpl0(s)) {
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            if (b & 2) {
                gen_helper_rdmsr(tcg_env);
            } else {
                gen_helper_wrmsr(tcg_env);
                s->base.is_jmp = DISAS_EOB_NEXT;
            }
        }
        break;
    case 0x131: /* rdtsc */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        translator_io_start(&s->base);
        gen_helper_rdtsc(tcg_env);
        break;
    case 0x133: /* rdpmc */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_rdpmc(tcg_env);
        s->base.is_jmp = DISAS_NORETURN;
        break;
    case 0x134: /* sysenter */
        /* For AMD SYSENTER is not valid in long mode */
        if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        if (!PE(s)) {
            gen_exception_gpf(s);
        } else {
            gen_helper_sysenter(tcg_env);
            s->base.is_jmp = DISAS_EOB_ONLY;
        }
        break;
    case 0x135: /* sysexit */
        /* For AMD SYSEXIT is not valid in long mode */
        if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        if (!PE(s) || CPL(s) != 0) {
            gen_exception_gpf(s);
        } else {
            gen_helper_sysexit(tcg_env, tcg_constant_i32(dflag - 1));
            s->base.is_jmp = DISAS_EOB_ONLY;
        }
        break;
    case 0x105: /* syscall */
        /* For Intel SYSCALL is only valid in long mode */
        if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_syscall(tcg_env, cur_insn_len_i32(s));
        /* TF handling for the syscall insn is different. The TF bit is checked
           after the syscall insn completes. This allows #DB to not be
           generated after one has entered CPL0 if TF is set in FMASK. */
        gen_eob_worker(s, false, true);
        break;
    case 0x107: /* sysret */
        /* For Intel SYSRET is only valid in long mode */
        if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        if (!PE(s) || CPL(s) != 0) {
            gen_exception_gpf(s);
        } else {
            gen_helper_sysret(tcg_env, tcg_constant_i32(dflag - 1));
            /* condition codes are modified only in long mode */
            if (LMA(s)) {
                set_cc_op(s, CC_OP_EFLAGS);
            }
            /* TF handling for the sysret insn is different. The TF bit is
               checked after the sysret insn completes. This allows #DB to be
               generated "as if" the syscall insn in userspace has just
               returned to the caller. */
            gen_eob_worker(s, false, true);
        }
        break;
    case 0x1a2: /* cpuid */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_cpuid(tcg_env);
        break;
    case 0xf4: /* hlt */
        if (check_cpl0(s)) {
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_hlt(tcg_env, cur_insn_len_i32(s));
            s->base.is_jmp = DISAS_NORETURN;
        }
        break;
    case 0x100:
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* sldt */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
            tcg_gen_ld32u_tl(s->T0, tcg_env,
                             offsetof(CPUX86State, ldt.selector));
            ot = mod == 3 ? dflag : MO_16;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 2: /* lldt */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (check_cpl0(s)) {
                gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
                gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_lldt(tcg_env, s->tmp2_i32);
            }
            break;
        case 1: /* str */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
            tcg_gen_ld32u_tl(s->T0, tcg_env,
                             offsetof(CPUX86State, tr.selector));
            ot = mod == 3 ? dflag : MO_16;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 3: /* ltr */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (check_cpl0(s)) {
                gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
                gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_ltr(tcg_env, s->tmp2_i32);
            }
            break;
        case 4: /* verr */
        case 5: /* verw */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            gen_update_cc_op(s);
            if (op == 4) {
                gen_helper_verr(tcg_env, s->T0);
            } else {
                gen_helper_verw(tcg_env, s->T0);
            }
            set_cc_op(s, CC_OP_EFLAGS);
            break;
        default:
            goto unknown_op;
        }
        break;
    case 0x101:
        modrm = x86_ldub_code(env, s);
        switch (modrm) {
        CASE_MODRM_MEM_OP(0): /* sgdt */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0,
                             tcg_env, offsetof(CPUX86State, gdt.limit));
            gen_op_st_v(s, MO_16, s->T0, s->A0);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            break;

        case 0xc8: /* monitor */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
            gen_add_A0_ds_seg(s);
            gen_helper_monitor(tcg_env, s->A0);
            break;

        case 0xc9: /* mwait */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
            s->base.is_jmp = DISAS_NORETURN;
            break;

        case 0xca: /* clac */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
                || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_reset_eflags(s, AC_MASK);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xcb: /* stac */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
                || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_set_eflags(s, AC_MASK);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(1): /* sidt */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
            gen_op_st_v(s, MO_16, s->T0, s->A0);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            break;

        case 0xd0: /* xgetbv */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
            break;

        case 0xd1: /* xsetbv */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
            if (!check_cpl0(s)) {
                break;
            }
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
            /* End TB because translation flags may change.  */
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xd8: /* VMRUN */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
                             cur_insn_len_i32(s));
            tcg_gen_exit_tb(NULL, 0);
            s->base.is_jmp = DISAS_NORETURN;
            break;

        case 0xd9: /* VMMCALL */
            if (!SVME(s)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmmcall(tcg_env);
            break;

        case 0xda: /* VMLOAD */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
            break;

        case 0xdb: /* VMSAVE */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
            break;

        case 0xdc: /* STGI */
            if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_helper_stgi(tcg_env);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xdd: /* CLGI */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_clgi(tcg_env);
            break;

        case 0xde: /* SKINIT */
            if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !PE(s)) {
                goto illegal_op;
            }
            gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
            /* If not intercepted, not implemented -- raise #UD. */
            goto illegal_op;

        case 0xdf: /* INVLPGA */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
            if (s->aflag == MO_64) {
                tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
            } else {
                tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
            }
            gen_helper_flush_page(tcg_env, s->A0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(2): /* lgdt */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
            tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
            break;
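        /*
         * Note (expository): the 0xffffff mask above reproduces the
         * architected behaviour of 16-bit operand-size LGDT, which loads
         * only a 24-bit base address; the same masking applies to LIDT
         * below.
         */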
        CASE_MODRM_MEM_OP(3): /* lidt */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
            tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
            break;

        CASE_MODRM_OP(4): /* smsw */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
            tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
            /*
             * In 32-bit mode, the higher 16 bits of the destination
             * register are undefined.  In practice CR0[31:0] is stored
             * just like in 64-bit mode.
             */
            mod = (modrm >> 6) & 3;
            ot = (mod != 3 ? MO_16 : s->dflag);
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 0xee: /* rdpkru */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
            break;
        case 0xef: /* wrpkru */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
            break;

        CASE_MODRM_OP(6): /* lmsw */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            /*
             * Only the 4 lower bits of CR0 are modified.
             * PE cannot be set to zero if already set to one.
             */
            tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
            tcg_gen_andi_tl(s->T0, s->T0, 0xf);
            tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
            tcg_gen_or_tl(s->T0, s->T0, s->T1);
            gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(7): /* invlpg */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
            gen_lea_modrm(env, s, modrm);
            gen_helper_flush_page(tcg_env, s->A0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xf8: /* swapgs */
#ifdef TARGET_X86_64
            if (CODE64(s)) {
                if (check_cpl0(s)) {
                    tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
                    tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
                                  offsetof(CPUX86State, kernelgsbase));
                    tcg_gen_st_tl(s->T0, tcg_env,
                                  offsetof(CPUX86State, kernelgsbase));
                }
                break;
            }
#endif
            goto illegal_op;

        case 0xf9: /* rdtscp */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            translator_io_start(&s->base);
            gen_helper_rdtsc(tcg_env);
            gen_helper_rdpid(s->T0, tcg_env);
            gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
            break;

        default:
            goto unknown_op;
        }
        break;

    case 0x108: /* invd */
    case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */
        if (check_cpl0(s)) {
            gen_svm_check_intercept(s, (b & 1) ? SVM_EXIT_WBINVD : SVM_EXIT_INVD);
            /* nothing to do */
        }
        break;
    case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            int d_ot;
            /* d_ot is the size of destination */
            d_ot = dflag;

            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_v_reg(s, MO_32, s->T0, rm);
                /* sign extend */
                if (d_ot == MO_64) {
                    tcg_gen_ext32s_tl(s->T0, s->T0);
                }
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            } else {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0);
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            }
        } else
#endif
        {
            TCGLabel *label1;
            TCGv t0, t1, t2;

            if (!PE(s) || VM86(s))
                goto illegal_op;
            t0 = tcg_temp_new();
            t1 = tcg_temp_new();
            t2 = tcg_temp_new();
            ot = MO_16;
            modrm = x86_ldub_code(env, s);
            reg = (modrm >> 3) & 7;
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, ot, t0, s->A0);
            } else {
                gen_op_mov_v_reg(s, ot, t0, rm);
            }
            gen_op_mov_v_reg(s, ot, t1, reg);
            tcg_gen_andi_tl(s->tmp0, t0, 3);
            tcg_gen_andi_tl(t1, t1, 3);
            tcg_gen_movi_tl(t2, 0);
            label1 = gen_new_label();
            tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1);
            tcg_gen_andi_tl(t0, t0, ~3);
            tcg_gen_or_tl(t0, t0, t1);
            tcg_gen_movi_tl(t2, CC_Z);
            gen_set_label(label1);
            if (mod != 3) {
                gen_op_st_v(s, ot, t0, s->A0);
            } else {
                gen_op_mov_reg_v(s, ot, rm, t0);
            }
            gen_compute_eflags(s);
            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
            tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
        }
        break;
    case 0x102: /* lar */
    case 0x103: /* lsl */
        {
            TCGLabel *label1;
            TCGv t0;
            if (!PE(s) || VM86(s))
                goto illegal_op;
            ot = dflag != MO_16 ? MO_32 : MO_16;
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            t0 = tcg_temp_new();
            gen_update_cc_op(s);
            if (b == 0x102) {
                gen_helper_lar(t0, tcg_env, s->T0);
            } else {
                gen_helper_lsl(t0, tcg_env, s->T0);
            }
            tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
            label1 = gen_new_label();
            tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
            gen_op_mov_reg_v(s, ot, reg, t0);
            gen_set_label(label1);
            set_cc_op(s, CC_OP_EFLAGS);
        }
        break;
    case 0x118:
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* prefetchnta */
        case 1: /* prefetchnt0 */
        case 2: /* prefetchnt0 */
        case 3: /* prefetchnt0 */
            if (mod == 3)
                goto illegal_op;
            gen_nop_modrm(env, s, modrm);
            /* nothing more to do */
            break;
        default: /* nop (multi byte) */
            gen_nop_modrm(env, s, modrm);
            break;
        }
        break;
    case 0x11a:
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (prefixes & PREFIX_REPZ) {
                /* bndcl */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcu */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                TCGv_i64 notu = tcg_temp_new_i64();
                tcg_gen_not_i64(notu, cpu_bndu[reg]);
                gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- from reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
                        tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                    /* bnd registers are now in-use */
                    gen_set_hflag(s, HF_MPX_IU_MASK);
                }
            } else if (mod != 3) {
                /* bndldx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
                    tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
                                   offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
                } else {
                    gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
                    tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
                    tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
                }
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
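        /*
         * Note (expository): in the bndldx expansion above, the 64-bit
         * helper returns the lower bound and leaves the upper bound in
         * the mmx_t0 scratch slot, while the 32-bit helper packs both
         * 32-bit bounds into one 64-bit value that is then split by the
         * ext32u/shri pair.
         */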
    case 0x11b:
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (mod != 3 && (prefixes & PREFIX_REPZ)) {
                /* bndmk */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (a.base >= 0) {
                    tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
                    if (!CODE64(s)) {
                        tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
                    }
                } else if (a.base == -1) {
                    /* no base register has lower bound of 0 */
                    tcg_gen_movi_i64(cpu_bndl[reg], 0);
                } else {
                    /* rip-relative generates #ud */
                    goto illegal_op;
                }
                tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
                if (!CODE64(s)) {
                    tcg_gen_ext32u_tl(s->A0, s->A0);
                }
                tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
                break;
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcn */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- to reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
                        tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                }
            } else if (mod != 3) {
                /* bndstx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndstx64(tcg_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                } else {
                    gen_helper_bndstx32(tcg_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                }
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
        modrm = x86_ldub_code(env, s);
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
        if (!check_cpl0(s)) {
            break;
        }
        modrm = x86_ldub_code(env, s);
        /*
         * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
         * AMD documentation (24594.pdf) and testing of Intel 386 and 486
         * processors all show that the mod bits are assumed to be 1's,
         * regardless of actual values.
         */
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        switch (reg) {
        case 0:
            if ((prefixes & PREFIX_LOCK) &&
                (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
                reg = 8;
            }
            break;
        case 2:
        case 3:
        case 4:
        case 8:
            break;
        default:
            goto unknown_op;
        }
        ot  = (CODE64(s) ? MO_64 : MO_32);
        translator_io_start(&s->base);
        if (b & 2) {
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
            gen_op_mov_v_reg(s, ot, s->T0, rm);
            gen_helper_write_crN(tcg_env, tcg_constant_i32(reg), s->T0);
            s->base.is_jmp = DISAS_EOB_NEXT;
        } else {
            gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
            gen_helper_read_crN(s->T0, tcg_env, tcg_constant_i32(reg));
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        }
        break;

    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
        if (check_cpl0(s)) {
            modrm = x86_ldub_code(env, s);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (CODE64(s))
                ot = MO_64;
            else
                ot = MO_32;
            if (reg >= 8) {
                goto illegal_op;
            }
            if (b & 2) {
                gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
                gen_op_mov_v_reg(s, ot, s->T0, rm);
                tcg_gen_movi_i32(s->tmp2_i32, reg);
                gen_helper_set_dr(tcg_env, s->tmp2_i32, s->T0);
                s->base.is_jmp = DISAS_EOB_NEXT;
            } else {
                gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
                tcg_gen_movi_i32(s->tmp2_i32, reg);
                gen_helper_get_dr(s->T0, tcg_env, s->tmp2_i32);
                gen_op_mov_reg_v(s, ot, rm, s->T0);
            }
        }
        break;
    case 0x106: /* clts */
        if (check_cpl0(s)) {
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
            gen_helper_clts(tcg_env);
            /* abort block because static cpu state changed */
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
        break;
        /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
            goto illegal_op;
        ot = mo_64_32(dflag);
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | REX_R(s);
        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0x1ae:
        modrm = x86_ldub_code(env, s);
        switch (modrm) {
        CASE_MODRM_MEM_OP(0): /* fxsave */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxsave(tcg_env, s->A0);
            break;

        CASE_MODRM_MEM_OP(1): /* fxrstor */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxrstor(tcg_env, s->A0);
            break;

        CASE_MODRM_MEM_OP(2): /* ldmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
            gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
            break;

        CASE_MODRM_MEM_OP(3): /* stmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_helper_update_mxcsr(tcg_env);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
            gen_op_st_v(s, MO_32, s->T0, s->A0);
            break;

        CASE_MODRM_MEM_OP(4): /* xsave */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xsave(tcg_env, s->A0, s->tmp1_i64);
            break;

        CASE_MODRM_MEM_OP(5): /* xrstor */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xrstor(tcg_env, s->A0, s->tmp1_i64);
            /* XRSTOR is how MPX is enabled, which changes how
               we translate.  Thus we need to end the TB.  */
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clwb */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
                    goto illegal_op;
                }
                gen_nop_modrm(env, s, modrm);
            } else {
                /* xsaveopt */
                if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                    || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
                    || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                      cpu_regs[R_EDX]);
                gen_helper_xsaveopt(tcg_env, s->A0, s->tmp1_i64);
            }
            break;

        CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clflushopt */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
                    goto illegal_op;
                }
            } else {
                /* clflush */
                if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
                    || !(s->cpuid_features & CPUID_CLFLUSH)) {
                    goto illegal_op;
                }
            }
            gen_nop_modrm(env, s, modrm);
            break;
        case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
        case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
        case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
        case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
            if (CODE64(s)
                && (prefixes & PREFIX_REPZ)
                && !(prefixes & PREFIX_LOCK)
                && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
                TCGv base, treg, src, dst;

                /* Preserve hflags bits by testing CR4 at runtime.  */
                tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
                gen_helper_cr4_testbit(tcg_env, s->tmp2_i32);

                base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
                treg = cpu_regs[(modrm & 7) | REX_B(s)];

                if (modrm & 0x10) {
                    /* wr*base */
                    dst = base, src = treg;
                } else {
                    /* rd*base */
                    dst = treg, src = base;
                }

                if (s->dflag == MO_32) {
                    tcg_gen_ext32u_tl(dst, src);
                } else {
                    tcg_gen_mov_tl(dst, src);
                }
                break;
            }
            goto unknown_op;

        case 0xf8: /* sfence / pcommit */
            if (prefixes & PREFIX_DATA) {
                /* pcommit */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
                    || (prefixes & PREFIX_LOCK)) {
                    goto illegal_op;
                }
                break;
            }
            /* fallthru */
        case 0xf9 ... 0xff: /* sfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0xe8 ... 0xef: /* lfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
            break;
        case 0xf0 ... 0xf7: /* mfence */
            if (!(s->cpuid_features & CPUID_SSE2)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;

        default:
            goto unknown_op;
        }
        break;
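        /*
         * Note (expository): the fence mapping above is direct: SFENCE
         * orders stores (TCG_MO_ST_ST), LFENCE orders loads
         * (TCG_MO_LD_LD), and MFENCE orders everything (TCG_MO_ALL), each
         * with sequentially consistent barrier semantics (TCG_BAR_SC).
         */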
    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
            goto illegal_op;
#ifdef CONFIG_USER_ONLY
        /* we should not be in SMM mode */
        g_assert_not_reached();
#else
        gen_update_cc_op(s);
        gen_update_eip_next(s);
        gen_helper_rsm(tcg_env);
#endif /* CONFIG_USER_ONLY */
        s->base.is_jmp = DISAS_EOB_ONLY;
        break;
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ)
            goto illegal_op;
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
            goto illegal_op;

        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        if (s->prefix & PREFIX_DATA) {
            ot = MO_16;
        } else {
            ot = mo_64_32(dflag);
        }

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, s->T0);
        tcg_gen_mov_tl(cpu_cc_src, s->T0);
        tcg_gen_ctpop_tl(s->T0, s->T0);
        gen_op_mov_reg_v(s, ot, reg, s->T0);

        set_cc_op(s, CC_OP_POPCNT);
        break;
    case 0x10e ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x179:
    case 0x17c ... 0x17f:
    case 0x1c2:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        disas_insn_new(s, cpu, b);
        break;
    default:
        goto unknown_op;
    }
    return true;
 illegal_op:
    gen_illegal_opcode(s);
    return true;
 unknown_op:
    gen_unknown_opcode(env, s);
    return true;
}
void tcg_x86_init(void)
{
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
        [R_EAX] = "rax",
        [R_EBX] = "rbx",
        [R_ECX] = "rcx",
        [R_EDX] = "rdx",
        [R_ESI] = "rsi",
        [R_EDI] = "rdi",
        [R_EBP] = "rbp",
        [R_ESP] = "rsp",
        [8]  = "r8",
        [9]  = "r9",
        [10] = "r10",
        [11] = "r11",
        [12] = "r12",
        [13] = "r13",
        [14] = "r14",
        [15] = "r15",
#else
        [R_EAX] = "eax",
        [R_EBX] = "ebx",
        [R_ECX] = "ecx",
        [R_EDX] = "edx",
        [R_ESI] = "esi",
        [R_EDI] = "edi",
        [R_EBP] = "ebp",
        [R_ESP] = "esp",
#endif
    };
    static const char eip_name[] = {
#ifdef TARGET_X86_64
        "rip"
#else
        "eip"
#endif
    };
    static const char seg_base_names[6][8] = {
        [R_CS] = "cs_base",
        [R_DS] = "ds_base",
        [R_ES] = "es_base",
        [R_FS] = "fs_base",
        [R_GS] = "gs_base",
        [R_SS] = "ss_base",
    };
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    };
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    };
    int i;

    cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");
    cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);

    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUX86State, regs[i]),
                                         reg_names[i]);
    }

    for (i = 0; i < 6; ++i) {
        cpu_seg_base[i]
            = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUX86State, segs[i].base),
                                 seg_base_names[i]);
    }

    for (i = 0; i < 4; ++i) {
        cpu_bndl[i]
            = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
                                     bnd_regl_names[i]);
        cpu_bndu[i]
            = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
                                     bnd_regu_names[i]);
    }
}
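/*
 * Note (expository): the globals registered in tcg_x86_init() above are
 * backed by fields of CPUX86State, so generated code reads and writes the
 * CPU state in place and TCG may cache the values in host registers
 * within a translation block.
 */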
static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUX86State *env = cpu_env(cpu);
    uint32_t flags = dc->base.tb->flags;
    uint32_t cflags = tb_cflags(dc->base.tb);
    int cpl = (flags >> HF_CPL_SHIFT) & 3;
    int iopl = (flags >> IOPL_SHIFT) & 3;

    dc->cs_base = dc->base.tb->cs_base;
    dc->pc_save = dc->base.pc_next;
    dc->flags = flags;
#ifndef CONFIG_USER_ONLY
    dc->cpl = cpl;
    dc->iopl = iopl;
#endif

    /* We make some simplifying assumptions; validate they're correct. */
    g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
    g_assert(CPL(dc) == cpl);
    g_assert(IOPL(dc) == iopl);
    g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
    g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
    g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
    g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
    g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
    g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
    g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
    g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));

    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = cpu_mmu_index(env, false);
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
    dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
    dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
                    (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
    /*
     * If jmp_opt, we want to handle each string instruction individually.
     * For icount also disable repz optimization so that each iteration
     * is accounted separately.
     */
    dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);

    dc->T0 = tcg_temp_new();
    dc->T1 = tcg_temp_new();
    dc->A0 = tcg_temp_new();

    dc->tmp0 = tcg_temp_new();
    dc->tmp1_i64 = tcg_temp_new_i64();
    dc->tmp2_i32 = tcg_temp_new_i32();
    dc->tmp3_i32 = tcg_temp_new_i32();
    dc->tmp4 = tcg_temp_new();
    dc->cc_srcT = tcg_temp_new();
}
static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_arg = dc->base.pc_next;

    dc->prev_insn_end = tcg_last_op();
    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_arg &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(pc_arg, dc->cc_op);
}
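/*
 * Note (expository): with CF_PCREL, only the offset within the guest page
 * is recorded above, which is what allows a translated block to be reused
 * when the same code is mapped at a different virtual address.
 */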
static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * Detect entry into the vsyscall page and invoke the syscall.
     */
    if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
        gen_exception(dc, EXCP_VSYSCALL);
        dc->base.pc_next = dc->pc + 1;
        return;
    }
#endif

    if (disas_insn(dc, cpu)) {
        target_ulong pc_next = dc->pc;
        dc->base.pc_next = pc_next;

        if (dc->base.is_jmp == DISAS_NEXT) {
            if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
                /*
                 * If single step mode, we generate only one instruction and
                 * generate an exception.
                 * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
                 * the flag and abort the translation to give the irqs a
                 * chance to happen.
                 */
                dc->base.is_jmp = DISAS_EOB_NEXT;
            } else if (!is_same_page(&dc->base, pc_next)) {
                dc->base.is_jmp = DISAS_TOO_MANY;
            }
        }
    }
}

static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        gen_update_cc_op(dc);
        gen_jmp_rel_csize(dc, 0, 0);
        break;
    case DISAS_EOB_NEXT:
        gen_update_cc_op(dc);
        gen_update_eip_cur(dc);
        /* fall through */
    case DISAS_EOB_ONLY:
        gen_eob(dc);
        break;
    case DISAS_EOB_INHIBIT_IRQ:
        gen_update_cc_op(dc);
        gen_update_eip_cur(dc);
        gen_eob_inhibit_irq(dc, true);
        break;
    case DISAS_JUMP:
        gen_jr(dc);
        break;
    default:
        g_assert_not_reached();
    }
}

static void i386_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cpu, FILE *logfile)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
    target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
}

static const TranslatorOps i386_tr_ops = {
    .init_disas_context = i386_tr_init_disas_context,
    .tb_start           = i386_tr_tb_start,
    .insn_start         = i386_tr_insn_start,
    .translate_insn     = i386_tr_translate_insn,
    .tb_stop            = i386_tr_tb_stop,
    .disas_log          = i386_tr_disas_log,
};

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
}