/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"
#include "fpu/softfloat.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "helper-tcg.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20
#define PREFIX_REX    0x40

/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
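
/*
 * Illustrative expansion (not in the original source): CASE_MODRM_MEM_OP(0)
 * matches every modrm byte whose op field is 0 and whose mod field is not 3,
 * i.e. it expands to
 *
 *     case 0x00 ... 0x07:
 *     case 0x40 ... 0x47:
 *     case 0x80 ... 0x87:
 *
 * CASE_MODRM_OP(0) additionally matches the register forms 0xc0 ... 0xc7.
 */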
//#define MACRO_TEST   1

/* global register indexes */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
static TCGv cpu_eip;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];

typedef struct DisasContext {
    DisasContextBase base;

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */

    int8_t override; /* -1 if no override, else R_CS, R_DS, etc */

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement.  */
    uint8_t popl_esp_hack; /* for correct popl with esp base handling */
    uint8_t rip_offset; /* only used in x86_64, but left for simplicity */

    bool vex_w; /* used by AVX even on 32-bit processors */
    bool jmp_opt; /* use direct block chaining for direct jumps */
    bool repz_opt; /* optimize jumps within repz instructions */

    CCOp cc_op;  /* current CC operation */
    int mem_index; /* select memory access functions */
    uint32_t flags; /* all execution flags */

    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_7_0_ecx_features;
    int cpuid_xsave_features;

    /* TCG local temps */

    /* TCG local register indexes (only used inside old micro ops) */

    TCGOp *prev_insn_end;
} DisasContext;

#define DISAS_EOB_ONLY         DISAS_TARGET_0
#define DISAS_EOB_NEXT         DISAS_TARGET_1
#define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_2
#define DISAS_JUMP             DISAS_TARGET_3

/* The environment in which user-only runs is constrained. */
#ifdef CONFIG_USER_ONLY
#define PE(S)     true
#define CPL(S)    3
#define IOPL(S)   0
#define SVME(S)   false
#define GUEST(S)  false
#else
#define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
#define CPL(S)    ((S)->cpl)
#define IOPL(S)   ((S)->iopl)
#define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
#define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
#endif

#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
#define VM86(S)   false
#define CODE32(S) true
#define SS32(S)   true
#define ADDSEG(S) false
#else
#define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
#define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
#endif

#if !defined(TARGET_X86_64)
#define CODE64(S) false
#define LMA(S)    false
#elif defined(CONFIG_USER_ONLY)
#define CODE64(S) true
#define LMA(S)    true
#else
#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
#define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
#endif

#ifdef TARGET_X86_64
#define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
#define REX_W(S)       ((S)->vex_w)
#define REX_R(S)       ((S)->rex_r + 0)
#define REX_X(S)       ((S)->rex_x + 0)
#define REX_B(S)       ((S)->rex_b + 0)
#else
#define REX_PREFIX(S)  false
#define REX_W(S)       false
#define REX_R(S)       0
#define REX_X(S)       0
#define REX_B(S)       0
#endif

/*
 * Many sysemu-only helpers are not reachable for user-only.
 * Define stub generators here, so that we need not either sprinkle
 * ifdefs through the translator, nor provide the helper function.
 */
#define STUB_HELPER(NAME, ...) \
    static inline void gen_helper_##NAME(__VA_ARGS__) \
    { qemu_build_not_reached(); }
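
/*
 * For illustration (not part of the original file), STUB_HELPER(rdmsr,
 * TCGv_env env) expands to
 *
 *     static inline void gen_helper_rdmsr(TCGv_env env)
 *     { qemu_build_not_reached(); }
 *
 * so a user-only build fails if a call to the stub is ever reachable:
 * qemu_build_not_reached() breaks the build unless the compiler can prove
 * the call dead and optimize it away.
 */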
#ifdef CONFIG_USER_ONLY
STUB_HELPER(clgi, TCGv_env env)
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(rdmsr, TCGv_env env)
STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(wrmsr, TCGv_env env)
#endif

static void gen_eob(DisasContext *s);
static void gen_jr(DisasContext *s);
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
static void gen_op(DisasContext *s1, int op, MemOp ot, int d);
static void gen_exception_gpf(DisasContext *s);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

/* I386 int registers */
enum {
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

#define USES_CC_DST  1
#define USES_CC_SRC  2
#define USES_CC_SRC2 4
#define USES_CC_SRCT 8

/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_POPCNT] = USES_CC_SRC,
};
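
/*
 * Worked example (illustration only): moving from CC_OP_ADDB, where the
 * live set is USES_CC_DST | USES_CC_SRC, to CC_OP_LOGICB, where it is
 * USES_CC_DST, gives dead = (DST | SRC) & ~DST = USES_CC_SRC, so
 * set_cc_op() below may discard cpu_cc_src.
 */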
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if HOST_BIG_ENDIAN
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4".  Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
{
    /* Any time the REX prefix is present, byte registers are uniform */
    if (reg < 4 || REX_PREFIX(s)) {
        return false;
    }
    return true;
}
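
/*
 * Example (illustration only): with no REX prefix, reg = 4 in a byte
 * instruction names AH, i.e. bits 15..8 of cpu_regs[R_EAX], so
 * byte_reg_is_xH() returns true.  With a REX prefix the same encoding
 * names SPL instead and the function returns false.
 */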
/* Select the size of a push/pop operation.  */
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select the size of the stack pointer.  */
static inline MemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline MemOp mo_64_32(MemOp ot)
{
#ifdef TARGET_X86_64
    return ot == MO_64 ? MO_64 : MO_32;
#else
    return MO_32;
#endif
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline MemOp mo_b_d(int b, MemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline MemOp mo_b_d32(int b, MemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}
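
/*
 * Examples (illustration only): for "add r/m8, r8" (opcode 0x00),
 * mo_b_d(0x00, MO_32) yields MO_8; for "add r/m32, r32" (opcode 0x01)
 * it yields the full operand size MO_32.  mo_b_d32() behaves the same
 * except that port I/O never exceeds 32 bits, so an MO_64 operand size
 * is capped to MO_32.
 */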
/* Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register and return the
 * register's TCGv.
 *
 * If DEST is not NULL, store the result into DEST and return the
 * TCGv of DEST.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg,
                                 TCGv dest, TCGv t0)
{
    switch (ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}
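
/*
 * Example (illustration only): writing to AH via
 * gen_op_deposit_reg_v(s, MO_8, 4, NULL, t0) deposits t0 into bits
 * 15..8 of cpu_regs[R_EAX] and leaves all other bits intact, whereas a
 * MO_32 write replaces the low half and zero-extends on x86_64.
 */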
static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}

void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(s->A0, s->A0);
    }
}

static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
}

void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, s->T0, s->A0);
    } else {
        gen_op_mov_reg_v(s, idx, d, s->T0);
    }
}

static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next - s->cs_base);
    }
    s->pc_save = s->base.pc_next;
}

static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
    } else {
        tcg_gen_movi_tl(cpu_eip, s->pc - s->cs_base);
    }
    s->pc_save = s->pc;
}

static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}

static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}

static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
     * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
     * why passing a 32-bit value isn't broken.  To avoid using this where
     * we shouldn't, return -1 in 64-bit mode so that execution goes into
     * the weeds quickly.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}

static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_tl(s->pc - s->cs_base);
    }
}

static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else {
        return tcg_constant_tl(s->base.pc_next - s->cs_base);
    }
}

/* Compute SEG:REG into A0.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(s->A0, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(s->A0, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        if (ovr_seg < 0) {
            ovr_seg = def_seg;
        }
        tcg_gen_ext16u_tl(s->A0, a0);
        /* ADDSEG will only be false in 16-bit mode for LEA.  */
        if (!ADDSEG(s)) {
            return;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(s->A0, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(s->A0, a0);
            tcg_gen_add_tl(s->A0, s->A0, seg);
        } else {
            tcg_gen_add_tl(s->A0, a0, seg);
            tcg_gen_ext32u_tl(s->A0, s->A0);
        }
    }
}

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}

static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot)
{
    tcg_gen_ld32s_tl(s->T0, cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(s->T0, s->T0, ot);
}

static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    switch (size) {
    case MO_8:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case MO_16:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case MO_32:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
        return src;
    }
}

static void gen_extu(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]);
    gen_extu(s->aflag, s->tmp0);
    tcg_gen_brcondi_tl(cond, s->tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}

static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}

static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, cpu_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, cpu_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, cpu_env, n);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(cpu_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(cpu_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(cpu_env, v, n);
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Validate that access to [port, port + 1<<ot) is allowed.
 * Raise #GP, or VMM exit if not.
 */
static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
                         uint32_t svm_flags)
{
#ifdef CONFIG_USER_ONLY
    /*
     * We do not implement the ioperm(2) syscall, so the TSS check
     * will always fail.
     */
    gen_exception_gpf(s);
    return false;
#else
    if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
        gen_helper_check_io(cpu_env, port, tcg_constant_i32(1 << ot));
    }
    if (GUEST(s)) {
        gen_update_eip_cur(s);
        if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
            svm_flags |= SVM_IOIO_REP_MASK;
        }
        svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
        gen_helper_svm_check_io(cpu_env, port,
                                tcg_constant_i32(svm_flags),
                                cur_insn_len_i32(s));
    }
    return true;
#endif
}

static void gen_movs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}
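
/*
 * Sketch (illustration only): for a plain MOVSB, gen_movs(s, MO_8)
 * emits roughly
 *
 *     A0 = seg_base[DS or override] + ESI;  T0 = load8(A0);
 *     A0 = seg_base[ES] + EDI;              store8(A0, T0);
 *     T0 = df << 0;  ESI += T0;  EDI += T0;
 *
 * where df is +1 or -1 according to EFLAGS.DF.
 */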
static void gen_op_update1_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update2_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update3_cc(DisasContext *s, TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
{
    tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);
}

static void gen_op_update_neg_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
    tcg_gen_neg_tl(cpu_cc_src, s->T0);
    tcg_gen_movi_tl(s->cc_srcT, 0);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    zero = NULL;
    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);
}

typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;

/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = t1 == cpu_cc_src ? s->tmp0 : reg;
        tcg_gen_mov_tl(t0, s->cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB.  */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}

/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
                             .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}
/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_extu(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_exts(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}

static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem.  */
static TCGLabel *gen_jz_ecx_string(DisasContext *s)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s, l1);
    gen_set_label(l2);
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    return l2;
}

static void gen_stos(DisasContext *s, MemOp ot)
{
    gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
}

static void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_cmps(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}

static void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
    gen_bpt_io(s, s->tmp2_i32, ot);
}

static void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_bpt_io(s, s->tmp2_i32, ot);
}

/* Generate jumps to current or next instruction */
static void gen_repz(DisasContext *s, MemOp ot,
                     void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    /*
     * A loop would cause two single step exceptions if ECX = 1
     * before rep string_insn
     */
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \
    { gen_repz(s, ot, gen_##op); }
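
/*
 * Illustrative expansion (not in the original source): GEN_REPZ(movs)
 * defines
 *
 *     static inline void gen_repz_movs(DisasContext *s, MemOp ot)
 *     { gen_repz(s, ot, gen_movs); }
 *
 * i.e. one trampoline per string instruction wrapping the shared REP
 * loop skeleton above.
 */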
static void gen_repz2(DisasContext *s, MemOp ot, int nz,
                      void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    gen_update_cc_op(s);
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ2(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
    { gen_repz2(s, ot, nz, gen_##op); }

static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(cpu_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(cpu_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(cpu_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(cpu_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(cpu_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(cpu_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_constant_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(cpu_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(cpu_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(cpu_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(cpu_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(cpu_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(cpu_env, tmp);
        break;
    }
}

static void gen_exception(DisasContext *s, int trapno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(trapno));
    s->base.is_jmp = DISAS_NORETURN;
}

/* Generate #UD for the current instruction.  The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode.  */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP);
}

/* Generate #GP for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
{
    gen_exception(s, EXCP0D_GPF);
}

/* Check for cpl == 0; if not, raise #GP and return false. */
static bool check_cpl0(DisasContext *s)
{
    if (CPL(s) == 0) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* If vm86, check for iopl == 3; if not, raise #GP and return false. */
static bool check_vm86_iopl(DisasContext *s)
{
    if (!VM86(s) || IOPL(s) == 3) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* Check for iopl allowing access; if not, raise #GP and return false. */
static bool check_iopl(DisasContext *s)
{
    if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, MemOp ot, int d)
{
    if (d != OR_TMP0) {
        if (s1->prefix & PREFIX_LOCK) {
            /* Lock prefix when destination is not memory.  */
            gen_illegal_opcode(s1);
            return;
        }
        gen_op_mov_v_reg(s1, ot, s1->T0, d);
    } else if (!(s1->prefix & PREFIX_LOCK)) {
        gen_op_ld_v(s1, ot, s1->T0, s1->A0);
    }
    switch (op) {
    case OP_ADCL:
        gen_compute_eflags_c(s1, s1->tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        gen_compute_eflags_c(s1, s1->tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4);
            tcg_gen_neg_tl(s1->T0, s1->T0);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_neg_tl(s1->T0, s1->T1);
            tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
            tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1);
        } else {
            tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    case OP_ANDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_and_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_ORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1,
                                       s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_or_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_XORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_CMPL:
        tcg_gen_mov_tl(cpu_cc_src, s1->T1);
        tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
        tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    }
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, MemOp ot, int d, int c)
{
    if (s1->prefix & PREFIX_LOCK) {
        if (d != OR_TMP0) {
            /* Lock prefix when destination is not memory */
            gen_illegal_opcode(s1);
            return;
        }
        tcg_gen_movi_tl(s1->T0, c > 0 ? 1 : -1);
        tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                    s1->mem_index, ot | MO_LE);
    } else {
        if (d != OR_TMP0) {
            gen_op_mov_v_reg(s1, ot, s1->T0, d);
        } else {
            gen_op_ld_v(s1, ot, s1->T0, s1->A0);
        }
        tcg_gen_addi_tl(s1->T0, s1->T0, (c > 0 ? 1 : -1));
        gen_op_st_rm_T0_A0(s1, ot, d);
    }

    gen_compute_eflags_c(s1, cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, s1->T0);
    set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
}
static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents.  */
    z_tl = tcg_constant_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
        oldop = s->tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_constant_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}

static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    tcg_gen_andi_tl(s->T1, s->T1, mask);
    tcg_gen_subi_tl(s->tmp0, s->T1, 1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, s->T0);
            tcg_gen_sar_tl(s->tmp0, s->T0, s->tmp0);
            tcg_gen_sar_tl(s->T0, s->T0, s->T1);
        } else {
            gen_extu(ot, s->T0);
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_tl(s->T0, s->T0, s->T1);
        }
    } else {
        tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
        tcg_gen_shl_tl(s->T0, s->T0, s->T1);
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right);
}

static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, s->T0);
                tcg_gen_sari_tl(s->tmp4, s->T0, op2 - 1);
                tcg_gen_sari_tl(s->T0, s->T0, op2);
            } else {
                gen_extu(ot, s->T0);
                tcg_gen_shri_tl(s->tmp4, s->T0, op2 - 1);
                tcg_gen_shri_tl(s->T0, s->T0, op2);
            }
        } else {
            tcg_gen_shli_tl(s->tmp4, s->T0, op2 - 1);
            tcg_gen_shli_tl(s->T0, s->T0, op2);
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, s->T0);
        set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    }
}
static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
    TCGv_i32 t0, t1;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    tcg_gen_andi_tl(s->T1, s->T1, mask);

    switch (ot) {
    case MO_8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_tl(s->T0, s->T0);
        tcg_gen_muli_tl(s->T0, s->T0, 0x01010101);
        goto do_long;
    case MO_16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_tl(s->T0, s->T0, s->T0, 16, 16);
        goto do_long;
    do_long:
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
        if (is_right) {
            tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
        } else {
            tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
        }
        tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
        break;
#endif
    default:
        if (is_right) {
            tcg_gen_rotr_tl(s->T0, s->T0, s->T1);
        } else {
            tcg_gen_rotl_tl(s->T0, s->T0, s->T1);
        }
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* We'll need the flags computed into CC_SRC.  */
    gen_compute_eflags(s);

    /* The value that was "rotated out" is now present at the other end
       of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
       since we've computed the flags into CC_SRC, these variables are
       currently dead.  */
    if (is_right) {
        tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
        tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
    } else {
        tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
        tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
    }
    tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
    tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);

    /* Now conditionally store the new CC_OP value.  If the shift count
       is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
       Otherwise reuse CC_OP_ADCOX, which has the C and O flags split out
       exactly as we computed above.  */
    t0 = tcg_constant_i32(0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, s->T1);
    tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX);
    tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
                        s->tmp2_i32, s->tmp3_i32);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}

static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
                          int is_right)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);
    int shift;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_32:
            tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
            if (is_right) {
                tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, op2);
            } else {
                tcg_gen_rotli_i32(s->tmp2_i32, s->tmp2_i32, op2);
            }
            tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
            break;
#endif
        default:
            if (is_right) {
                tcg_gen_rotri_tl(s->T0, s->T0, op2);
            } else {
                tcg_gen_rotli_tl(s->T0, s->T0, op2);
            }
            break;
        case MO_8:
            mask = 7;
            goto do_shifts;
        case MO_16:
            mask = 15;
        do_shifts:
            shift = op2 & mask;
            if (is_right) {
                shift = mask + 1 - shift;
            }
            gen_extu(ot, s->T0);
            tcg_gen_shli_tl(s->tmp0, s->T0, shift);
            tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift);
            tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
            break;
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    if (op2 != 0) {
        /* Compute the flags into CC_SRC.  */
        gen_compute_eflags(s);

        /* The value that was "rotated out" is now present at the other end
           of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
           since we've computed the flags into CC_SRC, these variables are
           currently dead.  */
        if (is_right) {
            tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
            tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
        } else {
            tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
            tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
        }
        tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
        tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
        set_cc_op(s, CC_OP_ADCOX);
    }
}
/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
                           int is_right)
{
    gen_compute_eflags(s);
    assert(s->cc_op == CC_OP_EFLAGS);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    if (is_right) {
        switch (ot) {
        case MO_8:
            gen_helper_rcrb(s->T0, cpu_env, s->T0, s->T1);
            break;
        case MO_16:
            gen_helper_rcrw(s->T0, cpu_env, s->T0, s->T1);
            break;
        case MO_32:
            gen_helper_rcrl(s->T0, cpu_env, s->T0, s->T1);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rcrq(s->T0, cpu_env, s->T0, s->T1);
            break;
#endif
        default:
            g_assert_not_reached();
        }
    } else {
        switch (ot) {
        case MO_8:
            gen_helper_rclb(s->T0, cpu_env, s->T0, s->T1);
            break;
        case MO_16:
            gen_helper_rclw(s->T0, cpu_env, s->T0, s->T1);
            break;
        case MO_32:
            gen_helper_rcll(s->T0, cpu_env, s->T0, s->T1);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rclq(s->T0, cpu_env, s->T0, s->T1);
            break;
#endif
        default:
            g_assert_not_reached();
        }
    }
    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);
}

/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
                             bool is_right, TCGv count_in)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);
    TCGv count;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
        if (is_right) {
            tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
            tcg_gen_mov_tl(s->T1, s->T0);
            tcg_gen_mov_tl(s->T0, s->tmp0);
        } else {
            tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
        }
        /*
         * If TARGET_X86_64 defined then fall through into MO_32 case,
         * otherwise fall through default case.
         */
    case MO_32:
#ifdef TARGET_X86_64
        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
            tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_i64(s->T0, s->T0, count);
        } else {
            tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
            tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shl_i64(s->T0, s->T0, count);
            tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
            tcg_gen_shri_i64(s->T0, s->T0, 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shr_tl(s->T0, s->T0, count);
            tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
        } else {
            tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour.  */
                tcg_gen_subfi_tl(s->tmp4, 33, count);
                tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
                tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
            }

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shl_tl(s->T0, s->T0, count);
            tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
        }
        tcg_gen_movi_tl(s->tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
                           s->tmp4, s->T1);
        tcg_gen_or_tl(s->T0, s->T0, s->T1);
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
}
static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s)
{
    if (s != OR_TMP1) {
        gen_op_mov_v_reg(s1, ot, s1->T1, s);
    }
    switch (op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}

static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c)
{
    switch (op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        tcg_gen_movi_tl(s1->T1, c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}
#define X86_MAX_INSN_LENGTH 15
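
/*
 * Example (illustration only): an instruction can overflow the 15-byte
 * limit with redundant prefixes, e.g. fifteen 0x66 prefixes in front of
 * a one-byte opcode.  Once cur_insn_len() exceeds X86_MAX_INSN_LENGTH,
 * advance_pc() below unwinds via siglongjmp so the caller can raise the
 * appropriate fault.
 */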
static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
{
    uint64_t pc = s->pc;

    /* This is a subsequent insn that crosses a page boundary.  */
    if (s->base.num_insns > 1 &&
        !is_same_page(&s->base, s->pc + num_bytes - 1)) {
        siglongjmp(s->jmpbuf, 2);
    }

    s->pc += num_bytes;
    if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
        /* If the instruction's 16th byte is on a different page than the 1st, a
         * page fault on the second page wins over the general protection fault
         * caused by the instruction being too long.
         * This can happen even if the operand is only one byte long!
         */
        if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
            volatile uint8_t unused =
                cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
            (void)unused;
        }
        siglongjmp(s->jmpbuf, 1);
    }

    return pc;
}

static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldub(env, &s->base, advance_pc(env, s, 1));
}

static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
{
    return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}

static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
{
    return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}

static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldl(env, &s->base, advance_pc(env, s, 4));
}

#ifdef TARGET_X86_64
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldq(env, &s->base, advance_pc(env, s, 8));
}
#endif
/* Decompose an address.  */

typedef struct AddressParts {
    int def_seg;
    int base;
    int index;
    int scale;
    target_long disp;
} AddressParts;
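
/*
 * Example (illustration only): in 32-bit mode, the modrm/sib byte pair
 * 0x44 0x8d followed by disp8 = 8, i.e. the operand "[ebp + ecx*4 + 8]",
 * decomposes below to
 * { .def_seg = R_SS, .base = R_EBP, .index = R_ECX, .scale = 2, .disp = 8 }.
 */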
static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
                                    int modrm)
{
    int def_seg, base, index, scale, mod, rm;
    target_long disp;
    bool havesib;

    def_seg = R_DS;
    index = -1;
    scale = 0;
    disp = 0;

    mod = (modrm >> 6) & 3;
    rm = modrm & 7;
    base = rm | REX_B(s);

    if (mod == 3) {
        /* Normally filtered out earlier, but including this path
           simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
        goto done;
    }

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        havesib = false;
        if (rm == 4) {
            int code = x86_ldub_code(env, s);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            if (index == 4) {
                index = -1; /* no index */
            }
            base = (code & 7) | REX_B(s);
            havesib = true;
        }

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)x86_ldl_code(env, s);
                if (CODE64(s) && !havesib) {
                    base = -2;
                    disp += s->pc + s->rip_offset;
                }
            }
            break;
        case 1:
            disp = (int8_t)x86_ldub_code(env, s);
            break;
        default:
        case 2:
            disp = (int32_t)x86_ldl_code(env, s);
            break;
        }

        /* For correct popl handling with esp.  */
        if (base == R_ESP && s->popl_esp_hack) {
            disp += s->popl_esp_hack;
        }
        if (base == R_EBP || base == R_ESP) {
            def_seg = R_SS;
        }
        break;

    case MO_16:
        if (mod == 0) {
            if (rm == 6) {
                base = -1;
                disp = x86_lduw_code(env, s);
                break;
            }
        } else if (mod == 1) {
            disp = (int8_t)x86_ldub_code(env, s);
        } else {
            disp = (int16_t)x86_lduw_code(env, s);
        }
        break;

    default:
        g_assert_not_reached();
    }

 done:
    return (AddressParts){ def_seg, base, index, scale, disp };
}
/* Compute the address, with a minimum number of TCG ops.  */
static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
{
    TCGv ea = NULL;

    if (a.index >= 0 && !is_vsib) {
        if (a.scale == 0) {
            ea = cpu_regs[a.index];
        } else {
            tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
            ea = s->A0;
        }
        if (a.base >= 0) {
            tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
            ea = s->A0;
        }
    } else if (a.base >= 0) {
        ea = cpu_regs[a.base];
    }
    if (!ea) {
        if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
            /* With cpu_eip ~= pc_save, the expression is pc-relative. */
            tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
        } else {
            tcg_gen_movi_tl(s->A0, a.disp);
        }
        ea = s->A0;
    } else if (a.disp != 0) {
        tcg_gen_addi_tl(s->A0, ea, a.disp);
        ea = s->A0;
    }

    return ea;
}

static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    AddressParts a = gen_lea_modrm_0(env, s, modrm);
    TCGv ea = gen_lea_modrm_1(s, a, false);
    gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
}

static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    (void)gen_lea_modrm_0(env, s, modrm);
}

/* Used for BNDCL, BNDCU, BNDCN.  */
static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
                      TCGCond cond, TCGv_i64 bndv)
{
    AddressParts a = gen_lea_modrm_0(env, s, modrm);
    TCGv ea = gen_lea_modrm_1(s, a, false);

    tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
    if (!CODE64(s)) {
        tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
    }
    tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
    tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
    gen_helper_bndck(cpu_env, s->tmp2_i32);
}

/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override);
}
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
                           MemOp ot, int reg, int is_store)
{
    int mod, rm;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0) {
                gen_op_mov_v_reg(s, ot, s->T0, reg);
            }
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
            if (reg != OR_TMP0) {
                gen_op_mov_reg_v(s, ot, reg, s->T0);
            }
        }
    } else {
        gen_lea_modrm(env, s, modrm);
        if (is_store) {
            if (reg != OR_TMP0) {
                gen_op_mov_v_reg(s, ot, s->T0, reg);
            }
            gen_op_st_v(s, ot, s->T0, s->A0);
        } else {
            gen_op_ld_v(s, ot, s->T0, s->A0);
            if (reg != OR_TMP0) {
                gen_op_mov_reg_v(s, ot, reg, s->T0);
            }
        }
    }
}

static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
{
    target_ulong ret;

    switch (ot) {
    case MO_8:
        ret = x86_ldub_code(env, s);
        break;
    case MO_16:
        ret = x86_lduw_code(env, s);
        break;
    case MO_32:
        ret = x86_ldl_code(env, s);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        ret = x86_ldq_code(env, s);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return ret;
}

static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
{
    uint32_t ret;

    switch (ot) {
    case MO_8:
        ret = x86_ldub_code(env, s);
        break;
    case MO_16:
        ret = x86_lduw_code(env, s);
        break;
    case MO_32:
#ifdef TARGET_X86_64
    case MO_64:
#endif
        ret = x86_ldl_code(env, s);
        break;
    default:
        g_assert_not_reached();
    }
    return ret;
}

static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
{
    target_long ret;

    switch (ot) {
    case MO_8:
        ret = (int8_t) x86_ldub_code(env, s);
        break;
    case MO_16:
        ret = (int16_t) x86_lduw_code(env, s);
        break;
    case MO_32:
        ret = (int32_t) x86_ldl_code(env, s);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        ret = x86_ldq_code(env, s);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return ret;
}

static inline int insn_const_size(MemOp ot)
{
    if (ot <= MO_32) {
        return 1 << ot;
    } else {
        return 4;
    }
}

static void gen_jcc(DisasContext *s, int b, int diff)
{
    TCGLabel *l1 = gen_new_label();

    gen_jcc1(s, b, l1);
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    gen_jmp_rel(s, s->dflag, diff, 0);
}
static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b,
                        int modrm, int reg)
{
    CCPrepare cc;

    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);

    cc = gen_prepare_cc(s, b, s->T1);
    if (cc.mask != -1) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cc.reg, cc.mask);
        cc.reg = t0;
    }
    if (!cc.use_reg2) {
        cc.reg2 = tcg_constant_tl(cc.imm);
    }

    tcg_gen_movcond_tl(cc.cond, s->T0, cc.reg, cc.reg2,
                       s->T0, cpu_regs[reg]);
    gen_op_mov_reg_v(s, ot, reg, s->T0);
}

static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg)
{
    tcg_gen_ld32u_tl(s->T0, cpu_env,
                     offsetof(CPUX86State, segs[seg_reg].selector));
}

static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg)
{
    tcg_gen_ext16u_tl(s->T0, s->T0);
    tcg_gen_st32_tl(s->T0, cpu_env,
                    offsetof(CPUX86State, segs[seg_reg].selector));
    tcg_gen_shli_tl(cpu_seg_base[seg_reg], s->T0, 4);
}

/* move T0 to seg_reg and compute if the CPU state may change. Never
   call this function with seg_reg == R_CS */
static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
{
    if (PE(s) && !VM86(s)) {
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        gen_helper_load_seg(cpu_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
        /* abort translation because the addseg value may change or
           because ss32 may change. For R_SS, translation must always
           stop as a special handling must be done to disable hardware
           interrupts for the next instruction */
        if (seg_reg == R_SS) {
            s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
        } else if (CODE32(s) && seg_reg < R_FS) {
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
    } else {
        gen_op_movl_seg_T0_vm(s, seg_reg);
        if (seg_reg == R_SS) {
            s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
        }
    }
}

static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
{
    /* no SVM activated; fast case */
    if (likely(!GUEST(s))) {
        return;
    }
    gen_helper_svm_check_intercept(cpu_env, tcg_constant_i32(type));
}
2558 static inline void gen_stack_update(DisasContext
*s
, int addend
)
2560 gen_op_add_reg_im(s
, mo_stacksize(s
), R_ESP
, addend
);
/* Generate a push. It depends on ss32, addseg and dflag. */
static void gen_push_v(DisasContext *s, TCGv val)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = mo_stacksize(s);
    int size = 1 << d_ot;
    TCGv new_esp = s->A0;

    tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);

    if (!CODE64(s)) {
        if (ADDSEG(s)) {
            new_esp = s->tmp4;
            tcg_gen_mov_tl(new_esp, s->A0);
        }
        gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
    }

    gen_op_st_v(s, d_ot, val, s->A0);
    gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
}

/* two step pop is necessary for precise exceptions */
static MemOp gen_pop_T0(DisasContext *s)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);

    gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
    gen_op_ld_v(s, d_ot, s->T0, s->A0);

    return d_ot;
}

static inline void gen_pop_update(DisasContext *s, MemOp ot)
{
    gen_stack_update(s, 1 << ot);
}

static inline void gen_stack_A0(DisasContext *s)
{
    gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
}
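/* PUSHA/POPA transfer the eight general registers to/from the stack;
   ESP itself is only adjusted once, after all memory accesses. */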
static void gen_pusha(DisasContext *s)
{
    MemOp s_ot = SS32(s) ? MO_32 : MO_16;
    MemOp d_ot = s->dflag;
    int size = 1 << d_ot;
    int i;

    for (i = 0; i < 8; i++) {
        tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size);
        gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
        gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
    }

    gen_stack_update(s, -8 * size);
}

static void gen_popa(DisasContext *s)
{
    MemOp s_ot = SS32(s) ? MO_32 : MO_16;
    MemOp d_ot = s->dflag;
    int size = 1 << d_ot;
    int i;

    for (i = 0; i < 8; i++) {
        /* ESP is not reloaded */
        if (7 - i == R_ESP) {
            continue;
        }
        tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size);
        gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
        gen_op_ld_v(s, d_ot, s->T0, s->A0);
        gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
    }

    gen_stack_update(s, 8 * size);
}
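/* ENTER: push EBP, optionally copy LEVEL-1 display pointers from the
   previous frame, push the new frame pointer, then load EBP and ESP. */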
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
    int size = 1 << d_ot;

    /* Push BP; compute FrameTemp into T1. */
    tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
    gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1);
    gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);

    level &= 31;
    if (level != 0) {
        int i;

        /* Copy level-1 pointers from the previous frame. */
        for (i = 1; i < level; ++i) {
            tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i);
            gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
            gen_op_ld_v(s, d_ot, s->tmp0, s->A0);

            tcg_gen_subi_tl(s->A0, s->T1, size * i);
            gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
            gen_op_st_v(s, d_ot, s->tmp0, s->A0);
        }

        /* Push the current FrameTemp as the last level. */
        tcg_gen_subi_tl(s->A0, s->T1, size * level);
        gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
        gen_op_st_v(s, d_ot, s->T1, s->A0);
    }

    /* Copy the FrameTemp value to EBP. */
    gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1);

    /* Compute the final value of ESP. */
    tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
    gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
}
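/* LEAVE: reload EBP from the frame and point ESP just past the saved
   value. */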
static void gen_leave(DisasContext *s)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = mo_stacksize(s);

    gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
    gen_op_ld_v(s, d_ot, s->T0, s->A0);

    tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);

    gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
    gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
}
/* Similarly, except that the assumption here is that we don't decode
   the instruction at all -- either a missing opcode, an unimplemented
   feature, or just a bogus instruction stream. */
static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
{
    gen_illegal_opcode(s);

    if (qemu_loglevel_mask(LOG_UNIMP)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            target_ulong pc = s->base.pc_next, end = s->pc;

            fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
            for (; pc < end; ++pc) {
                fprintf(logfile, " %02x", cpu_ldub_code(env, pc));
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }
}

/* an interrupt is different from an exception because of the
   privilege checks */
static void gen_interrupt(DisasContext *s, int intno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_interrupt(cpu_env, tcg_constant_i32(intno),
                               cur_insn_len_i32(s));
    s->base.is_jmp = DISAS_NORETURN;
}
static void gen_set_hflag(DisasContext *s, uint32_t mask)
{
    if ((s->flags & mask) == 0) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
        tcg_gen_ori_i32(t, t, mask);
        tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
        s->flags |= mask;
    }
}

static void gen_reset_hflag(DisasContext *s, uint32_t mask)
{
    if (s->flags & mask) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
        tcg_gen_andi_i32(t, t, ~mask);
        tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
        s->flags &= ~mask;
    }
}

static void gen_set_eflags(DisasContext *s, target_ulong mask)
{
    TCGv t = tcg_temp_new();

    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags));
    tcg_gen_ori_tl(t, t, mask);
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags));
}

static void gen_reset_eflags(DisasContext *s, target_ulong mask)
{
    TCGv t = tcg_temp_new();

    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags));
    tcg_gen_andi_tl(t, t, ~mask);
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags));
}
/* Clear BND registers during legacy branches. */
static void gen_bnd_jmp(DisasContext *s)
{
    /* Clear the registers only if BND prefix is missing, MPX is enabled,
       and if the BNDREGs are known to be in use (non-zero) already.
       The helper itself will check BNDPRESERVE at runtime. */
    if ((s->prefix & PREFIX_REPNZ) == 0
        && (s->flags & HF_MPX_EN_MASK) != 0
        && (s->flags & HF_MPX_IU_MASK) != 0) {
        gen_helper_bnd_jmp(cpu_env);
    }
}

/* Generate an end of block. Trace exception is also generated if needed.
   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
   If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
   S->TF.  This is used by the syscall/sysret insns. */
static void
do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
{
    gen_update_cc_op(s);

    /* If several instructions disable interrupts, only the first does it. */
    if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
        gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
    } else {
        gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
    }

    if (s->base.tb->flags & HF_RF_MASK) {
        gen_reset_eflags(s, RF_MASK);
    }
    if (recheck_tf) {
        gen_helper_rechecking_single_step(cpu_env);
        tcg_gen_exit_tb(NULL, 0);
    } else if (s->flags & HF_TF_MASK) {
        gen_helper_single_step(cpu_env);
    } else if (jr) {
        tcg_gen_lookup_and_goto_ptr();
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}

static void
gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
{
    do_gen_eob_worker(s, inhibit, recheck_tf, false);
}

/* End of block.
   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. */
static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
{
    gen_eob_worker(s, inhibit, false);
}

/* End of block, resetting the inhibit irq flag. */
static void gen_eob(DisasContext *s)
{
    gen_eob_worker(s, false, false);
}

/* Jump to register */
static void gen_jr(DisasContext *s)
{
    do_gen_eob_worker(s, false, false, true);
}
/* Jump to eip+diff, truncating the result to OT. */
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
{
    bool use_goto_tb = s->jmp_opt;
    target_ulong mask = -1;
    target_ulong new_pc = s->pc + diff;
    target_ulong new_eip = new_pc - s->cs_base;

    /* In 64-bit mode, operand size is fixed at 64 bits. */
    if (!CODE64(s)) {
        if (ot == MO_16) {
            mask = 0xffff;
            if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
                use_goto_tb = false;
            }
        } else {
            mask = 0xffffffff;
        }
    }
    new_pc &= mask;
    new_eip &= mask;

    gen_update_cc_op(s);
    set_cc_op(s, CC_OP_DYNAMIC);

    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
        /*
         * If we can prove the branch does not leave the page and we have
         * no extra masking to apply (data16 branch in code32, see above),
         * then we have also proven that the addition does not wrap.
         */
        if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
            tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
            use_goto_tb = false;
        }
    }

    if (use_goto_tb &&
        translator_use_goto_tb(&s->base, new_eip + s->cs_base)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
            tcg_gen_movi_tl(cpu_eip, new_eip);
        }
        tcg_gen_exit_tb(s->base.tb, tb_num);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
            tcg_gen_movi_tl(cpu_eip, new_eip);
        }
        if (s->jmp_opt) {
            gen_jr(s);   /* jump to another page */
        } else {
            gen_eob(s);  /* exit to main loop */
        }
    }
}

/* Jump to eip+diff, truncating to the current code size. */
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
{
    /* CODE64 ignores the OT argument, so we need not consider it. */
    gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
}
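/* Move quantities between guest memory at s->A0 and the FPU/SSE/AVX
   register file in CPUX86State, via the tmp1_i64 temporary.  The 'align'
   forms enforce the natural 16/32-byte alignment of the vector access. */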
static inline void gen_ldq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset);
}

static inline void gen_stq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset);
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
}

static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
{
    int mem_index = s->mem_index;
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
                        MO_LEUQ | (align ? MO_ALIGN_16 : 0));
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_addi_tl(s->tmp0, s->A0, 8);
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
}

static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
{
    int mem_index = s->mem_index;
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
                        MO_LEUQ | (align ? MO_ALIGN_16 : 0));
    tcg_gen_addi_tl(s->tmp0, s->A0, 8);
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
}

static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
{
    int mem_index = s->mem_index;
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
                        MO_LEUQ | (align ? MO_ALIGN_32 : 0));
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(0)));
    tcg_gen_addi_tl(s->tmp0, s->A0, 8);
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(1)));

    tcg_gen_addi_tl(s->tmp0, s->A0, 16);
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(2)));
    tcg_gen_addi_tl(s->tmp0, s->A0, 24);
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(3)));
}

static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
{
    int mem_index = s->mem_index;
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(0)));
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
                        MO_LEUQ | (align ? MO_ALIGN_32 : 0));
    tcg_gen_addi_tl(s->tmp0, s->A0, 8);
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(1)));
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
    tcg_gen_addi_tl(s->tmp0, s->A0, 16);
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(2)));
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
    tcg_gen_addi_tl(s->tmp0, s->A0, 24);
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(3)));
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
}
#include "decode-new.h"
#include "emit.c.inc"
#include "decode-new.c.inc"
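/* CMPXCHG8B: compare EDX:EAX against the 64-bit memory operand and
   either store ECX:EBX (success) or load the old value (failure),
   updating only the Z flag. */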
static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
{
    TCGv_i64 cmp, val, old;
    TCGv Z;

    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i64();
    val = tcg_temp_new_i64();
    old = tcg_temp_new_i64();

    /* Construct the comparison values from the register pair. */
    tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
    } else {
        tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
                                      s->mem_index, MO_TEUQ);
    }

    /* Set tmp0 to match the required value of Z. */
    tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
    Z = tcg_temp_new();
    tcg_gen_trunc_i64_tl(Z, cmp);

    /*
     * Extract the result values for the register pair.
     * For 32-bit, we may do this unconditionally, because on success (Z=1),
     * the old value matches the previous value in EDX:EAX.  For x86_64,
     * the store must be conditional, because we must leave the source
     * registers unchanged on success, and zero-extend the writeback
     * on failure (Z=0).
     */
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
    } else {
        TCGv zero = tcg_constant_tl(0);

        tcg_gen_extr_i64_tl(s->T0, s->T1, old);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
                           s->T0, cpu_regs[R_EAX]);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
                           s->T1, cpu_regs[R_EDX]);
    }

    /* Update Z. */
    gen_compute_eflags(s);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
}
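/* CMPXCHG16B: same operation for RDX:RAX and RCX:RBX, using an aligned
   128-bit memory operand. */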
#ifdef TARGET_X86_64
static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
{
    MemOp mop = MO_TE | MO_128 | MO_ALIGN;
    TCGv_i64 t0, t1;
    TCGv_i128 cmp, val;

    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i128();
    val = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    } else {
        tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    }

    tcg_gen_extr_i128_i64(s->T0, s->T1, val);

    /* Determine success after the fact. */
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
    tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
    tcg_gen_or_i64(t0, t0, t1);

    /* Update Z. */
    gen_compute_eflags(s);
    tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);

    /*
     * Extract the result values for the register pair.  We may do this
     * unconditionally, because on success (Z=1), the old value matches
     * the previous value in RDX:RAX.
     */
    tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
    tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
}
#endif
/* convert one instruction. s->base.is_jmp is set if the translation must
   be stopped. Return the next pc value */
static bool disas_insn(DisasContext *s, CPUState *cpu)
{
    CPUX86State *env = cpu->env_ptr;
    int b, prefixes;
    int shift;
    MemOp ot, aflag, dflag;
    int modrm, reg, rm, mod, op, opreg, val;
    bool orig_cc_op_dirty = s->cc_op_dirty;
    CCOp orig_cc_op = s->cc_op;
    target_ulong orig_pc_save = s->pc_save;

    s->pc = s->base.pc_next;
    s->override = -1;
#ifdef TARGET_X86_64
    s->rex_r = 0;
    s->rex_x = 0;
    s->rex_b = 0;
#endif
    s->rip_offset = 0; /* for relative ip address */
    s->vex_l = 0;
    s->vex_v = 0;
    s->vex_w = false;
    switch (sigsetjmp(s->jmpbuf, 0)) {
    case 0:
        break;
    case 1:
        gen_exception_gpf(s);
        return true;
    case 2:
        /* Restore state that may affect the next instruction. */
        s->pc = s->base.pc_next;
        /*
         * TODO: These save/restore can be removed after the table-based
         * decoder is complete; we will be decoding the insn completely
         * before any code generation that might affect these variables.
         */
        s->cc_op_dirty = orig_cc_op_dirty;
        s->cc_op = orig_cc_op;
        s->pc_save = orig_pc_save;
        /* END TODO */
        s->base.num_insns--;
        tcg_remove_ops_after(s->prev_insn_end);
        s->base.is_jmp = DISAS_TOO_MANY;
        return false;
    default:
        g_assert_not_reached();
    }

    prefixes = 0;
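    /* Each prefix byte restarts the decode loop at next_byte; the first
       non-prefix byte falls out of the switch below as the opcode. */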
 next_byte:
    s->prefix = prefixes;
    b = x86_ldub_code(env, s);
    /* Collect prefixes. */
    switch (b) {
    default:
        break;
    case 0x0f:
        b = x86_ldub_code(env, s) + 0x100;
        break;
    case 0xf3:
        prefixes |= PREFIX_REPZ;
        prefixes &= ~PREFIX_REPNZ;
        goto next_byte;
    case 0xf2:
        prefixes |= PREFIX_REPNZ;
        prefixes &= ~PREFIX_REPZ;
        goto next_byte;
    case 0xf0:
        prefixes |= PREFIX_LOCK;
        goto next_byte;
    case 0x2e:
        s->override = R_CS;
        goto next_byte;
    case 0x36:
        s->override = R_SS;
        goto next_byte;
    case 0x3e:
        s->override = R_DS;
        goto next_byte;
    case 0x26:
        s->override = R_ES;
        goto next_byte;
    case 0x64:
        s->override = R_FS;
        goto next_byte;
    case 0x65:
        s->override = R_GS;
        goto next_byte;
    case 0x66:
        prefixes |= PREFIX_DATA;
        goto next_byte;
    case 0x67:
        prefixes |= PREFIX_ADR;
        goto next_byte;
#ifdef TARGET_X86_64
    case 0x40 ... 0x4f:
        if (CODE64(s)) {
            /* REX prefix */
            prefixes |= PREFIX_REX;
            s->vex_w = (b >> 3) & 1;
            s->rex_r = (b & 0x4) << 1;
            s->rex_x = (b & 0x2) << 2;
            s->rex_b = (b & 0x1) << 3;
            goto next_byte;
        }
        break;
#endif
    case 0xc5: /* 2-byte VEX */
    case 0xc4: /* 3-byte VEX */
        if (CODE32(s) && !VM86(s)) {
            int vex2 = x86_ldub_code(env, s);
            s->pc--; /* rewind the advance_pc() x86_ldub_code() did */

            if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
                /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
                   otherwise the instruction is LES or LDS. */
                break;
            }
            disas_insn_new(s, cpu, b);
            return true;
        }
        break;
    }

    /* Post-process prefixes. */
    if (CODE64(s)) {
        /* In 64-bit mode, the default data size is 32-bit.  Select 64-bit
           data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
           over 0x66 if both are present. */
        dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
        /* In 64-bit mode, 0x67 selects 32-bit addressing. */
        aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
    } else {
        /* In 16/32-bit mode, 0x66 selects the opposite data size. */
        if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) {
            dflag = MO_32;
        } else {
            dflag = MO_16;
        }
        /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
        if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) {
            aflag = MO_32;
        } else {
            aflag = MO_16;
        }
    }

    s->prefix = prefixes;
    s->aflag = aflag;
    s->dflag = dflag;
    /* now check op code */
    switch (b) {
    /**************************/
    /* arith & logic */
    case 0x00 ... 0x05:
    case 0x08 ... 0x0d:
    case 0x10 ... 0x15:
    case 0x18 ... 0x1d:
    case 0x20 ... 0x25:
    case 0x28 ... 0x2d:
    case 0x30 ... 0x35:
    case 0x38 ... 0x3d:
        {
            int f;
            op = (b >> 3) & 7;
            f = (b >> 1) & 3;

            ot = mo_b_d(b, dflag);

            switch (f) {
            case 0: /* OP Ev, Gv */
                modrm = x86_ldub_code(env, s);
                reg = ((modrm >> 3) & 7) | REX_R(s);
                mod = (modrm >> 6) & 3;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(env, s, modrm);
                    opreg = OR_TMP0;
                } else if (op == OP_XORL && rm == reg) {
                xor_zero:
                    /* xor reg, reg optimisation */
                    set_cc_op(s, CC_OP_CLR);
                    tcg_gen_movi_tl(s->T0, 0);
                    gen_op_mov_reg_v(s, ot, reg, s->T0);
                    break;
                } else {
                    opreg = rm;
                }
                gen_op_mov_v_reg(s, ot, s->T1, reg);
                gen_op(s, op, ot, opreg);
                break;
            case 1: /* OP Gv, Ev */
                modrm = x86_ldub_code(env, s);
                mod = (modrm >> 6) & 3;
                reg = ((modrm >> 3) & 7) | REX_R(s);
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(env, s, modrm);
                    gen_op_ld_v(s, ot, s->T1, s->A0);
                } else if (op == OP_XORL && rm == reg) {
                    goto xor_zero;
                } else {
                    gen_op_mov_v_reg(s, ot, s->T1, rm);
                }
                gen_op(s, op, ot, reg);
                break;
            case 2: /* OP A, Iv */
                val = insn_get(env, s, ot);
                tcg_gen_movi_tl(s->T1, val);
                gen_op(s, op, ot, OR_EAX);
                break;
            }
        }
        break;
    case 0x80: /* GRP1 */
    case 0x81:
    case 0x82:
    case 0x83:
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;

        if (mod != 3) {
            if (b == 0x83)
                s->rip_offset = 1;
            else
                s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm);
            opreg = OR_TMP0;
        } else {
            opreg = rm;
        }

        switch (b) {
        default:
        case 0x80:
        case 0x81:
        case 0x82:
            val = insn_get(env, s, ot);
            break;
        case 0x83:
            val = (int8_t)insn_get(env, s, MO_8);
            break;
        }
        tcg_gen_movi_tl(s->T1, val);
        gen_op(s, op, ot, opreg);
        break;

    /**************************/
    /* inc, dec, and other misc arith */
    case 0x40 ... 0x47: /* inc Gv */
        ot = dflag;
        gen_inc(s, ot, OR_EAX + (b & 7), 1);
        break;
    case 0x48 ... 0x4f: /* dec Gv */
        ot = dflag;
        gen_inc(s, ot, OR_EAX + (b & 7), -1);
        break;
    case 0xf6: /* GRP3 */
    case 0xf7:
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (mod != 3) {
            if (op == 0) {
                s->rip_offset = insn_const_size(ot);
            }
            gen_lea_modrm(env, s, modrm);
            /* For those below that handle locked memory, don't load here. */
            if (!(s->prefix & PREFIX_LOCK)
                || op != 2) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
            }
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        }

        switch (op) {
        case 0: /* test */
            val = insn_get(env, s, ot);
            tcg_gen_movi_tl(s->T1, val);
            gen_op_testl_T0_T1_cc(s);
            set_cc_op(s, CC_OP_LOGICB + ot);
            break;
        case 2: /* not */
            if (s->prefix & PREFIX_LOCK) {
                if (mod == 3) {
                    goto illegal_op;
                }
                tcg_gen_movi_tl(s->T0, ~0);
                tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
                                            s->mem_index, ot | MO_LE);
            } else {
                tcg_gen_not_tl(s->T0, s->T0);
                if (mod != 3) {
                    gen_op_st_v(s, ot, s->T0, s->A0);
                } else {
                    gen_op_mov_reg_v(s, ot, rm, s->T0);
                }
            }
            break;
        case 3: /* neg */
            if (s->prefix & PREFIX_LOCK) {
                TCGLabel *label1;
                TCGv a0, t0, t1, t2;

                if (mod == 3) {
                    goto illegal_op;
                }
                a0 = s->A0;
                t0 = s->T0;
                label1 = gen_new_label();

                gen_set_label(label1);
                t1 = tcg_temp_new();
                t2 = tcg_temp_new();
                tcg_gen_mov_tl(t2, t0);
                tcg_gen_neg_tl(t1, t0);
                tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
                                          s->mem_index, ot | MO_LE);
                tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);

                tcg_gen_neg_tl(s->T0, t0);
            } else {
                tcg_gen_neg_tl(s->T0, s->T0);
                if (mod != 3) {
                    gen_op_st_v(s, ot, s->T0, s->A0);
                } else {
                    gen_op_mov_reg_v(s, ot, rm, s->T0);
                }
            }
            gen_op_update_neg_cc(s);
            set_cc_op(s, CC_OP_SUBB + ot);
            break;
        case 4: /* mul */
            switch (ot) {
            case MO_8:
                gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
                tcg_gen_ext8u_tl(s->T0, s->T0);
                tcg_gen_ext8u_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00);
                set_cc_op(s, CC_OP_MULB);
                break;
            case MO_16:
                gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
                tcg_gen_ext16u_tl(s->T0, s->T0);
                tcg_gen_ext16u_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_shri_tl(s->T0, s->T0, 16);
                gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
                tcg_gen_mov_tl(cpu_cc_src, s->T0);
                set_cc_op(s, CC_OP_MULW);
                break;
            default:
            case MO_32:
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
                                  s->tmp2_i32, s->tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULL);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  s->T0, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
                break;
#endif
            }
            break;
        case 5: /* imul */
            switch (ot) {
            case MO_8:
                gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
                tcg_gen_ext8s_tl(s->T0, s->T0);
                tcg_gen_ext8s_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_ext8s_tl(s->tmp0, s->T0);
                tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
                set_cc_op(s, CC_OP_MULB);
                break;
            case MO_16:
                gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
                tcg_gen_ext16s_tl(s->T0, s->T0);
                tcg_gen_ext16s_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_ext16s_tl(s->tmp0, s->T0);
                tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
                tcg_gen_shri_tl(s->T0, s->T0, 16);
                gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
                set_cc_op(s, CC_OP_MULW);
                break;
            default:
            case MO_32:
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
                                  s->tmp2_i32, s->tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
                tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
                set_cc_op(s, CC_OP_MULL);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  s->T0, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
                tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
                break;
#endif
            }
            break;
        case 6: /* div */
            switch (ot) {
            case MO_8:
                gen_helper_divb_AL(cpu_env, s->T0);
                break;
            case MO_16:
                gen_helper_divw_AX(cpu_env, s->T0);
                break;
            default:
            case MO_32:
                gen_helper_divl_EAX(cpu_env, s->T0);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                gen_helper_divq_EAX(cpu_env, s->T0);
                break;
#endif
            }
            break;
        case 7: /* idiv */
            switch (ot) {
            case MO_8:
                gen_helper_idivb_AL(cpu_env, s->T0);
                break;
            case MO_16:
                gen_helper_idivw_AX(cpu_env, s->T0);
                break;
            default:
            case MO_32:
                gen_helper_idivl_EAX(cpu_env, s->T0);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                gen_helper_idivq_EAX(cpu_env, s->T0);
                break;
#endif
            }
            break;
        default:
            goto unknown_op;
        }
        break;
    case 0xfe: /* GRP4 */
    case 0xff: /* GRP5 */
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (op >= 2 && b == 0xfe) {
            goto unknown_op;
        }
        if (CODE64(s)) {
            if (op == 2 || op == 4) {
                /* operand size for jumps is 64 bit */
                ot = MO_64;
            } else if (op == 3 || op == 5) {
                ot = dflag != MO_16 ? MO_32 + REX_W(s) : MO_16;
            } else if (op == 6) {
                /* default push size is 64 bit */
                ot = mo_pushpop(s, dflag);
            }
        }
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm);
            if (op >= 2 && op != 3 && op != 5)
                gen_op_ld_v(s, ot, s->T0, s->A0);
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        }

        switch (op) {
        case 0: /* inc Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, 1);
            break;
        case 1: /* dec Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, -1);
            break;
        case 2: /* call Ev */
            /* XXX: optimize if memory (no 'and' is necessary) */
            if (dflag == MO_16) {
                tcg_gen_ext16u_tl(s->T0, s->T0);
            }
            gen_push_v(s, eip_next_tl(s));
            gen_op_jmp_v(s, s->T0);
            gen_bnd_jmp(s);
            s->base.is_jmp = DISAS_JUMP;
            break;
        case 3: /* lcall Ev */
            if (mod == 3) {
                goto illegal_op;
            }
            gen_op_ld_v(s, ot, s->T1, s->A0);
            gen_add_A0_im(s, 1 << ot);
            gen_op_ld_v(s, MO_16, s->T0, s->A0);
        do_lcall:
            if (PE(s) && !VM86(s)) {
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_lcall_protected(cpu_env, s->tmp2_i32, s->T1,
                                           tcg_constant_i32(dflag - 1),
                                           eip_next_tl(s));
            } else {
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
                gen_helper_lcall_real(cpu_env, s->tmp2_i32, s->tmp3_i32,
                                      tcg_constant_i32(dflag - 1),
                                      eip_next_i32(s));
            }
            s->base.is_jmp = DISAS_JUMP;
            break;
        case 4: /* jmp Ev */
            if (dflag == MO_16) {
                tcg_gen_ext16u_tl(s->T0, s->T0);
            }
            gen_op_jmp_v(s, s->T0);
            gen_bnd_jmp(s);
            s->base.is_jmp = DISAS_JUMP;
            break;
        case 5: /* ljmp Ev */
            if (mod == 3) {
                goto illegal_op;
            }
            gen_op_ld_v(s, ot, s->T1, s->A0);
            gen_add_A0_im(s, 1 << ot);
            gen_op_ld_v(s, MO_16, s->T0, s->A0);
        do_ljmp:
            if (PE(s) && !VM86(s)) {
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_ljmp_protected(cpu_env, s->tmp2_i32, s->T1,
                                          eip_next_tl(s));
            } else {
                gen_op_movl_seg_T0_vm(s, R_CS);
                gen_op_jmp_v(s, s->T1);
            }
            s->base.is_jmp = DISAS_JUMP;
            break;
        case 6: /* push Ev */
            gen_push_v(s, s->T0);
            break;
        default:
            goto unknown_op;
        }
        break;
    case 0x84: /* test Ev, Gv */
    case 0x85:
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_v_reg(s, ot, s->T1, reg);
        gen_op_testl_T0_T1_cc(s);
        set_cc_op(s, CC_OP_LOGICB + ot);
        break;

    case 0xa8: /* test eAX, Iv */
    case 0xa9:
        ot = mo_b_d(b, dflag);
        val = insn_get(env, s, ot);

        gen_op_mov_v_reg(s, ot, s->T0, OR_EAX);
        tcg_gen_movi_tl(s->T1, val);
        gen_op_testl_T0_T1_cc(s);
        set_cc_op(s, CC_OP_LOGICB + ot);
        break;

    case 0x98: /* CWDE/CBW */
        switch (dflag) {
#ifdef TARGET_X86_64
        case MO_64:
            gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
            tcg_gen_ext32s_tl(s->T0, s->T0);
            gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0);
            break;
#endif
        case MO_32:
            gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
            tcg_gen_ext16s_tl(s->T0, s->T0);
            gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0);
            break;
        case MO_16:
            gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX);
            tcg_gen_ext8s_tl(s->T0, s->T0);
            gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 0x99: /* CDQ/CWD */
        switch (dflag) {
#ifdef TARGET_X86_64
        case MO_64:
            gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX);
            tcg_gen_sari_tl(s->T0, s->T0, 63);
            gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0);
            break;
#endif
        case MO_32:
            gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
            tcg_gen_ext32s_tl(s->T0, s->T0);
            tcg_gen_sari_tl(s->T0, s->T0, 31);
            gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0);
            break;
        case MO_16:
            gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
            tcg_gen_ext16s_tl(s->T0, s->T0);
            tcg_gen_sari_tl(s->T0, s->T0, 15);
            gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 0x1af: /* imul Gv, Ev */
    case 0x69: /* imul Gv, Ev, I */
    case 0x6b:
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        if (b == 0x69)
            s->rip_offset = insn_const_size(ot);
        else if (b == 0x6b)
            s->rip_offset = 1;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        if (b == 0x69) {
            val = insn_get(env, s, ot);
            tcg_gen_movi_tl(s->T1, val);
        } else if (b == 0x6b) {
            val = (int8_t)insn_get(env, s, MO_8);
            tcg_gen_movi_tl(s->T1, val);
        } else {
            gen_op_mov_v_reg(s, ot, s->T1, reg);
        }
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_64:
            tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
            tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1);
            break;
#endif
        case MO_32:
            tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
            tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
            tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
                              s->tmp2_i32, s->tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
            tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
            break;
        default:
            tcg_gen_ext16s_tl(s->T0, s->T0);
            tcg_gen_ext16s_tl(s->T1, s->T1);
            /* XXX: use 32 bit mul which could be faster */
            tcg_gen_mul_tl(s->T0, s->T0, s->T1);
            tcg_gen_mov_tl(cpu_cc_dst, s->T0);
            tcg_gen_ext16s_tl(s->tmp0, s->T0);
            tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
            gen_op_mov_reg_v(s, ot, reg, s->T0);
            break;
        }
        set_cc_op(s, CC_OP_MULB + ot);
        break;
    case 0x1c0:
    case 0x1c1: /* xadd Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        gen_op_mov_v_reg(s, ot, s->T0, reg);
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_v_reg(s, ot, s->T1, rm);
            tcg_gen_add_tl(s->T0, s->T0, s->T1);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        } else {
            gen_lea_modrm(env, s, modrm);
            if (s->prefix & PREFIX_LOCK) {
                tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
                                            s->mem_index, ot | MO_LE);
                tcg_gen_add_tl(s->T0, s->T0, s->T1);
            } else {
                gen_op_ld_v(s, ot, s->T1, s->A0);
                tcg_gen_add_tl(s->T0, s->T0, s->T1);
                gen_op_st_v(s, ot, s->T0, s->A0);
            }
            gen_op_mov_reg_v(s, ot, reg, s->T1);
        }
        gen_op_update2_cc(s);
        set_cc_op(s, CC_OP_ADDB + ot);
        break;
    case 0x1b0:
    case 0x1b1: /* cmpxchg Ev, Gv */
        {
            TCGv oldv, newv, cmpv, dest;

            ot = mo_b_d(b, dflag);
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            mod = (modrm >> 6) & 3;
            oldv = tcg_temp_new();
            newv = tcg_temp_new();
            cmpv = tcg_temp_new();
            gen_op_mov_v_reg(s, ot, newv, reg);
            tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
            gen_extu(ot, cmpv);
            if (s->prefix & PREFIX_LOCK) {
                if (mod == 3) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
                                          s->mem_index, ot | MO_LE);
            } else {
                if (mod == 3) {
                    rm = (modrm & 7) | REX_B(s);
                    gen_op_mov_v_reg(s, ot, oldv, rm);
                    gen_extu(ot, oldv);

                    /*
                     * Unlike the memory case, where "the destination operand receives
                     * a write cycle without regard to the result of the comparison",
                     * rm must not be touched altogether if the write fails, including
                     * not zero-extending it on 64-bit processors.  So, precompute
                     * the result of a successful writeback and perform the movcond
                     * directly on cpu_regs.  Also need to write accumulator first, in
                     * case rm is part of RAX too.
                     */
                    dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
                    tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
                } else {
                    gen_lea_modrm(env, s, modrm);
                    gen_op_ld_v(s, ot, oldv, s->A0);

                    /*
                     * Perform an unconditional store cycle like physical cpu;
                     * must be before changing accumulator to ensure
                     * idempotency if the store faults and the instruction
                     * is restarted
                     */
                    tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
                    gen_op_st_v(s, ot, newv, s->A0);
                }
            }
            /*
             * Write EAX only if the cmpxchg fails; reuse newv as the destination,
             * since it's dead here.
             */
            dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
            tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
            tcg_gen_mov_tl(cpu_cc_src, oldv);
            tcg_gen_mov_tl(s->cc_srcT, cmpv);
            tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
            set_cc_op(s, CC_OP_SUBB + ot);
        }
        break;
    case 0x1c7: /* cmpxchg8b */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        switch ((modrm >> 3) & 7) {
        case 1: /* CMPXCHG8, CMPXCHG16 */
            if (mod == 3) {
                goto illegal_op;
            }
#ifdef TARGET_X86_64
            if (dflag == MO_64) {
                if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
                    goto illegal_op;
                }
                gen_cmpxchg16b(s, env, modrm);
                break;
            }
#endif
            if (!(s->cpuid_features & CPUID_CX8)) {
                goto illegal_op;
            }
            gen_cmpxchg8b(s, env, modrm);
            break;

        case 7: /* RDSEED */
        case 6: /* RDRAND */
            if (mod != 3 ||
                (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
                !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
                goto illegal_op;
            }
            translator_io_start(&s->base);
            gen_helper_rdrand(s->T0, cpu_env);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_v(s, dflag, rm, s->T0);
            set_cc_op(s, CC_OP_EFLAGS);
            break;

        default:
            goto illegal_op;
        }
        break;
    /**************************/
    /* push/pop */
    case 0x50 ... 0x57: /* push */
        gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s));
        gen_push_v(s, s->T0);
        break;
    case 0x58 ... 0x5f: /* pop */
        ot = gen_pop_T0(s);
        /* NOTE: order is important for pop %sp */
        gen_pop_update(s, ot);
        gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0);
        break;
    case 0x60: /* pusha */
        if (CODE64(s))
            goto illegal_op;
        gen_pusha(s);
        break;
    case 0x61: /* popa */
        if (CODE64(s))
            goto illegal_op;
        gen_popa(s);
        break;
    case 0x68: /* push Iv */
    case 0x6a:
        ot = mo_pushpop(s, dflag);
        if (b == 0x68)
            val = insn_get(env, s, ot);
        else
            val = (int8_t)insn_get(env, s, MO_8);
        tcg_gen_movi_tl(s->T0, val);
        gen_push_v(s, s->T0);
        break;
    case 0x8f: /* pop Ev */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        ot = gen_pop_T0(s);
        if (mod == 3) {
            /* NOTE: order is important for pop %sp */
            gen_pop_update(s, ot);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        } else {
            /* NOTE: order is important too for MMU exceptions */
            s->popl_esp_hack = 1 << ot;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            s->popl_esp_hack = 0;
            gen_pop_update(s, ot);
        }
        break;
    case 0xc8: /* enter */
        {
            int level;
            val = x86_lduw_code(env, s);
            level = x86_ldub_code(env, s);
            gen_enter(s, val, level);
        }
        break;
    case 0xc9: /* leave */
        gen_leave(s);
        break;
    case 0x06: /* push es */
    case 0x0e: /* push cs */
    case 0x16: /* push ss */
    case 0x1e: /* push ds */
        if (CODE64(s))
            goto illegal_op;
        gen_op_movl_T0_seg(s, b >> 3);
        gen_push_v(s, s->T0);
        break;
    case 0x1a0: /* push fs */
    case 0x1a8: /* push gs */
        gen_op_movl_T0_seg(s, (b >> 3) & 7);
        gen_push_v(s, s->T0);
        break;
    case 0x07: /* pop es */
    case 0x17: /* pop ss */
    case 0x1f: /* pop ds */
        if (CODE64(s))
            goto illegal_op;
        reg = b >> 3;
        ot = gen_pop_T0(s);
        gen_movl_seg_T0(s, reg);
        gen_pop_update(s, ot);
        break;
    case 0x1a1: /* pop fs */
    case 0x1a9: /* pop gs */
        ot = gen_pop_T0(s);
        gen_movl_seg_T0(s, (b >> 3) & 7);
        gen_pop_update(s, ot);
        break;
    /**************************/
    /* mov */
    case 0x88:
    case 0x89: /* mov Gv, Ev */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0xc6:
    case 0xc7: /* mov Ev, Iv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod != 3) {
            s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm);
        }
        val = insn_get(env, s, ot);
        tcg_gen_movi_tl(s->T0, val);
        if (mod != 3) {
            gen_op_st_v(s, ot, s->T0, s->A0);
        } else {
            gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0);
        }
        break;
    case 0x8a:
    case 0x8b: /* mov Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_reg_v(s, ot, reg, s->T0);
        break;
    case 0x8e: /* mov seg, Gv */
        modrm = x86_ldub_code(env, s);
        reg = (modrm >> 3) & 7;
        if (reg >= 6 || reg == R_CS)
            goto illegal_op;
        gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
        gen_movl_seg_T0(s, reg);
        break;
    case 0x8c: /* mov Gv, seg */
        modrm = x86_ldub_code(env, s);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (reg >= 6)
            goto illegal_op;
        gen_op_movl_T0_seg(s, reg);
        ot = mod == 3 ? dflag : MO_16;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
        break;

    case 0x1b6: /* movzbS Gv, Eb */
    case 0x1b7: /* movzwS Gv, Eb */
    case 0x1be: /* movsbS Gv, Eb */
    case 0x1bf: /* movswS Gv, Eb */
        {
            MemOp d_ot;
            MemOp s_ot;

            /* d_ot is the size of destination */
            d_ot = dflag;
            /* ot is the size of source */
            ot = (b & 1) + MO_8;
            /* s_ot is the sign+size of source */
            s_ot = b & 8 ? MO_SIGN | ot : ot;

            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) {
                    tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8);
                } else {
                    gen_op_mov_v_reg(s, ot, s->T0, rm);
                    switch (s_ot) {
                    case MO_UB:
                        tcg_gen_ext8u_tl(s->T0, s->T0);
                        break;
                    case MO_SB:
                        tcg_gen_ext8s_tl(s->T0, s->T0);
                        break;
                    case MO_UW:
                        tcg_gen_ext16u_tl(s->T0, s->T0);
                        break;
                    default:
                    case MO_SW:
                        tcg_gen_ext16s_tl(s->T0, s->T0);
                        break;
                    }
                }
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            } else {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, s_ot, s->T0, s->A0);
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            }
        }
        break;

    case 0x8d: /* lea */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | REX_R(s);
        {
            AddressParts a = gen_lea_modrm_0(env, s, modrm);
            TCGv ea = gen_lea_modrm_1(s, a, false);
            gen_lea_v_seg(s, s->aflag, ea, -1, -1);
            gen_op_mov_reg_v(s, dflag, reg, s->A0);
        }
        break;

    case 0xa0: /* mov EAX, Ov */
    case 0xa1:
    case 0xa2: /* mov Ov, EAX */
    case 0xa3:
        {
            target_ulong offset_addr;

            ot = mo_b_d(b, dflag);
            offset_addr = insn_get_addr(env, s, s->aflag);
            tcg_gen_movi_tl(s->A0, offset_addr);
            gen_add_A0_ds_seg(s);
            if ((b & 2) == 0) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
                gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
            } else {
                gen_op_mov_v_reg(s, ot, s->T0, R_EAX);
                gen_op_st_v(s, ot, s->T0, s->A0);
            }
        }
        break;
    case 0xd7: /* xlat */
        tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]);
        tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]);
        tcg_gen_add_tl(s->A0, s->A0, s->T0);
        gen_extu(s->aflag, s->A0);
        gen_add_A0_ds_seg(s);
        gen_op_ld_v(s, MO_8, s->T0, s->A0);
        gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
        break;
    case 0xb0 ... 0xb7: /* mov R, Ib */
        val = insn_get(env, s, MO_8);
        tcg_gen_movi_tl(s->T0, val);
        gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0);
        break;
    case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            uint64_t tmp;
            /* 64 bit case */
            tmp = x86_ldq_code(env, s);
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(s->T0, tmp);
            gen_op_mov_reg_v(s, MO_64, reg, s->T0);
        } else
#endif
        {
            ot = dflag;
            val = insn_get(env, s, ot);
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(s->T0, val);
            gen_op_mov_reg_v(s, ot, reg, s->T0);
        }
        break;

    case 0x91 ... 0x97: /* xchg R, EAX */
    do_xchg_reg_eax:
        ot = dflag;
        reg = (b & 7) | REX_B(s);
        rm = R_EAX;
        goto do_xchg_reg;
    case 0x86:
    case 0x87: /* xchg Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
        do_xchg_reg:
            gen_op_mov_v_reg(s, ot, s->T0, reg);
            gen_op_mov_v_reg(s, ot, s->T1, rm);
            gen_op_mov_reg_v(s, ot, rm, s->T0);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
        } else {
            gen_lea_modrm(env, s, modrm);
            gen_op_mov_v_reg(s, ot, s->T0, reg);
            /* for xchg, lock is implicit */
            tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0,
                                   s->mem_index, ot | MO_LE);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
        }
        break;
    case 0xc4: /* les Gv */
        /* In CODE64 this is VEX3; see above. */
        op = R_ES;
        goto do_lxx;
    case 0xc5: /* lds Gv */
        /* In CODE64 this is VEX2; see above. */
        op = R_DS;
        goto do_lxx;
    case 0x1b2: /* lss Gv */
        op = R_SS;
        goto do_lxx;
    case 0x1b4: /* lfs Gv */
        op = R_FS;
        goto do_lxx;
    case 0x1b5: /* lgs Gv */
        op = R_GS;
    do_lxx:
        ot = dflag != MO_16 ? MO_32 : MO_16;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_lea_modrm(env, s, modrm);
        gen_op_ld_v(s, ot, s->T1, s->A0);
        gen_add_A0_im(s, 1 << ot);
        /* load the segment first to handle exceptions properly */
        gen_op_ld_v(s, MO_16, s->T0, s->A0);
        gen_movl_seg_T0(s, op);
        /* then put the data */
        gen_op_mov_reg_v(s, ot, reg, s->T1);
        break;
    /************************/
    /* shifts */
    case 0xc0:
    case 0xc1:
        /* shift Ev,Ib */
        shift = 2;
    grp2:
        {
            ot = mo_b_d(b, dflag);
            modrm = x86_ldub_code(env, s);
            mod = (modrm >> 6) & 3;
            op = (modrm >> 3) & 7;

            if (mod != 3) {
                if (shift == 2) {
                    s->rip_offset = 1;
                }
                gen_lea_modrm(env, s, modrm);
                opreg = OR_TMP0;
            } else {
                opreg = (modrm & 7) | REX_B(s);
            }

            /* simpler op */
            if (shift == 0) {
                gen_shift(s, op, ot, opreg, OR_ECX);
            } else {
                if (shift == 2) {
                    shift = x86_ldub_code(env, s);
                }
                gen_shifti(s, op, ot, opreg, shift);
            }
        }
        break;
    case 0xd0:
    case 0xd1:
        /* shift Ev,1 */
        shift = 1;
        goto grp2;
    case 0xd2:
    case 0xd3:
        /* shift Ev,cl */
        shift = 0;
        goto grp2;

    case 0x1a4: /* shld imm */
        op = 0;
        shift = 1;
        goto do_shiftd;
    case 0x1a5: /* shld cl */
        op = 0;
        shift = 0;
        goto do_shiftd;
    case 0x1ac: /* shrd imm */
        op = 1;
        shift = 1;
        goto do_shiftd;
    case 0x1ad: /* shrd cl */
        op = 1;
        shift = 0;
    do_shiftd:
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm);
            opreg = OR_TMP0;
        } else {
            opreg = rm;
        }
        gen_op_mov_v_reg(s, ot, s->T1, reg);

        if (shift) {
            TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
            gen_shiftd_rm_T1(s, ot, opreg, op, imm);
        } else {
            gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
        }
        break;
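    /* The x87 escape opcodes 0xd8-0xdf dispatch on op, the low three
       bits of the opcode combined with the reg field of the modrm byte;
       mod != 3 selects the memory forms. */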
    /************************/
    /* floating point */
    case 0xd8 ... 0xdf:
        {
            bool update_fip = true;

            if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
                /* if CR0.EM or CR0.TS are set, generate an FPU exception */
                /* XXX: what to do if illegal op ? */
                gen_exception(s, EXCP07_PREX);
                break;
            }
            modrm = x86_ldub_code(env, s);
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            op = ((b & 7) << 3) | ((modrm >> 3) & 7);
            if (mod != 3) {
                /* memory op */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                TCGv ea = gen_lea_modrm_1(s, a, false);
                TCGv last_addr = tcg_temp_new();
                bool update_fdp = true;

                tcg_gen_mov_tl(last_addr, ea);
                gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);

                switch (op) {
                case 0x00 ... 0x07: /* fxxxs */
                case 0x10 ... 0x17: /* fixxxl */
                case 0x20 ... 0x27: /* fxxxl */
                case 0x30 ... 0x37: /* fixxx */
                    {
                        int op1;
                        op1 = op & 7;

                        switch (op >> 4) {
                        case 0:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            gen_helper_flds_FT0(cpu_env, s->tmp2_i32);
                            break;
                        case 1:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            gen_helper_fildl_FT0(cpu_env, s->tmp2_i32);
                            break;
                        case 2:
                            tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            gen_helper_fldl_FT0(cpu_env, s->tmp1_i64);
                            break;
                        case 3:
                        default:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LESW);
                            gen_helper_fildl_FT0(cpu_env, s->tmp2_i32);
                            break;
                        }

                        gen_helper_fp_arith_ST0_FT0(op1);
                        if (op1 == 3) {
                            /* fcomp needs pop */
                            gen_helper_fpop(cpu_env);
                        }
                    }
                    break;
                case 0x08: /* flds */
                case 0x0a: /* fsts */
                case 0x0b: /* fstps */
                case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
                case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
                case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
                    switch (op & 7) {
                    case 0:
                        switch (op >> 4) {
                        case 0:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            gen_helper_flds_ST0(cpu_env, s->tmp2_i32);
                            break;
                        case 1:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            gen_helper_fildl_ST0(cpu_env, s->tmp2_i32);
                            break;
                        case 2:
                            tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            gen_helper_fldl_ST0(cpu_env, s->tmp1_i64);
                            break;
                        case 3:
                        default:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LESW);
                            gen_helper_fildl_ST0(cpu_env, s->tmp2_i32);
                            break;
                        }
                        break;
                    case 1:
                        /* XXX: the corresponding CPUID bit must be tested ! */
                        switch (op >> 4) {
                        case 1:
                            gen_helper_fisttl_ST0(s->tmp2_i32, cpu_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            break;
                        case 2:
                            gen_helper_fisttll_ST0(s->tmp1_i64, cpu_env);
                            tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            break;
                        case 3:
                        default:
                            gen_helper_fistt_ST0(s->tmp2_i32, cpu_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUW);
                            break;
                        }
                        gen_helper_fpop(cpu_env);
                        break;
                    default:
                        switch (op >> 4) {
                        case 0:
                            gen_helper_fsts_ST0(s->tmp2_i32, cpu_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            break;
                        case 1:
                            gen_helper_fistl_ST0(s->tmp2_i32, cpu_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            break;
                        case 2:
                            gen_helper_fstl_ST0(s->tmp1_i64, cpu_env);
                            tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            break;
                        case 3:
                        default:
                            gen_helper_fist_ST0(s->tmp2_i32, cpu_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUW);
                            break;
                        }
                        if ((op & 7) == 3) {
                            gen_helper_fpop(cpu_env);
                        }
                        break;
                    }
                    break;
                case 0x0c: /* fldenv mem */
                    gen_helper_fldenv(cpu_env, s->A0,
                                      tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x0d: /* fldcw mem */
                    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    gen_helper_fldcw(cpu_env, s->tmp2_i32);
                    update_fip = update_fdp = false;
                    break;
                case 0x0e: /* fnstenv mem */
                    gen_helper_fstenv(cpu_env, s->A0,
                                      tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x0f: /* fnstcw mem */
                    gen_helper_fnstcw(s->tmp2_i32, cpu_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    update_fip = update_fdp = false;
                    break;
                case 0x1d: /* fldt mem */
                    gen_helper_fldt_ST0(cpu_env, s->A0);
                    break;
                case 0x1f: /* fstpt mem */
                    gen_helper_fstt_ST0(cpu_env, s->A0);
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x2c: /* frstor mem */
                    gen_helper_frstor(cpu_env, s->A0,
                                      tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x2e: /* fnsave mem */
                    gen_helper_fsave(cpu_env, s->A0,
                                     tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x2f: /* fnstsw mem */
                    gen_helper_fnstsw(s->tmp2_i32, cpu_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    update_fip = update_fdp = false;
                    break;
                case 0x3c: /* fbld */
                    gen_helper_fbld_ST0(cpu_env, s->A0);
                    break;
                case 0x3e: /* fbstp */
                    gen_helper_fbst_ST0(cpu_env, s->A0);
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x3d: /* fildll */
                    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                        s->mem_index, MO_LEUQ);
                    gen_helper_fildll_ST0(cpu_env, s->tmp1_i64);
                    break;
                case 0x3f: /* fistpll */
                    gen_helper_fistll_ST0(s->tmp1_i64, cpu_env);
                    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                        s->mem_index, MO_LEUQ);
                    gen_helper_fpop(cpu_env);
                    break;
                default:
                    goto unknown_op;
                }

                if (update_fdp) {
                    int last_seg = s->override >= 0 ? s->override : a.def_seg;

                    tcg_gen_ld_i32(s->tmp2_i32, cpu_env,
                                   offsetof(CPUX86State,
                                            segs[last_seg].selector));
                    tcg_gen_st16_i32(s->tmp2_i32, cpu_env,
                                     offsetof(CPUX86State, fpds));
                    tcg_gen_st_tl(last_addr, cpu_env,
                                  offsetof(CPUX86State, fpdp));
                }
            } else {
                /* register float ops */
                opreg = rm;

                switch (op) {
                case 0x08: /* fld sti */
                    gen_helper_fpush(cpu_env);
                    gen_helper_fmov_ST0_STN(cpu_env,
                                            tcg_constant_i32((opreg + 1) & 7));
                    break;
                case 0x09: /* fxchg sti */
                case 0x29: /* fxchg4 sti, undocumented op */
                case 0x39: /* fxchg7 sti, undocumented op */
                    gen_helper_fxchg_ST0_STN(cpu_env, tcg_constant_i32(opreg));
                    break;
                case 0x0a: /* grp d9/2 */
                    switch (rm) {
                    case 0: /* fnop */
                        /* check exceptions (FreeBSD FPU probe) */
                        gen_helper_fwait(cpu_env);
                        update_fip = false;
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x0c: /* grp d9/4 */
                    switch (rm) {
                    case 0: /* fchs */
                        gen_helper_fchs_ST0(cpu_env);
                        break;
                    case 1: /* fabs */
                        gen_helper_fabs_ST0(cpu_env);
                        break;
                    case 4: /* ftst */
                        gen_helper_fldz_FT0(cpu_env);
                        gen_helper_fcom_ST0_FT0(cpu_env);
                        break;
                    case 5: /* fxam */
                        gen_helper_fxam_ST0(cpu_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x0d: /* grp d9/5 */
                    switch (rm) {
                    case 0:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fld1_ST0(cpu_env);
                        break;
                    case 1:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldl2t_ST0(cpu_env);
                        break;
                    case 2:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldl2e_ST0(cpu_env);
                        break;
                    case 3:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldpi_ST0(cpu_env);
                        break;
                    case 4:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldlg2_ST0(cpu_env);
                        break;
                    case 5:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldln2_ST0(cpu_env);
                        break;
                    case 6:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldz_ST0(cpu_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x0e: /* grp d9/6 */
                    switch (rm) {
                    case 0: /* f2xm1 */
                        gen_helper_f2xm1(cpu_env);
                        break;
                    case 1: /* fyl2x */
                        gen_helper_fyl2x(cpu_env);
                        break;
                    case 2: /* fptan */
                        gen_helper_fptan(cpu_env);
                        break;
                    case 3: /* fpatan */
                        gen_helper_fpatan(cpu_env);
                        break;
                    case 4: /* fxtract */
                        gen_helper_fxtract(cpu_env);
                        break;
                    case 5: /* fprem1 */
                        gen_helper_fprem1(cpu_env);
                        break;
                    case 6: /* fdecstp */
                        gen_helper_fdecstp(cpu_env);
                        break;
                    default:
                    case 7: /* fincstp */
                        gen_helper_fincstp(cpu_env);
                        break;
                    }
                    break;
                case 0x0f: /* grp d9/7 */
                    switch (rm) {
                    case 0: /* fprem */
                        gen_helper_fprem(cpu_env);
                        break;
                    case 1: /* fyl2xp1 */
                        gen_helper_fyl2xp1(cpu_env);
                        break;
                    case 2: /* fsqrt */
                        gen_helper_fsqrt(cpu_env);
                        break;
                    case 3: /* fsincos */
                        gen_helper_fsincos(cpu_env);
                        break;
                    case 5: /* fscale */
                        gen_helper_fscale(cpu_env);
                        break;
                    case 4: /* frndint */
                        gen_helper_frndint(cpu_env);
                        break;
                    case 6: /* fsin */
                        gen_helper_fsin(cpu_env);
                        break;
                    default:
                    case 7: /* fcos */
                        gen_helper_fcos(cpu_env);
                        break;
                    }
                    break;
                case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
                case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
                case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
                    {
                        int op1;

                        op1 = op & 7;
                        if (op >= 0x20) {
                            gen_helper_fp_arith_STN_ST0(op1, opreg);
                            if (op >= 0x30) {
                                gen_helper_fpop(cpu_env);
                            }
                        } else {
                            gen_helper_fmov_FT0_STN(cpu_env,
                                                    tcg_constant_i32(opreg));
                            gen_helper_fp_arith_ST0_FT0(op1);
                        }
                    }
                    break;
                case 0x02: /* fcom */
                case 0x22: /* fcom2, undocumented op */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fcom_ST0_FT0(cpu_env);
                    break;
                case 0x03: /* fcomp */
                case 0x23: /* fcomp3, undocumented op */
                case 0x32: /* fcomp5, undocumented op */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fcom_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x15: /* da/5 */
                    switch (rm) {
                    case 1: /* fucompp */
                        gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1));
                        gen_helper_fucom_ST0_FT0(cpu_env);
                        gen_helper_fpop(cpu_env);
                        gen_helper_fpop(cpu_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x1c:
                    switch (rm) {
                    case 0: /* feni (287 only, just do nop here) */
                        break;
                    case 1: /* fdisi (287 only, just do nop here) */
                        break;
                    case 2: /* fclex */
                        gen_helper_fclex(cpu_env);
                        update_fip = false;
                        break;
                    case 3: /* fninit */
                        gen_helper_fninit(cpu_env);
                        update_fip = false;
                        break;
                    case 4: /* fsetpm (287 only, just do nop here) */
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x1d: /* fucomi */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fucomi_ST0_FT0(cpu_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x1e: /* fcomi */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fcomi_ST0_FT0(cpu_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x28: /* ffree sti */
                    gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg));
                    break;
                case 0x2a: /* fst sti */
                    gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg));
                    break;
                case 0x2b: /* fstp sti */
                case 0x0b: /* fstp1 sti, undocumented op */
                case 0x3a: /* fstp8 sti, undocumented op */
                case 0x3b: /* fstp9 sti, undocumented op */
                    gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x2c: /* fucom st(i) */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fucom_ST0_FT0(cpu_env);
                    break;
                case 0x2d: /* fucomp st(i) */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fucom_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x33: /* de/3 */
                    switch (rm) {
                    case 1: /* fcompp */
                        gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1));
                        gen_helper_fcom_ST0_FT0(cpu_env);
                        gen_helper_fpop(cpu_env);
                        gen_helper_fpop(cpu_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x38: /* ffreep sti, undocumented op */
                    gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x3c: /* df/4 */
                    switch (rm) {
                    case 0:
                        gen_helper_fnstsw(s->tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
                        gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x3d: /* fucomip */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fucomi_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x3e: /* fcomip */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fcomi_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x10 ... 0x13: /* fcmovxx */
                case 0x18 ... 0x1b:
                    {
                        int op1;
                        TCGLabel *l1;
                        static const uint8_t fcmov_cc[8] = {
                            (JCC_B << 1),
                            (JCC_Z << 1),
                            (JCC_BE << 1),
                            (JCC_P << 1),
                        };

                        if (!(s->cpuid_features & CPUID_CMOV)) {
                            goto illegal_op;
                        }
                        op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
                        l1 = gen_new_label();
                        gen_jcc1_noeob(s, op1, l1);
                        gen_helper_fmov_ST0_STN(cpu_env,
                                                tcg_constant_i32(opreg));
                        gen_set_label(l1);
                    }
                    break;
                default:
                    goto unknown_op;
                }
            }

            if (update_fip) {
                tcg_gen_ld_i32(s->tmp2_i32, cpu_env,
                               offsetof(CPUX86State, segs[R_CS].selector));
                tcg_gen_st16_i32(s->tmp2_i32, cpu_env,
                                 offsetof(CPUX86State, fpcs));
                tcg_gen_st_tl(eip_cur_tl(s),
                              cpu_env, offsetof(CPUX86State, fpip));
            }
        }
        break;
4909 /************************/
    case 0xa4: /* movsS */
    case 0xa5:
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_movs(s, ot);
        } else {
            gen_movs(s, ot);
        }
        break;

    case 0xaa: /* stosS */
    case 0xab:
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_stos(s, ot);
        } else {
            gen_stos(s, ot);
        }
        break;
    case 0xac: /* lodsS */
    case 0xad:
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_lods(s, ot);
        } else {
            gen_lods(s, ot);
        }
        break;
    case 0xae: /* scasS */
    case 0xaf:
        ot = mo_b_d(b, dflag);
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_scas(s, ot, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_scas(s, ot, 0);
        } else {
            gen_scas(s, ot);
        }
        break;

    case 0xa6: /* cmpsS */
    case 0xa7:
        ot = mo_b_d(b, dflag);
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_cmps(s, ot, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_cmps(s, ot, 0);
        } else {
            gen_cmps(s, ot);
        }
        break;
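    /*
     * For ins/outs and the port I/O ops further down, gen_check_io emits
     * the IOPL/TSS-bitmap permission check and the SVM IOIO intercept; a
     * false return means an exception has already been raised, so no I/O
     * code should be generated.
     */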
    case 0x6c: /* insS */
    case 0x6d:
        ot = mo_b_d32(b, dflag);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        if (!gen_check_io(s, ot, s->tmp2_i32,
                          SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_ins(s, ot);
        } else {
            gen_ins(s, ot);
        }
        break;
    case 0x6e: /* outsS */
    case 0x6f:
        ot = mo_b_d32(b, dflag);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_outs(s, ot);
        } else {
            gen_outs(s, ot);
        }
        break;
        /************************/
        /* port I/O */

    case 0xe4: /* in im */
    case 0xe5:
        ot = mo_b_d32(b, dflag);
        val = x86_ldub_code(env, s);
        tcg_gen_movi_i32(s->tmp2_i32, val);
        if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        gen_helper_in_func(ot, s->T1, s->tmp2_i32);
        gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;
    case 0xe6: /* out im */
    case 0xe7:
        ot = mo_b_d32(b, dflag);
        val = x86_ldub_code(env, s);
        tcg_gen_movi_i32(s->tmp2_i32, val);
        if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
            break;
        }
        translator_io_start(&s->base);
        gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
        gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;
    case 0xec: /* in dx */
    case 0xed:
        ot = mo_b_d32(b, dflag);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        gen_helper_in_func(ot, s->T1, s->tmp2_i32);
        gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;
    case 0xee: /* out dx */
    case 0xef:
        ot = mo_b_d32(b, dflag);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
            break;
        }
        translator_io_start(&s->base);
        gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
        gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;
        /************************/
        /* control */
    case 0xc2: /* ret im */
        val = x86_ldsw_code(env, s);
        ot = gen_pop_T0(s);
        gen_stack_update(s, val + (1 << ot));
        /* Note that gen_pop_T0 uses a zero-extending load. */
        gen_op_jmp_v(s, s->T0);
        gen_bnd_jmp(s);
        s->base.is_jmp = DISAS_JUMP;
        break;
    case 0xc3: /* ret */
        ot = gen_pop_T0(s);
        gen_pop_update(s, ot);
        /* Note that gen_pop_T0 uses a zero-extending load. */
        gen_op_jmp_v(s, s->T0);
        gen_bnd_jmp(s);
        s->base.is_jmp = DISAS_JUMP;
        break;
    case 0xca: /* lret im */
        val = x86_ldsw_code(env, s);
    do_lret:
        if (PE(s) && !VM86(s)) {
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_lret_protected(cpu_env, tcg_constant_i32(dflag - 1),
                                      tcg_constant_i32(val));
        } else {
            gen_stack_A0(s);
            /* pop offset */
            gen_op_ld_v(s, dflag, s->T0, s->A0);
            /* NOTE: keeping EIP updated is not a problem in case of
               exception */
            gen_op_jmp_v(s, s->T0);
            /* pop selector */
            gen_add_A0_im(s, 1 << dflag);
            gen_op_ld_v(s, dflag, s->T0, s->A0);
            gen_op_movl_seg_T0_vm(s, R_CS);
            /* add stack offset */
            gen_stack_update(s, val + (2 << dflag));
        }
        s->base.is_jmp = DISAS_EOB_ONLY;
        break;
    case 0xcb: /* lret */
        val = 0;
        goto do_lret;
    case 0xcf: /* iret */
        gen_svm_check_intercept(s, SVM_EXIT_IRET);
        if (!PE(s) || VM86(s)) {
            /* real mode or vm86 mode */
            if (!check_vm86_iopl(s)) {
                break;
            }
            gen_helper_iret_real(cpu_env, tcg_constant_i32(dflag - 1));
        } else {
            gen_helper_iret_protected(cpu_env, tcg_constant_i32(dflag - 1),
                                      eip_next_i32(s));
        }
        set_cc_op(s, CC_OP_EFLAGS);
        s->base.is_jmp = DISAS_EOB_ONLY;
        break;
    case 0xe8: /* call im */
        {
            int diff = (dflag != MO_16
                        ? (int32_t)insn_get(env, s, MO_32)
                        : (int16_t)insn_get(env, s, MO_16));
            gen_push_v(s, eip_next_tl(s));
            gen_bnd_jmp(s);
            gen_jmp_rel(s, dflag, diff, 0);
        }
        break;
    case 0x9a: /* lcall im */
        {
            unsigned int selector, offset;

            if (CODE64(s))
                goto illegal_op;
            ot = dflag;
            offset = insn_get(env, s, ot);
            selector = insn_get(env, s, MO_16);

            tcg_gen_movi_tl(s->T0, selector);
            tcg_gen_movi_tl(s->T1, offset);
        }
        goto do_lcall;
    case 0xe9: /* jmp im */
        {
            int diff = (dflag != MO_16
                        ? (int32_t)insn_get(env, s, MO_32)
                        : (int16_t)insn_get(env, s, MO_16));
            gen_bnd_jmp(s);
            gen_jmp_rel(s, dflag, diff, 0);
        }
        break;
    case 0xea: /* ljmp im */
        {
            unsigned int selector, offset;

            if (CODE64(s))
                goto illegal_op;
            ot = dflag;
            offset = insn_get(env, s, ot);
            selector = insn_get(env, s, MO_16);

            tcg_gen_movi_tl(s->T0, selector);
            tcg_gen_movi_tl(s->T1, offset);
        }
        goto do_ljmp;
    case 0xeb: /* jmp Jb */
        {
            int diff = (int8_t)insn_get(env, s, MO_8);
            gen_jmp_rel(s, dflag, diff, 0);
        }
        break;
    case 0x70 ... 0x7f: /* jcc Jb */
        {
            int diff = (int8_t)insn_get(env, s, MO_8);
            gen_bnd_jmp(s);
            gen_jcc(s, b, diff);
        }
        break;
    case 0x180 ... 0x18f: /* jcc Jv */
        {
            int diff = (dflag != MO_16
                        ? (int32_t)insn_get(env, s, MO_32)
                        : (int16_t)insn_get(env, s, MO_16));
            gen_bnd_jmp(s);
            gen_jcc(s, b, diff);
        }
        break;
    case 0x190 ... 0x19f: /* setcc Gv */
        modrm = x86_ldub_code(env, s);
        gen_setcc1(s, b, s->T0);
        gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
        break;
    case 0x140 ... 0x14f: /* cmov Gv, Ev */
        if (!(s->cpuid_features & CPUID_CMOV)) {
            goto illegal_op;
        }
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        gen_cmovcc1(env, s, ot, b, modrm, reg);
        break;
        /************************/
        /* flags */
    case 0x9c: /* pushf */
        gen_svm_check_intercept(s, SVM_EXIT_PUSHF);
        if (check_vm86_iopl(s)) {
            gen_update_cc_op(s);
            gen_helper_read_eflags(s->T0, cpu_env);
            gen_push_v(s, s->T0);
        }
        break;
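    /*
     * For popf, the set of writable EFLAGS bits depends on privilege:
     * IF and IOPL only at CPL 0, IF alone when CPL <= IOPL, and a 16-bit
     * operand size restricts the update to the low word.
     */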
    case 0x9d: /* popf */
        gen_svm_check_intercept(s, SVM_EXIT_POPF);
        if (check_vm86_iopl(s)) {
            int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK;

            if (CPL(s) == 0) {
                mask |= IF_MASK | IOPL_MASK;
            } else if (CPL(s) <= IOPL(s)) {
                mask |= IF_MASK;
            }
            if (dflag == MO_16) {
                mask &= 0xffff;
            }

            ot = gen_pop_T0(s);
            gen_helper_write_eflags(cpu_env, s->T0, tcg_constant_i32(mask));
            gen_pop_update(s, ot);
            set_cc_op(s, CC_OP_EFLAGS);
            /* abort translation because TF/AC flag may change */
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
        break;
    case 0x9e: /* sahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        tcg_gen_shri_tl(s->T0, cpu_regs[R_EAX], 8);
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
        tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0);
        break;
    case 0x9f: /* lahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        gen_compute_eflags(s);
        /* Note: gen_compute_eflags() only gives the condition codes */
        tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02);
        tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], s->T0, 8, 8);
        break;
    case 0xf5: /* cmc */
        gen_compute_eflags(s);
        tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        break;
    case 0xf8: /* clc */
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
        break;
    case 0xf9: /* stc */
        gen_compute_eflags(s);
        tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        break;
    case 0xfc: /* cld */
        tcg_gen_movi_i32(s->tmp2_i32, 1);
        tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, df));
        break;
    case 0xfd: /* std */
        tcg_gen_movi_i32(s->tmp2_i32, -1);
        tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, df));
        break;
        /************************/
        /* bit operations */
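    /*
     * For all bit-test forms, the bit offset is reduced modulo the
     * operand width at bt_op below; only the register-sourced forms can
     * first displace the memory address by whole operand-sized words.
     */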
    case 0x1ba: /* bt/bts/btr/btc Gv, im */
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        op = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        if (mod != 3) {
            s->rip_offset = 1;
            gen_lea_modrm(env, s, modrm);
            if (!(s->prefix & PREFIX_LOCK)) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
            }
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        }
        /* load shift */
        val = x86_ldub_code(env, s);
        tcg_gen_movi_tl(s->T1, val);
        if (op < 4)
            goto unknown_op;
        op -= 4;
        goto bt_op;
    case 0x1a3: /* bt Gv, Ev */
        op = 0;
        goto do_btx;
    case 0x1ab: /* bts */
        op = 1;
        goto do_btx;
    case 0x1b3: /* btr */
        op = 2;
        goto do_btx;
    case 0x1bb: /* btc */
        op = 3;
    do_btx:
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_v_reg(s, MO_32, s->T1, reg);
        if (mod != 3) {
            AddressParts a = gen_lea_modrm_0(env, s, modrm);
            /* specific case: we need to add a displacement */
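            /*
             * T1 holds a signed bit offset that may fall outside the
             * addressed operand: (T1 >> (3 + ot)) << ot is the matching
             * byte displacement (word index times operand size), added
             * to the address before the offset is masked at bt_op.
             */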
            gen_exts(ot, s->T1);
            tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
            tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
            tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
            gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
            if (!(s->prefix & PREFIX_LOCK)) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
            }
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        }
    bt_op:
        tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
        tcg_gen_movi_tl(s->tmp0, 1);
        tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
        if (s->prefix & PREFIX_LOCK) {
            switch (op) {
            case 0: /* bt */
                /* Needs no atomic ops; we suppressed the normal
                   memory load for LOCK above so do it now. */
                gen_op_ld_v(s, ot, s->T0, s->A0);
                break;
            case 1: /* bts */
                tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
                                           s->mem_index, ot | MO_LE);
                break;
            case 2: /* btr */
                tcg_gen_not_tl(s->tmp0, s->tmp0);
                tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
                                            s->mem_index, ot | MO_LE);
                break;
            default:
            case 3: /* btc */
                tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
                                            s->mem_index, ot | MO_LE);
                break;
            }
            tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
        } else {
            tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
            switch (op) {
            case 0: /* bt */
                /* Data already loaded; nothing to do.  */
                break;
            case 1: /* bts */
                tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
                break;
            case 2: /* btr */
                tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
                break;
            default:
            case 3: /* btc */
                tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
                break;
            }
        }
        if (op != 0) {
            if (mod != 3) {
                gen_op_st_v(s, ot, s->T0, s->A0);
            } else {
                gen_op_mov_reg_v(s, ot, rm, s->T0);
            }
        }

        /* Delay all CC updates until after the store above.  Note that
           C is the result of the test, Z is unchanged, and the others
           are all undefined.  */
        switch (s->cc_op) {
        case CC_OP_MULB ... CC_OP_MULQ:
        case CC_OP_ADDB ... CC_OP_ADDQ:
        case CC_OP_ADCB ... CC_OP_ADCQ:
        case CC_OP_SUBB ... CC_OP_SUBQ:
        case CC_OP_SBBB ... CC_OP_SBBQ:
        case CC_OP_LOGICB ... CC_OP_LOGICQ:
        case CC_OP_INCB ... CC_OP_INCQ:
        case CC_OP_DECB ... CC_OP_DECQ:
        case CC_OP_SHLB ... CC_OP_SHLQ:
        case CC_OP_SARB ... CC_OP_SARQ:
        case CC_OP_BMILGB ... CC_OP_BMILGQ:
            /* Z was going to be computed from the non-zero status of CC_DST.
               We can get that same Z value (and the new C value) by leaving
               CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
               same width.  */
            tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
            set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
            break;
        default:
            /* Otherwise, generate EFLAGS and replace the C bit.  */
            gen_compute_eflags(s);
            tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
                               ctz32(CC_C), 1);
            break;
        }
        break;
    case 0x1bc: /* bsf / tzcnt */
    case 0x1bd: /* bsr / lzcnt */
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, s->T0);

        /* Note that lzcnt and tzcnt are in different extensions.  */
        if ((prefixes & PREFIX_REPZ)
            && (b & 1
                ? s->cpuid_ext3_features & CPUID_EXT3_ABM
                : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
            int size = 8 << ot;
            /* For lzcnt/tzcnt, C bit is defined related to the input. */
            tcg_gen_mov_tl(cpu_cc_src, s->T0);
            if (b & 1) {
                /* For lzcnt, reduce the target_ulong result by the
                   number of zeros that we expect to find at the top. */
                tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
                tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
            } else {
                /* For tzcnt, a zero input must return the operand size.  */
                tcg_gen_ctzi_tl(s->T0, s->T0, size);
            }
            /* For lzcnt/tzcnt, Z bit is defined related to the result.  */
            gen_op_update1_cc(s);
            set_cc_op(s, CC_OP_BMILGB + ot);
        } else {
            /* For bsr/bsf, only the Z bit is defined and it is related
               to the input and not the result.  */
            tcg_gen_mov_tl(cpu_cc_dst, s->T0);
            set_cc_op(s, CC_OP_LOGICB + ot);
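            /*
             * The clz/ctz expansions below take a "default" operand that
             * is returned for a zero input; threading cpu_regs[reg]
             * through implements the leave-unchanged behavior.  For bsr,
             * x ^ (TARGET_LONG_BITS - 1) converts between a leading-zero
             * count and a bit index.
             */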
            /* ??? The manual says that the output is undefined when the
               input is zero, but real hardware leaves it unchanged, and
               real programs appear to depend on that.  Accomplish this
               by passing the output as the value to return upon zero.  */
            if (b & 1) {
                /* For bsr, return the bit index of the first 1 bit,
                   not the count of leading zeros.  */
                tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
                tcg_gen_clz_tl(s->T0, s->T0, s->T1);
                tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
            } else {
                tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
            }
        }
        gen_op_mov_reg_v(s, ot, reg, s->T0);
        break;
        /************************/
        /* bcd */
    case 0x27: /* daa */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_daa(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x2f: /* das */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_das(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x37: /* aaa */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_aaa(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x3f: /* aas */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_aas(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0xd4: /* aam */
        if (CODE64(s))
            goto illegal_op;
        val = x86_ldub_code(env, s);
        if (val == 0) {
            gen_exception(s, EXCP00_DIVZ);
        } else {
            gen_helper_aam(cpu_env, tcg_constant_i32(val));
            set_cc_op(s, CC_OP_LOGICB);
        }
        break;
    case 0xd5: /* aad */
        if (CODE64(s))
            goto illegal_op;
        val = x86_ldub_code(env, s);
        gen_helper_aad(cpu_env, tcg_constant_i32(val));
        set_cc_op(s, CC_OP_LOGICB);
        break;
        /************************/
        /* misc */
    case 0x90: /* nop */
        /* XXX: correct lock test for all insn */
        if (prefixes & PREFIX_LOCK) {
            goto illegal_op;
        }
        /* If REX_B is set, then this is xchg eax, r8d, not a nop.  */
        if (REX_B(s)) {
            goto do_xchg_reg_eax;
        }
        if (prefixes & PREFIX_REPZ) {
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_pause(cpu_env, cur_insn_len_i32(s));
            s->base.is_jmp = DISAS_NORETURN;
        }
        break;
    case 0x9b: /* fwait */
        if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
            (HF_MP_MASK | HF_TS_MASK)) {
            gen_exception(s, EXCP07_PREX);
        } else {
            gen_helper_fwait(cpu_env);
        }
        break;
    case 0xcc: /* int3 */
        gen_interrupt(s, EXCP03_INT3);
        break;
    case 0xcd: /* int N */
        val = x86_ldub_code(env, s);
        if (check_vm86_iopl(s)) {
            gen_interrupt(s, val);
        }
        break;
    case 0xce: /* into */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_into(cpu_env, cur_insn_len_i32(s));
        break;
    case 0xf1: /* icebp (undocumented, exits to external debugger) */
        gen_svm_check_intercept(s, SVM_EXIT_ICEBP);
        gen_debug(s);
        break;
    case 0xfa: /* cli */
        if (check_iopl(s)) {
            gen_reset_eflags(s, IF_MASK);
        }
        break;
    case 0xfb: /* sti */
        if (check_iopl(s)) {
            gen_set_eflags(s, IF_MASK);
            /* interrupts are enabled only after the insn following sti */
            gen_update_eip_next(s);
            gen_eob_inhibit_irq(s, true);
        }
        break;
    case 0x62: /* bound */
        if (CODE64(s))
            goto illegal_op;
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_op_mov_v_reg(s, ot, s->T0, reg);
        gen_lea_modrm(env, s, modrm);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        if (ot == MO_16) {
            gen_helper_boundw(cpu_env, s->A0, s->tmp2_i32);
        } else {
            gen_helper_boundl(cpu_env, s->A0, s->tmp2_i32);
        }
        break;
    case 0x1c8 ... 0x1cf: /* bswap reg */
        reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            tcg_gen_bswap64_i64(cpu_regs[reg], cpu_regs[reg]);
            break;
        }
#endif
        tcg_gen_bswap32_tl(cpu_regs[reg], cpu_regs[reg], TCG_BSWAP_OZ);
        break;
    case 0xd6: /* salc */
        if (CODE64(s))
            goto illegal_op;
        gen_compute_eflags_c(s, s->T0);
        tcg_gen_neg_tl(s->T0, s->T0);
        gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
        break;
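    /*
     * The loop/jecxz cases below use two labels: l1 is the branch-taken
     * target and l2 the not-taken one; gen_jmp_rel_csize(s, 0, 1) emits
     * the fall-through jump to the next instruction.
     */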
    case 0xe0: /* loopnz */
    case 0xe1: /* loopz */
    case 0xe2: /* loop */
    case 0xe3: /* jecxz */
        {
            TCGLabel *l1, *l2;
            int diff = (int8_t)insn_get(env, s, MO_8);

            l1 = gen_new_label();
            l2 = gen_new_label();
            gen_update_cc_op(s);
            b &= 3;
            switch (b) {
            case 0: /* loopnz */
            case 1: /* loopz */
                gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
                gen_op_jz_ecx(s, l2);
                gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
                break;
            case 2: /* loop */
                gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
                gen_op_jnz_ecx(s, l1);
                break;
            default:
            case 3: /* jecxz */
                gen_op_jz_ecx(s, l1);
                break;
            }

            gen_set_label(l2);
            gen_jmp_rel_csize(s, 0, 1);

            gen_set_label(l1);
            gen_jmp_rel(s, dflag, diff, 0);
        }
        break;
    case 0x130: /* wrmsr */
    case 0x132: /* rdmsr */
        if (check_cpl0(s)) {
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            if (b & 2) {
                gen_helper_rdmsr(cpu_env);
            } else {
                gen_helper_wrmsr(cpu_env);
                s->base.is_jmp = DISAS_EOB_NEXT;
            }
        }
        break;
    case 0x131: /* rdtsc */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        translator_io_start(&s->base);
        gen_helper_rdtsc(cpu_env);
        break;
    case 0x133: /* rdpmc */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_rdpmc(cpu_env);
        s->base.is_jmp = DISAS_NORETURN;
        break;
    case 0x134: /* sysenter */
        /* For Intel, SYSENTER is valid on 64-bit */
        if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
            goto illegal_op;
        if (!PE(s)) {
            gen_exception_gpf(s);
        } else {
            gen_helper_sysenter(cpu_env);
            s->base.is_jmp = DISAS_EOB_ONLY;
        }
        break;
    case 0x135: /* sysexit */
        /* For Intel, SYSEXIT is valid on 64-bit */
        if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
            goto illegal_op;
        if (!PE(s)) {
            gen_exception_gpf(s);
        } else {
            gen_helper_sysexit(cpu_env, tcg_constant_i32(dflag - 1));
            s->base.is_jmp = DISAS_EOB_ONLY;
        }
        break;
#ifdef TARGET_X86_64
    case 0x105: /* syscall */
        /* XXX: is it usable in real mode? */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_syscall(cpu_env, cur_insn_len_i32(s));
        /* TF handling for the syscall insn is different. The TF bit is
           checked after the syscall insn completes. This allows #DB to not be
           generated after one has entered CPL0 if TF is set in FMASK.  */
        gen_eob_worker(s, false, true);
        break;
    case 0x107: /* sysret */
        if (!PE(s)) {
            gen_exception_gpf(s);
        } else {
            gen_helper_sysret(cpu_env, tcg_constant_i32(dflag - 1));
            /* condition codes are modified only in long mode */
            if (LMA(s)) {
                set_cc_op(s, CC_OP_EFLAGS);
            }
            /* TF handling for the sysret insn is different. The TF bit is
               checked after the sysret insn completes. This allows #DB to be
               generated "as if" the syscall insn in userspace has just
               returned to CPL3.  */
            gen_eob_worker(s, false, true);
        }
        break;
#endif
    case 0x1a2: /* cpuid */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_cpuid(cpu_env);
        break;
    case 0xf4: /* hlt */
        if (check_cpl0(s)) {
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_hlt(cpu_env, cur_insn_len_i32(s));
            s->base.is_jmp = DISAS_NORETURN;
        }
        break;
    case 0x100:
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch (op) {
        case 0: /* sldt */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
            tcg_gen_ld32u_tl(s->T0, cpu_env,
                             offsetof(CPUX86State, ldt.selector));
            ot = mod == 3 ? dflag : MO_16;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 2: /* lldt */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (check_cpl0(s)) {
                gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
                gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_lldt(cpu_env, s->tmp2_i32);
            }
            break;
        case 1: /* str */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
            tcg_gen_ld32u_tl(s->T0, cpu_env,
                             offsetof(CPUX86State, tr.selector));
            ot = mod == 3 ? dflag : MO_16;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 3: /* ltr */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (check_cpl0(s)) {
                gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
                gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_ltr(cpu_env, s->tmp2_i32);
            }
            break;
        case 4: /* verr */
        case 5: /* verw */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            gen_update_cc_op(s);
            if (op == 4) {
                gen_helper_verr(cpu_env, s->T0);
            } else {
                gen_helper_verw(cpu_env, s->T0);
            }
            set_cc_op(s, CC_OP_EFLAGS);
            break;
        default:
            goto unknown_op;
        }
        break;
    case 0x101:
        modrm = x86_ldub_code(env, s);
        switch (modrm) {
        CASE_MODRM_MEM_OP(0): /* sgdt */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0,
                             cpu_env, offsetof(CPUX86State, gdt.limit));
            gen_op_st_v(s, MO_16, s->T0, s->A0);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            break;
        case 0xc8: /* monitor */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
            gen_extu(s->aflag, s->A0);
            gen_add_A0_ds_seg(s);
            gen_helper_monitor(cpu_env, s->A0);
            break;
        case 0xc9: /* mwait */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_mwait(cpu_env, cur_insn_len_i32(s));
            s->base.is_jmp = DISAS_NORETURN;
            break;
        case 0xca: /* clac */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
                || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_reset_eflags(s, AC_MASK);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;
        case 0xcb: /* stac */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
                || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_set_eflags(s, AC_MASK);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;
        CASE_MODRM_MEM_OP(1): /* sidt */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.limit));
            gen_op_st_v(s, MO_16, s->T0, s->A0);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            break;
        case 0xd0: /* xgetbv */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_xgetbv(s->tmp1_i64, cpu_env, s->tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
            break;
        case 0xd1: /* xsetbv */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_xsetbv(cpu_env, s->tmp2_i32, s->tmp1_i64);
            /* End TB because translation flags may change.  */
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;
        case 0xd8: /* VMRUN */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmrun(cpu_env, tcg_constant_i32(s->aflag - 1),
                             cur_insn_len_i32(s));
            tcg_gen_exit_tb(NULL, 0);
            s->base.is_jmp = DISAS_NORETURN;
            break;
        case 0xd9: /* VMMCALL */
            if (!SVME(s)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmmcall(cpu_env);
            break;
        case 0xda: /* VMLOAD */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmload(cpu_env, tcg_constant_i32(s->aflag - 1));
            break;
        case 0xdb: /* VMSAVE */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmsave(cpu_env, tcg_constant_i32(s->aflag - 1));
            break;
        case 0xdc: /* STGI */
            if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_helper_stgi(cpu_env);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;
        case 0xdd: /* CLGI */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_clgi(cpu_env);
            break;
        case 0xde: /* SKINIT */
            if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !PE(s)) {
                goto illegal_op;
            }
            gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
            /* If not intercepted, not implemented -- raise #UD. */
            goto illegal_op;
        case 0xdf: /* INVLPGA */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
            if (s->aflag == MO_64) {
                tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
            } else {
                tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
            }
            gen_helper_flush_page(cpu_env, s->A0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;
        CASE_MODRM_MEM_OP(2): /* lgdt */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
            tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, gdt.limit));
            break;
        CASE_MODRM_MEM_OP(3): /* lidt */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
            tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, idt.limit));
            break;
        CASE_MODRM_OP(4): /* smsw */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
            tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, cr[0]));
            /*
             * In 32-bit mode, the higher 16 bits of the destination
             * register are undefined.  In practice CR0[31:0] is stored
             * just like in 64-bit mode.
             */
            mod = (modrm >> 6) & 3;
            ot = (mod != 3 ? MO_16 : s->dflag);
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 0xee: /* rdpkru */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_rdpkru(s->tmp1_i64, cpu_env, s->tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
            break;
        case 0xef: /* wrpkru */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_wrpkru(cpu_env, s->tmp2_i32, s->tmp1_i64);
            break;
        CASE_MODRM_OP(6): /* lmsw */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            /*
             * Only the 4 lower bits of CR0 are modified.
             * PE cannot be set to zero if already set to one.
             */
            tcg_gen_ld_tl(s->T1, cpu_env, offsetof(CPUX86State, cr[0]));
            tcg_gen_andi_tl(s->T0, s->T0, 0xf);
            tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
            tcg_gen_or_tl(s->T0, s->T0, s->T1);
            gen_helper_write_crN(cpu_env, tcg_constant_i32(0), s->T0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;
        CASE_MODRM_MEM_OP(7): /* invlpg */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
            gen_lea_modrm(env, s, modrm);
            gen_helper_flush_page(cpu_env, s->A0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;
        case 0xf8: /* swapgs */
#ifdef TARGET_X86_64
            if (CODE64(s)) {
                if (check_cpl0(s)) {
                    tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
                    tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
                                  offsetof(CPUX86State, kernelgsbase));
                    tcg_gen_st_tl(s->T0, cpu_env,
                                  offsetof(CPUX86State, kernelgsbase));
                }
                break;
            }
#endif
            goto illegal_op;
        case 0xf9: /* rdtscp */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            translator_io_start(&s->base);
            gen_helper_rdtscp(cpu_env);
            break;
        default:
            goto unknown_op;
        }
        break;
    case 0x108: /* invd */
    case 0x109: /* wbinvd */
        if (check_cpl0(s)) {
            gen_svm_check_intercept(s, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
            /* nothing to do */
        }
        break;
    case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            int d_ot;
            /* d_ot is the size of destination */
            d_ot = dflag;

            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_v_reg(s, MO_32, s->T0, rm);
                /* sign extend */
                if (d_ot == MO_64) {
                    tcg_gen_ext32s_tl(s->T0, s->T0);
                }
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            } else {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0);
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            }
        } else
#endif
        {
            TCGLabel *label1;
            TCGv t0, t1, t2;

            if (!PE(s) || VM86(s))
                goto illegal_op;
            t0 = tcg_temp_new();
            t1 = tcg_temp_new();
            t2 = tcg_temp_new();
            ot = MO_16;
            modrm = x86_ldub_code(env, s);
            reg = (modrm >> 3) & 7;
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, ot, t0, s->A0);
            } else {
                gen_op_mov_v_reg(s, ot, t0, rm);
            }
            gen_op_mov_v_reg(s, ot, t1, reg);
            tcg_gen_andi_tl(s->tmp0, t0, 3);
            tcg_gen_andi_tl(t1, t1, 3);
            tcg_gen_movi_tl(t2, 0);
            label1 = gen_new_label();
            tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1);
            tcg_gen_andi_tl(t0, t0, ~3);
            tcg_gen_or_tl(t0, t0, t1);
            tcg_gen_movi_tl(t2, CC_Z);
            gen_set_label(label1);
            if (mod != 3) {
                gen_op_st_v(s, ot, t0, s->A0);
            } else {
                gen_op_mov_reg_v(s, ot, rm, t0);
            }
            gen_compute_eflags(s);
            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
            tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
        }
        break;
    case 0x102: /* lar */
    case 0x103: /* lsl */
        {
            TCGLabel *label1;
            TCGv t0;
            if (!PE(s) || VM86(s))
                goto illegal_op;
            ot = dflag != MO_16 ? MO_32 : MO_16;
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            t0 = tcg_temp_new();
            gen_update_cc_op(s);
            if (b == 0x102) {
                gen_helper_lar(t0, cpu_env, s->T0);
            } else {
                gen_helper_lsl(t0, cpu_env, s->T0);
            }
            tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
            label1 = gen_new_label();
            tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
            gen_op_mov_reg_v(s, ot, reg, t0);
            gen_set_label(label1);
            set_cc_op(s, CC_OP_EFLAGS);
        }
        break;
    case 0x118:
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch (op) {
        case 0: /* prefetchnta */
        case 1: /* prefetcht0 */
        case 2: /* prefetcht1 */
        case 3: /* prefetcht2 */
            if (mod == 3)
                goto illegal_op;
            gen_nop_modrm(env, s, modrm);
            /* nothing more to do */
            break;
        default: /* nop (multi byte) */
            gen_nop_modrm(env, s, modrm);
            break;
        }
        break;
    case 0x11a:
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (prefixes & PREFIX_REPZ) {
                /* bndcl */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcu */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                TCGv_i64 notu = tcg_temp_new_i64();
                tcg_gen_not_i64(notu, cpu_bndu[reg]);
                gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- from reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
                        tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                    /* bnd registers are now in-use */
                    gen_set_hflag(s, HF_MPX_IU_MASK);
                }
            } else if (mod != 3) {
                /* bndldx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndldx64(cpu_bndl[reg], cpu_env, s->A0, s->T0);
                    tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
                                   offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
                } else {
                    gen_helper_bndldx32(cpu_bndu[reg], cpu_env, s->A0, s->T0);
                    tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
                    tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
                }
                gen_set_hflag(s, HF_MPX_IU_MASK);
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x11b:
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (mod != 3 && (prefixes & PREFIX_REPZ)) {
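                /*
                 * bndmk: the lower bound comes from the base register,
                 * while the upper bound is architecturally stored in
                 * one's complement form, hence the tcg_gen_not_tl on
                 * the computed effective address below.
                 */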
                /* bndmk */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (a.base >= 0) {
                    tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
                    if (!CODE64(s)) {
                        tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
                    }
                } else if (a.base == -1) {
                    /* no base register has lower bound of 0 */
                    tcg_gen_movi_i64(cpu_bndl[reg], 0);
                } else {
                    /* rip-relative generates #ud */
                    goto illegal_op;
                }
                tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
                if (!CODE64(s)) {
                    tcg_gen_ext32u_tl(s->A0, s->A0);
                }
                tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
                break;
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcn */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- to reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
                        tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                }
            } else if (mod != 3) {
                /* bndstx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndstx64(cpu_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                } else {
                    gen_helper_bndstx32(cpu_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                }
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
        modrm = x86_ldub_code(env, s);
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
        if (!check_cpl0(s)) {
            break;
        }
        modrm = x86_ldub_code(env, s);
        /*
         * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
         * AMD documentation (24594.pdf) and testing of Intel 386 and 486
         * processors all show that the mod bits are assumed to be 1's,
         * regardless of actual values.
         */
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
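        /*
         * With the LOCK prefix and the CR8LEG feature, an access to CR0
         * is AMD's alternate encoding of CR8 (the TPR); the switch below
         * rewrites reg accordingly.
         */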
        switch (reg) {
        case 0:
            if ((prefixes & PREFIX_LOCK) &&
                (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
                reg = 8;
            }
            break;
        case 2:
        case 3:
        case 4:
        case 8:
            break;
        default:
            goto unknown_op;
        }
        ot  = (CODE64(s) ? MO_64 : MO_32);

        translator_io_start(&s->base);
        if (b & 2) {
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
            gen_op_mov_v_reg(s, ot, s->T0, rm);
            gen_helper_write_crN(cpu_env, tcg_constant_i32(reg), s->T0);
            s->base.is_jmp = DISAS_EOB_NEXT;
        } else {
            gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
            gen_helper_read_crN(s->T0, cpu_env, tcg_constant_i32(reg));
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        }
        break;
    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
        if (check_cpl0(s)) {
            modrm = x86_ldub_code(env, s);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * Intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (CODE64(s))
                ot = MO_64;
            else
                ot = MO_32;
            if (reg >= 8) {
                goto illegal_op;
            }
            if (b & 2) {
                gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
                gen_op_mov_v_reg(s, ot, s->T0, rm);
                tcg_gen_movi_i32(s->tmp2_i32, reg);
                gen_helper_set_dr(cpu_env, s->tmp2_i32, s->T0);
                s->base.is_jmp = DISAS_EOB_NEXT;
            } else {
                gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
                tcg_gen_movi_i32(s->tmp2_i32, reg);
                gen_helper_get_dr(s->T0, cpu_env, s->tmp2_i32);
                gen_op_mov_reg_v(s, ot, rm, s->T0);
            }
        }
        break;
    case 0x106: /* clts */
        if (check_cpl0(s)) {
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
            gen_helper_clts(cpu_env);
            /* abort block because static cpu state changed */
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
        break;
    /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
            goto illegal_op;
        ot = mo_64_32(dflag);
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | REX_R(s);
        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0x1ae:
        modrm = x86_ldub_code(env, s);
        switch (modrm) {
        CASE_MODRM_MEM_OP(0): /* fxsave */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxsave(cpu_env, s->A0);
            break;
        CASE_MODRM_MEM_OP(1): /* fxrstor */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxrstor(cpu_env, s->A0);
            break;
        CASE_MODRM_MEM_OP(2): /* ldmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
            gen_helper_ldmxcsr(cpu_env, s->tmp2_i32);
            break;
        CASE_MODRM_MEM_OP(3): /* stmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_helper_update_mxcsr(cpu_env);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, mxcsr));
            gen_op_st_v(s, MO_32, s->T0, s->A0);
            break;
        CASE_MODRM_MEM_OP(4): /* xsave */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xsave(cpu_env, s->A0, s->tmp1_i64);
            break;
        CASE_MODRM_MEM_OP(5): /* xrstor */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xrstor(cpu_env, s->A0, s->tmp1_i64);
            /* XRSTOR is how MPX is enabled, which changes how
               we translate.  Thus we need to end the TB.  */
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;
        CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clwb */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
                    goto illegal_op;
                }
                gen_nop_modrm(env, s, modrm);
            } else {
                /* xsaveopt */
                if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                    || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
                    || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                      cpu_regs[R_EDX]);
                gen_helper_xsaveopt(cpu_env, s->A0, s->tmp1_i64);
            }
            break;
        CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clflushopt */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
                    goto illegal_op;
                }
            } else {
                /* clflush */
                if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
                    || !(s->cpuid_features & CPUID_CLFLUSH)) {
                    goto illegal_op;
                }
            }
            gen_nop_modrm(env, s, modrm);
            break;
        case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
        case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
        case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
        case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
            if (CODE64(s)
                && (prefixes & PREFIX_REPZ)
                && !(prefixes & PREFIX_LOCK)
                && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
                TCGv base, treg, src, dst;

                /* Preserve hflags bits by testing CR4 at runtime.  */
                tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
                gen_helper_cr4_testbit(cpu_env, s->tmp2_i32);

                base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
                treg = cpu_regs[(modrm & 7) | REX_B(s)];

                if (modrm & 0x10) {
                    /* wr*base */
                    dst = base, src = treg;
                } else {
                    /* rd*base */
                    dst = treg, src = base;
                }

                if (s->dflag == MO_32) {
                    tcg_gen_ext32u_tl(dst, src);
                } else {
                    tcg_gen_mov_tl(dst, src);
                }
                break;
            }
            goto unknown_op;
        case 0xf8: /* sfence / pcommit */
            if (prefixes & PREFIX_DATA) {
                /* pcommit */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
                    || (prefixes & PREFIX_LOCK)) {
                    goto illegal_op;
                }
                break;
            }
            /* fallthru */
        case 0xf9 ... 0xff: /* sfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0xe8 ... 0xef: /* lfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
            break;
        case 0xf0 ... 0xf7: /* mfence */
            if (!(s->cpuid_features & CPUID_SSE2)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;

        default:
            goto unknown_op;
        }
        break;
    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
            goto illegal_op;
#ifdef CONFIG_USER_ONLY
        /* we should not be in SMM mode */
        g_assert_not_reached();
#else
        gen_update_cc_op(s);
        gen_update_eip_next(s);
        gen_helper_rsm(cpu_env);
#endif /* CONFIG_USER_ONLY */
        s->base.is_jmp = DISAS_EOB_ONLY;
        break;
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ)
            goto illegal_op;
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
            goto illegal_op;

        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        if (s->prefix & PREFIX_DATA) {
            ot = MO_16;
        } else {
            ot = mo_64_32(dflag);
        }

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, s->T0);
        tcg_gen_mov_tl(cpu_cc_src, s->T0);
        tcg_gen_ctpop_tl(s->T0, s->T0);
        gen_op_mov_reg_v(s, ot, reg, s->T0);

        set_cc_op(s, CC_OP_POPCNT);
        break;
    case 0x10e ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x179:
    case 0x17c ... 0x17f:
    case 0x1c2:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        disas_insn_new(s, cpu, b);
        break;
    default:
        goto unknown_op;
    }
    return true;
 illegal_op:
    gen_illegal_opcode(s);
    return true;
 unknown_op:
    gen_unknown_opcode(env, s);
    return true;
}
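/*
 * tcg_x86_init registers the fixed TCG globals that back CPUX86State
 * fields; all generated code accesses guest registers, the cc_* values
 * and EIP through these handles.
 */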
void tcg_x86_init(void)
{
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
        [R_EAX] = "rax",
        [R_EBX] = "rbx",
        [R_ECX] = "rcx",
        [R_EDX] = "rdx",
        [R_ESI] = "rsi",
        [R_EDI] = "rdi",
        [R_EBP] = "rbp",
        [R_ESP] = "rsp",
        [8]  = "r8",
        [9]  = "r9",
        [10] = "r10",
        [11] = "r11",
        [12] = "r12",
        [13] = "r13",
        [14] = "r14",
        [15] = "r15",
#else
        [R_EAX] = "eax",
        [R_EBX] = "ebx",
        [R_ECX] = "ecx",
        [R_EDX] = "edx",
        [R_ESI] = "esi",
        [R_EDI] = "edi",
        [R_EBP] = "ebp",
        [R_ESP] = "esp",
#endif
    };
    static const char eip_name[] = {
#ifdef TARGET_X86_64
        "rip"
#else
        "eip"
#endif
    };
    static const char seg_base_names[6][8] = {
        [R_CS] = "cs_base",
        [R_DS] = "ds_base",
        [R_ES] = "es_base",
        [R_FS] = "fs_base",
        [R_GS] = "gs_base",
        [R_SS] = "ss_base",
    };
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    };
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    };
    int i;

    cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");
    cpu_eip = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, eip), eip_name);

    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUX86State, regs[i]),
                                         reg_names[i]);
    }

    for (i = 0; i < 6; ++i) {
        cpu_seg_base[i]
            = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUX86State, segs[i].base),
                                 seg_base_names[i]);
    }

    for (i = 0; i < 4; ++i) {
        cpu_bndl[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
                                     bnd_regl_names[i]);
        cpu_bndu[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
                                     bnd_regu_names[i]);
    }
}
static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUX86State *env = cpu->env_ptr;
    uint32_t flags = dc->base.tb->flags;
    uint32_t cflags = tb_cflags(dc->base.tb);
    int cpl = (flags >> HF_CPL_SHIFT) & 3;
    int iopl = (flags >> IOPL_SHIFT) & 3;

    dc->cs_base = dc->base.tb->cs_base;
    dc->pc_save = dc->base.pc_next;
    dc->flags = flags;
#ifndef CONFIG_USER_ONLY
    dc->cpl = cpl;
    dc->iopl = iopl;
#endif

    /* We make some simplifying assumptions; validate they're correct. */
    g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
    g_assert(CPL(dc) == cpl);
    g_assert(IOPL(dc) == iopl);
    g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
    g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
    g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
    g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
    g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
    g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
    g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
    g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));

    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = cpu_mmu_index(env, false);
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
    dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
                    (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
    /*
     * If jmp_opt, we want to handle each string instruction individually.
     * For icount also disable repz optimization so that each iteration
     * is accounted separately.
     */
    dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);

    dc->T0 = tcg_temp_new();
    dc->T1 = tcg_temp_new();
    dc->A0 = tcg_temp_new();

    dc->tmp0 = tcg_temp_new();
    dc->tmp1_i64 = tcg_temp_new_i64();
    dc->tmp2_i32 = tcg_temp_new_i32();
    dc->tmp3_i32 = tcg_temp_new_i32();
    dc->tmp4 = tcg_temp_new();
    dc->cc_srcT = tcg_temp_new();
}
static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_arg = dc->base.pc_next;

    dc->prev_insn_end = tcg_last_op();
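    /*
     * With CF_PCREL the translation block may be reused at other virtual
     * addresses, so only the offset within the page is recorded in the
     * insn_start op.
     */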
    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_arg -= dc->cs_base;
        pc_arg &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(pc_arg, dc->cc_op);
}
static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * Detect entry into the vsyscall page and invoke the syscall.
     */
    if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
        gen_exception(dc, EXCP_VSYSCALL);
        dc->base.pc_next = dc->pc + 1;
        return;
    }
#endif

    if (disas_insn(dc, cpu)) {
        target_ulong pc_next = dc->pc;
        dc->base.pc_next = pc_next;

        if (dc->base.is_jmp == DISAS_NEXT) {
            if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
                /*
                 * If single step mode, we generate only one instruction and
                 * generate an exception.
                 * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
                 * the flag and abort the translation to give the irqs a
                 * chance to happen.
                 */
                dc->base.is_jmp = DISAS_EOB_NEXT;
            } else if (!is_same_page(&dc->base, pc_next)) {
                dc->base.is_jmp = DISAS_TOO_MANY;
            }
        }
    }
}
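/*
 * Translation-block epilogue: each DISAS_* disposition selects how the
 * TB ends -- direct chaining for DISAS_TOO_MANY, a full end-of-block
 * (condition codes flushed, EIP updated) for the DISAS_EOB_* variants,
 * and an indirect jump for DISAS_JUMP.
 */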
static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        gen_update_cc_op(dc);
        gen_jmp_rel_csize(dc, 0, 0);
        break;
    case DISAS_EOB_NEXT:
        gen_update_cc_op(dc);
        gen_update_eip_cur(dc);
        /* fall through */
    case DISAS_EOB_ONLY:
        gen_eob(dc);
        break;
    case DISAS_EOB_INHIBIT_IRQ:
        gen_update_cc_op(dc);
        gen_update_eip_cur(dc);
        gen_eob_inhibit_irq(dc, true);
        break;
    case DISAS_JUMP:
        gen_jr(dc);
        break;
    default:
        g_assert_not_reached();
    }
}
static void i386_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cpu, FILE *logfile)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
    target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
}
static const TranslatorOps i386_tr_ops = {
    .init_disas_context = i386_tr_init_disas_context,
    .tb_start           = i386_tr_tb_start,
    .insn_start         = i386_tr_insn_start,
    .translate_insn     = i386_tr_translate_insn,
    .tb_stop            = i386_tr_tb_stop,
    .disas_log          = i386_tr_disas_log,
};
/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
}