4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
21 #include "qemu/host-utils.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "tcg/tcg-op-gvec.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/translator.h"
29 #include "fpu/softfloat.h"
31 #include "exec/helper-proto.h"
32 #include "exec/helper-gen.h"
33 #include "helper-tcg.h"
37 #define HELPER_H "helper.h"
38 #include "exec/helper-info.c.inc"
42 #define PREFIX_REPZ 0x01
43 #define PREFIX_REPNZ 0x02
44 #define PREFIX_LOCK 0x04
45 #define PREFIX_DATA 0x08
46 #define PREFIX_ADR 0x10
47 #define PREFIX_VEX 0x20
48 #define PREFIX_REX 0x40
58 /* For a switch indexed by MODRM, match all memory operands for a given OP. */
59 #define CASE_MODRM_MEM_OP(OP) \
60 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
61 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
62 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
64 #define CASE_MODRM_OP(OP) \
65 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
66 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
67 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
68 case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
70 //#define MACRO_TEST 1
72 /* global register indexes */
73 static TCGv cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
;
75 static TCGv_i32 cpu_cc_op
;
76 static TCGv cpu_regs
[CPU_NB_REGS
];
77 static TCGv cpu_seg_base
[6];
78 static TCGv_i64 cpu_bndl
[4];
79 static TCGv_i64 cpu_bndu
[4];
81 typedef struct DisasContext
{
82 DisasContextBase base
;
84 target_ulong pc
; /* pc = eip + cs_base */
85 target_ulong cs_base
; /* base of CS segment */
91 int8_t override
; /* -1 if no override, else R_CS, R_DS, etc */
97 #ifndef CONFIG_USER_ONLY
98 uint8_t cpl
; /* code priv level */
99 uint8_t iopl
; /* i/o priv level */
101 uint8_t vex_l
; /* vex vector length */
102 uint8_t vex_v
; /* vex vvvv register, without 1's complement. */
103 uint8_t popl_esp_hack
; /* for correct popl with esp base handling */
104 uint8_t rip_offset
; /* only used in x86_64, but left for simplicity */
111 bool vex_w
; /* used by AVX even on 32-bit processors */
112 bool jmp_opt
; /* use direct block chaining for direct jumps */
113 bool repz_opt
; /* optimize jumps within repz instructions */
116 CCOp cc_op
; /* current CC operation */
117 int mem_index
; /* select memory access functions */
118 uint32_t flags
; /* all execution flags */
120 int cpuid_ext_features
;
121 int cpuid_ext2_features
;
122 int cpuid_ext3_features
;
123 int cpuid_7_0_ebx_features
;
124 int cpuid_7_0_ecx_features
;
125 int cpuid_xsave_features
;
127 /* TCG local temps */
133 /* TCG local register indexes (only used inside old micro ops) */
141 TCGOp
*prev_insn_end
;
144 #define DISAS_EOB_ONLY DISAS_TARGET_0
145 #define DISAS_EOB_NEXT DISAS_TARGET_1
146 #define DISAS_EOB_INHIBIT_IRQ DISAS_TARGET_2
147 #define DISAS_JUMP DISAS_TARGET_3
149 /* The environment in which user-only runs is constrained. */
150 #ifdef CONFIG_USER_ONLY
154 #define SVME(S) false
155 #define GUEST(S) false
157 #define PE(S) (((S)->flags & HF_PE_MASK) != 0)
158 #define CPL(S) ((S)->cpl)
159 #define IOPL(S) ((S)->iopl)
160 #define SVME(S) (((S)->flags & HF_SVME_MASK) != 0)
161 #define GUEST(S) (((S)->flags & HF_GUEST_MASK) != 0)
163 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
164 #define VM86(S) false
165 #define CODE32(S) true
167 #define ADDSEG(S) false
169 #define VM86(S) (((S)->flags & HF_VM_MASK) != 0)
170 #define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
171 #define SS32(S) (((S)->flags & HF_SS32_MASK) != 0)
172 #define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
174 #if !defined(TARGET_X86_64)
175 #define CODE64(S) false
176 #elif defined(CONFIG_USER_ONLY)
177 #define CODE64(S) true
179 #define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
181 #if defined(CONFIG_SOFTMMU) && !defined(TARGET_X86_64)
184 #define LMA(S) (((S)->flags & HF_LMA_MASK) != 0)
188 #define REX_PREFIX(S) (((S)->prefix & PREFIX_REX) != 0)
189 #define REX_W(S) ((S)->vex_w)
190 #define REX_R(S) ((S)->rex_r + 0)
191 #define REX_X(S) ((S)->rex_x + 0)
192 #define REX_B(S) ((S)->rex_b + 0)
194 #define REX_PREFIX(S) false
195 #define REX_W(S) false
202 * Many sysemu-only helpers are not reachable for user-only.
203 * Define stub generators here, so that we need not either sprinkle
204 * ifdefs through the translator, nor provide the helper function.
206 #define STUB_HELPER(NAME, ...) \
207 static inline void gen_helper_##NAME(__VA_ARGS__) \
208 { qemu_build_not_reached(); }
210 #ifdef CONFIG_USER_ONLY
211 STUB_HELPER(clgi
, TCGv_env env
)
212 STUB_HELPER(flush_page
, TCGv_env env
, TCGv addr
)
213 STUB_HELPER(hlt
, TCGv_env env
, TCGv_i32 pc_ofs
)
214 STUB_HELPER(inb
, TCGv ret
, TCGv_env env
, TCGv_i32 port
)
215 STUB_HELPER(inw
, TCGv ret
, TCGv_env env
, TCGv_i32 port
)
216 STUB_HELPER(inl
, TCGv ret
, TCGv_env env
, TCGv_i32 port
)
217 STUB_HELPER(monitor
, TCGv_env env
, TCGv addr
)
218 STUB_HELPER(mwait
, TCGv_env env
, TCGv_i32 pc_ofs
)
219 STUB_HELPER(outb
, TCGv_env env
, TCGv_i32 port
, TCGv_i32 val
)
220 STUB_HELPER(outw
, TCGv_env env
, TCGv_i32 port
, TCGv_i32 val
)
221 STUB_HELPER(outl
, TCGv_env env
, TCGv_i32 port
, TCGv_i32 val
)
222 STUB_HELPER(rdmsr
, TCGv_env env
)
223 STUB_HELPER(read_crN
, TCGv ret
, TCGv_env env
, TCGv_i32 reg
)
224 STUB_HELPER(get_dr
, TCGv ret
, TCGv_env env
, TCGv_i32 reg
)
225 STUB_HELPER(set_dr
, TCGv_env env
, TCGv_i32 reg
, TCGv val
)
226 STUB_HELPER(stgi
, TCGv_env env
)
227 STUB_HELPER(svm_check_intercept
, TCGv_env env
, TCGv_i32 type
)
228 STUB_HELPER(vmload
, TCGv_env env
, TCGv_i32 aflag
)
229 STUB_HELPER(vmmcall
, TCGv_env env
)
230 STUB_HELPER(vmrun
, TCGv_env env
, TCGv_i32 aflag
, TCGv_i32 pc_ofs
)
231 STUB_HELPER(vmsave
, TCGv_env env
, TCGv_i32 aflag
)
232 STUB_HELPER(write_crN
, TCGv_env env
, TCGv_i32 reg
, TCGv val
)
233 STUB_HELPER(wrmsr
, TCGv_env env
)
236 static void gen_eob(DisasContext
*s
);
237 static void gen_jr(DisasContext
*s
);
238 static void gen_jmp_rel(DisasContext
*s
, MemOp ot
, int diff
, int tb_num
);
239 static void gen_jmp_rel_csize(DisasContext
*s
, int diff
, int tb_num
);
240 static void gen_op(DisasContext
*s1
, int op
, MemOp ot
, int d
);
241 static void gen_exception_gpf(DisasContext
*s
);
243 /* i386 arith/logic operations */
263 OP_SHL1
, /* undocumented */
279 /* I386 int registers */
280 OR_EAX
, /* MUST be even numbered */
289 OR_TMP0
= 16, /* temporary operand register */
291 OR_A0
, /* temporary register used when doing address evaluation */
301 /* Bit set if the global variable is live after setting CC_OP to X. */
302 static const uint8_t cc_op_live
[CC_OP_NB
] = {
303 [CC_OP_DYNAMIC
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
304 [CC_OP_EFLAGS
] = USES_CC_SRC
,
305 [CC_OP_MULB
... CC_OP_MULQ
] = USES_CC_DST
| USES_CC_SRC
,
306 [CC_OP_ADDB
... CC_OP_ADDQ
] = USES_CC_DST
| USES_CC_SRC
,
307 [CC_OP_ADCB
... CC_OP_ADCQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
308 [CC_OP_SUBB
... CC_OP_SUBQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRCT
,
309 [CC_OP_SBBB
... CC_OP_SBBQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
310 [CC_OP_LOGICB
... CC_OP_LOGICQ
] = USES_CC_DST
,
311 [CC_OP_INCB
... CC_OP_INCQ
] = USES_CC_DST
| USES_CC_SRC
,
312 [CC_OP_DECB
... CC_OP_DECQ
] = USES_CC_DST
| USES_CC_SRC
,
313 [CC_OP_SHLB
... CC_OP_SHLQ
] = USES_CC_DST
| USES_CC_SRC
,
314 [CC_OP_SARB
... CC_OP_SARQ
] = USES_CC_DST
| USES_CC_SRC
,
315 [CC_OP_BMILGB
... CC_OP_BMILGQ
] = USES_CC_DST
| USES_CC_SRC
,
316 [CC_OP_ADCX
] = USES_CC_DST
| USES_CC_SRC
,
317 [CC_OP_ADOX
] = USES_CC_SRC
| USES_CC_SRC2
,
318 [CC_OP_ADCOX
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
320 [CC_OP_POPCNT
] = USES_CC_SRC
,
/*
 * Record the condition-code operation that produced the current flags.
 * Discards any cached flag inputs (cc_dst/cc_src/cc_src2/cc_srcT) that
 * the new CC_OP no longer reads, per the cc_op_live[] table above.
 * NOTE(review): brace/else structure reconstructed from a lossy extraction;
 * verify against upstream before relying on exact layout.
 */
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used. */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts). */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}
/*
 * Flush the translator's cached cc_op into the cpu_cc_op global if it
 * has diverged (dirty), so that helpers see the correct flag state.
 */
static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}
370 #define NB_OP_SIZES 4
372 #else /* !TARGET_X86_64 */
374 #define NB_OP_SIZES 3
376 #endif /* !TARGET_X86_64 */
379 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
380 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
381 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
382 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
383 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
385 #define REG_B_OFFSET 0
386 #define REG_H_OFFSET 1
387 #define REG_W_OFFSET 0
388 #define REG_L_OFFSET 0
389 #define REG_LH_OFFSET 4
392 /* In instruction encodings for byte register accesses the
393 * register number usually indicates "low 8 bits of register N";
394 * however there are some special cases where N 4..7 indicates
395 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
396 * true for this special case, false otherwise.
/*
 * Return true if byte register number REG encodes AH/CH/DH/BH (bits
 * 15..8 of register REG-4) rather than the low byte of REG.  See the
 * comment above: only regs 4..7 without a REX prefix are the xH case.
 */
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
{
    /* Any time the REX prefix is present, byte registers are uniform */
    if (reg < 4 || REX_PREFIX(s)) {
        return false;
    }
    return true;
}
407 /* Select the size of a push/pop operation. */
/*
 * Select the size of a push/pop operation: in 64-bit code the operand
 * is 16 or 64 bits; otherwise it follows the stack-segment size.
 * NOTE(review): non-64-bit branch reconstructed — confirm it returns
 * mo_stacksize(s) as in upstream.
 */
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return mo_stacksize(s);
    }
}

/* Select the size of the stack pointer (SP/ESP/RSP) for the current mode. */
static inline MemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}
423 /* Select only size 64 else 32. Used for SSE operand sizes. */
/*
 * Select only size 64 else 32.  Used for SSE operand sizes.
 * NOTE(review): the 32-bit-only fallback branch is reconstructed —
 * confirm the #ifdef TARGET_X86_64 structure against upstream.
 */
static inline MemOp mo_64_32(MemOp ot)
{
#ifdef TARGET_X86_64
    return ot == MO_64 ? MO_64 : MO_32;
#else
    return MO_32;
#endif
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes. */
static inline MemOp mo_b_d(int b, MemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes. */
static inline MemOp mo_b_d32(int b, MemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}
447 /* Compute the result of writing t0 to the OT-sized register REG.
449 * If DEST is NULL, store the result into the register and return the
452 * If DEST is not NULL, store the result into DEST and return the
/*
 * Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register and return the
 * register's TCGv.  If DEST is not NULL, store the result into DEST
 * and return the register's TCGv (still the architectural register).
 *
 * NOTE(review): switch/case skeleton reconstructed from a lossy
 * extraction; verify case labels against upstream.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg,
                                 TCGv dest, TCGv t0)
{
    switch (ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            /* AH/CH/DH/BH: deposit into bits 15..8 of register REG-4. */
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}
/* Store the OT-sized value T0 into architectural register REG. */
static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}

/* Load the OT-sized value of architectural register REG into T0. */
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        /* AH/CH/DH/BH: extract bits 15..8 of the word register REG-4. */
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}
/*
 * Add immediate VAL to the address register A0, truncating to 32 bits
 * outside 64-bit code.  NOTE(review): the !CODE64 guard around the
 * truncation is reconstructed — verify against upstream.
 */
static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(s->A0, s->A0);
    }
}

/* Set EIP/RIP to the value in DEST. */
static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
}
/* Add immediate VAL to register REG, writing back at size SIZE. */
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

/* Add T0 to register REG, writing back at size SIZE. */
static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}
/* Little-endian load of size IDX from guest address A0 into T0. */
static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

/* Little-endian store of T0, size IDX, to guest address A0. */
static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

/*
 * Store T0 either to memory at A0 (when D selects the memory operand,
 * i.e. OR_TMP0) or to register D.  NOTE(review): the d == OR_TMP0
 * condition is reconstructed — verify against upstream.
 */
static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, s->T0, s->A0);
    } else {
        gen_op_mov_reg_v(s, idx, d, s->T0);
    }
}
/*
 * Set cpu_eip to the address of the current instruction (base.pc_next).
 * With CF_PCREL the update is relative to the last value written
 * (pc_save), so the generated code stays position-independent.
 */
static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next - s->cs_base);
    }
    s->pc_save = s->base.pc_next;
}

/*
 * Set cpu_eip to the address of the next instruction (s->pc), using the
 * same PCREL-relative scheme as gen_update_eip_cur().
 */
static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
    } else {
        tcg_gen_movi_tl(cpu_eip, s->pc - s->cs_base);
    }
    s->pc_save = s->pc;
}
/* Length in bytes of the instruction currently being translated. */
static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}

/* Same as cur_insn_len(), as a TCG i32 constant for helper arguments. */
static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}
/*
 * Return the EIP of the next instruction as a 32-bit TCG value.
 * NOTE(review): the CODE64 guard before the -1 return is reconstructed
 * from the comment below — verify against upstream.
 */
static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
     * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
     * why passing a 32-bit value isn't broken.  To avoid using this where
     * we shouldn't, return -1 in 64-bit mode so that execution goes into
     * the weeds quickly.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}
/* Return the EIP of the next instruction as a target-long TCG value. */
static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        /* PCREL: derive from cpu_eip so the TB stays position-independent. */
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_tl(s->pc - s->cs_base);
    }
}

/* Return the EIP of the current instruction as a target-long TCG value. */
static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else {
        return tcg_constant_tl(s->base.pc_next - s->cs_base);
    }
}
630 /* Compute SEG:REG into A0. SEG is selected from the override segment
631 (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to
632 indicate no override. */
/*
 * Compute SEG:REG into A0.  SEG is selected from the override segment
 * (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
 * indicate no override.
 *
 * NOTE(review): the switch skeleton and early returns were lost in
 * extraction and are reconstructed here from the surviving statements —
 * verify the MO_64/MO_32/MO_16 case structure against upstream.
 */
static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(s->A0, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(s->A0, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        if (ovr_seg < 0) {
            ovr_seg = def_seg;
        }
        tcg_gen_ext16u_tl(s->A0, a0);
        /* ADDSEG will only be false in 16-bit mode for LEA. */
        if (!ADDSEG(s)) {
            return;
        }
        a0 = s->A0;
        break;
    default:
        g_assert_not_reached();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(s->A0, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(s->A0, a0);
            tcg_gen_add_tl(s->A0, s->A0, seg);
        } else {
            tcg_gen_add_tl(s->A0, a0, seg);
            tcg_gen_ext32u_tl(s->A0, s->A0);
        }
    }
}
/* A0 = DS(or override):ESI — source address for string instructions. */
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}

/* A0 = ES:EDI — destination address; ES cannot be overridden. */
static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}

/*
 * T0 = (df << ot): the per-iteration stride for string instructions,
 * where env->df is +1 or -1 depending on EFLAGS.DF.
 */
static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot)
{
    tcg_gen_ld32s_tl(s->T0, tcg_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(s->T0, s->T0, ot);
}
/*
 * Sign- or zero-extend SRC at size SIZE into DST; returns the TCGv that
 * holds the result (SRC itself when no extension is needed).
 * NOTE(review): switch/case labels reconstructed — verify against upstream.
 */
static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    switch (size) {
    case MO_8:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case MO_16:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case MO_32:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
        /* Full-width value: nothing to extend. */
        return src;
    }
}
/* Zero-extend REG in place at size OT. */
static void gen_extu(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

/* Sign-extend REG in place at size OT. */
static void gen_exts(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}
/*
 * Branch to LABEL1 if COND holds for (E)CX truncated to the current
 * address size (s->aflag).
 */
static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]);
    gen_extu(s->aflag, s->tmp0);
    tcg_gen_brcondi_tl(cond, s->tmp0, 0, label1);
}

/* Branch to LABEL1 if (E)CX == 0. */
static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}

/* Branch to LABEL1 if (E)CX != 0. */
static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}
/*
 * Dispatch an IN port read of size OT to the matching helper.
 * NOTE(review): switch/case labels reconstructed — verify against upstream.
 */
static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, tcg_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, tcg_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, tcg_env, n);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Dispatch an OUT port write of size OT to the matching helper. */
static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(tcg_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(tcg_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(tcg_env, v, n);
        break;
    default:
        g_assert_not_reached();
    }
}
795 * Validate that access to [port, port + 1<<ot) is allowed.
796 * Raise #GP, or VMM exit if not.
/*
 * Validate that access to [port, port + 1 << ot) is allowed.
 * Raise #GP, or VMM exit if not.  Returns false when translation must
 * stop because the access always faults (user-only).
 *
 * NOTE(review): the signature tail (svm_flags parameter), the GUEST()
 * guard and the return statements were lost in extraction and are
 * reconstructed — verify against upstream.
 */
static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
                         uint32_t svm_flags)
{
#ifdef CONFIG_USER_ONLY
    /*
     * We do not implement the ioperm(2) syscall, so the TSS check
     * will always fail.
     */
    gen_exception_gpf(s);
    return false;
#else
    /* TSS I/O permission bitmap check when CPL > IOPL or in vm86. */
    if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
        gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
    }
    if (GUEST(s)) {
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
            svm_flags |= SVM_IOIO_REP_MASK;
        }
        svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
        gen_helper_svm_check_io(tcg_env, port,
                                tcg_constant_i32(svm_flags),
                                cur_insn_len_i32(s));
    }
    return true;
#endif
}
/* One iteration of MOVS: copy OT bytes from DS:ESI to ES:EDI, then
   advance both index registers by the DF-directed stride. */
static void gen_movs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}
/* Record T0 as the flag-producing result (cc_dst). */
static void gen_op_update1_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

/* Record T1 as the flag source and T0 as the result. */
static void gen_op_update2_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

/* Record REG as the second flag source (carry-in), T1 as source, T0 as
   result — used by ADC/SBB-style three-input flag producers. */
static void gen_op_update3_cc(DisasContext *s, TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

/* Flags for TEST: cc_dst = T0 & T1. */
static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
{
    tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);
}

/* Flags for NEG: result in cc_dst, negated operand in cc_src,
   and cc_srcT = 0 (NEG computes 0 - T0). */
static void gen_op_update_neg_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
    tcg_gen_neg_tl(cpu_cc_src, s->T0);
    tcg_gen_movi_tl(s->cc_srcT, 0);
}
868 /* compute all eflags to cc_src */
/*
 * Compute all eflags to cc_src and switch to CC_OP_EFLAGS.
 * Dead inputs (per cc_op_live[]) are replaced with a zero constant so
 * the helper never reads a discarded global.
 * NOTE(review): local declarations and the dead-input substitution
 * branches were reconstructed — verify against upstream.
 */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        /* CLR means Z and P set, everything else clear. */
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live. */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_update_cc_op(s);
    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);
}
909 typedef struct CCPrepare
{
919 /* compute eflags.C to reg */
/*
 * Compute eflags.C to REG, returning a CCPrepare describing how the
 * carry can be tested without materializing all flags.
 *
 * NOTE(review): the switch skeleton, goto label and several case labels
 * were lost in extraction and are reconstructed from the surviving
 * statements — verify every case against upstream before trusting the
 * exact structure.
 */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0. */
        t0 = t1 == cpu_cc_src ? s->tmp0 : reg;
        tcg_gen_mov_tl(t0, s->cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        /* Logic ops, CLR and POPCNT always clear C. */
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        /* INC/DEC preserve C, which was latched into cc_src. */
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB. */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}
993 /* compute eflags.P to reg */
/*
 * Compute eflags.P to REG.  Parity has no fast path: force a full
 * flag computation, then test the P bit in cc_src.
 * NOTE(review): .mask value reconstructed as CC_P — verify.
 */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}
1001 /* compute eflags.S to reg */
/*
 * Compute eflags.S to REG.  For arithmetic CC_OPs the sign is the top
 * bit of the sign-extended result; otherwise fall back to the S bit of
 * computed EFLAGS.
 * NOTE(review): case labels reconstructed — verify against upstream.
 */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}
1026 /* compute eflags.O to reg */
/*
 * Compute eflags.O to REG.  ADOX/ADCOX keep O live in cc_src2; CLR and
 * POPCNT always clear it; otherwise compute EFLAGS and test the O bit.
 * NOTE(review): case labels reconstructed — verify against upstream.
 */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}
1044 /* compute eflags.Z to reg */
/*
 * Compute eflags.Z to REG.  For arithmetic CC_OPs Z is "result == 0";
 * CLR always sets Z; POPCNT sets Z iff the popcount source was zero.
 * NOTE(review): case labels reconstructed — verify against upstream.
 */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
                             .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}
1071 /* perform a conditional store into register 'reg' according to jump opcode
1072 value 'b'. In the fast case, T0 is guaranteed not to be used. */
/*
 * Perform a conditional store into register 'reg' according to jump
 * opcode value 'b'.  In the fast case, T0 is guaranteed not to be used.
 * Returns a CCPrepare describing the condition; the low bit of B
 * inverts the condition.
 *
 * NOTE(review): the nested switch skeleton, goto labels and JCC_* case
 * labels were lost in extraction and reconstructed from the surviving
 * statements — verify carefully against upstream.
 */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case. */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_extu(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_exts(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS. */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}
/*
 * Store into REG the 0/1 truth value of jump-opcode condition B
 * (SETcc).  Uses the CCPrepare fast paths: a plain move/xor when the
 * condition is already a boolean, a shift+and when it is a single bit.
 * NOTE(review): intermediate `cc.reg = reg` steps and early returns
 * reconstructed — verify against upstream.
 */
static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    /* Single-bit mask tested for non-zero: extract the bit directly. */
    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}
/* Store eflags.C into REG (SETcc with the JCC_B condition). */
static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}
1204 /* generate a conditional jump to label 'l1' according to jump opcode
1205 value 'b'. In the fast case, T0 is guaranteed not to be used. */
/*
 * Generate a conditional jump to label 'l1' according to jump opcode
 * value 'b'.  In the fast case, T0 is guaranteed not to be used.
 * Unlike gen_jcc1(), does not force an end-of-block.
 * NOTE(review): `cc.reg = s->T0` step reconstructed — verify.
 */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
1221 /* Generate a conditional jump to label 'l1' according to jump opcode
1222 value 'b'. In the fast case, T0 is guaranteed not to be used.
1223 A translation block must end soon. */
/*
 * Generate a conditional jump to label 'l1' according to jump opcode
 * value 'b'.  In the fast case, T0 is guaranteed not to be used.
 * A translation block must end soon: cc_op is flushed and set DYNAMIC
 * because the two outgoing paths may disagree on the flag state.
 * NOTE(review): `cc.reg = s->T0` step reconstructed — verify.
 */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
1241 /* XXX: does not work with gdbstub "ice" single step - not a
/*
 * Emit the (E)CX-zero early exit used by REP prefixes: if (E)CX is
 * already zero, jump past the string operation to the next insn.
 * Returns the label that a zero (E)CX branches to.
 * NOTE(review): gen_set_label() calls reconstructed — verify placement.
 */
static TCGLabel *gen_jz_ecx_string(DisasContext *s)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();

    gen_op_jnz_ecx(s, l1);
    gen_set_label(l2);
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    return l2;
}
/* One iteration of STOS: store AL/AX/EAX to ES:EDI, advance EDI. */
static void gen_stos(DisasContext *s, MemOp ot)
{
    gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

/* One iteration of LODS: load DS:ESI into AL/AX/EAX, advance ESI. */
static void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
}
/* One iteration of SCAS: compare AL/AX/EAX with ES:EDI, advance EDI. */
static void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

/* One iteration of CMPS: compare DS:ESI with ES:EDI, advance both. */
static void gen_cmps(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}
/* Notify the debugger of an I/O access when I/O breakpoints are armed. */
static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}
/* One iteration of INS: read from port DX into ES:EDI, advance EDI. */
static void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
    gen_bpt_io(s, s->tmp2_i32, ot);
}

/* One iteration of OUTS: write DS:ESI to port DX, advance ESI. */
static void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_bpt_io(s, s->tmp2_i32, ot);
}
1336 /* Generate jumps to current or next instruction */
/*
 * Emit one unrolled iteration of a REP-prefixed string insn FN: exit
 * early if (E)CX is zero, run FN, decrement (E)CX, then loop back by
 * re-executing the insn (jump to current) or fall through (jump to next).
 * NOTE(review): fn(s, ot) call and repz_opt guard reconstructed — verify.
 */
static void gen_repz(DisasContext *s, MemOp ot,
                     void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;

    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    /*
     * A loop would cause two single step exceptions if ECX = 1
     * before rep string_insn
     */
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}
1355 #define GEN_REPZ(op) \
1356 static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \
1357 { gen_repz(s, ot, gen_##op); }
/*
 * Like gen_repz(), but for REPZ/REPNZ CMPS/SCAS: additionally stop the
 * loop when ZF disagrees with NZ after each iteration.
 * NOTE(review): fn(s, ot) call and repz_opt guard reconstructed — verify.
 */
static void gen_repz2(DisasContext *s, MemOp ot, int nz,
                      void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;

    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    gen_update_cc_op(s);
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}
/* Instantiate a gen_repz_<op> wrapper for REPZ/REPNZ-capable string insns. */
#define GEN_REPZ2(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
    { gen_repz2(s, ot, nz, gen_##op); }
1387 static void gen_helper_fp_arith_ST0_FT0(int op
)
1391 gen_helper_fadd_ST0_FT0(tcg_env
);
1394 gen_helper_fmul_ST0_FT0(tcg_env
);
1397 gen_helper_fcom_ST0_FT0(tcg_env
);
1400 gen_helper_fcom_ST0_FT0(tcg_env
);
1403 gen_helper_fsub_ST0_FT0(tcg_env
);
1406 gen_helper_fsubr_ST0_FT0(tcg_env
);
1409 gen_helper_fdiv_ST0_FT0(tcg_env
);
1412 gen_helper_fdivr_ST0_FT0(tcg_env
);
1417 /* NOTE the exception in "r" op ordering */
1418 static void gen_helper_fp_arith_STN_ST0(int op
, int opreg
)
1420 TCGv_i32 tmp
= tcg_constant_i32(opreg
);
1423 gen_helper_fadd_STN_ST0(tcg_env
, tmp
);
1426 gen_helper_fmul_STN_ST0(tcg_env
, tmp
);
1429 gen_helper_fsubr_STN_ST0(tcg_env
, tmp
);
1432 gen_helper_fsub_STN_ST0(tcg_env
, tmp
);
1435 gen_helper_fdivr_STN_ST0(tcg_env
, tmp
);
1438 gen_helper_fdiv_STN_ST0(tcg_env
, tmp
);
1443 static void gen_exception(DisasContext
*s
, int trapno
)
1445 gen_update_cc_op(s
);
1446 gen_update_eip_cur(s
);
1447 gen_helper_raise_exception(tcg_env
, tcg_constant_i32(trapno
));
1448 s
->base
.is_jmp
= DISAS_NORETURN
;
1451 /* Generate #UD for the current instruction. The assumption here is that
1452 the instruction is known, but it isn't allowed in the current cpu mode. */
1453 static void gen_illegal_opcode(DisasContext
*s
)
1455 gen_exception(s
, EXCP06_ILLOP
);
1458 /* Generate #GP for the current instruction. */
1459 static void gen_exception_gpf(DisasContext
*s
)
1461 gen_exception(s
, EXCP0D_GPF
);
1464 /* Check for cpl == 0; if not, raise #GP and return false. */
1465 static bool check_cpl0(DisasContext
*s
)
1470 gen_exception_gpf(s
);
1474 /* If vm86, check for iopl == 3; if not, raise #GP and return false. */
1475 static bool check_vm86_iopl(DisasContext
*s
)
1477 if (!VM86(s
) || IOPL(s
) == 3) {
1480 gen_exception_gpf(s
);
1484 /* Check for iopl allowing access; if not, raise #GP and return false. */
1485 static bool check_iopl(DisasContext
*s
)
1487 if (VM86(s
) ? IOPL(s
) == 3 : CPL(s
) <= IOPL(s
)) {
1490 gen_exception_gpf(s
);
1494 /* if d == OR_TMP0, it means memory operand (address in A0) */
1495 static void gen_op(DisasContext
*s1
, int op
, MemOp ot
, int d
)
1498 if (s1
->prefix
& PREFIX_LOCK
) {
1499 /* Lock prefix when destination is not memory. */
1500 gen_illegal_opcode(s1
);
1503 gen_op_mov_v_reg(s1
, ot
, s1
->T0
, d
);
1504 } else if (!(s1
->prefix
& PREFIX_LOCK
)) {
1505 gen_op_ld_v(s1
, ot
, s1
->T0
, s1
->A0
);
1509 gen_compute_eflags_c(s1
, s1
->tmp4
);
1510 if (s1
->prefix
& PREFIX_LOCK
) {
1511 tcg_gen_add_tl(s1
->T0
, s1
->tmp4
, s1
->T1
);
1512 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T0
,
1513 s1
->mem_index
, ot
| MO_LE
);
1515 tcg_gen_add_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1516 tcg_gen_add_tl(s1
->T0
, s1
->T0
, s1
->tmp4
);
1517 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1519 gen_op_update3_cc(s1
, s1
->tmp4
);
1520 set_cc_op(s1
, CC_OP_ADCB
+ ot
);
1523 gen_compute_eflags_c(s1
, s1
->tmp4
);
1524 if (s1
->prefix
& PREFIX_LOCK
) {
1525 tcg_gen_add_tl(s1
->T0
, s1
->T1
, s1
->tmp4
);
1526 tcg_gen_neg_tl(s1
->T0
, s1
->T0
);
1527 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T0
,
1528 s1
->mem_index
, ot
| MO_LE
);
1530 tcg_gen_sub_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1531 tcg_gen_sub_tl(s1
->T0
, s1
->T0
, s1
->tmp4
);
1532 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1534 gen_op_update3_cc(s1
, s1
->tmp4
);
1535 set_cc_op(s1
, CC_OP_SBBB
+ ot
);
1538 if (s1
->prefix
& PREFIX_LOCK
) {
1539 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T1
,
1540 s1
->mem_index
, ot
| MO_LE
);
1542 tcg_gen_add_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1543 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1545 gen_op_update2_cc(s1
);
1546 set_cc_op(s1
, CC_OP_ADDB
+ ot
);
1549 if (s1
->prefix
& PREFIX_LOCK
) {
1550 tcg_gen_neg_tl(s1
->T0
, s1
->T1
);
1551 tcg_gen_atomic_fetch_add_tl(s1
->cc_srcT
, s1
->A0
, s1
->T0
,
1552 s1
->mem_index
, ot
| MO_LE
);
1553 tcg_gen_sub_tl(s1
->T0
, s1
->cc_srcT
, s1
->T1
);
1555 tcg_gen_mov_tl(s1
->cc_srcT
, s1
->T0
);
1556 tcg_gen_sub_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1557 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1559 gen_op_update2_cc(s1
);
1560 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1564 if (s1
->prefix
& PREFIX_LOCK
) {
1565 tcg_gen_atomic_and_fetch_tl(s1
->T0
, s1
->A0
, s1
->T1
,
1566 s1
->mem_index
, ot
| MO_LE
);
1568 tcg_gen_and_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1569 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1571 gen_op_update1_cc(s1
);
1572 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1575 if (s1
->prefix
& PREFIX_LOCK
) {
1576 tcg_gen_atomic_or_fetch_tl(s1
->T0
, s1
->A0
, s1
->T1
,
1577 s1
->mem_index
, ot
| MO_LE
);
1579 tcg_gen_or_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1580 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1582 gen_op_update1_cc(s1
);
1583 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1586 if (s1
->prefix
& PREFIX_LOCK
) {
1587 tcg_gen_atomic_xor_fetch_tl(s1
->T0
, s1
->A0
, s1
->T1
,
1588 s1
->mem_index
, ot
| MO_LE
);
1590 tcg_gen_xor_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1591 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1593 gen_op_update1_cc(s1
);
1594 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1597 tcg_gen_mov_tl(cpu_cc_src
, s1
->T1
);
1598 tcg_gen_mov_tl(s1
->cc_srcT
, s1
->T0
);
1599 tcg_gen_sub_tl(cpu_cc_dst
, s1
->T0
, s1
->T1
);
1600 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1605 /* if d == OR_TMP0, it means memory operand (address in A0) */
1606 static void gen_inc(DisasContext
*s1
, MemOp ot
, int d
, int c
)
1608 if (s1
->prefix
& PREFIX_LOCK
) {
1610 /* Lock prefix when destination is not memory */
1611 gen_illegal_opcode(s1
);
1614 tcg_gen_movi_tl(s1
->T0
, c
> 0 ? 1 : -1);
1615 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T0
,
1616 s1
->mem_index
, ot
| MO_LE
);
1619 gen_op_mov_v_reg(s1
, ot
, s1
->T0
, d
);
1621 gen_op_ld_v(s1
, ot
, s1
->T0
, s1
->A0
);
1623 tcg_gen_addi_tl(s1
->T0
, s1
->T0
, (c
> 0 ? 1 : -1));
1624 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1627 gen_compute_eflags_c(s1
, cpu_cc_src
);
1628 tcg_gen_mov_tl(cpu_cc_dst
, s1
->T0
);
1629 set_cc_op(s1
, (c
> 0 ? CC_OP_INCB
: CC_OP_DECB
) + ot
);
1632 static void gen_shift_flags(DisasContext
*s
, MemOp ot
, TCGv result
,
1633 TCGv shm1
, TCGv count
, bool is_right
)
1635 TCGv_i32 z32
, s32
, oldop
;
1638 /* Store the results into the CC variables. If we know that the
1639 variable must be dead, store unconditionally. Otherwise we'll
1640 need to not disrupt the current contents. */
1641 z_tl
= tcg_constant_tl(0);
1642 if (cc_op_live
[s
->cc_op
] & USES_CC_DST
) {
1643 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_dst
, count
, z_tl
,
1644 result
, cpu_cc_dst
);
1646 tcg_gen_mov_tl(cpu_cc_dst
, result
);
1648 if (cc_op_live
[s
->cc_op
] & USES_CC_SRC
) {
1649 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_src
, count
, z_tl
,
1652 tcg_gen_mov_tl(cpu_cc_src
, shm1
);
1655 /* Get the two potential CC_OP values into temporaries. */
1656 tcg_gen_movi_i32(s
->tmp2_i32
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1657 if (s
->cc_op
== CC_OP_DYNAMIC
) {
1660 tcg_gen_movi_i32(s
->tmp3_i32
, s
->cc_op
);
1661 oldop
= s
->tmp3_i32
;
1664 /* Conditionally store the CC_OP value. */
1665 z32
= tcg_constant_i32(0);
1666 s32
= tcg_temp_new_i32();
1667 tcg_gen_trunc_tl_i32(s32
, count
);
1668 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, s32
, z32
, s
->tmp2_i32
, oldop
);
1670 /* The CC_OP value is no longer predictable. */
1671 set_cc_op(s
, CC_OP_DYNAMIC
);
1674 static void gen_shift_rm_T1(DisasContext
*s
, MemOp ot
, int op1
,
1675 int is_right
, int is_arith
)
1677 target_ulong mask
= (ot
== MO_64
? 0x3f : 0x1f);
1680 if (op1
== OR_TMP0
) {
1681 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1683 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1686 tcg_gen_andi_tl(s
->T1
, s
->T1
, mask
);
1687 tcg_gen_subi_tl(s
->tmp0
, s
->T1
, 1);
1691 gen_exts(ot
, s
->T0
);
1692 tcg_gen_sar_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1693 tcg_gen_sar_tl(s
->T0
, s
->T0
, s
->T1
);
1695 gen_extu(ot
, s
->T0
);
1696 tcg_gen_shr_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1697 tcg_gen_shr_tl(s
->T0
, s
->T0
, s
->T1
);
1700 tcg_gen_shl_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1701 tcg_gen_shl_tl(s
->T0
, s
->T0
, s
->T1
);
1705 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1707 gen_shift_flags(s
, ot
, s
->T0
, s
->tmp0
, s
->T1
, is_right
);
1710 static void gen_shift_rm_im(DisasContext
*s
, MemOp ot
, int op1
, int op2
,
1711 int is_right
, int is_arith
)
1713 int mask
= (ot
== MO_64
? 0x3f : 0x1f);
1717 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1719 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1725 gen_exts(ot
, s
->T0
);
1726 tcg_gen_sari_tl(s
->tmp4
, s
->T0
, op2
- 1);
1727 tcg_gen_sari_tl(s
->T0
, s
->T0
, op2
);
1729 gen_extu(ot
, s
->T0
);
1730 tcg_gen_shri_tl(s
->tmp4
, s
->T0
, op2
- 1);
1731 tcg_gen_shri_tl(s
->T0
, s
->T0
, op2
);
1734 tcg_gen_shli_tl(s
->tmp4
, s
->T0
, op2
- 1);
1735 tcg_gen_shli_tl(s
->T0
, s
->T0
, op2
);
1740 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1742 /* update eflags if non zero shift */
1744 tcg_gen_mov_tl(cpu_cc_src
, s
->tmp4
);
1745 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
1746 set_cc_op(s
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1750 static void gen_rot_rm_T1(DisasContext
*s
, MemOp ot
, int op1
, int is_right
)
1752 target_ulong mask
= (ot
== MO_64
? 0x3f : 0x1f);
1756 if (op1
== OR_TMP0
) {
1757 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1759 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1762 tcg_gen_andi_tl(s
->T1
, s
->T1
, mask
);
1766 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1767 tcg_gen_ext8u_tl(s
->T0
, s
->T0
);
1768 tcg_gen_muli_tl(s
->T0
, s
->T0
, 0x01010101);
1771 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1772 tcg_gen_deposit_tl(s
->T0
, s
->T0
, s
->T0
, 16, 16);
1775 #ifdef TARGET_X86_64
1777 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
1778 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
1780 tcg_gen_rotr_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
1782 tcg_gen_rotl_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
1784 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
1789 tcg_gen_rotr_tl(s
->T0
, s
->T0
, s
->T1
);
1791 tcg_gen_rotl_tl(s
->T0
, s
->T0
, s
->T1
);
1797 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1799 /* We'll need the flags computed into CC_SRC. */
1800 gen_compute_eflags(s
);
1802 /* The value that was "rotated out" is now present at the other end
1803 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1804 since we've computed the flags into CC_SRC, these variables are
1807 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
- 1);
1808 tcg_gen_shri_tl(cpu_cc_dst
, s
->T0
, mask
);
1809 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1811 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
);
1812 tcg_gen_andi_tl(cpu_cc_dst
, s
->T0
, 1);
1814 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1815 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1817 /* Now conditionally store the new CC_OP value. If the shift count
1818 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1819 Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
1820 exactly as we computed above. */
1821 t0
= tcg_constant_i32(0);
1822 t1
= tcg_temp_new_i32();
1823 tcg_gen_trunc_tl_i32(t1
, s
->T1
);
1824 tcg_gen_movi_i32(s
->tmp2_i32
, CC_OP_ADCOX
);
1825 tcg_gen_movi_i32(s
->tmp3_i32
, CC_OP_EFLAGS
);
1826 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, t1
, t0
,
1827 s
->tmp2_i32
, s
->tmp3_i32
);
1829 /* The CC_OP value is no longer predictable. */
1830 set_cc_op(s
, CC_OP_DYNAMIC
);
1833 static void gen_rot_rm_im(DisasContext
*s
, MemOp ot
, int op1
, int op2
,
1836 int mask
= (ot
== MO_64
? 0x3f : 0x1f);
1840 if (op1
== OR_TMP0
) {
1841 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1843 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1849 #ifdef TARGET_X86_64
1851 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
1853 tcg_gen_rotri_i32(s
->tmp2_i32
, s
->tmp2_i32
, op2
);
1855 tcg_gen_rotli_i32(s
->tmp2_i32
, s
->tmp2_i32
, op2
);
1857 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
1862 tcg_gen_rotri_tl(s
->T0
, s
->T0
, op2
);
1864 tcg_gen_rotli_tl(s
->T0
, s
->T0
, op2
);
1875 shift
= mask
+ 1 - shift
;
1877 gen_extu(ot
, s
->T0
);
1878 tcg_gen_shli_tl(s
->tmp0
, s
->T0
, shift
);
1879 tcg_gen_shri_tl(s
->T0
, s
->T0
, mask
+ 1 - shift
);
1880 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->tmp0
);
1886 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1889 /* Compute the flags into CC_SRC. */
1890 gen_compute_eflags(s
);
1892 /* The value that was "rotated out" is now present at the other end
1893 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1894 since we've computed the flags into CC_SRC, these variables are
1897 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
- 1);
1898 tcg_gen_shri_tl(cpu_cc_dst
, s
->T0
, mask
);
1899 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1901 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
);
1902 tcg_gen_andi_tl(cpu_cc_dst
, s
->T0
, 1);
1904 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1905 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1906 set_cc_op(s
, CC_OP_ADCOX
);
1910 /* XXX: add faster immediate = 1 case */
1911 static void gen_rotc_rm_T1(DisasContext
*s
, MemOp ot
, int op1
,
1914 gen_compute_eflags(s
);
1915 assert(s
->cc_op
== CC_OP_EFLAGS
);
1919 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1921 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1926 gen_helper_rcrb(s
->T0
, tcg_env
, s
->T0
, s
->T1
);
1929 gen_helper_rcrw(s
->T0
, tcg_env
, s
->T0
, s
->T1
);
1932 gen_helper_rcrl(s
->T0
, tcg_env
, s
->T0
, s
->T1
);
1934 #ifdef TARGET_X86_64
1936 gen_helper_rcrq(s
->T0
, tcg_env
, s
->T0
, s
->T1
);
1940 g_assert_not_reached();
1945 gen_helper_rclb(s
->T0
, tcg_env
, s
->T0
, s
->T1
);
1948 gen_helper_rclw(s
->T0
, tcg_env
, s
->T0
, s
->T1
);
1951 gen_helper_rcll(s
->T0
, tcg_env
, s
->T0
, s
->T1
);
1953 #ifdef TARGET_X86_64
1955 gen_helper_rclq(s
->T0
, tcg_env
, s
->T0
, s
->T1
);
1959 g_assert_not_reached();
1963 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1966 /* XXX: add faster immediate case */
1967 static void gen_shiftd_rm_T1(DisasContext
*s
, MemOp ot
, int op1
,
1968 bool is_right
, TCGv count_in
)
1970 target_ulong mask
= (ot
== MO_64
? 63 : 31);
1974 if (op1
== OR_TMP0
) {
1975 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1977 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1980 count
= tcg_temp_new();
1981 tcg_gen_andi_tl(count
, count_in
, mask
);
1985 /* Note: we implement the Intel behaviour for shift count > 16.
1986 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1987 portion by constructing it as a 32-bit value. */
1989 tcg_gen_deposit_tl(s
->tmp0
, s
->T0
, s
->T1
, 16, 16);
1990 tcg_gen_mov_tl(s
->T1
, s
->T0
);
1991 tcg_gen_mov_tl(s
->T0
, s
->tmp0
);
1993 tcg_gen_deposit_tl(s
->T1
, s
->T0
, s
->T1
, 16, 16);
1996 * If TARGET_X86_64 defined then fall through into MO_32 case,
1997 * otherwise fall through default case.
2000 #ifdef TARGET_X86_64
2001 /* Concatenate the two 32-bit values and use a 64-bit shift. */
2002 tcg_gen_subi_tl(s
->tmp0
, count
, 1);
2004 tcg_gen_concat_tl_i64(s
->T0
, s
->T0
, s
->T1
);
2005 tcg_gen_shr_i64(s
->tmp0
, s
->T0
, s
->tmp0
);
2006 tcg_gen_shr_i64(s
->T0
, s
->T0
, count
);
2008 tcg_gen_concat_tl_i64(s
->T0
, s
->T1
, s
->T0
);
2009 tcg_gen_shl_i64(s
->tmp0
, s
->T0
, s
->tmp0
);
2010 tcg_gen_shl_i64(s
->T0
, s
->T0
, count
);
2011 tcg_gen_shri_i64(s
->tmp0
, s
->tmp0
, 32);
2012 tcg_gen_shri_i64(s
->T0
, s
->T0
, 32);
2017 tcg_gen_subi_tl(s
->tmp0
, count
, 1);
2019 tcg_gen_shr_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
2021 tcg_gen_subfi_tl(s
->tmp4
, mask
+ 1, count
);
2022 tcg_gen_shr_tl(s
->T0
, s
->T0
, count
);
2023 tcg_gen_shl_tl(s
->T1
, s
->T1
, s
->tmp4
);
2025 tcg_gen_shl_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
2027 /* Only needed if count > 16, for Intel behaviour. */
2028 tcg_gen_subfi_tl(s
->tmp4
, 33, count
);
2029 tcg_gen_shr_tl(s
->tmp4
, s
->T1
, s
->tmp4
);
2030 tcg_gen_or_tl(s
->tmp0
, s
->tmp0
, s
->tmp4
);
2033 tcg_gen_subfi_tl(s
->tmp4
, mask
+ 1, count
);
2034 tcg_gen_shl_tl(s
->T0
, s
->T0
, count
);
2035 tcg_gen_shr_tl(s
->T1
, s
->T1
, s
->tmp4
);
2037 tcg_gen_movi_tl(s
->tmp4
, 0);
2038 tcg_gen_movcond_tl(TCG_COND_EQ
, s
->T1
, count
, s
->tmp4
,
2040 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->T1
);
2045 gen_op_st_rm_T0_A0(s
, ot
, op1
);
2047 gen_shift_flags(s
, ot
, s
->T0
, s
->tmp0
, count
, is_right
);
2050 static void gen_shift(DisasContext
*s1
, int op
, MemOp ot
, int d
, int s
)
2053 gen_op_mov_v_reg(s1
, ot
, s1
->T1
, s
);
2056 gen_rot_rm_T1(s1
, ot
, d
, 0);
2059 gen_rot_rm_T1(s1
, ot
, d
, 1);
2063 gen_shift_rm_T1(s1
, ot
, d
, 0, 0);
2066 gen_shift_rm_T1(s1
, ot
, d
, 1, 0);
2069 gen_shift_rm_T1(s1
, ot
, d
, 1, 1);
2072 gen_rotc_rm_T1(s1
, ot
, d
, 0);
2075 gen_rotc_rm_T1(s1
, ot
, d
, 1);
2080 static void gen_shifti(DisasContext
*s1
, int op
, MemOp ot
, int d
, int c
)
2084 gen_rot_rm_im(s1
, ot
, d
, c
, 0);
2087 gen_rot_rm_im(s1
, ot
, d
, c
, 1);
2091 gen_shift_rm_im(s1
, ot
, d
, c
, 0, 0);
2094 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 0);
2097 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 1);
2100 /* currently not optimized */
2101 tcg_gen_movi_tl(s1
->T1
, c
);
2102 gen_shift(s1
, op
, ot
, d
, OR_TMP1
);
2107 #define X86_MAX_INSN_LENGTH 15
2109 static uint64_t advance_pc(CPUX86State
*env
, DisasContext
*s
, int num_bytes
)
2111 uint64_t pc
= s
->pc
;
2113 /* This is a subsequent insn that crosses a page boundary. */
2114 if (s
->base
.num_insns
> 1 &&
2115 !is_same_page(&s
->base
, s
->pc
+ num_bytes
- 1)) {
2116 siglongjmp(s
->jmpbuf
, 2);
2120 if (unlikely(cur_insn_len(s
) > X86_MAX_INSN_LENGTH
)) {
2121 /* If the instruction's 16th byte is on a different page than the 1st, a
2122 * page fault on the second page wins over the general protection fault
2123 * caused by the instruction being too long.
2124 * This can happen even if the operand is only one byte long!
2126 if (((s
->pc
- 1) ^ (pc
- 1)) & TARGET_PAGE_MASK
) {
2127 volatile uint8_t unused
=
2128 cpu_ldub_code(env
, (s
->pc
- 1) & TARGET_PAGE_MASK
);
2131 siglongjmp(s
->jmpbuf
, 1);
2137 static inline uint8_t x86_ldub_code(CPUX86State
*env
, DisasContext
*s
)
2139 return translator_ldub(env
, &s
->base
, advance_pc(env
, s
, 1));
2142 static inline int16_t x86_ldsw_code(CPUX86State
*env
, DisasContext
*s
)
2144 return translator_lduw(env
, &s
->base
, advance_pc(env
, s
, 2));
2147 static inline uint16_t x86_lduw_code(CPUX86State
*env
, DisasContext
*s
)
2149 return translator_lduw(env
, &s
->base
, advance_pc(env
, s
, 2));
2152 static inline uint32_t x86_ldl_code(CPUX86State
*env
, DisasContext
*s
)
2154 return translator_ldl(env
, &s
->base
, advance_pc(env
, s
, 4));
#ifdef TARGET_X86_64
/* Fetch an unsigned 64-bit quadword of instruction stream. */
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldq(env, &s->base, advance_pc(env, s, 8));
}
#endif
2164 /* Decompose an address. */
2166 typedef struct AddressParts
{
2174 static AddressParts
gen_lea_modrm_0(CPUX86State
*env
, DisasContext
*s
,
2177 int def_seg
, base
, index
, scale
, mod
, rm
;
2186 mod
= (modrm
>> 6) & 3;
2188 base
= rm
| REX_B(s
);
2191 /* Normally filtered out earlier, but including this path
2192 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
2201 int code
= x86_ldub_code(env
, s
);
2202 scale
= (code
>> 6) & 3;
2203 index
= ((code
>> 3) & 7) | REX_X(s
);
2205 index
= -1; /* no index */
2207 base
= (code
& 7) | REX_B(s
);
2213 if ((base
& 7) == 5) {
2215 disp
= (int32_t)x86_ldl_code(env
, s
);
2216 if (CODE64(s
) && !havesib
) {
2218 disp
+= s
->pc
+ s
->rip_offset
;
2223 disp
= (int8_t)x86_ldub_code(env
, s
);
2227 disp
= (int32_t)x86_ldl_code(env
, s
);
2231 /* For correct popl handling with esp. */
2232 if (base
== R_ESP
&& s
->popl_esp_hack
) {
2233 disp
+= s
->popl_esp_hack
;
2235 if (base
== R_EBP
|| base
== R_ESP
) {
2244 disp
= x86_lduw_code(env
, s
);
2247 } else if (mod
== 1) {
2248 disp
= (int8_t)x86_ldub_code(env
, s
);
2250 disp
= (int16_t)x86_lduw_code(env
, s
);
2290 g_assert_not_reached();
2294 return (AddressParts
){ def_seg
, base
, index
, scale
, disp
};
2297 /* Compute the address, with a minimum number of TCG ops. */
2298 static TCGv
gen_lea_modrm_1(DisasContext
*s
, AddressParts a
, bool is_vsib
)
2302 if (a
.index
>= 0 && !is_vsib
) {
2304 ea
= cpu_regs
[a
.index
];
2306 tcg_gen_shli_tl(s
->A0
, cpu_regs
[a
.index
], a
.scale
);
2310 tcg_gen_add_tl(s
->A0
, ea
, cpu_regs
[a
.base
]);
2313 } else if (a
.base
>= 0) {
2314 ea
= cpu_regs
[a
.base
];
2317 if (tb_cflags(s
->base
.tb
) & CF_PCREL
&& a
.base
== -2) {
2318 /* With cpu_eip ~= pc_save, the expression is pc-relative. */
2319 tcg_gen_addi_tl(s
->A0
, cpu_eip
, a
.disp
- s
->pc_save
);
2321 tcg_gen_movi_tl(s
->A0
, a
.disp
);
2324 } else if (a
.disp
!= 0) {
2325 tcg_gen_addi_tl(s
->A0
, ea
, a
.disp
);
2332 static void gen_lea_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2334 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
2335 TCGv ea
= gen_lea_modrm_1(s
, a
, false);
2336 gen_lea_v_seg(s
, s
->aflag
, ea
, a
.def_seg
, s
->override
);
2339 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2341 (void)gen_lea_modrm_0(env
, s
, modrm
);
2344 /* Used for BNDCL, BNDCU, BNDCN. */
2345 static void gen_bndck(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2346 TCGCond cond
, TCGv_i64 bndv
)
2348 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
2349 TCGv ea
= gen_lea_modrm_1(s
, a
, false);
2351 tcg_gen_extu_tl_i64(s
->tmp1_i64
, ea
);
2353 tcg_gen_ext32u_i64(s
->tmp1_i64
, s
->tmp1_i64
);
2355 tcg_gen_setcond_i64(cond
, s
->tmp1_i64
, s
->tmp1_i64
, bndv
);
2356 tcg_gen_extrl_i64_i32(s
->tmp2_i32
, s
->tmp1_i64
);
2357 gen_helper_bndck(tcg_env
, s
->tmp2_i32
);
2360 /* used for LEA and MOV AX, mem */
2361 static void gen_add_A0_ds_seg(DisasContext
*s
)
2363 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, R_DS
, s
->override
);
2366 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2368 static void gen_ldst_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2369 MemOp ot
, int reg
, int is_store
)
2373 mod
= (modrm
>> 6) & 3;
2374 rm
= (modrm
& 7) | REX_B(s
);
2378 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
2379 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
2381 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
2383 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2386 gen_lea_modrm(env
, s
, modrm
);
2389 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
2390 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
2392 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
2394 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2399 static target_ulong
insn_get_addr(CPUX86State
*env
, DisasContext
*s
, MemOp ot
)
2405 ret
= x86_ldub_code(env
, s
);
2408 ret
= x86_lduw_code(env
, s
);
2411 ret
= x86_ldl_code(env
, s
);
2413 #ifdef TARGET_X86_64
2415 ret
= x86_ldq_code(env
, s
);
2419 g_assert_not_reached();
2424 static inline uint32_t insn_get(CPUX86State
*env
, DisasContext
*s
, MemOp ot
)
2430 ret
= x86_ldub_code(env
, s
);
2433 ret
= x86_lduw_code(env
, s
);
2436 #ifdef TARGET_X86_64
2439 ret
= x86_ldl_code(env
, s
);
2442 g_assert_not_reached();
2447 static target_long
insn_get_signed(CPUX86State
*env
, DisasContext
*s
, MemOp ot
)
2453 ret
= (int8_t) x86_ldub_code(env
, s
);
2456 ret
= (int16_t) x86_lduw_code(env
, s
);
2459 ret
= (int32_t) x86_ldl_code(env
, s
);
2461 #ifdef TARGET_X86_64
2463 ret
= x86_ldq_code(env
, s
);
2467 g_assert_not_reached();
2472 static inline int insn_const_size(MemOp ot
)
2481 static void gen_jcc(DisasContext
*s
, int b
, int diff
)
2483 TCGLabel
*l1
= gen_new_label();
2486 gen_jmp_rel_csize(s
, 0, 1);
2488 gen_jmp_rel(s
, s
->dflag
, diff
, 0);
2491 static void gen_cmovcc1(CPUX86State
*env
, DisasContext
*s
, MemOp ot
, int b
,
2496 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
2498 cc
= gen_prepare_cc(s
, b
, s
->T1
);
2499 if (cc
.mask
!= -1) {
2500 TCGv t0
= tcg_temp_new();
2501 tcg_gen_andi_tl(t0
, cc
.reg
, cc
.mask
);
2505 cc
.reg2
= tcg_constant_tl(cc
.imm
);
2508 tcg_gen_movcond_tl(cc
.cond
, s
->T0
, cc
.reg
, cc
.reg2
,
2509 s
->T0
, cpu_regs
[reg
]);
2510 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2513 static inline void gen_op_movl_T0_seg(DisasContext
*s
, X86Seg seg_reg
)
2515 tcg_gen_ld32u_tl(s
->T0
, tcg_env
,
2516 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2519 static inline void gen_op_movl_seg_T0_vm(DisasContext
*s
, X86Seg seg_reg
)
2521 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
2522 tcg_gen_st32_tl(s
->T0
, tcg_env
,
2523 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2524 tcg_gen_shli_tl(cpu_seg_base
[seg_reg
], s
->T0
, 4);
2527 /* move T0 to seg_reg and compute if the CPU state may change. Never
2528 call this function with seg_reg == R_CS */
2529 static void gen_movl_seg_T0(DisasContext
*s
, X86Seg seg_reg
)
2531 if (PE(s
) && !VM86(s
)) {
2532 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
2533 gen_helper_load_seg(tcg_env
, tcg_constant_i32(seg_reg
), s
->tmp2_i32
);
2534 /* abort translation because the addseg value may change or
2535 because ss32 may change. For R_SS, translation must always
2536 stop as a special handling must be done to disable hardware
2537 interrupts for the next instruction */
2538 if (seg_reg
== R_SS
) {
2539 s
->base
.is_jmp
= DISAS_EOB_INHIBIT_IRQ
;
2540 } else if (CODE32(s
) && seg_reg
< R_FS
) {
2541 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
2544 gen_op_movl_seg_T0_vm(s
, seg_reg
);
2545 if (seg_reg
== R_SS
) {
2546 s
->base
.is_jmp
= DISAS_EOB_INHIBIT_IRQ
;
2551 static void gen_svm_check_intercept(DisasContext
*s
, uint32_t type
)
2553 /* no SVM activated; fast case */
2554 if (likely(!GUEST(s
))) {
2557 gen_helper_svm_check_intercept(tcg_env
, tcg_constant_i32(type
));
2560 static inline void gen_stack_update(DisasContext
*s
, int addend
)
2562 gen_op_add_reg_im(s
, mo_stacksize(s
), R_ESP
, addend
);
2565 /* Generate a push. It depends on ss32, addseg and dflag. */
2566 static void gen_push_v(DisasContext
*s
, TCGv val
)
2568 MemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2569 MemOp a_ot
= mo_stacksize(s
);
2570 int size
= 1 << d_ot
;
2571 TCGv new_esp
= s
->A0
;
2573 tcg_gen_subi_tl(s
->A0
, cpu_regs
[R_ESP
], size
);
2578 tcg_gen_mov_tl(new_esp
, s
->A0
);
2580 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2583 gen_op_st_v(s
, d_ot
, val
, s
->A0
);
2584 gen_op_mov_reg_v(s
, a_ot
, R_ESP
, new_esp
);
2587 /* two step pop is necessary for precise exceptions */
2588 static MemOp
gen_pop_T0(DisasContext
*s
)
2590 MemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2592 gen_lea_v_seg(s
, mo_stacksize(s
), cpu_regs
[R_ESP
], R_SS
, -1);
2593 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->A0
);
2598 static inline void gen_pop_update(DisasContext
*s
, MemOp ot
)
2600 gen_stack_update(s
, 1 << ot
);
2603 static inline void gen_stack_A0(DisasContext
*s
)
2605 gen_lea_v_seg(s
, SS32(s
) ? MO_32
: MO_16
, cpu_regs
[R_ESP
], R_SS
, -1);
2608 static void gen_pusha(DisasContext
*s
)
2610 MemOp s_ot
= SS32(s
) ? MO_32
: MO_16
;
2611 MemOp d_ot
= s
->dflag
;
2612 int size
= 1 << d_ot
;
2615 for (i
= 0; i
< 8; i
++) {
2616 tcg_gen_addi_tl(s
->A0
, cpu_regs
[R_ESP
], (i
- 8) * size
);
2617 gen_lea_v_seg(s
, s_ot
, s
->A0
, R_SS
, -1);
2618 gen_op_st_v(s
, d_ot
, cpu_regs
[7 - i
], s
->A0
);
2621 gen_stack_update(s
, -8 * size
);
2624 static void gen_popa(DisasContext
*s
)
2626 MemOp s_ot
= SS32(s
) ? MO_32
: MO_16
;
2627 MemOp d_ot
= s
->dflag
;
2628 int size
= 1 << d_ot
;
2631 for (i
= 0; i
< 8; i
++) {
2632 /* ESP is not reloaded */
2633 if (7 - i
== R_ESP
) {
2636 tcg_gen_addi_tl(s
->A0
, cpu_regs
[R_ESP
], i
* size
);
2637 gen_lea_v_seg(s
, s_ot
, s
->A0
, R_SS
, -1);
2638 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->A0
);
2639 gen_op_mov_reg_v(s
, d_ot
, 7 - i
, s
->T0
);
2642 gen_stack_update(s
, 8 * size
);
2645 static void gen_enter(DisasContext
*s
, int esp_addend
, int level
)
2647 MemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2648 MemOp a_ot
= CODE64(s
) ? MO_64
: SS32(s
) ? MO_32
: MO_16
;
2649 int size
= 1 << d_ot
;
2651 /* Push BP; compute FrameTemp into T1. */
2652 tcg_gen_subi_tl(s
->T1
, cpu_regs
[R_ESP
], size
);
2653 gen_lea_v_seg(s
, a_ot
, s
->T1
, R_SS
, -1);
2654 gen_op_st_v(s
, d_ot
, cpu_regs
[R_EBP
], s
->A0
);
2660 /* Copy level-1 pointers from the previous frame. */
2661 for (i
= 1; i
< level
; ++i
) {
2662 tcg_gen_subi_tl(s
->A0
, cpu_regs
[R_EBP
], size
* i
);
2663 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2664 gen_op_ld_v(s
, d_ot
, s
->tmp0
, s
->A0
);
2666 tcg_gen_subi_tl(s
->A0
, s
->T1
, size
* i
);
2667 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2668 gen_op_st_v(s
, d_ot
, s
->tmp0
, s
->A0
);
2671 /* Push the current FrameTemp as the last level. */
2672 tcg_gen_subi_tl(s
->A0
, s
->T1
, size
* level
);
2673 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2674 gen_op_st_v(s
, d_ot
, s
->T1
, s
->A0
);
2677 /* Copy the FrameTemp value to EBP. */
2678 gen_op_mov_reg_v(s
, a_ot
, R_EBP
, s
->T1
);
2680 /* Compute the final value of ESP. */
2681 tcg_gen_subi_tl(s
->T1
, s
->T1
, esp_addend
+ size
* level
);
2682 gen_op_mov_reg_v(s
, a_ot
, R_ESP
, s
->T1
);
2685 static void gen_leave(DisasContext
*s
)
2687 MemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2688 MemOp a_ot
= mo_stacksize(s
);
2690 gen_lea_v_seg(s
, a_ot
, cpu_regs
[R_EBP
], R_SS
, -1);
2691 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->A0
);
2693 tcg_gen_addi_tl(s
->T1
, cpu_regs
[R_EBP
], 1 << d_ot
);
2695 gen_op_mov_reg_v(s
, d_ot
, R_EBP
, s
->T0
);
2696 gen_op_mov_reg_v(s
, a_ot
, R_ESP
, s
->T1
);
2699 /* Similarly, except that the assumption here is that we don't decode
2700 the instruction at all -- either a missing opcode, an unimplemented
2701 feature, or just a bogus instruction stream. */
2702 static void gen_unknown_opcode(CPUX86State
*env
, DisasContext
*s
)
2704 gen_illegal_opcode(s
);
2706 if (qemu_loglevel_mask(LOG_UNIMP
)) {
2707 FILE *logfile
= qemu_log_trylock();
2709 target_ulong pc
= s
->base
.pc_next
, end
= s
->pc
;
2711 fprintf(logfile
, "ILLOPC: " TARGET_FMT_lx
":", pc
);
2712 for (; pc
< end
; ++pc
) {
2713 fprintf(logfile
, " %02x", cpu_ldub_code(env
, pc
));
2715 fprintf(logfile
, "\n");
2716 qemu_log_unlock(logfile
);
2721 /* an interrupt is different from an exception because of the
2723 static void gen_interrupt(DisasContext
*s
, int intno
)
2725 gen_update_cc_op(s
);
2726 gen_update_eip_cur(s
);
2727 gen_helper_raise_interrupt(tcg_env
, tcg_constant_i32(intno
),
2728 cur_insn_len_i32(s
));
2729 s
->base
.is_jmp
= DISAS_NORETURN
;
2732 static void gen_set_hflag(DisasContext
*s
, uint32_t mask
)
2734 if ((s
->flags
& mask
) == 0) {
2735 TCGv_i32 t
= tcg_temp_new_i32();
2736 tcg_gen_ld_i32(t
, tcg_env
, offsetof(CPUX86State
, hflags
));
2737 tcg_gen_ori_i32(t
, t
, mask
);
2738 tcg_gen_st_i32(t
, tcg_env
, offsetof(CPUX86State
, hflags
));
2743 static void gen_reset_hflag(DisasContext
*s
, uint32_t mask
)
2745 if (s
->flags
& mask
) {
2746 TCGv_i32 t
= tcg_temp_new_i32();
2747 tcg_gen_ld_i32(t
, tcg_env
, offsetof(CPUX86State
, hflags
));
2748 tcg_gen_andi_i32(t
, t
, ~mask
);
2749 tcg_gen_st_i32(t
, tcg_env
, offsetof(CPUX86State
, hflags
));
2754 static void gen_set_eflags(DisasContext
*s
, target_ulong mask
)
2756 TCGv t
= tcg_temp_new();
2758 tcg_gen_ld_tl(t
, tcg_env
, offsetof(CPUX86State
, eflags
));
2759 tcg_gen_ori_tl(t
, t
, mask
);
2760 tcg_gen_st_tl(t
, tcg_env
, offsetof(CPUX86State
, eflags
));
2763 static void gen_reset_eflags(DisasContext
*s
, target_ulong mask
)
2765 TCGv t
= tcg_temp_new();
2767 tcg_gen_ld_tl(t
, tcg_env
, offsetof(CPUX86State
, eflags
));
2768 tcg_gen_andi_tl(t
, t
, ~mask
);
2769 tcg_gen_st_tl(t
, tcg_env
, offsetof(CPUX86State
, eflags
));
2772 /* Clear BND registers during legacy branches. */
2773 static void gen_bnd_jmp(DisasContext
*s
)
2775 /* Clear the registers only if BND prefix is missing, MPX is enabled,
2776 and if the BNDREGs are known to be in use (non-zero) already.
2777 The helper itself will check BNDPRESERVE at runtime. */
2778 if ((s
->prefix
& PREFIX_REPNZ
) == 0
2779 && (s
->flags
& HF_MPX_EN_MASK
) != 0
2780 && (s
->flags
& HF_MPX_IU_MASK
) != 0) {
2781 gen_helper_bnd_jmp(tcg_env
);
/* Generate an end of block. Trace exception is also generated if needed.
   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
   If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
   S->TF.  This is used by the syscall/sysret insns.
   If JR, the jump target is already in EIP and we exit via a lookup of the
   next TB rather than a plain exit to the main loop.  */
static void
do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
{
    gen_update_cc_op(s);

    /* If several instructions disable interrupts, only the first does it.  */
    if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
        gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
    } else {
        gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
    }

    /* RF is cleared at the end of every successfully executed instruction. */
    if (s->base.tb->flags & HF_RF_MASK) {
        gen_reset_eflags(s, RF_MASK);
    }
    if (recheck_tf) {
        gen_helper_rechecking_single_step(tcg_env);
        tcg_gen_exit_tb(NULL, 0);
    } else if (s->flags & HF_TF_MASK) {
        /* Single-step trap: raise #DB after this instruction. */
        gen_helper_single_step(tcg_env);
    } else if (jr) {
        /* Chain to the next TB via a hash lookup on EIP. */
        tcg_gen_lookup_and_goto_ptr();
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}
/* End of block; see do_gen_eob_worker.  Never exits via TB chaining (jr). */
static void
gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
{
    do_gen_eob_worker(s, inhibit, recheck_tf, false);
}
/* End of block.
   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.  */
static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
{
    gen_eob_worker(s, inhibit, false);
}
/* End of block, resetting the inhibit irq flag. */
static void gen_eob(DisasContext *s)
{
    gen_eob_worker(s, false, false);
}
/* Jump to register: EIP already holds the target, so end the block
   with a TB-lookup exit (jr=true) to allow chaining. */
static void gen_jr(DisasContext *s)
{
    do_gen_eob_worker(s, false, false, true);
}
/* Jump to eip+diff, truncating the result to OT. */
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
{
    bool use_goto_tb = s->jmp_opt;
    target_ulong mask = -1;
    target_ulong new_pc = s->pc + diff;
    target_ulong new_eip = new_pc - s->cs_base;

    /* In 64-bit mode, operand size is fixed at 64 bits. */
    if (!CODE64(s)) {
        if (ot == MO_16) {
            mask = 0xffff;
            if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
                /* NOTE(review): with PC-relative TBs, a data16 branch in
                   code32 needs an explicit EIP mask, so a direct goto_tb
                   cannot be used -- confirm against upstream comment. */
                use_goto_tb = false;
            }
        } else {
            mask = 0xffffffff;
        }
    }
    new_pc &= mask;
    new_eip &= mask;

    /* cc_op must be resolved: the branch may leave this TB. */
    gen_update_cc_op(s);
    set_cc_op(s, CC_OP_DYNAMIC);

    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
        /*
         * If we can prove the branch does not leave the page and we have
         * no extra masking to apply (data16 branch in code32, see above),
         * then we have also proven that the addition does not wrap.
         */
        if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
            tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
            use_goto_tb = false;
        }
    }

    if (use_goto_tb &&
        translator_use_goto_tb(&s->base, new_eip + s->cs_base)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
            tcg_gen_movi_tl(cpu_eip, new_eip);
        }
        tcg_gen_exit_tb(s->base.tb, tb_num);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
            tcg_gen_movi_tl(cpu_eip, new_eip);
        }
        if (s->jmp_opt) {
            gen_jr(s);   /* jump to another page */
        } else {
            gen_eob(s);  /* exit to main loop */
        }
    }
}
/* Jump to eip+diff, truncating to the current code size. */
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
{
    /* CODE64 ignores the OT argument, so we need not consider it. */
    gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
}
/* Load a little-endian 64-bit value from the guest address in A0 and
   store it into the CPUX86State field at OFFSET. */
static inline void gen_ldq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
    tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
}
/* Load the 64-bit CPUX86State field at OFFSET and store it to the guest
   address in A0, little-endian. */
static inline void gen_stq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
}
2919 static inline void gen_ldo_env_A0(DisasContext
*s
, int offset
, bool align
)
2921 int mem_index
= s
->mem_index
;
2922 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
, mem_index
,
2923 MO_LEUQ
| (align
? MO_ALIGN_16
: 0));
2924 tcg_gen_st_i64(s
->tmp1_i64
, tcg_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2925 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 8);
2926 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2927 tcg_gen_st_i64(s
->tmp1_i64
, tcg_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2930 static inline void gen_sto_env_A0(DisasContext
*s
, int offset
, bool align
)
2932 int mem_index
= s
->mem_index
;
2933 tcg_gen_ld_i64(s
->tmp1_i64
, tcg_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2934 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
, mem_index
,
2935 MO_LEUQ
| (align
? MO_ALIGN_16
: 0));
2936 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 8);
2937 tcg_gen_ld_i64(s
->tmp1_i64
, tcg_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2938 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2941 static void gen_ldy_env_A0(DisasContext
*s
, int offset
, bool align
)
2943 int mem_index
= s
->mem_index
;
2944 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
, mem_index
,
2945 MO_LEUQ
| (align
? MO_ALIGN_32
: 0));
2946 tcg_gen_st_i64(s
->tmp1_i64
, tcg_env
, offset
+ offsetof(YMMReg
, YMM_Q(0)));
2947 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 8);
2948 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2949 tcg_gen_st_i64(s
->tmp1_i64
, tcg_env
, offset
+ offsetof(YMMReg
, YMM_Q(1)));
2951 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 16);
2952 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2953 tcg_gen_st_i64(s
->tmp1_i64
, tcg_env
, offset
+ offsetof(YMMReg
, YMM_Q(2)));
2954 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 24);
2955 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2956 tcg_gen_st_i64(s
->tmp1_i64
, tcg_env
, offset
+ offsetof(YMMReg
, YMM_Q(3)));
2959 static void gen_sty_env_A0(DisasContext
*s
, int offset
, bool align
)
2961 int mem_index
= s
->mem_index
;
2962 tcg_gen_ld_i64(s
->tmp1_i64
, tcg_env
, offset
+ offsetof(YMMReg
, YMM_Q(0)));
2963 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
, mem_index
,
2964 MO_LEUQ
| (align
? MO_ALIGN_32
: 0));
2965 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 8);
2966 tcg_gen_ld_i64(s
->tmp1_i64
, tcg_env
, offset
+ offsetof(YMMReg
, YMM_Q(1)));
2967 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2968 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 16);
2969 tcg_gen_ld_i64(s
->tmp1_i64
, tcg_env
, offset
+ offsetof(YMMReg
, YMM_Q(2)));
2970 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2971 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 24);
2972 tcg_gen_ld_i64(s
->tmp1_i64
, tcg_env
, offset
+ offsetof(YMMReg
, YMM_Q(3)));
2973 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2976 #include "decode-new.h"
2977 #include "emit.c.inc"
2978 #include "decode-new.c.inc"
/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on match
 * store ECX:EBX, otherwise load the operand into EDX:EAX.  ZF is set
 * from the comparison result.
 */
static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
{
    TCGv_i64 cmp, val, old;
    TCGv Z;

    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i64();
    val = tcg_temp_new_i64();
    old = tcg_temp_new_i64();

    /* Construct the comparison values from the register pair. */
    tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
    } else {
        tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
                                      s->mem_index, MO_TEUQ);
    }

    /* Set tmp0 to match the required value of Z. */
    tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
    Z = tcg_temp_new();
    tcg_gen_trunc_i64_tl(Z, cmp);

    /*
     * Extract the result values for the register pair.
     * For 32-bit, we may do this unconditionally, because on success (Z=1),
     * the old value matches the previous value in EDX:EAX.  For x86_64,
     * the store must be conditional, because we must leave the source
     * registers unchanged on success, and zero-extend the writeback
     * on failure (Z=0).
     */
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
    } else {
        TCGv zero = tcg_constant_tl(0);

        tcg_gen_extr_i64_tl(s->T0, s->T1, old);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
                           s->T0, cpu_regs[R_EAX]);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
                           s->T1, cpu_regs[R_EDX]);
    }

    /* Update Z in EFLAGS; the other flags are unchanged. */
    gen_compute_eflags(s);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
}
3033 #ifdef TARGET_X86_64
/*
 * CMPXCHG16B (64-bit only): compare RDX:RAX with the aligned 128-bit
 * memory operand; on match store RCX:RBX, otherwise load the operand
 * into RDX:RAX.  ZF is set from the comparison result.
 */
static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
{
    MemOp mop = MO_TE | MO_128 | MO_ALIGN;
    TCGv_i64 t0, t1;
    TCGv_i128 cmp, val;

    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i128();
    val = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    } else {
        tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    }

    /* val now holds the old memory value; split it for writeback. */
    tcg_gen_extr_i128_i64(s->T0, s->T1, val);

    /* Determine success after the fact. */
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
    tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
    tcg_gen_or_i64(t0, t0, t1);

    /* Update Z in EFLAGS; the other flags are unchanged. */
    gen_compute_eflags(s);
    tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);

    /*
     * Extract the result values for the register pair.  We may do this
     * unconditionally, because on success (Z=1), the old value matches
     * the previous value in RDX:RAX.
     */
    tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
    tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
}
3078 /* convert one instruction. s->base.is_jmp is set if the translation must
3079 be stopped. Return the next pc value */
3080 static bool disas_insn(DisasContext
*s
, CPUState
*cpu
)
3082 CPUX86State
*env
= cpu_env(cpu
);
3085 MemOp ot
, aflag
, dflag
;
3086 int modrm
, reg
, rm
, mod
, op
, opreg
, val
;
3087 bool orig_cc_op_dirty
= s
->cc_op_dirty
;
3088 CCOp orig_cc_op
= s
->cc_op
;
3089 target_ulong orig_pc_save
= s
->pc_save
;
3091 s
->pc
= s
->base
.pc_next
;
3093 #ifdef TARGET_X86_64
3098 s
->rip_offset
= 0; /* for relative ip address */
3102 switch (sigsetjmp(s
->jmpbuf
, 0)) {
3106 gen_exception_gpf(s
);
3109 /* Restore state that may affect the next instruction. */
3110 s
->pc
= s
->base
.pc_next
;
3112 * TODO: These save/restore can be removed after the table-based
3113 * decoder is complete; we will be decoding the insn completely
3114 * before any code generation that might affect these variables.
3116 s
->cc_op_dirty
= orig_cc_op_dirty
;
3117 s
->cc_op
= orig_cc_op
;
3118 s
->pc_save
= orig_pc_save
;
3120 s
->base
.num_insns
--;
3121 tcg_remove_ops_after(s
->prev_insn_end
);
3122 s
->base
.is_jmp
= DISAS_TOO_MANY
;
3125 g_assert_not_reached();
3131 s
->prefix
= prefixes
;
3132 b
= x86_ldub_code(env
, s
);
3133 /* Collect prefixes. */
3138 b
= x86_ldub_code(env
, s
) + 0x100;
3141 prefixes
|= PREFIX_REPZ
;
3142 prefixes
&= ~PREFIX_REPNZ
;
3145 prefixes
|= PREFIX_REPNZ
;
3146 prefixes
&= ~PREFIX_REPZ
;
3149 prefixes
|= PREFIX_LOCK
;
3170 prefixes
|= PREFIX_DATA
;
3173 prefixes
|= PREFIX_ADR
;
3175 #ifdef TARGET_X86_64
3179 prefixes
|= PREFIX_REX
;
3180 s
->vex_w
= (b
>> 3) & 1;
3181 s
->rex_r
= (b
& 0x4) << 1;
3182 s
->rex_x
= (b
& 0x2) << 2;
3183 s
->rex_b
= (b
& 0x1) << 3;
3188 case 0xc5: /* 2-byte VEX */
3189 case 0xc4: /* 3-byte VEX */
3190 if (CODE32(s
) && !VM86(s
)) {
3191 int vex2
= x86_ldub_code(env
, s
);
3192 s
->pc
--; /* rewind the advance_pc() x86_ldub_code() did */
3194 if (!CODE64(s
) && (vex2
& 0xc0) != 0xc0) {
3195 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
3196 otherwise the instruction is LES or LDS. */
3199 disas_insn_new(s
, cpu
, b
);
3205 /* Post-process prefixes. */
3207 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
3208 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
3209 over 0x66 if both are present. */
3210 dflag
= (REX_W(s
) ? MO_64
: prefixes
& PREFIX_DATA
? MO_16
: MO_32
);
3211 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
3212 aflag
= (prefixes
& PREFIX_ADR
? MO_32
: MO_64
);
3214 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
3215 if (CODE32(s
) ^ ((prefixes
& PREFIX_DATA
) != 0)) {
3220 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
3221 if (CODE32(s
) ^ ((prefixes
& PREFIX_ADR
) != 0)) {
3228 s
->prefix
= prefixes
;
3232 /* now check op code */
3234 /**************************/
3249 ot
= mo_b_d(b
, dflag
);
3252 case 0: /* OP Ev, Gv */
3253 modrm
= x86_ldub_code(env
, s
);
3254 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
3255 mod
= (modrm
>> 6) & 3;
3256 rm
= (modrm
& 7) | REX_B(s
);
3258 gen_lea_modrm(env
, s
, modrm
);
3260 } else if (op
== OP_XORL
&& rm
== reg
) {
3262 /* xor reg, reg optimisation */
3263 set_cc_op(s
, CC_OP_CLR
);
3264 tcg_gen_movi_tl(s
->T0
, 0);
3265 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3270 gen_op_mov_v_reg(s
, ot
, s
->T1
, reg
);
3271 gen_op(s
, op
, ot
, opreg
);
3273 case 1: /* OP Gv, Ev */
3274 modrm
= x86_ldub_code(env
, s
);
3275 mod
= (modrm
>> 6) & 3;
3276 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
3277 rm
= (modrm
& 7) | REX_B(s
);
3279 gen_lea_modrm(env
, s
, modrm
);
3280 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
3281 } else if (op
== OP_XORL
&& rm
== reg
) {
3284 gen_op_mov_v_reg(s
, ot
, s
->T1
, rm
);
3286 gen_op(s
, op
, ot
, reg
);
3288 case 2: /* OP A, Iv */
3289 val
= insn_get(env
, s
, ot
);
3290 tcg_gen_movi_tl(s
->T1
, val
);
3291 gen_op(s
, op
, ot
, OR_EAX
);
3301 case 0x80: /* GRP1 */
3305 ot
= mo_b_d(b
, dflag
);
3307 modrm
= x86_ldub_code(env
, s
);
3308 mod
= (modrm
>> 6) & 3;
3309 rm
= (modrm
& 7) | REX_B(s
);
3310 op
= (modrm
>> 3) & 7;
3316 s
->rip_offset
= insn_const_size(ot
);
3317 gen_lea_modrm(env
, s
, modrm
);
3328 val
= insn_get(env
, s
, ot
);
3331 val
= (int8_t)insn_get(env
, s
, MO_8
);
3334 tcg_gen_movi_tl(s
->T1
, val
);
3335 gen_op(s
, op
, ot
, opreg
);
3339 /**************************/
3340 /* inc, dec, and other misc arith */
3341 case 0x40 ... 0x47: /* inc Gv */
3343 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), 1);
3345 case 0x48 ... 0x4f: /* dec Gv */
3347 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), -1);
3349 case 0xf6: /* GRP3 */
3351 ot
= mo_b_d(b
, dflag
);
3353 modrm
= x86_ldub_code(env
, s
);
3354 mod
= (modrm
>> 6) & 3;
3355 rm
= (modrm
& 7) | REX_B(s
);
3356 op
= (modrm
>> 3) & 7;
3359 s
->rip_offset
= insn_const_size(ot
);
3361 gen_lea_modrm(env
, s
, modrm
);
3362 /* For those below that handle locked memory, don't load here. */
3363 if (!(s
->prefix
& PREFIX_LOCK
)
3365 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
3368 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
3373 val
= insn_get(env
, s
, ot
);
3374 tcg_gen_movi_tl(s
->T1
, val
);
3375 gen_op_testl_T0_T1_cc(s
);
3376 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
3379 if (s
->prefix
& PREFIX_LOCK
) {
3383 tcg_gen_movi_tl(s
->T0
, ~0);
3384 tcg_gen_atomic_xor_fetch_tl(s
->T0
, s
->A0
, s
->T0
,
3385 s
->mem_index
, ot
| MO_LE
);
3387 tcg_gen_not_tl(s
->T0
, s
->T0
);
3389 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
3391 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
3396 if (s
->prefix
& PREFIX_LOCK
) {
3398 TCGv a0
, t0
, t1
, t2
;
3405 label1
= gen_new_label();
3407 gen_set_label(label1
);
3408 t1
= tcg_temp_new();
3409 t2
= tcg_temp_new();
3410 tcg_gen_mov_tl(t2
, t0
);
3411 tcg_gen_neg_tl(t1
, t0
);
3412 tcg_gen_atomic_cmpxchg_tl(t0
, a0
, t0
, t1
,
3413 s
->mem_index
, ot
| MO_LE
);
3414 tcg_gen_brcond_tl(TCG_COND_NE
, t0
, t2
, label1
);
3416 tcg_gen_neg_tl(s
->T0
, t0
);
3418 tcg_gen_neg_tl(s
->T0
, s
->T0
);
3420 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
3422 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
3425 gen_op_update_neg_cc(s
);
3426 set_cc_op(s
, CC_OP_SUBB
+ ot
);
3431 gen_op_mov_v_reg(s
, MO_8
, s
->T1
, R_EAX
);
3432 tcg_gen_ext8u_tl(s
->T0
, s
->T0
);
3433 tcg_gen_ext8u_tl(s
->T1
, s
->T1
);
3434 /* XXX: use 32 bit mul which could be faster */
3435 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
3436 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
3437 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
3438 tcg_gen_andi_tl(cpu_cc_src
, s
->T0
, 0xff00);
3439 set_cc_op(s
, CC_OP_MULB
);
3442 gen_op_mov_v_reg(s
, MO_16
, s
->T1
, R_EAX
);
3443 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
3444 tcg_gen_ext16u_tl(s
->T1
, s
->T1
);
3445 /* XXX: use 32 bit mul which could be faster */
3446 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
3447 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
3448 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
3449 tcg_gen_shri_tl(s
->T0
, s
->T0
, 16);
3450 gen_op_mov_reg_v(s
, MO_16
, R_EDX
, s
->T0
);
3451 tcg_gen_mov_tl(cpu_cc_src
, s
->T0
);
3452 set_cc_op(s
, CC_OP_MULW
);
3456 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3457 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, cpu_regs
[R_EAX
]);
3458 tcg_gen_mulu2_i32(s
->tmp2_i32
, s
->tmp3_i32
,
3459 s
->tmp2_i32
, s
->tmp3_i32
);
3460 tcg_gen_extu_i32_tl(cpu_regs
[R_EAX
], s
->tmp2_i32
);
3461 tcg_gen_extu_i32_tl(cpu_regs
[R_EDX
], s
->tmp3_i32
);
3462 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
3463 tcg_gen_mov_tl(cpu_cc_src
, cpu_regs
[R_EDX
]);
3464 set_cc_op(s
, CC_OP_MULL
);
3466 #ifdef TARGET_X86_64
3468 tcg_gen_mulu2_i64(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
],
3469 s
->T0
, cpu_regs
[R_EAX
]);
3470 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
3471 tcg_gen_mov_tl(cpu_cc_src
, cpu_regs
[R_EDX
]);
3472 set_cc_op(s
, CC_OP_MULQ
);
3480 gen_op_mov_v_reg(s
, MO_8
, s
->T1
, R_EAX
);
3481 tcg_gen_ext8s_tl(s
->T0
, s
->T0
);
3482 tcg_gen_ext8s_tl(s
->T1
, s
->T1
);
3483 /* XXX: use 32 bit mul which could be faster */
3484 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
3485 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
3486 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
3487 tcg_gen_ext8s_tl(s
->tmp0
, s
->T0
);
3488 tcg_gen_sub_tl(cpu_cc_src
, s
->T0
, s
->tmp0
);
3489 set_cc_op(s
, CC_OP_MULB
);
3492 gen_op_mov_v_reg(s
, MO_16
, s
->T1
, R_EAX
);
3493 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
3494 tcg_gen_ext16s_tl(s
->T1
, s
->T1
);
3495 /* XXX: use 32 bit mul which could be faster */
3496 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
3497 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
3498 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
3499 tcg_gen_ext16s_tl(s
->tmp0
, s
->T0
);
3500 tcg_gen_sub_tl(cpu_cc_src
, s
->T0
, s
->tmp0
);
3501 tcg_gen_shri_tl(s
->T0
, s
->T0
, 16);
3502 gen_op_mov_reg_v(s
, MO_16
, R_EDX
, s
->T0
);
3503 set_cc_op(s
, CC_OP_MULW
);
3507 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3508 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, cpu_regs
[R_EAX
]);
3509 tcg_gen_muls2_i32(s
->tmp2_i32
, s
->tmp3_i32
,
3510 s
->tmp2_i32
, s
->tmp3_i32
);
3511 tcg_gen_extu_i32_tl(cpu_regs
[R_EAX
], s
->tmp2_i32
);
3512 tcg_gen_extu_i32_tl(cpu_regs
[R_EDX
], s
->tmp3_i32
);
3513 tcg_gen_sari_i32(s
->tmp2_i32
, s
->tmp2_i32
, 31);
3514 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
3515 tcg_gen_sub_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
3516 tcg_gen_extu_i32_tl(cpu_cc_src
, s
->tmp2_i32
);
3517 set_cc_op(s
, CC_OP_MULL
);
3519 #ifdef TARGET_X86_64
3521 tcg_gen_muls2_i64(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
],
3522 s
->T0
, cpu_regs
[R_EAX
]);
3523 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
3524 tcg_gen_sari_tl(cpu_cc_src
, cpu_regs
[R_EAX
], 63);
3525 tcg_gen_sub_tl(cpu_cc_src
, cpu_cc_src
, cpu_regs
[R_EDX
]);
3526 set_cc_op(s
, CC_OP_MULQ
);
3534 gen_helper_divb_AL(tcg_env
, s
->T0
);
3537 gen_helper_divw_AX(tcg_env
, s
->T0
);
3541 gen_helper_divl_EAX(tcg_env
, s
->T0
);
3543 #ifdef TARGET_X86_64
3545 gen_helper_divq_EAX(tcg_env
, s
->T0
);
3553 gen_helper_idivb_AL(tcg_env
, s
->T0
);
3556 gen_helper_idivw_AX(tcg_env
, s
->T0
);
3560 gen_helper_idivl_EAX(tcg_env
, s
->T0
);
3562 #ifdef TARGET_X86_64
3564 gen_helper_idivq_EAX(tcg_env
, s
->T0
);
3574 case 0xfe: /* GRP4 */
3575 case 0xff: /* GRP5 */
3576 ot
= mo_b_d(b
, dflag
);
3578 modrm
= x86_ldub_code(env
, s
);
3579 mod
= (modrm
>> 6) & 3;
3580 rm
= (modrm
& 7) | REX_B(s
);
3581 op
= (modrm
>> 3) & 7;
3582 if (op
>= 2 && b
== 0xfe) {
3586 if (op
== 2 || op
== 4) {
3587 /* operand size for jumps is 64 bit */
3589 } else if (op
== 3 || op
== 5) {
3590 ot
= dflag
!= MO_16
? MO_32
+ REX_W(s
) : MO_16
;
3591 } else if (op
== 6) {
3592 /* default push size is 64 bit */
3593 ot
= mo_pushpop(s
, dflag
);
3597 gen_lea_modrm(env
, s
, modrm
);
3598 if (op
>= 2 && op
!= 3 && op
!= 5)
3599 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
3601 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
3605 case 0: /* inc Ev */
3610 gen_inc(s
, ot
, opreg
, 1);
3612 case 1: /* dec Ev */
3617 gen_inc(s
, ot
, opreg
, -1);
3619 case 2: /* call Ev */
3620 /* XXX: optimize if memory (no 'and' is necessary) */
3621 if (dflag
== MO_16
) {
3622 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
3624 gen_push_v(s
, eip_next_tl(s
));
3625 gen_op_jmp_v(s
, s
->T0
);
3627 s
->base
.is_jmp
= DISAS_JUMP
;
3629 case 3: /* lcall Ev */
3633 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
3634 gen_add_A0_im(s
, 1 << ot
);
3635 gen_op_ld_v(s
, MO_16
, s
->T0
, s
->A0
);
3637 if (PE(s
) && !VM86(s
)) {
3638 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3639 gen_helper_lcall_protected(tcg_env
, s
->tmp2_i32
, s
->T1
,
3640 tcg_constant_i32(dflag
- 1),
3643 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3644 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
3645 gen_helper_lcall_real(tcg_env
, s
->tmp2_i32
, s
->tmp3_i32
,
3646 tcg_constant_i32(dflag
- 1),
3649 s
->base
.is_jmp
= DISAS_JUMP
;
3651 case 4: /* jmp Ev */
3652 if (dflag
== MO_16
) {
3653 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
3655 gen_op_jmp_v(s
, s
->T0
);
3657 s
->base
.is_jmp
= DISAS_JUMP
;
3659 case 5: /* ljmp Ev */
3663 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
3664 gen_add_A0_im(s
, 1 << ot
);
3665 gen_op_ld_v(s
, MO_16
, s
->T0
, s
->A0
);
3667 if (PE(s
) && !VM86(s
)) {
3668 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3669 gen_helper_ljmp_protected(tcg_env
, s
->tmp2_i32
, s
->T1
,
3672 gen_op_movl_seg_T0_vm(s
, R_CS
);
3673 gen_op_jmp_v(s
, s
->T1
);
3675 s
->base
.is_jmp
= DISAS_JUMP
;
3677 case 6: /* push Ev */
3678 gen_push_v(s
, s
->T0
);
3685 case 0x84: /* test Ev, Gv */
3687 ot
= mo_b_d(b
, dflag
);
3689 modrm
= x86_ldub_code(env
, s
);
3690 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
3692 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3693 gen_op_mov_v_reg(s
, ot
, s
->T1
, reg
);
3694 gen_op_testl_T0_T1_cc(s
);
3695 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
3698 case 0xa8: /* test eAX, Iv */
3700 ot
= mo_b_d(b
, dflag
);
3701 val
= insn_get(env
, s
, ot
);
3703 gen_op_mov_v_reg(s
, ot
, s
->T0
, OR_EAX
);
3704 tcg_gen_movi_tl(s
->T1
, val
);
3705 gen_op_testl_T0_T1_cc(s
);
3706 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
3709 case 0x98: /* CWDE/CBW */
3711 #ifdef TARGET_X86_64
3713 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, R_EAX
);
3714 tcg_gen_ext32s_tl(s
->T0
, s
->T0
);
3715 gen_op_mov_reg_v(s
, MO_64
, R_EAX
, s
->T0
);
3719 gen_op_mov_v_reg(s
, MO_16
, s
->T0
, R_EAX
);
3720 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
3721 gen_op_mov_reg_v(s
, MO_32
, R_EAX
, s
->T0
);
3724 gen_op_mov_v_reg(s
, MO_8
, s
->T0
, R_EAX
);
3725 tcg_gen_ext8s_tl(s
->T0
, s
->T0
);
3726 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
3729 g_assert_not_reached();
3732 case 0x99: /* CDQ/CWD */
3734 #ifdef TARGET_X86_64
3736 gen_op_mov_v_reg(s
, MO_64
, s
->T0
, R_EAX
);
3737 tcg_gen_sari_tl(s
->T0
, s
->T0
, 63);
3738 gen_op_mov_reg_v(s
, MO_64
, R_EDX
, s
->T0
);
3742 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, R_EAX
);
3743 tcg_gen_ext32s_tl(s
->T0
, s
->T0
);
3744 tcg_gen_sari_tl(s
->T0
, s
->T0
, 31);
3745 gen_op_mov_reg_v(s
, MO_32
, R_EDX
, s
->T0
);
3748 gen_op_mov_v_reg(s
, MO_16
, s
->T0
, R_EAX
);
3749 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
3750 tcg_gen_sari_tl(s
->T0
, s
->T0
, 15);
3751 gen_op_mov_reg_v(s
, MO_16
, R_EDX
, s
->T0
);
3754 g_assert_not_reached();
3757 case 0x1af: /* imul Gv, Ev */
3758 case 0x69: /* imul Gv, Ev, I */
3761 modrm
= x86_ldub_code(env
, s
);
3762 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
3764 s
->rip_offset
= insn_const_size(ot
);
3767 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3769 val
= insn_get(env
, s
, ot
);
3770 tcg_gen_movi_tl(s
->T1
, val
);
3771 } else if (b
== 0x6b) {
3772 val
= (int8_t)insn_get(env
, s
, MO_8
);
3773 tcg_gen_movi_tl(s
->T1
, val
);
3775 gen_op_mov_v_reg(s
, ot
, s
->T1
, reg
);
3778 #ifdef TARGET_X86_64
3780 tcg_gen_muls2_i64(cpu_regs
[reg
], s
->T1
, s
->T0
, s
->T1
);
3781 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[reg
]);
3782 tcg_gen_sari_tl(cpu_cc_src
, cpu_cc_dst
, 63);
3783 tcg_gen_sub_tl(cpu_cc_src
, cpu_cc_src
, s
->T1
);
3787 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3788 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
3789 tcg_gen_muls2_i32(s
->tmp2_i32
, s
->tmp3_i32
,
3790 s
->tmp2_i32
, s
->tmp3_i32
);
3791 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp2_i32
);
3792 tcg_gen_sari_i32(s
->tmp2_i32
, s
->tmp2_i32
, 31);
3793 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[reg
]);
3794 tcg_gen_sub_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
3795 tcg_gen_extu_i32_tl(cpu_cc_src
, s
->tmp2_i32
);
3798 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
3799 tcg_gen_ext16s_tl(s
->T1
, s
->T1
);
3800 /* XXX: use 32 bit mul which could be faster */
3801 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
3802 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
3803 tcg_gen_ext16s_tl(s
->tmp0
, s
->T0
);
3804 tcg_gen_sub_tl(cpu_cc_src
, s
->T0
, s
->tmp0
);
3805 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3808 set_cc_op(s
, CC_OP_MULB
+ ot
);
3811 case 0x1c1: /* xadd Ev, Gv */
3812 ot
= mo_b_d(b
, dflag
);
3813 modrm
= x86_ldub_code(env
, s
);
3814 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
3815 mod
= (modrm
>> 6) & 3;
3816 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
3818 rm
= (modrm
& 7) | REX_B(s
);
3819 gen_op_mov_v_reg(s
, ot
, s
->T1
, rm
);
3820 tcg_gen_add_tl(s
->T0
, s
->T0
, s
->T1
);
3821 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
3822 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
3824 gen_lea_modrm(env
, s
, modrm
);
3825 if (s
->prefix
& PREFIX_LOCK
) {
3826 tcg_gen_atomic_fetch_add_tl(s
->T1
, s
->A0
, s
->T0
,
3827 s
->mem_index
, ot
| MO_LE
);
3828 tcg_gen_add_tl(s
->T0
, s
->T0
, s
->T1
);
3830 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
3831 tcg_gen_add_tl(s
->T0
, s
->T0
, s
->T1
);
3832 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
3834 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
3836 gen_op_update2_cc(s
);
3837 set_cc_op(s
, CC_OP_ADDB
+ ot
);
3840 case 0x1b1: /* cmpxchg Ev, Gv */
3842 TCGv oldv
, newv
, cmpv
, dest
;
3844 ot
= mo_b_d(b
, dflag
);
3845 modrm
= x86_ldub_code(env
, s
);
3846 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
3847 mod
= (modrm
>> 6) & 3;
3848 oldv
= tcg_temp_new();
3849 newv
= tcg_temp_new();
3850 cmpv
= tcg_temp_new();
3851 gen_op_mov_v_reg(s
, ot
, newv
, reg
);
3852 tcg_gen_mov_tl(cmpv
, cpu_regs
[R_EAX
]);
3854 if (s
->prefix
& PREFIX_LOCK
) {
3858 gen_lea_modrm(env
, s
, modrm
);
3859 tcg_gen_atomic_cmpxchg_tl(oldv
, s
->A0
, cmpv
, newv
,
3860 s
->mem_index
, ot
| MO_LE
);
3863 rm
= (modrm
& 7) | REX_B(s
);
3864 gen_op_mov_v_reg(s
, ot
, oldv
, rm
);
3868 * Unlike the memory case, where "the destination operand receives
3869 * a write cycle without regard to the result of the comparison",
3870 * rm must not be touched altogether if the write fails, including
3871 * not zero-extending it on 64-bit processors. So, precompute
3872 * the result of a successful writeback and perform the movcond
3873 * directly on cpu_regs. Also need to write accumulator first, in
3874 * case rm is part of RAX too.
3876 dest
= gen_op_deposit_reg_v(s
, ot
, rm
, newv
, newv
);
3877 tcg_gen_movcond_tl(TCG_COND_EQ
, dest
, oldv
, cmpv
, newv
, dest
);
3879 gen_lea_modrm(env
, s
, modrm
);
3880 gen_op_ld_v(s
, ot
, oldv
, s
->A0
);
3883 * Perform an unconditional store cycle like physical cpu;
3884 * must be before changing accumulator to ensure
3885 * idempotency if the store faults and the instruction
3888 tcg_gen_movcond_tl(TCG_COND_EQ
, newv
, oldv
, cmpv
, newv
, oldv
);
3889 gen_op_st_v(s
, ot
, newv
, s
->A0
);
3893 * Write EAX only if the cmpxchg fails; reuse newv as the destination,
3894 * since it's dead here.
3896 dest
= gen_op_deposit_reg_v(s
, ot
, R_EAX
, newv
, oldv
);
3897 tcg_gen_movcond_tl(TCG_COND_EQ
, dest
, oldv
, cmpv
, dest
, newv
);
3898 tcg_gen_mov_tl(cpu_cc_src
, oldv
);
3899 tcg_gen_mov_tl(s
->cc_srcT
, cmpv
);
3900 tcg_gen_sub_tl(cpu_cc_dst
, cmpv
, oldv
);
3901 set_cc_op(s
, CC_OP_SUBB
+ ot
);
3904 case 0x1c7: /* cmpxchg8b */
3905 modrm
= x86_ldub_code(env
, s
);
3906 mod
= (modrm
>> 6) & 3;
3907 switch ((modrm
>> 3) & 7) {
3908 case 1: /* CMPXCHG8, CMPXCHG16 */
3912 #ifdef TARGET_X86_64
3913 if (dflag
== MO_64
) {
3914 if (!(s
->cpuid_ext_features
& CPUID_EXT_CX16
)) {
3917 gen_cmpxchg16b(s
, env
, modrm
);
3921 if (!(s
->cpuid_features
& CPUID_CX8
)) {
3924 gen_cmpxchg8b(s
, env
, modrm
);
3927 case 7: /* RDSEED, RDPID with f3 prefix */
3929 (s
->prefix
& (PREFIX_LOCK
| PREFIX_REPNZ
))) {
3932 if (s
->prefix
& PREFIX_REPZ
) {
3933 if (!(s
->cpuid_ext_features
& CPUID_7_0_ECX_RDPID
)) {
3936 gen_helper_rdpid(s
->T0
, tcg_env
);
3937 rm
= (modrm
& 7) | REX_B(s
);
3938 gen_op_mov_reg_v(s
, dflag
, rm
, s
->T0
);
3941 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_RDSEED
)) {
3947 case 6: /* RDRAND */
3949 (s
->prefix
& (PREFIX_LOCK
| PREFIX_REPZ
| PREFIX_REPNZ
)) ||
3950 !(s
->cpuid_ext_features
& CPUID_EXT_RDRAND
)) {
3954 translator_io_start(&s
->base
);
3955 gen_helper_rdrand(s
->T0
, tcg_env
);
3956 rm
= (modrm
& 7) | REX_B(s
);
3957 gen_op_mov_reg_v(s
, dflag
, rm
, s
->T0
);
3958 set_cc_op(s
, CC_OP_EFLAGS
);
3966 /**************************/
3968 case 0x50 ... 0x57: /* push */
3969 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, (b
& 7) | REX_B(s
));
3970 gen_push_v(s
, s
->T0
);
3972 case 0x58 ... 0x5f: /* pop */
3974 /* NOTE: order is important for pop %sp */
3975 gen_pop_update(s
, ot
);
3976 gen_op_mov_reg_v(s
, ot
, (b
& 7) | REX_B(s
), s
->T0
);
3978 case 0x60: /* pusha */
3983 case 0x61: /* popa */
3988 case 0x68: /* push Iv */
3990 ot
= mo_pushpop(s
, dflag
);
3992 val
= insn_get(env
, s
, ot
);
3994 val
= (int8_t)insn_get(env
, s
, MO_8
);
3995 tcg_gen_movi_tl(s
->T0
, val
);
3996 gen_push_v(s
, s
->T0
);
3998 case 0x8f: /* pop Ev */
3999 modrm
= x86_ldub_code(env
, s
);
4000 mod
= (modrm
>> 6) & 3;
4003 /* NOTE: order is important for pop %sp */
4004 gen_pop_update(s
, ot
);
4005 rm
= (modrm
& 7) | REX_B(s
);
4006 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
4008 /* NOTE: order is important too for MMU exceptions */
4009 s
->popl_esp_hack
= 1 << ot
;
4010 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
4011 s
->popl_esp_hack
= 0;
4012 gen_pop_update(s
, ot
);
4015 case 0xc8: /* enter */
4018 val
= x86_lduw_code(env
, s
);
4019 level
= x86_ldub_code(env
, s
);
4020 gen_enter(s
, val
, level
);
4023 case 0xc9: /* leave */
4026 case 0x06: /* push es */
4027 case 0x0e: /* push cs */
4028 case 0x16: /* push ss */
4029 case 0x1e: /* push ds */
4032 gen_op_movl_T0_seg(s
, b
>> 3);
4033 gen_push_v(s
, s
->T0
);
4035 case 0x1a0: /* push fs */
4036 case 0x1a8: /* push gs */
4037 gen_op_movl_T0_seg(s
, (b
>> 3) & 7);
4038 gen_push_v(s
, s
->T0
);
4040 case 0x07: /* pop es */
4041 case 0x17: /* pop ss */
4042 case 0x1f: /* pop ds */
4047 gen_movl_seg_T0(s
, reg
);
4048 gen_pop_update(s
, ot
);
4050 case 0x1a1: /* pop fs */
4051 case 0x1a9: /* pop gs */
4053 gen_movl_seg_T0(s
, (b
>> 3) & 7);
4054 gen_pop_update(s
, ot
);
4057 /**************************/
4060 case 0x89: /* mov Gv, Ev */
4061 ot
= mo_b_d(b
, dflag
);
4062 modrm
= x86_ldub_code(env
, s
);
4063 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4065 /* generate a generic store */
4066 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
4069 case 0xc7: /* mov Ev, Iv */
4070 ot
= mo_b_d(b
, dflag
);
4071 modrm
= x86_ldub_code(env
, s
);
4072 mod
= (modrm
>> 6) & 3;
4074 s
->rip_offset
= insn_const_size(ot
);
4075 gen_lea_modrm(env
, s
, modrm
);
4077 val
= insn_get(env
, s
, ot
);
4078 tcg_gen_movi_tl(s
->T0
, val
);
4080 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
4082 gen_op_mov_reg_v(s
, ot
, (modrm
& 7) | REX_B(s
), s
->T0
);
4086 case 0x8b: /* mov Ev, Gv */
4087 ot
= mo_b_d(b
, dflag
);
4088 modrm
= x86_ldub_code(env
, s
);
4089 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4091 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4092 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
4094 case 0x8e: /* mov seg, Gv */
4095 modrm
= x86_ldub_code(env
, s
);
4096 reg
= (modrm
>> 3) & 7;
4097 if (reg
>= 6 || reg
== R_CS
)
4099 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
4100 gen_movl_seg_T0(s
, reg
);
4102 case 0x8c: /* mov Gv, seg */
4103 modrm
= x86_ldub_code(env
, s
);
4104 reg
= (modrm
>> 3) & 7;
4105 mod
= (modrm
>> 6) & 3;
4108 gen_op_movl_T0_seg(s
, reg
);
4109 ot
= mod
== 3 ? dflag
: MO_16
;
4110 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
4113 case 0x1b6: /* movzbS Gv, Eb */
4114 case 0x1b7: /* movzwS Gv, Eb */
4115 case 0x1be: /* movsbS Gv, Eb */
4116 case 0x1bf: /* movswS Gv, Eb */
4121 /* d_ot is the size of destination */
4123 /* ot is the size of source */
4124 ot
= (b
& 1) + MO_8
;
4125 /* s_ot is the sign+size of source */
4126 s_ot
= b
& 8 ? MO_SIGN
| ot
: ot
;
4128 modrm
= x86_ldub_code(env
, s
);
4129 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4130 mod
= (modrm
>> 6) & 3;
4131 rm
= (modrm
& 7) | REX_B(s
);
4134 if (s_ot
== MO_SB
&& byte_reg_is_xH(s
, rm
)) {
4135 tcg_gen_sextract_tl(s
->T0
, cpu_regs
[rm
- 4], 8, 8);
4137 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
4140 tcg_gen_ext8u_tl(s
->T0
, s
->T0
);
4143 tcg_gen_ext8s_tl(s
->T0
, s
->T0
);
4146 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
4150 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
4154 gen_op_mov_reg_v(s
, d_ot
, reg
, s
->T0
);
4156 gen_lea_modrm(env
, s
, modrm
);
4157 gen_op_ld_v(s
, s_ot
, s
->T0
, s
->A0
);
4158 gen_op_mov_reg_v(s
, d_ot
, reg
, s
->T0
);
4163 case 0x8d: /* lea */
4164 modrm
= x86_ldub_code(env
, s
);
4165 mod
= (modrm
>> 6) & 3;
4168 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4170 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
4171 TCGv ea
= gen_lea_modrm_1(s
, a
, false);
4172 gen_lea_v_seg(s
, s
->aflag
, ea
, -1, -1);
4173 gen_op_mov_reg_v(s
, dflag
, reg
, s
->A0
);
4177 case 0xa0: /* mov EAX, Ov */
4179 case 0xa2: /* mov Ov, EAX */
4182 target_ulong offset_addr
;
4184 ot
= mo_b_d(b
, dflag
);
4185 offset_addr
= insn_get_addr(env
, s
, s
->aflag
);
4186 tcg_gen_movi_tl(s
->A0
, offset_addr
);
4187 gen_add_A0_ds_seg(s
);
4189 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
4190 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T0
);
4192 gen_op_mov_v_reg(s
, ot
, s
->T0
, R_EAX
);
4193 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
4197 case 0xd7: /* xlat */
4198 tcg_gen_mov_tl(s
->A0
, cpu_regs
[R_EBX
]);
4199 tcg_gen_ext8u_tl(s
->T0
, cpu_regs
[R_EAX
]);
4200 tcg_gen_add_tl(s
->A0
, s
->A0
, s
->T0
);
4201 gen_extu(s
->aflag
, s
->A0
);
4202 gen_add_A0_ds_seg(s
);
4203 gen_op_ld_v(s
, MO_8
, s
->T0
, s
->A0
);
4204 gen_op_mov_reg_v(s
, MO_8
, R_EAX
, s
->T0
);
4206 case 0xb0 ... 0xb7: /* mov R, Ib */
4207 val
= insn_get(env
, s
, MO_8
);
4208 tcg_gen_movi_tl(s
->T0
, val
);
4209 gen_op_mov_reg_v(s
, MO_8
, (b
& 7) | REX_B(s
), s
->T0
);
4211 case 0xb8 ... 0xbf: /* mov R, Iv */
4212 #ifdef TARGET_X86_64
4213 if (dflag
== MO_64
) {
4216 tmp
= x86_ldq_code(env
, s
);
4217 reg
= (b
& 7) | REX_B(s
);
4218 tcg_gen_movi_tl(s
->T0
, tmp
);
4219 gen_op_mov_reg_v(s
, MO_64
, reg
, s
->T0
);
4224 val
= insn_get(env
, s
, ot
);
4225 reg
= (b
& 7) | REX_B(s
);
4226 tcg_gen_movi_tl(s
->T0
, val
);
4227 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
4231 case 0x91 ... 0x97: /* xchg R, EAX */
4234 reg
= (b
& 7) | REX_B(s
);
4238 case 0x87: /* xchg Ev, Gv */
4239 ot
= mo_b_d(b
, dflag
);
4240 modrm
= x86_ldub_code(env
, s
);
4241 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4242 mod
= (modrm
>> 6) & 3;
4244 rm
= (modrm
& 7) | REX_B(s
);
4246 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
4247 gen_op_mov_v_reg(s
, ot
, s
->T1
, rm
);
4248 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
4249 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
4251 gen_lea_modrm(env
, s
, modrm
);
4252 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
4253 /* for xchg, lock is implicit */
4254 tcg_gen_atomic_xchg_tl(s
->T1
, s
->A0
, s
->T0
,
4255 s
->mem_index
, ot
| MO_LE
);
4256 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
4259 case 0xc4: /* les Gv */
4260 /* In CODE64 this is VEX3; see above. */
4263 case 0xc5: /* lds Gv */
4264 /* In CODE64 this is VEX2; see above. */
4267 case 0x1b2: /* lss Gv */
4270 case 0x1b4: /* lfs Gv */
4273 case 0x1b5: /* lgs Gv */
4276 ot
= dflag
!= MO_16
? MO_32
: MO_16
;
4277 modrm
= x86_ldub_code(env
, s
);
4278 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4279 mod
= (modrm
>> 6) & 3;
4282 gen_lea_modrm(env
, s
, modrm
);
4283 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
4284 gen_add_A0_im(s
, 1 << ot
);
4285 /* load the segment first to handle exceptions properly */
4286 gen_op_ld_v(s
, MO_16
, s
->T0
, s
->A0
);
4287 gen_movl_seg_T0(s
, op
);
4288 /* then put the data */
4289 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
4292 /************************/
4300 ot
= mo_b_d(b
, dflag
);
4301 modrm
= x86_ldub_code(env
, s
);
4302 mod
= (modrm
>> 6) & 3;
4303 op
= (modrm
>> 3) & 7;
4309 gen_lea_modrm(env
, s
, modrm
);
4312 opreg
= (modrm
& 7) | REX_B(s
);
4317 gen_shift(s
, op
, ot
, opreg
, OR_ECX
);
4320 shift
= x86_ldub_code(env
, s
);
4322 gen_shifti(s
, op
, ot
, opreg
, shift
);
4337 case 0x1a4: /* shld imm */
4341 case 0x1a5: /* shld cl */
4345 case 0x1ac: /* shrd imm */
4349 case 0x1ad: /* shrd cl */
4354 modrm
= x86_ldub_code(env
, s
);
4355 mod
= (modrm
>> 6) & 3;
4356 rm
= (modrm
& 7) | REX_B(s
);
4357 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4359 gen_lea_modrm(env
, s
, modrm
);
4364 gen_op_mov_v_reg(s
, ot
, s
->T1
, reg
);
4367 TCGv imm
= tcg_constant_tl(x86_ldub_code(env
, s
));
4368 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, imm
);
4370 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, cpu_regs
[R_ECX
]);
4374 /************************/
4378 bool update_fip
= true;
4380 if (s
->flags
& (HF_EM_MASK
| HF_TS_MASK
)) {
4381 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
4382 /* XXX: what to do if illegal op ? */
4383 gen_exception(s
, EXCP07_PREX
);
4386 modrm
= x86_ldub_code(env
, s
);
4387 mod
= (modrm
>> 6) & 3;
4389 op
= ((b
& 7) << 3) | ((modrm
>> 3) & 7);
4392 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
4393 TCGv ea
= gen_lea_modrm_1(s
, a
, false);
4394 TCGv last_addr
= tcg_temp_new();
4395 bool update_fdp
= true;
4397 tcg_gen_mov_tl(last_addr
, ea
);
4398 gen_lea_v_seg(s
, s
->aflag
, ea
, a
.def_seg
, s
->override
);
4401 case 0x00 ... 0x07: /* fxxxs */
4402 case 0x10 ... 0x17: /* fixxxl */
4403 case 0x20 ... 0x27: /* fxxxl */
4404 case 0x30 ... 0x37: /* fixxx */
4411 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4412 s
->mem_index
, MO_LEUL
);
4413 gen_helper_flds_FT0(tcg_env
, s
->tmp2_i32
);
4416 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4417 s
->mem_index
, MO_LEUL
);
4418 gen_helper_fildl_FT0(tcg_env
, s
->tmp2_i32
);
4421 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
,
4422 s
->mem_index
, MO_LEUQ
);
4423 gen_helper_fldl_FT0(tcg_env
, s
->tmp1_i64
);
4427 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4428 s
->mem_index
, MO_LESW
);
4429 gen_helper_fildl_FT0(tcg_env
, s
->tmp2_i32
);
4433 gen_helper_fp_arith_ST0_FT0(op1
);
4435 /* fcomp needs pop */
4436 gen_helper_fpop(tcg_env
);
4440 case 0x08: /* flds */
4441 case 0x0a: /* fsts */
4442 case 0x0b: /* fstps */
4443 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
4444 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
4445 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
4450 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4451 s
->mem_index
, MO_LEUL
);
4452 gen_helper_flds_ST0(tcg_env
, s
->tmp2_i32
);
4455 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4456 s
->mem_index
, MO_LEUL
);
4457 gen_helper_fildl_ST0(tcg_env
, s
->tmp2_i32
);
4460 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
,
4461 s
->mem_index
, MO_LEUQ
);
4462 gen_helper_fldl_ST0(tcg_env
, s
->tmp1_i64
);
4466 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4467 s
->mem_index
, MO_LESW
);
4468 gen_helper_fildl_ST0(tcg_env
, s
->tmp2_i32
);
4473 /* XXX: the corresponding CPUID bit must be tested ! */
4476 gen_helper_fisttl_ST0(s
->tmp2_i32
, tcg_env
);
4477 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
4478 s
->mem_index
, MO_LEUL
);
4481 gen_helper_fisttll_ST0(s
->tmp1_i64
, tcg_env
);
4482 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
,
4483 s
->mem_index
, MO_LEUQ
);
4487 gen_helper_fistt_ST0(s
->tmp2_i32
, tcg_env
);
4488 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
4489 s
->mem_index
, MO_LEUW
);
4492 gen_helper_fpop(tcg_env
);
4497 gen_helper_fsts_ST0(s
->tmp2_i32
, tcg_env
);
4498 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
4499 s
->mem_index
, MO_LEUL
);
4502 gen_helper_fistl_ST0(s
->tmp2_i32
, tcg_env
);
4503 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
4504 s
->mem_index
, MO_LEUL
);
4507 gen_helper_fstl_ST0(s
->tmp1_i64
, tcg_env
);
4508 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
,
4509 s
->mem_index
, MO_LEUQ
);
4513 gen_helper_fist_ST0(s
->tmp2_i32
, tcg_env
);
4514 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
4515 s
->mem_index
, MO_LEUW
);
4518 if ((op
& 7) == 3) {
4519 gen_helper_fpop(tcg_env
);
4524 case 0x0c: /* fldenv mem */
4525 gen_helper_fldenv(tcg_env
, s
->A0
,
4526 tcg_constant_i32(dflag
- 1));
4527 update_fip
= update_fdp
= false;
4529 case 0x0d: /* fldcw mem */
4530 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4531 s
->mem_index
, MO_LEUW
);
4532 gen_helper_fldcw(tcg_env
, s
->tmp2_i32
);
4533 update_fip
= update_fdp
= false;
4535 case 0x0e: /* fnstenv mem */
4536 gen_helper_fstenv(tcg_env
, s
->A0
,
4537 tcg_constant_i32(dflag
- 1));
4538 update_fip
= update_fdp
= false;
4540 case 0x0f: /* fnstcw mem */
4541 gen_helper_fnstcw(s
->tmp2_i32
, tcg_env
);
4542 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
4543 s
->mem_index
, MO_LEUW
);
4544 update_fip
= update_fdp
= false;
4546 case 0x1d: /* fldt mem */
4547 gen_helper_fldt_ST0(tcg_env
, s
->A0
);
4549 case 0x1f: /* fstpt mem */
4550 gen_helper_fstt_ST0(tcg_env
, s
->A0
);
4551 gen_helper_fpop(tcg_env
);
4553 case 0x2c: /* frstor mem */
4554 gen_helper_frstor(tcg_env
, s
->A0
,
4555 tcg_constant_i32(dflag
- 1));
4556 update_fip
= update_fdp
= false;
4558 case 0x2e: /* fnsave mem */
4559 gen_helper_fsave(tcg_env
, s
->A0
,
4560 tcg_constant_i32(dflag
- 1));
4561 update_fip
= update_fdp
= false;
4563 case 0x2f: /* fnstsw mem */
4564 gen_helper_fnstsw(s
->tmp2_i32
, tcg_env
);
4565 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
4566 s
->mem_index
, MO_LEUW
);
4567 update_fip
= update_fdp
= false;
4569 case 0x3c: /* fbld */
4570 gen_helper_fbld_ST0(tcg_env
, s
->A0
);
4572 case 0x3e: /* fbstp */
4573 gen_helper_fbst_ST0(tcg_env
, s
->A0
);
4574 gen_helper_fpop(tcg_env
);
4576 case 0x3d: /* fildll */
4577 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
,
4578 s
->mem_index
, MO_LEUQ
);
4579 gen_helper_fildll_ST0(tcg_env
, s
->tmp1_i64
);
4581 case 0x3f: /* fistpll */
4582 gen_helper_fistll_ST0(s
->tmp1_i64
, tcg_env
);
4583 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
,
4584 s
->mem_index
, MO_LEUQ
);
4585 gen_helper_fpop(tcg_env
);
4592 int last_seg
= s
->override
>= 0 ? s
->override
: a
.def_seg
;
4594 tcg_gen_ld_i32(s
->tmp2_i32
, tcg_env
,
4595 offsetof(CPUX86State
,
4596 segs
[last_seg
].selector
));
4597 tcg_gen_st16_i32(s
->tmp2_i32
, tcg_env
,
4598 offsetof(CPUX86State
, fpds
));
4599 tcg_gen_st_tl(last_addr
, tcg_env
,
4600 offsetof(CPUX86State
, fpdp
));
4603 /* register float ops */
4607 case 0x08: /* fld sti */
4608 gen_helper_fpush(tcg_env
);
4609 gen_helper_fmov_ST0_STN(tcg_env
,
4610 tcg_constant_i32((opreg
+ 1) & 7));
4612 case 0x09: /* fxchg sti */
4613 case 0x29: /* fxchg4 sti, undocumented op */
4614 case 0x39: /* fxchg7 sti, undocumented op */
4615 gen_helper_fxchg_ST0_STN(tcg_env
, tcg_constant_i32(opreg
));
4617 case 0x0a: /* grp d9/2 */
4621 * check exceptions (FreeBSD FPU probe)
4622 * needs to be treated as I/O because of ferr_irq
4624 translator_io_start(&s
->base
);
4625 gen_helper_fwait(tcg_env
);
4632 case 0x0c: /* grp d9/4 */
4635 gen_helper_fchs_ST0(tcg_env
);
4638 gen_helper_fabs_ST0(tcg_env
);
4641 gen_helper_fldz_FT0(tcg_env
);
4642 gen_helper_fcom_ST0_FT0(tcg_env
);
4645 gen_helper_fxam_ST0(tcg_env
);
4651 case 0x0d: /* grp d9/5 */
4655 gen_helper_fpush(tcg_env
);
4656 gen_helper_fld1_ST0(tcg_env
);
4659 gen_helper_fpush(tcg_env
);
4660 gen_helper_fldl2t_ST0(tcg_env
);
4663 gen_helper_fpush(tcg_env
);
4664 gen_helper_fldl2e_ST0(tcg_env
);
4667 gen_helper_fpush(tcg_env
);
4668 gen_helper_fldpi_ST0(tcg_env
);
4671 gen_helper_fpush(tcg_env
);
4672 gen_helper_fldlg2_ST0(tcg_env
);
4675 gen_helper_fpush(tcg_env
);
4676 gen_helper_fldln2_ST0(tcg_env
);
4679 gen_helper_fpush(tcg_env
);
4680 gen_helper_fldz_ST0(tcg_env
);
4687 case 0x0e: /* grp d9/6 */
4690 gen_helper_f2xm1(tcg_env
);
4693 gen_helper_fyl2x(tcg_env
);
4696 gen_helper_fptan(tcg_env
);
4698 case 3: /* fpatan */
4699 gen_helper_fpatan(tcg_env
);
4701 case 4: /* fxtract */
4702 gen_helper_fxtract(tcg_env
);
4704 case 5: /* fprem1 */
4705 gen_helper_fprem1(tcg_env
);
4707 case 6: /* fdecstp */
4708 gen_helper_fdecstp(tcg_env
);
4711 case 7: /* fincstp */
4712 gen_helper_fincstp(tcg_env
);
4716 case 0x0f: /* grp d9/7 */
4719 gen_helper_fprem(tcg_env
);
4721 case 1: /* fyl2xp1 */
4722 gen_helper_fyl2xp1(tcg_env
);
4725 gen_helper_fsqrt(tcg_env
);
4727 case 3: /* fsincos */
4728 gen_helper_fsincos(tcg_env
);
4730 case 5: /* fscale */
4731 gen_helper_fscale(tcg_env
);
4733 case 4: /* frndint */
4734 gen_helper_frndint(tcg_env
);
4737 gen_helper_fsin(tcg_env
);
4741 gen_helper_fcos(tcg_env
);
4745 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
4746 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
4747 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
4753 gen_helper_fp_arith_STN_ST0(op1
, opreg
);
4755 gen_helper_fpop(tcg_env
);
4758 gen_helper_fmov_FT0_STN(tcg_env
,
4759 tcg_constant_i32(opreg
));
4760 gen_helper_fp_arith_ST0_FT0(op1
);
4764 case 0x02: /* fcom */
4765 case 0x22: /* fcom2, undocumented op */
4766 gen_helper_fmov_FT0_STN(tcg_env
, tcg_constant_i32(opreg
));
4767 gen_helper_fcom_ST0_FT0(tcg_env
);
4769 case 0x03: /* fcomp */
4770 case 0x23: /* fcomp3, undocumented op */
4771 case 0x32: /* fcomp5, undocumented op */
4772 gen_helper_fmov_FT0_STN(tcg_env
, tcg_constant_i32(opreg
));
4773 gen_helper_fcom_ST0_FT0(tcg_env
);
4774 gen_helper_fpop(tcg_env
);
4776 case 0x15: /* da/5 */
4778 case 1: /* fucompp */
4779 gen_helper_fmov_FT0_STN(tcg_env
, tcg_constant_i32(1));
4780 gen_helper_fucom_ST0_FT0(tcg_env
);
4781 gen_helper_fpop(tcg_env
);
4782 gen_helper_fpop(tcg_env
);
4790 case 0: /* feni (287 only, just do nop here) */
4792 case 1: /* fdisi (287 only, just do nop here) */
4795 gen_helper_fclex(tcg_env
);
4798 case 3: /* fninit */
4799 gen_helper_fninit(tcg_env
);
4802 case 4: /* fsetpm (287 only, just do nop here) */
4808 case 0x1d: /* fucomi */
4809 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
4812 gen_update_cc_op(s
);
4813 gen_helper_fmov_FT0_STN(tcg_env
, tcg_constant_i32(opreg
));
4814 gen_helper_fucomi_ST0_FT0(tcg_env
);
4815 set_cc_op(s
, CC_OP_EFLAGS
);
4817 case 0x1e: /* fcomi */
4818 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
4821 gen_update_cc_op(s
);
4822 gen_helper_fmov_FT0_STN(tcg_env
, tcg_constant_i32(opreg
));
4823 gen_helper_fcomi_ST0_FT0(tcg_env
);
4824 set_cc_op(s
, CC_OP_EFLAGS
);
4826 case 0x28: /* ffree sti */
4827 gen_helper_ffree_STN(tcg_env
, tcg_constant_i32(opreg
));
4829 case 0x2a: /* fst sti */
4830 gen_helper_fmov_STN_ST0(tcg_env
, tcg_constant_i32(opreg
));
4832 case 0x2b: /* fstp sti */
4833 case 0x0b: /* fstp1 sti, undocumented op */
4834 case 0x3a: /* fstp8 sti, undocumented op */
4835 case 0x3b: /* fstp9 sti, undocumented op */
4836 gen_helper_fmov_STN_ST0(tcg_env
, tcg_constant_i32(opreg
));
4837 gen_helper_fpop(tcg_env
);
4839 case 0x2c: /* fucom st(i) */
4840 gen_helper_fmov_FT0_STN(tcg_env
, tcg_constant_i32(opreg
));
4841 gen_helper_fucom_ST0_FT0(tcg_env
);
4843 case 0x2d: /* fucomp st(i) */
4844 gen_helper_fmov_FT0_STN(tcg_env
, tcg_constant_i32(opreg
));
4845 gen_helper_fucom_ST0_FT0(tcg_env
);
4846 gen_helper_fpop(tcg_env
);
4848 case 0x33: /* de/3 */
4850 case 1: /* fcompp */
4851 gen_helper_fmov_FT0_STN(tcg_env
, tcg_constant_i32(1));
4852 gen_helper_fcom_ST0_FT0(tcg_env
);
4853 gen_helper_fpop(tcg_env
);
4854 gen_helper_fpop(tcg_env
);
4860 case 0x38: /* ffreep sti, undocumented op */
4861 gen_helper_ffree_STN(tcg_env
, tcg_constant_i32(opreg
));
4862 gen_helper_fpop(tcg_env
);
4864 case 0x3c: /* df/4 */
4867 gen_helper_fnstsw(s
->tmp2_i32
, tcg_env
);
4868 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
4869 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
4875 case 0x3d: /* fucomip */
4876 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
4879 gen_update_cc_op(s
);
4880 gen_helper_fmov_FT0_STN(tcg_env
, tcg_constant_i32(opreg
));
4881 gen_helper_fucomi_ST0_FT0(tcg_env
);
4882 gen_helper_fpop(tcg_env
);
4883 set_cc_op(s
, CC_OP_EFLAGS
);
4885 case 0x3e: /* fcomip */
4886 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
4889 gen_update_cc_op(s
);
4890 gen_helper_fmov_FT0_STN(tcg_env
, tcg_constant_i32(opreg
));
4891 gen_helper_fcomi_ST0_FT0(tcg_env
);
4892 gen_helper_fpop(tcg_env
);
4893 set_cc_op(s
, CC_OP_EFLAGS
);
4895 case 0x10 ... 0x13: /* fcmovxx */
4900 static const uint8_t fcmov_cc
[8] = {
4907 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
4910 op1
= fcmov_cc
[op
& 3] | (((op
>> 3) & 1) ^ 1);
4911 l1
= gen_new_label();
4912 gen_jcc1_noeob(s
, op1
, l1
);
4913 gen_helper_fmov_ST0_STN(tcg_env
,
4914 tcg_constant_i32(opreg
));
4924 tcg_gen_ld_i32(s
->tmp2_i32
, tcg_env
,
4925 offsetof(CPUX86State
, segs
[R_CS
].selector
));
4926 tcg_gen_st16_i32(s
->tmp2_i32
, tcg_env
,
4927 offsetof(CPUX86State
, fpcs
));
4928 tcg_gen_st_tl(eip_cur_tl(s
),
4929 tcg_env
, offsetof(CPUX86State
, fpip
));
4933 /************************/
4936 case 0xa4: /* movsS */
4938 ot
= mo_b_d(b
, dflag
);
4939 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
4940 gen_repz_movs(s
, ot
);
4946 case 0xaa: /* stosS */
4948 ot
= mo_b_d(b
, dflag
);
4949 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
4950 gen_repz_stos(s
, ot
);
4955 case 0xac: /* lodsS */
4957 ot
= mo_b_d(b
, dflag
);
4958 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
4959 gen_repz_lods(s
, ot
);
4964 case 0xae: /* scasS */
4966 ot
= mo_b_d(b
, dflag
);
4967 if (prefixes
& PREFIX_REPNZ
) {
4968 gen_repz_scas(s
, ot
, 1);
4969 } else if (prefixes
& PREFIX_REPZ
) {
4970 gen_repz_scas(s
, ot
, 0);
4976 case 0xa6: /* cmpsS */
4978 ot
= mo_b_d(b
, dflag
);
4979 if (prefixes
& PREFIX_REPNZ
) {
4980 gen_repz_cmps(s
, ot
, 1);
4981 } else if (prefixes
& PREFIX_REPZ
) {
4982 gen_repz_cmps(s
, ot
, 0);
4987 case 0x6c: /* insS */
4989 ot
= mo_b_d32(b
, dflag
);
4990 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
4991 tcg_gen_ext16u_i32(s
->tmp2_i32
, s
->tmp2_i32
);
4992 if (!gen_check_io(s
, ot
, s
->tmp2_i32
,
4993 SVM_IOIO_TYPE_MASK
| SVM_IOIO_STR_MASK
)) {
4996 translator_io_start(&s
->base
);
4997 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
4998 gen_repz_ins(s
, ot
);
5003 case 0x6e: /* outsS */
5005 ot
= mo_b_d32(b
, dflag
);
5006 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
5007 tcg_gen_ext16u_i32(s
->tmp2_i32
, s
->tmp2_i32
);
5008 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, SVM_IOIO_STR_MASK
)) {
5011 translator_io_start(&s
->base
);
5012 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
5013 gen_repz_outs(s
, ot
);
5019 /************************/
5024 ot
= mo_b_d32(b
, dflag
);
5025 val
= x86_ldub_code(env
, s
);
5026 tcg_gen_movi_i32(s
->tmp2_i32
, val
);
5027 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, SVM_IOIO_TYPE_MASK
)) {
5030 translator_io_start(&s
->base
);
5031 gen_helper_in_func(ot
, s
->T1
, s
->tmp2_i32
);
5032 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T1
);
5033 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
5037 ot
= mo_b_d32(b
, dflag
);
5038 val
= x86_ldub_code(env
, s
);
5039 tcg_gen_movi_i32(s
->tmp2_i32
, val
);
5040 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, 0)) {
5043 translator_io_start(&s
->base
);
5044 gen_op_mov_v_reg(s
, ot
, s
->T1
, R_EAX
);
5045 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
5046 gen_helper_out_func(ot
, s
->tmp2_i32
, s
->tmp3_i32
);
5047 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
5051 ot
= mo_b_d32(b
, dflag
);
5052 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
5053 tcg_gen_ext16u_i32(s
->tmp2_i32
, s
->tmp2_i32
);
5054 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, SVM_IOIO_TYPE_MASK
)) {
5057 translator_io_start(&s
->base
);
5058 gen_helper_in_func(ot
, s
->T1
, s
->tmp2_i32
);
5059 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T1
);
5060 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
5064 ot
= mo_b_d32(b
, dflag
);
5065 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
5066 tcg_gen_ext16u_i32(s
->tmp2_i32
, s
->tmp2_i32
);
5067 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, 0)) {
5070 translator_io_start(&s
->base
);
5071 gen_op_mov_v_reg(s
, ot
, s
->T1
, R_EAX
);
5072 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
5073 gen_helper_out_func(ot
, s
->tmp2_i32
, s
->tmp3_i32
);
5074 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
5077 /************************/
5079 case 0xc2: /* ret im */
5080 val
= x86_ldsw_code(env
, s
);
5082 gen_stack_update(s
, val
+ (1 << ot
));
5083 /* Note that gen_pop_T0 uses a zero-extending load. */
5084 gen_op_jmp_v(s
, s
->T0
);
5086 s
->base
.is_jmp
= DISAS_JUMP
;
5088 case 0xc3: /* ret */
5090 gen_pop_update(s
, ot
);
5091 /* Note that gen_pop_T0 uses a zero-extending load. */
5092 gen_op_jmp_v(s
, s
->T0
);
5094 s
->base
.is_jmp
= DISAS_JUMP
;
5096 case 0xca: /* lret im */
5097 val
= x86_ldsw_code(env
, s
);
5099 if (PE(s
) && !VM86(s
)) {
5100 gen_update_cc_op(s
);
5101 gen_update_eip_cur(s
);
5102 gen_helper_lret_protected(tcg_env
, tcg_constant_i32(dflag
- 1),
5103 tcg_constant_i32(val
));
5107 gen_op_ld_v(s
, dflag
, s
->T0
, s
->A0
);
5108 /* NOTE: keeping EIP updated is not a problem in case of
5110 gen_op_jmp_v(s
, s
->T0
);
5112 gen_add_A0_im(s
, 1 << dflag
);
5113 gen_op_ld_v(s
, dflag
, s
->T0
, s
->A0
);
5114 gen_op_movl_seg_T0_vm(s
, R_CS
);
5115 /* add stack offset */
5116 gen_stack_update(s
, val
+ (2 << dflag
));
5118 s
->base
.is_jmp
= DISAS_EOB_ONLY
;
5120 case 0xcb: /* lret */
5123 case 0xcf: /* iret */
5124 gen_svm_check_intercept(s
, SVM_EXIT_IRET
);
5125 if (!PE(s
) || VM86(s
)) {
5126 /* real mode or vm86 mode */
5127 if (!check_vm86_iopl(s
)) {
5130 gen_helper_iret_real(tcg_env
, tcg_constant_i32(dflag
- 1));
5132 gen_helper_iret_protected(tcg_env
, tcg_constant_i32(dflag
- 1),
5135 set_cc_op(s
, CC_OP_EFLAGS
);
5136 s
->base
.is_jmp
= DISAS_EOB_ONLY
;
5138 case 0xe8: /* call im */
5140 int diff
= (dflag
!= MO_16
5141 ? (int32_t)insn_get(env
, s
, MO_32
)
5142 : (int16_t)insn_get(env
, s
, MO_16
));
5143 gen_push_v(s
, eip_next_tl(s
));
5145 gen_jmp_rel(s
, dflag
, diff
, 0);
5148 case 0x9a: /* lcall im */
5150 unsigned int selector
, offset
;
5155 offset
= insn_get(env
, s
, ot
);
5156 selector
= insn_get(env
, s
, MO_16
);
5158 tcg_gen_movi_tl(s
->T0
, selector
);
5159 tcg_gen_movi_tl(s
->T1
, offset
);
5162 case 0xe9: /* jmp im */
5164 int diff
= (dflag
!= MO_16
5165 ? (int32_t)insn_get(env
, s
, MO_32
)
5166 : (int16_t)insn_get(env
, s
, MO_16
));
5168 gen_jmp_rel(s
, dflag
, diff
, 0);
5171 case 0xea: /* ljmp im */
5173 unsigned int selector
, offset
;
5178 offset
= insn_get(env
, s
, ot
);
5179 selector
= insn_get(env
, s
, MO_16
);
5181 tcg_gen_movi_tl(s
->T0
, selector
);
5182 tcg_gen_movi_tl(s
->T1
, offset
);
5185 case 0xeb: /* jmp Jb */
5187 int diff
= (int8_t)insn_get(env
, s
, MO_8
);
5188 gen_jmp_rel(s
, dflag
, diff
, 0);
5191 case 0x70 ... 0x7f: /* jcc Jb */
5193 int diff
= (int8_t)insn_get(env
, s
, MO_8
);
5195 gen_jcc(s
, b
, diff
);
5198 case 0x180 ... 0x18f: /* jcc Jv */
5200 int diff
= (dflag
!= MO_16
5201 ? (int32_t)insn_get(env
, s
, MO_32
)
5202 : (int16_t)insn_get(env
, s
, MO_16
));
5204 gen_jcc(s
, b
, diff
);
5208 case 0x190 ... 0x19f: /* setcc Gv */
5209 modrm
= x86_ldub_code(env
, s
);
5210 gen_setcc1(s
, b
, s
->T0
);
5211 gen_ldst_modrm(env
, s
, modrm
, MO_8
, OR_TMP0
, 1);
5213 case 0x140 ... 0x14f: /* cmov Gv, Ev */
5214 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
5218 modrm
= x86_ldub_code(env
, s
);
5219 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5220 gen_cmovcc1(env
, s
, ot
, b
, modrm
, reg
);
5223 /************************/
5225 case 0x9c: /* pushf */
5226 gen_svm_check_intercept(s
, SVM_EXIT_PUSHF
);
5227 if (check_vm86_iopl(s
)) {
5228 gen_update_cc_op(s
);
5229 gen_helper_read_eflags(s
->T0
, tcg_env
);
5230 gen_push_v(s
, s
->T0
);
5233 case 0x9d: /* popf */
5234 gen_svm_check_intercept(s
, SVM_EXIT_POPF
);
5235 if (check_vm86_iopl(s
)) {
5236 int mask
= TF_MASK
| AC_MASK
| ID_MASK
| NT_MASK
;
5239 mask
|= IF_MASK
| IOPL_MASK
;
5240 } else if (CPL(s
) <= IOPL(s
)) {
5243 if (dflag
== MO_16
) {
5248 gen_helper_write_eflags(tcg_env
, s
->T0
, tcg_constant_i32(mask
));
5249 gen_pop_update(s
, ot
);
5250 set_cc_op(s
, CC_OP_EFLAGS
);
5251 /* abort translation because TF/AC flag may change */
5252 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
5255 case 0x9e: /* sahf */
5256 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
5258 tcg_gen_shri_tl(s
->T0
, cpu_regs
[R_EAX
], 8);
5259 gen_compute_eflags(s
);
5260 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, CC_O
);
5261 tcg_gen_andi_tl(s
->T0
, s
->T0
, CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
5262 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, s
->T0
);
5264 case 0x9f: /* lahf */
5265 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
5267 gen_compute_eflags(s
);
5268 /* Note: gen_compute_eflags() only gives the condition codes */
5269 tcg_gen_ori_tl(s
->T0
, cpu_cc_src
, 0x02);
5270 tcg_gen_deposit_tl(cpu_regs
[R_EAX
], cpu_regs
[R_EAX
], s
->T0
, 8, 8);
5272 case 0xf5: /* cmc */
5273 gen_compute_eflags(s
);
5274 tcg_gen_xori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
5276 case 0xf8: /* clc */
5277 gen_compute_eflags(s
);
5278 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_C
);
5280 case 0xf9: /* stc */
5281 gen_compute_eflags(s
);
5282 tcg_gen_ori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
5284 case 0xfc: /* cld */
5285 tcg_gen_movi_i32(s
->tmp2_i32
, 1);
5286 tcg_gen_st_i32(s
->tmp2_i32
, tcg_env
, offsetof(CPUX86State
, df
));
5288 case 0xfd: /* std */
5289 tcg_gen_movi_i32(s
->tmp2_i32
, -1);
5290 tcg_gen_st_i32(s
->tmp2_i32
, tcg_env
, offsetof(CPUX86State
, df
));
5293 /************************/
5294 /* bit operations */
5295 case 0x1ba: /* bt/bts/btr/btc Gv, im */
5297 modrm
= x86_ldub_code(env
, s
);
5298 op
= (modrm
>> 3) & 7;
5299 mod
= (modrm
>> 6) & 3;
5300 rm
= (modrm
& 7) | REX_B(s
);
5303 gen_lea_modrm(env
, s
, modrm
);
5304 if (!(s
->prefix
& PREFIX_LOCK
)) {
5305 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
5308 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
5311 val
= x86_ldub_code(env
, s
);
5312 tcg_gen_movi_tl(s
->T1
, val
);
5317 case 0x1a3: /* bt Gv, Ev */
5320 case 0x1ab: /* bts */
5323 case 0x1b3: /* btr */
5326 case 0x1bb: /* btc */
5330 modrm
= x86_ldub_code(env
, s
);
5331 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5332 mod
= (modrm
>> 6) & 3;
5333 rm
= (modrm
& 7) | REX_B(s
);
5334 gen_op_mov_v_reg(s
, MO_32
, s
->T1
, reg
);
5336 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
5337 /* specific case: we need to add a displacement */
5338 gen_exts(ot
, s
->T1
);
5339 tcg_gen_sari_tl(s
->tmp0
, s
->T1
, 3 + ot
);
5340 tcg_gen_shli_tl(s
->tmp0
, s
->tmp0
, ot
);
5341 tcg_gen_add_tl(s
->A0
, gen_lea_modrm_1(s
, a
, false), s
->tmp0
);
5342 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, a
.def_seg
, s
->override
);
5343 if (!(s
->prefix
& PREFIX_LOCK
)) {
5344 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
5347 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
5350 tcg_gen_andi_tl(s
->T1
, s
->T1
, (1 << (3 + ot
)) - 1);
5351 tcg_gen_movi_tl(s
->tmp0
, 1);
5352 tcg_gen_shl_tl(s
->tmp0
, s
->tmp0
, s
->T1
);
5353 if (s
->prefix
& PREFIX_LOCK
) {
5356 /* Needs no atomic ops; we suppressed the normal
5357 memory load for LOCK above so do it now. */
5358 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
5361 tcg_gen_atomic_fetch_or_tl(s
->T0
, s
->A0
, s
->tmp0
,
5362 s
->mem_index
, ot
| MO_LE
);
5365 tcg_gen_not_tl(s
->tmp0
, s
->tmp0
);
5366 tcg_gen_atomic_fetch_and_tl(s
->T0
, s
->A0
, s
->tmp0
,
5367 s
->mem_index
, ot
| MO_LE
);
5371 tcg_gen_atomic_fetch_xor_tl(s
->T0
, s
->A0
, s
->tmp0
,
5372 s
->mem_index
, ot
| MO_LE
);
5375 tcg_gen_shr_tl(s
->tmp4
, s
->T0
, s
->T1
);
5377 tcg_gen_shr_tl(s
->tmp4
, s
->T0
, s
->T1
);
5380 /* Data already loaded; nothing to do. */
5383 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->tmp0
);
5386 tcg_gen_andc_tl(s
->T0
, s
->T0
, s
->tmp0
);
5390 tcg_gen_xor_tl(s
->T0
, s
->T0
, s
->tmp0
);
5395 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
5397 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
5402 /* Delay all CC updates until after the store above. Note that
5403 C is the result of the test, Z is unchanged, and the others
5404 are all undefined. */
5406 case CC_OP_MULB
... CC_OP_MULQ
:
5407 case CC_OP_ADDB
... CC_OP_ADDQ
:
5408 case CC_OP_ADCB
... CC_OP_ADCQ
:
5409 case CC_OP_SUBB
... CC_OP_SUBQ
:
5410 case CC_OP_SBBB
... CC_OP_SBBQ
:
5411 case CC_OP_LOGICB
... CC_OP_LOGICQ
:
5412 case CC_OP_INCB
... CC_OP_INCQ
:
5413 case CC_OP_DECB
... CC_OP_DECQ
:
5414 case CC_OP_SHLB
... CC_OP_SHLQ
:
5415 case CC_OP_SARB
... CC_OP_SARQ
:
5416 case CC_OP_BMILGB
... CC_OP_BMILGQ
:
5417 /* Z was going to be computed from the non-zero status of CC_DST.
5418 We can get that same Z value (and the new C value) by leaving
5419 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
5421 tcg_gen_mov_tl(cpu_cc_src
, s
->tmp4
);
5422 set_cc_op(s
, ((s
->cc_op
- CC_OP_MULB
) & 3) + CC_OP_SARB
);
5425 /* Otherwise, generate EFLAGS and replace the C bit. */
5426 gen_compute_eflags(s
);
5427 tcg_gen_deposit_tl(cpu_cc_src
, cpu_cc_src
, s
->tmp4
,
5432 case 0x1bc: /* bsf / tzcnt */
5433 case 0x1bd: /* bsr / lzcnt */
5435 modrm
= x86_ldub_code(env
, s
);
5436 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5437 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5438 gen_extu(ot
, s
->T0
);
5440 /* Note that lzcnt and tzcnt are in different extensions. */
5441 if ((prefixes
& PREFIX_REPZ
)
5443 ? s
->cpuid_ext3_features
& CPUID_EXT3_ABM
5444 : s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)) {
5446 /* For lzcnt/tzcnt, C bit is defined related to the input. */
5447 tcg_gen_mov_tl(cpu_cc_src
, s
->T0
);
5449 /* For lzcnt, reduce the target_ulong result by the
5450 number of zeros that we expect to find at the top. */
5451 tcg_gen_clzi_tl(s
->T0
, s
->T0
, TARGET_LONG_BITS
);
5452 tcg_gen_subi_tl(s
->T0
, s
->T0
, TARGET_LONG_BITS
- size
);
5454 /* For tzcnt, a zero input must return the operand size. */
5455 tcg_gen_ctzi_tl(s
->T0
, s
->T0
, size
);
5457 /* For lzcnt/tzcnt, Z bit is defined related to the result. */
5458 gen_op_update1_cc(s
);
5459 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
5461 /* For bsr/bsf, only the Z bit is defined and it is related
5462 to the input and not the result. */
5463 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
5464 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
5466 /* ??? The manual says that the output is undefined when the
5467 input is zero, but real hardware leaves it unchanged, and
5468 real programs appear to depend on that. Accomplish this
5469 by passing the output as the value to return upon zero. */
5471 /* For bsr, return the bit index of the first 1 bit,
5472 not the count of leading zeros. */
5473 tcg_gen_xori_tl(s
->T1
, cpu_regs
[reg
], TARGET_LONG_BITS
- 1);
5474 tcg_gen_clz_tl(s
->T0
, s
->T0
, s
->T1
);
5475 tcg_gen_xori_tl(s
->T0
, s
->T0
, TARGET_LONG_BITS
- 1);
5477 tcg_gen_ctz_tl(s
->T0
, s
->T0
, cpu_regs
[reg
]);
5480 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
5482 /************************/
5484 case 0x27: /* daa */
5487 gen_update_cc_op(s
);
5488 gen_helper_daa(tcg_env
);
5489 set_cc_op(s
, CC_OP_EFLAGS
);
5491 case 0x2f: /* das */
5494 gen_update_cc_op(s
);
5495 gen_helper_das(tcg_env
);
5496 set_cc_op(s
, CC_OP_EFLAGS
);
5498 case 0x37: /* aaa */
5501 gen_update_cc_op(s
);
5502 gen_helper_aaa(tcg_env
);
5503 set_cc_op(s
, CC_OP_EFLAGS
);
5505 case 0x3f: /* aas */
5508 gen_update_cc_op(s
);
5509 gen_helper_aas(tcg_env
);
5510 set_cc_op(s
, CC_OP_EFLAGS
);
5512 case 0xd4: /* aam */
5515 val
= x86_ldub_code(env
, s
);
5517 gen_exception(s
, EXCP00_DIVZ
);
5519 gen_helper_aam(tcg_env
, tcg_constant_i32(val
));
5520 set_cc_op(s
, CC_OP_LOGICB
);
5523 case 0xd5: /* aad */
5526 val
= x86_ldub_code(env
, s
);
5527 gen_helper_aad(tcg_env
, tcg_constant_i32(val
));
5528 set_cc_op(s
, CC_OP_LOGICB
);
5530 /************************/
5532 case 0x90: /* nop */
5533 /* XXX: correct lock test for all insn */
5534 if (prefixes
& PREFIX_LOCK
) {
5537 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
5539 goto do_xchg_reg_eax
;
5541 if (prefixes
& PREFIX_REPZ
) {
5542 gen_update_cc_op(s
);
5543 gen_update_eip_cur(s
);
5544 gen_helper_pause(tcg_env
, cur_insn_len_i32(s
));
5545 s
->base
.is_jmp
= DISAS_NORETURN
;
5548 case 0x9b: /* fwait */
5549 if ((s
->flags
& (HF_MP_MASK
| HF_TS_MASK
)) ==
5550 (HF_MP_MASK
| HF_TS_MASK
)) {
5551 gen_exception(s
, EXCP07_PREX
);
5553 /* needs to be treated as I/O because of ferr_irq */
5554 translator_io_start(&s
->base
);
5555 gen_helper_fwait(tcg_env
);
5558 case 0xcc: /* int3 */
5559 gen_interrupt(s
, EXCP03_INT3
);
5561 case 0xcd: /* int N */
5562 val
= x86_ldub_code(env
, s
);
5563 if (check_vm86_iopl(s
)) {
5564 gen_interrupt(s
, val
);
5567 case 0xce: /* into */
5570 gen_update_cc_op(s
);
5571 gen_update_eip_cur(s
);
5572 gen_helper_into(tcg_env
, cur_insn_len_i32(s
));
5575 case 0xf1: /* icebp (undocumented, exits to external debugger) */
5576 gen_svm_check_intercept(s
, SVM_EXIT_ICEBP
);
5580 case 0xfa: /* cli */
5581 if (check_iopl(s
)) {
5582 gen_reset_eflags(s
, IF_MASK
);
5585 case 0xfb: /* sti */
5586 if (check_iopl(s
)) {
5587 gen_set_eflags(s
, IF_MASK
);
5588 /* interruptions are enabled only the first insn after sti */
5589 gen_update_eip_next(s
);
5590 gen_eob_inhibit_irq(s
, true);
5593 case 0x62: /* bound */
5597 modrm
= x86_ldub_code(env
, s
);
5598 reg
= (modrm
>> 3) & 7;
5599 mod
= (modrm
>> 6) & 3;
5602 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
5603 gen_lea_modrm(env
, s
, modrm
);
5604 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5606 gen_helper_boundw(tcg_env
, s
->A0
, s
->tmp2_i32
);
5608 gen_helper_boundl(tcg_env
, s
->A0
, s
->tmp2_i32
);
5611 case 0x1c8 ... 0x1cf: /* bswap reg */
5612 reg
= (b
& 7) | REX_B(s
);
5613 #ifdef TARGET_X86_64
5614 if (dflag
== MO_64
) {
5615 tcg_gen_bswap64_i64(cpu_regs
[reg
], cpu_regs
[reg
]);
5619 tcg_gen_bswap32_tl(cpu_regs
[reg
], cpu_regs
[reg
], TCG_BSWAP_OZ
);
5621 case 0xd6: /* salc */
5624 gen_compute_eflags_c(s
, s
->T0
);
5625 tcg_gen_neg_tl(s
->T0
, s
->T0
);
5626 gen_op_mov_reg_v(s
, MO_8
, R_EAX
, s
->T0
);
5628 case 0xe0: /* loopnz */
5629 case 0xe1: /* loopz */
5630 case 0xe2: /* loop */
5631 case 0xe3: /* jecxz */
5634 int diff
= (int8_t)insn_get(env
, s
, MO_8
);
5636 l1
= gen_new_label();
5637 l2
= gen_new_label();
5638 gen_update_cc_op(s
);
5641 case 0: /* loopnz */
5643 gen_op_add_reg_im(s
, s
->aflag
, R_ECX
, -1);
5644 gen_op_jz_ecx(s
, l2
);
5645 gen_jcc1(s
, (JCC_Z
<< 1) | (b
^ 1), l1
);
5648 gen_op_add_reg_im(s
, s
->aflag
, R_ECX
, -1);
5649 gen_op_jnz_ecx(s
, l1
);
5653 gen_op_jz_ecx(s
, l1
);
5658 gen_jmp_rel_csize(s
, 0, 1);
5661 gen_jmp_rel(s
, dflag
, diff
, 0);
5664 case 0x130: /* wrmsr */
5665 case 0x132: /* rdmsr */
5666 if (check_cpl0(s
)) {
5667 gen_update_cc_op(s
);
5668 gen_update_eip_cur(s
);
5670 gen_helper_rdmsr(tcg_env
);
5672 gen_helper_wrmsr(tcg_env
);
5673 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
5677 case 0x131: /* rdtsc */
5678 gen_update_cc_op(s
);
5679 gen_update_eip_cur(s
);
5680 translator_io_start(&s
->base
);
5681 gen_helper_rdtsc(tcg_env
);
5683 case 0x133: /* rdpmc */
5684 gen_update_cc_op(s
);
5685 gen_update_eip_cur(s
);
5686 gen_helper_rdpmc(tcg_env
);
5687 s
->base
.is_jmp
= DISAS_NORETURN
;
5689 case 0x134: /* sysenter */
5690 /* For AMD SYSENTER is not valid in long mode */
5691 if (LMA(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
) {
5695 gen_exception_gpf(s
);
5697 gen_helper_sysenter(tcg_env
);
5698 s
->base
.is_jmp
= DISAS_EOB_ONLY
;
5701 case 0x135: /* sysexit */
5702 /* For AMD SYSEXIT is not valid in long mode */
5703 if (LMA(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
) {
5706 if (!PE(s
) || CPL(s
) != 0) {
5707 gen_exception_gpf(s
);
5709 gen_helper_sysexit(tcg_env
, tcg_constant_i32(dflag
- 1));
5710 s
->base
.is_jmp
= DISAS_EOB_ONLY
;
5713 case 0x105: /* syscall */
5714 /* For Intel SYSCALL is only valid in long mode */
5715 if (!LMA(s
) && env
->cpuid_vendor1
== CPUID_VENDOR_INTEL_1
) {
5718 gen_update_cc_op(s
);
5719 gen_update_eip_cur(s
);
5720 gen_helper_syscall(tcg_env
, cur_insn_len_i32(s
));
5721 /* TF handling for the syscall insn is different. The TF bit is checked
5722 after the syscall insn completes. This allows #DB to not be
5723 generated after one has entered CPL0 if TF is set in FMASK. */
5724 gen_eob_worker(s
, false, true);
5726 case 0x107: /* sysret */
5727 /* For Intel SYSRET is only valid in long mode */
5728 if (!LMA(s
) && env
->cpuid_vendor1
== CPUID_VENDOR_INTEL_1
) {
5731 if (!PE(s
) || CPL(s
) != 0) {
5732 gen_exception_gpf(s
);
5734 gen_helper_sysret(tcg_env
, tcg_constant_i32(dflag
- 1));
5735 /* condition codes are modified only in long mode */
5737 set_cc_op(s
, CC_OP_EFLAGS
);
5739 /* TF handling for the sysret insn is different. The TF bit is
5740 checked after the sysret insn completes. This allows #DB to be
5741 generated "as if" the syscall insn in userspace has just
5743 gen_eob_worker(s
, false, true);
5746 case 0x1a2: /* cpuid */
5747 gen_update_cc_op(s
);
5748 gen_update_eip_cur(s
);
5749 gen_helper_cpuid(tcg_env
);
5751 case 0xf4: /* hlt */
5752 if (check_cpl0(s
)) {
5753 gen_update_cc_op(s
);
5754 gen_update_eip_cur(s
);
5755 gen_helper_hlt(tcg_env
, cur_insn_len_i32(s
));
5756 s
->base
.is_jmp
= DISAS_NORETURN
;
5760 modrm
= x86_ldub_code(env
, s
);
5761 mod
= (modrm
>> 6) & 3;
5762 op
= (modrm
>> 3) & 7;
5765 if (!PE(s
) || VM86(s
))
5767 if (s
->flags
& HF_UMIP_MASK
&& !check_cpl0(s
)) {
5770 gen_svm_check_intercept(s
, SVM_EXIT_LDTR_READ
);
5771 tcg_gen_ld32u_tl(s
->T0
, tcg_env
,
5772 offsetof(CPUX86State
, ldt
.selector
));
5773 ot
= mod
== 3 ? dflag
: MO_16
;
5774 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5777 if (!PE(s
) || VM86(s
))
5779 if (check_cpl0(s
)) {
5780 gen_svm_check_intercept(s
, SVM_EXIT_LDTR_WRITE
);
5781 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
5782 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5783 gen_helper_lldt(tcg_env
, s
->tmp2_i32
);
5787 if (!PE(s
) || VM86(s
))
5789 if (s
->flags
& HF_UMIP_MASK
&& !check_cpl0(s
)) {
5792 gen_svm_check_intercept(s
, SVM_EXIT_TR_READ
);
5793 tcg_gen_ld32u_tl(s
->T0
, tcg_env
,
5794 offsetof(CPUX86State
, tr
.selector
));
5795 ot
= mod
== 3 ? dflag
: MO_16
;
5796 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5799 if (!PE(s
) || VM86(s
))
5801 if (check_cpl0(s
)) {
5802 gen_svm_check_intercept(s
, SVM_EXIT_TR_WRITE
);
5803 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
5804 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5805 gen_helper_ltr(tcg_env
, s
->tmp2_i32
);
5810 if (!PE(s
) || VM86(s
))
5812 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
5813 gen_update_cc_op(s
);
5815 gen_helper_verr(tcg_env
, s
->T0
);
5817 gen_helper_verw(tcg_env
, s
->T0
);
5819 set_cc_op(s
, CC_OP_EFLAGS
);
5827 modrm
= x86_ldub_code(env
, s
);
5829 CASE_MODRM_MEM_OP(0): /* sgdt */
5830 if (s
->flags
& HF_UMIP_MASK
&& !check_cpl0(s
)) {
5833 gen_svm_check_intercept(s
, SVM_EXIT_GDTR_READ
);
5834 gen_lea_modrm(env
, s
, modrm
);
5835 tcg_gen_ld32u_tl(s
->T0
,
5836 tcg_env
, offsetof(CPUX86State
, gdt
.limit
));
5837 gen_op_st_v(s
, MO_16
, s
->T0
, s
->A0
);
5838 gen_add_A0_im(s
, 2);
5839 tcg_gen_ld_tl(s
->T0
, tcg_env
, offsetof(CPUX86State
, gdt
.base
));
5840 if (dflag
== MO_16
) {
5841 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xffffff);
5843 gen_op_st_v(s
, CODE64(s
) + MO_32
, s
->T0
, s
->A0
);
5846 case 0xc8: /* monitor */
5847 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) || CPL(s
) != 0) {
5850 gen_update_cc_op(s
);
5851 gen_update_eip_cur(s
);
5852 tcg_gen_mov_tl(s
->A0
, cpu_regs
[R_EAX
]);
5853 gen_extu(s
->aflag
, s
->A0
);
5854 gen_add_A0_ds_seg(s
);
5855 gen_helper_monitor(tcg_env
, s
->A0
);
5858 case 0xc9: /* mwait */
5859 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) || CPL(s
) != 0) {
5862 gen_update_cc_op(s
);
5863 gen_update_eip_cur(s
);
5864 gen_helper_mwait(tcg_env
, cur_insn_len_i32(s
));
5865 s
->base
.is_jmp
= DISAS_NORETURN
;
5868 case 0xca: /* clac */
5869 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
)
5873 gen_reset_eflags(s
, AC_MASK
);
5874 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
5877 case 0xcb: /* stac */
5878 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
)
5882 gen_set_eflags(s
, AC_MASK
);
5883 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
5886 CASE_MODRM_MEM_OP(1): /* sidt */
5887 if (s
->flags
& HF_UMIP_MASK
&& !check_cpl0(s
)) {
5890 gen_svm_check_intercept(s
, SVM_EXIT_IDTR_READ
);
5891 gen_lea_modrm(env
, s
, modrm
);
5892 tcg_gen_ld32u_tl(s
->T0
, tcg_env
, offsetof(CPUX86State
, idt
.limit
));
5893 gen_op_st_v(s
, MO_16
, s
->T0
, s
->A0
);
5894 gen_add_A0_im(s
, 2);
5895 tcg_gen_ld_tl(s
->T0
, tcg_env
, offsetof(CPUX86State
, idt
.base
));
5896 if (dflag
== MO_16
) {
5897 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xffffff);
5899 gen_op_st_v(s
, CODE64(s
) + MO_32
, s
->T0
, s
->A0
);
5902 case 0xd0: /* xgetbv */
5903 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
5904 || (s
->prefix
& (PREFIX_LOCK
| PREFIX_DATA
5905 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
5908 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_ECX
]);
5909 gen_helper_xgetbv(s
->tmp1_i64
, tcg_env
, s
->tmp2_i32
);
5910 tcg_gen_extr_i64_tl(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
], s
->tmp1_i64
);
5913 case 0xd1: /* xsetbv */
5914 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
5915 || (s
->prefix
& (PREFIX_LOCK
| PREFIX_DATA
5916 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
5919 if (!check_cpl0(s
)) {
5922 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
5924 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_ECX
]);
5925 gen_helper_xsetbv(tcg_env
, s
->tmp2_i32
, s
->tmp1_i64
);
5926 /* End TB because translation flags may change. */
5927 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
5930 case 0xd8: /* VMRUN */
5931 if (!SVME(s
) || !PE(s
)) {
5934 if (!check_cpl0(s
)) {
5937 gen_update_cc_op(s
);
5938 gen_update_eip_cur(s
);
5939 gen_helper_vmrun(tcg_env
, tcg_constant_i32(s
->aflag
- 1),
5940 cur_insn_len_i32(s
));
5941 tcg_gen_exit_tb(NULL
, 0);
5942 s
->base
.is_jmp
= DISAS_NORETURN
;
5945 case 0xd9: /* VMMCALL */
5949 gen_update_cc_op(s
);
5950 gen_update_eip_cur(s
);
5951 gen_helper_vmmcall(tcg_env
);
5954 case 0xda: /* VMLOAD */
5955 if (!SVME(s
) || !PE(s
)) {
5958 if (!check_cpl0(s
)) {
5961 gen_update_cc_op(s
);
5962 gen_update_eip_cur(s
);
5963 gen_helper_vmload(tcg_env
, tcg_constant_i32(s
->aflag
- 1));
5966 case 0xdb: /* VMSAVE */
5967 if (!SVME(s
) || !PE(s
)) {
5970 if (!check_cpl0(s
)) {
5973 gen_update_cc_op(s
);
5974 gen_update_eip_cur(s
);
5975 gen_helper_vmsave(tcg_env
, tcg_constant_i32(s
->aflag
- 1));
5978 case 0xdc: /* STGI */
5979 if ((!SVME(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
))
5983 if (!check_cpl0(s
)) {
5986 gen_update_cc_op(s
);
5987 gen_helper_stgi(tcg_env
);
5988 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
5991 case 0xdd: /* CLGI */
5992 if (!SVME(s
) || !PE(s
)) {
5995 if (!check_cpl0(s
)) {
5998 gen_update_cc_op(s
);
5999 gen_update_eip_cur(s
);
6000 gen_helper_clgi(tcg_env
);
6003 case 0xde: /* SKINIT */
6004 if ((!SVME(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
))
6008 gen_svm_check_intercept(s
, SVM_EXIT_SKINIT
);
6009 /* If not intercepted, not implemented -- raise #UD. */
6012 case 0xdf: /* INVLPGA */
6013 if (!SVME(s
) || !PE(s
)) {
6016 if (!check_cpl0(s
)) {
6019 gen_svm_check_intercept(s
, SVM_EXIT_INVLPGA
);
6020 if (s
->aflag
== MO_64
) {
6021 tcg_gen_mov_tl(s
->A0
, cpu_regs
[R_EAX
]);
6023 tcg_gen_ext32u_tl(s
->A0
, cpu_regs
[R_EAX
]);
6025 gen_helper_flush_page(tcg_env
, s
->A0
);
6026 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6029 CASE_MODRM_MEM_OP(2): /* lgdt */
6030 if (!check_cpl0(s
)) {
6033 gen_svm_check_intercept(s
, SVM_EXIT_GDTR_WRITE
);
6034 gen_lea_modrm(env
, s
, modrm
);
6035 gen_op_ld_v(s
, MO_16
, s
->T1
, s
->A0
);
6036 gen_add_A0_im(s
, 2);
6037 gen_op_ld_v(s
, CODE64(s
) + MO_32
, s
->T0
, s
->A0
);
6038 if (dflag
== MO_16
) {
6039 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xffffff);
6041 tcg_gen_st_tl(s
->T0
, tcg_env
, offsetof(CPUX86State
, gdt
.base
));
6042 tcg_gen_st32_tl(s
->T1
, tcg_env
, offsetof(CPUX86State
, gdt
.limit
));
6045 CASE_MODRM_MEM_OP(3): /* lidt */
6046 if (!check_cpl0(s
)) {
6049 gen_svm_check_intercept(s
, SVM_EXIT_IDTR_WRITE
);
6050 gen_lea_modrm(env
, s
, modrm
);
6051 gen_op_ld_v(s
, MO_16
, s
->T1
, s
->A0
);
6052 gen_add_A0_im(s
, 2);
6053 gen_op_ld_v(s
, CODE64(s
) + MO_32
, s
->T0
, s
->A0
);
6054 if (dflag
== MO_16
) {
6055 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xffffff);
6057 tcg_gen_st_tl(s
->T0
, tcg_env
, offsetof(CPUX86State
, idt
.base
));
6058 tcg_gen_st32_tl(s
->T1
, tcg_env
, offsetof(CPUX86State
, idt
.limit
));
6061 CASE_MODRM_OP(4): /* smsw */
6062 if (s
->flags
& HF_UMIP_MASK
&& !check_cpl0(s
)) {
6065 gen_svm_check_intercept(s
, SVM_EXIT_READ_CR0
);
6066 tcg_gen_ld_tl(s
->T0
, tcg_env
, offsetof(CPUX86State
, cr
[0]));
6068 * In 32-bit mode, the higher 16 bits of the destination
6069 * register are undefined. In practice CR0[31:0] is stored
6070 * just like in 64-bit mode.
6072 mod
= (modrm
>> 6) & 3;
6073 ot
= (mod
!= 3 ? MO_16
: s
->dflag
);
6074 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
6076 case 0xee: /* rdpkru */
6077 if (prefixes
& PREFIX_LOCK
) {
6080 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_ECX
]);
6081 gen_helper_rdpkru(s
->tmp1_i64
, tcg_env
, s
->tmp2_i32
);
6082 tcg_gen_extr_i64_tl(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
], s
->tmp1_i64
);
6084 case 0xef: /* wrpkru */
6085 if (prefixes
& PREFIX_LOCK
) {
6088 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
6090 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_ECX
]);
6091 gen_helper_wrpkru(tcg_env
, s
->tmp2_i32
, s
->tmp1_i64
);
6094 CASE_MODRM_OP(6): /* lmsw */
6095 if (!check_cpl0(s
)) {
6098 gen_svm_check_intercept(s
, SVM_EXIT_WRITE_CR0
);
6099 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
6101 * Only the 4 lower bits of CR0 are modified.
6102 * PE cannot be set to zero if already set to one.
6104 tcg_gen_ld_tl(s
->T1
, tcg_env
, offsetof(CPUX86State
, cr
[0]));
6105 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xf);
6106 tcg_gen_andi_tl(s
->T1
, s
->T1
, ~0xe);
6107 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->T1
);
6108 gen_helper_write_crN(tcg_env
, tcg_constant_i32(0), s
->T0
);
6109 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6112 CASE_MODRM_MEM_OP(7): /* invlpg */
6113 if (!check_cpl0(s
)) {
6116 gen_svm_check_intercept(s
, SVM_EXIT_INVLPG
);
6117 gen_lea_modrm(env
, s
, modrm
);
6118 gen_helper_flush_page(tcg_env
, s
->A0
);
6119 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6122 case 0xf8: /* swapgs */
6123 #ifdef TARGET_X86_64
6125 if (check_cpl0(s
)) {
6126 tcg_gen_mov_tl(s
->T0
, cpu_seg_base
[R_GS
]);
6127 tcg_gen_ld_tl(cpu_seg_base
[R_GS
], tcg_env
,
6128 offsetof(CPUX86State
, kernelgsbase
));
6129 tcg_gen_st_tl(s
->T0
, tcg_env
,
6130 offsetof(CPUX86State
, kernelgsbase
));
6137 case 0xf9: /* rdtscp */
6138 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_RDTSCP
)) {
6141 gen_update_cc_op(s
);
6142 gen_update_eip_cur(s
);
6143 translator_io_start(&s
->base
);
6144 gen_helper_rdtsc(tcg_env
);
6145 gen_helper_rdpid(s
->T0
, tcg_env
);
6146 gen_op_mov_reg_v(s
, dflag
, R_ECX
, s
->T0
);
6154 case 0x108: /* invd */
6155 case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */
6156 if (check_cpl0(s
)) {
6157 gen_svm_check_intercept(s
, (b
& 1) ? SVM_EXIT_WBINVD
: SVM_EXIT_INVD
);
6161 case 0x63: /* arpl or movslS (x86_64) */
6162 #ifdef TARGET_X86_64
6165 /* d_ot is the size of destination */
6168 modrm
= x86_ldub_code(env
, s
);
6169 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6170 mod
= (modrm
>> 6) & 3;
6171 rm
= (modrm
& 7) | REX_B(s
);
6174 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, rm
);
6176 if (d_ot
== MO_64
) {
6177 tcg_gen_ext32s_tl(s
->T0
, s
->T0
);
6179 gen_op_mov_reg_v(s
, d_ot
, reg
, s
->T0
);
6181 gen_lea_modrm(env
, s
, modrm
);
6182 gen_op_ld_v(s
, MO_32
| MO_SIGN
, s
->T0
, s
->A0
);
6183 gen_op_mov_reg_v(s
, d_ot
, reg
, s
->T0
);
6191 if (!PE(s
) || VM86(s
))
6193 t0
= tcg_temp_new();
6194 t1
= tcg_temp_new();
6195 t2
= tcg_temp_new();
6197 modrm
= x86_ldub_code(env
, s
);
6198 reg
= (modrm
>> 3) & 7;
6199 mod
= (modrm
>> 6) & 3;
6202 gen_lea_modrm(env
, s
, modrm
);
6203 gen_op_ld_v(s
, ot
, t0
, s
->A0
);
6205 gen_op_mov_v_reg(s
, ot
, t0
, rm
);
6207 gen_op_mov_v_reg(s
, ot
, t1
, reg
);
6208 tcg_gen_andi_tl(s
->tmp0
, t0
, 3);
6209 tcg_gen_andi_tl(t1
, t1
, 3);
6210 tcg_gen_movi_tl(t2
, 0);
6211 label1
= gen_new_label();
6212 tcg_gen_brcond_tl(TCG_COND_GE
, s
->tmp0
, t1
, label1
);
6213 tcg_gen_andi_tl(t0
, t0
, ~3);
6214 tcg_gen_or_tl(t0
, t0
, t1
);
6215 tcg_gen_movi_tl(t2
, CC_Z
);
6216 gen_set_label(label1
);
6218 gen_op_st_v(s
, ot
, t0
, s
->A0
);
6220 gen_op_mov_reg_v(s
, ot
, rm
, t0
);
6222 gen_compute_eflags(s
);
6223 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_Z
);
6224 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t2
);
6227 case 0x102: /* lar */
6228 case 0x103: /* lsl */
6232 if (!PE(s
) || VM86(s
))
6234 ot
= dflag
!= MO_16
? MO_32
: MO_16
;
6235 modrm
= x86_ldub_code(env
, s
);
6236 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6237 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
6238 t0
= tcg_temp_new();
6239 gen_update_cc_op(s
);
6241 gen_helper_lar(t0
, tcg_env
, s
->T0
);
6243 gen_helper_lsl(t0
, tcg_env
, s
->T0
);
6245 tcg_gen_andi_tl(s
->tmp0
, cpu_cc_src
, CC_Z
);
6246 label1
= gen_new_label();
6247 tcg_gen_brcondi_tl(TCG_COND_EQ
, s
->tmp0
, 0, label1
);
6248 gen_op_mov_reg_v(s
, ot
, reg
, t0
);
6249 gen_set_label(label1
);
6250 set_cc_op(s
, CC_OP_EFLAGS
);
6254 modrm
= x86_ldub_code(env
, s
);
6255 mod
= (modrm
>> 6) & 3;
6256 op
= (modrm
>> 3) & 7;
6258 case 0: /* prefetchnta */
6259 case 1: /* prefetcht0 */
6260 case 2: /* prefetcht1 */
6261 case 3: /* prefetcht2 */
6264 gen_nop_modrm(env
, s
, modrm
);
6265 /* nothing more to do */
6267 default: /* nop (multi byte) */
6268 gen_nop_modrm(env
, s
, modrm
);
6273 modrm
= x86_ldub_code(env
, s
);
6274 if (s
->flags
& HF_MPX_EN_MASK
) {
6275 mod
= (modrm
>> 6) & 3;
6276 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6277 if (prefixes
& PREFIX_REPZ
) {
6280 || (prefixes
& PREFIX_LOCK
)
6281 || s
->aflag
== MO_16
) {
6284 gen_bndck(env
, s
, modrm
, TCG_COND_LTU
, cpu_bndl
[reg
]);
6285 } else if (prefixes
& PREFIX_REPNZ
) {
6288 || (prefixes
& PREFIX_LOCK
)
6289 || s
->aflag
== MO_16
) {
6292 TCGv_i64 notu
= tcg_temp_new_i64();
6293 tcg_gen_not_i64(notu
, cpu_bndu
[reg
]);
6294 gen_bndck(env
, s
, modrm
, TCG_COND_GTU
, notu
);
6295 } else if (prefixes
& PREFIX_DATA
) {
6296 /* bndmov -- from reg/mem */
6297 if (reg
>= 4 || s
->aflag
== MO_16
) {
6301 int reg2
= (modrm
& 7) | REX_B(s
);
6302 if (reg2
>= 4 || (prefixes
& PREFIX_LOCK
)) {
6305 if (s
->flags
& HF_MPX_IU_MASK
) {
6306 tcg_gen_mov_i64(cpu_bndl
[reg
], cpu_bndl
[reg2
]);
6307 tcg_gen_mov_i64(cpu_bndu
[reg
], cpu_bndu
[reg2
]);
6310 gen_lea_modrm(env
, s
, modrm
);
6312 tcg_gen_qemu_ld_i64(cpu_bndl
[reg
], s
->A0
,
6313 s
->mem_index
, MO_LEUQ
);
6314 tcg_gen_addi_tl(s
->A0
, s
->A0
, 8);
6315 tcg_gen_qemu_ld_i64(cpu_bndu
[reg
], s
->A0
,
6316 s
->mem_index
, MO_LEUQ
);
6318 tcg_gen_qemu_ld_i64(cpu_bndl
[reg
], s
->A0
,
6319 s
->mem_index
, MO_LEUL
);
6320 tcg_gen_addi_tl(s
->A0
, s
->A0
, 4);
6321 tcg_gen_qemu_ld_i64(cpu_bndu
[reg
], s
->A0
,
6322 s
->mem_index
, MO_LEUL
);
6324 /* bnd registers are now in-use */
6325 gen_set_hflag(s
, HF_MPX_IU_MASK
);
6327 } else if (mod
!= 3) {
6329 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
6331 || (prefixes
& PREFIX_LOCK
)
6332 || s
->aflag
== MO_16
6337 tcg_gen_addi_tl(s
->A0
, cpu_regs
[a
.base
], a
.disp
);
6339 tcg_gen_movi_tl(s
->A0
, 0);
6341 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, a
.def_seg
, s
->override
);
6343 tcg_gen_mov_tl(s
->T0
, cpu_regs
[a
.index
]);
6345 tcg_gen_movi_tl(s
->T0
, 0);
6348 gen_helper_bndldx64(cpu_bndl
[reg
], tcg_env
, s
->A0
, s
->T0
);
6349 tcg_gen_ld_i64(cpu_bndu
[reg
], tcg_env
,
6350 offsetof(CPUX86State
, mmx_t0
.MMX_Q(0)));
6352 gen_helper_bndldx32(cpu_bndu
[reg
], tcg_env
, s
->A0
, s
->T0
);
6353 tcg_gen_ext32u_i64(cpu_bndl
[reg
], cpu_bndu
[reg
]);
6354 tcg_gen_shri_i64(cpu_bndu
[reg
], cpu_bndu
[reg
], 32);
6356 gen_set_hflag(s
, HF_MPX_IU_MASK
);
6359 gen_nop_modrm(env
, s
, modrm
);
6362 modrm
= x86_ldub_code(env
, s
);
6363 if (s
->flags
& HF_MPX_EN_MASK
) {
6364 mod
= (modrm
>> 6) & 3;
6365 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6366 if (mod
!= 3 && (prefixes
& PREFIX_REPZ
)) {
6369 || (prefixes
& PREFIX_LOCK
)
6370 || s
->aflag
== MO_16
) {
6373 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
6375 tcg_gen_extu_tl_i64(cpu_bndl
[reg
], cpu_regs
[a
.base
]);
6377 tcg_gen_ext32u_i64(cpu_bndl
[reg
], cpu_bndl
[reg
]);
6379 } else if (a
.base
== -1) {
6380 /* no base register has lower bound of 0 */
6381 tcg_gen_movi_i64(cpu_bndl
[reg
], 0);
6383 /* rip-relative generates #ud */
6386 tcg_gen_not_tl(s
->A0
, gen_lea_modrm_1(s
, a
, false));
6388 tcg_gen_ext32u_tl(s
->A0
, s
->A0
);
6390 tcg_gen_extu_tl_i64(cpu_bndu
[reg
], s
->A0
);
6391 /* bnd registers are now in-use */
6392 gen_set_hflag(s
, HF_MPX_IU_MASK
);
6394 } else if (prefixes
& PREFIX_REPNZ
) {
6397 || (prefixes
& PREFIX_LOCK
)
6398 || s
->aflag
== MO_16
) {
6401 gen_bndck(env
, s
, modrm
, TCG_COND_GTU
, cpu_bndu
[reg
]);
6402 } else if (prefixes
& PREFIX_DATA
) {
6403 /* bndmov -- to reg/mem */
6404 if (reg
>= 4 || s
->aflag
== MO_16
) {
6408 int reg2
= (modrm
& 7) | REX_B(s
);
6409 if (reg2
>= 4 || (prefixes
& PREFIX_LOCK
)) {
6412 if (s
->flags
& HF_MPX_IU_MASK
) {
6413 tcg_gen_mov_i64(cpu_bndl
[reg2
], cpu_bndl
[reg
]);
6414 tcg_gen_mov_i64(cpu_bndu
[reg2
], cpu_bndu
[reg
]);
6417 gen_lea_modrm(env
, s
, modrm
);
6419 tcg_gen_qemu_st_i64(cpu_bndl
[reg
], s
->A0
,
6420 s
->mem_index
, MO_LEUQ
);
6421 tcg_gen_addi_tl(s
->A0
, s
->A0
, 8);
6422 tcg_gen_qemu_st_i64(cpu_bndu
[reg
], s
->A0
,
6423 s
->mem_index
, MO_LEUQ
);
6425 tcg_gen_qemu_st_i64(cpu_bndl
[reg
], s
->A0
,
6426 s
->mem_index
, MO_LEUL
);
6427 tcg_gen_addi_tl(s
->A0
, s
->A0
, 4);
6428 tcg_gen_qemu_st_i64(cpu_bndu
[reg
], s
->A0
,
6429 s
->mem_index
, MO_LEUL
);
6432 } else if (mod
!= 3) {
6434 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
6436 || (prefixes
& PREFIX_LOCK
)
6437 || s
->aflag
== MO_16
6442 tcg_gen_addi_tl(s
->A0
, cpu_regs
[a
.base
], a
.disp
);
6444 tcg_gen_movi_tl(s
->A0
, 0);
6446 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, a
.def_seg
, s
->override
);
6448 tcg_gen_mov_tl(s
->T0
, cpu_regs
[a
.index
]);
6450 tcg_gen_movi_tl(s
->T0
, 0);
6453 gen_helper_bndstx64(tcg_env
, s
->A0
, s
->T0
,
6454 cpu_bndl
[reg
], cpu_bndu
[reg
]);
6456 gen_helper_bndstx32(tcg_env
, s
->A0
, s
->T0
,
6457 cpu_bndl
[reg
], cpu_bndu
[reg
]);
6461 gen_nop_modrm(env
, s
, modrm
);
6463 case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
6464 modrm
= x86_ldub_code(env
, s
);
6465 gen_nop_modrm(env
, s
, modrm
);
6468 case 0x120: /* mov reg, crN */
6469 case 0x122: /* mov crN, reg */
6470 if (!check_cpl0(s
)) {
6473 modrm
= x86_ldub_code(env
, s
);
6475 * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6476 * AMD documentation (24594.pdf) and testing of Intel 386 and 486
6477 * processors all show that the mod bits are assumed to be 1's,
6478 * regardless of actual values.
6480 rm
= (modrm
& 7) | REX_B(s
);
6481 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6484 if ((prefixes
& PREFIX_LOCK
) &&
6485 (s
->cpuid_ext3_features
& CPUID_EXT3_CR8LEG
)) {
6497 ot
= (CODE64(s
) ? MO_64
: MO_32
);
6499 translator_io_start(&s
->base
);
6501 gen_svm_check_intercept(s
, SVM_EXIT_WRITE_CR0
+ reg
);
6502 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
6503 gen_helper_write_crN(tcg_env
, tcg_constant_i32(reg
), s
->T0
);
6504 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6506 gen_svm_check_intercept(s
, SVM_EXIT_READ_CR0
+ reg
);
6507 gen_helper_read_crN(s
->T0
, tcg_env
, tcg_constant_i32(reg
));
6508 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
6512 case 0x121: /* mov reg, drN */
6513 case 0x123: /* mov drN, reg */
6514 if (check_cpl0(s
)) {
6515 modrm
= x86_ldub_code(env
, s
);
6516 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6517 * AMD documentation (24594.pdf) and testing of
6518 * intel 386 and 486 processors all show that the mod bits
6519 * are assumed to be 1's, regardless of actual values.
6521 rm
= (modrm
& 7) | REX_B(s
);
6522 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6531 gen_svm_check_intercept(s
, SVM_EXIT_WRITE_DR0
+ reg
);
6532 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
6533 tcg_gen_movi_i32(s
->tmp2_i32
, reg
);
6534 gen_helper_set_dr(tcg_env
, s
->tmp2_i32
, s
->T0
);
6535 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6537 gen_svm_check_intercept(s
, SVM_EXIT_READ_DR0
+ reg
);
6538 tcg_gen_movi_i32(s
->tmp2_i32
, reg
);
6539 gen_helper_get_dr(s
->T0
, tcg_env
, s
->tmp2_i32
);
6540 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
6544 case 0x106: /* clts */
6545 if (check_cpl0(s
)) {
6546 gen_svm_check_intercept(s
, SVM_EXIT_WRITE_CR0
);
6547 gen_helper_clts(tcg_env
);
6548 /* abort block because static cpu state changed */
6549 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6552 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
6553 case 0x1c3: /* MOVNTI reg, mem */
6554 if (!(s
->cpuid_features
& CPUID_SSE2
))
6556 ot
= mo_64_32(dflag
);
6557 modrm
= x86_ldub_code(env
, s
);
6558 mod
= (modrm
>> 6) & 3;
6561 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6562 /* generate a generic store */
6563 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
6566 modrm
= x86_ldub_code(env
, s
);
6568 CASE_MODRM_MEM_OP(0): /* fxsave */
6569 if (!(s
->cpuid_features
& CPUID_FXSR
)
6570 || (prefixes
& PREFIX_LOCK
)) {
6573 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
6574 gen_exception(s
, EXCP07_PREX
);
6577 gen_lea_modrm(env
, s
, modrm
);
6578 gen_helper_fxsave(tcg_env
, s
->A0
);
6581 CASE_MODRM_MEM_OP(1): /* fxrstor */
6582 if (!(s
->cpuid_features
& CPUID_FXSR
)
6583 || (prefixes
& PREFIX_LOCK
)) {
6586 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
6587 gen_exception(s
, EXCP07_PREX
);
6590 gen_lea_modrm(env
, s
, modrm
);
6591 gen_helper_fxrstor(tcg_env
, s
->A0
);
6594 CASE_MODRM_MEM_OP(2): /* ldmxcsr */
6595 if ((s
->flags
& HF_EM_MASK
) || !(s
->flags
& HF_OSFXSR_MASK
)) {
6598 if (s
->flags
& HF_TS_MASK
) {
6599 gen_exception(s
, EXCP07_PREX
);
6602 gen_lea_modrm(env
, s
, modrm
);
6603 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
, s
->mem_index
, MO_LEUL
);
6604 gen_helper_ldmxcsr(tcg_env
, s
->tmp2_i32
);
6607 CASE_MODRM_MEM_OP(3): /* stmxcsr */
6608 if ((s
->flags
& HF_EM_MASK
) || !(s
->flags
& HF_OSFXSR_MASK
)) {
6611 if (s
->flags
& HF_TS_MASK
) {
6612 gen_exception(s
, EXCP07_PREX
);
6615 gen_helper_update_mxcsr(tcg_env
);
6616 gen_lea_modrm(env
, s
, modrm
);
6617 tcg_gen_ld32u_tl(s
->T0
, tcg_env
, offsetof(CPUX86State
, mxcsr
));
6618 gen_op_st_v(s
, MO_32
, s
->T0
, s
->A0
);
6621 CASE_MODRM_MEM_OP(4): /* xsave */
6622 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
6623 || (prefixes
& (PREFIX_LOCK
| PREFIX_DATA
6624 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
6627 gen_lea_modrm(env
, s
, modrm
);
6628 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
6630 gen_helper_xsave(tcg_env
, s
->A0
, s
->tmp1_i64
);
6633 CASE_MODRM_MEM_OP(5): /* xrstor */
6634 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
6635 || (prefixes
& (PREFIX_LOCK
| PREFIX_DATA
6636 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
6639 gen_lea_modrm(env
, s
, modrm
);
6640 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
6642 gen_helper_xrstor(tcg_env
, s
->A0
, s
->tmp1_i64
);
6643 /* XRSTOR is how MPX is enabled, which changes how
6644 we translate. Thus we need to end the TB. */
6645 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6648 CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
6649 if (prefixes
& PREFIX_LOCK
) {
6652 if (prefixes
& PREFIX_DATA
) {
6654 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_CLWB
)) {
6657 gen_nop_modrm(env
, s
, modrm
);
6660 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
6661 || (s
->cpuid_xsave_features
& CPUID_XSAVE_XSAVEOPT
) == 0
6662 || (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
))) {
6665 gen_lea_modrm(env
, s
, modrm
);
6666 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
6668 gen_helper_xsaveopt(tcg_env
, s
->A0
, s
->tmp1_i64
);
6672 CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
6673 if (prefixes
& PREFIX_LOCK
) {
6676 if (prefixes
& PREFIX_DATA
) {
6678 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_CLFLUSHOPT
)) {
6683 if ((s
->prefix
& (PREFIX_REPZ
| PREFIX_REPNZ
))
6684 || !(s
->cpuid_features
& CPUID_CLFLUSH
)) {
6688 gen_nop_modrm(env
, s
, modrm
);
6691 case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
6692 case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
6693 case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
6694 case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
6696 && (prefixes
& PREFIX_REPZ
)
6697 && !(prefixes
& PREFIX_LOCK
)
6698 && (s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_FSGSBASE
)) {
6699 TCGv base
, treg
, src
, dst
;
6701 /* Preserve hflags bits by testing CR4 at runtime. */
6702 tcg_gen_movi_i32(s
->tmp2_i32
, CR4_FSGSBASE_MASK
);
6703 gen_helper_cr4_testbit(tcg_env
, s
->tmp2_i32
);
6705 base
= cpu_seg_base
[modrm
& 8 ? R_GS
: R_FS
];
6706 treg
= cpu_regs
[(modrm
& 7) | REX_B(s
)];
6710 dst
= base
, src
= treg
;
6713 dst
= treg
, src
= base
;
6716 if (s
->dflag
== MO_32
) {
6717 tcg_gen_ext32u_tl(dst
, src
);
6719 tcg_gen_mov_tl(dst
, src
);
6725 case 0xf8: /* sfence / pcommit */
6726 if (prefixes
& PREFIX_DATA
) {
6728 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_PCOMMIT
)
6729 || (prefixes
& PREFIX_LOCK
)) {
6735 case 0xf9 ... 0xff: /* sfence */
6736 if (!(s
->cpuid_features
& CPUID_SSE
)
6737 || (prefixes
& PREFIX_LOCK
)) {
6740 tcg_gen_mb(TCG_MO_ST_ST
| TCG_BAR_SC
);
6742 case 0xe8 ... 0xef: /* lfence */
6743 if (!(s
->cpuid_features
& CPUID_SSE
)
6744 || (prefixes
& PREFIX_LOCK
)) {
6747 tcg_gen_mb(TCG_MO_LD_LD
| TCG_BAR_SC
);
6749 case 0xf0 ... 0xf7: /* mfence */
6750 if (!(s
->cpuid_features
& CPUID_SSE2
)
6751 || (prefixes
& PREFIX_LOCK
)) {
6754 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
6762 case 0x10d: /* 3DNow! prefetch(w) */
6763 modrm
= x86_ldub_code(env
, s
);
6764 mod
= (modrm
>> 6) & 3;
6767 gen_nop_modrm(env
, s
, modrm
);
6769 case 0x1aa: /* rsm */
6770 gen_svm_check_intercept(s
, SVM_EXIT_RSM
);
6771 if (!(s
->flags
& HF_SMM_MASK
))
6773 #ifdef CONFIG_USER_ONLY
6774 /* we should not be in SMM mode */
6775 g_assert_not_reached();
6777 gen_update_cc_op(s
);
6778 gen_update_eip_next(s
);
6779 gen_helper_rsm(tcg_env
);
6780 #endif /* CONFIG_USER_ONLY */
6781 s
->base
.is_jmp
= DISAS_EOB_ONLY
;
6783 case 0x1b8: /* SSE4.2 popcnt */
6784 if ((prefixes
& (PREFIX_REPZ
| PREFIX_LOCK
| PREFIX_REPNZ
)) !=
6787 if (!(s
->cpuid_ext_features
& CPUID_EXT_POPCNT
))
6790 modrm
= x86_ldub_code(env
, s
);
6791 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6793 if (s
->prefix
& PREFIX_DATA
) {
6796 ot
= mo_64_32(dflag
);
6799 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
6800 gen_extu(ot
, s
->T0
);
6801 tcg_gen_mov_tl(cpu_cc_src
, s
->T0
);
6802 tcg_gen_ctpop_tl(s
->T0
, s
->T0
);
6803 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
6805 set_cc_op(s
, CC_OP_POPCNT
);
6807 case 0x10e ... 0x117:
6808 case 0x128 ... 0x12f:
6809 case 0x138 ... 0x13a:
6810 case 0x150 ... 0x179:
6811 case 0x17c ... 0x17f:
6813 case 0x1c4 ... 0x1c6:
6814 case 0x1d0 ... 0x1fe:
6815 disas_insn_new(s
, cpu
, b
);
6822 gen_illegal_opcode(s
);
6825 gen_unknown_opcode(env
, s
);
6829 void tcg_x86_init(void)
6831 static const char reg_names
[CPU_NB_REGS
][4] = {
6832 #ifdef TARGET_X86_64
6860 static const char eip_name
[] = {
6861 #ifdef TARGET_X86_64
6867 static const char seg_base_names
[6][8] = {
6875 static const char bnd_regl_names
[4][8] = {
6876 "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
6878 static const char bnd_regu_names
[4][8] = {
6879 "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
6883 cpu_cc_op
= tcg_global_mem_new_i32(tcg_env
,
6884 offsetof(CPUX86State
, cc_op
), "cc_op");
6885 cpu_cc_dst
= tcg_global_mem_new(tcg_env
, offsetof(CPUX86State
, cc_dst
),
6887 cpu_cc_src
= tcg_global_mem_new(tcg_env
, offsetof(CPUX86State
, cc_src
),
6889 cpu_cc_src2
= tcg_global_mem_new(tcg_env
, offsetof(CPUX86State
, cc_src2
),
6891 cpu_eip
= tcg_global_mem_new(tcg_env
, offsetof(CPUX86State
, eip
), eip_name
);
6893 for (i
= 0; i
< CPU_NB_REGS
; ++i
) {
6894 cpu_regs
[i
] = tcg_global_mem_new(tcg_env
,
6895 offsetof(CPUX86State
, regs
[i
]),
6899 for (i
= 0; i
< 6; ++i
) {
6901 = tcg_global_mem_new(tcg_env
,
6902 offsetof(CPUX86State
, segs
[i
].base
),
6906 for (i
= 0; i
< 4; ++i
) {
6908 = tcg_global_mem_new_i64(tcg_env
,
6909 offsetof(CPUX86State
, bnd_regs
[i
].lb
),
6912 = tcg_global_mem_new_i64(tcg_env
,
6913 offsetof(CPUX86State
, bnd_regs
[i
].ub
),
6918 static void i386_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cpu
)
6920 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6921 CPUX86State
*env
= cpu_env(cpu
);
6922 uint32_t flags
= dc
->base
.tb
->flags
;
6923 uint32_t cflags
= tb_cflags(dc
->base
.tb
);
6924 int cpl
= (flags
>> HF_CPL_SHIFT
) & 3;
6925 int iopl
= (flags
>> IOPL_SHIFT
) & 3;
6927 dc
->cs_base
= dc
->base
.tb
->cs_base
;
6928 dc
->pc_save
= dc
->base
.pc_next
;
6930 #ifndef CONFIG_USER_ONLY
6935 /* We make some simplifying assumptions; validate they're correct. */
6936 g_assert(PE(dc
) == ((flags
& HF_PE_MASK
) != 0));
6937 g_assert(CPL(dc
) == cpl
);
6938 g_assert(IOPL(dc
) == iopl
);
6939 g_assert(VM86(dc
) == ((flags
& HF_VM_MASK
) != 0));
6940 g_assert(CODE32(dc
) == ((flags
& HF_CS32_MASK
) != 0));
6941 g_assert(CODE64(dc
) == ((flags
& HF_CS64_MASK
) != 0));
6942 g_assert(SS32(dc
) == ((flags
& HF_SS32_MASK
) != 0));
6943 g_assert(LMA(dc
) == ((flags
& HF_LMA_MASK
) != 0));
6944 g_assert(ADDSEG(dc
) == ((flags
& HF_ADDSEG_MASK
) != 0));
6945 g_assert(SVME(dc
) == ((flags
& HF_SVME_MASK
) != 0));
6946 g_assert(GUEST(dc
) == ((flags
& HF_GUEST_MASK
) != 0));
6948 dc
->cc_op
= CC_OP_DYNAMIC
;
6949 dc
->cc_op_dirty
= false;
6950 dc
->popl_esp_hack
= 0;
6951 /* select memory access functions */
6952 dc
->mem_index
= cpu_mmu_index(env
, false);
6953 dc
->cpuid_features
= env
->features
[FEAT_1_EDX
];
6954 dc
->cpuid_ext_features
= env
->features
[FEAT_1_ECX
];
6955 dc
->cpuid_ext2_features
= env
->features
[FEAT_8000_0001_EDX
];
6956 dc
->cpuid_ext3_features
= env
->features
[FEAT_8000_0001_ECX
];
6957 dc
->cpuid_7_0_ebx_features
= env
->features
[FEAT_7_0_EBX
];
6958 dc
->cpuid_7_0_ecx_features
= env
->features
[FEAT_7_0_ECX
];
6959 dc
->cpuid_xsave_features
= env
->features
[FEAT_XSAVE
];
6960 dc
->jmp_opt
= !((cflags
& CF_NO_GOTO_TB
) ||
6961 (flags
& (HF_TF_MASK
| HF_INHIBIT_IRQ_MASK
)));
6963 * If jmp_opt, we want to handle each string instruction individually.
6964 * For icount also disable repz optimization so that each iteration
6965 * is accounted separately.
6967 dc
->repz_opt
= !dc
->jmp_opt
&& !(cflags
& CF_USE_ICOUNT
);
6969 dc
->T0
= tcg_temp_new();
6970 dc
->T1
= tcg_temp_new();
6971 dc
->A0
= tcg_temp_new();
6973 dc
->tmp0
= tcg_temp_new();
6974 dc
->tmp1_i64
= tcg_temp_new_i64();
6975 dc
->tmp2_i32
= tcg_temp_new_i32();
6976 dc
->tmp3_i32
= tcg_temp_new_i32();
6977 dc
->tmp4
= tcg_temp_new();
6978 dc
->cc_srcT
= tcg_temp_new();
6981 static void i386_tr_tb_start(DisasContextBase
*db
, CPUState
*cpu
)
6985 static void i386_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cpu
)
6987 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6988 target_ulong pc_arg
= dc
->base
.pc_next
;
6990 dc
->prev_insn_end
= tcg_last_op();
6991 if (tb_cflags(dcbase
->tb
) & CF_PCREL
) {
6992 pc_arg
-= dc
->cs_base
;
6993 pc_arg
&= ~TARGET_PAGE_MASK
;
6995 tcg_gen_insn_start(pc_arg
, dc
->cc_op
);
6998 static void i386_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cpu
)
7000 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
7002 #ifdef TARGET_VSYSCALL_PAGE
7004 * Detect entry into the vsyscall page and invoke the syscall.
7006 if ((dc
->base
.pc_next
& TARGET_PAGE_MASK
) == TARGET_VSYSCALL_PAGE
) {
7007 gen_exception(dc
, EXCP_VSYSCALL
);
7008 dc
->base
.pc_next
= dc
->pc
+ 1;
7013 if (disas_insn(dc
, cpu
)) {
7014 target_ulong pc_next
= dc
->pc
;
7015 dc
->base
.pc_next
= pc_next
;
7017 if (dc
->base
.is_jmp
== DISAS_NEXT
) {
7018 if (dc
->flags
& (HF_TF_MASK
| HF_INHIBIT_IRQ_MASK
)) {
7020 * If single step mode, we generate only one instruction and
7021 * generate an exception.
7022 * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7023 * the flag and abort the translation to give the irqs a
7026 dc
->base
.is_jmp
= DISAS_EOB_NEXT
;
7027 } else if (!is_same_page(&dc
->base
, pc_next
)) {
7028 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
7034 static void i386_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cpu
)
7036 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
7038 switch (dc
->base
.is_jmp
) {
7039 case DISAS_NORETURN
:
7041 case DISAS_TOO_MANY
:
7042 gen_update_cc_op(dc
);
7043 gen_jmp_rel_csize(dc
, 0, 0);
7045 case DISAS_EOB_NEXT
:
7046 gen_update_cc_op(dc
);
7047 gen_update_eip_cur(dc
);
7049 case DISAS_EOB_ONLY
:
7052 case DISAS_EOB_INHIBIT_IRQ
:
7053 gen_update_cc_op(dc
);
7054 gen_update_eip_cur(dc
);
7055 gen_eob_inhibit_irq(dc
, true);
7061 g_assert_not_reached();
7065 static void i386_tr_disas_log(const DisasContextBase
*dcbase
,
7066 CPUState
*cpu
, FILE *logfile
)
7068 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
7070 fprintf(logfile
, "IN: %s\n", lookup_symbol(dc
->base
.pc_first
));
7071 target_disas(logfile
, cpu
, dc
->base
.pc_first
, dc
->base
.tb
->size
);
7074 static const TranslatorOps i386_tr_ops
= {
7075 .init_disas_context
= i386_tr_init_disas_context
,
7076 .tb_start
= i386_tr_tb_start
,
7077 .insn_start
= i386_tr_insn_start
,
7078 .translate_insn
= i386_tr_translate_insn
,
7079 .tb_stop
= i386_tr_tb_stop
,
7080 .disas_log
= i386_tr_disas_log
,
7083 /* generate intermediate code for basic block 'tb'. */
7084 void gen_intermediate_code(CPUState
*cpu
, TranslationBlock
*tb
, int *max_insns
,
7085 target_ulong pc
, void *host_pc
)
7089 translator_loop(cpu
, tb
, max_insns
, pc
, host_pc
, &i386_tr_ops
, &dc
.base
);