/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"
#include "fpu/softfloat.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "helper-tcg.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20
#define PREFIX_REX    0x40
/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7

//#define MACRO_TEST   1
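
/*
 * Illustrative expansion (not part of the original source): for OP = 0,
 * CASE_MODRM_MEM_OP(0) matches every modrm byte whose reg/opcode field is 0
 * and whose mod field is 0, 1 or 2 (all memory addressing forms), while
 * CASE_MODRM_OP(0) additionally matches mod == 3 (the register forms).
 */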
/* global register indexes */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
static TCGv cpu_eip;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];
typedef struct DisasContext {
    DisasContextBase base;

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */
    target_ulong pc_save;

    MemOp aflag;
    MemOp dflag;

    int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
    uint8_t prefix;

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement.  */
    uint8_t popl_esp_hack; /* for correct popl with esp base handling */
    uint8_t rip_offset; /* only used in x86_64, but left for simplicity */

#ifdef TARGET_X86_64
    uint8_t rex_r;
    uint8_t rex_x;
    uint8_t rex_b;
#endif
    bool vex_w; /* used by AVX even on 32-bit processors */
    bool jmp_opt; /* use direct block chaining for direct jumps */
    bool repz_opt; /* optimize jumps within repz instructions */
    bool cc_op_dirty;

    CCOp cc_op;  /* current CC operation */
    int mem_index; /* select memory access functions */
    uint32_t flags; /* all execution flags */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_7_0_ecx_features;
    int cpuid_7_1_eax_features;
    int cpuid_xsave_features;

    /* TCG local temps */
    TCGv cc_srcT;
    TCGv A0;
    TCGv T0;
    TCGv T1;

    /* TCG local register indexes (only used inside old micro ops) */
    TCGv tmp0;
    TCGv tmp4;
    TCGv_i32 tmp2_i32;
    TCGv_i32 tmp3_i32;
    TCGv_i64 tmp1_i64;

    sigjmp_buf jmpbuf;
    TCGOp *prev_insn_start;
    TCGOp *prev_insn_end;
} DisasContext;
#define DISAS_EOB_ONLY         DISAS_TARGET_0
#define DISAS_EOB_NEXT         DISAS_TARGET_1
#define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_2
#define DISAS_JUMP             DISAS_TARGET_3
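
/*
 * Rough meaning of the custom exit conditions above, summarized from how
 * they are used elsewhere in this file: the DISAS_EOB_* codes end the
 * translation block, with DISAS_EOB_NEXT first pointing EIP at the next
 * instruction and DISAS_EOB_INHIBIT_IRQ additionally suppressing
 * interrupts for one instruction; DISAS_JUMP ends the block with EIP
 * already set by an indirect jump or call.
 */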
/* The environment in which user-only runs is constrained. */
#ifdef CONFIG_USER_ONLY
#define PE(S)     true
#define CPL(S)    3
#define IOPL(S)   0
#define SVME(S)   false
#define GUEST(S)  false
#else
#define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
#define CPL(S)    ((S)->cpl)
#define IOPL(S)   ((S)->iopl)
#define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
#define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
#endif

#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
#define VM86(S)   false
#define CODE32(S) true
#define SS32(S)   true
#define ADDSEG(S) false
#else
#define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
#define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
#endif
#if !defined(TARGET_X86_64)
#define CODE64(S) false
#elif defined(CONFIG_USER_ONLY)
#define CODE64(S) true
#else
#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
#define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
#else
#define LMA(S)    false
#endif

#ifdef TARGET_X86_64
#define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
#define REX_W(S)       ((S)->vex_w)
#define REX_R(S)       ((S)->rex_r + 0)
#define REX_X(S)       ((S)->rex_x + 0)
#define REX_B(S)       ((S)->rex_b + 0)
#else
#define REX_PREFIX(S)  false
#define REX_W(S)       false
#define REX_R(S)       0
#define REX_X(S)       0
#define REX_B(S)       0
#endif
/*
 * Many sysemu-only helpers are not reachable for user-only.
 * Define stub generators here, so that we need not either sprinkle
 * ifdefs through the translator, nor provide the helper function.
 */
#define STUB_HELPER(NAME, ...) \
    static inline void gen_helper_##NAME(__VA_ARGS__) \
    { qemu_build_not_reached(); }
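
/*
 * For example, STUB_HELPER(rdmsr, TCGv_env env) expands to
 *     static inline void gen_helper_rdmsr(TCGv_env env)
 *     { qemu_build_not_reached(); }
 * so a user-only build still compiles calls to gen_helper_rdmsr() while
 * asserting at build time that they are unreachable.
 */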
#ifdef CONFIG_USER_ONLY
STUB_HELPER(clgi, TCGv_env env)
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(rdmsr, TCGv_env env)
STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(wrmsr, TCGv_env env)
#endif
static void gen_eob(DisasContext *s);
static void gen_jr(DisasContext *s);
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
static void gen_op(DisasContext *s1, int op, MemOp ot, int d);
static void gen_exception_gpf(DisasContext *s);
/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};
/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
    [CC_OP_POPCNT] = USES_CC_SRC,
};
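
/*
 * Example of how this table is used: after a logic op
 * (CC_OP_LOGICB ... CC_OP_LOGICQ) only CC_DST is live, so a following
 * set_cc_op() may discard cpu_cc_src and cpu_cc_src2 instead of keeping
 * them alive across the translation block.
 */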
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}
static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}
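
/*
 * Together, set_cc_op() and gen_update_cc_op() implement lazy flag
 * evaluation: the translator tracks the pending CC operation in s->cc_op
 * and only stores it to the cpu_cc_op global when the value might actually
 * be consumed, e.g. before a helper call or at the end of the block.
 */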
#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */
#if HOST_BIG_ENDIAN
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
{
    /* Any time the REX prefix is present, byte registers are uniform */
    if (reg < 4 || REX_PREFIX(s)) {
        return false;
    }
    return true;
}
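
/*
 * Example: with no REX prefix, byte register number 4 in a modrm byte
 * means AH (bits 15..8 of EAX), so byte_reg_is_xH() returns true; with
 * any REX prefix present the same encoding means SPL, the low byte of
 * RSP, and the function returns false.
 */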
/* Select the size of a push/pop operation.  */
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select the size of the stack pointer.  */
static inline MemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline MemOp mo_64_32(MemOp ot)
{
#ifdef TARGET_X86_64
    return ot == MO_64 ? MO_64 : MO_32;
#else
    return MO_32;
#endif
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline MemOp mo_b_d(int b, MemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline MemOp mo_b_d32(int b, MemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}
/* Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register and return the
 * register's TCGv.
 *
 * If DEST is not NULL, store the result into DEST and return the
 * TCGv for the register.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg,
                                 TCGv dest, TCGv t0)
{
    switch (ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}
static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}

static inline
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}
static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(s->A0, s->A0);
    }
}
static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
    s->pc_save = -1;
}
static inline
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}
static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, s->T0, s->A0);
    } else {
        gen_op_mov_reg_v(s, idx, d, s->T0);
    }
}
static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
    }
    s->pc_save = s->base.pc_next;
}

static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->pc);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
    }
    s->pc_save = s->pc;
}
static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}

static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}
static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
     * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
     * why passing a 32-bit value isn't broken.  To avoid using this where
     * we shouldn't, return -1 in 64-bit mode so that execution goes into
     * the weeds quickly.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}
static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->pc);
    } else {
        return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
    }
}

static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->base.pc_next);
    } else {
        return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
    }
}
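
/*
 * The three helpers above all follow the same pattern: when the TB is
 * position-independent (CF_PCREL), EIP is computed relative to the last
 * value stored at s->pc_save; otherwise it can be emitted as a constant,
 * either the linear pc (64-bit code) or eip = pc - cs_base.
 */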
/* Compute SEG:REG into DEST.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
                               int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(dest, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(dest, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        tcg_gen_ext16u_tl(dest, a0);
        a0 = dest;
        if (ovr_seg < 0) {
            if (ADDSEG(s)) {
                ovr_seg = def_seg;
            } else {
                return;
            }
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(dest, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(dest, a0);
            tcg_gen_add_tl(dest, dest, seg);
        } else {
            tcg_gen_add_tl(dest, a0, seg);
            tcg_gen_ext32u_tl(dest, dest);
        }
    }
}
static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
{
    gen_lea_v_seg_dest(s, aflag, s->A0, a0, def_seg, ovr_seg);
}

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}

static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot)
{
    TCGv dshift = tcg_temp_new();
    tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(dshift, dshift, ot);
    return dshift;
}
static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    if (size == MO_TL) {
        return src;
    }
    if (!dst) {
        dst = tcg_temp_new();
    }
    tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
    return dst;
}

static void gen_extu(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}
static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);

    tcg_gen_brcondi_tl(cond, tmp, 0, label1);
}

static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}

static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}
static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, tcg_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, tcg_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, tcg_env, n);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(tcg_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(tcg_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(tcg_env, v, n);
        break;
    default:
        g_assert_not_reached();
    }
}
/*
 * Validate that access to [port, port + 1<<ot) is allowed.
 * Raise #GP, or VMM exit if not.
 */
static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
                         uint32_t svm_flags)
{
#ifdef CONFIG_USER_ONLY
    /*
     * We do not implement the ioperm(2) syscall, so the TSS check
     * will always fail.
     */
    gen_exception_gpf(s);
    return false;
#else
    if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
        gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
    }
    if (GUEST(s)) {
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
            svm_flags |= SVM_IOIO_REP_MASK;
        }
        svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
        gen_helper_svm_check_io(tcg_env, port,
                                tcg_constant_i32(svm_flags),
                                cur_insn_len_i32(s));
    }
    return true;
#endif
}
static void gen_movs(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}
static void gen_op_update1_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update2_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update3_cc(DisasContext *s, TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
{
    tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);
}

static void gen_op_update_neg_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
    tcg_gen_neg_tl(cpu_cc_src, s->T0);
    tcg_gen_movi_tl(s->cc_srcT, 0);
}
/* compute all eflags to reg */
static void gen_mov_eflags(DisasContext *s, TCGv reg)
{
    TCGv dst, src1, src2;
    TCGv_i32 cc_op;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        tcg_gen_mov_tl(reg, cpu_cc_src);
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(reg, CC_Z | CC_P);
        return;
    }

    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        TCGv zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    if (s->cc_op != CC_OP_DYNAMIC) {
        cc_op = tcg_constant_i32(s->cc_op);
    } else {
        cc_op = cpu_cc_op;
    }
    gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
}
/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    gen_mov_eflags(s, cpu_cc_src);
    set_cc_op(s, CC_OP_EFLAGS);
}

typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;
static CCPrepare gen_prepare_sign_nz(TCGv src, MemOp size)
{
    if (size == MO_TL) {
        return (CCPrepare) { .cond = TCG_COND_LT, .reg = src };
    } else {
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = src,
                             .imm = 1ull << ((8 << size) - 1) };
    }
}
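
/*
 * For example, for size == MO_8 the returned condition tests
 * src & 0x80 (TSTNE with imm = 1 << 7), i.e. the sign bit of the 8-bit
 * result, without needing a separate sign-extension op.
 */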
/* compute eflags.C, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    MemOp size;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false);
        gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = s->cc_srcT,
                             .reg2 = cpu_cc_src, .use_reg2 = true };

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size, false);
        gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = cpu_cc_dst,
                             .reg2 = cpu_cc_src, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        return gen_prepare_sign_nz(cpu_cc_src, size);

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_TSTNE,
                             .reg = cpu_cc_src, .imm = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB.  */
        gen_update_cc_op(s);
        if (!reg) {
            reg = tcg_temp_new();
        }
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .no_setcond = true };
    }
}
/* compute eflags.P, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                         .imm = CC_P };
}
/* compute eflags.S, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            return gen_prepare_sign_nz(cpu_cc_dst, size);
        }
    }
}
/* compute eflags.O, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .no_setcond = true };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER };
    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_O };
    }
}
/* compute eflags.Z, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            if (size == MO_TL) {
                return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_dst };
            } else {
                return (CCPrepare) { .cond = TCG_COND_TSTEQ, .reg = cpu_cc_dst,
                                     .imm = (1ull << (8 << size)) - 1 };
            }
        }
    }
}
/* return how to compute jump opcode 'b'.  'reg' can be clobbered
 * if needed; it may be used for CCPrepare.reg if that will
 * provide more freedom in the translation of a subsequent setcond. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false);
            gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->cc_srcT,
                               .reg2 = cpu_cc_src, .use_reg2 = true };
            break;
        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            gen_ext_tl(s->cc_srcT, s->cc_srcT, size, true);
            gen_ext_tl(cpu_cc_src, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = s->cc_srcT,
                               .reg2 = cpu_cc_src, .use_reg2 = true };
            break;
        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                               .imm = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (!reg || reg == cpu_cc_src) {
                reg = tcg_temp_new();
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
                               .imm = CC_O };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (!reg || reg == cpu_cc_src) {
                reg = tcg_temp_new();
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
                               .imm = CC_O | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}
static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   One or both of the branches will call gen_jmp_rel, so ensure
   cc_op is clean.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    gen_update_cc_op(s);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem.  The caller can jump to the returned label
   to stop the REP but, if the flags have changed, it has to call
   gen_update_cc_op before doing so.  */
static TCGLabel *gen_jz_ecx_string(DisasContext *s)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();

    gen_update_cc_op(s);
    gen_op_jnz_ecx(s, l1);
    gen_set_label(l2);
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    return l2;
}
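
/*
 * Control flow of the labels above, roughly:
 *     if (ECX != 0) goto l1;    l2:  exit TB to the next instruction
 *     l1:                            fall through into the REP body
 * Callers branch back to the returned l2 when the string op must stop.
 */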
static void gen_stos(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}

static void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
}

static void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}

static void gen_cmps(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}
static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}
static void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}

static void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}
/* Generate jumps to current or next instruction */
static void gen_repz(DisasContext *s, MemOp ot,
                     void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    /*
     * A loop would cause two single step exceptions if ECX = 1
     * before rep string_insn
     */
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \
    { gen_repz(s, ot, gen_##op); }

static void gen_repz2(DisasContext *s, MemOp ot, int nz,
                      void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    /*
     * Only one iteration is done at a time, so the translation
     * block ends unconditionally after this instruction and there
     * is no control flow junction - no need to set CC_OP_DYNAMIC.
     */
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ2(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
    { gen_repz2(s, ot, nz, gen_##op); }
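
/*
 * Illustrative expansion: GEN_REPZ2(scas) defines
 *     static inline void gen_repz_scas(DisasContext *s, MemOp ot, int nz)
 *     { gen_repz2(s, ot, nz, gen_scas); }
 * where nz selects between the REPZ and REPNZ termination conditions.
 */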
static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(tcg_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(tcg_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(tcg_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(tcg_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(tcg_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(tcg_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_constant_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(tcg_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(tcg_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(tcg_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(tcg_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(tcg_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(tcg_env, tmp);
        break;
    }
}
static void gen_exception(DisasContext *s, int trapno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
    s->base.is_jmp = DISAS_NORETURN;
}

/* Generate #UD for the current instruction.  The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode.  */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP);
}

/* Generate #GP for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
{
    gen_exception(s, EXCP0D_GPF);
}

/* Check for cpl == 0; if not, raise #GP and return false. */
static bool check_cpl0(DisasContext *s)
{
    if (CPL(s) == 0) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* If vm86, check for iopl == 3; if not, raise #GP and return false. */
static bool check_vm86_iopl(DisasContext *s)
{
    if (!VM86(s) || IOPL(s) == 3) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* Check for iopl allowing access; if not, raise #GP and return false. */
static bool check_iopl(DisasContext *s)
{
    if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, MemOp ot, int d)
{
    /* Invalid lock prefix when destination is not memory or OP_CMPL. */
    if ((d != OR_TMP0 || op == OP_CMPL) && s1->prefix & PREFIX_LOCK) {
        gen_illegal_opcode(s1);
        return;
    }

    if (d != OR_TMP0) {
        gen_op_mov_v_reg(s1, ot, s1->T0, d);
    } else if (!(s1->prefix & PREFIX_LOCK)) {
        gen_op_ld_v(s1, ot, s1->T0, s1->A0);
    }
    switch (op) {
    case OP_ADCL:
        gen_compute_eflags_c(s1, s1->tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        gen_compute_eflags_c(s1, s1->tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4);
            tcg_gen_neg_tl(s1->T0, s1->T0);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_neg_tl(s1->T0, s1->T1);
            tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
            tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1);
        } else {
            tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    default:
    case OP_ANDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_and_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_ORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1,
                                       s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_or_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_XORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_CMPL:
        tcg_gen_mov_tl(cpu_cc_src, s1->T1);
        tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
        tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    }
}
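
/*
 * Note on the pattern used throughout gen_op(): for a locked memory
 * destination the read-modify-write must be a single atomic TCG op
 * (e.g. tcg_gen_atomic_add_fetch_tl), while the unlocked path is free
 * to load into T0, compute, and store the result separately.
 */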
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, MemOp ot, int d, int c)
{
    if (s1->prefix & PREFIX_LOCK) {
        if (d != OR_TMP0) {
            /* Lock prefix when destination is not memory */
            gen_illegal_opcode(s1);
            return;
        }
        tcg_gen_movi_tl(s1->T0, c > 0 ? 1 : -1);
        tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                    s1->mem_index, ot | MO_LE);
    } else {
        if (d != OR_TMP0) {
            gen_op_mov_v_reg(s1, ot, s1->T0, d);
        } else {
            gen_op_ld_v(s1, ot, s1->T0, s1->A0);
        }
        tcg_gen_addi_tl(s1->T0, s1->T0, (c > 0 ? 1 : -1));
        gen_op_st_rm_T0_A0(s1, ot, d);
    }

    gen_compute_eflags_c(s1, cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, s1->T0);
    set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
}
static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents.  */
    z_tl = tcg_constant_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
        oldop = s->tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_constant_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    tcg_gen_andi_tl(s->T1, s->T1, mask);
    tcg_gen_subi_tl(s->tmp0, s->T1, 1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, s->T0);
            tcg_gen_sar_tl(s->tmp0, s->T0, s->tmp0);
            tcg_gen_sar_tl(s->T0, s->T0, s->T1);
        } else {
            gen_extu(ot, s->T0);
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_tl(s->T0, s->T0, s->T1);
        }
    } else {
        tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
        tcg_gen_shl_tl(s->T0, s->T0, s->T1);
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right);
}
static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, s->T0);
                tcg_gen_sari_tl(s->tmp4, s->T0, op2 - 1);
                tcg_gen_sari_tl(s->T0, s->T0, op2);
            } else {
                gen_extu(ot, s->T0);
                tcg_gen_shri_tl(s->tmp4, s->T0, op2 - 1);
                tcg_gen_shri_tl(s->T0, s->T0, op2);
            }
        } else {
            tcg_gen_shli_tl(s->tmp4, s->T0, op2 - 1);
            tcg_gen_shli_tl(s->T0, s->T0, op2);
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, s->T0);
        set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    }
}
static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
    TCGv_i32 t0, t1;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    tcg_gen_andi_tl(s->T1, s->T1, mask);

    switch (ot) {
    case MO_8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_tl(s->T0, s->T0);
        tcg_gen_muli_tl(s->T0, s->T0, 0x01010101);
        goto do_long;
    case MO_16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_tl(s->T0, s->T0, s->T0, 16, 16);
        goto do_long;
    do_long:
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
        if (is_right) {
            tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
        } else {
            tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
        }
        tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
        break;
#endif
    default:
        if (is_right) {
            tcg_gen_rotr_tl(s->T0, s->T0, s->T1);
        } else {
            tcg_gen_rotl_tl(s->T0, s->T0, s->T1);
        }
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* We'll need the flags computed into CC_SRC.  */
    gen_compute_eflags(s);

    /* The value that was "rotated out" is now present at the other end
       of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
       since we've computed the flags into CC_SRC, these variables are
       currently dead.  */
    if (is_right) {
        tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
        tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
    } else {
        tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
        tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
    }
    tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
    tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);

    /* Now conditionally store the new CC_OP value.  If the shift count
       is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
       Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
       exactly as we computed above.  */
    t0 = tcg_constant_i32(0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, s->T1);
    tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX);
    tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
                        s->tmp2_i32, s->tmp3_i32);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
                          int is_right)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);
    int shift;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_32:
            tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
            if (is_right) {
                tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, op2);
            } else {
                tcg_gen_rotli_i32(s->tmp2_i32, s->tmp2_i32, op2);
            }
            tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
            break;
#endif
        default:
            if (is_right) {
                tcg_gen_rotri_tl(s->T0, s->T0, op2);
            } else {
                tcg_gen_rotli_tl(s->T0, s->T0, op2);
            }
            break;
        case MO_8:
            mask = 7;
            goto do_shifts;
        case MO_16:
            mask = 15;
        do_shifts:
            shift = op2 & mask;
            if (is_right) {
                shift = mask + 1 - shift;
            }
            gen_extu(ot, s->T0);
            tcg_gen_shli_tl(s->tmp0, s->T0, shift);
            tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift);
            tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
            break;
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    if (op2 != 0) {
        /* Compute the flags into CC_SRC.  */
        gen_compute_eflags(s);

        /* The value that was "rotated out" is now present at the other end
           of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
           since we've computed the flags into CC_SRC, these variables are
           currently dead.  */
        if (is_right) {
            tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
            tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
        } else {
            tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
            tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
        }
        tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
        tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
        set_cc_op(s, CC_OP_ADCOX);
    }
}
/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
                           int is_right)
{
    gen_compute_eflags(s);
    assert(s->cc_op == CC_OP_EFLAGS);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    if (is_right) {
        switch (ot) {
        case MO_8:
            gen_helper_rcrb(s->T0, tcg_env, s->T0, s->T1);
            break;
        case MO_16:
            gen_helper_rcrw(s->T0, tcg_env, s->T0, s->T1);
            break;
        case MO_32:
            gen_helper_rcrl(s->T0, tcg_env, s->T0, s->T1);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rcrq(s->T0, tcg_env, s->T0, s->T1);
            break;
#endif
        default:
            g_assert_not_reached();
        }
    } else {
        switch (ot) {
        case MO_8:
            gen_helper_rclb(s->T0, tcg_env, s->T0, s->T1);
            break;
        case MO_16:
            gen_helper_rclw(s->T0, tcg_env, s->T0, s->T1);
            break;
        case MO_32:
            gen_helper_rcll(s->T0, tcg_env, s->T0, s->T1);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rclq(s->T0, tcg_env, s->T0, s->T1);
            break;
#endif
        default:
            g_assert_not_reached();
        }
    }
    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);
}
/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
                             bool is_right, TCGv count_in)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);
    TCGv count;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
        if (is_right) {
            tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
            tcg_gen_mov_tl(s->T1, s->T0);
            tcg_gen_mov_tl(s->T0, s->tmp0);
        } else {
            tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
        }
        /*
         * If TARGET_X86_64 defined then fall through into MO_32 case,
         * otherwise fall through default case.
         */
    case MO_32:
#ifdef TARGET_X86_64
        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
            tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_i64(s->T0, s->T0, count);
        } else {
            tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
            tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shl_i64(s->T0, s->T0, count);
            tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
            tcg_gen_shri_i64(s->T0, s->T0, 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shr_tl(s->T0, s->T0, count);
            tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
        } else {
            tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour.  */
                tcg_gen_subfi_tl(s->tmp4, 33, count);
                tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
                tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
            }

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shl_tl(s->T0, s->T0, count);
            tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
        }
        tcg_gen_movi_tl(s->tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
                           s->tmp4, s->T1);
        tcg_gen_or_tl(s->T0, s->T0, s->T1);
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
}
static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s)
{
    if (s != OR_TMP1) {
        gen_op_mov_v_reg(s1, ot, s1->T1, s);
    }
    switch (op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}

static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c)
{
    switch (op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        tcg_gen_movi_tl(s1->T1, c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}
#define X86_MAX_INSN_LENGTH 15

static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
{
    uint64_t pc = s->pc;

    /* This is a subsequent insn that crosses a page boundary.  */
    if (s->base.num_insns > 1 &&
        !is_same_page(&s->base, s->pc + num_bytes - 1)) {
        siglongjmp(s->jmpbuf, 2);
    }

    s->pc += num_bytes;
    if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
        /* If the instruction's 16th byte is on a different page than the 1st, a
         * page fault on the second page wins over the general protection fault
         * caused by the instruction being too long.
         * This can happen even if the operand is only one byte long!
         */
        if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
            volatile uint8_t unused =
                cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
            (void) unused;
        }
        siglongjmp(s->jmpbuf, 1);
    }

    return pc;
}
static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldub(env, &s->base, advance_pc(env, s, 1));
}

static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
{
    return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}

static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
{
    return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}

static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldl(env, &s->base, advance_pc(env, s, 4));
}

#ifdef TARGET_X86_64
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldq(env, &s->base, advance_pc(env, s, 8));
}
#endif
2173 typedef struct AddressParts
{
2181 static AddressParts
gen_lea_modrm_0(CPUX86State
*env
, DisasContext
*s
,
2184 int def_seg
, base
, index
, scale
, mod
, rm
;
2193 mod
= (modrm
>> 6) & 3;
2195 base
= rm
| REX_B(s
);
2198 /* Normally filtered out earlier, but including this path
2199 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
2208 int code
= x86_ldub_code(env
, s
);
2209 scale
= (code
>> 6) & 3;
2210 index
= ((code
>> 3) & 7) | REX_X(s
);
2212 index
= -1; /* no index */
2214 base
= (code
& 7) | REX_B(s
);
2220 if ((base
& 7) == 5) {
2222 disp
= (int32_t)x86_ldl_code(env
, s
);
2223 if (CODE64(s
) && !havesib
) {
2225 disp
+= s
->pc
+ s
->rip_offset
;
2230 disp
= (int8_t)x86_ldub_code(env
, s
);
2234 disp
= (int32_t)x86_ldl_code(env
, s
);
2238 /* For correct popl handling with esp. */
2239 if (base
== R_ESP
&& s
->popl_esp_hack
) {
2240 disp
+= s
->popl_esp_hack
;
2242 if (base
== R_EBP
|| base
== R_ESP
) {
2251 disp
= x86_lduw_code(env
, s
);
2254 } else if (mod
== 1) {
2255 disp
= (int8_t)x86_ldub_code(env
, s
);
2257 disp
= (int16_t)x86_lduw_code(env
, s
);
2297 g_assert_not_reached();
2301 return (AddressParts
){ def_seg
, base
, index
, scale
, disp
};
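
/*
 * Worked example (illustrative): modrm byte 0x44 in 32-bit code has
 * mod = 01 and rm = 100, so an SIB byte follows; SIB 0x24 gives
 * base = ESP with no index, and the mod = 01 form then reads one int8_t
 * displacement, yielding AddressParts { R_SS, R_ESP, -1, 0, disp }.
 */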
/* Compute the address, with a minimum number of TCG ops.  */
static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
{
    TCGv ea = NULL;

    if (a.index >= 0 && !is_vsib) {
        if (a.scale == 0) {
            ea = cpu_regs[a.index];
        } else {
            tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
            ea = s->A0;
        }
        if (a.base >= 0) {
            tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
            ea = s->A0;
        }
    } else if (a.base >= 0) {
        ea = cpu_regs[a.base];
    }
    if (!ea) {
        if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
            /* With cpu_eip ~= pc_save, the expression is pc-relative. */
            tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
        } else {
            tcg_gen_movi_tl(s->A0, a.disp);
        }
        ea = s->A0;
    } else if (a.disp != 0) {
        tcg_gen_addi_tl(s->A0, ea, a.disp);
        ea = s->A0;
    }

    return ea;
}
static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    AddressParts a = gen_lea_modrm_0(env, s, modrm);
    TCGv ea = gen_lea_modrm_1(s, a, false);
    gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
}

static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    (void)gen_lea_modrm_0(env, s, modrm);
}

/* Used for BNDCL, BNDCU, BNDCN.  */
static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
                      TCGCond cond, TCGv_i64 bndv)
{
    AddressParts a = gen_lea_modrm_0(env, s, modrm);
    TCGv ea = gen_lea_modrm_1(s, a, false);

    tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
    if (!CODE64(s)) {
        tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
    }
    tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
    tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
    gen_helper_bndck(tcg_env, s->tmp2_i32);
}

/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override);
}
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
                           MemOp ot, int reg, int is_store)
{
    int mod, rm;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0) {
                gen_op_mov_v_reg(s, ot, s->T0, reg);
            }
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
            if (reg != OR_TMP0) {
                gen_op_mov_reg_v(s, ot, reg, s->T0);
            }
        }
    } else {
        gen_lea_modrm(env, s, modrm);
        if (is_store) {
            if (reg != OR_TMP0) {
                gen_op_mov_v_reg(s, ot, s->T0, reg);
            }
            gen_op_st_v(s, ot, s->T0, s->A0);
        } else {
            gen_op_ld_v(s, ot, s->T0, s->A0);
            if (reg != OR_TMP0) {
                gen_op_mov_reg_v(s, ot, reg, s->T0);
            }
        }
    }
}
static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
{
    target_ulong ret;

    switch (ot) {
    case MO_8:
        ret = x86_ldub_code(env, s);
        break;
    case MO_16:
        ret = x86_lduw_code(env, s);
        break;
    case MO_32:
        ret = x86_ldl_code(env, s);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        ret = x86_ldq_code(env, s);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return ret;
}

static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
{
    uint32_t ret;

    switch (ot) {
    case MO_8:
        ret = x86_ldub_code(env, s);
        break;
    case MO_16:
        ret = x86_lduw_code(env, s);
        break;
    case MO_32:
#ifdef TARGET_X86_64
    case MO_64:
#endif
        ret = x86_ldl_code(env, s);
        break;
    default:
        g_assert_not_reached();
    }
    return ret;
}

static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
{
    target_long ret;

    switch (ot) {
    case MO_8:
        ret = (int8_t) x86_ldub_code(env, s);
        break;
    case MO_16:
        ret = (int16_t) x86_lduw_code(env, s);
        break;
    case MO_32:
        ret = (int32_t) x86_ldl_code(env, s);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        ret = x86_ldq_code(env, s);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return ret;
}

static inline int insn_const_size(MemOp ot)
{
    if (ot <= MO_32) {
        return 1 << ot;
    } else {
        return 4;
    }
}
static void gen_jcc(DisasContext *s, int b, int diff)
{
    TCGLabel *l1 = gen_new_label();

    gen_jcc1(s, b, l1);
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    gen_jmp_rel(s, s->dflag, diff, 0);
}

static void gen_cmovcc1(DisasContext *s, int b, TCGv dest, TCGv src)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    if (!cc.use_reg2) {
        cc.use_reg2 = true;
        cc.reg2 = tcg_constant_tl(cc.imm);
    }

    tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
}
static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg)
{
    tcg_gen_ld32u_tl(s->T0, tcg_env,
                     offsetof(CPUX86State,segs[seg_reg].selector));
}

static void gen_op_movl_seg_real(DisasContext *s, X86Seg seg_reg, TCGv seg)
{
    TCGv selector = tcg_temp_new();
    tcg_gen_ext16u_tl(selector, seg);
    tcg_gen_st32_tl(selector, tcg_env,
                    offsetof(CPUX86State,segs[seg_reg].selector));
    tcg_gen_shli_tl(cpu_seg_base[seg_reg], selector, 4);
}

/* move T0 to seg_reg and compute if the CPU state may change. Never
   call this function with seg_reg == R_CS */
static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
{
    if (PE(s) && !VM86(s)) {
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
        /* abort translation because the addseg value may change or
           because ss32 may change. For R_SS, translation must always
           stop as a special handling must be done to disable hardware
           interrupts for the next instruction */
        if (seg_reg == R_SS) {
            s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
        } else if (CODE32(s) && seg_reg < R_FS) {
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
    } else {
        gen_op_movl_seg_real(s, seg_reg, s->T0);
        if (seg_reg == R_SS) {
            s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
        }
    }
}
static void gen_far_call(DisasContext *s)
{
    TCGv_i32 new_cs = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(new_cs, s->T1);
    if (PE(s) && !VM86(s)) {
        gen_helper_lcall_protected(tcg_env, new_cs, s->T0,
                                   tcg_constant_i32(s->dflag - 1),
                                   eip_next_tl(s));
    } else {
        TCGv_i32 new_eip = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(new_eip, s->T0);
        gen_helper_lcall_real(tcg_env, new_cs, new_eip,
                              tcg_constant_i32(s->dflag - 1),
                              eip_next_i32(s));
    }
    s->base.is_jmp = DISAS_JUMP;
}

static void gen_far_jmp(DisasContext *s)
{
    if (PE(s) && !VM86(s)) {
        TCGv_i32 new_cs = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(new_cs, s->T1);
        gen_helper_ljmp_protected(tcg_env, new_cs, s->T0,
                                  eip_next_tl(s));
    } else {
        gen_op_movl_seg_real(s, R_CS, s->T1);
        gen_op_jmp_v(s, s->T0);
    }
    s->base.is_jmp = DISAS_JUMP;
}

static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
{
    /* no SVM activated; fast case */
    if (likely(!GUEST(s))) {
        return;
    }
    gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
}

static inline void gen_stack_update(DisasContext *s, int addend)
{
    gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
}
/* Generate a push. It depends on ss32, addseg and dflag.  */
static void gen_push_v(DisasContext *s, TCGv val)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = mo_stacksize(s);
    int size = 1 << d_ot;
    TCGv new_esp = s->A0;

    tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);

    if (!CODE64(s)) {
        if (ADDSEG(s)) {
            new_esp = tcg_temp_new();
            tcg_gen_mov_tl(new_esp, s->A0);
        }
        gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
    }

    gen_op_st_v(s, d_ot, val, s->A0);
    gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
}
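/*
 * Note: the decremented stack offset is copied out of A0 before
 * gen_lea_v_seg, because with ADDSEG the segment base is folded into A0
 * and the raw offset (the value ESP must receive) would otherwise be lost.
 */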
/* two step pop is necessary for precise exceptions */
static MemOp gen_pop_T0(DisasContext *s)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);

    gen_lea_v_seg_dest(s, mo_stacksize(s), s->T0, cpu_regs[R_ESP], R_SS, -1);
    gen_op_ld_v(s, d_ot, s->T0, s->T0);

    return d_ot;
}

static inline void gen_pop_update(DisasContext *s, MemOp ot)
{
    gen_stack_update(s, 1 << ot);
}

static inline void gen_stack_A0(DisasContext *s)
{
    gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
}
static void gen_pusha(DisasContext *s)
{
    MemOp s_ot = SS32(s) ? MO_32 : MO_16;
    MemOp d_ot = s->dflag;
    int size = 1 << d_ot;
    int i;

    for (i = 0; i < 8; i++) {
        tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size);
        gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
        gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
    }

    gen_stack_update(s, -8 * size);
}

static void gen_popa(DisasContext *s)
{
    MemOp s_ot = SS32(s) ? MO_32 : MO_16;
    MemOp d_ot = s->dflag;
    int size = 1 << d_ot;
    int i;

    for (i = 0; i < 8; i++) {
        /* ESP is not reloaded */
        if (7 - i == R_ESP) {
            continue;
        }
        tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size);
        gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
        gen_op_ld_v(s, d_ot, s->T0, s->A0);
        gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
    }

    gen_stack_update(s, 8 * size);
}
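/*
 * Note: cpu_regs[7 - i] walks EDI down to EAX, so PUSHA stores EAX at the
 * highest address (it is pushed first) and POPA reloads everything except
 * ESP, matching the architectural definition of these instructions.
 */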
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
    int size = 1 << d_ot;

    /* Push BP; compute FrameTemp into T1.  */
    tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
    gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1);
    gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);

    level &= 31;
    if (level != 0) {
        int i;

        /* Copy level-1 pointers from the previous frame.  */
        for (i = 1; i < level; ++i) {
            tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i);
            gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
            gen_op_ld_v(s, d_ot, s->tmp0, s->A0);

            tcg_gen_subi_tl(s->A0, s->T1, size * i);
            gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
            gen_op_st_v(s, d_ot, s->tmp0, s->A0);
        }

        /* Push the current FrameTemp as the last level.  */
        tcg_gen_subi_tl(s->A0, s->T1, size * level);
        gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
        gen_op_st_v(s, d_ot, s->T1, s->A0);
    }

    /* Copy the FrameTemp value to EBP.  */
    gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1);

    /* Compute the final value of ESP.  */
    tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
    gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
}
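/*
 * Note: the nesting level of ENTER is defined modulo 32 (hence level &= 31
 * above), so at most 31 outer-frame pointers are copied before FrameTemp
 * itself is pushed as the innermost level.
 */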
static void gen_leave(DisasContext *s)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = mo_stacksize(s);

    gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
    gen_op_ld_v(s, d_ot, s->T0, s->A0);

    tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);

    gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
    gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
}
/* Similarly, except that the assumption here is that we don't decode
   the instruction at all -- either a missing opcode, an unimplemented
   feature, or just a bogus instruction stream.  */
static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
{
    gen_illegal_opcode(s);

    if (qemu_loglevel_mask(LOG_UNIMP)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            target_ulong pc = s->base.pc_next, end = s->pc;

            fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
            for (; pc < end; ++pc) {
                fprintf(logfile, " %02x", cpu_ldub_code(env, pc));
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }
}

/* an interrupt is different from an exception because of the
   privilege checks */
static void gen_interrupt(DisasContext *s, int intno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
                               cur_insn_len_i32(s));
    s->base.is_jmp = DISAS_NORETURN;
}
static void gen_set_hflag(DisasContext *s, uint32_t mask)
{
    if ((s->flags & mask) == 0) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
        tcg_gen_ori_i32(t, t, mask);
        tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
        s->flags |= mask;
    }
}

static void gen_reset_hflag(DisasContext *s, uint32_t mask)
{
    if (s->flags & mask) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
        tcg_gen_andi_i32(t, t, ~mask);
        tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
        s->flags &= ~mask;
    }
}

static void gen_set_eflags(DisasContext *s, target_ulong mask)
{
    TCGv t = tcg_temp_new();

    tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
    tcg_gen_ori_tl(t, t, mask);
    tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
}

static void gen_reset_eflags(DisasContext *s, target_ulong mask)
{
    TCGv t = tcg_temp_new();

    tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
    tcg_gen_andi_tl(t, t, ~mask);
    tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
}

/* Clear BND registers during legacy branches. */
static void gen_bnd_jmp(DisasContext *s)
{
    /* Clear the registers only if BND prefix is missing, MPX is enabled,
       and if the BNDREGs are known to be in use (non-zero) already.
       The helper itself will check BNDPRESERVE at runtime.  */
    if ((s->prefix & PREFIX_REPNZ) == 0
        && (s->flags & HF_MPX_EN_MASK) != 0
        && (s->flags & HF_MPX_IU_MASK) != 0) {
        gen_helper_bnd_jmp(tcg_env);
    }
}
/* Generate an end of block. Trace exception is also generated if needed.
   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
   If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
   S->TF.  This is used by the syscall/sysret insns.  */
static void
gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
{
    bool inhibit_reset;

    gen_update_cc_op(s);

    /* If several instructions disable interrupts, only the first does it.  */
    inhibit_reset = false;
    if (s->flags & HF_INHIBIT_IRQ_MASK) {
        gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
        inhibit_reset = true;
    } else if (inhibit) {
        gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
    }

    if (s->base.tb->flags & HF_RF_MASK) {
        gen_reset_eflags(s, RF_MASK);
    }
    if (recheck_tf) {
        gen_helper_rechecking_single_step(tcg_env);
        tcg_gen_exit_tb(NULL, 0);
    } else if (s->flags & HF_TF_MASK) {
        gen_helper_single_step(tcg_env);
    } else if (jr &&
               /* give irqs a chance to happen */
               !inhibit_reset) {
        tcg_gen_lookup_and_goto_ptr();
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}

/* End of block, ignoring the state of S->TF; used by syscall/sysret.  */
static void
gen_eob_syscall(DisasContext *s)
{
    gen_eob_worker(s, false, true, false);
}

/* End of block.  Set HF_INHIBIT_IRQ_MASK if it isn't already set.  */
static void gen_eob_inhibit_irq(DisasContext *s)
{
    gen_eob_worker(s, true, false, false);
}

/* End of block, resetting the inhibit irq flag.  */
static void gen_eob(DisasContext *s)
{
    gen_eob_worker(s, false, false, false);
}

/* Jump to register */
static void gen_jr(DisasContext *s)
{
    gen_eob_worker(s, false, false, true);
}
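/*
 * Note: the only difference between gen_jr and gen_eob is the jr flag.
 * For an indirect jump whose target is already in cpu_eip, gen_eob_worker
 * can use tcg_gen_lookup_and_goto_ptr() to chain directly into the next TB
 * instead of bouncing through the main loop, unless IRQs were just
 * re-enabled and must be given a chance to fire.
 */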
/* Jump to eip+diff, truncating the result to OT. */
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
{
    bool use_goto_tb = s->jmp_opt;
    target_ulong mask = -1;
    target_ulong new_pc = s->pc + diff;
    target_ulong new_eip = new_pc - s->cs_base;

    assert(!s->cc_op_dirty);

    /* In 64-bit mode, operand size is fixed at 64 bits. */
    if (!CODE64(s)) {
        if (ot == MO_16) {
            mask = 0xffff;
            if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
                use_goto_tb = false;
            }
        } else {
            mask = 0xffffffff;
        }
    }
    new_eip &= mask;

    /* Avoid using cpu_eip unless we have to.  */
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
        /*
         * If we can prove the branch does not leave the page and we have
         * no extra masking to apply (data16 branch in code32, see above),
         * then we have also proven that the addition does not wrap.
         */
        if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
            tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
            use_goto_tb = false;
        }
    } else if (!CODE64(s)) {
        new_pc = (uint32_t)(new_eip + s->cs_base);
    }

    if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
            tcg_gen_movi_tl(cpu_eip, new_eip);
        }
        tcg_gen_exit_tb(s->base.tb, tb_num);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
            tcg_gen_movi_tl(cpu_eip, new_eip);
        }
        if (s->jmp_opt) {
            gen_jr(s);   /* jump to another page */
        } else {
            gen_eob(s);  /* exit to main loop */
        }
    }
}

/* Jump to eip+diff, truncating to the current code size. */
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
{
    /* CODE64 ignores the OT argument, so we need not consider it. */
    gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
}
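/*
 * Worked example: a near jump in 16-bit code with EIP = 0xfff0 and
 * diff = +0x20 wraps to new_eip = 0x0010, because mask is 0xffff above.
 * gen_jmp_rel_csize relies on the same masking for fixed-size jumps.
 */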
static inline void gen_ldq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
    tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
}

static inline void gen_stq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
}

static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
{
    MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
                  ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
    MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
    int mem_index = s->mem_index;
    TCGv_i128 t = tcg_temp_new_i128();

    tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
    tcg_gen_st_i128(t, tcg_env, offset);
}

static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
{
    MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
                  ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
    MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
    int mem_index = s->mem_index;
    TCGv_i128 t = tcg_temp_new_i128();

    tcg_gen_ld_i128(t, tcg_env, offset);
    tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
}

static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
{
    MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
    int mem_index = s->mem_index;
    TCGv_i128 t0 = tcg_temp_new_i128();
    TCGv_i128 t1 = tcg_temp_new_i128();

    tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
    tcg_gen_addi_tl(s->tmp0, s->A0, 16);
    tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);

    tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
    tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
}

static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
{
    MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
    int mem_index = s->mem_index;
    TCGv_i128 t = tcg_temp_new_i128();

    tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
    tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
    tcg_gen_addi_tl(s->tmp0, s->A0, 16);
    tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
    tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
}
static bool first = true;
static unsigned long limit;

#include "decode-new.h"
#include "emit.c.inc"
#include "decode-new.c.inc"
static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
{
    TCGv_i64 cmp, val, old;
    TCGv Z;

    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i64();
    val = tcg_temp_new_i64();
    old = tcg_temp_new_i64();

    /* Construct the comparison values from the register pair. */
    tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
    } else {
        tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
                                      s->mem_index, MO_TEUQ);
    }

    /* Set tmp0 to match the required value of Z. */
    tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
    Z = tcg_temp_new();
    tcg_gen_trunc_i64_tl(Z, cmp);

    /*
     * Extract the result values for the register pair.
     * For 32-bit, we may do this unconditionally, because on success (Z=1),
     * the old value matches the previous value in EDX:EAX.  For x86_64,
     * the store must be conditional, because we must leave the source
     * registers unchanged on success, and zero-extend the writeback
     * on failure (Z=0).
     */
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
    } else {
        TCGv zero = tcg_constant_tl(0);

        tcg_gen_extr_i64_tl(s->T0, s->T1, old);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
                           s->T0, cpu_regs[R_EAX]);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
                           s->T1, cpu_regs[R_EDX]);
    }

    /* Update Z. */
    gen_compute_eflags(s);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
}
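/*
 * Note: only ZF is architecturally defined by CMPXCHG8B, so the code
 * materializes the full flags with gen_compute_eflags() (putting the CC
 * state into CC_OP_EFLAGS mode) and then deposits the success bit Z at bit
 * position ctz32(CC_Z) of cpu_cc_src.
 */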
#ifdef TARGET_X86_64
static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
{
    MemOp mop = MO_TE | MO_128 | MO_ALIGN;
    TCGv_i64 t0, t1;
    TCGv_i128 cmp, val;

    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i128();
    val = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    } else {
        tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    }

    tcg_gen_extr_i128_i64(s->T0, s->T1, val);

    /* Determine success after the fact. */
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
    tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
    tcg_gen_or_i64(t0, t0, t1);

    /* Update Z. */
    gen_compute_eflags(s);
    tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);

    /*
     * Extract the result values for the register pair.  We may do this
     * unconditionally, because on success (Z=1), the old value matches
     * the previous value in RDX:RAX.
     */
    tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
    tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
}
#endif
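/*
 * Note: MO_ALIGN in mop enforces the architectural requirement that the
 * CMPXCHG16B operand be 16-byte aligned; an unaligned access faults (#GP
 * on x86) instead of being emulated as a split access.
 */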
/* convert one instruction. s->base.is_jmp is set if the translation must
   be stopped. Return the next pc value */
static bool disas_insn(DisasContext *s, CPUState *cpu)
{
    CPUX86State *env = cpu_env(cpu);
    int b, prefixes;
    int shift;
    MemOp ot, aflag, dflag;
    int modrm, reg, rm, mod, op, opreg, val;
    bool orig_cc_op_dirty = s->cc_op_dirty;
    CCOp orig_cc_op = s->cc_op;
    target_ulong orig_pc_save = s->pc_save;

    s->pc = s->base.pc_next;
    s->override = -1;
#ifdef TARGET_X86_64
    s->rex_r = 0;
    s->rex_x = 0;
    s->rex_b = 0;
#endif
    s->rip_offset = 0; /* for relative ip address */
    s->vex_l = 0;
    s->vex_v = 0;
    s->vex_w = false;
    switch (sigsetjmp(s->jmpbuf, 0)) {
    case 0:
        break;
    case 1:
        gen_exception_gpf(s);
        return true;
    case 2:
        /* Restore state that may affect the next instruction. */
        s->pc = s->base.pc_next;
        /*
         * TODO: These save/restore can be removed after the table-based
         * decoder is complete; we will be decoding the insn completely
         * before any code generation that might affect these variables.
         */
        s->cc_op_dirty = orig_cc_op_dirty;
        s->cc_op = orig_cc_op;
        s->pc_save = orig_pc_save;
        /* END TODO */
        s->base.num_insns--;
        tcg_remove_ops_after(s->prev_insn_end);
        s->base.insn_start = s->prev_insn_start;
        s->base.is_jmp = DISAS_TOO_MANY;
        return false;
    default:
        g_assert_not_reached();
    }
    prefixes = 0;

    if (first) {
        const char *limit_str = getenv("QEMU_I386_LIMIT");
        limit = limit_str ? atol(limit_str) : -1;
        first = false;
    }
    bool use_new = true;
#ifdef CONFIG_USER_ONLY
    use_new &= limit > 0;
#endif

 next_byte:
    s->prefix = prefixes;
    b = x86_ldub_code(env, s);
    /* Collect prefixes. */
    switch (b) {
    default:
#ifndef CONFIG_USER_ONLY
        use_new &= b <= limit;
#endif
        if (use_new && b <= 0x5f) {
            disas_insn_new(s, cpu, b);
            return true;
        }
        break;
    case 0x0f:
        b = x86_ldub_code(env, s) + 0x100;
#ifndef CONFIG_USER_ONLY
        use_new &= b <= limit;
#endif
        if (use_new) {
            disas_insn_new(s, cpu, b);
            return true;
        }
        break;
    case 0xf3:
        prefixes |= PREFIX_REPZ;
        prefixes &= ~PREFIX_REPNZ;
        goto next_byte;
    case 0xf2:
        prefixes |= PREFIX_REPNZ;
        prefixes &= ~PREFIX_REPZ;
        goto next_byte;
    case 0xf0:
        prefixes |= PREFIX_LOCK;
        goto next_byte;
    case 0x2e:
        s->override = R_CS;
        goto next_byte;
    case 0x36:
        s->override = R_SS;
        goto next_byte;
    case 0x3e:
        s->override = R_DS;
        goto next_byte;
    case 0x26:
        s->override = R_ES;
        goto next_byte;
    case 0x64:
        s->override = R_FS;
        goto next_byte;
    case 0x65:
        s->override = R_GS;
        goto next_byte;
    case 0x66:
        prefixes |= PREFIX_DATA;
        goto next_byte;
    case 0x67:
        prefixes |= PREFIX_ADR;
        goto next_byte;
#ifdef TARGET_X86_64
    case 0x40 ... 0x4f:
        if (CODE64(s)) {
            /* REX prefix */
            prefixes |= PREFIX_REX;
            s->vex_w = (b >> 3) & 1;
            s->rex_r = (b & 0x4) << 1;
            s->rex_x = (b & 0x2) << 2;
            s->rex_b = (b & 0x1) << 3;
            goto next_byte;
        }
        break;
#endif
    case 0xc5: /* 2-byte VEX */
    case 0xc4: /* 3-byte VEX */
        if (CODE32(s) && !VM86(s)) {
            int vex2 = x86_ldub_code(env, s);
            s->pc--; /* rewind the advance_pc() x86_ldub_code() did */

            if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
                /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
                   otherwise the instruction is LES or LDS.  */
                break;
            }
            disas_insn_new(s, cpu, b);
            return true;
        }
        break;
    }

    /* Post-process prefixes. */
    if (CODE64(s)) {
        /* In 64-bit mode, the default data size is 32-bit.  Select 64-bit
           data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
           over 0x66 if both are present.  */
        dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
        /* In 64-bit mode, 0x67 selects 32-bit addressing.  */
        aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
    } else {
        /* In 16/32-bit mode, 0x66 selects the opposite data size.  */
        if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) {
            dflag = MO_32;
        } else {
            dflag = MO_16;
        }
        /* In 16/32-bit mode, 0x67 selects the opposite addressing.  */
        if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) {
            aflag = MO_32;
        } else {
            aflag = MO_16;
        }
    }

    s->prefix = prefixes;
    s->aflag = aflag;
    s->dflag = dflag;
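/*
 * Worked example: in 64-bit mode the bytes 66 48 01 d8 carry both an
 * operand-size prefix and REX.W; rex_w wins regardless of prefix order, so
 * dflag is MO_64 and the instruction executes as "add %rbx, %rax".
 */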
    /* now check op code */
    switch (b) {
        /**************************/
        /* arith & logic */
    case 0x00 ... 0x05:
    case 0x08 ... 0x0d:
    case 0x10 ... 0x15:
    case 0x18 ... 0x1d:
    case 0x20 ... 0x25:
    case 0x28 ... 0x2d:
    case 0x30 ... 0x35:
    case 0x38 ... 0x3d:
        {
            int f;
            op = (b >> 3) & 7;
            f = (b >> 1) & 3;

            ot = mo_b_d(b, dflag);

            switch (f) {
            case 0: /* OP Ev, Gv */
                modrm = x86_ldub_code(env, s);
                reg = ((modrm >> 3) & 7) | REX_R(s);
                mod = (modrm >> 6) & 3;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(env, s, modrm);
                    opreg = OR_TMP0;
                } else if (op == OP_XORL && rm == reg) {
                xor_zero:
                    /* xor reg, reg optimisation */
                    set_cc_op(s, CC_OP_CLR);
                    tcg_gen_movi_tl(s->T0, 0);
                    gen_op_mov_reg_v(s, ot, reg, s->T0);
                    break;
                } else {
                    opreg = rm;
                }
                gen_op_mov_v_reg(s, ot, s->T1, reg);
                gen_op(s, op, ot, opreg);
                break;
            case 1: /* OP Gv, Ev */
                modrm = x86_ldub_code(env, s);
                mod = (modrm >> 6) & 3;
                reg = ((modrm >> 3) & 7) | REX_R(s);
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(env, s, modrm);
                    gen_op_ld_v(s, ot, s->T1, s->A0);
                } else if (op == OP_XORL && rm == reg) {
                    goto xor_zero;
                } else {
                    gen_op_mov_v_reg(s, ot, s->T1, rm);
                }
                gen_op(s, op, ot, reg);
                break;
            case 2: /* OP A, Iv */
                val = insn_get(env, s, ot);
                tcg_gen_movi_tl(s->T1, val);
                gen_op(s, op, ot, OR_EAX);
                break;
            }
        }
        break;
    case 0x80: /* GRP1 */
    case 0x81:
    case 0x82:
    case 0x83:
        {
            ot = mo_b_d(b, dflag);

            modrm = x86_ldub_code(env, s);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            op = (modrm >> 3) & 7;

            if (mod != 3) {
                if (b == 0x83) {
                    s->rip_offset = 1;
                } else {
                    s->rip_offset = insn_const_size(ot);
                }
                gen_lea_modrm(env, s, modrm);
                opreg = OR_TMP0;
            } else {
                opreg = rm;
            }

            switch (b) {
            default:
            case 0x80:
            case 0x81:
            case 0x82:
                val = insn_get(env, s, ot);
                break;
            case 0x83:
                val = (int8_t)insn_get(env, s, MO_8);
                break;
            }
            tcg_gen_movi_tl(s->T1, val);
            gen_op(s, op, ot, opreg);
        }
        break;

        /**************************/
        /* inc, dec, and other misc arith */
    case 0x40 ... 0x47: /* inc Gv */
        ot = dflag;
        gen_inc(s, ot, OR_EAX + (b & 7), 1);
        break;
    case 0x48 ... 0x4f: /* dec Gv */
        ot = dflag;
        gen_inc(s, ot, OR_EAX + (b & 7), -1);
        break;
    case 0xf6: /* GRP3 */
    case 0xf7:
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (mod != 3) {
            if (op == 0) {
                s->rip_offset = insn_const_size(ot);
            }
            gen_lea_modrm(env, s, modrm);
            /* For those below that handle locked memory, don't load here. */
            if (!(s->prefix & PREFIX_LOCK)
                || op != 2) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
            }
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        }

        switch (op) {
        case 0: /* test */
            val = insn_get(env, s, ot);
            tcg_gen_movi_tl(s->T1, val);
            gen_op_testl_T0_T1_cc(s);
            set_cc_op(s, CC_OP_LOGICB + ot);
            break;
        case 2: /* not */
            if (s->prefix & PREFIX_LOCK) {
                if (mod == 3) {
                    goto illegal_op;
                }
                tcg_gen_movi_tl(s->T0, ~0);
                tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
                                            s->mem_index, ot | MO_LE);
            } else {
                tcg_gen_not_tl(s->T0, s->T0);
                if (mod != 3) {
                    gen_op_st_v(s, ot, s->T0, s->A0);
                } else {
                    gen_op_mov_reg_v(s, ot, rm, s->T0);
                }
            }
            break;
        case 3: /* neg */
            if (s->prefix & PREFIX_LOCK) {
                TCGLabel *label1;
                TCGv a0, t0, t1, t2;

                if (mod == 3) {
                    goto illegal_op;
                }
                a0 = s->A0;
                t0 = s->T0;
                label1 = gen_new_label();

                gen_set_label(label1);
                t1 = tcg_temp_new();
                t2 = tcg_temp_new();
                tcg_gen_mov_tl(t2, t0);
                tcg_gen_neg_tl(t1, t0);
                tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
                                          s->mem_index, ot | MO_LE);
                tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);

                tcg_gen_neg_tl(s->T0, t0);
            } else {
                tcg_gen_neg_tl(s->T0, s->T0);
                if (mod != 3) {
                    gen_op_st_v(s, ot, s->T0, s->A0);
                } else {
                    gen_op_mov_reg_v(s, ot, rm, s->T0);
                }
            }
            gen_op_update_neg_cc(s);
            set_cc_op(s, CC_OP_SUBB + ot);
            break;
        case 4: /* mul */
            switch (ot) {
            case MO_8:
                gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
                tcg_gen_ext8u_tl(s->T0, s->T0);
                tcg_gen_ext8u_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00);
                set_cc_op(s, CC_OP_MULB);
                break;
            case MO_16:
                gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
                tcg_gen_ext16u_tl(s->T0, s->T0);
                tcg_gen_ext16u_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_shri_tl(s->T0, s->T0, 16);
                gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
                tcg_gen_mov_tl(cpu_cc_src, s->T0);
                set_cc_op(s, CC_OP_MULW);
                break;
            default:
            case MO_32:
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
                                  s->tmp2_i32, s->tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULL);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  s->T0, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
                break;
#endif
            }
            break;
        case 5: /* imul */
            switch (ot) {
            case MO_8:
                gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
                tcg_gen_ext8s_tl(s->T0, s->T0);
                tcg_gen_ext8s_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_ext8s_tl(s->tmp0, s->T0);
                tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
                set_cc_op(s, CC_OP_MULB);
                break;
            case MO_16:
                gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
                tcg_gen_ext16s_tl(s->T0, s->T0);
                tcg_gen_ext16s_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_ext16s_tl(s->tmp0, s->T0);
                tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
                tcg_gen_shri_tl(s->T0, s->T0, 16);
                gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
                set_cc_op(s, CC_OP_MULW);
                break;
            default:
            case MO_32:
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
                                  s->tmp2_i32, s->tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
                tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
                set_cc_op(s, CC_OP_MULL);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  s->T0, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
                tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
                break;
#endif
            }
            break;
        case 6: /* div */
            switch (ot) {
            case MO_8:
                gen_helper_divb_AL(tcg_env, s->T0);
                break;
            case MO_16:
                gen_helper_divw_AX(tcg_env, s->T0);
                break;
            default:
            case MO_32:
                gen_helper_divl_EAX(tcg_env, s->T0);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                gen_helper_divq_EAX(tcg_env, s->T0);
                break;
#endif
            }
            break;
        case 7: /* idiv */
            switch (ot) {
            case MO_8:
                gen_helper_idivb_AL(tcg_env, s->T0);
                break;
            case MO_16:
                gen_helper_idivw_AX(tcg_env, s->T0);
                break;
            default:
            case MO_32:
                gen_helper_idivl_EAX(tcg_env, s->T0);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                gen_helper_idivq_EAX(tcg_env, s->T0);
                break;
#endif
            }
            break;
        default:
            goto unknown_op;
        }
        break;
    case 0xfe: /* GRP4 */
    case 0xff: /* GRP5 */
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (op >= 2 && b == 0xfe) {
            goto unknown_op;
        }
        if (CODE64(s)) {
            if (op == 2 || op == 4) {
                /* operand size for jumps is 64 bit */
                ot = MO_64;
            } else if (op == 3 || op == 5) {
                ot = dflag != MO_16 ? MO_32 + REX_W(s) : MO_16;
            } else if (op == 6) {
                /* default push size is 64 bit */
                ot = mo_pushpop(s, dflag);
            }
        }
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm);
            if (op >= 2 && op != 3 && op != 5)
                gen_op_ld_v(s, ot, s->T0, s->A0);
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        }

        switch (op) {
        case 0: /* inc Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, 1);
            break;
        case 1: /* dec Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, -1);
            break;
        case 2: /* call Ev */
            /* XXX: optimize if memory (no 'and' is necessary) */
            if (dflag == MO_16) {
                tcg_gen_ext16u_tl(s->T0, s->T0);
            }
            gen_push_v(s, eip_next_tl(s));
            gen_op_jmp_v(s, s->T0);
            gen_bnd_jmp(s);
            s->base.is_jmp = DISAS_JUMP;
            break;
        case 3: /* lcall Ev */
            if (mod == 3) {
                goto illegal_op;
            }
            gen_op_ld_v(s, ot, s->T0, s->A0);
            gen_add_A0_im(s, 1 << ot);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_far_call(s);
            break;
        case 4: /* jmp Ev */
            if (dflag == MO_16) {
                tcg_gen_ext16u_tl(s->T0, s->T0);
            }
            gen_op_jmp_v(s, s->T0);
            gen_bnd_jmp(s);
            s->base.is_jmp = DISAS_JUMP;
            break;
        case 5: /* ljmp Ev */
            if (mod == 3) {
                goto illegal_op;
            }
            gen_op_ld_v(s, ot, s->T0, s->A0);
            gen_add_A0_im(s, 1 << ot);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_far_jmp(s);
            break;
        case 6: /* push Ev */
            gen_push_v(s, s->T0);
            break;
        default:
            goto unknown_op;
        }
        break;
    case 0x84: /* test Ev, Gv */
    case 0x85:
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_v_reg(s, ot, s->T1, reg);
        gen_op_testl_T0_T1_cc(s);
        set_cc_op(s, CC_OP_LOGICB + ot);
        break;

    case 0xa8: /* test eAX, Iv */
    case 0xa9:
        ot = mo_b_d(b, dflag);
        val = insn_get(env, s, ot);

        gen_op_mov_v_reg(s, ot, s->T0, OR_EAX);
        tcg_gen_movi_tl(s->T1, val);
        gen_op_testl_T0_T1_cc(s);
        set_cc_op(s, CC_OP_LOGICB + ot);
        break;
    case 0x98: /* CWDE/CBW */
        switch (dflag) {
#ifdef TARGET_X86_64
        case MO_64:
            gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
            tcg_gen_ext32s_tl(s->T0, s->T0);
            gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0);
            break;
#endif
        case MO_32:
            gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
            tcg_gen_ext16s_tl(s->T0, s->T0);
            gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0);
            break;
        case MO_16:
            gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX);
            tcg_gen_ext8s_tl(s->T0, s->T0);
            gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 0x99: /* CDQ/CWD */
        switch (dflag) {
#ifdef TARGET_X86_64
        case MO_64:
            gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX);
            tcg_gen_sari_tl(s->T0, s->T0, 63);
            gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0);
            break;
#endif
        case MO_32:
            gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
            tcg_gen_ext32s_tl(s->T0, s->T0);
            tcg_gen_sari_tl(s->T0, s->T0, 31);
            gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0);
            break;
        case MO_16:
            gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
            tcg_gen_ext16s_tl(s->T0, s->T0);
            tcg_gen_sari_tl(s->T0, s->T0, 15);
            gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 0x1af: /* imul Gv, Ev */
    case 0x69: /* imul Gv, Ev, I */
    case 0x6b:
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        if (b == 0x69) {
            s->rip_offset = insn_const_size(ot);
        } else if (b == 0x6b) {
            s->rip_offset = 1;
        }
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        if (b == 0x69) {
            val = insn_get(env, s, ot);
            tcg_gen_movi_tl(s->T1, val);
        } else if (b == 0x6b) {
            val = (int8_t)insn_get(env, s, MO_8);
            tcg_gen_movi_tl(s->T1, val);
        } else {
            gen_op_mov_v_reg(s, ot, s->T1, reg);
        }
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_64:
            tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
            tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1);
            break;
#endif
        case MO_32:
            tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
            tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
            tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
                              s->tmp2_i32, s->tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
            tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
            break;
        default:
            tcg_gen_ext16s_tl(s->T0, s->T0);
            tcg_gen_ext16s_tl(s->T1, s->T1);
            /* XXX: use 32 bit mul which could be faster */
            tcg_gen_mul_tl(s->T0, s->T0, s->T1);
            tcg_gen_mov_tl(cpu_cc_dst, s->T0);
            tcg_gen_ext16s_tl(s->tmp0, s->T0);
            tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
            gen_op_mov_reg_v(s, ot, reg, s->T0);
            break;
        }
        set_cc_op(s, CC_OP_MULB + ot);
        break;
    case 0x1c0: /* xadd Ev, Gv */
    case 0x1c1:
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        gen_op_mov_v_reg(s, ot, s->T0, reg);
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_v_reg(s, ot, s->T1, rm);
            tcg_gen_add_tl(s->T0, s->T0, s->T1);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        } else {
            gen_lea_modrm(env, s, modrm);
            if (s->prefix & PREFIX_LOCK) {
                tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
                                            s->mem_index, ot | MO_LE);
                tcg_gen_add_tl(s->T0, s->T0, s->T1);
            } else {
                gen_op_ld_v(s, ot, s->T1, s->A0);
                tcg_gen_add_tl(s->T0, s->T0, s->T1);
                gen_op_st_v(s, ot, s->T0, s->A0);
            }
            gen_op_mov_reg_v(s, ot, reg, s->T1);
        }
        gen_op_update2_cc(s);
        set_cc_op(s, CC_OP_ADDB + ot);
        break;
    case 0x1b0: /* cmpxchg Ev, Gv */
    case 0x1b1:
        {
            TCGv oldv, newv, cmpv, dest;

            ot = mo_b_d(b, dflag);
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            mod = (modrm >> 6) & 3;
            oldv = tcg_temp_new();
            newv = tcg_temp_new();
            cmpv = tcg_temp_new();
            gen_op_mov_v_reg(s, ot, newv, reg);
            tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
            gen_extu(ot, cmpv);
            if (s->prefix & PREFIX_LOCK) {
                if (mod == 3) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
                                          s->mem_index, ot | MO_LE);
            } else {
                if (mod == 3) {
                    rm = (modrm & 7) | REX_B(s);
                    gen_op_mov_v_reg(s, ot, oldv, rm);
                    gen_extu(ot, oldv);

                    /*
                     * Unlike the memory case, where "the destination operand receives
                     * a write cycle without regard to the result of the comparison",
                     * rm must not be touched altogether if the write fails, including
                     * not zero-extending it on 64-bit processors.  So, precompute
                     * the result of a successful writeback and perform the movcond
                     * directly on cpu_regs.  Also need to write accumulator first, in
                     * case rm is part of RAX too.
                     */
                    dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
                    tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
                } else {
                    gen_lea_modrm(env, s, modrm);
                    gen_op_ld_v(s, ot, oldv, s->A0);

                    /*
                     * Perform an unconditional store cycle like physical cpu;
                     * must be before changing accumulator to ensure
                     * idempotency if the store faults and the instruction
                     * is restarted
                     */
                    tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
                    gen_op_st_v(s, ot, newv, s->A0);
                }
            }
            /*
             * Write EAX only if the cmpxchg fails; reuse newv as the destination,
             * since it's dead here.
             */
            dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
            tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
            tcg_gen_mov_tl(cpu_cc_src, oldv);
            tcg_gen_mov_tl(s->cc_srcT, cmpv);
            tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
            set_cc_op(s, CC_OP_SUBB + ot);
        }
        break;
    case 0x1c7: /* cmpxchg8b */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        switch ((modrm >> 3) & 7) {
        case 1: /* CMPXCHG8, CMPXCHG16 */
            if (mod == 3) {
                goto illegal_op;
            }
#ifdef TARGET_X86_64
            if (dflag == MO_64) {
                if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
                    goto illegal_op;
                }
                gen_cmpxchg16b(s, env, modrm);
                break;
            }
#endif
            if (!(s->cpuid_features & CPUID_CX8)) {
                goto illegal_op;
            }
            gen_cmpxchg8b(s, env, modrm);
            break;

        case 7: /* RDSEED, RDPID with f3 prefix */
            if (mod != 3 ||
                (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            if (s->prefix & PREFIX_REPZ) {
                if (!(s->cpuid_7_0_ecx_features & CPUID_7_0_ECX_RDPID)) {
                    goto illegal_op;
                }
                gen_helper_rdpid(s->T0, tcg_env);
                rm = (modrm & 7) | REX_B(s);
                gen_op_mov_reg_v(s, dflag, rm, s->T0);
                break;
            } else {
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
                    goto illegal_op;
                }
                goto do_rdrand;
            }

        case 6: /* RDRAND */
            if (mod != 3 ||
                (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
                !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
                goto illegal_op;
            }
        do_rdrand:
            translator_io_start(&s->base);
            gen_helper_rdrand(s->T0, tcg_env);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_v(s, dflag, rm, s->T0);
            set_cc_op(s, CC_OP_EFLAGS);
            break;

        default:
            goto illegal_op;
        }
        break;
        /**************************/
        /* push/pop */
    case 0x50 ... 0x57: /* push */
        gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s));
        gen_push_v(s, s->T0);
        break;
    case 0x58 ... 0x5f: /* pop */
        ot = gen_pop_T0(s);
        /* NOTE: order is important for pop %sp */
        gen_pop_update(s, ot);
        gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0);
        break;
    case 0x60: /* pusha */
        if (CODE64(s))
            goto illegal_op;
        gen_pusha(s);
        break;
    case 0x61: /* popa */
        if (CODE64(s))
            goto illegal_op;
        gen_popa(s);
        break;
    case 0x68: /* push Iv */
    case 0x6a:
        ot = mo_pushpop(s, dflag);
        if (b == 0x68)
            val = insn_get(env, s, ot);
        else
            val = (int8_t)insn_get(env, s, MO_8);
        tcg_gen_movi_tl(s->T0, val);
        gen_push_v(s, s->T0);
        break;
    case 0x8f: /* pop Ev */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        ot = gen_pop_T0(s);
        if (mod == 3) {
            /* NOTE: order is important for pop %sp */
            gen_pop_update(s, ot);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        } else {
            /* NOTE: order is important too for MMU exceptions */
            s->popl_esp_hack = 1 << ot;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            s->popl_esp_hack = 0;
            gen_pop_update(s, ot);
        }
        break;
    case 0xc8: /* enter */
        {
            int level;
            val = x86_lduw_code(env, s);
            level = x86_ldub_code(env, s);
            gen_enter(s, val, level);
        }
        break;
    case 0xc9: /* leave */
        gen_leave(s);
        break;
    case 0x06: /* push es */
    case 0x0e: /* push cs */
    case 0x16: /* push ss */
    case 0x1e: /* push ds */
        if (CODE64(s))
            goto illegal_op;
        gen_op_movl_T0_seg(s, b >> 3);
        gen_push_v(s, s->T0);
        break;
    case 0x1a0: /* push fs */
    case 0x1a8: /* push gs */
        gen_op_movl_T0_seg(s, (b >> 3) & 7);
        gen_push_v(s, s->T0);
        break;
    case 0x07: /* pop es */
    case 0x17: /* pop ss */
    case 0x1f: /* pop ds */
        if (CODE64(s))
            goto illegal_op;
        reg = b >> 3;
        ot = gen_pop_T0(s);
        gen_movl_seg_T0(s, reg);
        gen_pop_update(s, ot);
        break;
    case 0x1a1: /* pop fs */
    case 0x1a9: /* pop gs */
        ot = gen_pop_T0(s);
        gen_movl_seg_T0(s, (b >> 3) & 7);
        gen_pop_update(s, ot);
        break;
        /**************************/
        /* mov */
    case 0x88:
    case 0x89: /* mov Gv, Ev */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0xc6:
    case 0xc7: /* mov Ev, Iv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod != 3) {
            s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm);
        }
        val = insn_get(env, s, ot);
        tcg_gen_movi_tl(s->T0, val);
        if (mod != 3) {
            gen_op_st_v(s, ot, s->T0, s->A0);
        } else {
            gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0);
        }
        break;
    case 0x8a:
    case 0x8b: /* mov Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_reg_v(s, ot, reg, s->T0);
        break;
    case 0x8e: /* mov seg, Gv */
        modrm = x86_ldub_code(env, s);
        reg = (modrm >> 3) & 7;
        if (reg >= 6 || reg == R_CS)
            goto illegal_op;
        gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
        gen_movl_seg_T0(s, reg);
        break;
    case 0x8c: /* mov Gv, seg */
        modrm = x86_ldub_code(env, s);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (reg >= 6)
            goto illegal_op;
        gen_op_movl_T0_seg(s, reg);
        ot = mod == 3 ? dflag : MO_16;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
        break;
    case 0x1b6: /* movzbS Gv, Eb */
    case 0x1b7: /* movzwS Gv, Eb */
    case 0x1be: /* movsbS Gv, Eb */
    case 0x1bf: /* movswS Gv, Eb */
        {
            MemOp d_ot;
            MemOp s_ot;

            /* d_ot is the size of destination */
            d_ot = dflag;
            /* ot is the size of source */
            ot = (b & 1) + MO_8;
            /* s_ot is the sign+size of source */
            s_ot = b & 8 ? MO_SIGN | ot : ot;

            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) {
                    tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8);
                } else {
                    gen_op_mov_v_reg(s, ot, s->T0, rm);
                    switch (s_ot) {
                    case MO_UB:
                        tcg_gen_ext8u_tl(s->T0, s->T0);
                        break;
                    case MO_SB:
                        tcg_gen_ext8s_tl(s->T0, s->T0);
                        break;
                    case MO_UW:
                        tcg_gen_ext16u_tl(s->T0, s->T0);
                        break;
                    default:
                    case MO_SW:
                        tcg_gen_ext16s_tl(s->T0, s->T0);
                        break;
                    }
                }
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            } else {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, s_ot, s->T0, s->A0);
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            }
        }
        break;
    case 0x8d: /* lea */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | REX_R(s);
        {
            AddressParts a = gen_lea_modrm_0(env, s, modrm);
            TCGv ea = gen_lea_modrm_1(s, a, false);
            gen_lea_v_seg(s, s->aflag, ea, -1, -1);
            gen_op_mov_reg_v(s, dflag, reg, s->A0);
        }
        break;
    case 0xa0: /* mov EAX, Ov */
    case 0xa1:
    case 0xa2: /* mov Ov, EAX */
    case 0xa3:
        {
            target_ulong offset_addr;

            ot = mo_b_d(b, dflag);
            offset_addr = insn_get_addr(env, s, s->aflag);
            tcg_gen_movi_tl(s->A0, offset_addr);
            gen_add_A0_ds_seg(s);
            if ((b & 2) == 0) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
                gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
            } else {
                gen_op_mov_v_reg(s, ot, s->T0, R_EAX);
                gen_op_st_v(s, ot, s->T0, s->A0);
            }
        }
        break;
    case 0xd7: /* xlat */
        tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]);
        tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]);
        tcg_gen_add_tl(s->A0, s->A0, s->T0);
        gen_add_A0_ds_seg(s);
        gen_op_ld_v(s, MO_8, s->T0, s->A0);
        gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
        break;
    case 0xb0 ... 0xb7: /* mov R, Ib */
        val = insn_get(env, s, MO_8);
        tcg_gen_movi_tl(s->T0, val);
        gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0);
        break;
    case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            uint64_t tmp;
            /* 64 bit case */
            tmp = x86_ldq_code(env, s);
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(s->T0, tmp);
            gen_op_mov_reg_v(s, MO_64, reg, s->T0);
        } else
#endif
        {
            ot = dflag;
            val = insn_get(env, s, ot);
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(s->T0, val);
            gen_op_mov_reg_v(s, ot, reg, s->T0);
        }
        break;
    case 0x91 ... 0x97: /* xchg R, EAX */
    do_xchg_reg_eax:
        ot = dflag;
        reg = (b & 7) | REX_B(s);
        rm = R_EAX;
        goto do_xchg_reg;
    case 0x86:
    case 0x87: /* xchg Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
        do_xchg_reg:
            gen_op_mov_v_reg(s, ot, s->T0, reg);
            gen_op_mov_v_reg(s, ot, s->T1, rm);
            gen_op_mov_reg_v(s, ot, rm, s->T0);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
        } else {
            gen_lea_modrm(env, s, modrm);
            gen_op_mov_v_reg(s, ot, s->T0, reg);
            /* for xchg, lock is implicit */
            tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0,
                                   s->mem_index, ot | MO_LE);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
        }
        break;
    case 0xc4: /* les Gv */
        /* In CODE64 this is VEX3; see above.  */
        op = R_ES;
        goto do_lxx;
    case 0xc5: /* lds Gv */
        /* In CODE64 this is VEX2; see above.  */
        op = R_DS;
        goto do_lxx;
    case 0x1b2: /* lss Gv */
        op = R_SS;
        goto do_lxx;
    case 0x1b4: /* lfs Gv */
        op = R_FS;
        goto do_lxx;
    case 0x1b5: /* lgs Gv */
        op = R_GS;
    do_lxx:
        ot = dflag != MO_16 ? MO_32 : MO_16;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_lea_modrm(env, s, modrm);
        gen_op_ld_v(s, ot, s->T1, s->A0);
        gen_add_A0_im(s, 1 << ot);
        /* load the segment first to handle exceptions properly */
        gen_op_ld_v(s, MO_16, s->T0, s->A0);
        gen_movl_seg_T0(s, op);
        /* then put the data */
        gen_op_mov_reg_v(s, ot, reg, s->T1);
        break;
        /************************/
        /* shifts */
    case 0xc0:
    case 0xc1:
        /* shift Ev,Ib */
        shift = 2;
    grp2:
        {
            ot = mo_b_d(b, dflag);
            modrm = x86_ldub_code(env, s);
            mod = (modrm >> 6) & 3;
            op = (modrm >> 3) & 7;

            if (mod != 3) {
                if (shift == 2) {
                    s->rip_offset = 1;
                }
                gen_lea_modrm(env, s, modrm);
                opreg = OR_TMP0;
            } else {
                opreg = (modrm & 7) | REX_B(s);
            }

            /* simpler op */
            if (shift == 0) {
                gen_shift(s, op, ot, opreg, OR_ECX);
            } else {
                if (shift == 2) {
                    shift = x86_ldub_code(env, s);
                }
                gen_shifti(s, op, ot, opreg, shift);
            }
        }
        break;
    case 0xd0:
    case 0xd1:
        /* shift Ev,1 */
        shift = 1;
        goto grp2;
    case 0xd2:
    case 0xd3:
        /* shift Ev,cl */
        shift = 0;
        goto grp2;

    case 0x1a4: /* shld imm */
        op = 0;
        shift = 1;
        goto do_shiftd;
    case 0x1a5: /* shld cl */
        op = 0;
        shift = 0;
        goto do_shiftd;
    case 0x1ac: /* shrd imm */
        op = 1;
        shift = 1;
        goto do_shiftd;
    case 0x1ad: /* shrd cl */
        op = 1;
        shift = 0;
    do_shiftd:
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm);
            opreg = OR_TMP0;
        } else {
            opreg = rm;
        }
        gen_op_mov_v_reg(s, ot, s->T1, reg);

        if (shift) {
            TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
            gen_shiftd_rm_T1(s, ot, opreg, op, imm);
        } else {
            gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
        }
        break;
        /************************/
        /* floats */
    case 0xd8 ... 0xdf:
        {
            bool update_fip = true;

            if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
                /* if CR0.EM or CR0.TS are set, generate an FPU exception */
                /* XXX: what to do if illegal op ? */
                gen_exception(s, EXCP07_PREX);
                break;
            }
            modrm = x86_ldub_code(env, s);
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            op = ((b & 7) << 3) | ((modrm >> 3) & 7);
            if (mod != 3) {
                /* memory op */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                TCGv ea = gen_lea_modrm_1(s, a, false);
                TCGv last_addr = tcg_temp_new();
                bool update_fdp = true;

                tcg_gen_mov_tl(last_addr, ea);
                gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);

                switch (op) {
                case 0x00 ... 0x07: /* fxxxs */
                case 0x10 ... 0x17: /* fixxxl */
                case 0x20 ... 0x27: /* fxxxl */
                case 0x30 ... 0x37: /* fixxx */
                    {
                        int op1;
                        op1 = op & 7;

                        switch (op >> 4) {
                        case 0:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
                            break;
                        case 1:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
                            break;
                        case 2:
                            tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
                            break;
                        case 3:
                        default:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LESW);
                            gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
                            break;
                        }

                        gen_helper_fp_arith_ST0_FT0(op1);
                        if (op1 == 3) {
                            /* fcomp needs pop */
                            gen_helper_fpop(tcg_env);
                        }
                    }
                    break;
                case 0x08: /* flds */
                case 0x0a: /* fsts */
                case 0x0b: /* fstps */
                case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
                case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
                case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
                    switch (op & 7) {
                    case 0:
                        switch (op >> 4) {
                        case 0:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
                            break;
                        case 1:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
                            break;
                        case 2:
                            tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
                            break;
                        case 3:
                        default:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LESW);
                            gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
                            break;
                        }
                        break;
                    case 1:
                        /* XXX: the corresponding CPUID bit must be tested ! */
                        switch (op >> 4) {
                        case 1:
                            gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            break;
                        case 2:
                            gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
                            tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            break;
                        case 3:
                        default:
                            gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUW);
                            break;
                        }
                        gen_helper_fpop(tcg_env);
                        break;
                    default:
                        switch (op >> 4) {
                        case 0:
                            gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            break;
                        case 1:
                            gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            break;
                        case 2:
                            gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
                            tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            break;
                        case 3:
                        default:
                            gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUW);
                            break;
                        }
                        if ((op & 7) == 3) {
                            gen_helper_fpop(tcg_env);
                        }
                        break;
                    }
                    break;
                case 0x0c: /* fldenv mem */
                    gen_helper_fldenv(tcg_env, s->A0,
                                      tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x0d: /* fldcw mem */
                    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    gen_helper_fldcw(tcg_env, s->tmp2_i32);
                    update_fip = update_fdp = false;
                    break;
                case 0x0e: /* fnstenv mem */
                    gen_helper_fstenv(tcg_env, s->A0,
                                      tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x0f: /* fnstcw mem */
                    gen_helper_fnstcw(s->tmp2_i32, tcg_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    update_fip = update_fdp = false;
                    break;
                case 0x1d: /* fldt mem */
                    gen_helper_fldt_ST0(tcg_env, s->A0);
                    break;
                case 0x1f: /* fstpt mem */
                    gen_helper_fstt_ST0(tcg_env, s->A0);
                    gen_helper_fpop(tcg_env);
                    break;
                case 0x2c: /* frstor mem */
                    gen_helper_frstor(tcg_env, s->A0,
                                      tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x2e: /* fnsave mem */
                    gen_helper_fsave(tcg_env, s->A0,
                                     tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x2f: /* fnstsw mem */
                    gen_helper_fnstsw(s->tmp2_i32, tcg_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    update_fip = update_fdp = false;
                    break;
                case 0x3c: /* fbld */
                    gen_helper_fbld_ST0(tcg_env, s->A0);
                    break;
                case 0x3e: /* fbstp */
                    gen_helper_fbst_ST0(tcg_env, s->A0);
                    gen_helper_fpop(tcg_env);
                    break;
                case 0x3d: /* fildll */
                    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                        s->mem_index, MO_LEUQ);
                    gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
                    break;
                case 0x3f: /* fistpll */
                    gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
                    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                        s->mem_index, MO_LEUQ);
                    gen_helper_fpop(tcg_env);
                    break;
                default:
                    goto unknown_op;
                }

                if (update_fdp) {
                    int last_seg = s->override >= 0 ? s->override : a.def_seg;

                    tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
                                   offsetof(CPUX86State,
                                            segs[last_seg].selector));
                    tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
                                     offsetof(CPUX86State, fpds));
                    tcg_gen_st_tl(last_addr, tcg_env,
                                  offsetof(CPUX86State, fpdp));
                }
            } else {
                /* register float ops */
                opreg = rm;

                switch (op) {
                case 0x08: /* fld sti */
                    gen_helper_fpush(tcg_env);
                    gen_helper_fmov_ST0_STN(tcg_env,
                                            tcg_constant_i32((opreg + 1) & 7));
                    break;
                case 0x09: /* fxchg sti */
                case 0x29: /* fxchg4 sti, undocumented op */
                case 0x39: /* fxchg7 sti, undocumented op */
                    gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
                    break;
                case 0x0a: /* grp d9/2 */
                    switch (rm) {
                    case 0: /* fnop */
                        /*
                         * check exceptions (FreeBSD FPU probe)
                         * needs to be treated as I/O because of ferr_irq
                         */
                        translator_io_start(&s->base);
                        gen_helper_fwait(tcg_env);
                        update_fip = false;
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x0c: /* grp d9/4 */
                    switch (rm) {
                    case 0: /* fchs */
                        gen_helper_fchs_ST0(tcg_env);
                        break;
                    case 1: /* fabs */
                        gen_helper_fabs_ST0(tcg_env);
                        break;
                    case 4: /* ftst */
                        gen_helper_fldz_FT0(tcg_env);
                        gen_helper_fcom_ST0_FT0(tcg_env);
                        break;
                    case 5: /* fxam */
                        gen_helper_fxam_ST0(tcg_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x0d: /* grp d9/5 */
                    switch (rm) {
                    case 0:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fld1_ST0(tcg_env);
                        break;
                    case 1:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fldl2t_ST0(tcg_env);
                        break;
                    case 2:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fldl2e_ST0(tcg_env);
                        break;
                    case 3:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fldpi_ST0(tcg_env);
                        break;
                    case 4:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fldlg2_ST0(tcg_env);
                        break;
                    case 5:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fldln2_ST0(tcg_env);
                        break;
                    case 6:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fldz_ST0(tcg_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x0e: /* grp d9/6 */
                    switch (rm) {
                    case 0: /* f2xm1 */
                        gen_helper_f2xm1(tcg_env);
                        break;
                    case 1: /* fyl2x */
                        gen_helper_fyl2x(tcg_env);
                        break;
                    case 2: /* fptan */
                        gen_helper_fptan(tcg_env);
                        break;
                    case 3: /* fpatan */
                        gen_helper_fpatan(tcg_env);
                        break;
                    case 4: /* fxtract */
                        gen_helper_fxtract(tcg_env);
                        break;
                    case 5: /* fprem1 */
                        gen_helper_fprem1(tcg_env);
                        break;
                    case 6: /* fdecstp */
                        gen_helper_fdecstp(tcg_env);
                        break;
                    default:
                    case 7: /* fincstp */
                        gen_helper_fincstp(tcg_env);
                        break;
                    }
                    break;
                case 0x0f: /* grp d9/7 */
                    switch (rm) {
                    case 0: /* fprem */
                        gen_helper_fprem(tcg_env);
                        break;
                    case 1: /* fyl2xp1 */
                        gen_helper_fyl2xp1(tcg_env);
                        break;
                    case 2: /* fsqrt */
                        gen_helper_fsqrt(tcg_env);
                        break;
                    case 3: /* fsincos */
                        gen_helper_fsincos(tcg_env);
                        break;
                    case 5: /* fscale */
                        gen_helper_fscale(tcg_env);
                        break;
                    case 4: /* frndint */
                        gen_helper_frndint(tcg_env);
                        break;
                    case 6: /* fsin */
                        gen_helper_fsin(tcg_env);
                        break;
                    default:
                    case 7: /* fcos */
                        gen_helper_fcos(tcg_env);
                        break;
                    }
                    break;
                case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
                case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
                case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
                    {
                        int op1;

                        op1 = op & 7;
                        if (op >= 0x20) {
                            gen_helper_fp_arith_STN_ST0(op1, opreg);
                            if (op >= 0x30) {
                                gen_helper_fpop(tcg_env);
                            }
                        } else {
                            gen_helper_fmov_FT0_STN(tcg_env,
                                                    tcg_constant_i32(opreg));
                            gen_helper_fp_arith_ST0_FT0(op1);
                        }
                    }
                    break;
                case 0x02: /* fcom */
                case 0x22: /* fcom2, undocumented op */
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fcom_ST0_FT0(tcg_env);
                    break;
                case 0x03: /* fcomp */
                case 0x23: /* fcomp3, undocumented op */
                case 0x32: /* fcomp5, undocumented op */
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fcom_ST0_FT0(tcg_env);
                    gen_helper_fpop(tcg_env);
                    break;
                case 0x15: /* da/5 */
                    switch (rm) {
                    case 1: /* fucompp */
                        gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
                        gen_helper_fucom_ST0_FT0(tcg_env);
                        gen_helper_fpop(tcg_env);
                        gen_helper_fpop(tcg_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x1c:
                    switch (rm) {
                    case 0: /* feni (287 only, just do nop here) */
                        break;
                    case 1: /* fdisi (287 only, just do nop here) */
                        break;
                    case 2: /* fclex */
                        gen_helper_fclex(tcg_env);
                        update_fip = false;
                        break;
                    case 3: /* fninit */
                        gen_helper_fninit(tcg_env);
                        update_fip = false;
                        break;
                    case 4: /* fsetpm (287 only, just do nop here) */
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x1d: /* fucomi */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fucomi_ST0_FT0(tcg_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x1e: /* fcomi */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fcomi_ST0_FT0(tcg_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x28: /* ffree sti */
                    gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
                    break;
                case 0x2a: /* fst sti */
                    gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
                    break;
                case 0x2b: /* fstp sti */
                case 0x0b: /* fstp1 sti, undocumented op */
                case 0x3a: /* fstp8 sti, undocumented op */
                case 0x3b: /* fstp9 sti, undocumented op */
                    gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fpop(tcg_env);
                    break;
                case 0x2c: /* fucom st(i) */
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fucom_ST0_FT0(tcg_env);
                    break;
                case 0x2d: /* fucomp st(i) */
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fucom_ST0_FT0(tcg_env);
                    gen_helper_fpop(tcg_env);
                    break;
                case 0x33: /* de/3 */
                    switch (rm) {
                    case 1: /* fcompp */
                        gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
                        gen_helper_fcom_ST0_FT0(tcg_env);
                        gen_helper_fpop(tcg_env);
                        gen_helper_fpop(tcg_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x38: /* ffreep sti, undocumented op */
                    gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fpop(tcg_env);
                    break;
                case 0x3c: /* df/4 */
                    switch (rm) {
                    case 0:
                        gen_helper_fnstsw(s->tmp2_i32, tcg_env);
                        tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
                        gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x3d: /* fucomip */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fucomi_ST0_FT0(tcg_env);
                    gen_helper_fpop(tcg_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x3e: /* fcomip */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fcomi_ST0_FT0(tcg_env);
                    gen_helper_fpop(tcg_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x10 ... 0x13: /* fcmovxx */
                case 0x18 ... 0x1b:
                    {
                        int op1;
                        TCGLabel *l1;
                        static const uint8_t fcmov_cc[8] = {
                            (JCC_B << 1),
                            (JCC_Z << 1),
                            (JCC_BE << 1),
                            (JCC_P << 1),
                        };

                        if (!(s->cpuid_features & CPUID_CMOV)) {
                            goto illegal_op;
                        }
                        op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
                        l1 = gen_new_label();
                        gen_jcc1_noeob(s, op1, l1);
                        gen_helper_fmov_ST0_STN(tcg_env,
                                                tcg_constant_i32(opreg));
                        gen_set_label(l1);
                    }
                    break;
                default:
                    goto unknown_op;
                }
            }

            if (update_fip) {
                tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
                               offsetof(CPUX86State, segs[R_CS].selector));
                tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
                                 offsetof(CPUX86State, fpcs));
                tcg_gen_st_tl(eip_cur_tl(s),
                              tcg_env, offsetof(CPUX86State, fpip));
            }
        }
        break;
4967 /************************/
4970 case 0xa4: /* movsS */
4972 ot
= mo_b_d(b
, dflag
);
4973 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
4974 gen_repz_movs(s
, ot
);
4980 case 0xaa: /* stosS */
4982 ot
= mo_b_d(b
, dflag
);
4983 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, R_EAX
);
4984 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
4985 gen_repz_stos(s
, ot
);
4990 case 0xac: /* lodsS */
4992 ot
= mo_b_d(b
, dflag
);
4993 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
4994 gen_repz_lods(s
, ot
);
4999 case 0xae: /* scasS */
5001 ot
= mo_b_d(b
, dflag
);
5002 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, R_EAX
);
5003 if (prefixes
& PREFIX_REPNZ
) {
5004 gen_repz_scas(s
, ot
, 1);
5005 } else if (prefixes
& PREFIX_REPZ
) {
5006 gen_repz_scas(s
, ot
, 0);
5012 case 0xa6: /* cmpsS */
5014 ot
= mo_b_d(b
, dflag
);
5015 if (prefixes
& PREFIX_REPNZ
) {
5016 gen_repz_cmps(s
, ot
, 1);
5017 } else if (prefixes
& PREFIX_REPZ
) {
5018 gen_repz_cmps(s
, ot
, 0);
5023 case 0x6c: /* insS */
5025 ot
= mo_b_d32(b
, dflag
);
5026 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
5027 tcg_gen_ext16u_i32(s
->tmp2_i32
, s
->tmp2_i32
);
5028 if (!gen_check_io(s
, ot
, s
->tmp2_i32
,
5029 SVM_IOIO_TYPE_MASK
| SVM_IOIO_STR_MASK
)) {
5032 translator_io_start(&s
->base
);
5033 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
5034 gen_repz_ins(s
, ot
);
5039 case 0x6e: /* outsS */
5041 ot
= mo_b_d32(b
, dflag
);
5042 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
5043 tcg_gen_ext16u_i32(s
->tmp2_i32
, s
->tmp2_i32
);
5044 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, SVM_IOIO_STR_MASK
)) {
5047 translator_io_start(&s
->base
);
5048 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
5049 gen_repz_outs(s
, ot
);
5055 /************************/
5060 ot
= mo_b_d32(b
, dflag
);
5061 val
= x86_ldub_code(env
, s
);
5062 tcg_gen_movi_i32(s
->tmp2_i32
, val
);
5063 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, SVM_IOIO_TYPE_MASK
)) {
5066 translator_io_start(&s
->base
);
5067 gen_helper_in_func(ot
, s
->T1
, s
->tmp2_i32
);
5068 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T1
);
5069 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
5073 ot
= mo_b_d32(b
, dflag
);
5074 val
= x86_ldub_code(env
, s
);
5075 tcg_gen_movi_i32(s
->tmp2_i32
, val
);
5076 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, 0)) {
5079 translator_io_start(&s
->base
);
5080 gen_op_mov_v_reg(s
, ot
, s
->T1
, R_EAX
);
5081 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
5082 gen_helper_out_func(ot
, s
->tmp2_i32
, s
->tmp3_i32
);
5083 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
5087 ot
= mo_b_d32(b
, dflag
);
5088 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
5089 tcg_gen_ext16u_i32(s
->tmp2_i32
, s
->tmp2_i32
);
5090 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, SVM_IOIO_TYPE_MASK
)) {
5093 translator_io_start(&s
->base
);
5094 gen_helper_in_func(ot
, s
->T1
, s
->tmp2_i32
);
5095 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T1
);
5096 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
5100 ot
= mo_b_d32(b
, dflag
);
5101 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
5102 tcg_gen_ext16u_i32(s
->tmp2_i32
, s
->tmp2_i32
);
5103 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, 0)) {
5106 translator_io_start(&s
->base
);
5107 gen_op_mov_v_reg(s
, ot
, s
->T1
, R_EAX
);
5108 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
5109 gen_helper_out_func(ot
, s
->tmp2_i32
, s
->tmp3_i32
);
5110 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
5113 /************************/
5115 case 0xc2: /* ret im */
5116 val
= x86_ldsw_code(env
, s
);
5118 gen_stack_update(s
, val
+ (1 << ot
));
5119 /* Note that gen_pop_T0 uses a zero-extending load. */
5120 gen_op_jmp_v(s
, s
->T0
);
5122 s
->base
.is_jmp
= DISAS_JUMP
;
5124 case 0xc3: /* ret */
5126 gen_pop_update(s
, ot
);
5127 /* Note that gen_pop_T0 uses a zero-extending load. */
5128 gen_op_jmp_v(s
, s
->T0
);
5130 s
->base
.is_jmp
= DISAS_JUMP
;
5132 case 0xca: /* lret im */
5133 val
= x86_ldsw_code(env
, s
);
5135 if (PE(s
) && !VM86(s
)) {
5136 gen_update_cc_op(s
);
5137 gen_update_eip_cur(s
);
5138 gen_helper_lret_protected(tcg_env
, tcg_constant_i32(dflag
- 1),
5139 tcg_constant_i32(val
));
5143 gen_op_ld_v(s
, dflag
, s
->T0
, s
->A0
);
5144 /* NOTE: keeping EIP updated is not a problem in case of
5146 gen_op_jmp_v(s
, s
->T0
);
5148 gen_add_A0_im(s
, 1 << dflag
);
5149 gen_op_ld_v(s
, dflag
, s
->T0
, s
->A0
);
5150 gen_op_movl_seg_real(s
, R_CS
, s
->T0
);
5151 /* add stack offset */
5152 gen_stack_update(s
, val
+ (2 << dflag
));
5154 s
->base
.is_jmp
= DISAS_EOB_ONLY
;
5156 case 0xcb: /* lret */
5159 case 0xcf: /* iret */
5160 gen_svm_check_intercept(s
, SVM_EXIT_IRET
);
5161 if (!PE(s
) || VM86(s
)) {
5162 /* real mode or vm86 mode */
5163 if (!check_vm86_iopl(s
)) {
5166 gen_helper_iret_real(tcg_env
, tcg_constant_i32(dflag
- 1));
5168 gen_helper_iret_protected(tcg_env
, tcg_constant_i32(dflag
- 1),
5171 set_cc_op(s
, CC_OP_EFLAGS
);
5172 s
->base
.is_jmp
= DISAS_EOB_ONLY
;
5174 case 0xe8: /* call im */
5176 int diff
= (dflag
!= MO_16
5177 ? (int32_t)insn_get(env
, s
, MO_32
)
5178 : (int16_t)insn_get(env
, s
, MO_16
));
5179 gen_push_v(s
, eip_next_tl(s
));
5181 gen_update_cc_op(s
);
5182 gen_jmp_rel(s
, dflag
, diff
, 0);
5185 case 0x9a: /* lcall im */
5187 unsigned int selector
, offset
;
5192 offset
= insn_get(env
, s
, ot
);
5193 selector
= insn_get(env
, s
, MO_16
);
5195 tcg_gen_movi_tl(s
->T0
, offset
);
5196 tcg_gen_movi_tl(s
->T1
, selector
);
5200 case 0xe9: /* jmp im */
5202 int diff
= (dflag
!= MO_16
5203 ? (int32_t)insn_get(env
, s
, MO_32
)
5204 : (int16_t)insn_get(env
, s
, MO_16
));
5206 gen_update_cc_op(s
);
5207 gen_jmp_rel(s
, dflag
, diff
, 0);
5210 case 0xea: /* ljmp im */
5212 unsigned int selector
, offset
;
5217 offset
= insn_get(env
, s
, ot
);
5218 selector
= insn_get(env
, s
, MO_16
);
5220 tcg_gen_movi_tl(s
->T0
, offset
);
5221 tcg_gen_movi_tl(s
->T1
, selector
);
5225 case 0xeb: /* jmp Jb */
5227 int diff
= (int8_t)insn_get(env
, s
, MO_8
);
5228 gen_update_cc_op(s
);
5229 gen_jmp_rel(s
, dflag
, diff
, 0);
5232 case 0x70 ... 0x7f: /* jcc Jb */
5234 int diff
= (int8_t)insn_get(env
, s
, MO_8
);
5236 gen_jcc(s
, b
, diff
);
5239 case 0x180 ... 0x18f: /* jcc Jv */
5241 int diff
= (dflag
!= MO_16
5242 ? (int32_t)insn_get(env
, s
, MO_32
)
5243 : (int16_t)insn_get(env
, s
, MO_16
));
5245 gen_jcc(s
, b
, diff
);
5249 case 0x190 ... 0x19f: /* setcc Gv */
5250 modrm
= x86_ldub_code(env
, s
);
5251 gen_setcc1(s
, b
, s
->T0
);
5252 gen_ldst_modrm(env
, s
, modrm
, MO_8
, OR_TMP0
, 1);
5254 case 0x140 ... 0x14f: /* cmov Gv, Ev */
5255 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
5259 modrm
= x86_ldub_code(env
, s
);
5260 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5261 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5262 gen_cmovcc1(s
, b
^ 1, s
->T0
, cpu_regs
[reg
]);
5263 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
5266 /************************/
5268 case 0x9c: /* pushf */
5269 gen_svm_check_intercept(s
, SVM_EXIT_PUSHF
);
5270 if (check_vm86_iopl(s
)) {
5271 gen_update_cc_op(s
);
5272 gen_helper_read_eflags(s
->T0
, tcg_env
);
5273 gen_push_v(s
, s
->T0
);
5276 case 0x9d: /* popf */
5277 gen_svm_check_intercept(s
, SVM_EXIT_POPF
);
5278 if (check_vm86_iopl(s
)) {
5279 int mask
= TF_MASK
| AC_MASK
| ID_MASK
| NT_MASK
;
5282 mask
|= IF_MASK
| IOPL_MASK
;
5283 } else if (CPL(s
) <= IOPL(s
)) {
5286 if (dflag
== MO_16
) {
5291 gen_helper_write_eflags(tcg_env
, s
->T0
, tcg_constant_i32(mask
));
5292 gen_pop_update(s
, ot
);
5293 set_cc_op(s
, CC_OP_EFLAGS
);
5294 /* abort translation because TF/AC flag may change */
5295 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
5298 case 0x9e: /* sahf */
5299 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
5301 tcg_gen_shri_tl(s
->T0
, cpu_regs
[R_EAX
], 8);
5302 gen_compute_eflags(s
);
5303 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, CC_O
);
5304 tcg_gen_andi_tl(s
->T0
, s
->T0
, CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
5305 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, s
->T0
);
5307 case 0x9f: /* lahf */
5308 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
5310 gen_compute_eflags(s
);
5311 /* Note: gen_compute_eflags() only gives the condition codes */
5312 tcg_gen_ori_tl(s
->T0
, cpu_cc_src
, 0x02);
5313 tcg_gen_deposit_tl(cpu_regs
[R_EAX
], cpu_regs
[R_EAX
], s
->T0
, 8, 8);
5315 case 0xf5: /* cmc */
5316 gen_compute_eflags(s
);
5317 tcg_gen_xori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
5319 case 0xf8: /* clc */
5320 gen_compute_eflags(s
);
5321 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_C
);
5323 case 0xf9: /* stc */
5324 gen_compute_eflags(s
);
5325 tcg_gen_ori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
5327 case 0xfc: /* cld */
5328 tcg_gen_movi_i32(s
->tmp2_i32
, 1);
5329 tcg_gen_st_i32(s
->tmp2_i32
, tcg_env
, offsetof(CPUX86State
, df
));
5331 case 0xfd: /* std */
5332 tcg_gen_movi_i32(s
->tmp2_i32
, -1);
5333 tcg_gen_st_i32(s
->tmp2_i32
, tcg_env
, offsetof(CPUX86State
, df
));
5336 /************************/
5337 /* bit operations */
5338 case 0x1ba: /* bt/bts/btr/btc Gv, im */
5340 modrm
= x86_ldub_code(env
, s
);
5341 op
= (modrm
>> 3) & 7;
5342 mod
= (modrm
>> 6) & 3;
5343 rm
= (modrm
& 7) | REX_B(s
);
5346 gen_lea_modrm(env
, s
, modrm
);
5347 if (!(s
->prefix
& PREFIX_LOCK
)) {
5348 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
5351 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
5354 val
= x86_ldub_code(env
, s
);
5355 tcg_gen_movi_tl(s
->T1
, val
);
5360 case 0x1a3: /* bt Gv, Ev */
5363 case 0x1ab: /* bts */
5366 case 0x1b3: /* btr */
5369 case 0x1bb: /* btc */
5373 modrm
= x86_ldub_code(env
, s
);
5374 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5375 mod
= (modrm
>> 6) & 3;
5376 rm
= (modrm
& 7) | REX_B(s
);
5377 gen_op_mov_v_reg(s
, MO_32
, s
->T1
, reg
);
5379 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
5380 /* specific case: we need to add a displacement */
5381 gen_exts(ot
, s
->T1
);
5382 tcg_gen_sari_tl(s
->tmp0
, s
->T1
, 3 + ot
);
5383 tcg_gen_shli_tl(s
->tmp0
, s
->tmp0
, ot
);
5384 tcg_gen_add_tl(s
->A0
, gen_lea_modrm_1(s
, a
, false), s
->tmp0
);
5385 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, a
.def_seg
, s
->override
);
5386 if (!(s
->prefix
& PREFIX_LOCK
)) {
5387 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
5390 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
5393 tcg_gen_andi_tl(s
->T1
, s
->T1
, (1 << (3 + ot
)) - 1);
5394 tcg_gen_movi_tl(s
->tmp0
, 1);
5395 tcg_gen_shl_tl(s
->tmp0
, s
->tmp0
, s
->T1
);
5396 if (s
->prefix
& PREFIX_LOCK
) {
5399 /* Needs no atomic ops; we suppressed the normal
5400 memory load for LOCK above so do it now. */
5401 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
5404 tcg_gen_atomic_fetch_or_tl(s
->T0
, s
->A0
, s
->tmp0
,
5405 s
->mem_index
, ot
| MO_LE
);
5408 tcg_gen_not_tl(s
->tmp0
, s
->tmp0
);
5409 tcg_gen_atomic_fetch_and_tl(s
->T0
, s
->A0
, s
->tmp0
,
5410 s
->mem_index
, ot
| MO_LE
);
5414 tcg_gen_atomic_fetch_xor_tl(s
->T0
, s
->A0
, s
->tmp0
,
5415 s
->mem_index
, ot
| MO_LE
);
5418 tcg_gen_shr_tl(s
->tmp4
, s
->T0
, s
->T1
);
5420 tcg_gen_shr_tl(s
->tmp4
, s
->T0
, s
->T1
);
5423 /* Data already loaded; nothing to do. */
5426 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->tmp0
);
5429 tcg_gen_andc_tl(s
->T0
, s
->T0
, s
->tmp0
);
5433 tcg_gen_xor_tl(s
->T0
, s
->T0
, s
->tmp0
);
5438 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
5440 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
5445 /* Delay all CC updates until after the store above. Note that
5446 C is the result of the test, Z is unchanged, and the others
5447 are all undefined. */
5449 case CC_OP_MULB
... CC_OP_MULQ
:
5450 case CC_OP_ADDB
... CC_OP_ADDQ
:
5451 case CC_OP_ADCB
... CC_OP_ADCQ
:
5452 case CC_OP_SUBB
... CC_OP_SUBQ
:
5453 case CC_OP_SBBB
... CC_OP_SBBQ
:
5454 case CC_OP_LOGICB
... CC_OP_LOGICQ
:
5455 case CC_OP_INCB
... CC_OP_INCQ
:
5456 case CC_OP_DECB
... CC_OP_DECQ
:
5457 case CC_OP_SHLB
... CC_OP_SHLQ
:
5458 case CC_OP_SARB
... CC_OP_SARQ
:
5459 case CC_OP_BMILGB
... CC_OP_BMILGQ
:
5460 /* Z was going to be computed from the non-zero status of CC_DST.
5461 We can get that same Z value (and the new C value) by leaving
5462 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
5464 tcg_gen_mov_tl(cpu_cc_src
, s
->tmp4
);
5465 set_cc_op(s
, ((s
->cc_op
- CC_OP_MULB
) & 3) + CC_OP_SARB
);
5468 /* Otherwise, generate EFLAGS and replace the C bit. */
5469 gen_compute_eflags(s
);
5470 tcg_gen_deposit_tl(cpu_cc_src
, cpu_cc_src
, s
->tmp4
,
5475 case 0x1bc: /* bsf / tzcnt */
5476 case 0x1bd: /* bsr / lzcnt */
5478 modrm
= x86_ldub_code(env
, s
);
5479 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5480 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5481 gen_extu(ot
, s
->T0
);
5483 /* Note that lzcnt and tzcnt are in different extensions. */
5484 if ((prefixes
& PREFIX_REPZ
)
5486 ? s
->cpuid_ext3_features
& CPUID_EXT3_ABM
5487 : s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)) {
5489 /* For lzcnt/tzcnt, C bit is defined related to the input. */
5490 tcg_gen_mov_tl(cpu_cc_src
, s
->T0
);
5492 /* For lzcnt, reduce the target_ulong result by the
5493 number of zeros that we expect to find at the top. */
5494 tcg_gen_clzi_tl(s
->T0
, s
->T0
, TARGET_LONG_BITS
);
5495 tcg_gen_subi_tl(s
->T0
, s
->T0
, TARGET_LONG_BITS
- size
);
5497 /* For tzcnt, a zero input must return the operand size. */
5498 tcg_gen_ctzi_tl(s
->T0
, s
->T0
, size
);
5500 /* For lzcnt/tzcnt, Z bit is defined related to the result. */
5501 gen_op_update1_cc(s
);
5502 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
5504 /* For bsr/bsf, only the Z bit is defined and it is related
5505 to the input and not the result. */
5506 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
5507 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
5509 /* ??? The manual says that the output is undefined when the
5510 input is zero, but real hardware leaves it unchanged, and
5511 real programs appear to depend on that. Accomplish this
5512 by passing the output as the value to return upon zero. */
5514 /* For bsr, return the bit index of the first 1 bit,
5515 not the count of leading zeros. */
5516 tcg_gen_xori_tl(s
->T1
, cpu_regs
[reg
], TARGET_LONG_BITS
- 1);
5517 tcg_gen_clz_tl(s
->T0
, s
->T0
, s
->T1
);
5518 tcg_gen_xori_tl(s
->T0
, s
->T0
, TARGET_LONG_BITS
- 1);
5520 tcg_gen_ctz_tl(s
->T0
, s
->T0
, cpu_regs
[reg
]);
5523 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
5525 /************************/
5527 case 0x27: /* daa */
5530 gen_update_cc_op(s
);
5531 gen_helper_daa(tcg_env
);
5532 set_cc_op(s
, CC_OP_EFLAGS
);
5534 case 0x2f: /* das */
5537 gen_update_cc_op(s
);
5538 gen_helper_das(tcg_env
);
5539 set_cc_op(s
, CC_OP_EFLAGS
);
5541 case 0x37: /* aaa */
5544 gen_update_cc_op(s
);
5545 gen_helper_aaa(tcg_env
);
5546 set_cc_op(s
, CC_OP_EFLAGS
);
5548 case 0x3f: /* aas */
5551 gen_update_cc_op(s
);
5552 gen_helper_aas(tcg_env
);
5553 set_cc_op(s
, CC_OP_EFLAGS
);
5555 case 0xd4: /* aam */
5558 val
= x86_ldub_code(env
, s
);
5560 gen_exception(s
, EXCP00_DIVZ
);
5562 gen_helper_aam(tcg_env
, tcg_constant_i32(val
));
5563 set_cc_op(s
, CC_OP_LOGICB
);
5566 case 0xd5: /* aad */
5569 val
= x86_ldub_code(env
, s
);
5570 gen_helper_aad(tcg_env
, tcg_constant_i32(val
));
5571 set_cc_op(s
, CC_OP_LOGICB
);
5573 /************************/
5575 case 0x90: /* nop */
5576 /* XXX: correct lock test for all insn */
5577 if (prefixes
& PREFIX_LOCK
) {
5580 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
5582 goto do_xchg_reg_eax
;
5584 if (prefixes
& PREFIX_REPZ
) {
5585 gen_update_cc_op(s
);
5586 gen_update_eip_cur(s
);
5587 gen_helper_pause(tcg_env
, cur_insn_len_i32(s
));
5588 s
->base
.is_jmp
= DISAS_NORETURN
;
5591 case 0x9b: /* fwait */
5592 if ((s
->flags
& (HF_MP_MASK
| HF_TS_MASK
)) ==
5593 (HF_MP_MASK
| HF_TS_MASK
)) {
5594 gen_exception(s
, EXCP07_PREX
);
5596 /* needs to be treated as I/O because of ferr_irq */
5597 translator_io_start(&s
->base
);
5598 gen_helper_fwait(tcg_env
);
5601 case 0xcc: /* int3 */
5602 gen_interrupt(s
, EXCP03_INT3
);
5604 case 0xcd: /* int N */
5605 val
= x86_ldub_code(env
, s
);
5606 if (check_vm86_iopl(s
)) {
5607 gen_interrupt(s
, val
);
5610 case 0xce: /* into */
5613 gen_update_cc_op(s
);
5614 gen_update_eip_cur(s
);
5615 gen_helper_into(tcg_env
, cur_insn_len_i32(s
));
5618 case 0xf1: /* icebp (undocumented, exits to external debugger) */
5619 gen_svm_check_intercept(s
, SVM_EXIT_ICEBP
);
5623 case 0xfa: /* cli */
5624 if (check_iopl(s
)) {
5625 gen_reset_eflags(s
, IF_MASK
);
5628 case 0xfb: /* sti */
5629 if (check_iopl(s
)) {
5630 gen_set_eflags(s
, IF_MASK
);
5631 /* interruptions are enabled only the first insn after sti */
5632 gen_update_eip_next(s
);
5633 gen_eob_inhibit_irq(s
);
5636 case 0x62: /* bound */
5640 modrm
= x86_ldub_code(env
, s
);
5641 reg
= (modrm
>> 3) & 7;
5642 mod
= (modrm
>> 6) & 3;
5645 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
5646 gen_lea_modrm(env
, s
, modrm
);
5647 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5649 gen_helper_boundw(tcg_env
, s
->A0
, s
->tmp2_i32
);
5651 gen_helper_boundl(tcg_env
, s
->A0
, s
->tmp2_i32
);
5654 case 0x1c8 ... 0x1cf: /* bswap reg */
5655 reg
= (b
& 7) | REX_B(s
);
5656 #ifdef TARGET_X86_64
5657 if (dflag
== MO_64
) {
5658 tcg_gen_bswap64_i64(cpu_regs
[reg
], cpu_regs
[reg
]);
5662 tcg_gen_bswap32_tl(cpu_regs
[reg
], cpu_regs
[reg
], TCG_BSWAP_OZ
);
5664 case 0xd6: /* salc */
5667 gen_compute_eflags_c(s
, s
->T0
);
5668 tcg_gen_neg_tl(s
->T0
, s
->T0
);
5669 gen_op_mov_reg_v(s
, MO_8
, R_EAX
, s
->T0
);
5671 case 0xe0: /* loopnz */
5672 case 0xe1: /* loopz */
5673 case 0xe2: /* loop */
5674 case 0xe3: /* jecxz */
5677 int diff
= (int8_t)insn_get(env
, s
, MO_8
);
5679 l1
= gen_new_label();
5680 l2
= gen_new_label();
5681 gen_update_cc_op(s
);
5684 case 0: /* loopnz */
5686 gen_op_add_reg_im(s
, s
->aflag
, R_ECX
, -1);
5687 gen_op_jz_ecx(s
, l2
);
5688 gen_jcc1(s
, (JCC_Z
<< 1) | (b
^ 1), l1
);
5691 gen_op_add_reg_im(s
, s
->aflag
, R_ECX
, -1);
5692 gen_op_jnz_ecx(s
, l1
);
5696 gen_op_jz_ecx(s
, l1
);
5701 gen_jmp_rel_csize(s
, 0, 1);
5704 gen_jmp_rel(s
, dflag
, diff
, 0);
5707 case 0x130: /* wrmsr */
5708 case 0x132: /* rdmsr */
5709 if (check_cpl0(s
)) {
5710 gen_update_cc_op(s
);
5711 gen_update_eip_cur(s
);
5713 gen_helper_rdmsr(tcg_env
);
5715 gen_helper_wrmsr(tcg_env
);
5716 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
5720 case 0x131: /* rdtsc */
5721 gen_update_cc_op(s
);
5722 gen_update_eip_cur(s
);
5723 translator_io_start(&s
->base
);
5724 gen_helper_rdtsc(tcg_env
);
5726 case 0x133: /* rdpmc */
5727 gen_update_cc_op(s
);
5728 gen_update_eip_cur(s
);
5729 gen_helper_rdpmc(tcg_env
);
5730 s
->base
.is_jmp
= DISAS_NORETURN
;
5732 case 0x134: /* sysenter */
5733 /* For AMD SYSENTER is not valid in long mode */
5734 if (LMA(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
) {
5738 gen_exception_gpf(s
);
5740 gen_helper_sysenter(tcg_env
);
5741 s
->base
.is_jmp
= DISAS_EOB_ONLY
;
5744 case 0x135: /* sysexit */
5745 /* For AMD SYSEXIT is not valid in long mode */
5746 if (LMA(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
) {
5749 if (!PE(s
) || CPL(s
) != 0) {
5750 gen_exception_gpf(s
);
5752 gen_helper_sysexit(tcg_env
, tcg_constant_i32(dflag
- 1));
5753 s
->base
.is_jmp
= DISAS_EOB_ONLY
;
5756 case 0x105: /* syscall */
5757 /* For Intel SYSCALL is only valid in long mode */
5758 if (!LMA(s
) && env
->cpuid_vendor1
== CPUID_VENDOR_INTEL_1
) {
5761 gen_update_cc_op(s
);
5762 gen_update_eip_cur(s
);
5763 gen_helper_syscall(tcg_env
, cur_insn_len_i32(s
));
5764 /* TF handling for the syscall insn is different. The TF bit is checked
5765 after the syscall insn completes. This allows #DB to not be
5766 generated after one has entered CPL0 if TF is set in FMASK. */
5769 case 0x107: /* sysret */
5770 /* For Intel SYSRET is only valid in long mode */
5771 if (!LMA(s
) && env
->cpuid_vendor1
== CPUID_VENDOR_INTEL_1
) {
5774 if (!PE(s
) || CPL(s
) != 0) {
5775 gen_exception_gpf(s
);
5777 gen_helper_sysret(tcg_env
, tcg_constant_i32(dflag
- 1));
5778 /* condition codes are modified only in long mode */
5780 set_cc_op(s
, CC_OP_EFLAGS
);
5782 /* TF handling for the sysret insn is different. The TF bit is
5783 checked after the sysret insn completes. This allows #DB to be
5784 generated "as if" the syscall insn in userspace has just
5789 case 0x1a2: /* cpuid */
5790 gen_update_cc_op(s
);
5791 gen_update_eip_cur(s
);
5792 gen_helper_cpuid(tcg_env
);
5794 case 0xf4: /* hlt */
5795 if (check_cpl0(s
)) {
5796 gen_update_cc_op(s
);
5797 gen_update_eip_cur(s
);
5798 gen_helper_hlt(tcg_env
, cur_insn_len_i32(s
));
5799 s
->base
.is_jmp
= DISAS_NORETURN
;
5803 modrm
= x86_ldub_code(env
, s
);
5804 mod
= (modrm
>> 6) & 3;
5805 op
= (modrm
>> 3) & 7;
5808 if (!PE(s
) || VM86(s
))
5810 if (s
->flags
& HF_UMIP_MASK
&& !check_cpl0(s
)) {
5813 gen_svm_check_intercept(s
, SVM_EXIT_LDTR_READ
);
5814 tcg_gen_ld32u_tl(s
->T0
, tcg_env
,
5815 offsetof(CPUX86State
, ldt
.selector
));
5816 ot
= mod
== 3 ? dflag
: MO_16
;
5817 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5820 if (!PE(s
) || VM86(s
))
5822 if (check_cpl0(s
)) {
5823 gen_svm_check_intercept(s
, SVM_EXIT_LDTR_WRITE
);
5824 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
5825 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5826 gen_helper_lldt(tcg_env
, s
->tmp2_i32
);
5830 if (!PE(s
) || VM86(s
))
5832 if (s
->flags
& HF_UMIP_MASK
&& !check_cpl0(s
)) {
5835 gen_svm_check_intercept(s
, SVM_EXIT_TR_READ
);
5836 tcg_gen_ld32u_tl(s
->T0
, tcg_env
,
5837 offsetof(CPUX86State
, tr
.selector
));
5838 ot
= mod
== 3 ? dflag
: MO_16
;
5839 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5842 if (!PE(s
) || VM86(s
))
5844 if (check_cpl0(s
)) {
5845 gen_svm_check_intercept(s
, SVM_EXIT_TR_WRITE
);
5846 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
5847 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5848 gen_helper_ltr(tcg_env
, s
->tmp2_i32
);
5853 if (!PE(s
) || VM86(s
))
5855 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
5856 gen_update_cc_op(s
);
5858 gen_helper_verr(tcg_env
, s
->T0
);
5860 gen_helper_verw(tcg_env
, s
->T0
);
5862 set_cc_op(s
, CC_OP_EFLAGS
);
5870 modrm
= x86_ldub_code(env
, s
);
5872 CASE_MODRM_MEM_OP(0): /* sgdt */
5873 if (s
->flags
& HF_UMIP_MASK
&& !check_cpl0(s
)) {
5876 gen_svm_check_intercept(s
, SVM_EXIT_GDTR_READ
);
5877 gen_lea_modrm(env
, s
, modrm
);
5878 tcg_gen_ld32u_tl(s
->T0
,
5879 tcg_env
, offsetof(CPUX86State
, gdt
.limit
));
5880 gen_op_st_v(s
, MO_16
, s
->T0
, s
->A0
);
5881 gen_add_A0_im(s
, 2);
5882 tcg_gen_ld_tl(s
->T0
, tcg_env
, offsetof(CPUX86State
, gdt
.base
));
5884 * NB: Despite a confusing description in Intel CPU documentation,
5885 * all 32-bits are written regardless of operand size.
5887 gen_op_st_v(s
, CODE64(s
) + MO_32
, s
->T0
, s
->A0
);
5890 case 0xc8: /* monitor */
5891 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) || CPL(s
) != 0) {
5894 gen_update_cc_op(s
);
5895 gen_update_eip_cur(s
);
5896 tcg_gen_mov_tl(s
->A0
, cpu_regs
[R_EAX
]);
5897 gen_add_A0_ds_seg(s
);
5898 gen_helper_monitor(tcg_env
, s
->A0
);
5901 case 0xc9: /* mwait */
5902 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) || CPL(s
) != 0) {
5905 gen_update_cc_op(s
);
5906 gen_update_eip_cur(s
);
5907 gen_helper_mwait(tcg_env
, cur_insn_len_i32(s
));
5908 s
->base
.is_jmp
= DISAS_NORETURN
;
5911 case 0xca: /* clac */
5912 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
)
5916 gen_reset_eflags(s
, AC_MASK
);
5917 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
5920 case 0xcb: /* stac */
5921 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
)
5925 gen_set_eflags(s
, AC_MASK
);
5926 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
5929 CASE_MODRM_MEM_OP(1): /* sidt */
5930 if (s
->flags
& HF_UMIP_MASK
&& !check_cpl0(s
)) {
5933 gen_svm_check_intercept(s
, SVM_EXIT_IDTR_READ
);
5934 gen_lea_modrm(env
, s
, modrm
);
5935 tcg_gen_ld32u_tl(s
->T0
, tcg_env
, offsetof(CPUX86State
, idt
.limit
));
5936 gen_op_st_v(s
, MO_16
, s
->T0
, s
->A0
);
5937 gen_add_A0_im(s
, 2);
5938 tcg_gen_ld_tl(s
->T0
, tcg_env
, offsetof(CPUX86State
, idt
.base
));
5940 * NB: Despite a confusing description in Intel CPU documentation,
5941 * all 32-bits are written regardless of operand size.
5943 gen_op_st_v(s
, CODE64(s
) + MO_32
, s
->T0
, s
->A0
);
5946 case 0xd0: /* xgetbv */
5947 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
5948 || (s
->prefix
& (PREFIX_LOCK
| PREFIX_DATA
5949 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
5952 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_ECX
]);
5953 gen_helper_xgetbv(s
->tmp1_i64
, tcg_env
, s
->tmp2_i32
);
5954 tcg_gen_extr_i64_tl(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
], s
->tmp1_i64
);
5957 case 0xd1: /* xsetbv */
5958 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
5959 || (s
->prefix
& (PREFIX_LOCK
| PREFIX_DATA
5960 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
5963 gen_svm_check_intercept(s
, SVM_EXIT_XSETBV
);
5964 if (!check_cpl0(s
)) {
5967 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
5969 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_ECX
]);
5970 gen_helper_xsetbv(tcg_env
, s
->tmp2_i32
, s
->tmp1_i64
);
5971 /* End TB because translation flags may change. */
5972 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
5975 case 0xd8: /* VMRUN */
5976 if (!SVME(s
) || !PE(s
)) {
5979 if (!check_cpl0(s
)) {
5982 gen_update_cc_op(s
);
5983 gen_update_eip_cur(s
);
5984 gen_helper_vmrun(tcg_env
, tcg_constant_i32(s
->aflag
- 1),
5985 cur_insn_len_i32(s
));
5986 tcg_gen_exit_tb(NULL
, 0);
5987 s
->base
.is_jmp
= DISAS_NORETURN
;
5990 case 0xd9: /* VMMCALL */
5994 gen_update_cc_op(s
);
5995 gen_update_eip_cur(s
);
5996 gen_helper_vmmcall(tcg_env
);
5999 case 0xda: /* VMLOAD */
6000 if (!SVME(s
) || !PE(s
)) {
6003 if (!check_cpl0(s
)) {
6006 gen_update_cc_op(s
);
6007 gen_update_eip_cur(s
);
6008 gen_helper_vmload(tcg_env
, tcg_constant_i32(s
->aflag
- 1));
6011 case 0xdb: /* VMSAVE */
6012 if (!SVME(s
) || !PE(s
)) {
6015 if (!check_cpl0(s
)) {
6018 gen_update_cc_op(s
);
6019 gen_update_eip_cur(s
);
6020 gen_helper_vmsave(tcg_env
, tcg_constant_i32(s
->aflag
- 1));
6023 case 0xdc: /* STGI */
6024 if ((!SVME(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
))
6028 if (!check_cpl0(s
)) {
6031 gen_update_cc_op(s
);
6032 gen_helper_stgi(tcg_env
);
6033 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6036 case 0xdd: /* CLGI */
6037 if (!SVME(s
) || !PE(s
)) {
6040 if (!check_cpl0(s
)) {
6043 gen_update_cc_op(s
);
6044 gen_update_eip_cur(s
);
6045 gen_helper_clgi(tcg_env
);
6048 case 0xde: /* SKINIT */
6049 if ((!SVME(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
))
6053 gen_svm_check_intercept(s
, SVM_EXIT_SKINIT
);
6054 /* If not intercepted, not implemented -- raise #UD. */
6057 case 0xdf: /* INVLPGA */
6058 if (!SVME(s
) || !PE(s
)) {
6061 if (!check_cpl0(s
)) {
6064 gen_svm_check_intercept(s
, SVM_EXIT_INVLPGA
);
6065 if (s
->aflag
== MO_64
) {
6066 tcg_gen_mov_tl(s
->A0
, cpu_regs
[R_EAX
]);
6068 tcg_gen_ext32u_tl(s
->A0
, cpu_regs
[R_EAX
]);
6070 gen_helper_flush_page(tcg_env
, s
->A0
);
6071 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6074 CASE_MODRM_MEM_OP(2): /* lgdt */
6075 if (!check_cpl0(s
)) {
6078 gen_svm_check_intercept(s
, SVM_EXIT_GDTR_WRITE
);
6079 gen_lea_modrm(env
, s
, modrm
);
6080 gen_op_ld_v(s
, MO_16
, s
->T1
, s
->A0
);
6081 gen_add_A0_im(s
, 2);
6082 gen_op_ld_v(s
, CODE64(s
) + MO_32
, s
->T0
, s
->A0
);
6083 if (dflag
== MO_16
) {
6084 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xffffff);
6086 tcg_gen_st_tl(s
->T0
, tcg_env
, offsetof(CPUX86State
, gdt
.base
));
6087 tcg_gen_st32_tl(s
->T1
, tcg_env
, offsetof(CPUX86State
, gdt
.limit
));
6090 CASE_MODRM_MEM_OP(3): /* lidt */
6091 if (!check_cpl0(s
)) {
6094 gen_svm_check_intercept(s
, SVM_EXIT_IDTR_WRITE
);
6095 gen_lea_modrm(env
, s
, modrm
);
6096 gen_op_ld_v(s
, MO_16
, s
->T1
, s
->A0
);
6097 gen_add_A0_im(s
, 2);
6098 gen_op_ld_v(s
, CODE64(s
) + MO_32
, s
->T0
, s
->A0
);
6099 if (dflag
== MO_16
) {
6100 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xffffff);
6102 tcg_gen_st_tl(s
->T0
, tcg_env
, offsetof(CPUX86State
, idt
.base
));
6103 tcg_gen_st32_tl(s
->T1
, tcg_env
, offsetof(CPUX86State
, idt
.limit
));
6106 CASE_MODRM_OP(4): /* smsw */
6107 if (s
->flags
& HF_UMIP_MASK
&& !check_cpl0(s
)) {
6110 gen_svm_check_intercept(s
, SVM_EXIT_READ_CR0
);
6111 tcg_gen_ld_tl(s
->T0
, tcg_env
, offsetof(CPUX86State
, cr
[0]));
6113 * In 32-bit mode, the higher 16 bits of the destination
6114 * register are undefined. In practice CR0[31:0] is stored
6115 * just like in 64-bit mode.
6117 mod
= (modrm
>> 6) & 3;
6118 ot
= (mod
!= 3 ? MO_16
: s
->dflag
);
6119 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
6121 case 0xee: /* rdpkru */
6122 if (prefixes
& PREFIX_LOCK
) {
6125 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_ECX
]);
6126 gen_helper_rdpkru(s
->tmp1_i64
, tcg_env
, s
->tmp2_i32
);
6127 tcg_gen_extr_i64_tl(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
], s
->tmp1_i64
);
6129 case 0xef: /* wrpkru */
6130 if (prefixes
& PREFIX_LOCK
) {
6133 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
6135 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_ECX
]);
6136 gen_helper_wrpkru(tcg_env
, s
->tmp2_i32
, s
->tmp1_i64
);
6139 CASE_MODRM_OP(6): /* lmsw */
6140 if (!check_cpl0(s
)) {
6143 gen_svm_check_intercept(s
, SVM_EXIT_WRITE_CR0
);
6144 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
6146 * Only the 4 lower bits of CR0 are modified.
6147 * PE cannot be set to zero if already set to one.
6149 tcg_gen_ld_tl(s
->T1
, tcg_env
, offsetof(CPUX86State
, cr
[0]));
6150 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xf);
6151 tcg_gen_andi_tl(s
->T1
, s
->T1
, ~0xe);
6152 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->T1
);
6153 gen_helper_write_crN(tcg_env
, tcg_constant_i32(0), s
->T0
);
6154 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6157 CASE_MODRM_MEM_OP(7): /* invlpg */
6158 if (!check_cpl0(s
)) {
6161 gen_svm_check_intercept(s
, SVM_EXIT_INVLPG
);
6162 gen_lea_modrm(env
, s
, modrm
);
6163 gen_helper_flush_page(tcg_env
, s
->A0
);
6164 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6167 case 0xf8: /* swapgs */
6168 #ifdef TARGET_X86_64
6170 if (check_cpl0(s
)) {
6171 tcg_gen_mov_tl(s
->T0
, cpu_seg_base
[R_GS
]);
6172 tcg_gen_ld_tl(cpu_seg_base
[R_GS
], tcg_env
,
6173 offsetof(CPUX86State
, kernelgsbase
));
6174 tcg_gen_st_tl(s
->T0
, tcg_env
,
6175 offsetof(CPUX86State
, kernelgsbase
));
6182 case 0xf9: /* rdtscp */
6183 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_RDTSCP
)) {
6186 gen_update_cc_op(s
);
6187 gen_update_eip_cur(s
);
6188 translator_io_start(&s
->base
);
6189 gen_helper_rdtsc(tcg_env
);
6190 gen_helper_rdpid(s
->T0
, tcg_env
);
6191 gen_op_mov_reg_v(s
, dflag
, R_ECX
, s
->T0
);
6199 case 0x108: /* invd */
6200 case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */
6201 if (check_cpl0(s
)) {
6202 gen_svm_check_intercept(s
, (b
& 1) ? SVM_EXIT_WBINVD
: SVM_EXIT_INVD
);
6206 case 0x63: /* arpl or movslS (x86_64) */
6207 #ifdef TARGET_X86_64
6210 /* d_ot is the size of destination */
6213 modrm
= x86_ldub_code(env
, s
);
6214 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6215 mod
= (modrm
>> 6) & 3;
6216 rm
= (modrm
& 7) | REX_B(s
);
6219 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, rm
);
6221 if (d_ot
== MO_64
) {
6222 tcg_gen_ext32s_tl(s
->T0
, s
->T0
);
6224 gen_op_mov_reg_v(s
, d_ot
, reg
, s
->T0
);
6226 gen_lea_modrm(env
, s
, modrm
);
6227 gen_op_ld_v(s
, MO_32
| MO_SIGN
, s
->T0
, s
->A0
);
6228 gen_op_mov_reg_v(s
, d_ot
, reg
, s
->T0
);
6236 if (!PE(s
) || VM86(s
))
6238 t0
= tcg_temp_new();
6239 t1
= tcg_temp_new();
6240 t2
= tcg_temp_new();
6242 modrm
= x86_ldub_code(env
, s
);
6243 reg
= (modrm
>> 3) & 7;
6244 mod
= (modrm
>> 6) & 3;
6247 gen_lea_modrm(env
, s
, modrm
);
6248 gen_op_ld_v(s
, ot
, t0
, s
->A0
);
6250 gen_op_mov_v_reg(s
, ot
, t0
, rm
);
6252 gen_op_mov_v_reg(s
, ot
, t1
, reg
);
6253 tcg_gen_andi_tl(s
->tmp0
, t0
, 3);
6254 tcg_gen_andi_tl(t1
, t1
, 3);
6255 tcg_gen_movi_tl(t2
, 0);
6256 label1
= gen_new_label();
6257 tcg_gen_brcond_tl(TCG_COND_GE
, s
->tmp0
, t1
, label1
);
6258 tcg_gen_andi_tl(t0
, t0
, ~3);
6259 tcg_gen_or_tl(t0
, t0
, t1
);
6260 tcg_gen_movi_tl(t2
, CC_Z
);
6261 gen_set_label(label1
);
6263 gen_op_st_v(s
, ot
, t0
, s
->A0
);
6265 gen_op_mov_reg_v(s
, ot
, rm
, t0
);
6267 gen_compute_eflags(s
);
6268 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_Z
);
6269 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t2
);
6272 case 0x102: /* lar */
6273 case 0x103: /* lsl */
6277 if (!PE(s
) || VM86(s
))
6279 ot
= dflag
!= MO_16
? MO_32
: MO_16
;
6280 modrm
= x86_ldub_code(env
, s
);
6281 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6282 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
6283 t0
= tcg_temp_new();
6284 gen_update_cc_op(s
);
6286 gen_helper_lar(t0
, tcg_env
, s
->T0
);
6288 gen_helper_lsl(t0
, tcg_env
, s
->T0
);
6290 tcg_gen_andi_tl(s
->tmp0
, cpu_cc_src
, CC_Z
);
6291 label1
= gen_new_label();
6292 tcg_gen_brcondi_tl(TCG_COND_EQ
, s
->tmp0
, 0, label1
);
6293 gen_op_mov_reg_v(s
, ot
, reg
, t0
);
6294 gen_set_label(label1
);
6295 set_cc_op(s
, CC_OP_EFLAGS
);
6299 modrm
= x86_ldub_code(env
, s
);
6300 mod
= (modrm
>> 6) & 3;
6301 op
= (modrm
>> 3) & 7;
6303 case 0: /* prefetchnta */
6304 case 1: /* prefetchnt0 */
6305 case 2: /* prefetchnt0 */
6306 case 3: /* prefetchnt0 */
6309 gen_nop_modrm(env
, s
, modrm
);
6310 /* nothing more to do */
6312 default: /* nop (multi byte) */
6313 gen_nop_modrm(env
, s
, modrm
);
6318 modrm
= x86_ldub_code(env
, s
);
6319 if (s
->flags
& HF_MPX_EN_MASK
) {
6320 mod
= (modrm
>> 6) & 3;
6321 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6322 if (prefixes
& PREFIX_REPZ
) {
6325 || (prefixes
& PREFIX_LOCK
)
6326 || s
->aflag
== MO_16
) {
6329 gen_bndck(env
, s
, modrm
, TCG_COND_LTU
, cpu_bndl
[reg
]);
6330 } else if (prefixes
& PREFIX_REPNZ
) {
6333 || (prefixes
& PREFIX_LOCK
)
6334 || s
->aflag
== MO_16
) {
6337 TCGv_i64 notu
= tcg_temp_new_i64();
6338 tcg_gen_not_i64(notu
, cpu_bndu
[reg
]);
6339 gen_bndck(env
, s
, modrm
, TCG_COND_GTU
, notu
);
6340 } else if (prefixes
& PREFIX_DATA
) {
6341 /* bndmov -- from reg/mem */
6342 if (reg
>= 4 || s
->aflag
== MO_16
) {
6346 int reg2
= (modrm
& 7) | REX_B(s
);
6347 if (reg2
>= 4 || (prefixes
& PREFIX_LOCK
)) {
6350 if (s
->flags
& HF_MPX_IU_MASK
) {
6351 tcg_gen_mov_i64(cpu_bndl
[reg
], cpu_bndl
[reg2
]);
6352 tcg_gen_mov_i64(cpu_bndu
[reg
], cpu_bndu
[reg2
]);
6355 gen_lea_modrm(env
, s
, modrm
);
6357 tcg_gen_qemu_ld_i64(cpu_bndl
[reg
], s
->A0
,
6358 s
->mem_index
, MO_LEUQ
);
6359 tcg_gen_addi_tl(s
->A0
, s
->A0
, 8);
6360 tcg_gen_qemu_ld_i64(cpu_bndu
[reg
], s
->A0
,
6361 s
->mem_index
, MO_LEUQ
);
6363 tcg_gen_qemu_ld_i64(cpu_bndl
[reg
], s
->A0
,
6364 s
->mem_index
, MO_LEUL
);
6365 tcg_gen_addi_tl(s
->A0
, s
->A0
, 4);
6366 tcg_gen_qemu_ld_i64(cpu_bndu
[reg
], s
->A0
,
6367 s
->mem_index
, MO_LEUL
);
6369 /* bnd registers are now in-use */
6370 gen_set_hflag(s
, HF_MPX_IU_MASK
);
6372 } else if (mod
!= 3) {
6374 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
6376 || (prefixes
& PREFIX_LOCK
)
6377 || s
->aflag
== MO_16
6382 tcg_gen_addi_tl(s
->A0
, cpu_regs
[a
.base
], a
.disp
);
6384 tcg_gen_movi_tl(s
->A0
, 0);
6386 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, a
.def_seg
, s
->override
);
6388 tcg_gen_mov_tl(s
->T0
, cpu_regs
[a
.index
]);
6390 tcg_gen_movi_tl(s
->T0
, 0);
6393 gen_helper_bndldx64(cpu_bndl
[reg
], tcg_env
, s
->A0
, s
->T0
);
6394 tcg_gen_ld_i64(cpu_bndu
[reg
], tcg_env
,
6395 offsetof(CPUX86State
, mmx_t0
.MMX_Q(0)));
6397 gen_helper_bndldx32(cpu_bndu
[reg
], tcg_env
, s
->A0
, s
->T0
);
6398 tcg_gen_ext32u_i64(cpu_bndl
[reg
], cpu_bndu
[reg
]);
6399 tcg_gen_shri_i64(cpu_bndu
[reg
], cpu_bndu
[reg
], 32);
6401 gen_set_hflag(s
, HF_MPX_IU_MASK
);
6404 gen_nop_modrm(env
, s
, modrm
);
6407 modrm
= x86_ldub_code(env
, s
);
6408 if (s
->flags
& HF_MPX_EN_MASK
) {
6409 mod
= (modrm
>> 6) & 3;
6410 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6411 if (mod
!= 3 && (prefixes
& PREFIX_REPZ
)) {
6414 || (prefixes
& PREFIX_LOCK
)
6415 || s
->aflag
== MO_16
) {
6418 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
6420 tcg_gen_extu_tl_i64(cpu_bndl
[reg
], cpu_regs
[a
.base
]);
6422 tcg_gen_ext32u_i64(cpu_bndl
[reg
], cpu_bndl
[reg
]);
6424 } else if (a
.base
== -1) {
6425 /* no base register has lower bound of 0 */
6426 tcg_gen_movi_i64(cpu_bndl
[reg
], 0);
6428 /* rip-relative generates #ud */
6431 tcg_gen_not_tl(s
->A0
, gen_lea_modrm_1(s
, a
, false));
6433 tcg_gen_ext32u_tl(s
->A0
, s
->A0
);
6435 tcg_gen_extu_tl_i64(cpu_bndu
[reg
], s
->A0
);
6436 /* bnd registers are now in-use */
6437 gen_set_hflag(s
, HF_MPX_IU_MASK
);
6439 } else if (prefixes
& PREFIX_REPNZ
) {
6442 || (prefixes
& PREFIX_LOCK
)
6443 || s
->aflag
== MO_16
) {
6446 gen_bndck(env
, s
, modrm
, TCG_COND_GTU
, cpu_bndu
[reg
]);
6447 } else if (prefixes
& PREFIX_DATA
) {
6448 /* bndmov -- to reg/mem */
6449 if (reg
>= 4 || s
->aflag
== MO_16
) {
6453 int reg2
= (modrm
& 7) | REX_B(s
);
6454 if (reg2
>= 4 || (prefixes
& PREFIX_LOCK
)) {
6457 if (s
->flags
& HF_MPX_IU_MASK
) {
6458 tcg_gen_mov_i64(cpu_bndl
[reg2
], cpu_bndl
[reg
]);
6459 tcg_gen_mov_i64(cpu_bndu
[reg2
], cpu_bndu
[reg
]);
6462 gen_lea_modrm(env
, s
, modrm
);
6464 tcg_gen_qemu_st_i64(cpu_bndl
[reg
], s
->A0
,
6465 s
->mem_index
, MO_LEUQ
);
6466 tcg_gen_addi_tl(s
->A0
, s
->A0
, 8);
6467 tcg_gen_qemu_st_i64(cpu_bndu
[reg
], s
->A0
,
6468 s
->mem_index
, MO_LEUQ
);
6470 tcg_gen_qemu_st_i64(cpu_bndl
[reg
], s
->A0
,
6471 s
->mem_index
, MO_LEUL
);
6472 tcg_gen_addi_tl(s
->A0
, s
->A0
, 4);
6473 tcg_gen_qemu_st_i64(cpu_bndu
[reg
], s
->A0
,
6474 s
->mem_index
, MO_LEUL
);
6477 } else if (mod
!= 3) {
6479 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
6481 || (prefixes
& PREFIX_LOCK
)
6482 || s
->aflag
== MO_16
6487 tcg_gen_addi_tl(s
->A0
, cpu_regs
[a
.base
], a
.disp
);
6489 tcg_gen_movi_tl(s
->A0
, 0);
6491 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, a
.def_seg
, s
->override
);
6493 tcg_gen_mov_tl(s
->T0
, cpu_regs
[a
.index
]);
6495 tcg_gen_movi_tl(s
->T0
, 0);
6498 gen_helper_bndstx64(tcg_env
, s
->A0
, s
->T0
,
6499 cpu_bndl
[reg
], cpu_bndu
[reg
]);
6501 gen_helper_bndstx32(tcg_env
, s
->A0
, s
->T0
,
6502 cpu_bndl
[reg
], cpu_bndu
[reg
]);
6506 gen_nop_modrm(env
, s
, modrm
);
6508 case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
6509 modrm
= x86_ldub_code(env
, s
);
6510 gen_nop_modrm(env
, s
, modrm
);
6513 case 0x120: /* mov reg, crN */
6514 case 0x122: /* mov crN, reg */
6515 if (!check_cpl0(s
)) {
6518 modrm
= x86_ldub_code(env
, s
);
6520 * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6521 * AMD documentation (24594.pdf) and testing of Intel 386 and 486
6522 * processors all show that the mod bits are assumed to be 1's,
6523 * regardless of actual values.
6525 rm
= (modrm
& 7) | REX_B(s
);
6526 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6529 if ((prefixes
& PREFIX_LOCK
) &&
6530 (s
->cpuid_ext3_features
& CPUID_EXT3_CR8LEG
)) {
6542 ot
= (CODE64(s
) ? MO_64
: MO_32
);
6544 translator_io_start(&s
->base
);
6546 gen_svm_check_intercept(s
, SVM_EXIT_WRITE_CR0
+ reg
);
6547 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
6548 gen_helper_write_crN(tcg_env
, tcg_constant_i32(reg
), s
->T0
);
6549 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6551 gen_svm_check_intercept(s
, SVM_EXIT_READ_CR0
+ reg
);
6552 gen_helper_read_crN(s
->T0
, tcg_env
, tcg_constant_i32(reg
));
6553 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
6557 case 0x121: /* mov reg, drN */
6558 case 0x123: /* mov drN, reg */
6559 if (check_cpl0(s
)) {
6560 modrm
= x86_ldub_code(env
, s
);
6561 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6562 * AMD documentation (24594.pdf) and testing of
6563 * intel 386 and 486 processors all show that the mod bits
6564 * are assumed to be 1's, regardless of actual values.
6566 rm
= (modrm
& 7) | REX_B(s
);
6567 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6576 gen_svm_check_intercept(s
, SVM_EXIT_WRITE_DR0
+ reg
);
6577 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
6578 tcg_gen_movi_i32(s
->tmp2_i32
, reg
);
6579 gen_helper_set_dr(tcg_env
, s
->tmp2_i32
, s
->T0
);
6580 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6582 gen_svm_check_intercept(s
, SVM_EXIT_READ_DR0
+ reg
);
6583 tcg_gen_movi_i32(s
->tmp2_i32
, reg
);
6584 gen_helper_get_dr(s
->T0
, tcg_env
, s
->tmp2_i32
);
6585 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
6589 case 0x106: /* clts */
6590 if (check_cpl0(s
)) {
6591 gen_svm_check_intercept(s
, SVM_EXIT_WRITE_CR0
);
6592 gen_helper_clts(tcg_env
);
6593 /* abort block because static cpu state changed */
6594 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6597 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
6598 case 0x1c3: /* MOVNTI reg, mem */
6599 if (!(s
->cpuid_features
& CPUID_SSE2
))
6601 ot
= mo_64_32(dflag
);
6602 modrm
= x86_ldub_code(env
, s
);
6603 mod
= (modrm
>> 6) & 3;
6606 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6607 /* generate a generic store */
6608 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
6611 modrm
= x86_ldub_code(env
, s
);
6613 CASE_MODRM_MEM_OP(0): /* fxsave */
6614 if (!(s
->cpuid_features
& CPUID_FXSR
)
6615 || (prefixes
& PREFIX_LOCK
)) {
6618 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
6619 gen_exception(s
, EXCP07_PREX
);
6622 gen_lea_modrm(env
, s
, modrm
);
6623 gen_helper_fxsave(tcg_env
, s
->A0
);
6626 CASE_MODRM_MEM_OP(1): /* fxrstor */
6627 if (!(s
->cpuid_features
& CPUID_FXSR
)
6628 || (prefixes
& PREFIX_LOCK
)) {
6631 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
6632 gen_exception(s
, EXCP07_PREX
);
6635 gen_lea_modrm(env
, s
, modrm
);
6636 gen_helper_fxrstor(tcg_env
, s
->A0
);
6639 CASE_MODRM_MEM_OP(2): /* ldmxcsr */
6640 if ((s
->flags
& HF_EM_MASK
) || !(s
->flags
& HF_OSFXSR_MASK
)) {
6643 if (s
->flags
& HF_TS_MASK
) {
6644 gen_exception(s
, EXCP07_PREX
);
6647 gen_lea_modrm(env
, s
, modrm
);
6648 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
, s
->mem_index
, MO_LEUL
);
6649 gen_helper_ldmxcsr(tcg_env
, s
->tmp2_i32
);
6652 CASE_MODRM_MEM_OP(3): /* stmxcsr */
6653 if ((s
->flags
& HF_EM_MASK
) || !(s
->flags
& HF_OSFXSR_MASK
)) {
6656 if (s
->flags
& HF_TS_MASK
) {
6657 gen_exception(s
, EXCP07_PREX
);
6660 gen_helper_update_mxcsr(tcg_env
);
6661 gen_lea_modrm(env
, s
, modrm
);
6662 tcg_gen_ld32u_tl(s
->T0
, tcg_env
, offsetof(CPUX86State
, mxcsr
));
6663 gen_op_st_v(s
, MO_32
, s
->T0
, s
->A0
);
6666 CASE_MODRM_MEM_OP(4): /* xsave */
6667 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
6668 || (prefixes
& (PREFIX_LOCK
| PREFIX_DATA
6669 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
6672 gen_lea_modrm(env
, s
, modrm
);
6673 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
6675 gen_helper_xsave(tcg_env
, s
->A0
, s
->tmp1_i64
);
6678 CASE_MODRM_MEM_OP(5): /* xrstor */
6679 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
6680 || (prefixes
& (PREFIX_LOCK
| PREFIX_DATA
6681 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
6684 gen_lea_modrm(env
, s
, modrm
);
6685 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
6687 gen_helper_xrstor(tcg_env
, s
->A0
, s
->tmp1_i64
);
6688 /* XRSTOR is how MPX is enabled, which changes how
6689 we translate. Thus we need to end the TB. */
6690 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6693 CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
6694 if (prefixes
& PREFIX_LOCK
) {
6697 if (prefixes
& PREFIX_DATA
) {
6699 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_CLWB
)) {
6702 gen_nop_modrm(env
, s
, modrm
);
6705 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
6706 || (s
->cpuid_xsave_features
& CPUID_XSAVE_XSAVEOPT
) == 0
6707 || (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
))) {
6710 gen_lea_modrm(env
, s
, modrm
);
6711 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
6713 gen_helper_xsaveopt(tcg_env
, s
->A0
, s
->tmp1_i64
);
6717 CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
6718 if (prefixes
& PREFIX_LOCK
) {
6721 if (prefixes
& PREFIX_DATA
) {
6723 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_CLFLUSHOPT
)) {
6728 if ((s
->prefix
& (PREFIX_REPZ
| PREFIX_REPNZ
))
6729 || !(s
->cpuid_features
& CPUID_CLFLUSH
)) {
6733 gen_nop_modrm(env
, s
, modrm
);
6736 case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
6737 case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
6738 case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
6739 case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
6741 && (prefixes
& PREFIX_REPZ
)
6742 && !(prefixes
& PREFIX_LOCK
)
6743 && (s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_FSGSBASE
)) {
6744 TCGv base
, treg
, src
, dst
;
6746 /* Preserve hflags bits by testing CR4 at runtime. */
6747 tcg_gen_movi_i32(s
->tmp2_i32
, CR4_FSGSBASE_MASK
);
6748 gen_helper_cr4_testbit(tcg_env
, s
->tmp2_i32
);
6750 base
= cpu_seg_base
[modrm
& 8 ? R_GS
: R_FS
];
6751 treg
= cpu_regs
[(modrm
& 7) | REX_B(s
)];
6755 dst
= base
, src
= treg
;
6758 dst
= treg
, src
= base
;
6761 if (s
->dflag
== MO_32
) {
6762 tcg_gen_ext32u_tl(dst
, src
);
6764 tcg_gen_mov_tl(dst
, src
);
6770 case 0xf8: /* sfence / pcommit */
6771 if (prefixes
& PREFIX_DATA
) {
6773 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_PCOMMIT
)
6774 || (prefixes
& PREFIX_LOCK
)) {
6780 case 0xf9 ... 0xff: /* sfence */
6781 if (!(s
->cpuid_features
& CPUID_SSE
)
6782 || (prefixes
& PREFIX_LOCK
)) {
6785 tcg_gen_mb(TCG_MO_ST_ST
| TCG_BAR_SC
);
6787 case 0xe8 ... 0xef: /* lfence */
6788 if (!(s
->cpuid_features
& CPUID_SSE
)
6789 || (prefixes
& PREFIX_LOCK
)) {
6792 tcg_gen_mb(TCG_MO_LD_LD
| TCG_BAR_SC
);
6794 case 0xf0 ... 0xf7: /* mfence */
6795 if (!(s
->cpuid_features
& CPUID_SSE2
)
6796 || (prefixes
& PREFIX_LOCK
)) {
6799 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
6807 case 0x10d: /* 3DNow! prefetch(w) */
6808 modrm
= x86_ldub_code(env
, s
);
6809 mod
= (modrm
>> 6) & 3;
6812 gen_nop_modrm(env
, s
, modrm
);
6814 case 0x1aa: /* rsm */
6815 gen_svm_check_intercept(s
, SVM_EXIT_RSM
);
6816 if (!(s
->flags
& HF_SMM_MASK
))
6818 #ifdef CONFIG_USER_ONLY
6819 /* we should not be in SMM mode */
6820 g_assert_not_reached();
6822 gen_update_cc_op(s
);
6823 gen_update_eip_next(s
);
6824 gen_helper_rsm(tcg_env
);
6825 #endif /* CONFIG_USER_ONLY */
6826 s
->base
.is_jmp
= DISAS_EOB_ONLY
;
6828 case 0x1b8: /* SSE4.2 popcnt */
6829 if ((prefixes
& (PREFIX_REPZ
| PREFIX_LOCK
| PREFIX_REPNZ
)) !=
6832 if (!(s
->cpuid_ext_features
& CPUID_EXT_POPCNT
))
6835 modrm
= x86_ldub_code(env
, s
);
6836 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6838 if (s
->prefix
& PREFIX_DATA
) {
6841 ot
= mo_64_32(dflag
);
6844 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
6845 gen_extu(ot
, s
->T0
);
6846 tcg_gen_mov_tl(cpu_cc_src
, s
->T0
);
6847 tcg_gen_ctpop_tl(s
->T0
, s
->T0
);
6848 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
6850 set_cc_op(s
, CC_OP_POPCNT
);
6852 case 0x10e ... 0x117:
6853 case 0x128 ... 0x12f:
6854 case 0x138 ... 0x13a:
6855 case 0x150 ... 0x179:
6856 case 0x17c ... 0x17f:
6858 case 0x1c4 ... 0x1c6:
6859 case 0x1d0 ... 0x1fe:
6860 disas_insn_new(s
, cpu
, b
);
6867 gen_illegal_opcode(s
);
6870 gen_unknown_opcode(env
, s
);
6874 void tcg_x86_init(void)
6876 static const char reg_names
[CPU_NB_REGS
][4] = {
6877 #ifdef TARGET_X86_64
6905 static const char eip_name
[] = {
6906 #ifdef TARGET_X86_64
6912 static const char seg_base_names
[6][8] = {
6920 static const char bnd_regl_names
[4][8] = {
6921 "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
6923 static const char bnd_regu_names
[4][8] = {
6924 "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
6928 cpu_cc_op
= tcg_global_mem_new_i32(tcg_env
,
6929 offsetof(CPUX86State
, cc_op
), "cc_op");
6930 cpu_cc_dst
= tcg_global_mem_new(tcg_env
, offsetof(CPUX86State
, cc_dst
),
6932 cpu_cc_src
= tcg_global_mem_new(tcg_env
, offsetof(CPUX86State
, cc_src
),
6934 cpu_cc_src2
= tcg_global_mem_new(tcg_env
, offsetof(CPUX86State
, cc_src2
),
6936 cpu_eip
= tcg_global_mem_new(tcg_env
, offsetof(CPUX86State
, eip
), eip_name
);
6938 for (i
= 0; i
< CPU_NB_REGS
; ++i
) {
6939 cpu_regs
[i
] = tcg_global_mem_new(tcg_env
,
6940 offsetof(CPUX86State
, regs
[i
]),
6944 for (i
= 0; i
< 6; ++i
) {
6946 = tcg_global_mem_new(tcg_env
,
6947 offsetof(CPUX86State
, segs
[i
].base
),
6951 for (i
= 0; i
< 4; ++i
) {
6953 = tcg_global_mem_new_i64(tcg_env
,
6954 offsetof(CPUX86State
, bnd_regs
[i
].lb
),
6957 = tcg_global_mem_new_i64(tcg_env
,
6958 offsetof(CPUX86State
, bnd_regs
[i
].ub
),
6963 static void i386_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cpu
)
6965 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6966 CPUX86State
*env
= cpu_env(cpu
);
6967 uint32_t flags
= dc
->base
.tb
->flags
;
6968 uint32_t cflags
= tb_cflags(dc
->base
.tb
);
6969 int cpl
= (flags
>> HF_CPL_SHIFT
) & 3;
6970 int iopl
= (flags
>> IOPL_SHIFT
) & 3;
6972 dc
->cs_base
= dc
->base
.tb
->cs_base
;
6973 dc
->pc_save
= dc
->base
.pc_next
;
6975 #ifndef CONFIG_USER_ONLY
6980 /* We make some simplifying assumptions; validate they're correct. */
6981 g_assert(PE(dc
) == ((flags
& HF_PE_MASK
) != 0));
6982 g_assert(CPL(dc
) == cpl
);
6983 g_assert(IOPL(dc
) == iopl
);
6984 g_assert(VM86(dc
) == ((flags
& HF_VM_MASK
) != 0));
6985 g_assert(CODE32(dc
) == ((flags
& HF_CS32_MASK
) != 0));
6986 g_assert(CODE64(dc
) == ((flags
& HF_CS64_MASK
) != 0));
6987 g_assert(SS32(dc
) == ((flags
& HF_SS32_MASK
) != 0));
6988 g_assert(LMA(dc
) == ((flags
& HF_LMA_MASK
) != 0));
6989 g_assert(ADDSEG(dc
) == ((flags
& HF_ADDSEG_MASK
) != 0));
6990 g_assert(SVME(dc
) == ((flags
& HF_SVME_MASK
) != 0));
6991 g_assert(GUEST(dc
) == ((flags
& HF_GUEST_MASK
) != 0));
6993 dc
->cc_op
= CC_OP_DYNAMIC
;
6994 dc
->cc_op_dirty
= false;
6995 dc
->popl_esp_hack
= 0;
6996 /* select memory access functions */
6997 dc
->mem_index
= cpu_mmu_index(cpu
, false);
6998 dc
->cpuid_features
= env
->features
[FEAT_1_EDX
];
6999 dc
->cpuid_ext_features
= env
->features
[FEAT_1_ECX
];
7000 dc
->cpuid_ext2_features
= env
->features
[FEAT_8000_0001_EDX
];
7001 dc
->cpuid_ext3_features
= env
->features
[FEAT_8000_0001_ECX
];
7002 dc
->cpuid_7_0_ebx_features
= env
->features
[FEAT_7_0_EBX
];
7003 dc
->cpuid_7_0_ecx_features
= env
->features
[FEAT_7_0_ECX
];
7004 dc
->cpuid_7_1_eax_features
= env
->features
[FEAT_7_1_EAX
];
7005 dc
->cpuid_xsave_features
= env
->features
[FEAT_XSAVE
];
7006 dc
->jmp_opt
= !((cflags
& CF_NO_GOTO_TB
) ||
7007 (flags
& (HF_TF_MASK
| HF_INHIBIT_IRQ_MASK
)));
7009 * If jmp_opt, we want to handle each string instruction individually.
7010 * For icount also disable repz optimization so that each iteration
7011 * is accounted separately.
7013 dc
->repz_opt
= !dc
->jmp_opt
&& !(cflags
& CF_USE_ICOUNT
);
7015 dc
->T0
= tcg_temp_new();
7016 dc
->T1
= tcg_temp_new();
7017 dc
->A0
= tcg_temp_new();
7019 dc
->tmp0
= tcg_temp_new();
7020 dc
->tmp1_i64
= tcg_temp_new_i64();
7021 dc
->tmp2_i32
= tcg_temp_new_i32();
7022 dc
->tmp3_i32
= tcg_temp_new_i32();
7023 dc
->tmp4
= tcg_temp_new();
7024 dc
->cc_srcT
= tcg_temp_new();
7027 static void i386_tr_tb_start(DisasContextBase
*db
, CPUState
*cpu
)
7031 static void i386_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cpu
)
7033 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
7034 target_ulong pc_arg
= dc
->base
.pc_next
;
7036 dc
->prev_insn_start
= dc
->base
.insn_start
;
7037 dc
->prev_insn_end
= tcg_last_op();
7038 if (tb_cflags(dcbase
->tb
) & CF_PCREL
) {
7039 pc_arg
&= ~TARGET_PAGE_MASK
;
7041 tcg_gen_insn_start(pc_arg
, dc
->cc_op
);
7044 static void i386_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cpu
)
7046 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
7048 #ifdef TARGET_VSYSCALL_PAGE
7050 * Detect entry into the vsyscall page and invoke the syscall.
7052 if ((dc
->base
.pc_next
& TARGET_PAGE_MASK
) == TARGET_VSYSCALL_PAGE
) {
7053 gen_exception(dc
, EXCP_VSYSCALL
);
7054 dc
->base
.pc_next
= dc
->pc
+ 1;
7059 if (disas_insn(dc
, cpu
)) {
7060 target_ulong pc_next
= dc
->pc
;
7061 dc
->base
.pc_next
= pc_next
;
7063 if (dc
->base
.is_jmp
== DISAS_NEXT
) {
7064 if (dc
->flags
& (HF_TF_MASK
| HF_INHIBIT_IRQ_MASK
)) {
7066 * If single step mode, we generate only one instruction and
7067 * generate an exception.
7068 * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7069 * the flag and abort the translation to give the irqs a
7072 dc
->base
.is_jmp
= DISAS_EOB_NEXT
;
7073 } else if (!is_same_page(&dc
->base
, pc_next
)) {
7074 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
7080 static void i386_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cpu
)
7082 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
7084 switch (dc
->base
.is_jmp
) {
7085 case DISAS_NORETURN
:
7087 case DISAS_TOO_MANY
:
7088 gen_update_cc_op(dc
);
7089 gen_jmp_rel_csize(dc
, 0, 0);
7091 case DISAS_EOB_NEXT
:
7092 gen_update_cc_op(dc
);
7093 gen_update_eip_cur(dc
);
7095 case DISAS_EOB_ONLY
:
7098 case DISAS_EOB_INHIBIT_IRQ
:
7099 gen_update_cc_op(dc
);
7100 gen_update_eip_cur(dc
);
7101 gen_eob_inhibit_irq(dc
);
7107 g_assert_not_reached();
7111 static void i386_tr_disas_log(const DisasContextBase
*dcbase
,
7112 CPUState
*cpu
, FILE *logfile
)
7114 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
7116 fprintf(logfile
, "IN: %s\n", lookup_symbol(dc
->base
.pc_first
));
7117 target_disas(logfile
, cpu
, dc
->base
.pc_first
, dc
->base
.tb
->size
);
7120 static const TranslatorOps i386_tr_ops
= {
7121 .init_disas_context
= i386_tr_init_disas_context
,
7122 .tb_start
= i386_tr_tb_start
,
7123 .insn_start
= i386_tr_insn_start
,
7124 .translate_insn
= i386_tr_translate_insn
,
7125 .tb_stop
= i386_tr_tb_stop
,
7126 .disas_log
= i386_tr_disas_log
,
7129 /* generate intermediate code for basic block 'tb'. */
7130 void gen_intermediate_code(CPUState
*cpu
, TranslationBlock
*tb
, int *max_insns
,
7131 vaddr pc
, void *host_pc
)
7135 translator_loop(cpu
, tb
, max_insns
, pc
, host_pc
, &i386_tr_ops
, &dc
.base
);