4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
21 #include "qemu/host-utils.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "tcg/tcg-op-gvec.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/translator.h"
29 #include "fpu/softfloat.h"
31 #include "exec/helper-proto.h"
32 #include "exec/helper-gen.h"
33 #include "helper-tcg.h"
37 #define PREFIX_REPZ 0x01
38 #define PREFIX_REPNZ 0x02
39 #define PREFIX_LOCK 0x04
40 #define PREFIX_DATA 0x08
41 #define PREFIX_ADR 0x10
42 #define PREFIX_VEX 0x20
43 #define PREFIX_REX 0x40
53 /* For a switch indexed by MODRM, match all memory operands for a given OP. */
54 #define CASE_MODRM_MEM_OP(OP) \
55 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
56 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
57 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
59 #define CASE_MODRM_OP(OP) \
60 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
61 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
62 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
63 case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
65 //#define MACRO_TEST 1
/* global register indexes */
/* Lazy EFLAGS computation operands; interpreted according to cpu_cc_op. */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
/* Currently stored CC_OP_* value (see set_cc_op / gen_update_cc_op). */
static TCGv_i32 cpu_cc_op;
/* General purpose registers. */
static TCGv cpu_regs[CPU_NB_REGS];
/* Base address of each of the six segments. */
static TCGv cpu_seg_base[6];
/* BND0-BND3 lower/upper bound registers — presumably MPX; confirm. */
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];
76 #include "exec/gen-icount.h"
78 typedef struct DisasContext
{
79 DisasContextBase base
;
81 target_ulong pc
; /* pc = eip + cs_base */
82 target_ulong cs_base
; /* base of CS segment */
88 int8_t override
; /* -1 if no override, else R_CS, R_DS, etc */
94 #ifndef CONFIG_USER_ONLY
95 uint8_t cpl
; /* code priv level */
96 uint8_t iopl
; /* i/o priv level */
98 uint8_t vex_l
; /* vex vector length */
99 uint8_t vex_v
; /* vex vvvv register, without 1's complement. */
100 uint8_t popl_esp_hack
; /* for correct popl with esp base handling */
101 uint8_t rip_offset
; /* only used in x86_64, but left for simplicity */
108 bool vex_w
; /* used by AVX even on 32-bit processors */
109 bool jmp_opt
; /* use direct block chaining for direct jumps */
110 bool repz_opt
; /* optimize jumps within repz instructions */
113 CCOp cc_op
; /* current CC operation */
114 int mem_index
; /* select memory access functions */
115 uint32_t flags
; /* all execution flags */
117 int cpuid_ext_features
;
118 int cpuid_ext2_features
;
119 int cpuid_ext3_features
;
120 int cpuid_7_0_ebx_features
;
121 int cpuid_7_0_ecx_features
;
122 int cpuid_xsave_features
;
124 /* TCG local temps */
130 /* TCG local register indexes (only used inside old micro ops) */
138 TCGOp
*prev_insn_end
;
141 #define DISAS_EOB_ONLY DISAS_TARGET_0
142 #define DISAS_EOB_NEXT DISAS_TARGET_1
143 #define DISAS_EOB_INHIBIT_IRQ DISAS_TARGET_2
144 #define DISAS_JUMP DISAS_TARGET_3
146 /* The environment in which user-only runs is constrained. */
147 #ifdef CONFIG_USER_ONLY
151 #define SVME(S) false
152 #define GUEST(S) false
154 #define PE(S) (((S)->flags & HF_PE_MASK) != 0)
155 #define CPL(S) ((S)->cpl)
156 #define IOPL(S) ((S)->iopl)
157 #define SVME(S) (((S)->flags & HF_SVME_MASK) != 0)
158 #define GUEST(S) (((S)->flags & HF_GUEST_MASK) != 0)
160 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
161 #define VM86(S) false
162 #define CODE32(S) true
164 #define ADDSEG(S) false
166 #define VM86(S) (((S)->flags & HF_VM_MASK) != 0)
167 #define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
168 #define SS32(S) (((S)->flags & HF_SS32_MASK) != 0)
169 #define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
171 #if !defined(TARGET_X86_64)
172 #define CODE64(S) false
174 #elif defined(CONFIG_USER_ONLY)
175 #define CODE64(S) true
178 #define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
179 #define LMA(S) (((S)->flags & HF_LMA_MASK) != 0)
183 #define REX_PREFIX(S) (((S)->prefix & PREFIX_REX) != 0)
184 #define REX_W(S) ((S)->vex_w)
185 #define REX_R(S) ((S)->rex_r + 0)
186 #define REX_X(S) ((S)->rex_x + 0)
187 #define REX_B(S) ((S)->rex_b + 0)
189 #define REX_PREFIX(S) false
190 #define REX_W(S) false
/*
 * Many sysemu-only helpers are not reachable for user-only.
 * Define stub generators here, so that we need not either sprinkle
 * ifdefs through the translator, nor provide the helper function.
 */
#define STUB_HELPER(NAME, ...) \
    static inline void gen_helper_##NAME(__VA_ARGS__) \
    { qemu_build_not_reached(); }
#ifdef CONFIG_USER_ONLY
/* Sysemu-only helpers stubbed out for the user-only build (see STUB_HELPER). */
STUB_HELPER(clgi, TCGv_env env)
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(rdmsr, TCGv_env env)
STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(wrmsr, TCGv_env env)
/* Forward declarations for routines defined later in this file. */
static void gen_eob(DisasContext *s);
static void gen_jr(DisasContext *s);
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
static void gen_op(DisasContext *s1, int op, MemOp ot, int d);
static void gen_exception_gpf(DisasContext *s);
238 /* i386 arith/logic operations */
258 OP_SHL1
, /* undocumented */
274 /* I386 int registers */
275 OR_EAX
, /* MUST be even numbered */
284 OR_TMP0
= 16, /* temporary operand register */
286 OR_A0
, /* temporary register used when doing address evaluation */
296 /* Bit set if the global variable is live after setting CC_OP to X. */
297 static const uint8_t cc_op_live
[CC_OP_NB
] = {
298 [CC_OP_DYNAMIC
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
299 [CC_OP_EFLAGS
] = USES_CC_SRC
,
300 [CC_OP_MULB
... CC_OP_MULQ
] = USES_CC_DST
| USES_CC_SRC
,
301 [CC_OP_ADDB
... CC_OP_ADDQ
] = USES_CC_DST
| USES_CC_SRC
,
302 [CC_OP_ADCB
... CC_OP_ADCQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
303 [CC_OP_SUBB
... CC_OP_SUBQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRCT
,
304 [CC_OP_SBBB
... CC_OP_SBBQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
305 [CC_OP_LOGICB
... CC_OP_LOGICQ
] = USES_CC_DST
,
306 [CC_OP_INCB
... CC_OP_INCQ
] = USES_CC_DST
| USES_CC_SRC
,
307 [CC_OP_DECB
... CC_OP_DECQ
] = USES_CC_DST
| USES_CC_SRC
,
308 [CC_OP_SHLB
... CC_OP_SHLQ
] = USES_CC_DST
| USES_CC_SRC
,
309 [CC_OP_SARB
... CC_OP_SARQ
] = USES_CC_DST
| USES_CC_SRC
,
310 [CC_OP_BMILGB
... CC_OP_BMILGQ
] = USES_CC_DST
| USES_CC_SRC
,
311 [CC_OP_ADCX
] = USES_CC_DST
| USES_CC_SRC
,
312 [CC_OP_ADOX
] = USES_CC_SRC
| USES_CC_SRC2
,
313 [CC_OP_ADCOX
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
315 [CC_OP_POPCNT
] = USES_CC_SRC
,
318 static void set_cc_op(DisasContext
*s
, CCOp op
)
322 if (s
->cc_op
== op
) {
326 /* Discard CC computation that will no longer be used. */
327 dead
= cc_op_live
[s
->cc_op
] & ~cc_op_live
[op
];
328 if (dead
& USES_CC_DST
) {
329 tcg_gen_discard_tl(cpu_cc_dst
);
331 if (dead
& USES_CC_SRC
) {
332 tcg_gen_discard_tl(cpu_cc_src
);
334 if (dead
& USES_CC_SRC2
) {
335 tcg_gen_discard_tl(cpu_cc_src2
);
337 if (dead
& USES_CC_SRCT
) {
338 tcg_gen_discard_tl(s
->cc_srcT
);
341 if (op
== CC_OP_DYNAMIC
) {
342 /* The DYNAMIC setting is translator only, and should never be
343 stored. Thus we always consider it clean. */
344 s
->cc_op_dirty
= false;
346 /* Discard any computed CC_OP value (see shifts). */
347 if (s
->cc_op
== CC_OP_DYNAMIC
) {
348 tcg_gen_discard_i32(cpu_cc_op
);
350 s
->cc_op_dirty
= true;
355 static void gen_update_cc_op(DisasContext
*s
)
357 if (s
->cc_op_dirty
) {
358 tcg_gen_movi_i32(cpu_cc_op
, s
->cc_op
);
359 s
->cc_op_dirty
= false;
365 #define NB_OP_SIZES 4
367 #else /* !TARGET_X86_64 */
369 #define NB_OP_SIZES 3
371 #endif /* !TARGET_X86_64 */
374 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
375 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
376 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
377 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
378 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
380 #define REG_B_OFFSET 0
381 #define REG_H_OFFSET 1
382 #define REG_W_OFFSET 0
383 #define REG_L_OFFSET 0
384 #define REG_LH_OFFSET 4
387 /* In instruction encodings for byte register accesses the
388 * register number usually indicates "low 8 bits of register N";
389 * however there are some special cases where N 4..7 indicates
390 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
391 * true for this special case, false otherwise.
393 static inline bool byte_reg_is_xH(DisasContext
*s
, int reg
)
395 /* Any time the REX prefix is present, byte registers are uniform */
396 if (reg
< 4 || REX_PREFIX(s
)) {
402 /* Select the size of a push/pop operation. */
403 static inline MemOp
mo_pushpop(DisasContext
*s
, MemOp ot
)
406 return ot
== MO_16
? MO_16
: MO_64
;
412 /* Select the size of the stack pointer. */
413 static inline MemOp
mo_stacksize(DisasContext
*s
)
415 return CODE64(s
) ? MO_64
: SS32(s
) ? MO_32
: MO_16
;
418 /* Select only size 64 else 32. Used for SSE operand sizes. */
419 static inline MemOp
mo_64_32(MemOp ot
)
422 return ot
== MO_64
? MO_64
: MO_32
;
428 /* Select size 8 if lsb of B is clear, else OT. Used for decoding
429 byte vs word opcodes. */
430 static inline MemOp
mo_b_d(int b
, MemOp ot
)
432 return b
& 1 ? ot
: MO_8
;
435 /* Select size 8 if lsb of B is clear, else OT capped at 32.
436 Used for decoding operand size of port opcodes. */
437 static inline MemOp
mo_b_d32(int b
, MemOp ot
)
439 return b
& 1 ? (ot
== MO_16
? MO_16
: MO_32
) : MO_8
;
442 /* Compute the result of writing t0 to the OT-sized register REG.
444 * If DEST is NULL, store the result into the register and return the
447 * If DEST is not NULL, store the result into DEST and return the
450 static TCGv
gen_op_deposit_reg_v(DisasContext
*s
, MemOp ot
, int reg
, TCGv dest
, TCGv t0
)
454 if (byte_reg_is_xH(s
, reg
)) {
455 dest
= dest
? dest
: cpu_regs
[reg
- 4];
456 tcg_gen_deposit_tl(dest
, cpu_regs
[reg
- 4], t0
, 8, 8);
457 return cpu_regs
[reg
- 4];
459 dest
= dest
? dest
: cpu_regs
[reg
];
460 tcg_gen_deposit_tl(dest
, cpu_regs
[reg
], t0
, 0, 8);
463 dest
= dest
? dest
: cpu_regs
[reg
];
464 tcg_gen_deposit_tl(dest
, cpu_regs
[reg
], t0
, 0, 16);
467 /* For x86_64, this sets the higher half of register to zero.
468 For i386, this is equivalent to a mov. */
469 dest
= dest
? dest
: cpu_regs
[reg
];
470 tcg_gen_ext32u_tl(dest
, t0
);
474 dest
= dest
? dest
: cpu_regs
[reg
];
475 tcg_gen_mov_tl(dest
, t0
);
481 return cpu_regs
[reg
];
484 static void gen_op_mov_reg_v(DisasContext
*s
, MemOp ot
, int reg
, TCGv t0
)
486 gen_op_deposit_reg_v(s
, ot
, reg
, NULL
, t0
);
490 void gen_op_mov_v_reg(DisasContext
*s
, MemOp ot
, TCGv t0
, int reg
)
492 if (ot
== MO_8
&& byte_reg_is_xH(s
, reg
)) {
493 tcg_gen_extract_tl(t0
, cpu_regs
[reg
- 4], 8, 8);
495 tcg_gen_mov_tl(t0
, cpu_regs
[reg
]);
499 static void gen_add_A0_im(DisasContext
*s
, int val
)
501 tcg_gen_addi_tl(s
->A0
, s
->A0
, val
);
503 tcg_gen_ext32u_tl(s
->A0
, s
->A0
);
507 static inline void gen_op_jmp_v(DisasContext
*s
, TCGv dest
)
509 tcg_gen_mov_tl(cpu_eip
, dest
);
514 void gen_op_add_reg_im(DisasContext
*s
, MemOp size
, int reg
, int32_t val
)
516 tcg_gen_addi_tl(s
->tmp0
, cpu_regs
[reg
], val
);
517 gen_op_mov_reg_v(s
, size
, reg
, s
->tmp0
);
520 static inline void gen_op_add_reg_T0(DisasContext
*s
, MemOp size
, int reg
)
522 tcg_gen_add_tl(s
->tmp0
, cpu_regs
[reg
], s
->T0
);
523 gen_op_mov_reg_v(s
, size
, reg
, s
->tmp0
);
526 static inline void gen_op_ld_v(DisasContext
*s
, int idx
, TCGv t0
, TCGv a0
)
528 tcg_gen_qemu_ld_tl(t0
, a0
, s
->mem_index
, idx
| MO_LE
);
531 static inline void gen_op_st_v(DisasContext
*s
, int idx
, TCGv t0
, TCGv a0
)
533 tcg_gen_qemu_st_tl(t0
, a0
, s
->mem_index
, idx
| MO_LE
);
536 static inline void gen_op_st_rm_T0_A0(DisasContext
*s
, int idx
, int d
)
539 gen_op_st_v(s
, idx
, s
->T0
, s
->A0
);
541 gen_op_mov_reg_v(s
, idx
, d
, s
->T0
);
545 static void gen_update_eip_cur(DisasContext
*s
)
547 assert(s
->pc_save
!= -1);
548 if (tb_cflags(s
->base
.tb
) & CF_PCREL
) {
549 tcg_gen_addi_tl(cpu_eip
, cpu_eip
, s
->base
.pc_next
- s
->pc_save
);
551 tcg_gen_movi_tl(cpu_eip
, s
->base
.pc_next
- s
->cs_base
);
553 s
->pc_save
= s
->base
.pc_next
;
556 static void gen_update_eip_next(DisasContext
*s
)
558 assert(s
->pc_save
!= -1);
559 if (tb_cflags(s
->base
.tb
) & CF_PCREL
) {
560 tcg_gen_addi_tl(cpu_eip
, cpu_eip
, s
->pc
- s
->pc_save
);
562 tcg_gen_movi_tl(cpu_eip
, s
->pc
- s
->cs_base
);
567 static int cur_insn_len(DisasContext
*s
)
569 return s
->pc
- s
->base
.pc_next
;
572 static TCGv_i32
cur_insn_len_i32(DisasContext
*s
)
574 return tcg_constant_i32(cur_insn_len(s
));
577 static TCGv_i32
eip_next_i32(DisasContext
*s
)
579 assert(s
->pc_save
!= -1);
581 * This function has two users: lcall_real (always 16-bit mode), and
582 * iret_protected (16, 32, or 64-bit mode). IRET only uses the value
583 * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
584 * why passing a 32-bit value isn't broken. To avoid using this where
585 * we shouldn't, return -1 in 64-bit mode so that execution goes into
589 return tcg_constant_i32(-1);
591 if (tb_cflags(s
->base
.tb
) & CF_PCREL
) {
592 TCGv_i32 ret
= tcg_temp_new_i32();
593 tcg_gen_trunc_tl_i32(ret
, cpu_eip
);
594 tcg_gen_addi_i32(ret
, ret
, s
->pc
- s
->pc_save
);
597 return tcg_constant_i32(s
->pc
- s
->cs_base
);
601 static TCGv
eip_next_tl(DisasContext
*s
)
603 assert(s
->pc_save
!= -1);
604 if (tb_cflags(s
->base
.tb
) & CF_PCREL
) {
605 TCGv ret
= tcg_temp_new();
606 tcg_gen_addi_tl(ret
, cpu_eip
, s
->pc
- s
->pc_save
);
609 return tcg_constant_tl(s
->pc
- s
->cs_base
);
613 static TCGv
eip_cur_tl(DisasContext
*s
)
615 assert(s
->pc_save
!= -1);
616 if (tb_cflags(s
->base
.tb
) & CF_PCREL
) {
617 TCGv ret
= tcg_temp_new();
618 tcg_gen_addi_tl(ret
, cpu_eip
, s
->base
.pc_next
- s
->pc_save
);
621 return tcg_constant_tl(s
->base
.pc_next
- s
->cs_base
);
625 /* Compute SEG:REG into A0. SEG is selected from the override segment
626 (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to
627 indicate no override. */
628 static void gen_lea_v_seg(DisasContext
*s
, MemOp aflag
, TCGv a0
,
629 int def_seg
, int ovr_seg
)
635 tcg_gen_mov_tl(s
->A0
, a0
);
642 if (ovr_seg
< 0 && ADDSEG(s
)) {
646 tcg_gen_ext32u_tl(s
->A0
, a0
);
652 tcg_gen_ext16u_tl(s
->A0
, a0
);
667 TCGv seg
= cpu_seg_base
[ovr_seg
];
669 if (aflag
== MO_64
) {
670 tcg_gen_add_tl(s
->A0
, a0
, seg
);
671 } else if (CODE64(s
)) {
672 tcg_gen_ext32u_tl(s
->A0
, a0
);
673 tcg_gen_add_tl(s
->A0
, s
->A0
, seg
);
675 tcg_gen_add_tl(s
->A0
, a0
, seg
);
676 tcg_gen_ext32u_tl(s
->A0
, s
->A0
);
681 static inline void gen_string_movl_A0_ESI(DisasContext
*s
)
683 gen_lea_v_seg(s
, s
->aflag
, cpu_regs
[R_ESI
], R_DS
, s
->override
);
686 static inline void gen_string_movl_A0_EDI(DisasContext
*s
)
688 gen_lea_v_seg(s
, s
->aflag
, cpu_regs
[R_EDI
], R_ES
, -1);
691 static inline void gen_op_movl_T0_Dshift(DisasContext
*s
, MemOp ot
)
693 tcg_gen_ld32s_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, df
));
694 tcg_gen_shli_tl(s
->T0
, s
->T0
, ot
);
697 static TCGv
gen_ext_tl(TCGv dst
, TCGv src
, MemOp size
, bool sign
)
702 tcg_gen_ext8s_tl(dst
, src
);
704 tcg_gen_ext8u_tl(dst
, src
);
709 tcg_gen_ext16s_tl(dst
, src
);
711 tcg_gen_ext16u_tl(dst
, src
);
717 tcg_gen_ext32s_tl(dst
, src
);
719 tcg_gen_ext32u_tl(dst
, src
);
728 static void gen_extu(MemOp ot
, TCGv reg
)
730 gen_ext_tl(reg
, reg
, ot
, false);
733 static void gen_exts(MemOp ot
, TCGv reg
)
735 gen_ext_tl(reg
, reg
, ot
, true);
738 static void gen_op_j_ecx(DisasContext
*s
, TCGCond cond
, TCGLabel
*label1
)
740 tcg_gen_mov_tl(s
->tmp0
, cpu_regs
[R_ECX
]);
741 gen_extu(s
->aflag
, s
->tmp0
);
742 tcg_gen_brcondi_tl(cond
, s
->tmp0
, 0, label1
);
745 static inline void gen_op_jz_ecx(DisasContext
*s
, TCGLabel
*label1
)
747 gen_op_j_ecx(s
, TCG_COND_EQ
, label1
);
750 static inline void gen_op_jnz_ecx(DisasContext
*s
, TCGLabel
*label1
)
752 gen_op_j_ecx(s
, TCG_COND_NE
, label1
);
755 static void gen_helper_in_func(MemOp ot
, TCGv v
, TCGv_i32 n
)
759 gen_helper_inb(v
, cpu_env
, n
);
762 gen_helper_inw(v
, cpu_env
, n
);
765 gen_helper_inl(v
, cpu_env
, n
);
772 static void gen_helper_out_func(MemOp ot
, TCGv_i32 v
, TCGv_i32 n
)
776 gen_helper_outb(cpu_env
, v
, n
);
779 gen_helper_outw(cpu_env
, v
, n
);
782 gen_helper_outl(cpu_env
, v
, n
);
790 * Validate that access to [port, port + 1<<ot) is allowed.
791 * Raise #GP, or VMM exit if not.
793 static bool gen_check_io(DisasContext
*s
, MemOp ot
, TCGv_i32 port
,
796 #ifdef CONFIG_USER_ONLY
798 * We do not implement the ioperm(2) syscall, so the TSS check
801 gen_exception_gpf(s
);
804 if (PE(s
) && (CPL(s
) > IOPL(s
) || VM86(s
))) {
805 gen_helper_check_io(cpu_env
, port
, tcg_constant_i32(1 << ot
));
809 gen_update_eip_cur(s
);
810 if (s
->prefix
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
811 svm_flags
|= SVM_IOIO_REP_MASK
;
813 svm_flags
|= 1 << (SVM_IOIO_SIZE_SHIFT
+ ot
);
814 gen_helper_svm_check_io(cpu_env
, port
,
815 tcg_constant_i32(svm_flags
),
816 cur_insn_len_i32(s
));
822 static void gen_movs(DisasContext
*s
, MemOp ot
)
824 gen_string_movl_A0_ESI(s
);
825 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
826 gen_string_movl_A0_EDI(s
);
827 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
828 gen_op_movl_T0_Dshift(s
, ot
);
829 gen_op_add_reg_T0(s
, s
->aflag
, R_ESI
);
830 gen_op_add_reg_T0(s
, s
->aflag
, R_EDI
);
833 static void gen_op_update1_cc(DisasContext
*s
)
835 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
838 static void gen_op_update2_cc(DisasContext
*s
)
840 tcg_gen_mov_tl(cpu_cc_src
, s
->T1
);
841 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
844 static void gen_op_update3_cc(DisasContext
*s
, TCGv reg
)
846 tcg_gen_mov_tl(cpu_cc_src2
, reg
);
847 tcg_gen_mov_tl(cpu_cc_src
, s
->T1
);
848 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
851 static inline void gen_op_testl_T0_T1_cc(DisasContext
*s
)
853 tcg_gen_and_tl(cpu_cc_dst
, s
->T0
, s
->T1
);
856 static void gen_op_update_neg_cc(DisasContext
*s
)
858 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
859 tcg_gen_neg_tl(cpu_cc_src
, s
->T0
);
860 tcg_gen_movi_tl(s
->cc_srcT
, 0);
863 /* compute all eflags to cc_src */
864 static void gen_compute_eflags(DisasContext
*s
)
866 TCGv zero
, dst
, src1
, src2
;
869 if (s
->cc_op
== CC_OP_EFLAGS
) {
872 if (s
->cc_op
== CC_OP_CLR
) {
873 tcg_gen_movi_tl(cpu_cc_src
, CC_Z
| CC_P
);
874 set_cc_op(s
, CC_OP_EFLAGS
);
883 /* Take care to not read values that are not live. */
884 live
= cc_op_live
[s
->cc_op
] & ~USES_CC_SRCT
;
885 dead
= live
^ (USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
);
887 zero
= tcg_const_tl(0);
888 if (dead
& USES_CC_DST
) {
891 if (dead
& USES_CC_SRC
) {
894 if (dead
& USES_CC_SRC2
) {
900 gen_helper_cc_compute_all(cpu_cc_src
, dst
, src1
, src2
, cpu_cc_op
);
901 set_cc_op(s
, CC_OP_EFLAGS
);
908 typedef struct CCPrepare
{
918 /* compute eflags.C to reg */
919 static CCPrepare
gen_prepare_eflags_c(DisasContext
*s
, TCGv reg
)
925 case CC_OP_SUBB
... CC_OP_SUBQ
:
926 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
927 size
= s
->cc_op
- CC_OP_SUBB
;
928 t1
= gen_ext_tl(s
->tmp0
, cpu_cc_src
, size
, false);
929 /* If no temporary was used, be careful not to alias t1 and t0. */
930 t0
= t1
== cpu_cc_src
? s
->tmp0
: reg
;
931 tcg_gen_mov_tl(t0
, s
->cc_srcT
);
935 case CC_OP_ADDB
... CC_OP_ADDQ
:
936 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
937 size
= s
->cc_op
- CC_OP_ADDB
;
938 t1
= gen_ext_tl(s
->tmp0
, cpu_cc_src
, size
, false);
939 t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, false);
941 return (CCPrepare
) { .cond
= TCG_COND_LTU
, .reg
= t0
,
942 .reg2
= t1
, .mask
= -1, .use_reg2
= true };
944 case CC_OP_LOGICB
... CC_OP_LOGICQ
:
947 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
949 case CC_OP_INCB
... CC_OP_INCQ
:
950 case CC_OP_DECB
... CC_OP_DECQ
:
951 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
952 .mask
= -1, .no_setcond
= true };
954 case CC_OP_SHLB
... CC_OP_SHLQ
:
955 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
956 size
= s
->cc_op
- CC_OP_SHLB
;
957 shift
= (8 << size
) - 1;
958 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
959 .mask
= (target_ulong
)1 << shift
};
961 case CC_OP_MULB
... CC_OP_MULQ
:
962 return (CCPrepare
) { .cond
= TCG_COND_NE
,
963 .reg
= cpu_cc_src
, .mask
= -1 };
965 case CC_OP_BMILGB
... CC_OP_BMILGQ
:
966 size
= s
->cc_op
- CC_OP_BMILGB
;
967 t0
= gen_ext_tl(reg
, cpu_cc_src
, size
, false);
968 return (CCPrepare
) { .cond
= TCG_COND_EQ
, .reg
= t0
, .mask
= -1 };
972 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_dst
,
973 .mask
= -1, .no_setcond
= true };
976 case CC_OP_SARB
... CC_OP_SARQ
:
978 return (CCPrepare
) { .cond
= TCG_COND_NE
,
979 .reg
= cpu_cc_src
, .mask
= CC_C
};
982 /* The need to compute only C from CC_OP_DYNAMIC is important
983 in efficiently implementing e.g. INC at the start of a TB. */
985 gen_helper_cc_compute_c(reg
, cpu_cc_dst
, cpu_cc_src
,
986 cpu_cc_src2
, cpu_cc_op
);
987 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
988 .mask
= -1, .no_setcond
= true };
992 /* compute eflags.P to reg */
993 static CCPrepare
gen_prepare_eflags_p(DisasContext
*s
, TCGv reg
)
995 gen_compute_eflags(s
);
996 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1000 /* compute eflags.S to reg */
1001 static CCPrepare
gen_prepare_eflags_s(DisasContext
*s
, TCGv reg
)
1005 gen_compute_eflags(s
);
1011 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1015 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
1018 MemOp size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1019 TCGv t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, true);
1020 return (CCPrepare
) { .cond
= TCG_COND_LT
, .reg
= t0
, .mask
= -1 };
1025 /* compute eflags.O to reg */
1026 static CCPrepare
gen_prepare_eflags_o(DisasContext
*s
, TCGv reg
)
1031 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src2
,
1032 .mask
= -1, .no_setcond
= true };
1035 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
1037 gen_compute_eflags(s
);
1038 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1043 /* compute eflags.Z to reg */
1044 static CCPrepare
gen_prepare_eflags_z(DisasContext
*s
, TCGv reg
)
1048 gen_compute_eflags(s
);
1054 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1057 return (CCPrepare
) { .cond
= TCG_COND_ALWAYS
, .mask
= -1 };
1059 return (CCPrepare
) { .cond
= TCG_COND_EQ
, .reg
= cpu_cc_src
,
1063 MemOp size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1064 TCGv t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, false);
1065 return (CCPrepare
) { .cond
= TCG_COND_EQ
, .reg
= t0
, .mask
= -1 };
1070 /* perform a conditional store into register 'reg' according to jump opcode
1071 value 'b'. In the fast case, T0 is guaranted not to be used. */
1072 static CCPrepare
gen_prepare_cc(DisasContext
*s
, int b
, TCGv reg
)
1074 int inv
, jcc_op
, cond
;
1080 jcc_op
= (b
>> 1) & 7;
1083 case CC_OP_SUBB
... CC_OP_SUBQ
:
1084 /* We optimize relational operators for the cmp/jcc case. */
1085 size
= s
->cc_op
- CC_OP_SUBB
;
1088 tcg_gen_mov_tl(s
->tmp4
, s
->cc_srcT
);
1089 gen_extu(size
, s
->tmp4
);
1090 t0
= gen_ext_tl(s
->tmp0
, cpu_cc_src
, size
, false);
1091 cc
= (CCPrepare
) { .cond
= TCG_COND_LEU
, .reg
= s
->tmp4
,
1092 .reg2
= t0
, .mask
= -1, .use_reg2
= true };
1101 tcg_gen_mov_tl(s
->tmp4
, s
->cc_srcT
);
1102 gen_exts(size
, s
->tmp4
);
1103 t0
= gen_ext_tl(s
->tmp0
, cpu_cc_src
, size
, true);
1104 cc
= (CCPrepare
) { .cond
= cond
, .reg
= s
->tmp4
,
1105 .reg2
= t0
, .mask
= -1, .use_reg2
= true };
1115 /* This actually generates good code for JC, JZ and JS. */
1118 cc
= gen_prepare_eflags_o(s
, reg
);
1121 cc
= gen_prepare_eflags_c(s
, reg
);
1124 cc
= gen_prepare_eflags_z(s
, reg
);
1127 gen_compute_eflags(s
);
1128 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1129 .mask
= CC_Z
| CC_C
};
1132 cc
= gen_prepare_eflags_s(s
, reg
);
1135 cc
= gen_prepare_eflags_p(s
, reg
);
1138 gen_compute_eflags(s
);
1139 if (reg
== cpu_cc_src
) {
1142 tcg_gen_shri_tl(reg
, cpu_cc_src
, 4); /* CC_O -> CC_S */
1143 tcg_gen_xor_tl(reg
, reg
, cpu_cc_src
);
1144 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
1149 gen_compute_eflags(s
);
1150 if (reg
== cpu_cc_src
) {
1153 tcg_gen_shri_tl(reg
, cpu_cc_src
, 4); /* CC_O -> CC_S */
1154 tcg_gen_xor_tl(reg
, reg
, cpu_cc_src
);
1155 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
1156 .mask
= CC_S
| CC_Z
};
1163 cc
.cond
= tcg_invert_cond(cc
.cond
);
1168 static void gen_setcc1(DisasContext
*s
, int b
, TCGv reg
)
1170 CCPrepare cc
= gen_prepare_cc(s
, b
, reg
);
1172 if (cc
.no_setcond
) {
1173 if (cc
.cond
== TCG_COND_EQ
) {
1174 tcg_gen_xori_tl(reg
, cc
.reg
, 1);
1176 tcg_gen_mov_tl(reg
, cc
.reg
);
1181 if (cc
.cond
== TCG_COND_NE
&& !cc
.use_reg2
&& cc
.imm
== 0 &&
1182 cc
.mask
!= 0 && (cc
.mask
& (cc
.mask
- 1)) == 0) {
1183 tcg_gen_shri_tl(reg
, cc
.reg
, ctztl(cc
.mask
));
1184 tcg_gen_andi_tl(reg
, reg
, 1);
1187 if (cc
.mask
!= -1) {
1188 tcg_gen_andi_tl(reg
, cc
.reg
, cc
.mask
);
1192 tcg_gen_setcond_tl(cc
.cond
, reg
, cc
.reg
, cc
.reg2
);
1194 tcg_gen_setcondi_tl(cc
.cond
, reg
, cc
.reg
, cc
.imm
);
1198 static inline void gen_compute_eflags_c(DisasContext
*s
, TCGv reg
)
1200 gen_setcc1(s
, JCC_B
<< 1, reg
);
1203 /* generate a conditional jump to label 'l1' according to jump opcode
1204 value 'b'. In the fast case, T0 is guaranted not to be used. */
1205 static inline void gen_jcc1_noeob(DisasContext
*s
, int b
, TCGLabel
*l1
)
1207 CCPrepare cc
= gen_prepare_cc(s
, b
, s
->T0
);
1209 if (cc
.mask
!= -1) {
1210 tcg_gen_andi_tl(s
->T0
, cc
.reg
, cc
.mask
);
1214 tcg_gen_brcond_tl(cc
.cond
, cc
.reg
, cc
.reg2
, l1
);
1216 tcg_gen_brcondi_tl(cc
.cond
, cc
.reg
, cc
.imm
, l1
);
1220 /* Generate a conditional jump to label 'l1' according to jump opcode
1221 value 'b'. In the fast case, T0 is guaranted not to be used.
1222 A translation block must end soon. */
1223 static inline void gen_jcc1(DisasContext
*s
, int b
, TCGLabel
*l1
)
1225 CCPrepare cc
= gen_prepare_cc(s
, b
, s
->T0
);
1227 gen_update_cc_op(s
);
1228 if (cc
.mask
!= -1) {
1229 tcg_gen_andi_tl(s
->T0
, cc
.reg
, cc
.mask
);
1232 set_cc_op(s
, CC_OP_DYNAMIC
);
1234 tcg_gen_brcond_tl(cc
.cond
, cc
.reg
, cc
.reg2
, l1
);
1236 tcg_gen_brcondi_tl(cc
.cond
, cc
.reg
, cc
.imm
, l1
);
1240 /* XXX: does not work with gdbstub "ice" single step - not a
1242 static TCGLabel
*gen_jz_ecx_string(DisasContext
*s
)
1244 TCGLabel
*l1
= gen_new_label();
1245 TCGLabel
*l2
= gen_new_label();
1246 gen_op_jnz_ecx(s
, l1
);
1248 gen_jmp_rel_csize(s
, 0, 1);
1253 static void gen_stos(DisasContext
*s
, MemOp ot
)
1255 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, R_EAX
);
1256 gen_string_movl_A0_EDI(s
);
1257 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
1258 gen_op_movl_T0_Dshift(s
, ot
);
1259 gen_op_add_reg_T0(s
, s
->aflag
, R_EDI
);
1262 static void gen_lods(DisasContext
*s
, MemOp ot
)
1264 gen_string_movl_A0_ESI(s
);
1265 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1266 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T0
);
1267 gen_op_movl_T0_Dshift(s
, ot
);
1268 gen_op_add_reg_T0(s
, s
->aflag
, R_ESI
);
1271 static void gen_scas(DisasContext
*s
, MemOp ot
)
1273 gen_string_movl_A0_EDI(s
);
1274 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
1275 gen_op(s
, OP_CMPL
, ot
, R_EAX
);
1276 gen_op_movl_T0_Dshift(s
, ot
);
1277 gen_op_add_reg_T0(s
, s
->aflag
, R_EDI
);
1280 static void gen_cmps(DisasContext
*s
, MemOp ot
)
1282 gen_string_movl_A0_EDI(s
);
1283 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
1284 gen_string_movl_A0_ESI(s
);
1285 gen_op(s
, OP_CMPL
, ot
, OR_TMP0
);
1286 gen_op_movl_T0_Dshift(s
, ot
);
1287 gen_op_add_reg_T0(s
, s
->aflag
, R_ESI
);
1288 gen_op_add_reg_T0(s
, s
->aflag
, R_EDI
);
1291 static void gen_bpt_io(DisasContext
*s
, TCGv_i32 t_port
, int ot
)
1293 if (s
->flags
& HF_IOBPT_MASK
) {
1294 #ifdef CONFIG_USER_ONLY
1295 /* user-mode cpu should not be in IOBPT mode */
1296 g_assert_not_reached();
1298 TCGv_i32 t_size
= tcg_constant_i32(1 << ot
);
1299 TCGv t_next
= eip_next_tl(s
);
1300 gen_helper_bpt_io(cpu_env
, t_port
, t_size
, t_next
);
1301 #endif /* CONFIG_USER_ONLY */
1305 static void gen_ins(DisasContext
*s
, MemOp ot
)
1307 gen_string_movl_A0_EDI(s
);
1308 /* Note: we must do this dummy write first to be restartable in
1309 case of page fault. */
1310 tcg_gen_movi_tl(s
->T0
, 0);
1311 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
1312 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
1313 tcg_gen_andi_i32(s
->tmp2_i32
, s
->tmp2_i32
, 0xffff);
1314 gen_helper_in_func(ot
, s
->T0
, s
->tmp2_i32
);
1315 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
1316 gen_op_movl_T0_Dshift(s
, ot
);
1317 gen_op_add_reg_T0(s
, s
->aflag
, R_EDI
);
1318 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
1321 static void gen_outs(DisasContext
*s
, MemOp ot
)
1323 gen_string_movl_A0_ESI(s
);
1324 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1326 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
1327 tcg_gen_andi_i32(s
->tmp2_i32
, s
->tmp2_i32
, 0xffff);
1328 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T0
);
1329 gen_helper_out_func(ot
, s
->tmp2_i32
, s
->tmp3_i32
);
1330 gen_op_movl_T0_Dshift(s
, ot
);
1331 gen_op_add_reg_T0(s
, s
->aflag
, R_ESI
);
1332 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
1335 /* Generate jumps to current or next instruction */
1336 static void gen_repz(DisasContext
*s
, MemOp ot
,
1337 void (*fn
)(DisasContext
*s
, MemOp ot
))
1340 gen_update_cc_op(s
);
1341 l2
= gen_jz_ecx_string(s
);
1343 gen_op_add_reg_im(s
, s
->aflag
, R_ECX
, -1);
1345 * A loop would cause two single step exceptions if ECX = 1
1346 * before rep string_insn
1349 gen_op_jz_ecx(s
, l2
);
1351 gen_jmp_rel_csize(s
, -cur_insn_len(s
), 0);
1354 #define GEN_REPZ(op) \
1355 static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \
1356 { gen_repz(s, ot, gen_##op); }
1358 static void gen_repz2(DisasContext
*s
, MemOp ot
, int nz
,
1359 void (*fn
)(DisasContext
*s
, MemOp ot
))
1362 gen_update_cc_op(s
);
1363 l2
= gen_jz_ecx_string(s
);
1365 gen_op_add_reg_im(s
, s
->aflag
, R_ECX
, -1);
1366 gen_update_cc_op(s
);
1367 gen_jcc1(s
, (JCC_Z
<< 1) | (nz
^ 1), l2
);
1369 gen_op_jz_ecx(s
, l2
);
1371 gen_jmp_rel_csize(s
, -cur_insn_len(s
), 0);
1374 #define GEN_REPZ2(op) \
1375 static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
1376 { gen_repz2(s, ot, nz, gen_##op); }
1386 static void gen_helper_fp_arith_ST0_FT0(int op
)
1390 gen_helper_fadd_ST0_FT0(cpu_env
);
1393 gen_helper_fmul_ST0_FT0(cpu_env
);
1396 gen_helper_fcom_ST0_FT0(cpu_env
);
1399 gen_helper_fcom_ST0_FT0(cpu_env
);
1402 gen_helper_fsub_ST0_FT0(cpu_env
);
1405 gen_helper_fsubr_ST0_FT0(cpu_env
);
1408 gen_helper_fdiv_ST0_FT0(cpu_env
);
1411 gen_helper_fdivr_ST0_FT0(cpu_env
);
1416 /* NOTE the exception in "r" op ordering */
1417 static void gen_helper_fp_arith_STN_ST0(int op
, int opreg
)
1419 TCGv_i32 tmp
= tcg_const_i32(opreg
);
1422 gen_helper_fadd_STN_ST0(cpu_env
, tmp
);
1425 gen_helper_fmul_STN_ST0(cpu_env
, tmp
);
1428 gen_helper_fsubr_STN_ST0(cpu_env
, tmp
);
1431 gen_helper_fsub_STN_ST0(cpu_env
, tmp
);
1434 gen_helper_fdivr_STN_ST0(cpu_env
, tmp
);
1437 gen_helper_fdiv_STN_ST0(cpu_env
, tmp
);
1442 static void gen_exception(DisasContext
*s
, int trapno
)
1444 gen_update_cc_op(s
);
1445 gen_update_eip_cur(s
);
1446 gen_helper_raise_exception(cpu_env
, tcg_const_i32(trapno
));
1447 s
->base
.is_jmp
= DISAS_NORETURN
;
1450 /* Generate #UD for the current instruction. The assumption here is that
1451 the instruction is known, but it isn't allowed in the current cpu mode. */
1452 static void gen_illegal_opcode(DisasContext
*s
)
1454 gen_exception(s
, EXCP06_ILLOP
);
1457 /* Generate #GP for the current instruction. */
1458 static void gen_exception_gpf(DisasContext
*s
)
1460 gen_exception(s
, EXCP0D_GPF
);
1463 /* Check for cpl == 0; if not, raise #GP and return false. */
1464 static bool check_cpl0(DisasContext
*s
)
1469 gen_exception_gpf(s
);
1473 /* If vm86, check for iopl == 3; if not, raise #GP and return false. */
1474 static bool check_vm86_iopl(DisasContext
*s
)
1476 if (!VM86(s
) || IOPL(s
) == 3) {
1479 gen_exception_gpf(s
);
1483 /* Check for iopl allowing access; if not, raise #GP and return false. */
1484 static bool check_iopl(DisasContext
*s
)
1486 if (VM86(s
) ? IOPL(s
) == 3 : CPL(s
) <= IOPL(s
)) {
1489 gen_exception_gpf(s
);
1493 /* if d == OR_TMP0, it means memory operand (address in A0) */
1494 static void gen_op(DisasContext
*s1
, int op
, MemOp ot
, int d
)
1497 if (s1
->prefix
& PREFIX_LOCK
) {
1498 /* Lock prefix when destination is not memory. */
1499 gen_illegal_opcode(s1
);
1502 gen_op_mov_v_reg(s1
, ot
, s1
->T0
, d
);
1503 } else if (!(s1
->prefix
& PREFIX_LOCK
)) {
1504 gen_op_ld_v(s1
, ot
, s1
->T0
, s1
->A0
);
1508 gen_compute_eflags_c(s1
, s1
->tmp4
);
1509 if (s1
->prefix
& PREFIX_LOCK
) {
1510 tcg_gen_add_tl(s1
->T0
, s1
->tmp4
, s1
->T1
);
1511 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T0
,
1512 s1
->mem_index
, ot
| MO_LE
);
1514 tcg_gen_add_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1515 tcg_gen_add_tl(s1
->T0
, s1
->T0
, s1
->tmp4
);
1516 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1518 gen_op_update3_cc(s1
, s1
->tmp4
);
1519 set_cc_op(s1
, CC_OP_ADCB
+ ot
);
1522 gen_compute_eflags_c(s1
, s1
->tmp4
);
1523 if (s1
->prefix
& PREFIX_LOCK
) {
1524 tcg_gen_add_tl(s1
->T0
, s1
->T1
, s1
->tmp4
);
1525 tcg_gen_neg_tl(s1
->T0
, s1
->T0
);
1526 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T0
,
1527 s1
->mem_index
, ot
| MO_LE
);
1529 tcg_gen_sub_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1530 tcg_gen_sub_tl(s1
->T0
, s1
->T0
, s1
->tmp4
);
1531 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1533 gen_op_update3_cc(s1
, s1
->tmp4
);
1534 set_cc_op(s1
, CC_OP_SBBB
+ ot
);
1537 if (s1
->prefix
& PREFIX_LOCK
) {
1538 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T1
,
1539 s1
->mem_index
, ot
| MO_LE
);
1541 tcg_gen_add_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1542 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1544 gen_op_update2_cc(s1
);
1545 set_cc_op(s1
, CC_OP_ADDB
+ ot
);
1548 if (s1
->prefix
& PREFIX_LOCK
) {
1549 tcg_gen_neg_tl(s1
->T0
, s1
->T1
);
1550 tcg_gen_atomic_fetch_add_tl(s1
->cc_srcT
, s1
->A0
, s1
->T0
,
1551 s1
->mem_index
, ot
| MO_LE
);
1552 tcg_gen_sub_tl(s1
->T0
, s1
->cc_srcT
, s1
->T1
);
1554 tcg_gen_mov_tl(s1
->cc_srcT
, s1
->T0
);
1555 tcg_gen_sub_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1556 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1558 gen_op_update2_cc(s1
);
1559 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1563 if (s1
->prefix
& PREFIX_LOCK
) {
1564 tcg_gen_atomic_and_fetch_tl(s1
->T0
, s1
->A0
, s1
->T1
,
1565 s1
->mem_index
, ot
| MO_LE
);
1567 tcg_gen_and_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1568 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1570 gen_op_update1_cc(s1
);
1571 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1574 if (s1
->prefix
& PREFIX_LOCK
) {
1575 tcg_gen_atomic_or_fetch_tl(s1
->T0
, s1
->A0
, s1
->T1
,
1576 s1
->mem_index
, ot
| MO_LE
);
1578 tcg_gen_or_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1579 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1581 gen_op_update1_cc(s1
);
1582 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1585 if (s1
->prefix
& PREFIX_LOCK
) {
1586 tcg_gen_atomic_xor_fetch_tl(s1
->T0
, s1
->A0
, s1
->T1
,
1587 s1
->mem_index
, ot
| MO_LE
);
1589 tcg_gen_xor_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1590 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1592 gen_op_update1_cc(s1
);
1593 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1596 tcg_gen_mov_tl(cpu_cc_src
, s1
->T1
);
1597 tcg_gen_mov_tl(s1
->cc_srcT
, s1
->T0
);
1598 tcg_gen_sub_tl(cpu_cc_dst
, s1
->T0
, s1
->T1
);
1599 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1604 /* if d == OR_TMP0, it means memory operand (address in A0) */
1605 static void gen_inc(DisasContext
*s1
, MemOp ot
, int d
, int c
)
1607 if (s1
->prefix
& PREFIX_LOCK
) {
1609 /* Lock prefix when destination is not memory */
1610 gen_illegal_opcode(s1
);
1613 tcg_gen_movi_tl(s1
->T0
, c
> 0 ? 1 : -1);
1614 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T0
,
1615 s1
->mem_index
, ot
| MO_LE
);
1618 gen_op_mov_v_reg(s1
, ot
, s1
->T0
, d
);
1620 gen_op_ld_v(s1
, ot
, s1
->T0
, s1
->A0
);
1622 tcg_gen_addi_tl(s1
->T0
, s1
->T0
, (c
> 0 ? 1 : -1));
1623 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1626 gen_compute_eflags_c(s1
, cpu_cc_src
);
1627 tcg_gen_mov_tl(cpu_cc_dst
, s1
->T0
);
1628 set_cc_op(s1
, (c
> 0 ? CC_OP_INCB
: CC_OP_DECB
) + ot
);
1631 static void gen_shift_flags(DisasContext
*s
, MemOp ot
, TCGv result
,
1632 TCGv shm1
, TCGv count
, bool is_right
)
1634 TCGv_i32 z32
, s32
, oldop
;
1637 /* Store the results into the CC variables. If we know that the
1638 variable must be dead, store unconditionally. Otherwise we'll
1639 need to not disrupt the current contents. */
1640 z_tl
= tcg_const_tl(0);
1641 if (cc_op_live
[s
->cc_op
] & USES_CC_DST
) {
1642 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_dst
, count
, z_tl
,
1643 result
, cpu_cc_dst
);
1645 tcg_gen_mov_tl(cpu_cc_dst
, result
);
1647 if (cc_op_live
[s
->cc_op
] & USES_CC_SRC
) {
1648 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_src
, count
, z_tl
,
1651 tcg_gen_mov_tl(cpu_cc_src
, shm1
);
1653 tcg_temp_free(z_tl
);
1655 /* Get the two potential CC_OP values into temporaries. */
1656 tcg_gen_movi_i32(s
->tmp2_i32
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1657 if (s
->cc_op
== CC_OP_DYNAMIC
) {
1660 tcg_gen_movi_i32(s
->tmp3_i32
, s
->cc_op
);
1661 oldop
= s
->tmp3_i32
;
1664 /* Conditionally store the CC_OP value. */
1665 z32
= tcg_const_i32(0);
1666 s32
= tcg_temp_new_i32();
1667 tcg_gen_trunc_tl_i32(s32
, count
);
1668 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, s32
, z32
, s
->tmp2_i32
, oldop
);
1669 tcg_temp_free_i32(z32
);
1670 tcg_temp_free_i32(s32
);
1672 /* The CC_OP value is no longer predictable. */
1673 set_cc_op(s
, CC_OP_DYNAMIC
);
1676 static void gen_shift_rm_T1(DisasContext
*s
, MemOp ot
, int op1
,
1677 int is_right
, int is_arith
)
1679 target_ulong mask
= (ot
== MO_64
? 0x3f : 0x1f);
1682 if (op1
== OR_TMP0
) {
1683 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1685 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1688 tcg_gen_andi_tl(s
->T1
, s
->T1
, mask
);
1689 tcg_gen_subi_tl(s
->tmp0
, s
->T1
, 1);
1693 gen_exts(ot
, s
->T0
);
1694 tcg_gen_sar_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1695 tcg_gen_sar_tl(s
->T0
, s
->T0
, s
->T1
);
1697 gen_extu(ot
, s
->T0
);
1698 tcg_gen_shr_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1699 tcg_gen_shr_tl(s
->T0
, s
->T0
, s
->T1
);
1702 tcg_gen_shl_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1703 tcg_gen_shl_tl(s
->T0
, s
->T0
, s
->T1
);
1707 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1709 gen_shift_flags(s
, ot
, s
->T0
, s
->tmp0
, s
->T1
, is_right
);
1712 static void gen_shift_rm_im(DisasContext
*s
, MemOp ot
, int op1
, int op2
,
1713 int is_right
, int is_arith
)
1715 int mask
= (ot
== MO_64
? 0x3f : 0x1f);
1719 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1721 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1727 gen_exts(ot
, s
->T0
);
1728 tcg_gen_sari_tl(s
->tmp4
, s
->T0
, op2
- 1);
1729 tcg_gen_sari_tl(s
->T0
, s
->T0
, op2
);
1731 gen_extu(ot
, s
->T0
);
1732 tcg_gen_shri_tl(s
->tmp4
, s
->T0
, op2
- 1);
1733 tcg_gen_shri_tl(s
->T0
, s
->T0
, op2
);
1736 tcg_gen_shli_tl(s
->tmp4
, s
->T0
, op2
- 1);
1737 tcg_gen_shli_tl(s
->T0
, s
->T0
, op2
);
1742 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1744 /* update eflags if non zero shift */
1746 tcg_gen_mov_tl(cpu_cc_src
, s
->tmp4
);
1747 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
1748 set_cc_op(s
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1752 static void gen_rot_rm_T1(DisasContext
*s
, MemOp ot
, int op1
, int is_right
)
1754 target_ulong mask
= (ot
== MO_64
? 0x3f : 0x1f);
1758 if (op1
== OR_TMP0
) {
1759 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1761 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1764 tcg_gen_andi_tl(s
->T1
, s
->T1
, mask
);
1768 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1769 tcg_gen_ext8u_tl(s
->T0
, s
->T0
);
1770 tcg_gen_muli_tl(s
->T0
, s
->T0
, 0x01010101);
1773 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1774 tcg_gen_deposit_tl(s
->T0
, s
->T0
, s
->T0
, 16, 16);
1777 #ifdef TARGET_X86_64
1779 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
1780 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
1782 tcg_gen_rotr_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
1784 tcg_gen_rotl_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
1786 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
1791 tcg_gen_rotr_tl(s
->T0
, s
->T0
, s
->T1
);
1793 tcg_gen_rotl_tl(s
->T0
, s
->T0
, s
->T1
);
1799 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1801 /* We'll need the flags computed into CC_SRC. */
1802 gen_compute_eflags(s
);
1804 /* The value that was "rotated out" is now present at the other end
1805 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1806 since we've computed the flags into CC_SRC, these variables are
1809 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
- 1);
1810 tcg_gen_shri_tl(cpu_cc_dst
, s
->T0
, mask
);
1811 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1813 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
);
1814 tcg_gen_andi_tl(cpu_cc_dst
, s
->T0
, 1);
1816 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1817 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1819 /* Now conditionally store the new CC_OP value. If the shift count
1820 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1821 Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
1822 exactly as we computed above. */
1823 t0
= tcg_const_i32(0);
1824 t1
= tcg_temp_new_i32();
1825 tcg_gen_trunc_tl_i32(t1
, s
->T1
);
1826 tcg_gen_movi_i32(s
->tmp2_i32
, CC_OP_ADCOX
);
1827 tcg_gen_movi_i32(s
->tmp3_i32
, CC_OP_EFLAGS
);
1828 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, t1
, t0
,
1829 s
->tmp2_i32
, s
->tmp3_i32
);
1830 tcg_temp_free_i32(t0
);
1831 tcg_temp_free_i32(t1
);
1833 /* The CC_OP value is no longer predictable. */
1834 set_cc_op(s
, CC_OP_DYNAMIC
);
1837 static void gen_rot_rm_im(DisasContext
*s
, MemOp ot
, int op1
, int op2
,
1840 int mask
= (ot
== MO_64
? 0x3f : 0x1f);
1844 if (op1
== OR_TMP0
) {
1845 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1847 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1853 #ifdef TARGET_X86_64
1855 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
1857 tcg_gen_rotri_i32(s
->tmp2_i32
, s
->tmp2_i32
, op2
);
1859 tcg_gen_rotli_i32(s
->tmp2_i32
, s
->tmp2_i32
, op2
);
1861 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
1866 tcg_gen_rotri_tl(s
->T0
, s
->T0
, op2
);
1868 tcg_gen_rotli_tl(s
->T0
, s
->T0
, op2
);
1879 shift
= mask
+ 1 - shift
;
1881 gen_extu(ot
, s
->T0
);
1882 tcg_gen_shli_tl(s
->tmp0
, s
->T0
, shift
);
1883 tcg_gen_shri_tl(s
->T0
, s
->T0
, mask
+ 1 - shift
);
1884 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->tmp0
);
1890 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1893 /* Compute the flags into CC_SRC. */
1894 gen_compute_eflags(s
);
1896 /* The value that was "rotated out" is now present at the other end
1897 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1898 since we've computed the flags into CC_SRC, these variables are
1901 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
- 1);
1902 tcg_gen_shri_tl(cpu_cc_dst
, s
->T0
, mask
);
1903 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1905 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
);
1906 tcg_gen_andi_tl(cpu_cc_dst
, s
->T0
, 1);
1908 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1909 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1910 set_cc_op(s
, CC_OP_ADCOX
);
1914 /* XXX: add faster immediate = 1 case */
1915 static void gen_rotc_rm_T1(DisasContext
*s
, MemOp ot
, int op1
,
1918 gen_compute_eflags(s
);
1919 assert(s
->cc_op
== CC_OP_EFLAGS
);
1923 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1925 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1930 gen_helper_rcrb(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1933 gen_helper_rcrw(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1936 gen_helper_rcrl(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1938 #ifdef TARGET_X86_64
1940 gen_helper_rcrq(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1949 gen_helper_rclb(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1952 gen_helper_rclw(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1955 gen_helper_rcll(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1957 #ifdef TARGET_X86_64
1959 gen_helper_rclq(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1967 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1970 /* XXX: add faster immediate case */
1971 static void gen_shiftd_rm_T1(DisasContext
*s
, MemOp ot
, int op1
,
1972 bool is_right
, TCGv count_in
)
1974 target_ulong mask
= (ot
== MO_64
? 63 : 31);
1978 if (op1
== OR_TMP0
) {
1979 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1981 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1984 count
= tcg_temp_new();
1985 tcg_gen_andi_tl(count
, count_in
, mask
);
1989 /* Note: we implement the Intel behaviour for shift count > 16.
1990 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1991 portion by constructing it as a 32-bit value. */
1993 tcg_gen_deposit_tl(s
->tmp0
, s
->T0
, s
->T1
, 16, 16);
1994 tcg_gen_mov_tl(s
->T1
, s
->T0
);
1995 tcg_gen_mov_tl(s
->T0
, s
->tmp0
);
1997 tcg_gen_deposit_tl(s
->T1
, s
->T0
, s
->T1
, 16, 16);
2000 * If TARGET_X86_64 defined then fall through into MO_32 case,
2001 * otherwise fall through default case.
2004 #ifdef TARGET_X86_64
2005 /* Concatenate the two 32-bit values and use a 64-bit shift. */
2006 tcg_gen_subi_tl(s
->tmp0
, count
, 1);
2008 tcg_gen_concat_tl_i64(s
->T0
, s
->T0
, s
->T1
);
2009 tcg_gen_shr_i64(s
->tmp0
, s
->T0
, s
->tmp0
);
2010 tcg_gen_shr_i64(s
->T0
, s
->T0
, count
);
2012 tcg_gen_concat_tl_i64(s
->T0
, s
->T1
, s
->T0
);
2013 tcg_gen_shl_i64(s
->tmp0
, s
->T0
, s
->tmp0
);
2014 tcg_gen_shl_i64(s
->T0
, s
->T0
, count
);
2015 tcg_gen_shri_i64(s
->tmp0
, s
->tmp0
, 32);
2016 tcg_gen_shri_i64(s
->T0
, s
->T0
, 32);
2021 tcg_gen_subi_tl(s
->tmp0
, count
, 1);
2023 tcg_gen_shr_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
2025 tcg_gen_subfi_tl(s
->tmp4
, mask
+ 1, count
);
2026 tcg_gen_shr_tl(s
->T0
, s
->T0
, count
);
2027 tcg_gen_shl_tl(s
->T1
, s
->T1
, s
->tmp4
);
2029 tcg_gen_shl_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
2031 /* Only needed if count > 16, for Intel behaviour. */
2032 tcg_gen_subfi_tl(s
->tmp4
, 33, count
);
2033 tcg_gen_shr_tl(s
->tmp4
, s
->T1
, s
->tmp4
);
2034 tcg_gen_or_tl(s
->tmp0
, s
->tmp0
, s
->tmp4
);
2037 tcg_gen_subfi_tl(s
->tmp4
, mask
+ 1, count
);
2038 tcg_gen_shl_tl(s
->T0
, s
->T0
, count
);
2039 tcg_gen_shr_tl(s
->T1
, s
->T1
, s
->tmp4
);
2041 tcg_gen_movi_tl(s
->tmp4
, 0);
2042 tcg_gen_movcond_tl(TCG_COND_EQ
, s
->T1
, count
, s
->tmp4
,
2044 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->T1
);
2049 gen_op_st_rm_T0_A0(s
, ot
, op1
);
2051 gen_shift_flags(s
, ot
, s
->T0
, s
->tmp0
, count
, is_right
);
2052 tcg_temp_free(count
);
2055 static void gen_shift(DisasContext
*s1
, int op
, MemOp ot
, int d
, int s
)
2058 gen_op_mov_v_reg(s1
, ot
, s1
->T1
, s
);
2061 gen_rot_rm_T1(s1
, ot
, d
, 0);
2064 gen_rot_rm_T1(s1
, ot
, d
, 1);
2068 gen_shift_rm_T1(s1
, ot
, d
, 0, 0);
2071 gen_shift_rm_T1(s1
, ot
, d
, 1, 0);
2074 gen_shift_rm_T1(s1
, ot
, d
, 1, 1);
2077 gen_rotc_rm_T1(s1
, ot
, d
, 0);
2080 gen_rotc_rm_T1(s1
, ot
, d
, 1);
2085 static void gen_shifti(DisasContext
*s1
, int op
, MemOp ot
, int d
, int c
)
2089 gen_rot_rm_im(s1
, ot
, d
, c
, 0);
2092 gen_rot_rm_im(s1
, ot
, d
, c
, 1);
2096 gen_shift_rm_im(s1
, ot
, d
, c
, 0, 0);
2099 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 0);
2102 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 1);
2105 /* currently not optimized */
2106 tcg_gen_movi_tl(s1
->T1
, c
);
2107 gen_shift(s1
, op
, ot
, d
, OR_TMP1
);
2112 #define X86_MAX_INSN_LENGTH 15
2114 static uint64_t advance_pc(CPUX86State
*env
, DisasContext
*s
, int num_bytes
)
2116 uint64_t pc
= s
->pc
;
2118 /* This is a subsequent insn that crosses a page boundary. */
2119 if (s
->base
.num_insns
> 1 &&
2120 !is_same_page(&s
->base
, s
->pc
+ num_bytes
- 1)) {
2121 siglongjmp(s
->jmpbuf
, 2);
2125 if (unlikely(cur_insn_len(s
) > X86_MAX_INSN_LENGTH
)) {
2126 /* If the instruction's 16th byte is on a different page than the 1st, a
2127 * page fault on the second page wins over the general protection fault
2128 * caused by the instruction being too long.
2129 * This can happen even if the operand is only one byte long!
2131 if (((s
->pc
- 1) ^ (pc
- 1)) & TARGET_PAGE_MASK
) {
2132 volatile uint8_t unused
=
2133 cpu_ldub_code(env
, (s
->pc
- 1) & TARGET_PAGE_MASK
);
2136 siglongjmp(s
->jmpbuf
, 1);
2142 static inline uint8_t x86_ldub_code(CPUX86State
*env
, DisasContext
*s
)
2144 return translator_ldub(env
, &s
->base
, advance_pc(env
, s
, 1));
2147 static inline int16_t x86_ldsw_code(CPUX86State
*env
, DisasContext
*s
)
2149 return translator_lduw(env
, &s
->base
, advance_pc(env
, s
, 2));
2152 static inline uint16_t x86_lduw_code(CPUX86State
*env
, DisasContext
*s
)
2154 return translator_lduw(env
, &s
->base
, advance_pc(env
, s
, 2));
2157 static inline uint32_t x86_ldl_code(CPUX86State
*env
, DisasContext
*s
)
2159 return translator_ldl(env
, &s
->base
, advance_pc(env
, s
, 4));
#ifdef TARGET_X86_64
/* Fetch an unsigned 64-bit value from the instruction stream and advance. */
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldq(env, &s->base, advance_pc(env, s, 8));
}
#endif
2169 /* Decompose an address. */
2171 typedef struct AddressParts
{
2179 static AddressParts
gen_lea_modrm_0(CPUX86State
*env
, DisasContext
*s
,
2182 int def_seg
, base
, index
, scale
, mod
, rm
;
2191 mod
= (modrm
>> 6) & 3;
2193 base
= rm
| REX_B(s
);
2196 /* Normally filtered out earlier, but including this path
2197 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
2206 int code
= x86_ldub_code(env
, s
);
2207 scale
= (code
>> 6) & 3;
2208 index
= ((code
>> 3) & 7) | REX_X(s
);
2210 index
= -1; /* no index */
2212 base
= (code
& 7) | REX_B(s
);
2218 if ((base
& 7) == 5) {
2220 disp
= (int32_t)x86_ldl_code(env
, s
);
2221 if (CODE64(s
) && !havesib
) {
2223 disp
+= s
->pc
+ s
->rip_offset
;
2228 disp
= (int8_t)x86_ldub_code(env
, s
);
2232 disp
= (int32_t)x86_ldl_code(env
, s
);
2236 /* For correct popl handling with esp. */
2237 if (base
== R_ESP
&& s
->popl_esp_hack
) {
2238 disp
+= s
->popl_esp_hack
;
2240 if (base
== R_EBP
|| base
== R_ESP
) {
2249 disp
= x86_lduw_code(env
, s
);
2252 } else if (mod
== 1) {
2253 disp
= (int8_t)x86_ldub_code(env
, s
);
2255 disp
= (int16_t)x86_lduw_code(env
, s
);
2299 return (AddressParts
){ def_seg
, base
, index
, scale
, disp
};
2302 /* Compute the address, with a minimum number of TCG ops. */
2303 static TCGv
gen_lea_modrm_1(DisasContext
*s
, AddressParts a
, bool is_vsib
)
2307 if (a
.index
>= 0 && !is_vsib
) {
2309 ea
= cpu_regs
[a
.index
];
2311 tcg_gen_shli_tl(s
->A0
, cpu_regs
[a
.index
], a
.scale
);
2315 tcg_gen_add_tl(s
->A0
, ea
, cpu_regs
[a
.base
]);
2318 } else if (a
.base
>= 0) {
2319 ea
= cpu_regs
[a
.base
];
2322 if (tb_cflags(s
->base
.tb
) & CF_PCREL
&& a
.base
== -2) {
2323 /* With cpu_eip ~= pc_save, the expression is pc-relative. */
2324 tcg_gen_addi_tl(s
->A0
, cpu_eip
, a
.disp
- s
->pc_save
);
2326 tcg_gen_movi_tl(s
->A0
, a
.disp
);
2329 } else if (a
.disp
!= 0) {
2330 tcg_gen_addi_tl(s
->A0
, ea
, a
.disp
);
2337 static void gen_lea_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2339 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
2340 TCGv ea
= gen_lea_modrm_1(s
, a
, false);
2341 gen_lea_v_seg(s
, s
->aflag
, ea
, a
.def_seg
, s
->override
);
2344 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2346 (void)gen_lea_modrm_0(env
, s
, modrm
);
2349 /* Used for BNDCL, BNDCU, BNDCN. */
2350 static void gen_bndck(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2351 TCGCond cond
, TCGv_i64 bndv
)
2353 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
2354 TCGv ea
= gen_lea_modrm_1(s
, a
, false);
2356 tcg_gen_extu_tl_i64(s
->tmp1_i64
, ea
);
2358 tcg_gen_ext32u_i64(s
->tmp1_i64
, s
->tmp1_i64
);
2360 tcg_gen_setcond_i64(cond
, s
->tmp1_i64
, s
->tmp1_i64
, bndv
);
2361 tcg_gen_extrl_i64_i32(s
->tmp2_i32
, s
->tmp1_i64
);
2362 gen_helper_bndck(cpu_env
, s
->tmp2_i32
);
2365 /* used for LEA and MOV AX, mem */
2366 static void gen_add_A0_ds_seg(DisasContext
*s
)
2368 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, R_DS
, s
->override
);
2371 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2373 static void gen_ldst_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2374 MemOp ot
, int reg
, int is_store
)
2378 mod
= (modrm
>> 6) & 3;
2379 rm
= (modrm
& 7) | REX_B(s
);
2383 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
2384 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
2386 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
2388 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2391 gen_lea_modrm(env
, s
, modrm
);
2394 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
2395 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
2397 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
2399 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2404 static target_ulong
insn_get_addr(CPUX86State
*env
, DisasContext
*s
, MemOp ot
)
2410 ret
= x86_ldub_code(env
, s
);
2413 ret
= x86_lduw_code(env
, s
);
2416 ret
= x86_ldl_code(env
, s
);
2418 #ifdef TARGET_X86_64
2420 ret
= x86_ldq_code(env
, s
);
2424 g_assert_not_reached();
2429 static inline uint32_t insn_get(CPUX86State
*env
, DisasContext
*s
, MemOp ot
)
2435 ret
= x86_ldub_code(env
, s
);
2438 ret
= x86_lduw_code(env
, s
);
2441 #ifdef TARGET_X86_64
2444 ret
= x86_ldl_code(env
, s
);
2452 static target_long
insn_get_signed(CPUX86State
*env
, DisasContext
*s
, MemOp ot
)
2458 ret
= (int8_t) x86_ldub_code(env
, s
);
2461 ret
= (int16_t) x86_lduw_code(env
, s
);
2464 ret
= (int32_t) x86_ldl_code(env
, s
);
2466 #ifdef TARGET_X86_64
2468 ret
= x86_ldq_code(env
, s
);
2472 g_assert_not_reached();
2477 static inline int insn_const_size(MemOp ot
)
2486 static void gen_jcc(DisasContext
*s
, int b
, int diff
)
2488 TCGLabel
*l1
= gen_new_label();
2491 gen_jmp_rel_csize(s
, 0, 1);
2493 gen_jmp_rel(s
, s
->dflag
, diff
, 0);
2496 static void gen_cmovcc1(CPUX86State
*env
, DisasContext
*s
, MemOp ot
, int b
,
2501 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
2503 cc
= gen_prepare_cc(s
, b
, s
->T1
);
2504 if (cc
.mask
!= -1) {
2505 TCGv t0
= tcg_temp_new();
2506 tcg_gen_andi_tl(t0
, cc
.reg
, cc
.mask
);
2510 cc
.reg2
= tcg_const_tl(cc
.imm
);
2513 tcg_gen_movcond_tl(cc
.cond
, s
->T0
, cc
.reg
, cc
.reg2
,
2514 s
->T0
, cpu_regs
[reg
]);
2515 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2517 if (cc
.mask
!= -1) {
2518 tcg_temp_free(cc
.reg
);
2521 tcg_temp_free(cc
.reg2
);
2525 static inline void gen_op_movl_T0_seg(DisasContext
*s
, X86Seg seg_reg
)
2527 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
2528 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2531 static inline void gen_op_movl_seg_T0_vm(DisasContext
*s
, X86Seg seg_reg
)
2533 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
2534 tcg_gen_st32_tl(s
->T0
, cpu_env
,
2535 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2536 tcg_gen_shli_tl(cpu_seg_base
[seg_reg
], s
->T0
, 4);
2539 /* move T0 to seg_reg and compute if the CPU state may change. Never
2540 call this function with seg_reg == R_CS */
2541 static void gen_movl_seg_T0(DisasContext
*s
, X86Seg seg_reg
)
2543 if (PE(s
) && !VM86(s
)) {
2544 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
2545 gen_helper_load_seg(cpu_env
, tcg_const_i32(seg_reg
), s
->tmp2_i32
);
2546 /* abort translation because the addseg value may change or
2547 because ss32 may change. For R_SS, translation must always
2548 stop as a special handling must be done to disable hardware
2549 interrupts for the next instruction */
2550 if (seg_reg
== R_SS
) {
2551 s
->base
.is_jmp
= DISAS_EOB_INHIBIT_IRQ
;
2552 } else if (CODE32(s
) && seg_reg
< R_FS
) {
2553 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
2556 gen_op_movl_seg_T0_vm(s
, seg_reg
);
2557 if (seg_reg
== R_SS
) {
2558 s
->base
.is_jmp
= DISAS_EOB_INHIBIT_IRQ
;
2563 static void gen_svm_check_intercept(DisasContext
*s
, uint32_t type
)
2565 /* no SVM activated; fast case */
2566 if (likely(!GUEST(s
))) {
2569 gen_helper_svm_check_intercept(cpu_env
, tcg_constant_i32(type
));
2572 static inline void gen_stack_update(DisasContext
*s
, int addend
)
2574 gen_op_add_reg_im(s
, mo_stacksize(s
), R_ESP
, addend
);
2577 /* Generate a push. It depends on ss32, addseg and dflag. */
2578 static void gen_push_v(DisasContext
*s
, TCGv val
)
2580 MemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2581 MemOp a_ot
= mo_stacksize(s
);
2582 int size
= 1 << d_ot
;
2583 TCGv new_esp
= s
->A0
;
2585 tcg_gen_subi_tl(s
->A0
, cpu_regs
[R_ESP
], size
);
2590 tcg_gen_mov_tl(new_esp
, s
->A0
);
2592 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2595 gen_op_st_v(s
, d_ot
, val
, s
->A0
);
2596 gen_op_mov_reg_v(s
, a_ot
, R_ESP
, new_esp
);
2599 /* two step pop is necessary for precise exceptions */
2600 static MemOp
gen_pop_T0(DisasContext
*s
)
2602 MemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2604 gen_lea_v_seg(s
, mo_stacksize(s
), cpu_regs
[R_ESP
], R_SS
, -1);
2605 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->A0
);
2610 static inline void gen_pop_update(DisasContext
*s
, MemOp ot
)
2612 gen_stack_update(s
, 1 << ot
);
2615 static inline void gen_stack_A0(DisasContext
*s
)
2617 gen_lea_v_seg(s
, SS32(s
) ? MO_32
: MO_16
, cpu_regs
[R_ESP
], R_SS
, -1);
2620 static void gen_pusha(DisasContext
*s
)
2622 MemOp s_ot
= SS32(s
) ? MO_32
: MO_16
;
2623 MemOp d_ot
= s
->dflag
;
2624 int size
= 1 << d_ot
;
2627 for (i
= 0; i
< 8; i
++) {
2628 tcg_gen_addi_tl(s
->A0
, cpu_regs
[R_ESP
], (i
- 8) * size
);
2629 gen_lea_v_seg(s
, s_ot
, s
->A0
, R_SS
, -1);
2630 gen_op_st_v(s
, d_ot
, cpu_regs
[7 - i
], s
->A0
);
2633 gen_stack_update(s
, -8 * size
);
2636 static void gen_popa(DisasContext
*s
)
2638 MemOp s_ot
= SS32(s
) ? MO_32
: MO_16
;
2639 MemOp d_ot
= s
->dflag
;
2640 int size
= 1 << d_ot
;
2643 for (i
= 0; i
< 8; i
++) {
2644 /* ESP is not reloaded */
2645 if (7 - i
== R_ESP
) {
2648 tcg_gen_addi_tl(s
->A0
, cpu_regs
[R_ESP
], i
* size
);
2649 gen_lea_v_seg(s
, s_ot
, s
->A0
, R_SS
, -1);
2650 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->A0
);
2651 gen_op_mov_reg_v(s
, d_ot
, 7 - i
, s
->T0
);
2654 gen_stack_update(s
, 8 * size
);
2657 static void gen_enter(DisasContext
*s
, int esp_addend
, int level
)
2659 MemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2660 MemOp a_ot
= CODE64(s
) ? MO_64
: SS32(s
) ? MO_32
: MO_16
;
2661 int size
= 1 << d_ot
;
2663 /* Push BP; compute FrameTemp into T1. */
2664 tcg_gen_subi_tl(s
->T1
, cpu_regs
[R_ESP
], size
);
2665 gen_lea_v_seg(s
, a_ot
, s
->T1
, R_SS
, -1);
2666 gen_op_st_v(s
, d_ot
, cpu_regs
[R_EBP
], s
->A0
);
2672 /* Copy level-1 pointers from the previous frame. */
2673 for (i
= 1; i
< level
; ++i
) {
2674 tcg_gen_subi_tl(s
->A0
, cpu_regs
[R_EBP
], size
* i
);
2675 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2676 gen_op_ld_v(s
, d_ot
, s
->tmp0
, s
->A0
);
2678 tcg_gen_subi_tl(s
->A0
, s
->T1
, size
* i
);
2679 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2680 gen_op_st_v(s
, d_ot
, s
->tmp0
, s
->A0
);
2683 /* Push the current FrameTemp as the last level. */
2684 tcg_gen_subi_tl(s
->A0
, s
->T1
, size
* level
);
2685 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2686 gen_op_st_v(s
, d_ot
, s
->T1
, s
->A0
);
2689 /* Copy the FrameTemp value to EBP. */
2690 gen_op_mov_reg_v(s
, a_ot
, R_EBP
, s
->T1
);
2692 /* Compute the final value of ESP. */
2693 tcg_gen_subi_tl(s
->T1
, s
->T1
, esp_addend
+ size
* level
);
2694 gen_op_mov_reg_v(s
, a_ot
, R_ESP
, s
->T1
);
2697 static void gen_leave(DisasContext
*s
)
2699 MemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2700 MemOp a_ot
= mo_stacksize(s
);
2702 gen_lea_v_seg(s
, a_ot
, cpu_regs
[R_EBP
], R_SS
, -1);
2703 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->A0
);
2705 tcg_gen_addi_tl(s
->T1
, cpu_regs
[R_EBP
], 1 << d_ot
);
2707 gen_op_mov_reg_v(s
, d_ot
, R_EBP
, s
->T0
);
2708 gen_op_mov_reg_v(s
, a_ot
, R_ESP
, s
->T1
);
2711 /* Similarly, except that the assumption here is that we don't decode
2712 the instruction at all -- either a missing opcode, an unimplemented
2713 feature, or just a bogus instruction stream. */
2714 static void gen_unknown_opcode(CPUX86State
*env
, DisasContext
*s
)
2716 gen_illegal_opcode(s
);
2718 if (qemu_loglevel_mask(LOG_UNIMP
)) {
2719 FILE *logfile
= qemu_log_trylock();
2721 target_ulong pc
= s
->base
.pc_next
, end
= s
->pc
;
2723 fprintf(logfile
, "ILLOPC: " TARGET_FMT_lx
":", pc
);
2724 for (; pc
< end
; ++pc
) {
2725 fprintf(logfile
, " %02x", cpu_ldub_code(env
, pc
));
2727 fprintf(logfile
, "\n");
2728 qemu_log_unlock(logfile
);
2733 /* an interrupt is different from an exception because of the
2735 static void gen_interrupt(DisasContext
*s
, int intno
)
2737 gen_update_cc_op(s
);
2738 gen_update_eip_cur(s
);
2739 gen_helper_raise_interrupt(cpu_env
, tcg_constant_i32(intno
),
2740 cur_insn_len_i32(s
));
2741 s
->base
.is_jmp
= DISAS_NORETURN
;
2744 static void gen_set_hflag(DisasContext
*s
, uint32_t mask
)
2746 if ((s
->flags
& mask
) == 0) {
2747 TCGv_i32 t
= tcg_temp_new_i32();
2748 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2749 tcg_gen_ori_i32(t
, t
, mask
);
2750 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2751 tcg_temp_free_i32(t
);
2756 static void gen_reset_hflag(DisasContext
*s
, uint32_t mask
)
2758 if (s
->flags
& mask
) {
2759 TCGv_i32 t
= tcg_temp_new_i32();
2760 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2761 tcg_gen_andi_i32(t
, t
, ~mask
);
2762 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2763 tcg_temp_free_i32(t
);
2768 static void gen_set_eflags(DisasContext
*s
, target_ulong mask
)
2770 TCGv t
= tcg_temp_new();
2772 tcg_gen_ld_tl(t
, cpu_env
, offsetof(CPUX86State
, eflags
));
2773 tcg_gen_ori_tl(t
, t
, mask
);
2774 tcg_gen_st_tl(t
, cpu_env
, offsetof(CPUX86State
, eflags
));
2778 static void gen_reset_eflags(DisasContext
*s
, target_ulong mask
)
2780 TCGv t
= tcg_temp_new();
2782 tcg_gen_ld_tl(t
, cpu_env
, offsetof(CPUX86State
, eflags
));
2783 tcg_gen_andi_tl(t
, t
, ~mask
);
2784 tcg_gen_st_tl(t
, cpu_env
, offsetof(CPUX86State
, eflags
));
2788 /* Clear BND registers during legacy branches. */
2789 static void gen_bnd_jmp(DisasContext
*s
)
2791 /* Clear the registers only if BND prefix is missing, MPX is enabled,
2792 and if the BNDREGs are known to be in use (non-zero) already.
2793 The helper itself will check BNDPRESERVE at runtime. */
2794 if ((s
->prefix
& PREFIX_REPNZ
) == 0
2795 && (s
->flags
& HF_MPX_EN_MASK
) != 0
2796 && (s
->flags
& HF_MPX_IU_MASK
) != 0) {
2797 gen_helper_bnd_jmp(cpu_env
);
2801 /* Generate an end of block. Trace exception is also generated if needed.
2802 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
2803 If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
2804 S->TF. This is used by the syscall/sysret insns. */
2806 do_gen_eob_worker(DisasContext
*s
, bool inhibit
, bool recheck_tf
, bool jr
)
2808 gen_update_cc_op(s
);
2810 /* If several instructions disable interrupts, only the first does it. */
2811 if (inhibit
&& !(s
->flags
& HF_INHIBIT_IRQ_MASK
)) {
2812 gen_set_hflag(s
, HF_INHIBIT_IRQ_MASK
);
2814 gen_reset_hflag(s
, HF_INHIBIT_IRQ_MASK
);
2817 if (s
->base
.tb
->flags
& HF_RF_MASK
) {
2818 gen_reset_eflags(s
, RF_MASK
);
2821 gen_helper_rechecking_single_step(cpu_env
);
2822 tcg_gen_exit_tb(NULL
, 0);
2823 } else if (s
->flags
& HF_TF_MASK
) {
2824 gen_helper_single_step(cpu_env
);
2826 tcg_gen_lookup_and_goto_ptr();
2828 tcg_gen_exit_tb(NULL
, 0);
2830 s
->base
.is_jmp
= DISAS_NORETURN
;
2834 gen_eob_worker(DisasContext
*s
, bool inhibit
, bool recheck_tf
)
2836 do_gen_eob_worker(s
, inhibit
, recheck_tf
, false);
2840 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. */
2841 static void gen_eob_inhibit_irq(DisasContext
*s
, bool inhibit
)
2843 gen_eob_worker(s
, inhibit
, false);
2846 /* End of block, resetting the inhibit irq flag. */
2847 static void gen_eob(DisasContext
*s
)
2849 gen_eob_worker(s
, false, false);
2852 /* Jump to register */
2853 static void gen_jr(DisasContext
*s
)
2855 do_gen_eob_worker(s
, false, false, true);
2858 /* Jump to eip+diff, truncating the result to OT. */
2859 static void gen_jmp_rel(DisasContext
*s
, MemOp ot
, int diff
, int tb_num
)
2861 bool use_goto_tb
= s
->jmp_opt
;
2862 target_ulong mask
= -1;
2863 target_ulong new_pc
= s
->pc
+ diff
;
2864 target_ulong new_eip
= new_pc
- s
->cs_base
;
2866 /* In 64-bit mode, operand size is fixed at 64 bits. */
2870 if (tb_cflags(s
->base
.tb
) & CF_PCREL
&& CODE32(s
)) {
2871 use_goto_tb
= false;
2879 gen_update_cc_op(s
);
2880 set_cc_op(s
, CC_OP_DYNAMIC
);
2882 if (tb_cflags(s
->base
.tb
) & CF_PCREL
) {
2883 tcg_gen_addi_tl(cpu_eip
, cpu_eip
, new_pc
- s
->pc_save
);
2885 * If we can prove the branch does not leave the page and we have
2886 * no extra masking to apply (data16 branch in code32, see above),
2887 * then we have also proven that the addition does not wrap.
2889 if (!use_goto_tb
|| !is_same_page(&s
->base
, new_pc
)) {
2890 tcg_gen_andi_tl(cpu_eip
, cpu_eip
, mask
);
2891 use_goto_tb
= false;
2896 translator_use_goto_tb(&s
->base
, new_eip
+ s
->cs_base
)) {
2897 /* jump to same page: we can use a direct jump */
2898 tcg_gen_goto_tb(tb_num
);
2899 if (!(tb_cflags(s
->base
.tb
) & CF_PCREL
)) {
2900 tcg_gen_movi_tl(cpu_eip
, new_eip
);
2902 tcg_gen_exit_tb(s
->base
.tb
, tb_num
);
2903 s
->base
.is_jmp
= DISAS_NORETURN
;
2905 if (!(tb_cflags(s
->base
.tb
) & CF_PCREL
)) {
2906 tcg_gen_movi_tl(cpu_eip
, new_eip
);
2909 gen_jr(s
); /* jump to another page */
2911 gen_eob(s
); /* exit to main loop */
2916 /* Jump to eip+diff, truncating to the current code size. */
2917 static void gen_jmp_rel_csize(DisasContext
*s
, int diff
, int tb_num
)
2919 /* CODE64 ignores the OT argument, so we need not consider it. */
2920 gen_jmp_rel(s
, CODE32(s
) ? MO_32
: MO_16
, diff
, tb_num
);
2923 static inline void gen_ldq_env_A0(DisasContext
*s
, int offset
)
2925 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
, s
->mem_index
, MO_LEUQ
);
2926 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, offset
);
2929 static inline void gen_stq_env_A0(DisasContext
*s
, int offset
)
2931 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
, offset
);
2932 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
, s
->mem_index
, MO_LEUQ
);
2935 static inline void gen_ldo_env_A0(DisasContext
*s
, int offset
, bool align
)
2937 int mem_index
= s
->mem_index
;
2938 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
, mem_index
,
2939 MO_LEUQ
| (align
? MO_ALIGN_16
: 0));
2940 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2941 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 8);
2942 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2943 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2946 static inline void gen_sto_env_A0(DisasContext
*s
, int offset
, bool align
)
2948 int mem_index
= s
->mem_index
;
2949 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2950 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
, mem_index
,
2951 MO_LEUQ
| (align
? MO_ALIGN_16
: 0));
2952 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 8);
2953 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2954 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2957 static void gen_ldy_env_A0(DisasContext
*s
, int offset
, bool align
)
2959 int mem_index
= s
->mem_index
;
2960 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
, mem_index
,
2961 MO_LEUQ
| (align
? MO_ALIGN_32
: 0));
2962 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(YMMReg
, YMM_Q(0)));
2963 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 8);
2964 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2965 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(YMMReg
, YMM_Q(1)));
2967 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 16);
2968 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2969 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(YMMReg
, YMM_Q(2)));
2970 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 24);
2971 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2972 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(YMMReg
, YMM_Q(3)));
2975 static void gen_sty_env_A0(DisasContext
*s
, int offset
, bool align
)
2977 int mem_index
= s
->mem_index
;
2978 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(YMMReg
, YMM_Q(0)));
2979 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
, mem_index
,
2980 MO_LEUQ
| (align
? MO_ALIGN_32
: 0));
2981 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 8);
2982 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(YMMReg
, YMM_Q(1)));
2983 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2984 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 16);
2985 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(YMMReg
, YMM_Q(2)));
2986 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2987 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 24);
2988 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(YMMReg
, YMM_Q(3)));
2989 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2992 #include "decode-new.h"
2993 #include "emit.c.inc"
2994 #include "decode-new.c.inc"
2996 static void gen_cmpxchg8b(DisasContext
*s
, CPUX86State
*env
, int modrm
)
2998 TCGv_i64 cmp
, val
, old
;
3001 gen_lea_modrm(env
, s
, modrm
);
3003 cmp
= tcg_temp_new_i64();
3004 val
= tcg_temp_new_i64();
3005 old
= tcg_temp_new_i64();
3007 /* Construct the comparison values from the register pair. */
3008 tcg_gen_concat_tl_i64(cmp
, cpu_regs
[R_EAX
], cpu_regs
[R_EDX
]);
3009 tcg_gen_concat_tl_i64(val
, cpu_regs
[R_EBX
], cpu_regs
[R_ECX
]);
3011 /* Only require atomic with LOCK; non-parallel handled in generator. */
3012 if (s
->prefix
& PREFIX_LOCK
) {
3013 tcg_gen_atomic_cmpxchg_i64(old
, s
->A0
, cmp
, val
, s
->mem_index
, MO_TEUQ
);
3015 tcg_gen_nonatomic_cmpxchg_i64(old
, s
->A0
, cmp
, val
,
3016 s
->mem_index
, MO_TEUQ
);
3018 tcg_temp_free_i64(val
);
3020 /* Set tmp0 to match the required value of Z. */
3021 tcg_gen_setcond_i64(TCG_COND_EQ
, cmp
, old
, cmp
);
3023 tcg_gen_trunc_i64_tl(Z
, cmp
);
3024 tcg_temp_free_i64(cmp
);
3027 * Extract the result values for the register pair.
3028 * For 32-bit, we may do this unconditionally, because on success (Z=1),
3029 * the old value matches the previous value in EDX:EAX. For x86_64,
3030 * the store must be conditional, because we must leave the source
3031 * registers unchanged on success, and zero-extend the writeback
3034 if (TARGET_LONG_BITS
== 32) {
3035 tcg_gen_extr_i64_tl(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
], old
);
3037 TCGv zero
= tcg_constant_tl(0);
3039 tcg_gen_extr_i64_tl(s
->T0
, s
->T1
, old
);
3040 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_regs
[R_EAX
], Z
, zero
,
3041 s
->T0
, cpu_regs
[R_EAX
]);
3042 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_regs
[R_EDX
], Z
, zero
,
3043 s
->T1
, cpu_regs
[R_EDX
]);
3045 tcg_temp_free_i64(old
);
3048 gen_compute_eflags(s
);
3049 tcg_gen_deposit_tl(cpu_cc_src
, cpu_cc_src
, Z
, ctz32(CC_Z
), 1);
#ifdef TARGET_X86_64
/*
 * CMPXCHG16B: compare RDX:RAX with the aligned 128-bit memory operand;
 * on match store RCX:RBX, otherwise load the old value into RDX:RAX.
 * Sets only the Z flag.  Atomic when the LOCK prefix is present.
 */
static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
{
    MemOp mop = MO_TE | MO_128 | MO_ALIGN;
    TCGv_i64 t0, t1;
    TCGv_i128 cmp, val;

    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i128();
    val = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    } else {
        tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    }

    tcg_gen_extr_i128_i64(s->T0, s->T1, val);
    tcg_temp_free_i128(cmp);
    tcg_temp_free_i128(val);

    /* Determine success after the fact: old value equals the compare pair
       iff XOR of corresponding halves ORs to zero. */
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
    tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
    tcg_gen_or_i64(t0, t0, t1);
    tcg_temp_free_i64(t1);

    /* Update Z in EFLAGS; all other flags are unchanged by CMPXCHG16B. */
    gen_compute_eflags(s);
    tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);
    tcg_temp_free_i64(t0);

    /*
     * Extract the result values for the register pair.  We may do this
     * unconditionally, because on success (Z=1), the old value matches
     * the previous value in RDX:RAX.
     */
    tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
    tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
}
#endif
3102 /* convert one instruction. s->base.is_jmp is set if the translation must
3103 be stopped. Return the next pc value */
3104 static bool disas_insn(DisasContext
*s
, CPUState
*cpu
)
3106 CPUX86State
*env
= cpu
->env_ptr
;
3109 MemOp ot
, aflag
, dflag
;
3110 int modrm
, reg
, rm
, mod
, op
, opreg
, val
;
3111 bool orig_cc_op_dirty
= s
->cc_op_dirty
;
3112 CCOp orig_cc_op
= s
->cc_op
;
3113 target_ulong orig_pc_save
= s
->pc_save
;
3115 s
->pc
= s
->base
.pc_next
;
3117 #ifdef TARGET_X86_64
3122 s
->rip_offset
= 0; /* for relative ip address */
3126 switch (sigsetjmp(s
->jmpbuf
, 0)) {
3130 gen_exception_gpf(s
);
3133 /* Restore state that may affect the next instruction. */
3134 s
->pc
= s
->base
.pc_next
;
3136 * TODO: These save/restore can be removed after the table-based
3137 * decoder is complete; we will be decoding the insn completely
3138 * before any code generation that might affect these variables.
3140 s
->cc_op_dirty
= orig_cc_op_dirty
;
3141 s
->cc_op
= orig_cc_op
;
3142 s
->pc_save
= orig_pc_save
;
3144 s
->base
.num_insns
--;
3145 tcg_remove_ops_after(s
->prev_insn_end
);
3146 s
->base
.is_jmp
= DISAS_TOO_MANY
;
3149 g_assert_not_reached();
3155 s
->prefix
= prefixes
;
3156 b
= x86_ldub_code(env
, s
);
3157 /* Collect prefixes. */
3162 b
= x86_ldub_code(env
, s
) + 0x100;
3165 prefixes
|= PREFIX_REPZ
;
3166 prefixes
&= ~PREFIX_REPNZ
;
3169 prefixes
|= PREFIX_REPNZ
;
3170 prefixes
&= ~PREFIX_REPZ
;
3173 prefixes
|= PREFIX_LOCK
;
3194 prefixes
|= PREFIX_DATA
;
3197 prefixes
|= PREFIX_ADR
;
3199 #ifdef TARGET_X86_64
3203 prefixes
|= PREFIX_REX
;
3204 s
->vex_w
= (b
>> 3) & 1;
3205 s
->rex_r
= (b
& 0x4) << 1;
3206 s
->rex_x
= (b
& 0x2) << 2;
3207 s
->rex_b
= (b
& 0x1) << 3;
3212 case 0xc5: /* 2-byte VEX */
3213 case 0xc4: /* 3-byte VEX */
3214 if (CODE32(s
) && !VM86(s
)) {
3215 int vex2
= x86_ldub_code(env
, s
);
3216 s
->pc
--; /* rewind the advance_pc() x86_ldub_code() did */
3218 if (!CODE64(s
) && (vex2
& 0xc0) != 0xc0) {
3219 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
3220 otherwise the instruction is LES or LDS. */
3223 disas_insn_new(s
, cpu
, b
);
3229 /* Post-process prefixes. */
3231 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
3232 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
3233 over 0x66 if both are present. */
3234 dflag
= (REX_W(s
) ? MO_64
: prefixes
& PREFIX_DATA
? MO_16
: MO_32
);
3235 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
3236 aflag
= (prefixes
& PREFIX_ADR
? MO_32
: MO_64
);
3238 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
3239 if (CODE32(s
) ^ ((prefixes
& PREFIX_DATA
) != 0)) {
3244 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
3245 if (CODE32(s
) ^ ((prefixes
& PREFIX_ADR
) != 0)) {
3252 s
->prefix
= prefixes
;
3256 /* now check op code */
3258 /**************************/
3273 ot
= mo_b_d(b
, dflag
);
3276 case 0: /* OP Ev, Gv */
3277 modrm
= x86_ldub_code(env
, s
);
3278 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
3279 mod
= (modrm
>> 6) & 3;
3280 rm
= (modrm
& 7) | REX_B(s
);
3282 gen_lea_modrm(env
, s
, modrm
);
3284 } else if (op
== OP_XORL
&& rm
== reg
) {
3286 /* xor reg, reg optimisation */
3287 set_cc_op(s
, CC_OP_CLR
);
3288 tcg_gen_movi_tl(s
->T0
, 0);
3289 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3294 gen_op_mov_v_reg(s
, ot
, s
->T1
, reg
);
3295 gen_op(s
, op
, ot
, opreg
);
3297 case 1: /* OP Gv, Ev */
3298 modrm
= x86_ldub_code(env
, s
);
3299 mod
= (modrm
>> 6) & 3;
3300 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
3301 rm
= (modrm
& 7) | REX_B(s
);
3303 gen_lea_modrm(env
, s
, modrm
);
3304 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
3305 } else if (op
== OP_XORL
&& rm
== reg
) {
3308 gen_op_mov_v_reg(s
, ot
, s
->T1
, rm
);
3310 gen_op(s
, op
, ot
, reg
);
3312 case 2: /* OP A, Iv */
3313 val
= insn_get(env
, s
, ot
);
3314 tcg_gen_movi_tl(s
->T1
, val
);
3315 gen_op(s
, op
, ot
, OR_EAX
);
3325 case 0x80: /* GRP1 */
3331 ot
= mo_b_d(b
, dflag
);
3333 modrm
= x86_ldub_code(env
, s
);
3334 mod
= (modrm
>> 6) & 3;
3335 rm
= (modrm
& 7) | REX_B(s
);
3336 op
= (modrm
>> 3) & 7;
3342 s
->rip_offset
= insn_const_size(ot
);
3343 gen_lea_modrm(env
, s
, modrm
);
3354 val
= insn_get(env
, s
, ot
);
3357 val
= (int8_t)insn_get(env
, s
, MO_8
);
3360 tcg_gen_movi_tl(s
->T1
, val
);
3361 gen_op(s
, op
, ot
, opreg
);
3365 /**************************/
3366 /* inc, dec, and other misc arith */
3367 case 0x40 ... 0x47: /* inc Gv */
3369 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), 1);
3371 case 0x48 ... 0x4f: /* dec Gv */
3373 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), -1);
3375 case 0xf6: /* GRP3 */
3377 ot
= mo_b_d(b
, dflag
);
3379 modrm
= x86_ldub_code(env
, s
);
3380 mod
= (modrm
>> 6) & 3;
3381 rm
= (modrm
& 7) | REX_B(s
);
3382 op
= (modrm
>> 3) & 7;
3385 s
->rip_offset
= insn_const_size(ot
);
3387 gen_lea_modrm(env
, s
, modrm
);
3388 /* For those below that handle locked memory, don't load here. */
3389 if (!(s
->prefix
& PREFIX_LOCK
)
3391 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
3394 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
3399 val
= insn_get(env
, s
, ot
);
3400 tcg_gen_movi_tl(s
->T1
, val
);
3401 gen_op_testl_T0_T1_cc(s
);
3402 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
3405 if (s
->prefix
& PREFIX_LOCK
) {
3409 tcg_gen_movi_tl(s
->T0
, ~0);
3410 tcg_gen_atomic_xor_fetch_tl(s
->T0
, s
->A0
, s
->T0
,
3411 s
->mem_index
, ot
| MO_LE
);
3413 tcg_gen_not_tl(s
->T0
, s
->T0
);
3415 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
3417 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
3422 if (s
->prefix
& PREFIX_LOCK
) {
3424 TCGv a0
, t0
, t1
, t2
;
3429 a0
= tcg_temp_local_new();
3430 t0
= tcg_temp_local_new();
3431 label1
= gen_new_label();
3433 tcg_gen_mov_tl(a0
, s
->A0
);
3434 tcg_gen_mov_tl(t0
, s
->T0
);
3436 gen_set_label(label1
);
3437 t1
= tcg_temp_new();
3438 t2
= tcg_temp_new();
3439 tcg_gen_mov_tl(t2
, t0
);
3440 tcg_gen_neg_tl(t1
, t0
);
3441 tcg_gen_atomic_cmpxchg_tl(t0
, a0
, t0
, t1
,
3442 s
->mem_index
, ot
| MO_LE
);
3444 tcg_gen_brcond_tl(TCG_COND_NE
, t0
, t2
, label1
);
3448 tcg_gen_neg_tl(s
->T0
, t0
);
3451 tcg_gen_neg_tl(s
->T0
, s
->T0
);
3453 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
3455 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
3458 gen_op_update_neg_cc(s
);
3459 set_cc_op(s
, CC_OP_SUBB
+ ot
);
3464 gen_op_mov_v_reg(s
, MO_8
, s
->T1
, R_EAX
);
3465 tcg_gen_ext8u_tl(s
->T0
, s
->T0
);
3466 tcg_gen_ext8u_tl(s
->T1
, s
->T1
);
3467 /* XXX: use 32 bit mul which could be faster */
3468 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
3469 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
3470 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
3471 tcg_gen_andi_tl(cpu_cc_src
, s
->T0
, 0xff00);
3472 set_cc_op(s
, CC_OP_MULB
);
3475 gen_op_mov_v_reg(s
, MO_16
, s
->T1
, R_EAX
);
3476 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
3477 tcg_gen_ext16u_tl(s
->T1
, s
->T1
);
3478 /* XXX: use 32 bit mul which could be faster */
3479 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
3480 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
3481 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
3482 tcg_gen_shri_tl(s
->T0
, s
->T0
, 16);
3483 gen_op_mov_reg_v(s
, MO_16
, R_EDX
, s
->T0
);
3484 tcg_gen_mov_tl(cpu_cc_src
, s
->T0
);
3485 set_cc_op(s
, CC_OP_MULW
);
3489 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3490 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, cpu_regs
[R_EAX
]);
3491 tcg_gen_mulu2_i32(s
->tmp2_i32
, s
->tmp3_i32
,
3492 s
->tmp2_i32
, s
->tmp3_i32
);
3493 tcg_gen_extu_i32_tl(cpu_regs
[R_EAX
], s
->tmp2_i32
);
3494 tcg_gen_extu_i32_tl(cpu_regs
[R_EDX
], s
->tmp3_i32
);
3495 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
3496 tcg_gen_mov_tl(cpu_cc_src
, cpu_regs
[R_EDX
]);
3497 set_cc_op(s
, CC_OP_MULL
);
3499 #ifdef TARGET_X86_64
3501 tcg_gen_mulu2_i64(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
],
3502 s
->T0
, cpu_regs
[R_EAX
]);
3503 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
3504 tcg_gen_mov_tl(cpu_cc_src
, cpu_regs
[R_EDX
]);
3505 set_cc_op(s
, CC_OP_MULQ
);
3513 gen_op_mov_v_reg(s
, MO_8
, s
->T1
, R_EAX
);
3514 tcg_gen_ext8s_tl(s
->T0
, s
->T0
);
3515 tcg_gen_ext8s_tl(s
->T1
, s
->T1
);
3516 /* XXX: use 32 bit mul which could be faster */
3517 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
3518 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
3519 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
3520 tcg_gen_ext8s_tl(s
->tmp0
, s
->T0
);
3521 tcg_gen_sub_tl(cpu_cc_src
, s
->T0
, s
->tmp0
);
3522 set_cc_op(s
, CC_OP_MULB
);
3525 gen_op_mov_v_reg(s
, MO_16
, s
->T1
, R_EAX
);
3526 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
3527 tcg_gen_ext16s_tl(s
->T1
, s
->T1
);
3528 /* XXX: use 32 bit mul which could be faster */
3529 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
3530 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
3531 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
3532 tcg_gen_ext16s_tl(s
->tmp0
, s
->T0
);
3533 tcg_gen_sub_tl(cpu_cc_src
, s
->T0
, s
->tmp0
);
3534 tcg_gen_shri_tl(s
->T0
, s
->T0
, 16);
3535 gen_op_mov_reg_v(s
, MO_16
, R_EDX
, s
->T0
);
3536 set_cc_op(s
, CC_OP_MULW
);
3540 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3541 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, cpu_regs
[R_EAX
]);
3542 tcg_gen_muls2_i32(s
->tmp2_i32
, s
->tmp3_i32
,
3543 s
->tmp2_i32
, s
->tmp3_i32
);
3544 tcg_gen_extu_i32_tl(cpu_regs
[R_EAX
], s
->tmp2_i32
);
3545 tcg_gen_extu_i32_tl(cpu_regs
[R_EDX
], s
->tmp3_i32
);
3546 tcg_gen_sari_i32(s
->tmp2_i32
, s
->tmp2_i32
, 31);
3547 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
3548 tcg_gen_sub_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
3549 tcg_gen_extu_i32_tl(cpu_cc_src
, s
->tmp2_i32
);
3550 set_cc_op(s
, CC_OP_MULL
);
3552 #ifdef TARGET_X86_64
3554 tcg_gen_muls2_i64(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
],
3555 s
->T0
, cpu_regs
[R_EAX
]);
3556 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
3557 tcg_gen_sari_tl(cpu_cc_src
, cpu_regs
[R_EAX
], 63);
3558 tcg_gen_sub_tl(cpu_cc_src
, cpu_cc_src
, cpu_regs
[R_EDX
]);
3559 set_cc_op(s
, CC_OP_MULQ
);
3567 gen_helper_divb_AL(cpu_env
, s
->T0
);
3570 gen_helper_divw_AX(cpu_env
, s
->T0
);
3574 gen_helper_divl_EAX(cpu_env
, s
->T0
);
3576 #ifdef TARGET_X86_64
3578 gen_helper_divq_EAX(cpu_env
, s
->T0
);
3586 gen_helper_idivb_AL(cpu_env
, s
->T0
);
3589 gen_helper_idivw_AX(cpu_env
, s
->T0
);
3593 gen_helper_idivl_EAX(cpu_env
, s
->T0
);
3595 #ifdef TARGET_X86_64
3597 gen_helper_idivq_EAX(cpu_env
, s
->T0
);
3607 case 0xfe: /* GRP4 */
3608 case 0xff: /* GRP5 */
3609 ot
= mo_b_d(b
, dflag
);
3611 modrm
= x86_ldub_code(env
, s
);
3612 mod
= (modrm
>> 6) & 3;
3613 rm
= (modrm
& 7) | REX_B(s
);
3614 op
= (modrm
>> 3) & 7;
3615 if (op
>= 2 && b
== 0xfe) {
3619 if (op
== 2 || op
== 4) {
3620 /* operand size for jumps is 64 bit */
3622 } else if (op
== 3 || op
== 5) {
3623 ot
= dflag
!= MO_16
? MO_32
+ REX_W(s
) : MO_16
;
3624 } else if (op
== 6) {
3625 /* default push size is 64 bit */
3626 ot
= mo_pushpop(s
, dflag
);
3630 gen_lea_modrm(env
, s
, modrm
);
3631 if (op
>= 2 && op
!= 3 && op
!= 5)
3632 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
3634 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
3638 case 0: /* inc Ev */
3643 gen_inc(s
, ot
, opreg
, 1);
3645 case 1: /* dec Ev */
3650 gen_inc(s
, ot
, opreg
, -1);
3652 case 2: /* call Ev */
3653 /* XXX: optimize if memory (no 'and' is necessary) */
3654 if (dflag
== MO_16
) {
3655 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
3657 gen_push_v(s
, eip_next_tl(s
));
3658 gen_op_jmp_v(s
, s
->T0
);
3660 s
->base
.is_jmp
= DISAS_JUMP
;
3662 case 3: /* lcall Ev */
3666 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
3667 gen_add_A0_im(s
, 1 << ot
);
3668 gen_op_ld_v(s
, MO_16
, s
->T0
, s
->A0
);
3670 if (PE(s
) && !VM86(s
)) {
3671 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3672 gen_helper_lcall_protected(cpu_env
, s
->tmp2_i32
, s
->T1
,
3673 tcg_constant_i32(dflag
- 1),
3676 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3677 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
3678 gen_helper_lcall_real(cpu_env
, s
->tmp2_i32
, s
->tmp3_i32
,
3679 tcg_constant_i32(dflag
- 1),
3682 s
->base
.is_jmp
= DISAS_JUMP
;
3684 case 4: /* jmp Ev */
3685 if (dflag
== MO_16
) {
3686 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
3688 gen_op_jmp_v(s
, s
->T0
);
3690 s
->base
.is_jmp
= DISAS_JUMP
;
3692 case 5: /* ljmp Ev */
3696 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
3697 gen_add_A0_im(s
, 1 << ot
);
3698 gen_op_ld_v(s
, MO_16
, s
->T0
, s
->A0
);
3700 if (PE(s
) && !VM86(s
)) {
3701 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3702 gen_helper_ljmp_protected(cpu_env
, s
->tmp2_i32
, s
->T1
,
3705 gen_op_movl_seg_T0_vm(s
, R_CS
);
3706 gen_op_jmp_v(s
, s
->T1
);
3708 s
->base
.is_jmp
= DISAS_JUMP
;
3710 case 6: /* push Ev */
3711 gen_push_v(s
, s
->T0
);
3718 case 0x84: /* test Ev, Gv */
3720 ot
= mo_b_d(b
, dflag
);
3722 modrm
= x86_ldub_code(env
, s
);
3723 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
3725 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3726 gen_op_mov_v_reg(s
, ot
, s
->T1
, reg
);
3727 gen_op_testl_T0_T1_cc(s
);
3728 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
3731 case 0xa8: /* test eAX, Iv */
3733 ot
= mo_b_d(b
, dflag
);
3734 val
= insn_get(env
, s
, ot
);
3736 gen_op_mov_v_reg(s
, ot
, s
->T0
, OR_EAX
);
3737 tcg_gen_movi_tl(s
->T1
, val
);
3738 gen_op_testl_T0_T1_cc(s
);
3739 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
3742 case 0x98: /* CWDE/CBW */
3744 #ifdef TARGET_X86_64
3746 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, R_EAX
);
3747 tcg_gen_ext32s_tl(s
->T0
, s
->T0
);
3748 gen_op_mov_reg_v(s
, MO_64
, R_EAX
, s
->T0
);
3752 gen_op_mov_v_reg(s
, MO_16
, s
->T0
, R_EAX
);
3753 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
3754 gen_op_mov_reg_v(s
, MO_32
, R_EAX
, s
->T0
);
3757 gen_op_mov_v_reg(s
, MO_8
, s
->T0
, R_EAX
);
3758 tcg_gen_ext8s_tl(s
->T0
, s
->T0
);
3759 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
3765 case 0x99: /* CDQ/CWD */
3767 #ifdef TARGET_X86_64
3769 gen_op_mov_v_reg(s
, MO_64
, s
->T0
, R_EAX
);
3770 tcg_gen_sari_tl(s
->T0
, s
->T0
, 63);
3771 gen_op_mov_reg_v(s
, MO_64
, R_EDX
, s
->T0
);
3775 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, R_EAX
);
3776 tcg_gen_ext32s_tl(s
->T0
, s
->T0
);
3777 tcg_gen_sari_tl(s
->T0
, s
->T0
, 31);
3778 gen_op_mov_reg_v(s
, MO_32
, R_EDX
, s
->T0
);
3781 gen_op_mov_v_reg(s
, MO_16
, s
->T0
, R_EAX
);
3782 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
3783 tcg_gen_sari_tl(s
->T0
, s
->T0
, 15);
3784 gen_op_mov_reg_v(s
, MO_16
, R_EDX
, s
->T0
);
3790 case 0x1af: /* imul Gv, Ev */
3791 case 0x69: /* imul Gv, Ev, I */
3794 modrm
= x86_ldub_code(env
, s
);
3795 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
3797 s
->rip_offset
= insn_const_size(ot
);
3800 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3802 val
= insn_get(env
, s
, ot
);
3803 tcg_gen_movi_tl(s
->T1
, val
);
3804 } else if (b
== 0x6b) {
3805 val
= (int8_t)insn_get(env
, s
, MO_8
);
3806 tcg_gen_movi_tl(s
->T1
, val
);
3808 gen_op_mov_v_reg(s
, ot
, s
->T1
, reg
);
3811 #ifdef TARGET_X86_64
3813 tcg_gen_muls2_i64(cpu_regs
[reg
], s
->T1
, s
->T0
, s
->T1
);
3814 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[reg
]);
3815 tcg_gen_sari_tl(cpu_cc_src
, cpu_cc_dst
, 63);
3816 tcg_gen_sub_tl(cpu_cc_src
, cpu_cc_src
, s
->T1
);
3820 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3821 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
3822 tcg_gen_muls2_i32(s
->tmp2_i32
, s
->tmp3_i32
,
3823 s
->tmp2_i32
, s
->tmp3_i32
);
3824 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp2_i32
);
3825 tcg_gen_sari_i32(s
->tmp2_i32
, s
->tmp2_i32
, 31);
3826 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[reg
]);
3827 tcg_gen_sub_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
3828 tcg_gen_extu_i32_tl(cpu_cc_src
, s
->tmp2_i32
);
3831 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
3832 tcg_gen_ext16s_tl(s
->T1
, s
->T1
);
3833 /* XXX: use 32 bit mul which could be faster */
3834 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
3835 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
3836 tcg_gen_ext16s_tl(s
->tmp0
, s
->T0
);
3837 tcg_gen_sub_tl(cpu_cc_src
, s
->T0
, s
->tmp0
);
3838 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3841 set_cc_op(s
, CC_OP_MULB
+ ot
);
3844 case 0x1c1: /* xadd Ev, Gv */
3845 ot
= mo_b_d(b
, dflag
);
3846 modrm
= x86_ldub_code(env
, s
);
3847 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
3848 mod
= (modrm
>> 6) & 3;
3849 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
3851 rm
= (modrm
& 7) | REX_B(s
);
3852 gen_op_mov_v_reg(s
, ot
, s
->T1
, rm
);
3853 tcg_gen_add_tl(s
->T0
, s
->T0
, s
->T1
);
3854 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
3855 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
3857 gen_lea_modrm(env
, s
, modrm
);
3858 if (s
->prefix
& PREFIX_LOCK
) {
3859 tcg_gen_atomic_fetch_add_tl(s
->T1
, s
->A0
, s
->T0
,
3860 s
->mem_index
, ot
| MO_LE
);
3861 tcg_gen_add_tl(s
->T0
, s
->T0
, s
->T1
);
3863 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
3864 tcg_gen_add_tl(s
->T0
, s
->T0
, s
->T1
);
3865 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
3867 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
3869 gen_op_update2_cc(s
);
3870 set_cc_op(s
, CC_OP_ADDB
+ ot
);
3873 case 0x1b1: /* cmpxchg Ev, Gv */
3875 TCGv oldv
, newv
, cmpv
, dest
;
3877 ot
= mo_b_d(b
, dflag
);
3878 modrm
= x86_ldub_code(env
, s
);
3879 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
3880 mod
= (modrm
>> 6) & 3;
3881 oldv
= tcg_temp_new();
3882 newv
= tcg_temp_new();
3883 cmpv
= tcg_temp_new();
3884 gen_op_mov_v_reg(s
, ot
, newv
, reg
);
3885 tcg_gen_mov_tl(cmpv
, cpu_regs
[R_EAX
]);
3887 if (s
->prefix
& PREFIX_LOCK
) {
3891 gen_lea_modrm(env
, s
, modrm
);
3892 tcg_gen_atomic_cmpxchg_tl(oldv
, s
->A0
, cmpv
, newv
,
3893 s
->mem_index
, ot
| MO_LE
);
3896 rm
= (modrm
& 7) | REX_B(s
);
3897 gen_op_mov_v_reg(s
, ot
, oldv
, rm
);
3901 * Unlike the memory case, where "the destination operand receives
3902 * a write cycle without regard to the result of the comparison",
3903 * rm must not be touched altogether if the write fails, including
3904 * not zero-extending it on 64-bit processors. So, precompute
3905 * the result of a successful writeback and perform the movcond
3906 * directly on cpu_regs. Also need to write accumulator first, in
3907 * case rm is part of RAX too.
3909 dest
= gen_op_deposit_reg_v(s
, ot
, rm
, newv
, newv
);
3910 tcg_gen_movcond_tl(TCG_COND_EQ
, dest
, oldv
, cmpv
, newv
, dest
);
3912 gen_lea_modrm(env
, s
, modrm
);
3913 gen_op_ld_v(s
, ot
, oldv
, s
->A0
);
3916 * Perform an unconditional store cycle like physical cpu;
3917 * must be before changing accumulator to ensure
3918 * idempotency if the store faults and the instruction
3921 tcg_gen_movcond_tl(TCG_COND_EQ
, newv
, oldv
, cmpv
, newv
, oldv
);
3922 gen_op_st_v(s
, ot
, newv
, s
->A0
);
3926 * Write EAX only if the cmpxchg fails; reuse newv as the destination,
3927 * since it's dead here.
3929 dest
= gen_op_deposit_reg_v(s
, ot
, R_EAX
, newv
, oldv
);
3930 tcg_gen_movcond_tl(TCG_COND_EQ
, dest
, oldv
, cmpv
, dest
, newv
);
3931 tcg_gen_mov_tl(cpu_cc_src
, oldv
);
3932 tcg_gen_mov_tl(s
->cc_srcT
, cmpv
);
3933 tcg_gen_sub_tl(cpu_cc_dst
, cmpv
, oldv
);
3934 set_cc_op(s
, CC_OP_SUBB
+ ot
);
3935 tcg_temp_free(oldv
);
3936 tcg_temp_free(newv
);
3937 tcg_temp_free(cmpv
);
3940 case 0x1c7: /* cmpxchg8b */
3941 modrm
= x86_ldub_code(env
, s
);
3942 mod
= (modrm
>> 6) & 3;
3943 switch ((modrm
>> 3) & 7) {
3944 case 1: /* CMPXCHG8, CMPXCHG16 */
3948 #ifdef TARGET_X86_64
3949 if (dflag
== MO_64
) {
3950 if (!(s
->cpuid_ext_features
& CPUID_EXT_CX16
)) {
3953 gen_cmpxchg16b(s
, env
, modrm
);
3957 if (!(s
->cpuid_features
& CPUID_CX8
)) {
3960 gen_cmpxchg8b(s
, env
, modrm
);
3963 case 7: /* RDSEED */
3964 case 6: /* RDRAND */
3966 (s
->prefix
& (PREFIX_LOCK
| PREFIX_REPZ
| PREFIX_REPNZ
)) ||
3967 !(s
->cpuid_ext_features
& CPUID_EXT_RDRAND
)) {
3970 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
3972 s
->base
.is_jmp
= DISAS_TOO_MANY
;
3974 gen_helper_rdrand(s
->T0
, cpu_env
);
3975 rm
= (modrm
& 7) | REX_B(s
);
3976 gen_op_mov_reg_v(s
, dflag
, rm
, s
->T0
);
3977 set_cc_op(s
, CC_OP_EFLAGS
);
3985 /**************************/
3987 case 0x50 ... 0x57: /* push */
3988 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, (b
& 7) | REX_B(s
));
3989 gen_push_v(s
, s
->T0
);
3991 case 0x58 ... 0x5f: /* pop */
3993 /* NOTE: order is important for pop %sp */
3994 gen_pop_update(s
, ot
);
3995 gen_op_mov_reg_v(s
, ot
, (b
& 7) | REX_B(s
), s
->T0
);
3997 case 0x60: /* pusha */
4002 case 0x61: /* popa */
4007 case 0x68: /* push Iv */
4009 ot
= mo_pushpop(s
, dflag
);
4011 val
= insn_get(env
, s
, ot
);
4013 val
= (int8_t)insn_get(env
, s
, MO_8
);
4014 tcg_gen_movi_tl(s
->T0
, val
);
4015 gen_push_v(s
, s
->T0
);
4017 case 0x8f: /* pop Ev */
4018 modrm
= x86_ldub_code(env
, s
);
4019 mod
= (modrm
>> 6) & 3;
4022 /* NOTE: order is important for pop %sp */
4023 gen_pop_update(s
, ot
);
4024 rm
= (modrm
& 7) | REX_B(s
);
4025 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
4027 /* NOTE: order is important too for MMU exceptions */
4028 s
->popl_esp_hack
= 1 << ot
;
4029 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
4030 s
->popl_esp_hack
= 0;
4031 gen_pop_update(s
, ot
);
4034 case 0xc8: /* enter */
4037 val
= x86_lduw_code(env
, s
);
4038 level
= x86_ldub_code(env
, s
);
4039 gen_enter(s
, val
, level
);
4042 case 0xc9: /* leave */
4045 case 0x06: /* push es */
4046 case 0x0e: /* push cs */
4047 case 0x16: /* push ss */
4048 case 0x1e: /* push ds */
4051 gen_op_movl_T0_seg(s
, b
>> 3);
4052 gen_push_v(s
, s
->T0
);
4054 case 0x1a0: /* push fs */
4055 case 0x1a8: /* push gs */
4056 gen_op_movl_T0_seg(s
, (b
>> 3) & 7);
4057 gen_push_v(s
, s
->T0
);
4059 case 0x07: /* pop es */
4060 case 0x17: /* pop ss */
4061 case 0x1f: /* pop ds */
4066 gen_movl_seg_T0(s
, reg
);
4067 gen_pop_update(s
, ot
);
4069 case 0x1a1: /* pop fs */
4070 case 0x1a9: /* pop gs */
4072 gen_movl_seg_T0(s
, (b
>> 3) & 7);
4073 gen_pop_update(s
, ot
);
4076 /**************************/
4079 case 0x89: /* mov Gv, Ev */
4080 ot
= mo_b_d(b
, dflag
);
4081 modrm
= x86_ldub_code(env
, s
);
4082 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4084 /* generate a generic store */
4085 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
4088 case 0xc7: /* mov Ev, Iv */
4089 ot
= mo_b_d(b
, dflag
);
4090 modrm
= x86_ldub_code(env
, s
);
4091 mod
= (modrm
>> 6) & 3;
4093 s
->rip_offset
= insn_const_size(ot
);
4094 gen_lea_modrm(env
, s
, modrm
);
4096 val
= insn_get(env
, s
, ot
);
4097 tcg_gen_movi_tl(s
->T0
, val
);
4099 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
4101 gen_op_mov_reg_v(s
, ot
, (modrm
& 7) | REX_B(s
), s
->T0
);
4105 case 0x8b: /* mov Ev, Gv */
4106 ot
= mo_b_d(b
, dflag
);
4107 modrm
= x86_ldub_code(env
, s
);
4108 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4110 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4111 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
4113 case 0x8e: /* mov seg, Gv */
4114 modrm
= x86_ldub_code(env
, s
);
4115 reg
= (modrm
>> 3) & 7;
4116 if (reg
>= 6 || reg
== R_CS
)
4118 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
4119 gen_movl_seg_T0(s
, reg
);
4121 case 0x8c: /* mov Gv, seg */
4122 modrm
= x86_ldub_code(env
, s
);
4123 reg
= (modrm
>> 3) & 7;
4124 mod
= (modrm
>> 6) & 3;
4127 gen_op_movl_T0_seg(s
, reg
);
4128 ot
= mod
== 3 ? dflag
: MO_16
;
4129 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
4132 case 0x1b6: /* movzbS Gv, Eb */
4133 case 0x1b7: /* movzwS Gv, Eb */
4134 case 0x1be: /* movsbS Gv, Eb */
4135 case 0x1bf: /* movswS Gv, Eb */
4140 /* d_ot is the size of destination */
4142 /* ot is the size of source */
4143 ot
= (b
& 1) + MO_8
;
4144 /* s_ot is the sign+size of source */
4145 s_ot
= b
& 8 ? MO_SIGN
| ot
: ot
;
4147 modrm
= x86_ldub_code(env
, s
);
4148 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4149 mod
= (modrm
>> 6) & 3;
4150 rm
= (modrm
& 7) | REX_B(s
);
4153 if (s_ot
== MO_SB
&& byte_reg_is_xH(s
, rm
)) {
4154 tcg_gen_sextract_tl(s
->T0
, cpu_regs
[rm
- 4], 8, 8);
4156 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
4159 tcg_gen_ext8u_tl(s
->T0
, s
->T0
);
4162 tcg_gen_ext8s_tl(s
->T0
, s
->T0
);
4165 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
4169 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
4173 gen_op_mov_reg_v(s
, d_ot
, reg
, s
->T0
);
4175 gen_lea_modrm(env
, s
, modrm
);
4176 gen_op_ld_v(s
, s_ot
, s
->T0
, s
->A0
);
4177 gen_op_mov_reg_v(s
, d_ot
, reg
, s
->T0
);
4182 case 0x8d: /* lea */
4183 modrm
= x86_ldub_code(env
, s
);
4184 mod
= (modrm
>> 6) & 3;
4187 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4189 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
4190 TCGv ea
= gen_lea_modrm_1(s
, a
, false);
4191 gen_lea_v_seg(s
, s
->aflag
, ea
, -1, -1);
4192 gen_op_mov_reg_v(s
, dflag
, reg
, s
->A0
);
4196 case 0xa0: /* mov EAX, Ov */
4198 case 0xa2: /* mov Ov, EAX */
4201 target_ulong offset_addr
;
4203 ot
= mo_b_d(b
, dflag
);
4204 offset_addr
= insn_get_addr(env
, s
, s
->aflag
);
4205 tcg_gen_movi_tl(s
->A0
, offset_addr
);
4206 gen_add_A0_ds_seg(s
);
4208 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
4209 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T0
);
4211 gen_op_mov_v_reg(s
, ot
, s
->T0
, R_EAX
);
4212 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
4216 case 0xd7: /* xlat */
4217 tcg_gen_mov_tl(s
->A0
, cpu_regs
[R_EBX
]);
4218 tcg_gen_ext8u_tl(s
->T0
, cpu_regs
[R_EAX
]);
4219 tcg_gen_add_tl(s
->A0
, s
->A0
, s
->T0
);
4220 gen_extu(s
->aflag
, s
->A0
);
4221 gen_add_A0_ds_seg(s
);
4222 gen_op_ld_v(s
, MO_8
, s
->T0
, s
->A0
);
4223 gen_op_mov_reg_v(s
, MO_8
, R_EAX
, s
->T0
);
4225 case 0xb0 ... 0xb7: /* mov R, Ib */
4226 val
= insn_get(env
, s
, MO_8
);
4227 tcg_gen_movi_tl(s
->T0
, val
);
4228 gen_op_mov_reg_v(s
, MO_8
, (b
& 7) | REX_B(s
), s
->T0
);
4230 case 0xb8 ... 0xbf: /* mov R, Iv */
4231 #ifdef TARGET_X86_64
4232 if (dflag
== MO_64
) {
4235 tmp
= x86_ldq_code(env
, s
);
4236 reg
= (b
& 7) | REX_B(s
);
4237 tcg_gen_movi_tl(s
->T0
, tmp
);
4238 gen_op_mov_reg_v(s
, MO_64
, reg
, s
->T0
);
4243 val
= insn_get(env
, s
, ot
);
4244 reg
= (b
& 7) | REX_B(s
);
4245 tcg_gen_movi_tl(s
->T0
, val
);
4246 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
4250 case 0x91 ... 0x97: /* xchg R, EAX */
4253 reg
= (b
& 7) | REX_B(s
);
4257 case 0x87: /* xchg Ev, Gv */
4258 ot
= mo_b_d(b
, dflag
);
4259 modrm
= x86_ldub_code(env
, s
);
4260 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4261 mod
= (modrm
>> 6) & 3;
4263 rm
= (modrm
& 7) | REX_B(s
);
4265 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
4266 gen_op_mov_v_reg(s
, ot
, s
->T1
, rm
);
4267 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
4268 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
4270 gen_lea_modrm(env
, s
, modrm
);
4271 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
4272 /* for xchg, lock is implicit */
4273 tcg_gen_atomic_xchg_tl(s
->T1
, s
->A0
, s
->T0
,
4274 s
->mem_index
, ot
| MO_LE
);
4275 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
4278 case 0xc4: /* les Gv */
4279 /* In CODE64 this is VEX3; see above. */
4282 case 0xc5: /* lds Gv */
4283 /* In CODE64 this is VEX2; see above. */
4286 case 0x1b2: /* lss Gv */
4289 case 0x1b4: /* lfs Gv */
4292 case 0x1b5: /* lgs Gv */
4295 ot
= dflag
!= MO_16
? MO_32
: MO_16
;
4296 modrm
= x86_ldub_code(env
, s
);
4297 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4298 mod
= (modrm
>> 6) & 3;
4301 gen_lea_modrm(env
, s
, modrm
);
4302 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
4303 gen_add_A0_im(s
, 1 << ot
);
4304 /* load the segment first to handle exceptions properly */
4305 gen_op_ld_v(s
, MO_16
, s
->T0
, s
->A0
);
4306 gen_movl_seg_T0(s
, op
);
4307 /* then put the data */
4308 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
4311 /************************/
4319 ot
= mo_b_d(b
, dflag
);
4320 modrm
= x86_ldub_code(env
, s
);
4321 mod
= (modrm
>> 6) & 3;
4322 op
= (modrm
>> 3) & 7;
4328 gen_lea_modrm(env
, s
, modrm
);
4331 opreg
= (modrm
& 7) | REX_B(s
);
4336 gen_shift(s
, op
, ot
, opreg
, OR_ECX
);
4339 shift
= x86_ldub_code(env
, s
);
4341 gen_shifti(s
, op
, ot
, opreg
, shift
);
4356 case 0x1a4: /* shld imm */
4360 case 0x1a5: /* shld cl */
4364 case 0x1ac: /* shrd imm */
4368 case 0x1ad: /* shrd cl */
4373 modrm
= x86_ldub_code(env
, s
);
4374 mod
= (modrm
>> 6) & 3;
4375 rm
= (modrm
& 7) | REX_B(s
);
4376 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4378 gen_lea_modrm(env
, s
, modrm
);
4383 gen_op_mov_v_reg(s
, ot
, s
->T1
, reg
);
4386 TCGv imm
= tcg_const_tl(x86_ldub_code(env
, s
));
4387 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, imm
);
4390 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, cpu_regs
[R_ECX
]);
4394 /************************/
4398 bool update_fip
= true;
4400 if (s
->flags
& (HF_EM_MASK
| HF_TS_MASK
)) {
4401 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
4402 /* XXX: what to do if illegal op ? */
4403 gen_exception(s
, EXCP07_PREX
);
4406 modrm
= x86_ldub_code(env
, s
);
4407 mod
= (modrm
>> 6) & 3;
4409 op
= ((b
& 7) << 3) | ((modrm
>> 3) & 7);
4412 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
4413 TCGv ea
= gen_lea_modrm_1(s
, a
, false);
4414 TCGv last_addr
= tcg_temp_new();
4415 bool update_fdp
= true;
4417 tcg_gen_mov_tl(last_addr
, ea
);
4418 gen_lea_v_seg(s
, s
->aflag
, ea
, a
.def_seg
, s
->override
);
4421 case 0x00 ... 0x07: /* fxxxs */
4422 case 0x10 ... 0x17: /* fixxxl */
4423 case 0x20 ... 0x27: /* fxxxl */
4424 case 0x30 ... 0x37: /* fixxx */
4431 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4432 s
->mem_index
, MO_LEUL
);
4433 gen_helper_flds_FT0(cpu_env
, s
->tmp2_i32
);
4436 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4437 s
->mem_index
, MO_LEUL
);
4438 gen_helper_fildl_FT0(cpu_env
, s
->tmp2_i32
);
4441 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
,
4442 s
->mem_index
, MO_LEUQ
);
4443 gen_helper_fldl_FT0(cpu_env
, s
->tmp1_i64
);
4447 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4448 s
->mem_index
, MO_LESW
);
4449 gen_helper_fildl_FT0(cpu_env
, s
->tmp2_i32
);
4453 gen_helper_fp_arith_ST0_FT0(op1
);
4455 /* fcomp needs pop */
4456 gen_helper_fpop(cpu_env
);
4460 case 0x08: /* flds */
4461 case 0x0a: /* fsts */
4462 case 0x0b: /* fstps */
4463 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
4464 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
4465 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
4470 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4471 s
->mem_index
, MO_LEUL
);
4472 gen_helper_flds_ST0(cpu_env
, s
->tmp2_i32
);
4475 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4476 s
->mem_index
, MO_LEUL
);
4477 gen_helper_fildl_ST0(cpu_env
, s
->tmp2_i32
);
4480 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
,
4481 s
->mem_index
, MO_LEUQ
);
4482 gen_helper_fldl_ST0(cpu_env
, s
->tmp1_i64
);
4486 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4487 s
->mem_index
, MO_LESW
);
4488 gen_helper_fildl_ST0(cpu_env
, s
->tmp2_i32
);
4493 /* XXX: the corresponding CPUID bit must be tested ! */
4496 gen_helper_fisttl_ST0(s
->tmp2_i32
, cpu_env
);
4497 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
4498 s
->mem_index
, MO_LEUL
);
4501 gen_helper_fisttll_ST0(s
->tmp1_i64
, cpu_env
);
4502 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
,
4503 s
->mem_index
, MO_LEUQ
);
4507 gen_helper_fistt_ST0(s
->tmp2_i32
, cpu_env
);
4508 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
4509 s
->mem_index
, MO_LEUW
);
4512 gen_helper_fpop(cpu_env
);
4517 gen_helper_fsts_ST0(s
->tmp2_i32
, cpu_env
);
4518 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
4519 s
->mem_index
, MO_LEUL
);
4522 gen_helper_fistl_ST0(s
->tmp2_i32
, cpu_env
);
4523 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
4524 s
->mem_index
, MO_LEUL
);
4527 gen_helper_fstl_ST0(s
->tmp1_i64
, cpu_env
);
4528 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
,
4529 s
->mem_index
, MO_LEUQ
);
4533 gen_helper_fist_ST0(s
->tmp2_i32
, cpu_env
);
4534 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
4535 s
->mem_index
, MO_LEUW
);
4538 if ((op
& 7) == 3) {
4539 gen_helper_fpop(cpu_env
);
4544 case 0x0c: /* fldenv mem */
4545 gen_helper_fldenv(cpu_env
, s
->A0
,
4546 tcg_const_i32(dflag
- 1));
4547 update_fip
= update_fdp
= false;
4549 case 0x0d: /* fldcw mem */
4550 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4551 s
->mem_index
, MO_LEUW
);
4552 gen_helper_fldcw(cpu_env
, s
->tmp2_i32
);
4553 update_fip
= update_fdp
= false;
4555 case 0x0e: /* fnstenv mem */
4556 gen_helper_fstenv(cpu_env
, s
->A0
,
4557 tcg_const_i32(dflag
- 1));
4558 update_fip
= update_fdp
= false;
4560 case 0x0f: /* fnstcw mem */
4561 gen_helper_fnstcw(s
->tmp2_i32
, cpu_env
);
4562 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
4563 s
->mem_index
, MO_LEUW
);
4564 update_fip
= update_fdp
= false;
4566 case 0x1d: /* fldt mem */
4567 gen_helper_fldt_ST0(cpu_env
, s
->A0
);
4569 case 0x1f: /* fstpt mem */
4570 gen_helper_fstt_ST0(cpu_env
, s
->A0
);
4571 gen_helper_fpop(cpu_env
);
4573 case 0x2c: /* frstor mem */
4574 gen_helper_frstor(cpu_env
, s
->A0
,
4575 tcg_const_i32(dflag
- 1));
4576 update_fip
= update_fdp
= false;
4578 case 0x2e: /* fnsave mem */
4579 gen_helper_fsave(cpu_env
, s
->A0
,
4580 tcg_const_i32(dflag
- 1));
4581 update_fip
= update_fdp
= false;
4583 case 0x2f: /* fnstsw mem */
4584 gen_helper_fnstsw(s
->tmp2_i32
, cpu_env
);
4585 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
4586 s
->mem_index
, MO_LEUW
);
4587 update_fip
= update_fdp
= false;
4589 case 0x3c: /* fbld */
4590 gen_helper_fbld_ST0(cpu_env
, s
->A0
);
4592 case 0x3e: /* fbstp */
4593 gen_helper_fbst_ST0(cpu_env
, s
->A0
);
4594 gen_helper_fpop(cpu_env
);
4596 case 0x3d: /* fildll */
4597 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
,
4598 s
->mem_index
, MO_LEUQ
);
4599 gen_helper_fildll_ST0(cpu_env
, s
->tmp1_i64
);
4601 case 0x3f: /* fistpll */
4602 gen_helper_fistll_ST0(s
->tmp1_i64
, cpu_env
);
4603 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
,
4604 s
->mem_index
, MO_LEUQ
);
4605 gen_helper_fpop(cpu_env
);
4612 int last_seg
= s
->override
>= 0 ? s
->override
: a
.def_seg
;
4614 tcg_gen_ld_i32(s
->tmp2_i32
, cpu_env
,
4615 offsetof(CPUX86State
,
4616 segs
[last_seg
].selector
));
4617 tcg_gen_st16_i32(s
->tmp2_i32
, cpu_env
,
4618 offsetof(CPUX86State
, fpds
));
4619 tcg_gen_st_tl(last_addr
, cpu_env
,
4620 offsetof(CPUX86State
, fpdp
));
4622 tcg_temp_free(last_addr
);
4624 /* register float ops */
4628 case 0x08: /* fld sti */
4629 gen_helper_fpush(cpu_env
);
4630 gen_helper_fmov_ST0_STN(cpu_env
,
4631 tcg_const_i32((opreg
+ 1) & 7));
4633 case 0x09: /* fxchg sti */
4634 case 0x29: /* fxchg4 sti, undocumented op */
4635 case 0x39: /* fxchg7 sti, undocumented op */
4636 gen_helper_fxchg_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
4638 case 0x0a: /* grp d9/2 */
4641 /* check exceptions (FreeBSD FPU probe) */
4642 gen_helper_fwait(cpu_env
);
4649 case 0x0c: /* grp d9/4 */
4652 gen_helper_fchs_ST0(cpu_env
);
4655 gen_helper_fabs_ST0(cpu_env
);
4658 gen_helper_fldz_FT0(cpu_env
);
4659 gen_helper_fcom_ST0_FT0(cpu_env
);
4662 gen_helper_fxam_ST0(cpu_env
);
4668 case 0x0d: /* grp d9/5 */
4672 gen_helper_fpush(cpu_env
);
4673 gen_helper_fld1_ST0(cpu_env
);
4676 gen_helper_fpush(cpu_env
);
4677 gen_helper_fldl2t_ST0(cpu_env
);
4680 gen_helper_fpush(cpu_env
);
4681 gen_helper_fldl2e_ST0(cpu_env
);
4684 gen_helper_fpush(cpu_env
);
4685 gen_helper_fldpi_ST0(cpu_env
);
4688 gen_helper_fpush(cpu_env
);
4689 gen_helper_fldlg2_ST0(cpu_env
);
4692 gen_helper_fpush(cpu_env
);
4693 gen_helper_fldln2_ST0(cpu_env
);
4696 gen_helper_fpush(cpu_env
);
4697 gen_helper_fldz_ST0(cpu_env
);
4704 case 0x0e: /* grp d9/6 */
4707 gen_helper_f2xm1(cpu_env
);
4710 gen_helper_fyl2x(cpu_env
);
4713 gen_helper_fptan(cpu_env
);
4715 case 3: /* fpatan */
4716 gen_helper_fpatan(cpu_env
);
4718 case 4: /* fxtract */
4719 gen_helper_fxtract(cpu_env
);
4721 case 5: /* fprem1 */
4722 gen_helper_fprem1(cpu_env
);
4724 case 6: /* fdecstp */
4725 gen_helper_fdecstp(cpu_env
);
4728 case 7: /* fincstp */
4729 gen_helper_fincstp(cpu_env
);
4733 case 0x0f: /* grp d9/7 */
4736 gen_helper_fprem(cpu_env
);
4738 case 1: /* fyl2xp1 */
4739 gen_helper_fyl2xp1(cpu_env
);
4742 gen_helper_fsqrt(cpu_env
);
4744 case 3: /* fsincos */
4745 gen_helper_fsincos(cpu_env
);
4747 case 5: /* fscale */
4748 gen_helper_fscale(cpu_env
);
4750 case 4: /* frndint */
4751 gen_helper_frndint(cpu_env
);
4754 gen_helper_fsin(cpu_env
);
4758 gen_helper_fcos(cpu_env
);
4762 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
4763 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
4764 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
4770 gen_helper_fp_arith_STN_ST0(op1
, opreg
);
4772 gen_helper_fpop(cpu_env
);
4775 gen_helper_fmov_FT0_STN(cpu_env
,
4776 tcg_const_i32(opreg
));
4777 gen_helper_fp_arith_ST0_FT0(op1
);
4781 case 0x02: /* fcom */
4782 case 0x22: /* fcom2, undocumented op */
4783 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
4784 gen_helper_fcom_ST0_FT0(cpu_env
);
4786 case 0x03: /* fcomp */
4787 case 0x23: /* fcomp3, undocumented op */
4788 case 0x32: /* fcomp5, undocumented op */
4789 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
4790 gen_helper_fcom_ST0_FT0(cpu_env
);
4791 gen_helper_fpop(cpu_env
);
4793 case 0x15: /* da/5 */
4795 case 1: /* fucompp */
4796 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
4797 gen_helper_fucom_ST0_FT0(cpu_env
);
4798 gen_helper_fpop(cpu_env
);
4799 gen_helper_fpop(cpu_env
);
4807 case 0: /* feni (287 only, just do nop here) */
4809 case 1: /* fdisi (287 only, just do nop here) */
4812 gen_helper_fclex(cpu_env
);
4815 case 3: /* fninit */
4816 gen_helper_fninit(cpu_env
);
4819 case 4: /* fsetpm (287 only, just do nop here) */
4825 case 0x1d: /* fucomi */
4826 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
4829 gen_update_cc_op(s
);
4830 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
4831 gen_helper_fucomi_ST0_FT0(cpu_env
);
4832 set_cc_op(s
, CC_OP_EFLAGS
);
4834 case 0x1e: /* fcomi */
4835 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
4838 gen_update_cc_op(s
);
4839 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
4840 gen_helper_fcomi_ST0_FT0(cpu_env
);
4841 set_cc_op(s
, CC_OP_EFLAGS
);
4843 case 0x28: /* ffree sti */
4844 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
4846 case 0x2a: /* fst sti */
4847 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
4849 case 0x2b: /* fstp sti */
4850 case 0x0b: /* fstp1 sti, undocumented op */
4851 case 0x3a: /* fstp8 sti, undocumented op */
4852 case 0x3b: /* fstp9 sti, undocumented op */
4853 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
4854 gen_helper_fpop(cpu_env
);
4856 case 0x2c: /* fucom st(i) */
4857 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
4858 gen_helper_fucom_ST0_FT0(cpu_env
);
4860 case 0x2d: /* fucomp st(i) */
4861 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
4862 gen_helper_fucom_ST0_FT0(cpu_env
);
4863 gen_helper_fpop(cpu_env
);
4865 case 0x33: /* de/3 */
4867 case 1: /* fcompp */
4868 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
4869 gen_helper_fcom_ST0_FT0(cpu_env
);
4870 gen_helper_fpop(cpu_env
);
4871 gen_helper_fpop(cpu_env
);
4877 case 0x38: /* ffreep sti, undocumented op */
4878 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
4879 gen_helper_fpop(cpu_env
);
4881 case 0x3c: /* df/4 */
4884 gen_helper_fnstsw(s
->tmp2_i32
, cpu_env
);
4885 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
4886 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
4892 case 0x3d: /* fucomip */
4893 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
4896 gen_update_cc_op(s
);
4897 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
4898 gen_helper_fucomi_ST0_FT0(cpu_env
);
4899 gen_helper_fpop(cpu_env
);
4900 set_cc_op(s
, CC_OP_EFLAGS
);
4902 case 0x3e: /* fcomip */
4903 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
4906 gen_update_cc_op(s
);
4907 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
4908 gen_helper_fcomi_ST0_FT0(cpu_env
);
4909 gen_helper_fpop(cpu_env
);
4910 set_cc_op(s
, CC_OP_EFLAGS
);
4912 case 0x10 ... 0x13: /* fcmovxx */
4917 static const uint8_t fcmov_cc
[8] = {
4924 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
4927 op1
= fcmov_cc
[op
& 3] | (((op
>> 3) & 1) ^ 1);
4928 l1
= gen_new_label();
4929 gen_jcc1_noeob(s
, op1
, l1
);
4930 gen_helper_fmov_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
4940 tcg_gen_ld_i32(s
->tmp2_i32
, cpu_env
,
4941 offsetof(CPUX86State
, segs
[R_CS
].selector
));
4942 tcg_gen_st16_i32(s
->tmp2_i32
, cpu_env
,
4943 offsetof(CPUX86State
, fpcs
));
4944 tcg_gen_st_tl(eip_cur_tl(s
),
4945 cpu_env
, offsetof(CPUX86State
, fpip
));
4949 /************************/
4952 case 0xa4: /* movsS */
4954 ot
= mo_b_d(b
, dflag
);
4955 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
4956 gen_repz_movs(s
, ot
);
4962 case 0xaa: /* stosS */
4964 ot
= mo_b_d(b
, dflag
);
4965 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
4966 gen_repz_stos(s
, ot
);
4971 case 0xac: /* lodsS */
4973 ot
= mo_b_d(b
, dflag
);
4974 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
4975 gen_repz_lods(s
, ot
);
4980 case 0xae: /* scasS */
4982 ot
= mo_b_d(b
, dflag
);
4983 if (prefixes
& PREFIX_REPNZ
) {
4984 gen_repz_scas(s
, ot
, 1);
4985 } else if (prefixes
& PREFIX_REPZ
) {
4986 gen_repz_scas(s
, ot
, 0);
4992 case 0xa6: /* cmpsS */
4994 ot
= mo_b_d(b
, dflag
);
4995 if (prefixes
& PREFIX_REPNZ
) {
4996 gen_repz_cmps(s
, ot
, 1);
4997 } else if (prefixes
& PREFIX_REPZ
) {
4998 gen_repz_cmps(s
, ot
, 0);
5003 case 0x6c: /* insS */
5005 ot
= mo_b_d32(b
, dflag
);
5006 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
5007 tcg_gen_ext16u_i32(s
->tmp2_i32
, s
->tmp2_i32
);
5008 if (!gen_check_io(s
, ot
, s
->tmp2_i32
,
5009 SVM_IOIO_TYPE_MASK
| SVM_IOIO_STR_MASK
)) {
5012 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
5014 s
->base
.is_jmp
= DISAS_TOO_MANY
;
5016 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
5017 gen_repz_ins(s
, ot
);
5022 case 0x6e: /* outsS */
5024 ot
= mo_b_d32(b
, dflag
);
5025 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
5026 tcg_gen_ext16u_i32(s
->tmp2_i32
, s
->tmp2_i32
);
5027 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, SVM_IOIO_STR_MASK
)) {
5030 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
5032 s
->base
.is_jmp
= DISAS_TOO_MANY
;
5034 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
5035 gen_repz_outs(s
, ot
);
5041 /************************/
5046 ot
= mo_b_d32(b
, dflag
);
5047 val
= x86_ldub_code(env
, s
);
5048 tcg_gen_movi_i32(s
->tmp2_i32
, val
);
5049 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, SVM_IOIO_TYPE_MASK
)) {
5052 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
5054 s
->base
.is_jmp
= DISAS_TOO_MANY
;
5056 gen_helper_in_func(ot
, s
->T1
, s
->tmp2_i32
);
5057 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T1
);
5058 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
5062 ot
= mo_b_d32(b
, dflag
);
5063 val
= x86_ldub_code(env
, s
);
5064 tcg_gen_movi_i32(s
->tmp2_i32
, val
);
5065 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, 0)) {
5068 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
5070 s
->base
.is_jmp
= DISAS_TOO_MANY
;
5072 gen_op_mov_v_reg(s
, ot
, s
->T1
, R_EAX
);
5073 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
5074 gen_helper_out_func(ot
, s
->tmp2_i32
, s
->tmp3_i32
);
5075 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
5079 ot
= mo_b_d32(b
, dflag
);
5080 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
5081 tcg_gen_ext16u_i32(s
->tmp2_i32
, s
->tmp2_i32
);
5082 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, SVM_IOIO_TYPE_MASK
)) {
5085 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
5087 s
->base
.is_jmp
= DISAS_TOO_MANY
;
5089 gen_helper_in_func(ot
, s
->T1
, s
->tmp2_i32
);
5090 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T1
);
5091 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
5095 ot
= mo_b_d32(b
, dflag
);
5096 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
5097 tcg_gen_ext16u_i32(s
->tmp2_i32
, s
->tmp2_i32
);
5098 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, 0)) {
5101 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
5103 s
->base
.is_jmp
= DISAS_TOO_MANY
;
5105 gen_op_mov_v_reg(s
, ot
, s
->T1
, R_EAX
);
5106 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
5107 gen_helper_out_func(ot
, s
->tmp2_i32
, s
->tmp3_i32
);
5108 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
5111 /************************/
5113 case 0xc2: /* ret im */
5114 val
= x86_ldsw_code(env
, s
);
5116 gen_stack_update(s
, val
+ (1 << ot
));
5117 /* Note that gen_pop_T0 uses a zero-extending load. */
5118 gen_op_jmp_v(s
, s
->T0
);
5120 s
->base
.is_jmp
= DISAS_JUMP
;
5122 case 0xc3: /* ret */
5124 gen_pop_update(s
, ot
);
5125 /* Note that gen_pop_T0 uses a zero-extending load. */
5126 gen_op_jmp_v(s
, s
->T0
);
5128 s
->base
.is_jmp
= DISAS_JUMP
;
5130 case 0xca: /* lret im */
5131 val
= x86_ldsw_code(env
, s
);
5133 if (PE(s
) && !VM86(s
)) {
5134 gen_update_cc_op(s
);
5135 gen_update_eip_cur(s
);
5136 gen_helper_lret_protected(cpu_env
, tcg_const_i32(dflag
- 1),
5137 tcg_const_i32(val
));
5141 gen_op_ld_v(s
, dflag
, s
->T0
, s
->A0
);
5142 /* NOTE: keeping EIP updated is not a problem in case of
5144 gen_op_jmp_v(s
, s
->T0
);
5146 gen_add_A0_im(s
, 1 << dflag
);
5147 gen_op_ld_v(s
, dflag
, s
->T0
, s
->A0
);
5148 gen_op_movl_seg_T0_vm(s
, R_CS
);
5149 /* add stack offset */
5150 gen_stack_update(s
, val
+ (2 << dflag
));
5152 s
->base
.is_jmp
= DISAS_EOB_ONLY
;
5154 case 0xcb: /* lret */
5157 case 0xcf: /* iret */
5158 gen_svm_check_intercept(s
, SVM_EXIT_IRET
);
5159 if (!PE(s
) || VM86(s
)) {
5160 /* real mode or vm86 mode */
5161 if (!check_vm86_iopl(s
)) {
5164 gen_helper_iret_real(cpu_env
, tcg_const_i32(dflag
- 1));
5166 gen_helper_iret_protected(cpu_env
, tcg_constant_i32(dflag
- 1),
5169 set_cc_op(s
, CC_OP_EFLAGS
);
5170 s
->base
.is_jmp
= DISAS_EOB_ONLY
;
5172 case 0xe8: /* call im */
5174 int diff
= (dflag
!= MO_16
5175 ? (int32_t)insn_get(env
, s
, MO_32
)
5176 : (int16_t)insn_get(env
, s
, MO_16
));
5177 gen_push_v(s
, eip_next_tl(s
));
5179 gen_jmp_rel(s
, dflag
, diff
, 0);
5182 case 0x9a: /* lcall im */
5184 unsigned int selector
, offset
;
5189 offset
= insn_get(env
, s
, ot
);
5190 selector
= insn_get(env
, s
, MO_16
);
5192 tcg_gen_movi_tl(s
->T0
, selector
);
5193 tcg_gen_movi_tl(s
->T1
, offset
);
5196 case 0xe9: /* jmp im */
5198 int diff
= (dflag
!= MO_16
5199 ? (int32_t)insn_get(env
, s
, MO_32
)
5200 : (int16_t)insn_get(env
, s
, MO_16
));
5202 gen_jmp_rel(s
, dflag
, diff
, 0);
5205 case 0xea: /* ljmp im */
5207 unsigned int selector
, offset
;
5212 offset
= insn_get(env
, s
, ot
);
5213 selector
= insn_get(env
, s
, MO_16
);
5215 tcg_gen_movi_tl(s
->T0
, selector
);
5216 tcg_gen_movi_tl(s
->T1
, offset
);
5219 case 0xeb: /* jmp Jb */
5221 int diff
= (int8_t)insn_get(env
, s
, MO_8
);
5222 gen_jmp_rel(s
, dflag
, diff
, 0);
5225 case 0x70 ... 0x7f: /* jcc Jb */
5227 int diff
= (int8_t)insn_get(env
, s
, MO_8
);
5229 gen_jcc(s
, b
, diff
);
5232 case 0x180 ... 0x18f: /* jcc Jv */
5234 int diff
= (dflag
!= MO_16
5235 ? (int32_t)insn_get(env
, s
, MO_32
)
5236 : (int16_t)insn_get(env
, s
, MO_16
));
5238 gen_jcc(s
, b
, diff
);
5242 case 0x190 ... 0x19f: /* setcc Gv */
5243 modrm
= x86_ldub_code(env
, s
);
5244 gen_setcc1(s
, b
, s
->T0
);
5245 gen_ldst_modrm(env
, s
, modrm
, MO_8
, OR_TMP0
, 1);
5247 case 0x140 ... 0x14f: /* cmov Gv, Ev */
5248 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
5252 modrm
= x86_ldub_code(env
, s
);
5253 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5254 gen_cmovcc1(env
, s
, ot
, b
, modrm
, reg
);
5257 /************************/
5259 case 0x9c: /* pushf */
5260 gen_svm_check_intercept(s
, SVM_EXIT_PUSHF
);
5261 if (check_vm86_iopl(s
)) {
5262 gen_update_cc_op(s
);
5263 gen_helper_read_eflags(s
->T0
, cpu_env
);
5264 gen_push_v(s
, s
->T0
);
5267 case 0x9d: /* popf */
5268 gen_svm_check_intercept(s
, SVM_EXIT_POPF
);
5269 if (check_vm86_iopl(s
)) {
5272 if (dflag
!= MO_16
) {
5273 gen_helper_write_eflags(cpu_env
, s
->T0
,
5274 tcg_const_i32((TF_MASK
| AC_MASK
|
5279 gen_helper_write_eflags(cpu_env
, s
->T0
,
5280 tcg_const_i32((TF_MASK
| AC_MASK
|
5282 IF_MASK
| IOPL_MASK
)
5286 if (CPL(s
) <= IOPL(s
)) {
5287 if (dflag
!= MO_16
) {
5288 gen_helper_write_eflags(cpu_env
, s
->T0
,
5289 tcg_const_i32((TF_MASK
|
5295 gen_helper_write_eflags(cpu_env
, s
->T0
,
5296 tcg_const_i32((TF_MASK
|
5304 if (dflag
!= MO_16
) {
5305 gen_helper_write_eflags(cpu_env
, s
->T0
,
5306 tcg_const_i32((TF_MASK
| AC_MASK
|
5307 ID_MASK
| NT_MASK
)));
5309 gen_helper_write_eflags(cpu_env
, s
->T0
,
5310 tcg_const_i32((TF_MASK
| AC_MASK
|
5316 gen_pop_update(s
, ot
);
5317 set_cc_op(s
, CC_OP_EFLAGS
);
5318 /* abort translation because TF/AC flag may change */
5319 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
5322 case 0x9e: /* sahf */
5323 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
5325 tcg_gen_shri_tl(s
->T0
, cpu_regs
[R_EAX
], 8);
5326 gen_compute_eflags(s
);
5327 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, CC_O
);
5328 tcg_gen_andi_tl(s
->T0
, s
->T0
, CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
5329 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, s
->T0
);
5331 case 0x9f: /* lahf */
5332 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
5334 gen_compute_eflags(s
);
5335 /* Note: gen_compute_eflags() only gives the condition codes */
5336 tcg_gen_ori_tl(s
->T0
, cpu_cc_src
, 0x02);
5337 tcg_gen_deposit_tl(cpu_regs
[R_EAX
], cpu_regs
[R_EAX
], s
->T0
, 8, 8);
5339 case 0xf5: /* cmc */
5340 gen_compute_eflags(s
);
5341 tcg_gen_xori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
5343 case 0xf8: /* clc */
5344 gen_compute_eflags(s
);
5345 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_C
);
5347 case 0xf9: /* stc */
5348 gen_compute_eflags(s
);
5349 tcg_gen_ori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
5351 case 0xfc: /* cld */
5352 tcg_gen_movi_i32(s
->tmp2_i32
, 1);
5353 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
5355 case 0xfd: /* std */
5356 tcg_gen_movi_i32(s
->tmp2_i32
, -1);
5357 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
5360 /************************/
5361 /* bit operations */
5362 case 0x1ba: /* bt/bts/btr/btc Gv, im */
5364 modrm
= x86_ldub_code(env
, s
);
5365 op
= (modrm
>> 3) & 7;
5366 mod
= (modrm
>> 6) & 3;
5367 rm
= (modrm
& 7) | REX_B(s
);
5370 gen_lea_modrm(env
, s
, modrm
);
5371 if (!(s
->prefix
& PREFIX_LOCK
)) {
5372 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
5375 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
5378 val
= x86_ldub_code(env
, s
);
5379 tcg_gen_movi_tl(s
->T1
, val
);
5384 case 0x1a3: /* bt Gv, Ev */
5387 case 0x1ab: /* bts */
5390 case 0x1b3: /* btr */
5393 case 0x1bb: /* btc */
5397 modrm
= x86_ldub_code(env
, s
);
5398 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5399 mod
= (modrm
>> 6) & 3;
5400 rm
= (modrm
& 7) | REX_B(s
);
5401 gen_op_mov_v_reg(s
, MO_32
, s
->T1
, reg
);
5403 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
5404 /* specific case: we need to add a displacement */
5405 gen_exts(ot
, s
->T1
);
5406 tcg_gen_sari_tl(s
->tmp0
, s
->T1
, 3 + ot
);
5407 tcg_gen_shli_tl(s
->tmp0
, s
->tmp0
, ot
);
5408 tcg_gen_add_tl(s
->A0
, gen_lea_modrm_1(s
, a
, false), s
->tmp0
);
5409 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, a
.def_seg
, s
->override
);
5410 if (!(s
->prefix
& PREFIX_LOCK
)) {
5411 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
5414 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
5417 tcg_gen_andi_tl(s
->T1
, s
->T1
, (1 << (3 + ot
)) - 1);
5418 tcg_gen_movi_tl(s
->tmp0
, 1);
5419 tcg_gen_shl_tl(s
->tmp0
, s
->tmp0
, s
->T1
);
5420 if (s
->prefix
& PREFIX_LOCK
) {
5423 /* Needs no atomic ops; we suppressed the normal
5424 memory load for LOCK above so do it now. */
5425 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
5428 tcg_gen_atomic_fetch_or_tl(s
->T0
, s
->A0
, s
->tmp0
,
5429 s
->mem_index
, ot
| MO_LE
);
5432 tcg_gen_not_tl(s
->tmp0
, s
->tmp0
);
5433 tcg_gen_atomic_fetch_and_tl(s
->T0
, s
->A0
, s
->tmp0
,
5434 s
->mem_index
, ot
| MO_LE
);
5438 tcg_gen_atomic_fetch_xor_tl(s
->T0
, s
->A0
, s
->tmp0
,
5439 s
->mem_index
, ot
| MO_LE
);
5442 tcg_gen_shr_tl(s
->tmp4
, s
->T0
, s
->T1
);
5444 tcg_gen_shr_tl(s
->tmp4
, s
->T0
, s
->T1
);
5447 /* Data already loaded; nothing to do. */
5450 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->tmp0
);
5453 tcg_gen_andc_tl(s
->T0
, s
->T0
, s
->tmp0
);
5457 tcg_gen_xor_tl(s
->T0
, s
->T0
, s
->tmp0
);
5462 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
5464 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
5469 /* Delay all CC updates until after the store above. Note that
5470 C is the result of the test, Z is unchanged, and the others
5471 are all undefined. */
5473 case CC_OP_MULB
... CC_OP_MULQ
:
5474 case CC_OP_ADDB
... CC_OP_ADDQ
:
5475 case CC_OP_ADCB
... CC_OP_ADCQ
:
5476 case CC_OP_SUBB
... CC_OP_SUBQ
:
5477 case CC_OP_SBBB
... CC_OP_SBBQ
:
5478 case CC_OP_LOGICB
... CC_OP_LOGICQ
:
5479 case CC_OP_INCB
... CC_OP_INCQ
:
5480 case CC_OP_DECB
... CC_OP_DECQ
:
5481 case CC_OP_SHLB
... CC_OP_SHLQ
:
5482 case CC_OP_SARB
... CC_OP_SARQ
:
5483 case CC_OP_BMILGB
... CC_OP_BMILGQ
:
5484 /* Z was going to be computed from the non-zero status of CC_DST.
5485 We can get that same Z value (and the new C value) by leaving
5486 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
5488 tcg_gen_mov_tl(cpu_cc_src
, s
->tmp4
);
5489 set_cc_op(s
, ((s
->cc_op
- CC_OP_MULB
) & 3) + CC_OP_SARB
);
5492 /* Otherwise, generate EFLAGS and replace the C bit. */
5493 gen_compute_eflags(s
);
5494 tcg_gen_deposit_tl(cpu_cc_src
, cpu_cc_src
, s
->tmp4
,
5499 case 0x1bc: /* bsf / tzcnt */
5500 case 0x1bd: /* bsr / lzcnt */
5502 modrm
= x86_ldub_code(env
, s
);
5503 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5504 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5505 gen_extu(ot
, s
->T0
);
5507 /* Note that lzcnt and tzcnt are in different extensions. */
5508 if ((prefixes
& PREFIX_REPZ
)
5510 ? s
->cpuid_ext3_features
& CPUID_EXT3_ABM
5511 : s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)) {
5513 /* For lzcnt/tzcnt, C bit is defined related to the input. */
5514 tcg_gen_mov_tl(cpu_cc_src
, s
->T0
);
5516 /* For lzcnt, reduce the target_ulong result by the
5517 number of zeros that we expect to find at the top. */
5518 tcg_gen_clzi_tl(s
->T0
, s
->T0
, TARGET_LONG_BITS
);
5519 tcg_gen_subi_tl(s
->T0
, s
->T0
, TARGET_LONG_BITS
- size
);
5521 /* For tzcnt, a zero input must return the operand size. */
5522 tcg_gen_ctzi_tl(s
->T0
, s
->T0
, size
);
5524 /* For lzcnt/tzcnt, Z bit is defined related to the result. */
5525 gen_op_update1_cc(s
);
5526 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
5528 /* For bsr/bsf, only the Z bit is defined and it is related
5529 to the input and not the result. */
5530 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
5531 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
5533 /* ??? The manual says that the output is undefined when the
5534 input is zero, but real hardware leaves it unchanged, and
5535 real programs appear to depend on that. Accomplish this
5536 by passing the output as the value to return upon zero. */
5538 /* For bsr, return the bit index of the first 1 bit,
5539 not the count of leading zeros. */
5540 tcg_gen_xori_tl(s
->T1
, cpu_regs
[reg
], TARGET_LONG_BITS
- 1);
5541 tcg_gen_clz_tl(s
->T0
, s
->T0
, s
->T1
);
5542 tcg_gen_xori_tl(s
->T0
, s
->T0
, TARGET_LONG_BITS
- 1);
5544 tcg_gen_ctz_tl(s
->T0
, s
->T0
, cpu_regs
[reg
]);
5547 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
5549 /************************/
5551 case 0x27: /* daa */
5554 gen_update_cc_op(s
);
5555 gen_helper_daa(cpu_env
);
5556 set_cc_op(s
, CC_OP_EFLAGS
);
5558 case 0x2f: /* das */
5561 gen_update_cc_op(s
);
5562 gen_helper_das(cpu_env
);
5563 set_cc_op(s
, CC_OP_EFLAGS
);
5565 case 0x37: /* aaa */
5568 gen_update_cc_op(s
);
5569 gen_helper_aaa(cpu_env
);
5570 set_cc_op(s
, CC_OP_EFLAGS
);
5572 case 0x3f: /* aas */
5575 gen_update_cc_op(s
);
5576 gen_helper_aas(cpu_env
);
5577 set_cc_op(s
, CC_OP_EFLAGS
);
5579 case 0xd4: /* aam */
5582 val
= x86_ldub_code(env
, s
);
5584 gen_exception(s
, EXCP00_DIVZ
);
5586 gen_helper_aam(cpu_env
, tcg_const_i32(val
));
5587 set_cc_op(s
, CC_OP_LOGICB
);
5590 case 0xd5: /* aad */
5593 val
= x86_ldub_code(env
, s
);
5594 gen_helper_aad(cpu_env
, tcg_const_i32(val
));
5595 set_cc_op(s
, CC_OP_LOGICB
);
5597 /************************/
5599 case 0x90: /* nop */
5600 /* XXX: correct lock test for all insn */
5601 if (prefixes
& PREFIX_LOCK
) {
5604 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
5606 goto do_xchg_reg_eax
;
5608 if (prefixes
& PREFIX_REPZ
) {
5609 gen_update_cc_op(s
);
5610 gen_update_eip_cur(s
);
5611 gen_helper_pause(cpu_env
, cur_insn_len_i32(s
));
5612 s
->base
.is_jmp
= DISAS_NORETURN
;
5615 case 0x9b: /* fwait */
5616 if ((s
->flags
& (HF_MP_MASK
| HF_TS_MASK
)) ==
5617 (HF_MP_MASK
| HF_TS_MASK
)) {
5618 gen_exception(s
, EXCP07_PREX
);
5620 gen_helper_fwait(cpu_env
);
5623 case 0xcc: /* int3 */
5624 gen_interrupt(s
, EXCP03_INT3
);
5626 case 0xcd: /* int N */
5627 val
= x86_ldub_code(env
, s
);
5628 if (check_vm86_iopl(s
)) {
5629 gen_interrupt(s
, val
);
5632 case 0xce: /* into */
5635 gen_update_cc_op(s
);
5636 gen_update_eip_cur(s
);
5637 gen_helper_into(cpu_env
, cur_insn_len_i32(s
));
5640 case 0xf1: /* icebp (undocumented, exits to external debugger) */
5641 gen_svm_check_intercept(s
, SVM_EXIT_ICEBP
);
5645 case 0xfa: /* cli */
5646 if (check_iopl(s
)) {
5647 gen_reset_eflags(s
, IF_MASK
);
5650 case 0xfb: /* sti */
5651 if (check_iopl(s
)) {
5652 gen_set_eflags(s
, IF_MASK
);
5653 /* interruptions are enabled only the first insn after sti */
5654 gen_update_eip_next(s
);
5655 gen_eob_inhibit_irq(s
, true);
5658 case 0x62: /* bound */
5662 modrm
= x86_ldub_code(env
, s
);
5663 reg
= (modrm
>> 3) & 7;
5664 mod
= (modrm
>> 6) & 3;
5667 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
5668 gen_lea_modrm(env
, s
, modrm
);
5669 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5671 gen_helper_boundw(cpu_env
, s
->A0
, s
->tmp2_i32
);
5673 gen_helper_boundl(cpu_env
, s
->A0
, s
->tmp2_i32
);
5676 case 0x1c8 ... 0x1cf: /* bswap reg */
5677 reg
= (b
& 7) | REX_B(s
);
5678 #ifdef TARGET_X86_64
5679 if (dflag
== MO_64
) {
5680 tcg_gen_bswap64_i64(cpu_regs
[reg
], cpu_regs
[reg
]);
5684 tcg_gen_bswap32_tl(cpu_regs
[reg
], cpu_regs
[reg
], TCG_BSWAP_OZ
);
5686 case 0xd6: /* salc */
5689 gen_compute_eflags_c(s
, s
->T0
);
5690 tcg_gen_neg_tl(s
->T0
, s
->T0
);
5691 gen_op_mov_reg_v(s
, MO_8
, R_EAX
, s
->T0
);
5693 case 0xe0: /* loopnz */
5694 case 0xe1: /* loopz */
5695 case 0xe2: /* loop */
5696 case 0xe3: /* jecxz */
5699 int diff
= (int8_t)insn_get(env
, s
, MO_8
);
5701 l1
= gen_new_label();
5702 l2
= gen_new_label();
5703 gen_update_cc_op(s
);
5706 case 0: /* loopnz */
5708 gen_op_add_reg_im(s
, s
->aflag
, R_ECX
, -1);
5709 gen_op_jz_ecx(s
, l2
);
5710 gen_jcc1(s
, (JCC_Z
<< 1) | (b
^ 1), l1
);
5713 gen_op_add_reg_im(s
, s
->aflag
, R_ECX
, -1);
5714 gen_op_jnz_ecx(s
, l1
);
5718 gen_op_jz_ecx(s
, l1
);
5723 gen_jmp_rel_csize(s
, 0, 1);
5726 gen_jmp_rel(s
, dflag
, diff
, 0);
5729 case 0x130: /* wrmsr */
5730 case 0x132: /* rdmsr */
5731 if (check_cpl0(s
)) {
5732 gen_update_cc_op(s
);
5733 gen_update_eip_cur(s
);
5735 gen_helper_rdmsr(cpu_env
);
5737 gen_helper_wrmsr(cpu_env
);
5738 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
5742 case 0x131: /* rdtsc */
5743 gen_update_cc_op(s
);
5744 gen_update_eip_cur(s
);
5745 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
5747 s
->base
.is_jmp
= DISAS_TOO_MANY
;
5749 gen_helper_rdtsc(cpu_env
);
5751 case 0x133: /* rdpmc */
5752 gen_update_cc_op(s
);
5753 gen_update_eip_cur(s
);
5754 gen_helper_rdpmc(cpu_env
);
5755 s
->base
.is_jmp
= DISAS_NORETURN
;
5757 case 0x134: /* sysenter */
5758 /* For Intel SYSENTER is valid on 64-bit */
5759 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
5762 gen_exception_gpf(s
);
5764 gen_helper_sysenter(cpu_env
);
5765 s
->base
.is_jmp
= DISAS_EOB_ONLY
;
5768 case 0x135: /* sysexit */
5769 /* For Intel SYSEXIT is valid on 64-bit */
5770 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
5773 gen_exception_gpf(s
);
5775 gen_helper_sysexit(cpu_env
, tcg_const_i32(dflag
- 1));
5776 s
->base
.is_jmp
= DISAS_EOB_ONLY
;
5779 #ifdef TARGET_X86_64
5780 case 0x105: /* syscall */
5781 /* XXX: is it usable in real mode ? */
5782 gen_update_cc_op(s
);
5783 gen_update_eip_cur(s
);
5784 gen_helper_syscall(cpu_env
, cur_insn_len_i32(s
));
5785 /* TF handling for the syscall insn is different. The TF bit is checked
5786 after the syscall insn completes. This allows #DB to not be
5787 generated after one has entered CPL0 if TF is set in FMASK. */
5788 gen_eob_worker(s
, false, true);
5790 case 0x107: /* sysret */
5792 gen_exception_gpf(s
);
5794 gen_helper_sysret(cpu_env
, tcg_const_i32(dflag
- 1));
5795 /* condition codes are modified only in long mode */
5797 set_cc_op(s
, CC_OP_EFLAGS
);
5799 /* TF handling for the sysret insn is different. The TF bit is
5800 checked after the sysret insn completes. This allows #DB to be
5801 generated "as if" the syscall insn in userspace has just
5803 gen_eob_worker(s
, false, true);
5807 case 0x1a2: /* cpuid */
5808 gen_update_cc_op(s
);
5809 gen_update_eip_cur(s
);
5810 gen_helper_cpuid(cpu_env
);
5812 case 0xf4: /* hlt */
5813 if (check_cpl0(s
)) {
5814 gen_update_cc_op(s
);
5815 gen_update_eip_cur(s
);
5816 gen_helper_hlt(cpu_env
, cur_insn_len_i32(s
));
5817 s
->base
.is_jmp
= DISAS_NORETURN
;
5821 modrm
= x86_ldub_code(env
, s
);
5822 mod
= (modrm
>> 6) & 3;
5823 op
= (modrm
>> 3) & 7;
5826 if (!PE(s
) || VM86(s
))
5828 if (s
->flags
& HF_UMIP_MASK
&& !check_cpl0(s
)) {
5831 gen_svm_check_intercept(s
, SVM_EXIT_LDTR_READ
);
5832 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
5833 offsetof(CPUX86State
, ldt
.selector
));
5834 ot
= mod
== 3 ? dflag
: MO_16
;
5835 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5838 if (!PE(s
) || VM86(s
))
5840 if (check_cpl0(s
)) {
5841 gen_svm_check_intercept(s
, SVM_EXIT_LDTR_WRITE
);
5842 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
5843 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5844 gen_helper_lldt(cpu_env
, s
->tmp2_i32
);
5848 if (!PE(s
) || VM86(s
))
5850 if (s
->flags
& HF_UMIP_MASK
&& !check_cpl0(s
)) {
5853 gen_svm_check_intercept(s
, SVM_EXIT_TR_READ
);
5854 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
5855 offsetof(CPUX86State
, tr
.selector
));
5856 ot
= mod
== 3 ? dflag
: MO_16
;
5857 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5860 if (!PE(s
) || VM86(s
))
5862 if (check_cpl0(s
)) {
5863 gen_svm_check_intercept(s
, SVM_EXIT_TR_WRITE
);
5864 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
5865 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5866 gen_helper_ltr(cpu_env
, s
->tmp2_i32
);
5871 if (!PE(s
) || VM86(s
))
5873 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
5874 gen_update_cc_op(s
);
5876 gen_helper_verr(cpu_env
, s
->T0
);
5878 gen_helper_verw(cpu_env
, s
->T0
);
5880 set_cc_op(s
, CC_OP_EFLAGS
);
5888 modrm
= x86_ldub_code(env
, s
);
5890 CASE_MODRM_MEM_OP(0): /* sgdt */
5891 if (s
->flags
& HF_UMIP_MASK
&& !check_cpl0(s
)) {
5894 gen_svm_check_intercept(s
, SVM_EXIT_GDTR_READ
);
5895 gen_lea_modrm(env
, s
, modrm
);
5896 tcg_gen_ld32u_tl(s
->T0
,
5897 cpu_env
, offsetof(CPUX86State
, gdt
.limit
));
5898 gen_op_st_v(s
, MO_16
, s
->T0
, s
->A0
);
5899 gen_add_A0_im(s
, 2);
5900 tcg_gen_ld_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, gdt
.base
));
5901 if (dflag
== MO_16
) {
5902 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xffffff);
5904 gen_op_st_v(s
, CODE64(s
) + MO_32
, s
->T0
, s
->A0
);
5907 case 0xc8: /* monitor */
5908 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) || CPL(s
) != 0) {
5911 gen_update_cc_op(s
);
5912 gen_update_eip_cur(s
);
5913 tcg_gen_mov_tl(s
->A0
, cpu_regs
[R_EAX
]);
5914 gen_extu(s
->aflag
, s
->A0
);
5915 gen_add_A0_ds_seg(s
);
5916 gen_helper_monitor(cpu_env
, s
->A0
);
5919 case 0xc9: /* mwait */
5920 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) || CPL(s
) != 0) {
5923 gen_update_cc_op(s
);
5924 gen_update_eip_cur(s
);
5925 gen_helper_mwait(cpu_env
, cur_insn_len_i32(s
));
5926 s
->base
.is_jmp
= DISAS_NORETURN
;
5929 case 0xca: /* clac */
5930 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
)
5934 gen_reset_eflags(s
, AC_MASK
);
5935 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
5938 case 0xcb: /* stac */
5939 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
)
5943 gen_set_eflags(s
, AC_MASK
);
5944 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
5947 CASE_MODRM_MEM_OP(1): /* sidt */
5948 if (s
->flags
& HF_UMIP_MASK
&& !check_cpl0(s
)) {
5951 gen_svm_check_intercept(s
, SVM_EXIT_IDTR_READ
);
5952 gen_lea_modrm(env
, s
, modrm
);
5953 tcg_gen_ld32u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, idt
.limit
));
5954 gen_op_st_v(s
, MO_16
, s
->T0
, s
->A0
);
5955 gen_add_A0_im(s
, 2);
5956 tcg_gen_ld_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, idt
.base
));
5957 if (dflag
== MO_16
) {
5958 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xffffff);
5960 gen_op_st_v(s
, CODE64(s
) + MO_32
, s
->T0
, s
->A0
);
5963 case 0xd0: /* xgetbv */
5964 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
5965 || (s
->prefix
& (PREFIX_LOCK
| PREFIX_DATA
5966 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
5969 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_ECX
]);
5970 gen_helper_xgetbv(s
->tmp1_i64
, cpu_env
, s
->tmp2_i32
);
5971 tcg_gen_extr_i64_tl(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
], s
->tmp1_i64
);
5974 case 0xd1: /* xsetbv */
5975 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
5976 || (s
->prefix
& (PREFIX_LOCK
| PREFIX_DATA
5977 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
5980 if (!check_cpl0(s
)) {
5983 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
5985 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_ECX
]);
5986 gen_helper_xsetbv(cpu_env
, s
->tmp2_i32
, s
->tmp1_i64
);
5987 /* End TB because translation flags may change. */
5988 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
5991 case 0xd8: /* VMRUN */
5992 if (!SVME(s
) || !PE(s
)) {
5995 if (!check_cpl0(s
)) {
5998 gen_update_cc_op(s
);
5999 gen_update_eip_cur(s
);
6000 gen_helper_vmrun(cpu_env
, tcg_const_i32(s
->aflag
- 1),
6001 cur_insn_len_i32(s
));
6002 tcg_gen_exit_tb(NULL
, 0);
6003 s
->base
.is_jmp
= DISAS_NORETURN
;
6006 case 0xd9: /* VMMCALL */
6010 gen_update_cc_op(s
);
6011 gen_update_eip_cur(s
);
6012 gen_helper_vmmcall(cpu_env
);
6015 case 0xda: /* VMLOAD */
6016 if (!SVME(s
) || !PE(s
)) {
6019 if (!check_cpl0(s
)) {
6022 gen_update_cc_op(s
);
6023 gen_update_eip_cur(s
);
6024 gen_helper_vmload(cpu_env
, tcg_const_i32(s
->aflag
- 1));
6027 case 0xdb: /* VMSAVE */
6028 if (!SVME(s
) || !PE(s
)) {
6031 if (!check_cpl0(s
)) {
6034 gen_update_cc_op(s
);
6035 gen_update_eip_cur(s
);
6036 gen_helper_vmsave(cpu_env
, tcg_const_i32(s
->aflag
- 1));
6039 case 0xdc: /* STGI */
6040 if ((!SVME(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
))
6044 if (!check_cpl0(s
)) {
6047 gen_update_cc_op(s
);
6048 gen_helper_stgi(cpu_env
);
6049 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6052 case 0xdd: /* CLGI */
6053 if (!SVME(s
) || !PE(s
)) {
6056 if (!check_cpl0(s
)) {
6059 gen_update_cc_op(s
);
6060 gen_update_eip_cur(s
);
6061 gen_helper_clgi(cpu_env
);
6064 case 0xde: /* SKINIT */
6065 if ((!SVME(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
))
6069 gen_svm_check_intercept(s
, SVM_EXIT_SKINIT
);
6070 /* If not intercepted, not implemented -- raise #UD. */
6073 case 0xdf: /* INVLPGA */
6074 if (!SVME(s
) || !PE(s
)) {
6077 if (!check_cpl0(s
)) {
6080 gen_svm_check_intercept(s
, SVM_EXIT_INVLPGA
);
6081 if (s
->aflag
== MO_64
) {
6082 tcg_gen_mov_tl(s
->A0
, cpu_regs
[R_EAX
]);
6084 tcg_gen_ext32u_tl(s
->A0
, cpu_regs
[R_EAX
]);
6086 gen_helper_flush_page(cpu_env
, s
->A0
);
6087 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6090 CASE_MODRM_MEM_OP(2): /* lgdt */
6091 if (!check_cpl0(s
)) {
6094 gen_svm_check_intercept(s
, SVM_EXIT_GDTR_WRITE
);
6095 gen_lea_modrm(env
, s
, modrm
);
6096 gen_op_ld_v(s
, MO_16
, s
->T1
, s
->A0
);
6097 gen_add_A0_im(s
, 2);
6098 gen_op_ld_v(s
, CODE64(s
) + MO_32
, s
->T0
, s
->A0
);
6099 if (dflag
== MO_16
) {
6100 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xffffff);
6102 tcg_gen_st_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, gdt
.base
));
6103 tcg_gen_st32_tl(s
->T1
, cpu_env
, offsetof(CPUX86State
, gdt
.limit
));
6106 CASE_MODRM_MEM_OP(3): /* lidt */
6107 if (!check_cpl0(s
)) {
6110 gen_svm_check_intercept(s
, SVM_EXIT_IDTR_WRITE
);
6111 gen_lea_modrm(env
, s
, modrm
);
6112 gen_op_ld_v(s
, MO_16
, s
->T1
, s
->A0
);
6113 gen_add_A0_im(s
, 2);
6114 gen_op_ld_v(s
, CODE64(s
) + MO_32
, s
->T0
, s
->A0
);
6115 if (dflag
== MO_16
) {
6116 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xffffff);
6118 tcg_gen_st_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, idt
.base
));
6119 tcg_gen_st32_tl(s
->T1
, cpu_env
, offsetof(CPUX86State
, idt
.limit
));
6122 CASE_MODRM_OP(4): /* smsw */
6123 if (s
->flags
& HF_UMIP_MASK
&& !check_cpl0(s
)) {
6126 gen_svm_check_intercept(s
, SVM_EXIT_READ_CR0
);
6127 tcg_gen_ld_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, cr
[0]));
6129 * In 32-bit mode, the higher 16 bits of the destination
6130 * register are undefined. In practice CR0[31:0] is stored
6131 * just like in 64-bit mode.
6133 mod
= (modrm
>> 6) & 3;
6134 ot
= (mod
!= 3 ? MO_16
: s
->dflag
);
6135 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
6137 case 0xee: /* rdpkru */
6138 if (prefixes
& PREFIX_LOCK
) {
6141 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_ECX
]);
6142 gen_helper_rdpkru(s
->tmp1_i64
, cpu_env
, s
->tmp2_i32
);
6143 tcg_gen_extr_i64_tl(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
], s
->tmp1_i64
);
6145 case 0xef: /* wrpkru */
6146 if (prefixes
& PREFIX_LOCK
) {
6149 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
6151 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_ECX
]);
6152 gen_helper_wrpkru(cpu_env
, s
->tmp2_i32
, s
->tmp1_i64
);
6155 CASE_MODRM_OP(6): /* lmsw */
6156 if (!check_cpl0(s
)) {
6159 gen_svm_check_intercept(s
, SVM_EXIT_WRITE_CR0
);
6160 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
6162 * Only the 4 lower bits of CR0 are modified.
6163 * PE cannot be set to zero if already set to one.
6165 tcg_gen_ld_tl(s
->T1
, cpu_env
, offsetof(CPUX86State
, cr
[0]));
6166 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xf);
6167 tcg_gen_andi_tl(s
->T1
, s
->T1
, ~0xe);
6168 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->T1
);
6169 gen_helper_write_crN(cpu_env
, tcg_constant_i32(0), s
->T0
);
6170 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6173 CASE_MODRM_MEM_OP(7): /* invlpg */
6174 if (!check_cpl0(s
)) {
6177 gen_svm_check_intercept(s
, SVM_EXIT_INVLPG
);
6178 gen_lea_modrm(env
, s
, modrm
);
6179 gen_helper_flush_page(cpu_env
, s
->A0
);
6180 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6183 case 0xf8: /* swapgs */
6184 #ifdef TARGET_X86_64
6186 if (check_cpl0(s
)) {
6187 tcg_gen_mov_tl(s
->T0
, cpu_seg_base
[R_GS
]);
6188 tcg_gen_ld_tl(cpu_seg_base
[R_GS
], cpu_env
,
6189 offsetof(CPUX86State
, kernelgsbase
));
6190 tcg_gen_st_tl(s
->T0
, cpu_env
,
6191 offsetof(CPUX86State
, kernelgsbase
));
6198 case 0xf9: /* rdtscp */
6199 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_RDTSCP
)) {
6202 gen_update_cc_op(s
);
6203 gen_update_eip_cur(s
);
6204 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6206 s
->base
.is_jmp
= DISAS_TOO_MANY
;
6208 gen_helper_rdtscp(cpu_env
);
6216 case 0x108: /* invd */
6217 case 0x109: /* wbinvd */
6218 if (check_cpl0(s
)) {
6219 gen_svm_check_intercept(s
, (b
& 2) ? SVM_EXIT_INVD
: SVM_EXIT_WBINVD
);
6223 case 0x63: /* arpl or movslS (x86_64) */
6224 #ifdef TARGET_X86_64
6227 /* d_ot is the size of destination */
6230 modrm
= x86_ldub_code(env
, s
);
6231 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6232 mod
= (modrm
>> 6) & 3;
6233 rm
= (modrm
& 7) | REX_B(s
);
6236 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, rm
);
6238 if (d_ot
== MO_64
) {
6239 tcg_gen_ext32s_tl(s
->T0
, s
->T0
);
6241 gen_op_mov_reg_v(s
, d_ot
, reg
, s
->T0
);
6243 gen_lea_modrm(env
, s
, modrm
);
6244 gen_op_ld_v(s
, MO_32
| MO_SIGN
, s
->T0
, s
->A0
);
6245 gen_op_mov_reg_v(s
, d_ot
, reg
, s
->T0
);
6251 TCGv t0
, t1
, t2
, a0
;
6253 if (!PE(s
) || VM86(s
))
6255 t0
= tcg_temp_local_new();
6256 t1
= tcg_temp_local_new();
6257 t2
= tcg_temp_local_new();
6259 modrm
= x86_ldub_code(env
, s
);
6260 reg
= (modrm
>> 3) & 7;
6261 mod
= (modrm
>> 6) & 3;
6264 gen_lea_modrm(env
, s
, modrm
);
6265 gen_op_ld_v(s
, ot
, t0
, s
->A0
);
6266 a0
= tcg_temp_local_new();
6267 tcg_gen_mov_tl(a0
, s
->A0
);
6269 gen_op_mov_v_reg(s
, ot
, t0
, rm
);
6272 gen_op_mov_v_reg(s
, ot
, t1
, reg
);
6273 tcg_gen_andi_tl(s
->tmp0
, t0
, 3);
6274 tcg_gen_andi_tl(t1
, t1
, 3);
6275 tcg_gen_movi_tl(t2
, 0);
6276 label1
= gen_new_label();
6277 tcg_gen_brcond_tl(TCG_COND_GE
, s
->tmp0
, t1
, label1
);
6278 tcg_gen_andi_tl(t0
, t0
, ~3);
6279 tcg_gen_or_tl(t0
, t0
, t1
);
6280 tcg_gen_movi_tl(t2
, CC_Z
);
6281 gen_set_label(label1
);
6283 gen_op_st_v(s
, ot
, t0
, a0
);
6286 gen_op_mov_reg_v(s
, ot
, rm
, t0
);
6288 gen_compute_eflags(s
);
6289 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_Z
);
6290 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t2
);
6296 case 0x102: /* lar */
6297 case 0x103: /* lsl */
6301 if (!PE(s
) || VM86(s
))
6303 ot
= dflag
!= MO_16
? MO_32
: MO_16
;
6304 modrm
= x86_ldub_code(env
, s
);
6305 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6306 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
6307 t0
= tcg_temp_local_new();
6308 gen_update_cc_op(s
);
6310 gen_helper_lar(t0
, cpu_env
, s
->T0
);
6312 gen_helper_lsl(t0
, cpu_env
, s
->T0
);
6314 tcg_gen_andi_tl(s
->tmp0
, cpu_cc_src
, CC_Z
);
6315 label1
= gen_new_label();
6316 tcg_gen_brcondi_tl(TCG_COND_EQ
, s
->tmp0
, 0, label1
);
6317 gen_op_mov_reg_v(s
, ot
, reg
, t0
);
6318 gen_set_label(label1
);
6319 set_cc_op(s
, CC_OP_EFLAGS
);
6324 modrm
= x86_ldub_code(env
, s
);
6325 mod
= (modrm
>> 6) & 3;
6326 op
= (modrm
>> 3) & 7;
6328 case 0: /* prefetchnta */
6329 case 1: /* prefetchnt0 */
6330 case 2: /* prefetchnt0 */
6331 case 3: /* prefetchnt0 */
6334 gen_nop_modrm(env
, s
, modrm
);
6335 /* nothing more to do */
6337 default: /* nop (multi byte) */
6338 gen_nop_modrm(env
, s
, modrm
);
6343 modrm
= x86_ldub_code(env
, s
);
6344 if (s
->flags
& HF_MPX_EN_MASK
) {
6345 mod
= (modrm
>> 6) & 3;
6346 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6347 if (prefixes
& PREFIX_REPZ
) {
6350 || (prefixes
& PREFIX_LOCK
)
6351 || s
->aflag
== MO_16
) {
6354 gen_bndck(env
, s
, modrm
, TCG_COND_LTU
, cpu_bndl
[reg
]);
6355 } else if (prefixes
& PREFIX_REPNZ
) {
6358 || (prefixes
& PREFIX_LOCK
)
6359 || s
->aflag
== MO_16
) {
6362 TCGv_i64 notu
= tcg_temp_new_i64();
6363 tcg_gen_not_i64(notu
, cpu_bndu
[reg
]);
6364 gen_bndck(env
, s
, modrm
, TCG_COND_GTU
, notu
);
6365 tcg_temp_free_i64(notu
);
6366 } else if (prefixes
& PREFIX_DATA
) {
6367 /* bndmov -- from reg/mem */
6368 if (reg
>= 4 || s
->aflag
== MO_16
) {
6372 int reg2
= (modrm
& 7) | REX_B(s
);
6373 if (reg2
>= 4 || (prefixes
& PREFIX_LOCK
)) {
6376 if (s
->flags
& HF_MPX_IU_MASK
) {
6377 tcg_gen_mov_i64(cpu_bndl
[reg
], cpu_bndl
[reg2
]);
6378 tcg_gen_mov_i64(cpu_bndu
[reg
], cpu_bndu
[reg2
]);
6381 gen_lea_modrm(env
, s
, modrm
);
6383 tcg_gen_qemu_ld_i64(cpu_bndl
[reg
], s
->A0
,
6384 s
->mem_index
, MO_LEUQ
);
6385 tcg_gen_addi_tl(s
->A0
, s
->A0
, 8);
6386 tcg_gen_qemu_ld_i64(cpu_bndu
[reg
], s
->A0
,
6387 s
->mem_index
, MO_LEUQ
);
6389 tcg_gen_qemu_ld_i64(cpu_bndl
[reg
], s
->A0
,
6390 s
->mem_index
, MO_LEUL
);
6391 tcg_gen_addi_tl(s
->A0
, s
->A0
, 4);
6392 tcg_gen_qemu_ld_i64(cpu_bndu
[reg
], s
->A0
,
6393 s
->mem_index
, MO_LEUL
);
6395 /* bnd registers are now in-use */
6396 gen_set_hflag(s
, HF_MPX_IU_MASK
);
6398 } else if (mod
!= 3) {
6400 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
6402 || (prefixes
& PREFIX_LOCK
)
6403 || s
->aflag
== MO_16
6408 tcg_gen_addi_tl(s
->A0
, cpu_regs
[a
.base
], a
.disp
);
6410 tcg_gen_movi_tl(s
->A0
, 0);
6412 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, a
.def_seg
, s
->override
);
6414 tcg_gen_mov_tl(s
->T0
, cpu_regs
[a
.index
]);
6416 tcg_gen_movi_tl(s
->T0
, 0);
6419 gen_helper_bndldx64(cpu_bndl
[reg
], cpu_env
, s
->A0
, s
->T0
);
6420 tcg_gen_ld_i64(cpu_bndu
[reg
], cpu_env
,
6421 offsetof(CPUX86State
, mmx_t0
.MMX_Q(0)));
6423 gen_helper_bndldx32(cpu_bndu
[reg
], cpu_env
, s
->A0
, s
->T0
);
6424 tcg_gen_ext32u_i64(cpu_bndl
[reg
], cpu_bndu
[reg
]);
6425 tcg_gen_shri_i64(cpu_bndu
[reg
], cpu_bndu
[reg
], 32);
6427 gen_set_hflag(s
, HF_MPX_IU_MASK
);
6430 gen_nop_modrm(env
, s
, modrm
);
6433 modrm
= x86_ldub_code(env
, s
);
6434 if (s
->flags
& HF_MPX_EN_MASK
) {
6435 mod
= (modrm
>> 6) & 3;
6436 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6437 if (mod
!= 3 && (prefixes
& PREFIX_REPZ
)) {
6440 || (prefixes
& PREFIX_LOCK
)
6441 || s
->aflag
== MO_16
) {
6444 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
6446 tcg_gen_extu_tl_i64(cpu_bndl
[reg
], cpu_regs
[a
.base
]);
6448 tcg_gen_ext32u_i64(cpu_bndl
[reg
], cpu_bndl
[reg
]);
6450 } else if (a
.base
== -1) {
6451 /* no base register has lower bound of 0 */
6452 tcg_gen_movi_i64(cpu_bndl
[reg
], 0);
6454 /* rip-relative generates #ud */
6457 tcg_gen_not_tl(s
->A0
, gen_lea_modrm_1(s
, a
, false));
6459 tcg_gen_ext32u_tl(s
->A0
, s
->A0
);
6461 tcg_gen_extu_tl_i64(cpu_bndu
[reg
], s
->A0
);
6462 /* bnd registers are now in-use */
6463 gen_set_hflag(s
, HF_MPX_IU_MASK
);
6465 } else if (prefixes
& PREFIX_REPNZ
) {
6468 || (prefixes
& PREFIX_LOCK
)
6469 || s
->aflag
== MO_16
) {
6472 gen_bndck(env
, s
, modrm
, TCG_COND_GTU
, cpu_bndu
[reg
]);
6473 } else if (prefixes
& PREFIX_DATA
) {
6474 /* bndmov -- to reg/mem */
6475 if (reg
>= 4 || s
->aflag
== MO_16
) {
6479 int reg2
= (modrm
& 7) | REX_B(s
);
6480 if (reg2
>= 4 || (prefixes
& PREFIX_LOCK
)) {
6483 if (s
->flags
& HF_MPX_IU_MASK
) {
6484 tcg_gen_mov_i64(cpu_bndl
[reg2
], cpu_bndl
[reg
]);
6485 tcg_gen_mov_i64(cpu_bndu
[reg2
], cpu_bndu
[reg
]);
6488 gen_lea_modrm(env
, s
, modrm
);
6490 tcg_gen_qemu_st_i64(cpu_bndl
[reg
], s
->A0
,
6491 s
->mem_index
, MO_LEUQ
);
6492 tcg_gen_addi_tl(s
->A0
, s
->A0
, 8);
6493 tcg_gen_qemu_st_i64(cpu_bndu
[reg
], s
->A0
,
6494 s
->mem_index
, MO_LEUQ
);
6496 tcg_gen_qemu_st_i64(cpu_bndl
[reg
], s
->A0
,
6497 s
->mem_index
, MO_LEUL
);
6498 tcg_gen_addi_tl(s
->A0
, s
->A0
, 4);
6499 tcg_gen_qemu_st_i64(cpu_bndu
[reg
], s
->A0
,
6500 s
->mem_index
, MO_LEUL
);
6503 } else if (mod
!= 3) {
6505 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
6507 || (prefixes
& PREFIX_LOCK
)
6508 || s
->aflag
== MO_16
6513 tcg_gen_addi_tl(s
->A0
, cpu_regs
[a
.base
], a
.disp
);
6515 tcg_gen_movi_tl(s
->A0
, 0);
6517 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, a
.def_seg
, s
->override
);
6519 tcg_gen_mov_tl(s
->T0
, cpu_regs
[a
.index
]);
6521 tcg_gen_movi_tl(s
->T0
, 0);
6524 gen_helper_bndstx64(cpu_env
, s
->A0
, s
->T0
,
6525 cpu_bndl
[reg
], cpu_bndu
[reg
]);
6527 gen_helper_bndstx32(cpu_env
, s
->A0
, s
->T0
,
6528 cpu_bndl
[reg
], cpu_bndu
[reg
]);
6532 gen_nop_modrm(env
, s
, modrm
);
6534 case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
6535 modrm
= x86_ldub_code(env
, s
);
6536 gen_nop_modrm(env
, s
, modrm
);
6539 case 0x120: /* mov reg, crN */
6540 case 0x122: /* mov crN, reg */
6541 if (!check_cpl0(s
)) {
6544 modrm
= x86_ldub_code(env
, s
);
6546 * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6547 * AMD documentation (24594.pdf) and testing of Intel 386 and 486
6548 * processors all show that the mod bits are assumed to be 1's,
6549 * regardless of actual values.
6551 rm
= (modrm
& 7) | REX_B(s
);
6552 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6555 if ((prefixes
& PREFIX_LOCK
) &&
6556 (s
->cpuid_ext3_features
& CPUID_EXT3_CR8LEG
)) {
6568 ot
= (CODE64(s
) ? MO_64
: MO_32
);
6570 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6572 s
->base
.is_jmp
= DISAS_TOO_MANY
;
6575 gen_svm_check_intercept(s
, SVM_EXIT_WRITE_CR0
+ reg
);
6576 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
6577 gen_helper_write_crN(cpu_env
, tcg_constant_i32(reg
), s
->T0
);
6578 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6580 gen_svm_check_intercept(s
, SVM_EXIT_READ_CR0
+ reg
);
6581 gen_helper_read_crN(s
->T0
, cpu_env
, tcg_constant_i32(reg
));
6582 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
6586 case 0x121: /* mov reg, drN */
6587 case 0x123: /* mov drN, reg */
6588 if (check_cpl0(s
)) {
6589 modrm
= x86_ldub_code(env
, s
);
6590 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6591 * AMD documentation (24594.pdf) and testing of
6592 * intel 386 and 486 processors all show that the mod bits
6593 * are assumed to be 1's, regardless of actual values.
6595 rm
= (modrm
& 7) | REX_B(s
);
6596 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6605 gen_svm_check_intercept(s
, SVM_EXIT_WRITE_DR0
+ reg
);
6606 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
6607 tcg_gen_movi_i32(s
->tmp2_i32
, reg
);
6608 gen_helper_set_dr(cpu_env
, s
->tmp2_i32
, s
->T0
);
6609 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6611 gen_svm_check_intercept(s
, SVM_EXIT_READ_DR0
+ reg
);
6612 tcg_gen_movi_i32(s
->tmp2_i32
, reg
);
6613 gen_helper_get_dr(s
->T0
, cpu_env
, s
->tmp2_i32
);
6614 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
6618 case 0x106: /* clts */
6619 if (check_cpl0(s
)) {
6620 gen_svm_check_intercept(s
, SVM_EXIT_WRITE_CR0
);
6621 gen_helper_clts(cpu_env
);
6622 /* abort block because static cpu state changed */
6623 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6626 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
6627 case 0x1c3: /* MOVNTI reg, mem */
6628 if (!(s
->cpuid_features
& CPUID_SSE2
))
6630 ot
= mo_64_32(dflag
);
6631 modrm
= x86_ldub_code(env
, s
);
6632 mod
= (modrm
>> 6) & 3;
6635 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6636 /* generate a generic store */
6637 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
6640 modrm
= x86_ldub_code(env
, s
);
6642 CASE_MODRM_MEM_OP(0): /* fxsave */
6643 if (!(s
->cpuid_features
& CPUID_FXSR
)
6644 || (prefixes
& PREFIX_LOCK
)) {
6647 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
6648 gen_exception(s
, EXCP07_PREX
);
6651 gen_lea_modrm(env
, s
, modrm
);
6652 gen_helper_fxsave(cpu_env
, s
->A0
);
6655 CASE_MODRM_MEM_OP(1): /* fxrstor */
6656 if (!(s
->cpuid_features
& CPUID_FXSR
)
6657 || (prefixes
& PREFIX_LOCK
)) {
6660 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
6661 gen_exception(s
, EXCP07_PREX
);
6664 gen_lea_modrm(env
, s
, modrm
);
6665 gen_helper_fxrstor(cpu_env
, s
->A0
);
6668 CASE_MODRM_MEM_OP(2): /* ldmxcsr */
6669 if ((s
->flags
& HF_EM_MASK
) || !(s
->flags
& HF_OSFXSR_MASK
)) {
6672 if (s
->flags
& HF_TS_MASK
) {
6673 gen_exception(s
, EXCP07_PREX
);
6676 gen_lea_modrm(env
, s
, modrm
);
6677 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
, s
->mem_index
, MO_LEUL
);
6678 gen_helper_ldmxcsr(cpu_env
, s
->tmp2_i32
);
6681 CASE_MODRM_MEM_OP(3): /* stmxcsr */
6682 if ((s
->flags
& HF_EM_MASK
) || !(s
->flags
& HF_OSFXSR_MASK
)) {
6685 if (s
->flags
& HF_TS_MASK
) {
6686 gen_exception(s
, EXCP07_PREX
);
6689 gen_helper_update_mxcsr(cpu_env
);
6690 gen_lea_modrm(env
, s
, modrm
);
6691 tcg_gen_ld32u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, mxcsr
));
6692 gen_op_st_v(s
, MO_32
, s
->T0
, s
->A0
);
6695 CASE_MODRM_MEM_OP(4): /* xsave */
6696 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
6697 || (prefixes
& (PREFIX_LOCK
| PREFIX_DATA
6698 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
6701 gen_lea_modrm(env
, s
, modrm
);
6702 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
6704 gen_helper_xsave(cpu_env
, s
->A0
, s
->tmp1_i64
);
6707 CASE_MODRM_MEM_OP(5): /* xrstor */
6708 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
6709 || (prefixes
& (PREFIX_LOCK
| PREFIX_DATA
6710 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
6713 gen_lea_modrm(env
, s
, modrm
);
6714 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
6716 gen_helper_xrstor(cpu_env
, s
->A0
, s
->tmp1_i64
);
6717 /* XRSTOR is how MPX is enabled, which changes how
6718 we translate. Thus we need to end the TB. */
6719 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
6722 CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
6723 if (prefixes
& PREFIX_LOCK
) {
6726 if (prefixes
& PREFIX_DATA
) {
6728 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_CLWB
)) {
6731 gen_nop_modrm(env
, s
, modrm
);
6734 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
6735 || (s
->cpuid_xsave_features
& CPUID_XSAVE_XSAVEOPT
) == 0
6736 || (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
))) {
6739 gen_lea_modrm(env
, s
, modrm
);
6740 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
6742 gen_helper_xsaveopt(cpu_env
, s
->A0
, s
->tmp1_i64
);
6746 CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
6747 if (prefixes
& PREFIX_LOCK
) {
6750 if (prefixes
& PREFIX_DATA
) {
6752 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_CLFLUSHOPT
)) {
6757 if ((s
->prefix
& (PREFIX_REPZ
| PREFIX_REPNZ
))
6758 || !(s
->cpuid_features
& CPUID_CLFLUSH
)) {
6762 gen_nop_modrm(env
, s
, modrm
);
6765 case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
6766 case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
6767 case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
6768 case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
6770 && (prefixes
& PREFIX_REPZ
)
6771 && !(prefixes
& PREFIX_LOCK
)
6772 && (s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_FSGSBASE
)) {
6773 TCGv base
, treg
, src
, dst
;
6775 /* Preserve hflags bits by testing CR4 at runtime. */
6776 tcg_gen_movi_i32(s
->tmp2_i32
, CR4_FSGSBASE_MASK
);
6777 gen_helper_cr4_testbit(cpu_env
, s
->tmp2_i32
);
6779 base
= cpu_seg_base
[modrm
& 8 ? R_GS
: R_FS
];
6780 treg
= cpu_regs
[(modrm
& 7) | REX_B(s
)];
6784 dst
= base
, src
= treg
;
6787 dst
= treg
, src
= base
;
6790 if (s
->dflag
== MO_32
) {
6791 tcg_gen_ext32u_tl(dst
, src
);
6793 tcg_gen_mov_tl(dst
, src
);
6799 case 0xf8: /* sfence / pcommit */
6800 if (prefixes
& PREFIX_DATA
) {
6802 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_PCOMMIT
)
6803 || (prefixes
& PREFIX_LOCK
)) {
6809 case 0xf9 ... 0xff: /* sfence */
6810 if (!(s
->cpuid_features
& CPUID_SSE
)
6811 || (prefixes
& PREFIX_LOCK
)) {
6814 tcg_gen_mb(TCG_MO_ST_ST
| TCG_BAR_SC
);
6816 case 0xe8 ... 0xef: /* lfence */
6817 if (!(s
->cpuid_features
& CPUID_SSE
)
6818 || (prefixes
& PREFIX_LOCK
)) {
6821 tcg_gen_mb(TCG_MO_LD_LD
| TCG_BAR_SC
);
6823 case 0xf0 ... 0xf7: /* mfence */
6824 if (!(s
->cpuid_features
& CPUID_SSE2
)
6825 || (prefixes
& PREFIX_LOCK
)) {
6828 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
6836 case 0x10d: /* 3DNow! prefetch(w) */
6837 modrm
= x86_ldub_code(env
, s
);
6838 mod
= (modrm
>> 6) & 3;
6841 gen_nop_modrm(env
, s
, modrm
);
6843 case 0x1aa: /* rsm */
6844 gen_svm_check_intercept(s
, SVM_EXIT_RSM
);
6845 if (!(s
->flags
& HF_SMM_MASK
))
6847 #ifdef CONFIG_USER_ONLY
6848 /* we should not be in SMM mode */
6849 g_assert_not_reached();
6851 gen_update_cc_op(s
);
6852 gen_update_eip_next(s
);
6853 gen_helper_rsm(cpu_env
);
6854 #endif /* CONFIG_USER_ONLY */
6855 s
->base
.is_jmp
= DISAS_EOB_ONLY
;
6857 case 0x1b8: /* SSE4.2 popcnt */
6858 if ((prefixes
& (PREFIX_REPZ
| PREFIX_LOCK
| PREFIX_REPNZ
)) !=
6861 if (!(s
->cpuid_ext_features
& CPUID_EXT_POPCNT
))
6864 modrm
= x86_ldub_code(env
, s
);
6865 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6867 if (s
->prefix
& PREFIX_DATA
) {
6870 ot
= mo_64_32(dflag
);
6873 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
6874 gen_extu(ot
, s
->T0
);
6875 tcg_gen_mov_tl(cpu_cc_src
, s
->T0
);
6876 tcg_gen_ctpop_tl(s
->T0
, s
->T0
);
6877 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
6879 set_cc_op(s
, CC_OP_POPCNT
);
6881 case 0x10e ... 0x117:
6882 case 0x128 ... 0x12f:
6883 case 0x138 ... 0x13a:
6884 case 0x150 ... 0x179:
6885 case 0x17c ... 0x17f:
6887 case 0x1c4 ... 0x1c6:
6888 case 0x1d0 ... 0x1fe:
6889 disas_insn_new(s
, cpu
, b
);
6896 gen_illegal_opcode(s
);
6899 gen_unknown_opcode(env
, s
);
6903 void tcg_x86_init(void)
6905 static const char reg_names
[CPU_NB_REGS
][4] = {
6906 #ifdef TARGET_X86_64
6934 static const char eip_name
[] = {
6935 #ifdef TARGET_X86_64
6941 static const char seg_base_names
[6][8] = {
6949 static const char bnd_regl_names
[4][8] = {
6950 "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
6952 static const char bnd_regu_names
[4][8] = {
6953 "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
6957 cpu_cc_op
= tcg_global_mem_new_i32(cpu_env
,
6958 offsetof(CPUX86State
, cc_op
), "cc_op");
6959 cpu_cc_dst
= tcg_global_mem_new(cpu_env
, offsetof(CPUX86State
, cc_dst
),
6961 cpu_cc_src
= tcg_global_mem_new(cpu_env
, offsetof(CPUX86State
, cc_src
),
6963 cpu_cc_src2
= tcg_global_mem_new(cpu_env
, offsetof(CPUX86State
, cc_src2
),
6965 cpu_eip
= tcg_global_mem_new(cpu_env
, offsetof(CPUX86State
, eip
), eip_name
);
6967 for (i
= 0; i
< CPU_NB_REGS
; ++i
) {
6968 cpu_regs
[i
] = tcg_global_mem_new(cpu_env
,
6969 offsetof(CPUX86State
, regs
[i
]),
6973 for (i
= 0; i
< 6; ++i
) {
6975 = tcg_global_mem_new(cpu_env
,
6976 offsetof(CPUX86State
, segs
[i
].base
),
6980 for (i
= 0; i
< 4; ++i
) {
6982 = tcg_global_mem_new_i64(cpu_env
,
6983 offsetof(CPUX86State
, bnd_regs
[i
].lb
),
6986 = tcg_global_mem_new_i64(cpu_env
,
6987 offsetof(CPUX86State
, bnd_regs
[i
].ub
),
6992 static void i386_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cpu
)
6994 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6995 CPUX86State
*env
= cpu
->env_ptr
;
6996 uint32_t flags
= dc
->base
.tb
->flags
;
6997 uint32_t cflags
= tb_cflags(dc
->base
.tb
);
6998 int cpl
= (flags
>> HF_CPL_SHIFT
) & 3;
6999 int iopl
= (flags
>> IOPL_SHIFT
) & 3;
7001 dc
->cs_base
= dc
->base
.tb
->cs_base
;
7002 dc
->pc_save
= dc
->base
.pc_next
;
7004 #ifndef CONFIG_USER_ONLY
7009 /* We make some simplifying assumptions; validate they're correct. */
7010 g_assert(PE(dc
) == ((flags
& HF_PE_MASK
) != 0));
7011 g_assert(CPL(dc
) == cpl
);
7012 g_assert(IOPL(dc
) == iopl
);
7013 g_assert(VM86(dc
) == ((flags
& HF_VM_MASK
) != 0));
7014 g_assert(CODE32(dc
) == ((flags
& HF_CS32_MASK
) != 0));
7015 g_assert(CODE64(dc
) == ((flags
& HF_CS64_MASK
) != 0));
7016 g_assert(SS32(dc
) == ((flags
& HF_SS32_MASK
) != 0));
7017 g_assert(LMA(dc
) == ((flags
& HF_LMA_MASK
) != 0));
7018 g_assert(ADDSEG(dc
) == ((flags
& HF_ADDSEG_MASK
) != 0));
7019 g_assert(SVME(dc
) == ((flags
& HF_SVME_MASK
) != 0));
7020 g_assert(GUEST(dc
) == ((flags
& HF_GUEST_MASK
) != 0));
7022 dc
->cc_op
= CC_OP_DYNAMIC
;
7023 dc
->cc_op_dirty
= false;
7024 dc
->popl_esp_hack
= 0;
7025 /* select memory access functions */
7027 #ifdef CONFIG_SOFTMMU
7028 dc
->mem_index
= cpu_mmu_index(env
, false);
7030 dc
->cpuid_features
= env
->features
[FEAT_1_EDX
];
7031 dc
->cpuid_ext_features
= env
->features
[FEAT_1_ECX
];
7032 dc
->cpuid_ext2_features
= env
->features
[FEAT_8000_0001_EDX
];
7033 dc
->cpuid_ext3_features
= env
->features
[FEAT_8000_0001_ECX
];
7034 dc
->cpuid_7_0_ebx_features
= env
->features
[FEAT_7_0_EBX
];
7035 dc
->cpuid_7_0_ecx_features
= env
->features
[FEAT_7_0_ECX
];
7036 dc
->cpuid_xsave_features
= env
->features
[FEAT_XSAVE
];
7037 dc
->jmp_opt
= !((cflags
& CF_NO_GOTO_TB
) ||
7038 (flags
& (HF_TF_MASK
| HF_INHIBIT_IRQ_MASK
)));
7040 * If jmp_opt, we want to handle each string instruction individually.
7041 * For icount also disable repz optimization so that each iteration
7042 * is accounted separately.
7044 dc
->repz_opt
= !dc
->jmp_opt
&& !(cflags
& CF_USE_ICOUNT
);
7046 dc
->T0
= tcg_temp_new();
7047 dc
->T1
= tcg_temp_new();
7048 dc
->A0
= tcg_temp_new();
7050 dc
->tmp0
= tcg_temp_new();
7051 dc
->tmp1_i64
= tcg_temp_new_i64();
7052 dc
->tmp2_i32
= tcg_temp_new_i32();
7053 dc
->tmp3_i32
= tcg_temp_new_i32();
7054 dc
->tmp4
= tcg_temp_new();
7055 dc
->cc_srcT
= tcg_temp_local_new();
7058 static void i386_tr_tb_start(DisasContextBase
*db
, CPUState
*cpu
)
7062 static void i386_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cpu
)
7064 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
7065 target_ulong pc_arg
= dc
->base
.pc_next
;
7067 dc
->prev_insn_end
= tcg_last_op();
7068 if (tb_cflags(dcbase
->tb
) & CF_PCREL
) {
7069 pc_arg
-= dc
->cs_base
;
7070 pc_arg
&= ~TARGET_PAGE_MASK
;
7072 tcg_gen_insn_start(pc_arg
, dc
->cc_op
);
7075 static void i386_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cpu
)
7077 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
7079 #ifdef TARGET_VSYSCALL_PAGE
7081 * Detect entry into the vsyscall page and invoke the syscall.
7083 if ((dc
->base
.pc_next
& TARGET_PAGE_MASK
) == TARGET_VSYSCALL_PAGE
) {
7084 gen_exception(dc
, EXCP_VSYSCALL
);
7085 dc
->base
.pc_next
= dc
->pc
+ 1;
7090 if (disas_insn(dc
, cpu
)) {
7091 target_ulong pc_next
= dc
->pc
;
7092 dc
->base
.pc_next
= pc_next
;
7094 if (dc
->base
.is_jmp
== DISAS_NEXT
) {
7095 if (dc
->flags
& (HF_TF_MASK
| HF_INHIBIT_IRQ_MASK
)) {
7097 * If single step mode, we generate only one instruction and
7098 * generate an exception.
7099 * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7100 * the flag and abort the translation to give the irqs a
7103 dc
->base
.is_jmp
= DISAS_EOB_NEXT
;
7104 } else if (!is_same_page(&dc
->base
, pc_next
)) {
7105 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
7111 static void i386_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cpu
)
7113 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
7115 switch (dc
->base
.is_jmp
) {
7116 case DISAS_NORETURN
:
7118 case DISAS_TOO_MANY
:
7119 gen_update_cc_op(dc
);
7120 gen_jmp_rel_csize(dc
, 0, 0);
7122 case DISAS_EOB_NEXT
:
7123 gen_update_cc_op(dc
);
7124 gen_update_eip_cur(dc
);
7126 case DISAS_EOB_ONLY
:
7129 case DISAS_EOB_INHIBIT_IRQ
:
7130 gen_update_cc_op(dc
);
7131 gen_update_eip_cur(dc
);
7132 gen_eob_inhibit_irq(dc
, true);
7138 g_assert_not_reached();
7142 static void i386_tr_disas_log(const DisasContextBase
*dcbase
,
7143 CPUState
*cpu
, FILE *logfile
)
7145 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
7147 fprintf(logfile
, "IN: %s\n", lookup_symbol(dc
->base
.pc_first
));
7148 target_disas(logfile
, cpu
, dc
->base
.pc_first
, dc
->base
.tb
->size
);
7151 static const TranslatorOps i386_tr_ops
= {
7152 .init_disas_context
= i386_tr_init_disas_context
,
7153 .tb_start
= i386_tr_tb_start
,
7154 .insn_start
= i386_tr_insn_start
,
7155 .translate_insn
= i386_tr_translate_insn
,
7156 .tb_stop
= i386_tr_tb_stop
,
7157 .disas_log
= i386_tr_disas_log
,
7160 /* generate intermediate code for basic block 'tb'. */
7161 void gen_intermediate_code(CPUState
*cpu
, TranslationBlock
*tb
, int *max_insns
,
7162 target_ulong pc
, void *host_pc
)
7166 translator_loop(cpu
, tb
, max_insns
, pc
, host_pc
, &i386_tr_ops
, &dc
.base
);