]> git.proxmox.com Git - mirror_qemu.git/blame - target-i386/translate.c
device_tree: introduce load_device_tree_from_sysfs
[mirror_qemu.git] / target-i386 / translate.c
CommitLineData
2c0262af
FB
1/*
2 * i386 translation
5fafdf24 3 *
2c0262af
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 18 */
b6a0aa05 19#include "qemu/osdep.h"
2c0262af 20
bec93d72 21#include "qemu/host-utils.h"
2c0262af 22#include "cpu.h"
76cad711 23#include "disas/disas.h"
57fec1fe 24#include "tcg-op.h"
f08b6170 25#include "exec/cpu_ldst.h"
2c0262af 26
2ef6175a
RH
27#include "exec/helper-proto.h"
28#include "exec/helper-gen.h"
a7812ae4 29
a7e30d84 30#include "trace-tcg.h"
508127e2 31#include "exec/log.h"
a7e30d84
LV
32
33
2c0262af
FB
34#define PREFIX_REPZ 0x01
35#define PREFIX_REPNZ 0x02
36#define PREFIX_LOCK 0x04
37#define PREFIX_DATA 0x08
38#define PREFIX_ADR 0x10
701ed211 39#define PREFIX_VEX 0x20
2c0262af 40
14ce26e7 41#ifdef TARGET_X86_64
14ce26e7
FB
42#define CODE64(s) ((s)->code64)
43#define REX_X(s) ((s)->rex_x)
44#define REX_B(s) ((s)->rex_b)
14ce26e7 45#else
14ce26e7
FB
46#define CODE64(s) 0
47#define REX_X(s) 0
48#define REX_B(s) 0
49#endif
50
bec93d72
RH
51#ifdef TARGET_X86_64
52# define ctztl ctz64
53# define clztl clz64
54#else
55# define ctztl ctz32
56# define clztl clz32
57#endif
58
1906b2af
RH
59/* For a switch indexed by MODRM, match all memory operands for a given OP. */
60#define CASE_MEM_OP(OP) \
61 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
62 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
63 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
64
57fec1fe
FB
65//#define MACRO_TEST 1
66
57fec1fe 67/* global register indexes */
a7812ae4 68static TCGv_ptr cpu_env;
a3251186 69static TCGv cpu_A0;
988c3eb0 70static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
a7812ae4 71static TCGv_i32 cpu_cc_op;
cc739bb0 72static TCGv cpu_regs[CPU_NB_REGS];
3558f805 73static TCGv cpu_seg_base[6];
149b427b
RH
74static TCGv_i64 cpu_bndl[4];
75static TCGv_i64 cpu_bndu[4];
1e4840bf 76/* local temps */
1d1cc4d0 77static TCGv cpu_T0, cpu_T1;
57fec1fe 78/* local register indexes (only used inside old micro ops) */
a7812ae4
PB
79static TCGv cpu_tmp0, cpu_tmp4;
80static TCGv_ptr cpu_ptr0, cpu_ptr1;
81static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
82static TCGv_i64 cpu_tmp1_i64;
57fec1fe 83
022c62cb 84#include "exec/gen-icount.h"
2e70f6ef 85
57fec1fe
FB
86#ifdef TARGET_X86_64
87static int x86_64_hregs;
ae063a68
FB
88#endif
89
2c0262af
FB
90typedef struct DisasContext {
91 /* current insn context */
92 int override; /* -1 if no override */
93 int prefix;
1d71ddb1 94 TCGMemOp aflag;
ab4e4aec 95 TCGMemOp dflag;
14ce26e7 96 target_ulong pc; /* pc = eip + cs_base */
2c0262af
FB
97 int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
98 static state change (stop translation) */
99 /* current block context */
14ce26e7 100 target_ulong cs_base; /* base of CS segment */
2c0262af
FB
101 int pe; /* protected mode */
102 int code32; /* 32 bit code segment */
14ce26e7
FB
103#ifdef TARGET_X86_64
104 int lma; /* long mode active */
105 int code64; /* 64 bit code segment */
106 int rex_x, rex_b;
107#endif
701ed211
RH
108 int vex_l; /* vex vector length */
109 int vex_v; /* vex vvvv register, without 1's compliment. */
2c0262af 110 int ss32; /* 32 bit stack segment */
fee71888 111 CCOp cc_op; /* current CC operation */
e207582f 112 bool cc_op_dirty;
2c0262af
FB
113 int addseg; /* non zero if either DS/ES/SS have a non zero base */
114 int f_st; /* currently unused */
115 int vm86; /* vm86 mode */
116 int cpl;
117 int iopl;
118 int tf; /* TF cpu flag */
34865134 119 int singlestep_enabled; /* "hardware" single step enabled */
2c0262af 120 int jmp_opt; /* use direct block chaining for direct jumps */
c4d4525c 121 int repz_opt; /* optimize jumps within repz instructions */
2c0262af 122 int mem_index; /* select memory access functions */
c068688b 123 uint64_t flags; /* all execution flags */
2c0262af
FB
124 struct TranslationBlock *tb;
125 int popl_esp_hack; /* for correct popl with esp base handling */
14ce26e7
FB
126 int rip_offset; /* only used in x86_64, but left for simplicity */
127 int cpuid_features;
3d7374c5 128 int cpuid_ext_features;
e771edab 129 int cpuid_ext2_features;
12e26b75 130 int cpuid_ext3_features;
a9321a4d 131 int cpuid_7_0_ebx_features;
c9cfe8f9 132 int cpuid_xsave_features;
2c0262af
FB
133} DisasContext;
134
135static void gen_eob(DisasContext *s);
14ce26e7
FB
136static void gen_jmp(DisasContext *s, target_ulong eip);
137static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
d67dc9e6 138static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);
2c0262af
FB
139
140/* i386 arith/logic operations */
141enum {
5fafdf24
TS
142 OP_ADDL,
143 OP_ORL,
144 OP_ADCL,
2c0262af 145 OP_SBBL,
5fafdf24
TS
146 OP_ANDL,
147 OP_SUBL,
148 OP_XORL,
2c0262af
FB
149 OP_CMPL,
150};
151
152/* i386 shift ops */
153enum {
5fafdf24
TS
154 OP_ROL,
155 OP_ROR,
156 OP_RCL,
157 OP_RCR,
158 OP_SHL,
159 OP_SHR,
2c0262af
FB
160 OP_SHL1, /* undocumented */
161 OP_SAR = 7,
162};
163
8e1c85e3
FB
164enum {
165 JCC_O,
166 JCC_B,
167 JCC_Z,
168 JCC_BE,
169 JCC_S,
170 JCC_P,
171 JCC_L,
172 JCC_LE,
173};
174
2c0262af
FB
175enum {
176 /* I386 int registers */
177 OR_EAX, /* MUST be even numbered */
178 OR_ECX,
179 OR_EDX,
180 OR_EBX,
181 OR_ESP,
182 OR_EBP,
183 OR_ESI,
184 OR_EDI,
14ce26e7
FB
185
186 OR_TMP0 = 16, /* temporary operand register */
2c0262af
FB
187 OR_TMP1,
188 OR_A0, /* temporary register used when doing address evaluation */
2c0262af
FB
189};
190
b666265b 191enum {
a3251186
RH
192 USES_CC_DST = 1,
193 USES_CC_SRC = 2,
988c3eb0
RH
194 USES_CC_SRC2 = 4,
195 USES_CC_SRCT = 8,
b666265b
RH
196};
197
198/* Bit set if the global variable is live after setting CC_OP to X. */
199static const uint8_t cc_op_live[CC_OP_NB] = {
988c3eb0 200 [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
b666265b
RH
201 [CC_OP_EFLAGS] = USES_CC_SRC,
202 [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
203 [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
988c3eb0 204 [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
a3251186 205 [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
988c3eb0 206 [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
b666265b
RH
207 [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
208 [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
209 [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
210 [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
211 [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
bc4b43dc 212 [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
cd7f97ca
RH
213 [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
214 [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
215 [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
436ff2d2 216 [CC_OP_CLR] = 0,
b666265b
RH
217};
218
e207582f 219static void set_cc_op(DisasContext *s, CCOp op)
3ca51d07 220{
b666265b
RH
221 int dead;
222
223 if (s->cc_op == op) {
224 return;
225 }
226
227 /* Discard CC computation that will no longer be used. */
228 dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
229 if (dead & USES_CC_DST) {
230 tcg_gen_discard_tl(cpu_cc_dst);
e207582f 231 }
b666265b
RH
232 if (dead & USES_CC_SRC) {
233 tcg_gen_discard_tl(cpu_cc_src);
234 }
988c3eb0
RH
235 if (dead & USES_CC_SRC2) {
236 tcg_gen_discard_tl(cpu_cc_src2);
237 }
a3251186
RH
238 if (dead & USES_CC_SRCT) {
239 tcg_gen_discard_tl(cpu_cc_srcT);
240 }
b666265b 241
e2f515cf
RH
242 if (op == CC_OP_DYNAMIC) {
243 /* The DYNAMIC setting is translator only, and should never be
244 stored. Thus we always consider it clean. */
245 s->cc_op_dirty = false;
246 } else {
247 /* Discard any computed CC_OP value (see shifts). */
248 if (s->cc_op == CC_OP_DYNAMIC) {
249 tcg_gen_discard_i32(cpu_cc_op);
250 }
251 s->cc_op_dirty = true;
252 }
b666265b 253 s->cc_op = op;
e207582f
RH
254}
255
e207582f
RH
256static void gen_update_cc_op(DisasContext *s)
257{
258 if (s->cc_op_dirty) {
773cdfcc 259 tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
e207582f
RH
260 s->cc_op_dirty = false;
261 }
3ca51d07
RH
262}
263
14ce26e7
FB
264#ifdef TARGET_X86_64
265
266#define NB_OP_SIZES 4
267
14ce26e7
FB
268#else /* !TARGET_X86_64 */
269
270#define NB_OP_SIZES 3
271
14ce26e7
FB
272#endif /* !TARGET_X86_64 */
273
e2542fe2 274#if defined(HOST_WORDS_BIGENDIAN)
57fec1fe
FB
275#define REG_B_OFFSET (sizeof(target_ulong) - 1)
276#define REG_H_OFFSET (sizeof(target_ulong) - 2)
277#define REG_W_OFFSET (sizeof(target_ulong) - 2)
278#define REG_L_OFFSET (sizeof(target_ulong) - 4)
279#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
14ce26e7 280#else
57fec1fe
FB
281#define REG_B_OFFSET 0
282#define REG_H_OFFSET 1
283#define REG_W_OFFSET 0
284#define REG_L_OFFSET 0
285#define REG_LH_OFFSET 4
14ce26e7 286#endif
57fec1fe 287
96d7073f
PM
288/* In instruction encodings for byte register accesses the
289 * register number usually indicates "low 8 bits of register N";
290 * however there are some special cases where N 4..7 indicates
291 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
292 * true for this special case, false otherwise.
293 */
294static inline bool byte_reg_is_xH(int reg)
295{
296 if (reg < 4) {
297 return false;
298 }
299#ifdef TARGET_X86_64
300 if (reg >= 8 || x86_64_hregs) {
301 return false;
302 }
303#endif
304 return true;
305}
306
ab4e4aec
RH
307/* Select the size of a push/pop operation. */
308static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
309{
310 if (CODE64(s)) {
311 return ot == MO_16 ? MO_16 : MO_64;
312 } else {
313 return ot;
314 }
315}
316
64ae256c
RH
317/* Select the size of the stack pointer. */
318static inline TCGMemOp mo_stacksize(DisasContext *s)
319{
320 return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
321}
322
ab4e4aec
RH
323/* Select only size 64 else 32. Used for SSE operand sizes. */
324static inline TCGMemOp mo_64_32(TCGMemOp ot)
325{
326#ifdef TARGET_X86_64
327 return ot == MO_64 ? MO_64 : MO_32;
328#else
329 return MO_32;
330#endif
331}
332
333/* Select size 8 if lsb of B is clear, else OT. Used for decoding
334 byte vs word opcodes. */
335static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
336{
337 return b & 1 ? ot : MO_8;
338}
339
340/* Select size 8 if lsb of B is clear, else OT capped at 32.
341 Used for decoding operand size of port opcodes. */
342static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
343{
344 return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
345}
346
d67dc9e6 347static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
57fec1fe
FB
348{
349 switch(ot) {
4ba9938c 350 case MO_8:
96d7073f 351 if (!byte_reg_is_xH(reg)) {
c832e3de 352 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
57fec1fe 353 } else {
c832e3de 354 tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
57fec1fe
FB
355 }
356 break;
4ba9938c 357 case MO_16:
c832e3de 358 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
57fec1fe 359 break;
4ba9938c 360 case MO_32:
cc739bb0
LD
361 /* For x86_64, this sets the higher half of register to zero.
362 For i386, this is equivalent to a mov. */
363 tcg_gen_ext32u_tl(cpu_regs[reg], t0);
57fec1fe 364 break;
cc739bb0 365#ifdef TARGET_X86_64
4ba9938c 366 case MO_64:
cc739bb0 367 tcg_gen_mov_tl(cpu_regs[reg], t0);
57fec1fe 368 break;
14ce26e7 369#endif
d67dc9e6
RH
370 default:
371 tcg_abort();
57fec1fe
FB
372 }
373}
2c0262af 374
d67dc9e6 375static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
57fec1fe 376{
4ba9938c 377 if (ot == MO_8 && byte_reg_is_xH(reg)) {
96d7073f
PM
378 tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
379 tcg_gen_ext8u_tl(t0, t0);
380 } else {
cc739bb0 381 tcg_gen_mov_tl(t0, cpu_regs[reg]);
57fec1fe
FB
382 }
383}
384
57fec1fe
FB
385static void gen_add_A0_im(DisasContext *s, int val)
386{
4e85057b
RH
387 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
388 if (!CODE64(s)) {
389 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
390 }
57fec1fe 391}
2c0262af 392
74bdfbda 393static inline void gen_op_jmp_v(TCGv dest)
57fec1fe 394{
74bdfbda 395 tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));
57fec1fe
FB
396}
397
d3f4bbe3 398static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
57fec1fe 399{
d3f4bbe3
RH
400 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
401 gen_op_mov_reg_v(size, reg, cpu_tmp0);
57fec1fe
FB
402}
403
d3f4bbe3 404static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
57fec1fe 405{
1d1cc4d0 406 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T0);
d3f4bbe3 407 gen_op_mov_reg_v(size, reg, cpu_tmp0);
6e0d8677 408}
57fec1fe 409
323d1876 410static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
57fec1fe 411{
3c5f4116 412 tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
57fec1fe 413}
2c0262af 414
323d1876 415static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
57fec1fe 416{
3523e4bd 417 tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
57fec1fe 418}
4f31916f 419
d4faa3e0
RH
420static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
421{
422 if (d == OR_TMP0) {
1d1cc4d0 423 gen_op_st_v(s, idx, cpu_T0, cpu_A0);
d4faa3e0 424 } else {
1d1cc4d0 425 gen_op_mov_reg_v(idx, d, cpu_T0);
d4faa3e0
RH
426 }
427}
428
14ce26e7
FB
429static inline void gen_jmp_im(target_ulong pc)
430{
57fec1fe 431 tcg_gen_movi_tl(cpu_tmp0, pc);
74bdfbda 432 gen_op_jmp_v(cpu_tmp0);
14ce26e7
FB
433}
434
ca2f29f5
RH
435/* Compute SEG:REG into A0. SEG is selected from the override segment
436 (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to
437 indicate no override. */
77ebcad0
RH
438static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0,
439 int def_seg, int ovr_seg)
2c0262af 440{
ca2f29f5 441 switch (aflag) {
14ce26e7 442#ifdef TARGET_X86_64
1d71ddb1 443 case MO_64:
ca2f29f5
RH
444 if (ovr_seg < 0) {
445 tcg_gen_mov_tl(cpu_A0, a0);
446 return;
14ce26e7 447 }
1d71ddb1 448 break;
14ce26e7 449#endif
1d71ddb1 450 case MO_32:
2c0262af 451 /* 32 bit address */
ca2f29f5
RH
452 if (ovr_seg < 0) {
453 if (s->addseg) {
454 ovr_seg = def_seg;
455 } else {
456 tcg_gen_ext32u_tl(cpu_A0, a0);
457 return;
458 }
2c0262af 459 }
1d71ddb1
RH
460 break;
461 case MO_16:
ca2f29f5
RH
462 /* 16 bit address */
463 if (ovr_seg < 0) {
464 ovr_seg = def_seg;
465 }
466 tcg_gen_ext16u_tl(cpu_A0, a0);
467 /* ADDSEG will only be false in 16-bit mode for LEA. */
468 if (!s->addseg) {
469 return;
470 }
471 a0 = cpu_A0;
1d71ddb1
RH
472 break;
473 default:
474 tcg_abort();
2c0262af 475 }
2c0262af 476
ca2f29f5 477 if (ovr_seg >= 0) {
3558f805 478 TCGv seg = cpu_seg_base[ovr_seg];
ca2f29f5
RH
479
480 if (aflag == MO_64) {
481 tcg_gen_add_tl(cpu_A0, a0, seg);
482 } else if (CODE64(s)) {
483 tcg_gen_ext32u_tl(cpu_A0, a0);
484 tcg_gen_add_tl(cpu_A0, cpu_A0, seg);
2c0262af 485 } else {
ca2f29f5
RH
486 tcg_gen_add_tl(cpu_A0, a0, seg);
487 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
2c0262af 488 }
2c0262af
FB
489 }
490}
491
ca2f29f5
RH
492static inline void gen_string_movl_A0_ESI(DisasContext *s)
493{
77ebcad0 494 gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
ca2f29f5
RH
495}
496
497static inline void gen_string_movl_A0_EDI(DisasContext *s)
498{
77ebcad0 499 gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
ca2f29f5
RH
500}
501
d67dc9e6 502static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
6e0d8677 503{
1d1cc4d0
RH
504 tcg_gen_ld32s_tl(cpu_T0, cpu_env, offsetof(CPUX86State, df));
505 tcg_gen_shli_tl(cpu_T0, cpu_T0, ot);
2c0262af
FB
506};
507
d67dc9e6 508static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
6e0d8677 509{
d824df34 510 switch (size) {
4ba9938c 511 case MO_8:
d824df34
PB
512 if (sign) {
513 tcg_gen_ext8s_tl(dst, src);
514 } else {
515 tcg_gen_ext8u_tl(dst, src);
516 }
517 return dst;
4ba9938c 518 case MO_16:
d824df34
PB
519 if (sign) {
520 tcg_gen_ext16s_tl(dst, src);
521 } else {
522 tcg_gen_ext16u_tl(dst, src);
523 }
524 return dst;
525#ifdef TARGET_X86_64
4ba9938c 526 case MO_32:
d824df34
PB
527 if (sign) {
528 tcg_gen_ext32s_tl(dst, src);
529 } else {
530 tcg_gen_ext32u_tl(dst, src);
531 }
532 return dst;
533#endif
6e0d8677 534 default:
d824df34 535 return src;
6e0d8677
FB
536 }
537}
3b46e624 538
d67dc9e6 539static void gen_extu(TCGMemOp ot, TCGv reg)
d824df34
PB
540{
541 gen_ext_tl(reg, reg, ot, false);
542}
543
d67dc9e6 544static void gen_exts(TCGMemOp ot, TCGv reg)
6e0d8677 545{
d824df34 546 gen_ext_tl(reg, reg, ot, true);
6e0d8677 547}
2c0262af 548
42a268c2 549static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1)
6e0d8677 550{
cc739bb0 551 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
c92aa1ad 552 gen_extu(size, cpu_tmp0);
cb63669a 553 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
6e0d8677
FB
554}
555
42a268c2 556static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1)
6e0d8677 557{
cc739bb0 558 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
c92aa1ad 559 gen_extu(size, cpu_tmp0);
cb63669a 560 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
6e0d8677 561}
2c0262af 562
d67dc9e6 563static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
a7812ae4
PB
564{
565 switch (ot) {
4ba9938c 566 case MO_8:
3f7d8464 567 gen_helper_inb(v, cpu_env, n);
93ab25d7 568 break;
4ba9938c 569 case MO_16:
3f7d8464 570 gen_helper_inw(v, cpu_env, n);
93ab25d7 571 break;
4ba9938c 572 case MO_32:
3f7d8464 573 gen_helper_inl(v, cpu_env, n);
93ab25d7 574 break;
d67dc9e6
RH
575 default:
576 tcg_abort();
a7812ae4 577 }
a7812ae4 578}
2c0262af 579
d67dc9e6 580static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
a7812ae4
PB
581{
582 switch (ot) {
4ba9938c 583 case MO_8:
3f7d8464 584 gen_helper_outb(cpu_env, v, n);
93ab25d7 585 break;
4ba9938c 586 case MO_16:
3f7d8464 587 gen_helper_outw(cpu_env, v, n);
93ab25d7 588 break;
4ba9938c 589 case MO_32:
3f7d8464 590 gen_helper_outl(cpu_env, v, n);
93ab25d7 591 break;
d67dc9e6
RH
592 default:
593 tcg_abort();
a7812ae4 594 }
a7812ae4 595}
f115e911 596
d67dc9e6 597static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
b8b6a50b 598 uint32_t svm_flags)
f115e911 599{
b8b6a50b
FB
600 target_ulong next_eip;
601
f115e911 602 if (s->pe && (s->cpl > s->iopl || s->vm86)) {
1d1cc4d0 603 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
a7812ae4 604 switch (ot) {
4ba9938c 605 case MO_8:
4a7443be
BS
606 gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
607 break;
4ba9938c 608 case MO_16:
4a7443be
BS
609 gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
610 break;
4ba9938c 611 case MO_32:
4a7443be
BS
612 gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
613 break;
d67dc9e6
RH
614 default:
615 tcg_abort();
a7812ae4 616 }
b8b6a50b 617 }
872929aa 618 if(s->flags & HF_SVMI_MASK) {
100ec099
PD
619 gen_update_cc_op(s);
620 gen_jmp_im(cur_eip);
b8b6a50b
FB
621 svm_flags |= (1 << (4 + ot));
622 next_eip = s->pc - s->cs_base;
1d1cc4d0 623 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
052e80d5
BS
624 gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
625 tcg_const_i32(svm_flags),
a7812ae4 626 tcg_const_i32(next_eip - cur_eip));
f115e911
FB
627 }
628}
629
d67dc9e6 630static inline void gen_movs(DisasContext *s, TCGMemOp ot)
2c0262af
FB
631{
632 gen_string_movl_A0_ESI(s);
1d1cc4d0 633 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
2c0262af 634 gen_string_movl_A0_EDI(s);
1d1cc4d0 635 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
6e0d8677 636 gen_op_movl_T0_Dshift(ot);
1d71ddb1
RH
637 gen_op_add_reg_T0(s->aflag, R_ESI);
638 gen_op_add_reg_T0(s->aflag, R_EDI);
2c0262af
FB
639}
640
b6abf97d
FB
641static void gen_op_update1_cc(void)
642{
1d1cc4d0 643 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
b6abf97d
FB
644}
645
646static void gen_op_update2_cc(void)
647{
1d1cc4d0
RH
648 tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
649 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
b6abf97d
FB
650}
651
988c3eb0
RH
652static void gen_op_update3_cc(TCGv reg)
653{
654 tcg_gen_mov_tl(cpu_cc_src2, reg);
1d1cc4d0
RH
655 tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
656 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
988c3eb0
RH
657}
658
b6abf97d
FB
659static inline void gen_op_testl_T0_T1_cc(void)
660{
1d1cc4d0 661 tcg_gen_and_tl(cpu_cc_dst, cpu_T0, cpu_T1);
b6abf97d
FB
662}
663
664static void gen_op_update_neg_cc(void)
665{
1d1cc4d0
RH
666 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
667 tcg_gen_neg_tl(cpu_cc_src, cpu_T0);
a3251186 668 tcg_gen_movi_tl(cpu_cc_srcT, 0);
b6abf97d
FB
669}
670
d229edce
RH
671/* compute all eflags to cc_src */
672static void gen_compute_eflags(DisasContext *s)
8e1c85e3 673{
988c3eb0 674 TCGv zero, dst, src1, src2;
db9f2597
RH
675 int live, dead;
676
d229edce
RH
677 if (s->cc_op == CC_OP_EFLAGS) {
678 return;
679 }
436ff2d2 680 if (s->cc_op == CC_OP_CLR) {
d2fe51bd 681 tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
436ff2d2
RH
682 set_cc_op(s, CC_OP_EFLAGS);
683 return;
684 }
db9f2597
RH
685
686 TCGV_UNUSED(zero);
687 dst = cpu_cc_dst;
688 src1 = cpu_cc_src;
988c3eb0 689 src2 = cpu_cc_src2;
db9f2597
RH
690
691 /* Take care to not read values that are not live. */
692 live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
988c3eb0 693 dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
db9f2597
RH
694 if (dead) {
695 zero = tcg_const_tl(0);
696 if (dead & USES_CC_DST) {
697 dst = zero;
698 }
699 if (dead & USES_CC_SRC) {
700 src1 = zero;
701 }
988c3eb0
RH
702 if (dead & USES_CC_SRC2) {
703 src2 = zero;
704 }
db9f2597
RH
705 }
706
773cdfcc 707 gen_update_cc_op(s);
988c3eb0 708 gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
d229edce 709 set_cc_op(s, CC_OP_EFLAGS);
db9f2597
RH
710
711 if (dead) {
712 tcg_temp_free(zero);
713 }
8e1c85e3
FB
714}
715
bec93d72
RH
716typedef struct CCPrepare {
717 TCGCond cond;
718 TCGv reg;
719 TCGv reg2;
720 target_ulong imm;
721 target_ulong mask;
722 bool use_reg2;
723 bool no_setcond;
724} CCPrepare;
725
06847f1f 726/* compute eflags.C to reg */
bec93d72 727static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
06847f1f
RH
728{
729 TCGv t0, t1;
bec93d72 730 int size, shift;
06847f1f
RH
731
732 switch (s->cc_op) {
733 case CC_OP_SUBB ... CC_OP_SUBQ:
a3251186 734 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
06847f1f
RH
735 size = s->cc_op - CC_OP_SUBB;
736 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
737 /* If no temporary was used, be careful not to alias t1 and t0. */
738 t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
a3251186 739 tcg_gen_mov_tl(t0, cpu_cc_srcT);
06847f1f
RH
740 gen_extu(size, t0);
741 goto add_sub;
742
743 case CC_OP_ADDB ... CC_OP_ADDQ:
744 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
745 size = s->cc_op - CC_OP_ADDB;
746 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
747 t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
748 add_sub:
bec93d72
RH
749 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
750 .reg2 = t1, .mask = -1, .use_reg2 = true };
06847f1f 751
06847f1f 752 case CC_OP_LOGICB ... CC_OP_LOGICQ:
436ff2d2 753 case CC_OP_CLR:
bec93d72 754 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
06847f1f
RH
755
756 case CC_OP_INCB ... CC_OP_INCQ:
757 case CC_OP_DECB ... CC_OP_DECQ:
bec93d72
RH
758 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
759 .mask = -1, .no_setcond = true };
06847f1f
RH
760
761 case CC_OP_SHLB ... CC_OP_SHLQ:
762 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
763 size = s->cc_op - CC_OP_SHLB;
bec93d72
RH
764 shift = (8 << size) - 1;
765 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
766 .mask = (target_ulong)1 << shift };
06847f1f
RH
767
768 case CC_OP_MULB ... CC_OP_MULQ:
bec93d72
RH
769 return (CCPrepare) { .cond = TCG_COND_NE,
770 .reg = cpu_cc_src, .mask = -1 };
06847f1f 771
bc4b43dc
RH
772 case CC_OP_BMILGB ... CC_OP_BMILGQ:
773 size = s->cc_op - CC_OP_BMILGB;
774 t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
775 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
776
cd7f97ca
RH
777 case CC_OP_ADCX:
778 case CC_OP_ADCOX:
779 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
780 .mask = -1, .no_setcond = true };
781
06847f1f
RH
782 case CC_OP_EFLAGS:
783 case CC_OP_SARB ... CC_OP_SARQ:
784 /* CC_SRC & 1 */
bec93d72
RH
785 return (CCPrepare) { .cond = TCG_COND_NE,
786 .reg = cpu_cc_src, .mask = CC_C };
06847f1f
RH
787
788 default:
789 /* The need to compute only C from CC_OP_DYNAMIC is important
790 in efficiently implementing e.g. INC at the start of a TB. */
791 gen_update_cc_op(s);
988c3eb0
RH
792 gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
793 cpu_cc_src2, cpu_cc_op);
bec93d72
RH
794 return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
795 .mask = -1, .no_setcond = true };
06847f1f
RH
796 }
797}
798
1608ecca 799/* compute eflags.P to reg */
bec93d72 800static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
1608ecca 801{
d229edce 802 gen_compute_eflags(s);
bec93d72
RH
803 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
804 .mask = CC_P };
1608ecca
PB
805}
806
807/* compute eflags.S to reg */
bec93d72 808static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
1608ecca 809{
086c4077
RH
810 switch (s->cc_op) {
811 case CC_OP_DYNAMIC:
812 gen_compute_eflags(s);
813 /* FALLTHRU */
814 case CC_OP_EFLAGS:
cd7f97ca
RH
815 case CC_OP_ADCX:
816 case CC_OP_ADOX:
817 case CC_OP_ADCOX:
bec93d72
RH
818 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
819 .mask = CC_S };
436ff2d2
RH
820 case CC_OP_CLR:
821 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
086c4077
RH
822 default:
823 {
d67dc9e6 824 TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
086c4077 825 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
bec93d72 826 return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
086c4077 827 }
086c4077 828 }
1608ecca
PB
829}
830
831/* compute eflags.O to reg */
bec93d72 832static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
1608ecca 833{
cd7f97ca
RH
834 switch (s->cc_op) {
835 case CC_OP_ADOX:
836 case CC_OP_ADCOX:
837 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
838 .mask = -1, .no_setcond = true };
436ff2d2
RH
839 case CC_OP_CLR:
840 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
cd7f97ca
RH
841 default:
842 gen_compute_eflags(s);
843 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
844 .mask = CC_O };
845 }
1608ecca
PB
846}
847
848/* compute eflags.Z to reg */
bec93d72 849static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
1608ecca 850{
086c4077
RH
851 switch (s->cc_op) {
852 case CC_OP_DYNAMIC:
853 gen_compute_eflags(s);
854 /* FALLTHRU */
855 case CC_OP_EFLAGS:
cd7f97ca
RH
856 case CC_OP_ADCX:
857 case CC_OP_ADOX:
858 case CC_OP_ADCOX:
bec93d72
RH
859 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
860 .mask = CC_Z };
436ff2d2
RH
861 case CC_OP_CLR:
862 return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
086c4077
RH
863 default:
864 {
d67dc9e6 865 TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
086c4077 866 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
bec93d72 867 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
086c4077 868 }
bec93d72
RH
869 }
870}
871
c365395e
PB
872/* perform a conditional store into register 'reg' according to jump opcode
873 value 'b'. In the fast case, T0 is guaranted not to be used. */
276e6b5f 874static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
8e1c85e3 875{
d67dc9e6
RH
876 int inv, jcc_op, cond;
877 TCGMemOp size;
276e6b5f 878 CCPrepare cc;
c365395e
PB
879 TCGv t0;
880
881 inv = b & 1;
8e1c85e3 882 jcc_op = (b >> 1) & 7;
c365395e
PB
883
884 switch (s->cc_op) {
69d1aa31
RH
885 case CC_OP_SUBB ... CC_OP_SUBQ:
886 /* We optimize relational operators for the cmp/jcc case. */
c365395e
PB
887 size = s->cc_op - CC_OP_SUBB;
888 switch (jcc_op) {
889 case JCC_BE:
a3251186 890 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
c365395e
PB
891 gen_extu(size, cpu_tmp4);
892 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
276e6b5f
RH
893 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
894 .reg2 = t0, .mask = -1, .use_reg2 = true };
c365395e 895 break;
8e1c85e3 896
c365395e 897 case JCC_L:
276e6b5f 898 cond = TCG_COND_LT;
c365395e
PB
899 goto fast_jcc_l;
900 case JCC_LE:
276e6b5f 901 cond = TCG_COND_LE;
c365395e 902 fast_jcc_l:
a3251186 903 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
c365395e
PB
904 gen_exts(size, cpu_tmp4);
905 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
276e6b5f
RH
906 cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
907 .reg2 = t0, .mask = -1, .use_reg2 = true };
c365395e 908 break;
8e1c85e3 909
c365395e 910 default:
8e1c85e3 911 goto slow_jcc;
c365395e 912 }
8e1c85e3 913 break;
c365395e 914
8e1c85e3
FB
915 default:
916 slow_jcc:
69d1aa31
RH
917 /* This actually generates good code for JC, JZ and JS. */
918 switch (jcc_op) {
919 case JCC_O:
920 cc = gen_prepare_eflags_o(s, reg);
921 break;
922 case JCC_B:
923 cc = gen_prepare_eflags_c(s, reg);
924 break;
925 case JCC_Z:
926 cc = gen_prepare_eflags_z(s, reg);
927 break;
928 case JCC_BE:
929 gen_compute_eflags(s);
930 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
931 .mask = CC_Z | CC_C };
932 break;
933 case JCC_S:
934 cc = gen_prepare_eflags_s(s, reg);
935 break;
936 case JCC_P:
937 cc = gen_prepare_eflags_p(s, reg);
938 break;
939 case JCC_L:
940 gen_compute_eflags(s);
941 if (TCGV_EQUAL(reg, cpu_cc_src)) {
942 reg = cpu_tmp0;
943 }
944 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
945 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
946 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
947 .mask = CC_S };
948 break;
949 default:
950 case JCC_LE:
951 gen_compute_eflags(s);
952 if (TCGV_EQUAL(reg, cpu_cc_src)) {
953 reg = cpu_tmp0;
954 }
955 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
956 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
957 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
958 .mask = CC_S | CC_Z };
959 break;
960 }
c365395e 961 break;
8e1c85e3 962 }
276e6b5f
RH
963
964 if (inv) {
965 cc.cond = tcg_invert_cond(cc.cond);
966 }
967 return cc;
8e1c85e3
FB
968}
969
cc8b6f5b
PB
970static void gen_setcc1(DisasContext *s, int b, TCGv reg)
971{
972 CCPrepare cc = gen_prepare_cc(s, b, reg);
973
974 if (cc.no_setcond) {
975 if (cc.cond == TCG_COND_EQ) {
976 tcg_gen_xori_tl(reg, cc.reg, 1);
977 } else {
978 tcg_gen_mov_tl(reg, cc.reg);
979 }
980 return;
981 }
982
983 if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
984 cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
985 tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
986 tcg_gen_andi_tl(reg, reg, 1);
987 return;
988 }
989 if (cc.mask != -1) {
990 tcg_gen_andi_tl(reg, cc.reg, cc.mask);
991 cc.reg = reg;
992 }
993 if (cc.use_reg2) {
994 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
995 } else {
996 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
997 }
998}
999
1000static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1001{
1002 gen_setcc1(s, JCC_B << 1, reg);
1003}
276e6b5f 1004
8e1c85e3
FB
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   Unlike gen_jcc1, this does not force the cc_op state dynamic, so it
   may only be used when the TB does not end at the branch.  */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
        cc.reg = cpu_T0;
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
1021
/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);

    /* Flush the known cc_op to the env and mark it dynamic, since
       both branch successors must see a consistent flags state.  */
    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
        cc.reg = cpu_T0;
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
1041
14ce26e7
FB
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem.
   Emit the "exit when ECX == 0" test for REP string instructions:
   if ECX is zero, jump to next_eip; otherwise fall through.  Returns
   the label (l2) that re-enters the exit path from within the loop.  */
static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}
1054
/* STOS: store AL/AX/EAX/RAX at ES:[EDI] and advance EDI by the
   direction-flag-scaled operand size.  (The MO_32 register read
   copies the whole register; the store below truncates to 'ot'.)  */
static inline void gen_stos(DisasContext *s, TCGMemOp ot)
{
    gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}
1063
/* LODS: load from DS:[ESI] into AL/AX/EAX/RAX and advance ESI.  */
static inline void gen_lods(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}
1072
/* SCAS: compare AL/AX/EAX/RAX with ES:[EDI] (sets flags only)
   and advance EDI.  */
static inline void gen_scas(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}
1081
/* CMPS: compare DS:[ESI] with ES:[EDI] (sets flags only) and
   advance both index registers.  */
static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}
1092
5223a942
EH
1093static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
1094{
1095 if (s->flags & HF_IOBPT_MASK) {
1096 TCGv_i32 t_size = tcg_const_i32(1 << ot);
1097 TCGv t_next = tcg_const_tl(s->pc - s->cs_base);
1098
1099 gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
1100 tcg_temp_free_i32(t_size);
1101 tcg_temp_free(t_next);
1102 }
1103}
1104
1105
/* INS: read from the I/O port in DX and store at ES:[EDI],
   advancing EDI.  Bracketed with gen_io_start/gen_io_end when
   icount is in use, since port I/O can observe virtual time.  */
static inline void gen_ins(DisasContext *s, TCGMemOp ot)
{
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(cpu_T0, 0);
    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T0, cpu_tmp2_i32);
    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
    gen_bpt_io(s, cpu_tmp2_i32, ot);
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_end();
    }
}
1127
/* OUTS: load from DS:[ESI] and write it to the I/O port in DX,
   advancing ESI.  Same icount bracketing as gen_ins.  */
static inline void gen_outs(DisasContext *s, TCGMemOp ot)
{
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);

    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T0);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_bpt_io(s, cpu_tmp2_i32, ot);
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_end();
    }
}
1147
/* same method as Valgrind : we generate jumps to current or next
   instruction */
/* Expand a rep-prefixed string op: run one iteration, decrement ECX,
   and jump back to the same instruction (cur_eip) until ECX == 0.  */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                 target_ulong cur_eip, target_ulong next_eip) \
{                                                                             \
    TCGLabel *l2;                                                             \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    /* a loop would cause two single step exceptions if ECX = 1              \
       before rep string_insn */                                              \
    if (s->repz_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}
1165
/* Like GEN_REPZ, but for SCAS/CMPS which also terminate on the ZF
   test selected by 'nz' (REPZ vs REPNZ).  */
#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
                                   int nz)                                    \
{                                                                             \
    TCGLabel *l2;                                                             \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    gen_update_cc_op(s);                                                      \
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);                                 \
    if (s->repz_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}
1183
/* Instantiate the REP wrappers; SCAS/CMPS also honour REPZ/REPNZ.  */
GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)
1191
a7812ae4
PB
/* Emit the x87 helper for arithmetic between ST0 and FT0, selected
   by the 3-bit reg field 'op' of the FP opcode.  Cases 2 and 3
   (FCOM/FCOMP) both emit fcom; the extra pop is done by the caller.  */
static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(cpu_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(cpu_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(cpu_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(cpu_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(cpu_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(cpu_env);
        break;
    }
}
2c0262af
FB
1221
1222/* NOTE the exception in "r" op ordering */
a7812ae4
PB
1223static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1224{
1225 TCGv_i32 tmp = tcg_const_i32(opreg);
1226 switch (op) {
d3eb5eae
BS
1227 case 0:
1228 gen_helper_fadd_STN_ST0(cpu_env, tmp);
1229 break;
1230 case 1:
1231 gen_helper_fmul_STN_ST0(cpu_env, tmp);
1232 break;
1233 case 4:
1234 gen_helper_fsubr_STN_ST0(cpu_env, tmp);
1235 break;
1236 case 5:
1237 gen_helper_fsub_STN_ST0(cpu_env, tmp);
1238 break;
1239 case 6:
1240 gen_helper_fdivr_STN_ST0(cpu_env, tmp);
1241 break;
1242 case 7:
1243 gen_helper_fdiv_STN_ST0(cpu_env, tmp);
1244 break;
a7812ae4
PB
1245 }
1246}
2c0262af
FB
1247
1248/* if d == OR_TMP0, it means memory operand (address in A0) */
d67dc9e6 1249static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
2c0262af 1250{
2c0262af 1251 if (d != OR_TMP0) {
1d1cc4d0 1252 gen_op_mov_v_reg(ot, cpu_T0, d);
2c0262af 1253 } else {
1d1cc4d0 1254 gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
2c0262af
FB
1255 }
1256 switch(op) {
1257 case OP_ADCL:
cc8b6f5b 1258 gen_compute_eflags_c(s1, cpu_tmp4);
1d1cc4d0
RH
1259 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
1260 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_tmp4);
d4faa3e0 1261 gen_op_st_rm_T0_A0(s1, ot, d);
988c3eb0
RH
1262 gen_op_update3_cc(cpu_tmp4);
1263 set_cc_op(s1, CC_OP_ADCB + ot);
cad3a37d 1264 break;
2c0262af 1265 case OP_SBBL:
cc8b6f5b 1266 gen_compute_eflags_c(s1, cpu_tmp4);
1d1cc4d0
RH
1267 tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
1268 tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_tmp4);
d4faa3e0 1269 gen_op_st_rm_T0_A0(s1, ot, d);
988c3eb0
RH
1270 gen_op_update3_cc(cpu_tmp4);
1271 set_cc_op(s1, CC_OP_SBBB + ot);
cad3a37d 1272 break;
2c0262af 1273 case OP_ADDL:
1d1cc4d0 1274 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
d4faa3e0 1275 gen_op_st_rm_T0_A0(s1, ot, d);
cad3a37d 1276 gen_op_update2_cc();
3ca51d07 1277 set_cc_op(s1, CC_OP_ADDB + ot);
2c0262af
FB
1278 break;
1279 case OP_SUBL:
1d1cc4d0
RH
1280 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
1281 tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
d4faa3e0 1282 gen_op_st_rm_T0_A0(s1, ot, d);
cad3a37d 1283 gen_op_update2_cc();
3ca51d07 1284 set_cc_op(s1, CC_OP_SUBB + ot);
2c0262af
FB
1285 break;
1286 default:
1287 case OP_ANDL:
1d1cc4d0 1288 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
d4faa3e0 1289 gen_op_st_rm_T0_A0(s1, ot, d);
cad3a37d 1290 gen_op_update1_cc();
3ca51d07 1291 set_cc_op(s1, CC_OP_LOGICB + ot);
57fec1fe 1292 break;
2c0262af 1293 case OP_ORL:
1d1cc4d0 1294 tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
d4faa3e0 1295 gen_op_st_rm_T0_A0(s1, ot, d);
cad3a37d 1296 gen_op_update1_cc();
3ca51d07 1297 set_cc_op(s1, CC_OP_LOGICB + ot);
57fec1fe 1298 break;
2c0262af 1299 case OP_XORL:
1d1cc4d0 1300 tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_T1);
d4faa3e0 1301 gen_op_st_rm_T0_A0(s1, ot, d);
cad3a37d 1302 gen_op_update1_cc();
3ca51d07 1303 set_cc_op(s1, CC_OP_LOGICB + ot);
2c0262af
FB
1304 break;
1305 case OP_CMPL:
1d1cc4d0
RH
1306 tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
1307 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
1308 tcg_gen_sub_tl(cpu_cc_dst, cpu_T0, cpu_T1);
3ca51d07 1309 set_cc_op(s1, CC_OP_SUBB + ot);
2c0262af
FB
1310 break;
1311 }
b6abf97d
FB
1312}
1313
/* INC (c > 0) / DEC (c <= 0).  If d == OR_TMP0, the operand is the
   memory location addressed by A0; otherwise a register.  The carry
   flag is preserved (saved into cc_src before the update).  */
static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
{
    if (d != OR_TMP0) {
        gen_op_mov_v_reg(ot, cpu_T0, d);
    } else {
        gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
    }
    gen_compute_eflags_c(s1, cpu_cc_src);
    if (c > 0) {
        tcg_gen_addi_tl(cpu_T0, cpu_T0, 1);
        set_cc_op(s1, CC_OP_INCB + ot);
    } else {
        tcg_gen_addi_tl(cpu_T0, cpu_T0, -1);
        set_cc_op(s1, CC_OP_DECB + ot);
    }
    gen_op_st_rm_T0_A0(s1, ot, d);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}
1333
d67dc9e6
RH
/* Update the condition-code state after a variable-count shift.
   'result' is the shifted value, 'shm1' the value shifted by
   (count - 1) (used to recover CF), and 'count' the masked shift
   count.  A zero count must leave all flags untouched, hence the
   conditional moves throughout.  */
static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents.  */
    z_tl = tcg_const_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }
    tcg_temp_free(z_tl);

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
        oldop = cpu_tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_const_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
    tcg_temp_free_i32(z32);
    tcg_temp_free_i32(s32);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
1378
/* Variable-count SHL/SHR/SAR with the count in cpu_T1.  If
   op1 == OR_TMP0 the operand is in memory at A0, else a register.
   is_right selects a right shift, is_arith an arithmetic one.  */
static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T0, op1);
    }

    tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
    /* tmp0 = value shifted by (count - 1), used for CF recovery.  */
    tcg_gen_subi_tl(cpu_tmp0, cpu_T1, 1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, cpu_T0);
            tcg_gen_sar_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
            tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
        } else {
            gen_extu(ot, cpu_T0);
            tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
            tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
        }
    } else {
        tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
        tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, cpu_T1, is_right);
}
1414
/* Immediate-count SHL/SHR/SAR.  A masked count of zero leaves both
   the value and the flags untouched, matching x86 semantics.  */
static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    else
        gen_op_mov_v_reg(ot, cpu_T0, op1);

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, cpu_T0);
                /* tmp4 = value >> (count - 1), for CF recovery.  */
                tcg_gen_sari_tl(cpu_tmp4, cpu_T0, op2 - 1);
                tcg_gen_sari_tl(cpu_T0, cpu_T0, op2);
            } else {
                gen_extu(ot, cpu_T0);
                tcg_gen_shri_tl(cpu_tmp4, cpu_T0, op2 - 1);
                tcg_gen_shri_tl(cpu_T0, cpu_T0, op2);
            }
        } else {
            tcg_gen_shli_tl(cpu_tmp4, cpu_T0, op2 - 1);
            tcg_gen_shli_tl(cpu_T0, cpu_T0, op2);
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
        set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    }
}
1454
/* Variable-count ROL/ROR with the count in cpu_T1.  Narrow operands
   are widened by replication so a single 32-bit rotate suffices.  */
static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
    TCGv_i32 t0, t1;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T0, op1);
    }

    tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);

    switch (ot) {
    case MO_8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
        tcg_gen_muli_tl(cpu_T0, cpu_T0, 0x01010101);
        goto do_long;
    case MO_16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_tl(cpu_T0, cpu_T0, cpu_T0, 16, 16);
        goto do_long;
    do_long:
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
        if (is_right) {
            tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        } else {
            tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        }
        tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
        break;
#endif
    default:
        if (is_right) {
            tcg_gen_rotr_tl(cpu_T0, cpu_T0, cpu_T1);
        } else {
            tcg_gen_rotl_tl(cpu_T0, cpu_T0, cpu_T1);
        }
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* We'll need the flags computed into CC_SRC.  */
    gen_compute_eflags(s);

    /* The value that was "rotated out" is now present at the other end
       of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
       since we've computed the flags into CC_SRC, these variables are
       currently dead.  */
    if (is_right) {
        tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
        tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
    } else {
        tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
    }
    tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
    tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);

    /* Now conditionally store the new CC_OP value.  If the shift count
       is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
       Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
       exactly as we computed above.  */
    t0 = tcg_const_i32(0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, cpu_T1);
    tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
    tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
                        cpu_tmp2_i32, cpu_tmp3_i32);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
1539
/* Immediate-count ROL/ROR.  With a nonzero masked count the flags
   can be set statically (CC_OP_ADCOX); a zero count is a no-op for
   both value and flags.  */
static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
                          int is_right)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);
    int shift;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_32:
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
            if (is_right) {
                tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            } else {
                tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            }
            tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
            break;
#endif
        default:
            if (is_right) {
                tcg_gen_rotri_tl(cpu_T0, cpu_T0, op2);
            } else {
                tcg_gen_rotli_tl(cpu_T0, cpu_T0, op2);
            }
            break;
        case MO_8:
            mask = 7;
            goto do_shifts;
        case MO_16:
            mask = 15;
        do_shifts:
            /* Narrow rotate built from two shifts and an OR.  */
            shift = op2 & mask;
            if (is_right) {
                shift = mask + 1 - shift;
            }
            gen_extu(ot, cpu_T0);
            tcg_gen_shli_tl(cpu_tmp0, cpu_T0, shift);
            tcg_gen_shri_tl(cpu_T0, cpu_T0, mask + 1 - shift);
            tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
            break;
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    if (op2 != 0) {
        /* Compute the flags into CC_SRC.  */
        gen_compute_eflags(s);

        /* The value that was "rotated out" is now present at the other end
           of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
           since we've computed the flags into CC_SRC, these variables are
           currently dead.  */
        if (is_right) {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
            tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
        } else {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
        }
        tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
        tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
        set_cc_op(s, CC_OP_ADCOX);
    }
}
1616
/* XXX: add faster immediate = 1 case */
/* RCL/RCR (rotate through carry) with the count in cpu_T1.  The
   rotation involves CF, so flags must be fully materialized first;
   the size-specific helpers then do the rotate and flag update.  */
static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                           int is_right)
{
    gen_compute_eflags(s);
    assert(s->cc_op == CC_OP_EFLAGS);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    else
        gen_op_mov_v_reg(ot, cpu_T0, op1);

    if (is_right) {
        switch (ot) {
        case MO_8:
            gen_helper_rcrb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
        case MO_16:
            gen_helper_rcrw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
        case MO_32:
            gen_helper_rcrl(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rcrq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
#endif
        default:
            tcg_abort();
        }
    } else {
        switch (ot) {
        case MO_8:
            gen_helper_rclb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
        case MO_16:
            gen_helper_rclw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
        case MO_32:
            gen_helper_rcll(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rclq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
#endif
        default:
            tcg_abort();
        }
    }
    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);
}
1672
/* XXX: add faster immediate case */
/* SHLD/SHRD (double-precision shift).  The destination is in T0, the
   fill operand in T1, and the count in count_in.  A zero masked count
   leaves value and flags unchanged (handled via gen_shift_flags).  */
static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                             bool is_right, TCGv count_in)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);
    TCGv count;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T0, op1);
    }

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
        if (is_right) {
            tcg_gen_deposit_tl(cpu_tmp0, cpu_T0, cpu_T1, 16, 16);
            tcg_gen_mov_tl(cpu_T1, cpu_T0);
            tcg_gen_mov_tl(cpu_T0, cpu_tmp0);
        } else {
            tcg_gen_deposit_tl(cpu_T1, cpu_T0, cpu_T1, 16, 16);
        }
        /* FALLTHRU */
#ifdef TARGET_X86_64
    case MO_32:
        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
        tcg_gen_subi_tl(cpu_tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(cpu_T0, cpu_T0, cpu_T1);
            tcg_gen_shr_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
            tcg_gen_shr_i64(cpu_T0, cpu_T0, count);
        } else {
            tcg_gen_concat_tl_i64(cpu_T0, cpu_T1, cpu_T0);
            tcg_gen_shl_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
            tcg_gen_shl_i64(cpu_T0, cpu_T0, count);
            tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
            tcg_gen_shri_i64(cpu_T0, cpu_T0, 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(cpu_tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);

            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
            tcg_gen_shr_tl(cpu_T0, cpu_T0, count);
            tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_tmp4);
        } else {
            tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour.  */
                tcg_gen_subfi_tl(cpu_tmp4, 33, count);
                tcg_gen_shr_tl(cpu_tmp4, cpu_T1, cpu_tmp4);
                tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
            }

            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
            tcg_gen_shl_tl(cpu_T0, cpu_T0, count);
            tcg_gen_shr_tl(cpu_T1, cpu_T1, cpu_tmp4);
        }
        /* Zero the fill operand when count == 0 so the OR below is
           a no-op in that case.  */
        tcg_gen_movi_tl(cpu_tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T1, count, cpu_tmp4,
                           cpu_tmp4, cpu_T1);
        tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, count, is_right);
    tcg_temp_free(count);
}
1754
d67dc9e6 1755static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
b6abf97d
FB
1756{
1757 if (s != OR_TMP1)
1d1cc4d0 1758 gen_op_mov_v_reg(ot, cpu_T1, s);
b6abf97d
FB
1759 switch(op) {
1760 case OP_ROL:
1761 gen_rot_rm_T1(s1, ot, d, 0);
1762 break;
1763 case OP_ROR:
1764 gen_rot_rm_T1(s1, ot, d, 1);
1765 break;
1766 case OP_SHL:
1767 case OP_SHL1:
1768 gen_shift_rm_T1(s1, ot, d, 0, 0);
1769 break;
1770 case OP_SHR:
1771 gen_shift_rm_T1(s1, ot, d, 1, 0);
1772 break;
1773 case OP_SAR:
1774 gen_shift_rm_T1(s1, ot, d, 1, 1);
1775 break;
1776 case OP_RCL:
1777 gen_rotc_rm_T1(s1, ot, d, 0);
1778 break;
1779 case OP_RCR:
1780 gen_rotc_rm_T1(s1, ot, d, 1);
1781 break;
1782 }
2c0262af
FB
1783}
1784
d67dc9e6 1785static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
2c0262af 1786{
c1c37968 1787 switch(op) {
8cd6345d 1788 case OP_ROL:
1789 gen_rot_rm_im(s1, ot, d, c, 0);
1790 break;
1791 case OP_ROR:
1792 gen_rot_rm_im(s1, ot, d, c, 1);
1793 break;
c1c37968
FB
1794 case OP_SHL:
1795 case OP_SHL1:
1796 gen_shift_rm_im(s1, ot, d, c, 0, 0);
1797 break;
1798 case OP_SHR:
1799 gen_shift_rm_im(s1, ot, d, c, 1, 0);
1800 break;
1801 case OP_SAR:
1802 gen_shift_rm_im(s1, ot, d, c, 1, 1);
1803 break;
1804 default:
1805 /* currently not optimized */
1d1cc4d0 1806 tcg_gen_movi_tl(cpu_T1, c);
c1c37968
FB
1807 gen_shift(s1, op, ot, d, OR_TMP1);
1808 break;
1809 }
2c0262af
FB
1810}
1811
/* Decompose an address. */

/* Result of modrm address decoding: effective address =
   seg(def_seg) : reg[base] + (reg[index] << scale) + disp,
   where base/index of -1 mean "absent" (base == -2 is RIP-relative).  */
typedef struct AddressParts {
    int def_seg;         /* default segment register */
    int base;            /* base register index, or -1 / -2 */
    int index;           /* index register index, or -1 */
    int scale;           /* index is shifted left by this amount */
    target_long disp;    /* constant displacement */
} AddressParts;
1821
/* Decode the modrm/sib/displacement bytes following the opcode into
   an AddressParts description, consuming bytes from s->pc.  Handles
   16/32/64-bit address sizes including RIP-relative addressing.  */
static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
                                    int modrm)
{
    int def_seg, base, index, scale, mod, rm;
    target_long disp;
    bool havesib;

    def_seg = R_DS;
    index = -1;
    scale = 0;
    disp = 0;

    mod = (modrm >> 6) & 3;
    rm = modrm & 7;
    base = rm | REX_B(s);

    if (mod == 3) {
        /* Normally filtered out earlier, but including this path
           simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
        goto done;
    }

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        havesib = 0;
        if (rm == 4) {
            /* SIB byte follows.  */
            int code = cpu_ldub_code(env, s->pc++);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            if (index == 4) {
                index = -1; /* no index */
            }
            base = (code & 7) | REX_B(s);
            havesib = 1;
        }

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)cpu_ldl_code(env, s->pc);
                s->pc += 4;
                if (CODE64(s) && !havesib) {
                    /* RIP-relative: displacement is from the end of
                       the instruction.  */
                    base = -2;
                    disp += s->pc + s->rip_offset;
                }
            }
            break;
        case 1:
            disp = (int8_t)cpu_ldub_code(env, s->pc++);
            break;
        default:
        case 2:
            disp = (int32_t)cpu_ldl_code(env, s->pc);
            s->pc += 4;
            break;
        }

        /* For correct popl handling with esp.  */
        if (base == R_ESP && s->popl_esp_hack) {
            disp += s->popl_esp_hack;
        }
        if (base == R_EBP || base == R_ESP) {
            def_seg = R_SS;
        }
        break;

    case MO_16:
        if (mod == 0) {
            if (rm == 6) {
                /* Absolute 16-bit address, no base/index.  */
                base = -1;
                disp = cpu_lduw_code(env, s->pc);
                s->pc += 2;
                break;
            }
        } else if (mod == 1) {
            disp = (int8_t)cpu_ldub_code(env, s->pc++);
        } else {
            disp = (int16_t)cpu_lduw_code(env, s->pc);
            s->pc += 2;
        }

        /* Fixed base/index combinations of 16-bit addressing.  */
        switch (rm) {
        case 0:
            base = R_EBX;
            index = R_ESI;
            break;
        case 1:
            base = R_EBX;
            index = R_EDI;
            break;
        case 2:
            base = R_EBP;
            index = R_ESI;
            def_seg = R_SS;
            break;
        case 3:
            base = R_EBP;
            index = R_EDI;
            def_seg = R_SS;
            break;
        case 4:
            base = R_ESI;
            break;
        case 5:
            base = R_EDI;
            break;
        case 6:
            base = R_EBP;
            def_seg = R_SS;
            break;
        default:
        case 7:
            base = R_EBX;
            break;
        }
        break;

    default:
        tcg_abort();
    }

 done:
    return (AddressParts){ def_seg, base, index, scale, disp };
}
1948
a074ce42
RH
/* Compute the address, with a minimum number of TCG ops.  Returns
   either a guest register TCGv (when it can be used directly) or
   cpu_A0 holding the computed sum.  */
static TCGv gen_lea_modrm_1(AddressParts a)
{
    TCGv ea;

    TCGV_UNUSED(ea);
    if (a.index >= 0) {
        if (a.scale == 0) {
            ea = cpu_regs[a.index];
        } else {
            tcg_gen_shli_tl(cpu_A0, cpu_regs[a.index], a.scale);
            ea = cpu_A0;
        }
        if (a.base >= 0) {
            tcg_gen_add_tl(cpu_A0, ea, cpu_regs[a.base]);
            ea = cpu_A0;
        }
    } else if (a.base >= 0) {
        ea = cpu_regs[a.base];
    }
    if (TCGV_IS_UNUSED(ea)) {
        /* Neither base nor index: pure displacement.  */
        tcg_gen_movi_tl(cpu_A0, a.disp);
        ea = cpu_A0;
    } else if (a.disp != 0) {
        tcg_gen_addi_tl(cpu_A0, ea, a.disp);
        ea = cpu_A0;
    }

    return ea;
}
1d71ddb1 1979
a074ce42
RH
/* Decode a modrm operand and leave the segment-adjusted effective
   address in cpu_A0.  */
static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    AddressParts a = gen_lea_modrm_0(env, s, modrm);
    TCGv ea = gen_lea_modrm_1(a);
    gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
}
1986
/* Consume a modrm operand's bytes without emitting any code
   (multi-byte NOP and similar).  */
static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    (void)gen_lea_modrm_0(env, s, modrm);
}
1991
523e28d7
RH
/* Used for BNDCL, BNDCU, BNDCN.  Compare the effective address of
   the modrm operand against bound BNDV under COND and call the
   bndck helper to raise #BR on failure.  */
static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
                      TCGCond cond, TCGv_i64 bndv)
{
    TCGv ea = gen_lea_modrm_1(gen_lea_modrm_0(env, s, modrm));

    tcg_gen_extu_tl_i64(cpu_tmp1_i64, ea);
    if (!CODE64(s)) {
        /* Outside 64-bit mode only the low 32 address bits matter.  */
        tcg_gen_ext32u_i64(cpu_tmp1_i64, cpu_tmp1_i64);
    }
    tcg_gen_setcond_i64(cond, cpu_tmp1_i64, cpu_tmp1_i64, bndv);
    tcg_gen_extrl_i64_i32(cpu_tmp2_i32, cpu_tmp1_i64);
    gen_helper_bndck(cpu_env, cpu_tmp2_i32);
}
2006
664e0f19
FB
2007/* used for LEA and MOV AX, mem */
2008static void gen_add_A0_ds_seg(DisasContext *s)
2009{
77ebcad0 2010 gen_lea_v_seg(s, s->aflag, cpu_A0, R_DS, s->override);
664e0f19
FB
2011}
2012
222a3336 2013/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2c0262af 2014 OR_TMP0 */
0af10c86 2015static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
d67dc9e6 2016 TCGMemOp ot, int reg, int is_store)
2c0262af 2017{
4eeb3939 2018 int mod, rm;
2c0262af
FB
2019
2020 mod = (modrm >> 6) & 3;
14ce26e7 2021 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
2022 if (mod == 3) {
2023 if (is_store) {
2024 if (reg != OR_TMP0)
1d1cc4d0
RH
2025 gen_op_mov_v_reg(ot, cpu_T0, reg);
2026 gen_op_mov_reg_v(ot, rm, cpu_T0);
2c0262af 2027 } else {
1d1cc4d0 2028 gen_op_mov_v_reg(ot, cpu_T0, rm);
2c0262af 2029 if (reg != OR_TMP0)
1d1cc4d0 2030 gen_op_mov_reg_v(ot, reg, cpu_T0);
2c0262af
FB
2031 }
2032 } else {
4eeb3939 2033 gen_lea_modrm(env, s, modrm);
2c0262af
FB
2034 if (is_store) {
2035 if (reg != OR_TMP0)
1d1cc4d0
RH
2036 gen_op_mov_v_reg(ot, cpu_T0, reg);
2037 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
2c0262af 2038 } else {
1d1cc4d0 2039 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
2c0262af 2040 if (reg != OR_TMP0)
1d1cc4d0 2041 gen_op_mov_reg_v(ot, reg, cpu_T0);
2c0262af
FB
2042 }
2043 }
2044}
2045
d67dc9e6 2046static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
2c0262af
FB
2047{
2048 uint32_t ret;
2049
d67dc9e6 2050 switch (ot) {
4ba9938c 2051 case MO_8:
0af10c86 2052 ret = cpu_ldub_code(env, s->pc);
2c0262af
FB
2053 s->pc++;
2054 break;
4ba9938c 2055 case MO_16:
0af10c86 2056 ret = cpu_lduw_code(env, s->pc);
2c0262af
FB
2057 s->pc += 2;
2058 break;
4ba9938c 2059 case MO_32:
d67dc9e6
RH
2060#ifdef TARGET_X86_64
2061 case MO_64:
2062#endif
0af10c86 2063 ret = cpu_ldl_code(env, s->pc);
2c0262af
FB
2064 s->pc += 4;
2065 break;
d67dc9e6
RH
2066 default:
2067 tcg_abort();
2c0262af
FB
2068 }
2069 return ret;
2070}
2071
d67dc9e6 2072static inline int insn_const_size(TCGMemOp ot)
14ce26e7 2073{
4ba9938c 2074 if (ot <= MO_32) {
14ce26e7 2075 return 1 << ot;
4ba9938c 2076 } else {
14ce26e7 2077 return 4;
4ba9938c 2078 }
14ce26e7
FB
2079}
2080
6e256c93
FB
2081static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2082{
2083 TranslationBlock *tb;
2084 target_ulong pc;
2085
2086 pc = s->cs_base + eip;
2087 tb = s->tb;
2088 /* NOTE: we handle the case where the TB spans two pages here */
2089 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2090 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2091 /* jump to same page: we can use a direct jump */
57fec1fe 2092 tcg_gen_goto_tb(tb_num);
6e256c93 2093 gen_jmp_im(eip);
8cfd0495 2094 tcg_gen_exit_tb((uintptr_t)tb + tb_num);
6e256c93
FB
2095 } else {
2096 /* jump to another page: currently not optimized */
2097 gen_jmp_im(eip);
2098 gen_eob(s);
2099 }
2100}
2101
5fafdf24 2102static inline void gen_jcc(DisasContext *s, int b,
14ce26e7 2103 target_ulong val, target_ulong next_eip)
2c0262af 2104{
42a268c2 2105 TCGLabel *l1, *l2;
3b46e624 2106
2c0262af 2107 if (s->jmp_opt) {
14ce26e7 2108 l1 = gen_new_label();
b27fc131 2109 gen_jcc1(s, b, l1);
dc259201 2110
6e256c93 2111 gen_goto_tb(s, 0, next_eip);
14ce26e7
FB
2112
2113 gen_set_label(l1);
6e256c93 2114 gen_goto_tb(s, 1, val);
5779406a 2115 s->is_jmp = DISAS_TB_JUMP;
2c0262af 2116 } else {
14ce26e7
FB
2117 l1 = gen_new_label();
2118 l2 = gen_new_label();
b27fc131 2119 gen_jcc1(s, b, l1);
8e1c85e3 2120
14ce26e7 2121 gen_jmp_im(next_eip);
8e1c85e3
FB
2122 tcg_gen_br(l2);
2123
14ce26e7
FB
2124 gen_set_label(l1);
2125 gen_jmp_im(val);
2126 gen_set_label(l2);
2c0262af
FB
2127 gen_eob(s);
2128 }
2129}
2130
d67dc9e6 2131static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
f32d3781
PB
2132 int modrm, int reg)
2133{
57eb0cc8 2134 CCPrepare cc;
f32d3781 2135
57eb0cc8 2136 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
f32d3781 2137
1d1cc4d0 2138 cc = gen_prepare_cc(s, b, cpu_T1);
57eb0cc8
RH
2139 if (cc.mask != -1) {
2140 TCGv t0 = tcg_temp_new();
2141 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2142 cc.reg = t0;
2143 }
2144 if (!cc.use_reg2) {
2145 cc.reg2 = tcg_const_tl(cc.imm);
f32d3781
PB
2146 }
2147
1d1cc4d0
RH
2148 tcg_gen_movcond_tl(cc.cond, cpu_T0, cc.reg, cc.reg2,
2149 cpu_T0, cpu_regs[reg]);
2150 gen_op_mov_reg_v(ot, reg, cpu_T0);
57eb0cc8
RH
2151
2152 if (cc.mask != -1) {
2153 tcg_temp_free(cc.reg);
2154 }
2155 if (!cc.use_reg2) {
2156 tcg_temp_free(cc.reg2);
2157 }
f32d3781
PB
2158}
2159
3bd7da9e
FB
2160static inline void gen_op_movl_T0_seg(int seg_reg)
2161{
1d1cc4d0 2162 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
3bd7da9e
FB
2163 offsetof(CPUX86State,segs[seg_reg].selector));
2164}
2165
2166static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2167{
1d1cc4d0
RH
2168 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
2169 tcg_gen_st32_tl(cpu_T0, cpu_env,
3bd7da9e 2170 offsetof(CPUX86State,segs[seg_reg].selector));
1d1cc4d0 2171 tcg_gen_shli_tl(cpu_seg_base[seg_reg], cpu_T0, 4);
3bd7da9e
FB
2172}
2173
2c0262af
FB
2174/* move T0 to seg_reg and compute if the CPU state may change. Never
2175 call this function with seg_reg == R_CS */
100ec099 2176static void gen_movl_seg_T0(DisasContext *s, int seg_reg)
2c0262af 2177{
3415a4dd 2178 if (s->pe && !s->vm86) {
1d1cc4d0 2179 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
2999a0b2 2180 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
dc196a57
FB
2181 /* abort translation because the addseg value may change or
2182 because ss32 may change. For R_SS, translation must always
2183 stop as a special handling must be done to disable hardware
2184 interrupts for the next instruction */
2185 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
5779406a 2186 s->is_jmp = DISAS_TB_JUMP;
3415a4dd 2187 } else {
3bd7da9e 2188 gen_op_movl_seg_T0_vm(seg_reg);
dc196a57 2189 if (seg_reg == R_SS)
5779406a 2190 s->is_jmp = DISAS_TB_JUMP;
3415a4dd 2191 }
2c0262af
FB
2192}
2193
0573fbfc
TS
2194static inline int svm_is_rep(int prefixes)
2195{
2196 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2197}
2198
872929aa 2199static inline void
0573fbfc 2200gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
b8b6a50b 2201 uint32_t type, uint64_t param)
0573fbfc 2202{
872929aa
FB
2203 /* no SVM activated; fast case */
2204 if (likely(!(s->flags & HF_SVMI_MASK)))
2205 return;
773cdfcc 2206 gen_update_cc_op(s);
872929aa 2207 gen_jmp_im(pc_start - s->cs_base);
052e80d5 2208 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
a7812ae4 2209 tcg_const_i64(param));
0573fbfc
TS
2210}
2211
872929aa 2212static inline void
0573fbfc
TS
2213gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2214{
872929aa 2215 gen_svm_check_intercept_param(s, pc_start, type, 0);
0573fbfc
TS
2216}
2217
4f31916f
FB
2218static inline void gen_stack_update(DisasContext *s, int addend)
2219{
64ae256c 2220 gen_op_add_reg_im(mo_stacksize(s), R_ESP, addend);
4f31916f
FB
2221}
2222
432baffe
RH
2223/* Generate a push. It depends on ss32, addseg and dflag. */
2224static void gen_push_v(DisasContext *s, TCGv val)
2c0262af 2225{
64ae256c
RH
2226 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2227 TCGMemOp a_ot = mo_stacksize(s);
432baffe
RH
2228 int size = 1 << d_ot;
2229 TCGv new_esp = cpu_A0;
2230
2231 tcg_gen_subi_tl(cpu_A0, cpu_regs[R_ESP], size);
2c0262af 2232
77ebcad0 2233 if (!CODE64(s)) {
432baffe
RH
2234 if (s->addseg) {
2235 new_esp = cpu_tmp4;
2236 tcg_gen_mov_tl(new_esp, cpu_A0);
2c0262af 2237 }
77ebcad0 2238 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2c0262af 2239 }
432baffe
RH
2240
2241 gen_op_st_v(s, d_ot, val, cpu_A0);
2242 gen_op_mov_reg_v(a_ot, R_ESP, new_esp);
2c0262af
FB
2243}
2244
4f31916f 2245/* two step pop is necessary for precise exceptions */
8e31d234 2246static TCGMemOp gen_pop_T0(DisasContext *s)
2c0262af 2247{
8e31d234 2248 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
8e31d234 2249
77ebcad0 2250 gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
1d1cc4d0 2251 gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
8e31d234 2252
8e31d234 2253 return d_ot;
2c0262af
FB
2254}
2255
77ebcad0 2256static inline void gen_pop_update(DisasContext *s, TCGMemOp ot)
2c0262af 2257{
8e31d234 2258 gen_stack_update(s, 1 << ot);
2c0262af
FB
2259}
2260
77ebcad0 2261static inline void gen_stack_A0(DisasContext *s)
2c0262af 2262{
77ebcad0 2263 gen_lea_v_seg(s, s->ss32 ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
2c0262af
FB
2264}
2265
2c0262af
FB
2266static void gen_pusha(DisasContext *s)
2267{
d37ea0c0
RH
2268 TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
2269 TCGMemOp d_ot = s->dflag;
2270 int size = 1 << d_ot;
2c0262af 2271 int i;
d37ea0c0
RH
2272
2273 for (i = 0; i < 8; i++) {
2274 tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], (i - 8) * size);
2275 gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
2276 gen_op_st_v(s, d_ot, cpu_regs[7 - i], cpu_A0);
2277 }
2278
2279 gen_stack_update(s, -8 * size);
2280}
2281
2c0262af
FB
2282static void gen_popa(DisasContext *s)
2283{
d37ea0c0
RH
2284 TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
2285 TCGMemOp d_ot = s->dflag;
2286 int size = 1 << d_ot;
2c0262af 2287 int i;
d37ea0c0
RH
2288
2289 for (i = 0; i < 8; i++) {
2c0262af 2290 /* ESP is not reloaded */
d37ea0c0
RH
2291 if (7 - i == R_ESP) {
2292 continue;
2c0262af 2293 }
d37ea0c0
RH
2294 tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], i * size);
2295 gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
1d1cc4d0
RH
2296 gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
2297 gen_op_mov_reg_v(d_ot, 7 - i, cpu_T0);
2c0262af 2298 }
d37ea0c0
RH
2299
2300 gen_stack_update(s, 8 * size);
2c0262af
FB
2301}
2302
2c0262af
FB
2303static void gen_enter(DisasContext *s, int esp_addend, int level)
2304{
743e398e
RH
2305 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2306 TCGMemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
2307 int size = 1 << d_ot;
2c0262af 2308
743e398e 2309 /* Push BP; compute FrameTemp into T1. */
1d1cc4d0
RH
2310 tcg_gen_subi_tl(cpu_T1, cpu_regs[R_ESP], size);
2311 gen_lea_v_seg(s, a_ot, cpu_T1, R_SS, -1);
743e398e
RH
2312 gen_op_st_v(s, d_ot, cpu_regs[R_EBP], cpu_A0);
2313
2314 level &= 31;
2315 if (level != 0) {
2316 int i;
2317
2318 /* Copy level-1 pointers from the previous frame. */
2319 for (i = 1; i < level; ++i) {
2320 tcg_gen_subi_tl(cpu_A0, cpu_regs[R_EBP], size * i);
2321 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2322 gen_op_ld_v(s, d_ot, cpu_tmp0, cpu_A0);
2323
1d1cc4d0 2324 tcg_gen_subi_tl(cpu_A0, cpu_T1, size * i);
743e398e
RH
2325 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2326 gen_op_st_v(s, d_ot, cpu_tmp0, cpu_A0);
8f091a59 2327 }
743e398e
RH
2328
2329 /* Push the current FrameTemp as the last level. */
1d1cc4d0 2330 tcg_gen_subi_tl(cpu_A0, cpu_T1, size * level);
743e398e 2331 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
1d1cc4d0 2332 gen_op_st_v(s, d_ot, cpu_T1, cpu_A0);
2c0262af 2333 }
743e398e
RH
2334
2335 /* Copy the FrameTemp value to EBP. */
1d1cc4d0 2336 gen_op_mov_reg_v(a_ot, R_EBP, cpu_T1);
743e398e
RH
2337
2338 /* Compute the final value of ESP. */
1d1cc4d0
RH
2339 tcg_gen_subi_tl(cpu_T1, cpu_T1, esp_addend + size * level);
2340 gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
2c0262af
FB
2341}
2342
2045f04c
RH
2343static void gen_leave(DisasContext *s)
2344{
2345 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2346 TCGMemOp a_ot = mo_stacksize(s);
2347
2348 gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
1d1cc4d0 2349 gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
2045f04c 2350
1d1cc4d0 2351 tcg_gen_addi_tl(cpu_T1, cpu_regs[R_EBP], 1 << d_ot);
2045f04c 2352
1d1cc4d0
RH
2353 gen_op_mov_reg_v(d_ot, R_EBP, cpu_T0);
2354 gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
2045f04c
RH
2355}
2356
14ce26e7 2357static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2c0262af 2358{
773cdfcc 2359 gen_update_cc_op(s);
14ce26e7 2360 gen_jmp_im(cur_eip);
77b2bc2c 2361 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
5779406a 2362 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2363}
2364
2365/* an interrupt is different from an exception because of the
7f75ffd3 2366 privilege checks */
5fafdf24 2367static void gen_interrupt(DisasContext *s, int intno,
14ce26e7 2368 target_ulong cur_eip, target_ulong next_eip)
2c0262af 2369{
773cdfcc 2370 gen_update_cc_op(s);
14ce26e7 2371 gen_jmp_im(cur_eip);
77b2bc2c 2372 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
a7812ae4 2373 tcg_const_i32(next_eip - cur_eip));
5779406a 2374 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2375}
2376
14ce26e7 2377static void gen_debug(DisasContext *s, target_ulong cur_eip)
2c0262af 2378{
773cdfcc 2379 gen_update_cc_op(s);
14ce26e7 2380 gen_jmp_im(cur_eip);
4a7443be 2381 gen_helper_debug(cpu_env);
5779406a 2382 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2383}
2384
7f0b7141
RH
2385static void gen_set_hflag(DisasContext *s, uint32_t mask)
2386{
2387 if ((s->flags & mask) == 0) {
2388 TCGv_i32 t = tcg_temp_new_i32();
2389 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2390 tcg_gen_ori_i32(t, t, mask);
2391 tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2392 tcg_temp_free_i32(t);
2393 s->flags |= mask;
2394 }
2395}
2396
2397static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2398{
2399 if (s->flags & mask) {
2400 TCGv_i32 t = tcg_temp_new_i32();
2401 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2402 tcg_gen_andi_i32(t, t, ~mask);
2403 tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2404 tcg_temp_free_i32(t);
2405 s->flags &= ~mask;
2406 }
2407}
2408
7d117ce8
RH
2409/* Clear BND registers during legacy branches. */
2410static void gen_bnd_jmp(DisasContext *s)
2411{
2412 /* Do nothing if BND prefix present, MPX is disabled, or if the
2413 BNDREGs are known to be in INIT state already. The helper
2414 itself will check BNDPRESERVE at runtime. */
2415 if ((s->prefix & PREFIX_REPNZ) == 0
2416 && (s->flags & HF_MPX_EN_MASK) == 0
2417 && (s->flags & HF_MPX_IU_MASK) == 0) {
2418 gen_helper_bnd_jmp(cpu_env);
2419 }
2420}
2421
2c0262af
FB
2422/* generate a generic end of block. Trace exception is also generated
2423 if needed */
2424static void gen_eob(DisasContext *s)
2425{
773cdfcc 2426 gen_update_cc_op(s);
7f0b7141 2427 gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
a2397807 2428 if (s->tb->flags & HF_RF_MASK) {
f0967a1a 2429 gen_helper_reset_rf(cpu_env);
a2397807 2430 }
34865134 2431 if (s->singlestep_enabled) {
4a7443be 2432 gen_helper_debug(cpu_env);
34865134 2433 } else if (s->tf) {
4a7443be 2434 gen_helper_single_step(cpu_env);
2c0262af 2435 } else {
57fec1fe 2436 tcg_gen_exit_tb(0);
2c0262af 2437 }
5779406a 2438 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2439}
2440
2441/* generate a jump to eip. No segment change must happen before as a
2442 direct call to the next block may occur */
14ce26e7 2443static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2c0262af 2444{
a3251186
RH
2445 gen_update_cc_op(s);
2446 set_cc_op(s, CC_OP_DYNAMIC);
2c0262af 2447 if (s->jmp_opt) {
6e256c93 2448 gen_goto_tb(s, tb_num, eip);
5779406a 2449 s->is_jmp = DISAS_TB_JUMP;
2c0262af 2450 } else {
14ce26e7 2451 gen_jmp_im(eip);
2c0262af
FB
2452 gen_eob(s);
2453 }
2454}
2455
14ce26e7
FB
2456static void gen_jmp(DisasContext *s, target_ulong eip)
2457{
2458 gen_jmp_tb(s, eip, 0);
2459}
2460
323d1876 2461static inline void gen_ldq_env_A0(DisasContext *s, int offset)
8686c490 2462{
3c5f4116 2463 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
b6abf97d 2464 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
8686c490 2465}
664e0f19 2466
323d1876 2467static inline void gen_stq_env_A0(DisasContext *s, int offset)
8686c490 2468{
b6abf97d 2469 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
3523e4bd 2470 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
8686c490 2471}
664e0f19 2472
323d1876 2473static inline void gen_ldo_env_A0(DisasContext *s, int offset)
8686c490 2474{
5c42a7cd 2475 int mem_index = s->mem_index;
3c5f4116 2476 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
19cbd87c 2477 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
8686c490 2478 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
3c5f4116 2479 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
19cbd87c 2480 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
8686c490 2481}
14ce26e7 2482
323d1876 2483static inline void gen_sto_env_A0(DisasContext *s, int offset)
8686c490 2484{
5c42a7cd 2485 int mem_index = s->mem_index;
19cbd87c 2486 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
3523e4bd 2487 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
8686c490 2488 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
19cbd87c 2489 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
3523e4bd 2490 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
8686c490 2491}
14ce26e7 2492
5af45186
FB
2493static inline void gen_op_movo(int d_offset, int s_offset)
2494{
19cbd87c
EH
2495 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0)));
2496 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0)));
2497 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1)));
2498 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1)));
5af45186
FB
2499}
2500
2501static inline void gen_op_movq(int d_offset, int s_offset)
2502{
b6abf97d
FB
2503 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2504 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
5af45186
FB
2505}
2506
2507static inline void gen_op_movl(int d_offset, int s_offset)
2508{
b6abf97d
FB
2509 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2510 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
5af45186
FB
2511}
2512
2513static inline void gen_op_movq_env_0(int d_offset)
2514{
b6abf97d
FB
2515 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2516 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
5af45186 2517}
664e0f19 2518
d3eb5eae
BS
2519typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2520typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2521typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2522typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2523typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2524typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2525 TCGv_i32 val);
c4baa050 2526typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
d3eb5eae
BS
2527typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2528 TCGv val);
c4baa050 2529
5af45186
FB
2530#define SSE_SPECIAL ((void *)1)
2531#define SSE_DUMMY ((void *)2)
664e0f19 2532
a7812ae4
PB
2533#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2534#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2535 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
5af45186 2536
d3eb5eae 2537static const SSEFunc_0_epp sse_op_table1[256][4] = {
a35f3ec7
AJ
2538 /* 3DNow! extensions */
2539 [0x0e] = { SSE_DUMMY }, /* femms */
2540 [0x0f] = { SSE_DUMMY }, /* pf... */
664e0f19
FB
2541 /* pure SSE operations */
2542 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2543 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
465e9838 2544 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
664e0f19 2545 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
a7812ae4
PB
2546 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2547 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
664e0f19
FB
2548 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2549 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2550
2551 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2552 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2553 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
d9f4bb27 2554 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
664e0f19
FB
2555 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2556 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
a7812ae4
PB
2557 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2558 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
664e0f19
FB
2559 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2560 [0x51] = SSE_FOP(sqrt),
a7812ae4
PB
2561 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2562 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2563 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2564 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2565 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2566 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
664e0f19
FB
2567 [0x58] = SSE_FOP(add),
2568 [0x59] = SSE_FOP(mul),
a7812ae4
PB
2569 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2570 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2571 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
664e0f19
FB
2572 [0x5c] = SSE_FOP(sub),
2573 [0x5d] = SSE_FOP(min),
2574 [0x5e] = SSE_FOP(div),
2575 [0x5f] = SSE_FOP(max),
2576
2577 [0xc2] = SSE_FOP(cmpeq),
d3eb5eae
BS
2578 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2579 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
664e0f19 2580
7073fbad
RH
2581 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2582 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2583 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
4242b1bd 2584
664e0f19
FB
2585 /* MMX ops and their SSE extensions */
2586 [0x60] = MMX_OP2(punpcklbw),
2587 [0x61] = MMX_OP2(punpcklwd),
2588 [0x62] = MMX_OP2(punpckldq),
2589 [0x63] = MMX_OP2(packsswb),
2590 [0x64] = MMX_OP2(pcmpgtb),
2591 [0x65] = MMX_OP2(pcmpgtw),
2592 [0x66] = MMX_OP2(pcmpgtl),
2593 [0x67] = MMX_OP2(packuswb),
2594 [0x68] = MMX_OP2(punpckhbw),
2595 [0x69] = MMX_OP2(punpckhwd),
2596 [0x6a] = MMX_OP2(punpckhdq),
2597 [0x6b] = MMX_OP2(packssdw),
a7812ae4
PB
2598 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2599 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
664e0f19
FB
2600 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2601 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movqdu */
d3eb5eae
BS
2602 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2603 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2604 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2605 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
664e0f19
FB
2606 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2607 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2608 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2609 [0x74] = MMX_OP2(pcmpeqb),
2610 [0x75] = MMX_OP2(pcmpeqw),
2611 [0x76] = MMX_OP2(pcmpeql),
a35f3ec7 2612 [0x77] = { SSE_DUMMY }, /* emms */
d9f4bb27
AP
2613 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2614 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
a7812ae4
PB
2615 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2616 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
664e0f19
FB
2617 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
2618 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2619 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2620 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
a7812ae4 2621 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
664e0f19
FB
2622 [0xd1] = MMX_OP2(psrlw),
2623 [0xd2] = MMX_OP2(psrld),
2624 [0xd3] = MMX_OP2(psrlq),
2625 [0xd4] = MMX_OP2(paddq),
2626 [0xd5] = MMX_OP2(pmullw),
2627 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2628 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2629 [0xd8] = MMX_OP2(psubusb),
2630 [0xd9] = MMX_OP2(psubusw),
2631 [0xda] = MMX_OP2(pminub),
2632 [0xdb] = MMX_OP2(pand),
2633 [0xdc] = MMX_OP2(paddusb),
2634 [0xdd] = MMX_OP2(paddusw),
2635 [0xde] = MMX_OP2(pmaxub),
2636 [0xdf] = MMX_OP2(pandn),
2637 [0xe0] = MMX_OP2(pavgb),
2638 [0xe1] = MMX_OP2(psraw),
2639 [0xe2] = MMX_OP2(psrad),
2640 [0xe3] = MMX_OP2(pavgw),
2641 [0xe4] = MMX_OP2(pmulhuw),
2642 [0xe5] = MMX_OP2(pmulhw),
a7812ae4 2643 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
664e0f19
FB
2644 [0xe7] = { SSE_SPECIAL , SSE_SPECIAL }, /* movntq, movntq */
2645 [0xe8] = MMX_OP2(psubsb),
2646 [0xe9] = MMX_OP2(psubsw),
2647 [0xea] = MMX_OP2(pminsw),
2648 [0xeb] = MMX_OP2(por),
2649 [0xec] = MMX_OP2(paddsb),
2650 [0xed] = MMX_OP2(paddsw),
2651 [0xee] = MMX_OP2(pmaxsw),
2652 [0xef] = MMX_OP2(pxor),
465e9838 2653 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
664e0f19
FB
2654 [0xf1] = MMX_OP2(psllw),
2655 [0xf2] = MMX_OP2(pslld),
2656 [0xf3] = MMX_OP2(psllq),
2657 [0xf4] = MMX_OP2(pmuludq),
2658 [0xf5] = MMX_OP2(pmaddwd),
2659 [0xf6] = MMX_OP2(psadbw),
d3eb5eae
BS
2660 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
2661 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
664e0f19
FB
2662 [0xf8] = MMX_OP2(psubb),
2663 [0xf9] = MMX_OP2(psubw),
2664 [0xfa] = MMX_OP2(psubl),
2665 [0xfb] = MMX_OP2(psubq),
2666 [0xfc] = MMX_OP2(paddb),
2667 [0xfd] = MMX_OP2(paddw),
2668 [0xfe] = MMX_OP2(paddl),
2669};
2670
d3eb5eae 2671static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
664e0f19
FB
2672 [0 + 2] = MMX_OP2(psrlw),
2673 [0 + 4] = MMX_OP2(psraw),
2674 [0 + 6] = MMX_OP2(psllw),
2675 [8 + 2] = MMX_OP2(psrld),
2676 [8 + 4] = MMX_OP2(psrad),
2677 [8 + 6] = MMX_OP2(pslld),
2678 [16 + 2] = MMX_OP2(psrlq),
a7812ae4 2679 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
664e0f19 2680 [16 + 6] = MMX_OP2(psllq),
a7812ae4 2681 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
664e0f19
FB
2682};
2683
d3eb5eae 2684static const SSEFunc_0_epi sse_op_table3ai[] = {
a7812ae4 2685 gen_helper_cvtsi2ss,
11f8cdbc 2686 gen_helper_cvtsi2sd
c4baa050 2687};
a7812ae4 2688
#ifdef TARGET_X86_64
/* cvtsq2ss/cvtsq2sd (64-bit integer source).  */
static const SSEFunc_0_epl sse_op_table3aq[] = {
    gen_helper_cvtsq2ss,
    gen_helper_cvtsq2sd
};
#endif
2695
d3eb5eae 2696static const SSEFunc_i_ep sse_op_table3bi[] = {
a7812ae4 2697 gen_helper_cvttss2si,
a7812ae4 2698 gen_helper_cvtss2si,
bedc2ac1 2699 gen_helper_cvttsd2si,
11f8cdbc 2700 gen_helper_cvtsd2si
664e0f19 2701};
3b46e624 2702
#ifdef TARGET_X86_64
/* cvt[t]ss2sq/cvt[t]sd2sq (64-bit integer destination).  */
static const SSEFunc_l_ep sse_op_table3bq[] = {
    gen_helper_cvttss2sq,
    gen_helper_cvtss2sq,
    gen_helper_cvttsd2sq,
    gen_helper_cvtsd2sq
};
#endif
2711
d3eb5eae 2712static const SSEFunc_0_epp sse_op_table4[8][4] = {
664e0f19
FB
2713 SSE_FOP(cmpeq),
2714 SSE_FOP(cmplt),
2715 SSE_FOP(cmple),
2716 SSE_FOP(cmpunord),
2717 SSE_FOP(cmpneq),
2718 SSE_FOP(cmpnlt),
2719 SSE_FOP(cmpnle),
2720 SSE_FOP(cmpord),
2721};
3b46e624 2722
d3eb5eae 2723static const SSEFunc_0_epp sse_op_table5[256] = {
a7812ae4
PB
2724 [0x0c] = gen_helper_pi2fw,
2725 [0x0d] = gen_helper_pi2fd,
2726 [0x1c] = gen_helper_pf2iw,
2727 [0x1d] = gen_helper_pf2id,
2728 [0x8a] = gen_helper_pfnacc,
2729 [0x8e] = gen_helper_pfpnacc,
2730 [0x90] = gen_helper_pfcmpge,
2731 [0x94] = gen_helper_pfmin,
2732 [0x96] = gen_helper_pfrcp,
2733 [0x97] = gen_helper_pfrsqrt,
2734 [0x9a] = gen_helper_pfsub,
2735 [0x9e] = gen_helper_pfadd,
2736 [0xa0] = gen_helper_pfcmpgt,
2737 [0xa4] = gen_helper_pfmax,
2738 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2739 [0xa7] = gen_helper_movq, /* pfrsqit1 */
2740 [0xaa] = gen_helper_pfsubr,
2741 [0xae] = gen_helper_pfacc,
2742 [0xb0] = gen_helper_pfcmpeq,
2743 [0xb4] = gen_helper_pfmul,
2744 [0xb6] = gen_helper_movq, /* pfrcpit2 */
2745 [0xb7] = gen_helper_pmulhrw_mmx,
2746 [0xbb] = gen_helper_pswapd,
2747 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
a35f3ec7
AJ
2748};
2749
d3eb5eae
BS
2750struct SSEOpHelper_epp {
2751 SSEFunc_0_epp op[2];
c4baa050
BS
2752 uint32_t ext_mask;
2753};
2754
d3eb5eae
BS
2755struct SSEOpHelper_eppi {
2756 SSEFunc_0_eppi op[2];
c4baa050 2757 uint32_t ext_mask;
222a3336 2758};
c4baa050 2759
222a3336 2760#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
a7812ae4
PB
2761#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
2762#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
222a3336 2763#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
e71827bc
AJ
2764#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
2765 CPUID_EXT_PCLMULQDQ }
d640045a 2766#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
c4baa050 2767
d3eb5eae 2768static const struct SSEOpHelper_epp sse_op_table6[256] = {
222a3336
AZ
2769 [0x00] = SSSE3_OP(pshufb),
2770 [0x01] = SSSE3_OP(phaddw),
2771 [0x02] = SSSE3_OP(phaddd),
2772 [0x03] = SSSE3_OP(phaddsw),
2773 [0x04] = SSSE3_OP(pmaddubsw),
2774 [0x05] = SSSE3_OP(phsubw),
2775 [0x06] = SSSE3_OP(phsubd),
2776 [0x07] = SSSE3_OP(phsubsw),
2777 [0x08] = SSSE3_OP(psignb),
2778 [0x09] = SSSE3_OP(psignw),
2779 [0x0a] = SSSE3_OP(psignd),
2780 [0x0b] = SSSE3_OP(pmulhrsw),
2781 [0x10] = SSE41_OP(pblendvb),
2782 [0x14] = SSE41_OP(blendvps),
2783 [0x15] = SSE41_OP(blendvpd),
2784 [0x17] = SSE41_OP(ptest),
2785 [0x1c] = SSSE3_OP(pabsb),
2786 [0x1d] = SSSE3_OP(pabsw),
2787 [0x1e] = SSSE3_OP(pabsd),
2788 [0x20] = SSE41_OP(pmovsxbw),
2789 [0x21] = SSE41_OP(pmovsxbd),
2790 [0x22] = SSE41_OP(pmovsxbq),
2791 [0x23] = SSE41_OP(pmovsxwd),
2792 [0x24] = SSE41_OP(pmovsxwq),
2793 [0x25] = SSE41_OP(pmovsxdq),
2794 [0x28] = SSE41_OP(pmuldq),
2795 [0x29] = SSE41_OP(pcmpeqq),
2796 [0x2a] = SSE41_SPECIAL, /* movntqda */
2797 [0x2b] = SSE41_OP(packusdw),
2798 [0x30] = SSE41_OP(pmovzxbw),
2799 [0x31] = SSE41_OP(pmovzxbd),
2800 [0x32] = SSE41_OP(pmovzxbq),
2801 [0x33] = SSE41_OP(pmovzxwd),
2802 [0x34] = SSE41_OP(pmovzxwq),
2803 [0x35] = SSE41_OP(pmovzxdq),
2804 [0x37] = SSE42_OP(pcmpgtq),
2805 [0x38] = SSE41_OP(pminsb),
2806 [0x39] = SSE41_OP(pminsd),
2807 [0x3a] = SSE41_OP(pminuw),
2808 [0x3b] = SSE41_OP(pminud),
2809 [0x3c] = SSE41_OP(pmaxsb),
2810 [0x3d] = SSE41_OP(pmaxsd),
2811 [0x3e] = SSE41_OP(pmaxuw),
2812 [0x3f] = SSE41_OP(pmaxud),
2813 [0x40] = SSE41_OP(pmulld),
2814 [0x41] = SSE41_OP(phminposuw),
d640045a
AJ
2815 [0xdb] = AESNI_OP(aesimc),
2816 [0xdc] = AESNI_OP(aesenc),
2817 [0xdd] = AESNI_OP(aesenclast),
2818 [0xde] = AESNI_OP(aesdec),
2819 [0xdf] = AESNI_OP(aesdeclast),
4242b1bd
AZ
2820};
2821
d3eb5eae 2822static const struct SSEOpHelper_eppi sse_op_table7[256] = {
222a3336
AZ
2823 [0x08] = SSE41_OP(roundps),
2824 [0x09] = SSE41_OP(roundpd),
2825 [0x0a] = SSE41_OP(roundss),
2826 [0x0b] = SSE41_OP(roundsd),
2827 [0x0c] = SSE41_OP(blendps),
2828 [0x0d] = SSE41_OP(blendpd),
2829 [0x0e] = SSE41_OP(pblendw),
2830 [0x0f] = SSSE3_OP(palignr),
2831 [0x14] = SSE41_SPECIAL, /* pextrb */
2832 [0x15] = SSE41_SPECIAL, /* pextrw */
2833 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
2834 [0x17] = SSE41_SPECIAL, /* extractps */
2835 [0x20] = SSE41_SPECIAL, /* pinsrb */
2836 [0x21] = SSE41_SPECIAL, /* insertps */
2837 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
2838 [0x40] = SSE41_OP(dpps),
2839 [0x41] = SSE41_OP(dppd),
2840 [0x42] = SSE41_OP(mpsadbw),
e71827bc 2841 [0x44] = PCLMULQDQ_OP(pclmulqdq),
222a3336
AZ
2842 [0x60] = SSE42_OP(pcmpestrm),
2843 [0x61] = SSE42_OP(pcmpestri),
2844 [0x62] = SSE42_OP(pcmpistrm),
2845 [0x63] = SSE42_OP(pcmpistri),
d640045a 2846 [0xdf] = AESNI_OP(aeskeygenassist),
4242b1bd
AZ
2847};
2848
0af10c86
BS
2849static void gen_sse(CPUX86State *env, DisasContext *s, int b,
2850 target_ulong pc_start, int rex_r)
664e0f19 2851{
d67dc9e6 2852 int b1, op1_offset, op2_offset, is_xmm, val;
4eeb3939 2853 int modrm, mod, rm, reg;
d3eb5eae
BS
2854 SSEFunc_0_epp sse_fn_epp;
2855 SSEFunc_0_eppi sse_fn_eppi;
c4baa050 2856 SSEFunc_0_ppi sse_fn_ppi;
d3eb5eae 2857 SSEFunc_0_eppt sse_fn_eppt;
d67dc9e6 2858 TCGMemOp ot;
664e0f19
FB
2859
2860 b &= 0xff;
5fafdf24 2861 if (s->prefix & PREFIX_DATA)
664e0f19 2862 b1 = 1;
5fafdf24 2863 else if (s->prefix & PREFIX_REPZ)
664e0f19 2864 b1 = 2;
5fafdf24 2865 else if (s->prefix & PREFIX_REPNZ)
664e0f19
FB
2866 b1 = 3;
2867 else
2868 b1 = 0;
d3eb5eae
BS
2869 sse_fn_epp = sse_op_table1[b][b1];
2870 if (!sse_fn_epp) {
664e0f19 2871 goto illegal_op;
c4baa050 2872 }
a35f3ec7 2873 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
664e0f19
FB
2874 is_xmm = 1;
2875 } else {
2876 if (b1 == 0) {
2877 /* MMX case */
2878 is_xmm = 0;
2879 } else {
2880 is_xmm = 1;
2881 }
2882 }
2883 /* simple MMX/SSE operation */
2884 if (s->flags & HF_TS_MASK) {
2885 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
2886 return;
2887 }
2888 if (s->flags & HF_EM_MASK) {
2889 illegal_op:
2890 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
2891 return;
2892 }
2893 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
4242b1bd
AZ
2894 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
2895 goto illegal_op;
e771edab
AJ
2896 if (b == 0x0e) {
2897 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
2898 goto illegal_op;
2899 /* femms */
d3eb5eae 2900 gen_helper_emms(cpu_env);
e771edab
AJ
2901 return;
2902 }
2903 if (b == 0x77) {
2904 /* emms */
d3eb5eae 2905 gen_helper_emms(cpu_env);
664e0f19
FB
2906 return;
2907 }
2908 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
2909 the static cpu state) */
2910 if (!is_xmm) {
d3eb5eae 2911 gen_helper_enter_mmx(cpu_env);
664e0f19
FB
2912 }
2913
0af10c86 2914 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
2915 reg = ((modrm >> 3) & 7);
2916 if (is_xmm)
2917 reg |= rex_r;
2918 mod = (modrm >> 6) & 3;
d3eb5eae 2919 if (sse_fn_epp == SSE_SPECIAL) {
664e0f19
FB
2920 b |= (b1 << 8);
2921 switch(b) {
2922 case 0x0e7: /* movntq */
5fafdf24 2923 if (mod == 3)
664e0f19 2924 goto illegal_op;
4eeb3939 2925 gen_lea_modrm(env, s, modrm);
323d1876 2926 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
664e0f19
FB
2927 break;
2928 case 0x1e7: /* movntdq */
2929 case 0x02b: /* movntps */
2930 case 0x12b: /* movntps */
2e21e749
T
2931 if (mod == 3)
2932 goto illegal_op;
4eeb3939 2933 gen_lea_modrm(env, s, modrm);
323d1876 2934 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
2e21e749 2935 break;
465e9838
FB
2936 case 0x3f0: /* lddqu */
2937 if (mod == 3)
664e0f19 2938 goto illegal_op;
4eeb3939 2939 gen_lea_modrm(env, s, modrm);
323d1876 2940 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19 2941 break;
d9f4bb27
AP
2942 case 0x22b: /* movntss */
2943 case 0x32b: /* movntsd */
2944 if (mod == 3)
2945 goto illegal_op;
4eeb3939 2946 gen_lea_modrm(env, s, modrm);
d9f4bb27 2947 if (b1 & 1) {
07958082 2948 gen_stq_env_A0(s, offsetof(CPUX86State,
19cbd87c 2949 xmm_regs[reg].ZMM_Q(0)));
d9f4bb27 2950 } else {
1d1cc4d0 2951 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
19cbd87c 2952 xmm_regs[reg].ZMM_L(0)));
1d1cc4d0 2953 gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
d9f4bb27
AP
2954 }
2955 break;
664e0f19 2956 case 0x6e: /* movd mm, ea */
dabd98dd 2957#ifdef TARGET_X86_64
ab4e4aec 2958 if (s->dflag == MO_64) {
4ba9938c 2959 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
1d1cc4d0 2960 tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
5fafdf24 2961 } else
dabd98dd
FB
2962#endif
2963 {
4ba9938c 2964 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
5af45186
FB
2965 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
2966 offsetof(CPUX86State,fpregs[reg].mmx));
1d1cc4d0 2967 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
a7812ae4 2968 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
dabd98dd 2969 }
664e0f19
FB
2970 break;
2971 case 0x16e: /* movd xmm, ea */
dabd98dd 2972#ifdef TARGET_X86_64
ab4e4aec 2973 if (s->dflag == MO_64) {
4ba9938c 2974 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
5af45186
FB
2975 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
2976 offsetof(CPUX86State,xmm_regs[reg]));
1d1cc4d0 2977 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T0);
5fafdf24 2978 } else
dabd98dd
FB
2979#endif
2980 {
4ba9938c 2981 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
5af45186
FB
2982 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
2983 offsetof(CPUX86State,xmm_regs[reg]));
1d1cc4d0 2984 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
a7812ae4 2985 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
dabd98dd 2986 }
664e0f19
FB
2987 break;
2988 case 0x6f: /* movq mm, ea */
2989 if (mod != 3) {
4eeb3939 2990 gen_lea_modrm(env, s, modrm);
323d1876 2991 gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
664e0f19
FB
2992 } else {
2993 rm = (modrm & 7);
b6abf97d 2994 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
5af45186 2995 offsetof(CPUX86State,fpregs[rm].mmx));
b6abf97d 2996 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
5af45186 2997 offsetof(CPUX86State,fpregs[reg].mmx));
664e0f19
FB
2998 }
2999 break;
3000 case 0x010: /* movups */
3001 case 0x110: /* movupd */
3002 case 0x028: /* movaps */
3003 case 0x128: /* movapd */
3004 case 0x16f: /* movdqa xmm, ea */
3005 case 0x26f: /* movdqu xmm, ea */
3006 if (mod != 3) {
4eeb3939 3007 gen_lea_modrm(env, s, modrm);
323d1876 3008 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19
FB
3009 } else {
3010 rm = (modrm & 7) | REX_B(s);
3011 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3012 offsetof(CPUX86State,xmm_regs[rm]));
3013 }
3014 break;
3015 case 0x210: /* movss xmm, ea */
3016 if (mod != 3) {
4eeb3939 3017 gen_lea_modrm(env, s, modrm);
1d1cc4d0
RH
3018 gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
3019 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3020 tcg_gen_movi_tl(cpu_T0, 0);
3021 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
3022 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3023 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
664e0f19
FB
3024 } else {
3025 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3026 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3027 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
664e0f19
FB
3028 }
3029 break;
3030 case 0x310: /* movsd xmm, ea */
3031 if (mod != 3) {
4eeb3939 3032 gen_lea_modrm(env, s, modrm);
323d1876 3033 gen_ldq_env_A0(s, offsetof(CPUX86State,
19cbd87c 3034 xmm_regs[reg].ZMM_Q(0)));
1d1cc4d0
RH
3035 tcg_gen_movi_tl(cpu_T0, 0);
3036 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3037 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
664e0f19
FB
3038 } else {
3039 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3040 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3041 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
664e0f19
FB
3042 }
3043 break;
3044 case 0x012: /* movlps */
3045 case 0x112: /* movlpd */
3046 if (mod != 3) {
4eeb3939 3047 gen_lea_modrm(env, s, modrm);
323d1876 3048 gen_ldq_env_A0(s, offsetof(CPUX86State,
19cbd87c 3049 xmm_regs[reg].ZMM_Q(0)));
664e0f19
FB
3050 } else {
3051 /* movhlps */
3052 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3053 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3054 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
664e0f19
FB
3055 }
3056 break;
465e9838
FB
3057 case 0x212: /* movsldup */
3058 if (mod != 3) {
4eeb3939 3059 gen_lea_modrm(env, s, modrm);
323d1876 3060 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
465e9838
FB
3061 } else {
3062 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3063 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3064 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
3065 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
3066 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(2)));
465e9838 3067 }
19cbd87c
EH
3068 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
3069 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3070 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
3071 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
465e9838
FB
3072 break;
3073 case 0x312: /* movddup */
3074 if (mod != 3) {
4eeb3939 3075 gen_lea_modrm(env, s, modrm);
323d1876 3076 gen_ldq_env_A0(s, offsetof(CPUX86State,
19cbd87c 3077 xmm_regs[reg].ZMM_Q(0)));
465e9838
FB
3078 } else {
3079 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3080 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3081 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
465e9838 3082 }
19cbd87c
EH
3083 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
3084 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
465e9838 3085 break;
664e0f19
FB
3086 case 0x016: /* movhps */
3087 case 0x116: /* movhpd */
3088 if (mod != 3) {
4eeb3939 3089 gen_lea_modrm(env, s, modrm);
323d1876 3090 gen_ldq_env_A0(s, offsetof(CPUX86State,
19cbd87c 3091 xmm_regs[reg].ZMM_Q(1)));
664e0f19
FB
3092 } else {
3093 /* movlhps */
3094 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3095 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
3096 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
664e0f19
FB
3097 }
3098 break;
3099 case 0x216: /* movshdup */
3100 if (mod != 3) {
4eeb3939 3101 gen_lea_modrm(env, s, modrm);
323d1876 3102 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19
FB
3103 } else {
3104 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3105 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
3106 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(1)));
3107 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
3108 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(3)));
664e0f19 3109 }
19cbd87c
EH
3110 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3111 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
3112 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
3113 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
664e0f19 3114 break;
d9f4bb27
AP
3115 case 0x178:
3116 case 0x378:
3117 {
3118 int bit_index, field_length;
3119
3120 if (b1 == 1 && reg != 0)
3121 goto illegal_op;
0af10c86
BS
3122 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3123 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
d9f4bb27
AP
3124 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3125 offsetof(CPUX86State,xmm_regs[reg]));
3126 if (b1 == 1)
d3eb5eae
BS
3127 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3128 tcg_const_i32(bit_index),
3129 tcg_const_i32(field_length));
d9f4bb27 3130 else
d3eb5eae
BS
3131 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3132 tcg_const_i32(bit_index),
3133 tcg_const_i32(field_length));
d9f4bb27
AP
3134 }
3135 break;
664e0f19 3136 case 0x7e: /* movd ea, mm */
dabd98dd 3137#ifdef TARGET_X86_64
ab4e4aec 3138 if (s->dflag == MO_64) {
1d1cc4d0 3139 tcg_gen_ld_i64(cpu_T0, cpu_env,
5af45186 3140 offsetof(CPUX86State,fpregs[reg].mmx));
4ba9938c 3141 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
5fafdf24 3142 } else
dabd98dd
FB
3143#endif
3144 {
1d1cc4d0 3145 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
5af45186 3146 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
4ba9938c 3147 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
dabd98dd 3148 }
664e0f19
FB
3149 break;
3150 case 0x17e: /* movd ea, xmm */
dabd98dd 3151#ifdef TARGET_X86_64
ab4e4aec 3152 if (s->dflag == MO_64) {
1d1cc4d0 3153 tcg_gen_ld_i64(cpu_T0, cpu_env,
19cbd87c 3154 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
4ba9938c 3155 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
5fafdf24 3156 } else
dabd98dd
FB
3157#endif
3158 {
1d1cc4d0 3159 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
19cbd87c 3160 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
4ba9938c 3161 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
dabd98dd 3162 }
664e0f19
FB
3163 break;
3164 case 0x27e: /* movq xmm, ea */
3165 if (mod != 3) {
4eeb3939 3166 gen_lea_modrm(env, s, modrm);
323d1876 3167 gen_ldq_env_A0(s, offsetof(CPUX86State,
19cbd87c 3168 xmm_regs[reg].ZMM_Q(0)));
664e0f19
FB
3169 } else {
3170 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3171 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3172 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
664e0f19 3173 }
19cbd87c 3174 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
664e0f19
FB
3175 break;
3176 case 0x7f: /* movq ea, mm */
3177 if (mod != 3) {
4eeb3939 3178 gen_lea_modrm(env, s, modrm);
323d1876 3179 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
664e0f19
FB
3180 } else {
3181 rm = (modrm & 7);
3182 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3183 offsetof(CPUX86State,fpregs[reg].mmx));
3184 }
3185 break;
3186 case 0x011: /* movups */
3187 case 0x111: /* movupd */
3188 case 0x029: /* movaps */
3189 case 0x129: /* movapd */
3190 case 0x17f: /* movdqa ea, xmm */
3191 case 0x27f: /* movdqu ea, xmm */
3192 if (mod != 3) {
4eeb3939 3193 gen_lea_modrm(env, s, modrm);
323d1876 3194 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19
FB
3195 } else {
3196 rm = (modrm & 7) | REX_B(s);
3197 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3198 offsetof(CPUX86State,xmm_regs[reg]));
3199 }
3200 break;
3201 case 0x211: /* movss ea, xmm */
3202 if (mod != 3) {
4eeb3939 3203 gen_lea_modrm(env, s, modrm);
1d1cc4d0
RH
3204 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3205 gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
664e0f19
FB
3206 } else {
3207 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3208 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)),
3209 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
664e0f19
FB
3210 }
3211 break;
3212 case 0x311: /* movsd ea, xmm */
3213 if (mod != 3) {
4eeb3939 3214 gen_lea_modrm(env, s, modrm);
323d1876 3215 gen_stq_env_A0(s, offsetof(CPUX86State,
19cbd87c 3216 xmm_regs[reg].ZMM_Q(0)));
664e0f19
FB
3217 } else {
3218 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3219 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
3220 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
664e0f19
FB
3221 }
3222 break;
3223 case 0x013: /* movlps */
3224 case 0x113: /* movlpd */
3225 if (mod != 3) {
4eeb3939 3226 gen_lea_modrm(env, s, modrm);
323d1876 3227 gen_stq_env_A0(s, offsetof(CPUX86State,
19cbd87c 3228 xmm_regs[reg].ZMM_Q(0)));
664e0f19
FB
3229 } else {
3230 goto illegal_op;
3231 }
3232 break;
3233 case 0x017: /* movhps */
3234 case 0x117: /* movhpd */
3235 if (mod != 3) {
4eeb3939 3236 gen_lea_modrm(env, s, modrm);
323d1876 3237 gen_stq_env_A0(s, offsetof(CPUX86State,
19cbd87c 3238 xmm_regs[reg].ZMM_Q(1)));
664e0f19
FB
3239 } else {
3240 goto illegal_op;
3241 }
3242 break;
3243 case 0x71: /* shift mm, im */
3244 case 0x72:
3245 case 0x73:
3246 case 0x171: /* shift xmm, im */
3247 case 0x172:
3248 case 0x173:
c045af25
AK
3249 if (b1 >= 2) {
3250 goto illegal_op;
3251 }
0af10c86 3252 val = cpu_ldub_code(env, s->pc++);
664e0f19 3253 if (is_xmm) {
1d1cc4d0
RH
3254 tcg_gen_movi_tl(cpu_T0, val);
3255 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
3256 tcg_gen_movi_tl(cpu_T0, 0);
3257 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(1)));
664e0f19
FB
3258 op1_offset = offsetof(CPUX86State,xmm_t0);
3259 } else {
1d1cc4d0
RH
3260 tcg_gen_movi_tl(cpu_T0, val);
3261 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3262 tcg_gen_movi_tl(cpu_T0, 0);
3263 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
664e0f19
FB
3264 op1_offset = offsetof(CPUX86State,mmx_t0);
3265 }
d3eb5eae
BS
3266 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3267 (((modrm >> 3)) & 7)][b1];
3268 if (!sse_fn_epp) {
664e0f19 3269 goto illegal_op;
c4baa050 3270 }
664e0f19
FB
3271 if (is_xmm) {
3272 rm = (modrm & 7) | REX_B(s);
3273 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3274 } else {
3275 rm = (modrm & 7);
3276 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3277 }
5af45186
FB
3278 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3279 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
d3eb5eae 3280 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3281 break;
3282 case 0x050: /* movmskps */
664e0f19 3283 rm = (modrm & 7) | REX_B(s);
5af45186
FB
3284 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3285 offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3286 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
a7fbcbe5 3287 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
664e0f19
FB
3288 break;
3289 case 0x150: /* movmskpd */
664e0f19 3290 rm = (modrm & 7) | REX_B(s);
5af45186
FB
3291 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3292 offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3293 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
a7fbcbe5 3294 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
664e0f19
FB
3295 break;
3296 case 0x02a: /* cvtpi2ps */
3297 case 0x12a: /* cvtpi2pd */
d3eb5eae 3298 gen_helper_enter_mmx(cpu_env);
664e0f19 3299 if (mod != 3) {
4eeb3939 3300 gen_lea_modrm(env, s, modrm);
664e0f19 3301 op2_offset = offsetof(CPUX86State,mmx_t0);
323d1876 3302 gen_ldq_env_A0(s, op2_offset);
664e0f19
FB
3303 } else {
3304 rm = (modrm & 7);
3305 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3306 }
3307 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
5af45186
FB
3308 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3309 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
664e0f19
FB
3310 switch(b >> 8) {
3311 case 0x0:
d3eb5eae 3312 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3313 break;
3314 default:
3315 case 0x1:
d3eb5eae 3316 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3317 break;
3318 }
3319 break;
3320 case 0x22a: /* cvtsi2ss */
3321 case 0x32a: /* cvtsi2sd */
ab4e4aec 3322 ot = mo_64_32(s->dflag);
0af10c86 3323 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
664e0f19 3324 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
5af45186 3325 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4ba9938c 3326 if (ot == MO_32) {
d3eb5eae 3327 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
1d1cc4d0 3328 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
d3eb5eae 3329 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
28e10711 3330 } else {
11f8cdbc 3331#ifdef TARGET_X86_64
d3eb5eae 3332 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
1d1cc4d0 3333 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T0);
11f8cdbc
SW
3334#else
3335 goto illegal_op;
3336#endif
28e10711 3337 }
664e0f19
FB
3338 break;
3339 case 0x02c: /* cvttps2pi */
3340 case 0x12c: /* cvttpd2pi */
3341 case 0x02d: /* cvtps2pi */
3342 case 0x12d: /* cvtpd2pi */
d3eb5eae 3343 gen_helper_enter_mmx(cpu_env);
664e0f19 3344 if (mod != 3) {
4eeb3939 3345 gen_lea_modrm(env, s, modrm);
664e0f19 3346 op2_offset = offsetof(CPUX86State,xmm_t0);
323d1876 3347 gen_ldo_env_A0(s, op2_offset);
664e0f19
FB
3348 } else {
3349 rm = (modrm & 7) | REX_B(s);
3350 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3351 }
3352 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
5af45186
FB
3353 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3354 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
664e0f19
FB
3355 switch(b) {
3356 case 0x02c:
d3eb5eae 3357 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3358 break;
3359 case 0x12c:
d3eb5eae 3360 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3361 break;
3362 case 0x02d:
d3eb5eae 3363 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3364 break;
3365 case 0x12d:
d3eb5eae 3366 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3367 break;
3368 }
3369 break;
3370 case 0x22c: /* cvttss2si */
3371 case 0x32c: /* cvttsd2si */
3372 case 0x22d: /* cvtss2si */
3373 case 0x32d: /* cvtsd2si */
ab4e4aec 3374 ot = mo_64_32(s->dflag);
31313213 3375 if (mod != 3) {
4eeb3939 3376 gen_lea_modrm(env, s, modrm);
31313213 3377 if ((b >> 8) & 1) {
19cbd87c 3378 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0)));
31313213 3379 } else {
1d1cc4d0
RH
3380 gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
3381 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
31313213
FB
3382 }
3383 op2_offset = offsetof(CPUX86State,xmm_t0);
3384 } else {
3385 rm = (modrm & 7) | REX_B(s);
3386 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3387 }
5af45186 3388 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
4ba9938c 3389 if (ot == MO_32) {
d3eb5eae 3390 SSEFunc_i_ep sse_fn_i_ep =
bedc2ac1 3391 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
d3eb5eae 3392 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
1d1cc4d0 3393 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
5af45186 3394 } else {
11f8cdbc 3395#ifdef TARGET_X86_64
d3eb5eae 3396 SSEFunc_l_ep sse_fn_l_ep =
bedc2ac1 3397 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
1d1cc4d0 3398 sse_fn_l_ep(cpu_T0, cpu_env, cpu_ptr0);
11f8cdbc
SW
3399#else
3400 goto illegal_op;
3401#endif
5af45186 3402 }
1d1cc4d0 3403 gen_op_mov_reg_v(ot, reg, cpu_T0);
664e0f19
FB
3404 break;
3405 case 0xc4: /* pinsrw */
5fafdf24 3406 case 0x1c4:
d1e42c5c 3407 s->rip_offset = 1;
4ba9938c 3408 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
0af10c86 3409 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3410 if (b1) {
3411 val &= 7;
1d1cc4d0 3412 tcg_gen_st16_tl(cpu_T0, cpu_env,
19cbd87c 3413 offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val)));
664e0f19
FB
3414 } else {
3415 val &= 3;
1d1cc4d0 3416 tcg_gen_st16_tl(cpu_T0, cpu_env,
5af45186 3417 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
664e0f19
FB
3418 }
3419 break;
3420 case 0xc5: /* pextrw */
5fafdf24 3421 case 0x1c5:
664e0f19
FB
3422 if (mod != 3)
3423 goto illegal_op;
ab4e4aec 3424 ot = mo_64_32(s->dflag);
0af10c86 3425 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3426 if (b1) {
3427 val &= 7;
3428 rm = (modrm & 7) | REX_B(s);
1d1cc4d0 3429 tcg_gen_ld16u_tl(cpu_T0, cpu_env,
19cbd87c 3430 offsetof(CPUX86State,xmm_regs[rm].ZMM_W(val)));
664e0f19
FB
3431 } else {
3432 val &= 3;
3433 rm = (modrm & 7);
1d1cc4d0 3434 tcg_gen_ld16u_tl(cpu_T0, cpu_env,
5af45186 3435 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
664e0f19
FB
3436 }
3437 reg = ((modrm >> 3) & 7) | rex_r;
1d1cc4d0 3438 gen_op_mov_reg_v(ot, reg, cpu_T0);
664e0f19
FB
3439 break;
3440 case 0x1d6: /* movq ea, xmm */
3441 if (mod != 3) {
4eeb3939 3442 gen_lea_modrm(env, s, modrm);
323d1876 3443 gen_stq_env_A0(s, offsetof(CPUX86State,
19cbd87c 3444 xmm_regs[reg].ZMM_Q(0)));
664e0f19
FB
3445 } else {
3446 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3447 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
3448 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3449 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
664e0f19
FB
3450 }
3451 break;
3452 case 0x2d6: /* movq2dq */
d3eb5eae 3453 gen_helper_enter_mmx(cpu_env);
480c1cdb 3454 rm = (modrm & 7);
19cbd87c 3455 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
480c1cdb 3456 offsetof(CPUX86State,fpregs[rm].mmx));
19cbd87c 3457 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
664e0f19
FB
3458 break;
3459 case 0x3d6: /* movdq2q */
d3eb5eae 3460 gen_helper_enter_mmx(cpu_env);
480c1cdb
FB
3461 rm = (modrm & 7) | REX_B(s);
3462 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
19cbd87c 3463 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
664e0f19
FB
3464 break;
3465 case 0xd7: /* pmovmskb */
3466 case 0x1d7:
3467 if (mod != 3)
3468 goto illegal_op;
3469 if (b1) {
3470 rm = (modrm & 7) | REX_B(s);
5af45186 3471 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3472 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
664e0f19
FB
3473 } else {
3474 rm = (modrm & 7);
5af45186 3475 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
d3eb5eae 3476 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
664e0f19
FB
3477 }
3478 reg = ((modrm >> 3) & 7) | rex_r;
a7fbcbe5 3479 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
664e0f19 3480 break;
111994ee 3481
4242b1bd 3482 case 0x138:
000cacf6 3483 case 0x038:
4242b1bd 3484 b = modrm;
111994ee
RH
3485 if ((b & 0xf0) == 0xf0) {
3486 goto do_0f_38_fx;
3487 }
0af10c86 3488 modrm = cpu_ldub_code(env, s->pc++);
4242b1bd
AZ
3489 rm = modrm & 7;
3490 reg = ((modrm >> 3) & 7) | rex_r;
3491 mod = (modrm >> 6) & 3;
c045af25
AK
3492 if (b1 >= 2) {
3493 goto illegal_op;
3494 }
4242b1bd 3495
d3eb5eae
BS
3496 sse_fn_epp = sse_op_table6[b].op[b1];
3497 if (!sse_fn_epp) {
4242b1bd 3498 goto illegal_op;
c4baa050 3499 }
222a3336
AZ
3500 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3501 goto illegal_op;
4242b1bd
AZ
3502
3503 if (b1) {
3504 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3505 if (mod == 3) {
3506 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3507 } else {
3508 op2_offset = offsetof(CPUX86State,xmm_t0);
4eeb3939 3509 gen_lea_modrm(env, s, modrm);
222a3336
AZ
3510 switch (b) {
3511 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3512 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3513 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
323d1876 3514 gen_ldq_env_A0(s, op2_offset +
19cbd87c 3515 offsetof(ZMMReg, ZMM_Q(0)));
222a3336
AZ
3516 break;
3517 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3518 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3c5f4116
RH
3519 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
3520 s->mem_index, MO_LEUL);
222a3336 3521 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
19cbd87c 3522 offsetof(ZMMReg, ZMM_L(0)));
222a3336
AZ
3523 break;
3524 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3c5f4116
RH
3525 tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
3526 s->mem_index, MO_LEUW);
222a3336 3527 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
19cbd87c 3528 offsetof(ZMMReg, ZMM_W(0)));
222a3336
AZ
3529 break;
3530 case 0x2a: /* movntqda */
323d1876 3531 gen_ldo_env_A0(s, op1_offset);
222a3336
AZ
3532 return;
3533 default:
323d1876 3534 gen_ldo_env_A0(s, op2_offset);
222a3336 3535 }
4242b1bd
AZ
3536 }
3537 } else {
3538 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3539 if (mod == 3) {
3540 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3541 } else {
3542 op2_offset = offsetof(CPUX86State,mmx_t0);
4eeb3939 3543 gen_lea_modrm(env, s, modrm);
323d1876 3544 gen_ldq_env_A0(s, op2_offset);
4242b1bd
AZ
3545 }
3546 }
d3eb5eae 3547 if (sse_fn_epp == SSE_SPECIAL) {
222a3336 3548 goto illegal_op;
c4baa050 3549 }
222a3336 3550
4242b1bd
AZ
3551 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3552 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 3553 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
222a3336 3554
3ca51d07
RH
3555 if (b == 0x17) {
3556 set_cc_op(s, CC_OP_EFLAGS);
3557 }
4242b1bd 3558 break;
111994ee
RH
3559
3560 case 0x238:
3561 case 0x338:
3562 do_0f_38_fx:
3563 /* Various integer extensions at 0f 38 f[0-f]. */
3564 b = modrm | (b1 << 8);
0af10c86 3565 modrm = cpu_ldub_code(env, s->pc++);
222a3336
AZ
3566 reg = ((modrm >> 3) & 7) | rex_r;
3567
111994ee
RH
3568 switch (b) {
3569 case 0x3f0: /* crc32 Gd,Eb */
3570 case 0x3f1: /* crc32 Gd,Ey */
3571 do_crc32:
3572 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3573 goto illegal_op;
3574 }
3575 if ((b & 0xff) == 0xf0) {
4ba9938c 3576 ot = MO_8;
ab4e4aec 3577 } else if (s->dflag != MO_64) {
4ba9938c 3578 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
111994ee 3579 } else {
4ba9938c 3580 ot = MO_64;
111994ee 3581 }
4242b1bd 3582
24b9c00f 3583 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
111994ee 3584 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
1d1cc4d0
RH
3585 gen_helper_crc32(cpu_T0, cpu_tmp2_i32,
3586 cpu_T0, tcg_const_i32(8 << ot));
222a3336 3587
ab4e4aec 3588 ot = mo_64_32(s->dflag);
1d1cc4d0 3589 gen_op_mov_reg_v(ot, reg, cpu_T0);
111994ee 3590 break;
222a3336 3591
111994ee
RH
3592 case 0x1f0: /* crc32 or movbe */
3593 case 0x1f1:
3594 /* For these insns, the f3 prefix is supposed to have priority
3595 over the 66 prefix, but that's not what we implement above
3596 setting b1. */
3597 if (s->prefix & PREFIX_REPNZ) {
3598 goto do_crc32;
3599 }
3600 /* FALLTHRU */
3601 case 0x0f0: /* movbe Gy,My */
3602 case 0x0f1: /* movbe My,Gy */
3603 if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3604 goto illegal_op;
3605 }
ab4e4aec 3606 if (s->dflag != MO_64) {
4ba9938c 3607 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
111994ee 3608 } else {
4ba9938c 3609 ot = MO_64;
111994ee
RH
3610 }
3611
3655a19f 3612 gen_lea_modrm(env, s, modrm);
111994ee 3613 if ((b & 1) == 0) {
1d1cc4d0 3614 tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
3655a19f 3615 s->mem_index, ot | MO_BE);
1d1cc4d0 3616 gen_op_mov_reg_v(ot, reg, cpu_T0);
111994ee 3617 } else {
3655a19f
RH
3618 tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
3619 s->mem_index, ot | MO_BE);
111994ee
RH
3620 }
3621 break;
3622
7073fbad
RH
3623 case 0x0f2: /* andn Gy, By, Ey */
3624 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3625 || !(s->prefix & PREFIX_VEX)
3626 || s->vex_l != 0) {
3627 goto illegal_op;
3628 }
ab4e4aec 3629 ot = mo_64_32(s->dflag);
7073fbad 3630 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
1d1cc4d0
RH
3631 tcg_gen_andc_tl(cpu_T0, cpu_regs[s->vex_v], cpu_T0);
3632 gen_op_mov_reg_v(ot, reg, cpu_T0);
7073fbad
RH
3633 gen_op_update1_cc();
3634 set_cc_op(s, CC_OP_LOGICB + ot);
3635 break;
3636
c7ab7565
RH
3637 case 0x0f7: /* bextr Gy, Ey, By */
3638 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3639 || !(s->prefix & PREFIX_VEX)
3640 || s->vex_l != 0) {
3641 goto illegal_op;
3642 }
ab4e4aec 3643 ot = mo_64_32(s->dflag);
c7ab7565
RH
3644 {
3645 TCGv bound, zero;
3646
3647 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3648 /* Extract START, and shift the operand.
3649 Shifts larger than operand size get zeros. */
3650 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
1d1cc4d0 3651 tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_A0);
c7ab7565 3652
4ba9938c 3653 bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
c7ab7565 3654 zero = tcg_const_tl(0);
1d1cc4d0
RH
3655 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T0, cpu_A0, bound,
3656 cpu_T0, zero);
c7ab7565
RH
3657 tcg_temp_free(zero);
3658
3659 /* Extract the LEN into a mask. Lengths larger than
3660 operand size get all ones. */
3661 tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
3662 tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
3663 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
3664 cpu_A0, bound);
3665 tcg_temp_free(bound);
1d1cc4d0
RH
3666 tcg_gen_movi_tl(cpu_T1, 1);
3667 tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_A0);
3668 tcg_gen_subi_tl(cpu_T1, cpu_T1, 1);
3669 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
c7ab7565 3670
1d1cc4d0 3671 gen_op_mov_reg_v(ot, reg, cpu_T0);
c7ab7565
RH
3672 gen_op_update1_cc();
3673 set_cc_op(s, CC_OP_LOGICB + ot);
3674 }
3675 break;
3676
02ea1e6b
RH
3677 case 0x0f5: /* bzhi Gy, Ey, By */
3678 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3679 || !(s->prefix & PREFIX_VEX)
3680 || s->vex_l != 0) {
3681 goto illegal_op;
3682 }
ab4e4aec 3683 ot = mo_64_32(s->dflag);
02ea1e6b 3684 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
1d1cc4d0 3685 tcg_gen_ext8u_tl(cpu_T1, cpu_regs[s->vex_v]);
02ea1e6b 3686 {
4ba9938c 3687 TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
02ea1e6b
RH
3688 /* Note that since we're using BMILG (in order to get O
3689 cleared) we need to store the inverse into C. */
3690 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
1d1cc4d0
RH
3691 cpu_T1, bound);
3692 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T1, cpu_T1,
3693 bound, bound, cpu_T1);
02ea1e6b
RH
3694 tcg_temp_free(bound);
3695 }
3696 tcg_gen_movi_tl(cpu_A0, -1);
1d1cc4d0
RH
3697 tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T1);
3698 tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_A0);
3699 gen_op_mov_reg_v(ot, reg, cpu_T0);
02ea1e6b
RH
3700 gen_op_update1_cc();
3701 set_cc_op(s, CC_OP_BMILGB + ot);
3702 break;
3703
5f1f4b17
RH
3704 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3705 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3706 || !(s->prefix & PREFIX_VEX)
3707 || s->vex_l != 0) {
3708 goto illegal_op;
3709 }
ab4e4aec 3710 ot = mo_64_32(s->dflag);
5f1f4b17
RH
3711 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3712 switch (ot) {
5f1f4b17 3713 default:
1d1cc4d0 3714 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
a4bcea3d
RH
3715 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
3716 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
3717 cpu_tmp2_i32, cpu_tmp3_i32);
3718 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
3719 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
5f1f4b17
RH
3720 break;
3721#ifdef TARGET_X86_64
4ba9938c 3722 case MO_64:
1d1cc4d0
RH
3723 tcg_gen_mulu2_i64(cpu_T0, cpu_T1,
3724 cpu_T0, cpu_regs[R_EDX]);
3725 tcg_gen_mov_i64(cpu_regs[s->vex_v], cpu_T0);
3726 tcg_gen_mov_i64(cpu_regs[reg], cpu_T1);
5f1f4b17
RH
3727 break;
3728#endif
3729 }
3730 break;
3731
0592f74a
RH
3732 case 0x3f5: /* pdep Gy, By, Ey */
3733 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3734 || !(s->prefix & PREFIX_VEX)
3735 || s->vex_l != 0) {
3736 goto illegal_op;
3737 }
ab4e4aec 3738 ot = mo_64_32(s->dflag);
0592f74a
RH
3739 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3740 /* Note that by zero-extending the mask operand, we
3741 automatically handle zero-extending the result. */
ab4e4aec 3742 if (ot == MO_64) {
1d1cc4d0 3743 tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
0592f74a 3744 } else {
1d1cc4d0 3745 tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
0592f74a 3746 }
1d1cc4d0 3747 gen_helper_pdep(cpu_regs[reg], cpu_T0, cpu_T1);
0592f74a
RH
3748 break;
3749
3750 case 0x2f5: /* pext Gy, By, Ey */
3751 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3752 || !(s->prefix & PREFIX_VEX)
3753 || s->vex_l != 0) {
3754 goto illegal_op;
3755 }
ab4e4aec 3756 ot = mo_64_32(s->dflag);
0592f74a
RH
3757 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3758 /* Note that by zero-extending the mask operand, we
3759 automatically handle zero-extending the result. */
ab4e4aec 3760 if (ot == MO_64) {
1d1cc4d0 3761 tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
0592f74a 3762 } else {
1d1cc4d0 3763 tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
0592f74a 3764 }
1d1cc4d0 3765 gen_helper_pext(cpu_regs[reg], cpu_T0, cpu_T1);
0592f74a
RH
3766 break;
3767
cd7f97ca
RH
3768 case 0x1f6: /* adcx Gy, Ey */
3769 case 0x2f6: /* adox Gy, Ey */
3770 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
3771 goto illegal_op;
3772 } else {
76f13133 3773 TCGv carry_in, carry_out, zero;
cd7f97ca
RH
3774 int end_op;
3775
ab4e4aec 3776 ot = mo_64_32(s->dflag);
cd7f97ca
RH
3777 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3778
3779 /* Re-use the carry-out from a previous round. */
3780 TCGV_UNUSED(carry_in);
3781 carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
3782 switch (s->cc_op) {
3783 case CC_OP_ADCX:
3784 if (b == 0x1f6) {
3785 carry_in = cpu_cc_dst;
3786 end_op = CC_OP_ADCX;
3787 } else {
3788 end_op = CC_OP_ADCOX;
3789 }
3790 break;
3791 case CC_OP_ADOX:
3792 if (b == 0x1f6) {
3793 end_op = CC_OP_ADCOX;
3794 } else {
3795 carry_in = cpu_cc_src2;
3796 end_op = CC_OP_ADOX;
3797 }
3798 break;
3799 case CC_OP_ADCOX:
3800 end_op = CC_OP_ADCOX;
3801 carry_in = carry_out;
3802 break;
3803 default:
c53de1a2 3804 end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
cd7f97ca
RH
3805 break;
3806 }
3807 /* If we can't reuse carry-out, get it out of EFLAGS. */
3808 if (TCGV_IS_UNUSED(carry_in)) {
3809 if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
3810 gen_compute_eflags(s);
3811 }
3812 carry_in = cpu_tmp0;
3813 tcg_gen_shri_tl(carry_in, cpu_cc_src,
3814 ctz32(b == 0x1f6 ? CC_C : CC_O));
3815 tcg_gen_andi_tl(carry_in, carry_in, 1);
3816 }
3817
3818 switch (ot) {
3819#ifdef TARGET_X86_64
4ba9938c 3820 case MO_32:
cd7f97ca
RH
3821 /* If we know TL is 64-bit, and we want a 32-bit
3822 result, just do everything in 64-bit arithmetic. */
3823 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
1d1cc4d0
RH
3824 tcg_gen_ext32u_i64(cpu_T0, cpu_T0);
3825 tcg_gen_add_i64(cpu_T0, cpu_T0, cpu_regs[reg]);
3826 tcg_gen_add_i64(cpu_T0, cpu_T0, carry_in);
3827 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T0);
3828 tcg_gen_shri_i64(carry_out, cpu_T0, 32);
cd7f97ca
RH
3829 break;
3830#endif
3831 default:
3832 /* Otherwise compute the carry-out in two steps. */
76f13133 3833 zero = tcg_const_tl(0);
1d1cc4d0
RH
3834 tcg_gen_add2_tl(cpu_T0, carry_out,
3835 cpu_T0, zero,
76f13133
RH
3836 carry_in, zero);
3837 tcg_gen_add2_tl(cpu_regs[reg], carry_out,
3838 cpu_regs[reg], carry_out,
1d1cc4d0 3839 cpu_T0, zero);
76f13133 3840 tcg_temp_free(zero);
cd7f97ca
RH
3841 break;
3842 }
cd7f97ca
RH
3843 set_cc_op(s, end_op);
3844 }
3845 break;
3846
4a554890
RH
3847 case 0x1f7: /* shlx Gy, Ey, By */
3848 case 0x2f7: /* sarx Gy, Ey, By */
3849 case 0x3f7: /* shrx Gy, Ey, By */
3850 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3851 || !(s->prefix & PREFIX_VEX)
3852 || s->vex_l != 0) {
3853 goto illegal_op;
3854 }
ab4e4aec 3855 ot = mo_64_32(s->dflag);
4a554890 3856 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4ba9938c 3857 if (ot == MO_64) {
1d1cc4d0 3858 tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 63);
4a554890 3859 } else {
1d1cc4d0 3860 tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 31);
4a554890
RH
3861 }
3862 if (b == 0x1f7) {
1d1cc4d0 3863 tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
4a554890 3864 } else if (b == 0x2f7) {
4ba9938c 3865 if (ot != MO_64) {
1d1cc4d0 3866 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
4a554890 3867 }
1d1cc4d0 3868 tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
4a554890 3869 } else {
4ba9938c 3870 if (ot != MO_64) {
1d1cc4d0 3871 tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
4a554890 3872 }
1d1cc4d0 3873 tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
4a554890 3874 }
1d1cc4d0 3875 gen_op_mov_reg_v(ot, reg, cpu_T0);
4a554890
RH
3876 break;
3877
bc4b43dc
RH
3878 case 0x0f3:
3879 case 0x1f3:
3880 case 0x2f3:
3881 case 0x3f3: /* Group 17 */
3882 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3883 || !(s->prefix & PREFIX_VEX)
3884 || s->vex_l != 0) {
3885 goto illegal_op;
3886 }
ab4e4aec 3887 ot = mo_64_32(s->dflag);
bc4b43dc
RH
3888 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3889
3890 switch (reg & 7) {
3891 case 1: /* blsr By,Ey */
1d1cc4d0
RH
3892 tcg_gen_neg_tl(cpu_T1, cpu_T0);
3893 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
3894 gen_op_mov_reg_v(ot, s->vex_v, cpu_T0);
bc4b43dc
RH
3895 gen_op_update2_cc();
3896 set_cc_op(s, CC_OP_BMILGB + ot);
3897 break;
3898
3899 case 2: /* blsmsk By,Ey */
1d1cc4d0
RH
3900 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
3901 tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
3902 tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_cc_src);
3903 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
bc4b43dc
RH
3904 set_cc_op(s, CC_OP_BMILGB + ot);
3905 break;
3906
3907 case 3: /* blsi By, Ey */
1d1cc4d0
RH
3908 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
3909 tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
3910 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_cc_src);
3911 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
bc4b43dc
RH
3912 set_cc_op(s, CC_OP_BMILGB + ot);
3913 break;
3914
3915 default:
3916 goto illegal_op;
3917 }
3918 break;
3919
111994ee
RH
3920 default:
3921 goto illegal_op;
3922 }
222a3336 3923 break;
111994ee 3924
222a3336
AZ
3925 case 0x03a:
3926 case 0x13a:
4242b1bd 3927 b = modrm;
0af10c86 3928 modrm = cpu_ldub_code(env, s->pc++);
4242b1bd
AZ
3929 rm = modrm & 7;
3930 reg = ((modrm >> 3) & 7) | rex_r;
3931 mod = (modrm >> 6) & 3;
c045af25
AK
3932 if (b1 >= 2) {
3933 goto illegal_op;
3934 }
4242b1bd 3935
d3eb5eae
BS
3936 sse_fn_eppi = sse_op_table7[b].op[b1];
3937 if (!sse_fn_eppi) {
4242b1bd 3938 goto illegal_op;
c4baa050 3939 }
222a3336
AZ
3940 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
3941 goto illegal_op;
3942
d3eb5eae 3943 if (sse_fn_eppi == SSE_SPECIAL) {
ab4e4aec 3944 ot = mo_64_32(s->dflag);
222a3336
AZ
3945 rm = (modrm & 7) | REX_B(s);
3946 if (mod != 3)
4eeb3939 3947 gen_lea_modrm(env, s, modrm);
222a3336 3948 reg = ((modrm >> 3) & 7) | rex_r;
0af10c86 3949 val = cpu_ldub_code(env, s->pc++);
222a3336
AZ
3950 switch (b) {
3951 case 0x14: /* pextrb */
1d1cc4d0 3952 tcg_gen_ld8u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
19cbd87c 3953 xmm_regs[reg].ZMM_B(val & 15)));
3523e4bd 3954 if (mod == 3) {
1d1cc4d0 3955 gen_op_mov_reg_v(ot, rm, cpu_T0);
3523e4bd 3956 } else {
1d1cc4d0 3957 tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
3523e4bd
RH
3958 s->mem_index, MO_UB);
3959 }
222a3336
AZ
3960 break;
3961 case 0x15: /* pextrw */
1d1cc4d0 3962 tcg_gen_ld16u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
19cbd87c 3963 xmm_regs[reg].ZMM_W(val & 7)));
3523e4bd 3964 if (mod == 3) {
1d1cc4d0 3965 gen_op_mov_reg_v(ot, rm, cpu_T0);
3523e4bd 3966 } else {
1d1cc4d0 3967 tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
3523e4bd
RH
3968 s->mem_index, MO_LEUW);
3969 }
222a3336
AZ
3970 break;
3971 case 0x16:
4ba9938c 3972 if (ot == MO_32) { /* pextrd */
222a3336
AZ
3973 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3974 offsetof(CPUX86State,
19cbd87c 3975 xmm_regs[reg].ZMM_L(val & 3)));
3523e4bd 3976 if (mod == 3) {
a7fbcbe5 3977 tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
3523e4bd 3978 } else {
d5601ad0
RH
3979 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
3980 s->mem_index, MO_LEUL);
3523e4bd 3981 }
222a3336 3982 } else { /* pextrq */
a7812ae4 3983#ifdef TARGET_X86_64
222a3336
AZ
3984 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3985 offsetof(CPUX86State,
19cbd87c 3986 xmm_regs[reg].ZMM_Q(val & 1)));
3523e4bd 3987 if (mod == 3) {
a7fbcbe5 3988 tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
3523e4bd
RH
3989 } else {
3990 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
3991 s->mem_index, MO_LEQ);
3992 }
a7812ae4
PB
3993#else
3994 goto illegal_op;
3995#endif
222a3336
AZ
3996 }
3997 break;
3998 case 0x17: /* extractps */
1d1cc4d0 3999 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
19cbd87c 4000 xmm_regs[reg].ZMM_L(val & 3)));
3523e4bd 4001 if (mod == 3) {
1d1cc4d0 4002 gen_op_mov_reg_v(ot, rm, cpu_T0);
3523e4bd 4003 } else {
1d1cc4d0 4004 tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
3523e4bd
RH
4005 s->mem_index, MO_LEUL);
4006 }
222a3336
AZ
4007 break;
4008 case 0x20: /* pinsrb */
3c5f4116 4009 if (mod == 3) {
1d1cc4d0 4010 gen_op_mov_v_reg(MO_32, cpu_T0, rm);
3c5f4116 4011 } else {
1d1cc4d0 4012 tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
3c5f4116
RH
4013 s->mem_index, MO_UB);
4014 }
1d1cc4d0 4015 tcg_gen_st8_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
19cbd87c 4016 xmm_regs[reg].ZMM_B(val & 15)));
222a3336
AZ
4017 break;
4018 case 0x21: /* insertps */
a7812ae4 4019 if (mod == 3) {
222a3336
AZ
4020 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4021 offsetof(CPUX86State,xmm_regs[rm]
19cbd87c 4022 .ZMM_L((val >> 6) & 3)));
a7812ae4 4023 } else {
3c5f4116
RH
4024 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4025 s->mem_index, MO_LEUL);
a7812ae4 4026 }
222a3336
AZ
4027 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4028 offsetof(CPUX86State,xmm_regs[reg]
19cbd87c 4029 .ZMM_L((val >> 4) & 3)));
222a3336
AZ
4030 if ((val >> 0) & 1)
4031 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4032 cpu_env, offsetof(CPUX86State,
19cbd87c 4033 xmm_regs[reg].ZMM_L(0)));
222a3336
AZ
4034 if ((val >> 1) & 1)
4035 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4036 cpu_env, offsetof(CPUX86State,
19cbd87c 4037 xmm_regs[reg].ZMM_L(1)));
222a3336
AZ
4038 if ((val >> 2) & 1)
4039 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4040 cpu_env, offsetof(CPUX86State,
19cbd87c 4041 xmm_regs[reg].ZMM_L(2)));
222a3336
AZ
4042 if ((val >> 3) & 1)
4043 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4044 cpu_env, offsetof(CPUX86State,
19cbd87c 4045 xmm_regs[reg].ZMM_L(3)));
222a3336
AZ
4046 break;
4047 case 0x22:
4ba9938c 4048 if (ot == MO_32) { /* pinsrd */
3c5f4116 4049 if (mod == 3) {
80b02013 4050 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
3c5f4116 4051 } else {
80b02013
RH
4052 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4053 s->mem_index, MO_LEUL);
3c5f4116 4054 }
222a3336
AZ
4055 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4056 offsetof(CPUX86State,
19cbd87c 4057 xmm_regs[reg].ZMM_L(val & 3)));
222a3336 4058 } else { /* pinsrq */
a7812ae4 4059#ifdef TARGET_X86_64
3c5f4116 4060 if (mod == 3) {
222a3336 4061 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
3c5f4116
RH
4062 } else {
4063 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
4064 s->mem_index, MO_LEQ);
4065 }
222a3336
AZ
4066 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4067 offsetof(CPUX86State,
19cbd87c 4068 xmm_regs[reg].ZMM_Q(val & 1)));
a7812ae4
PB
4069#else
4070 goto illegal_op;
4071#endif
222a3336
AZ
4072 }
4073 break;
4074 }
4075 return;
4076 }
4242b1bd
AZ
4077
4078 if (b1) {
4079 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4080 if (mod == 3) {
4081 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4082 } else {
4083 op2_offset = offsetof(CPUX86State,xmm_t0);
4eeb3939 4084 gen_lea_modrm(env, s, modrm);
323d1876 4085 gen_ldo_env_A0(s, op2_offset);
4242b1bd
AZ
4086 }
4087 } else {
4088 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4089 if (mod == 3) {
4090 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4091 } else {
4092 op2_offset = offsetof(CPUX86State,mmx_t0);
4eeb3939 4093 gen_lea_modrm(env, s, modrm);
323d1876 4094 gen_ldq_env_A0(s, op2_offset);
4242b1bd
AZ
4095 }
4096 }
0af10c86 4097 val = cpu_ldub_code(env, s->pc++);
4242b1bd 4098
222a3336 4099 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
3ca51d07 4100 set_cc_op(s, CC_OP_EFLAGS);
222a3336 4101
ab4e4aec 4102 if (s->dflag == MO_64) {
222a3336
AZ
4103 /* The helper must use entire 64-bit gp registers */
4104 val |= 1 << 8;
ab4e4aec 4105 }
222a3336
AZ
4106 }
4107
4242b1bd
AZ
4108 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4109 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4110 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4242b1bd 4111 break;
e2c3c2c5
RH
4112
4113 case 0x33a:
4114 /* Various integer extensions at 0f 3a f[0-f]. */
4115 b = modrm | (b1 << 8);
4116 modrm = cpu_ldub_code(env, s->pc++);
4117 reg = ((modrm >> 3) & 7) | rex_r;
4118
4119 switch (b) {
4120 case 0x3f0: /* rorx Gy,Ey, Ib */
4121 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4122 || !(s->prefix & PREFIX_VEX)
4123 || s->vex_l != 0) {
4124 goto illegal_op;
4125 }
ab4e4aec 4126 ot = mo_64_32(s->dflag);
e2c3c2c5
RH
4127 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4128 b = cpu_ldub_code(env, s->pc++);
4ba9938c 4129 if (ot == MO_64) {
1d1cc4d0 4130 tcg_gen_rotri_tl(cpu_T0, cpu_T0, b & 63);
e2c3c2c5 4131 } else {
1d1cc4d0 4132 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
e2c3c2c5 4133 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
1d1cc4d0 4134 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
e2c3c2c5 4135 }
1d1cc4d0 4136 gen_op_mov_reg_v(ot, reg, cpu_T0);
e2c3c2c5
RH
4137 break;
4138
4139 default:
4140 goto illegal_op;
4141 }
4142 break;
4143
664e0f19
FB
4144 default:
4145 goto illegal_op;
4146 }
4147 } else {
4148 /* generic MMX or SSE operation */
d1e42c5c 4149 switch(b) {
d1e42c5c
FB
4150 case 0x70: /* pshufx insn */
4151 case 0xc6: /* pshufx insn */
4152 case 0xc2: /* compare insns */
4153 s->rip_offset = 1;
4154 break;
4155 default:
4156 break;
664e0f19
FB
4157 }
4158 if (is_xmm) {
4159 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4160 if (mod != 3) {
cb48da7f
RH
4161 int sz = 4;
4162
4eeb3939 4163 gen_lea_modrm(env, s, modrm);
664e0f19 4164 op2_offset = offsetof(CPUX86State,xmm_t0);
cb48da7f
RH
4165
4166 switch (b) {
4167 case 0x50 ... 0x5a:
4168 case 0x5c ... 0x5f:
4169 case 0xc2:
4170 /* Most sse scalar operations. */
664e0f19 4171 if (b1 == 2) {
cb48da7f
RH
4172 sz = 2;
4173 } else if (b1 == 3) {
4174 sz = 3;
4175 }
4176 break;
4177
4178 case 0x2e: /* ucomis[sd] */
4179 case 0x2f: /* comis[sd] */
4180 if (b1 == 0) {
4181 sz = 2;
664e0f19 4182 } else {
cb48da7f 4183 sz = 3;
664e0f19 4184 }
cb48da7f
RH
4185 break;
4186 }
4187
4188 switch (sz) {
4189 case 2:
4190 /* 32 bit access */
1d1cc4d0
RH
4191 gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
4192 tcg_gen_st32_tl(cpu_T0, cpu_env,
19cbd87c 4193 offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
cb48da7f
RH
4194 break;
4195 case 3:
4196 /* 64 bit access */
19cbd87c 4197 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_D(0)));
cb48da7f
RH
4198 break;
4199 default:
4200 /* 128 bit access */
323d1876 4201 gen_ldo_env_A0(s, op2_offset);
cb48da7f 4202 break;
664e0f19
FB
4203 }
4204 } else {
4205 rm = (modrm & 7) | REX_B(s);
4206 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4207 }
4208 } else {
4209 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4210 if (mod != 3) {
4eeb3939 4211 gen_lea_modrm(env, s, modrm);
664e0f19 4212 op2_offset = offsetof(CPUX86State,mmx_t0);
323d1876 4213 gen_ldq_env_A0(s, op2_offset);
664e0f19
FB
4214 } else {
4215 rm = (modrm & 7);
4216 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4217 }
4218 }
4219 switch(b) {
a35f3ec7 4220 case 0x0f: /* 3DNow! data insns */
e771edab
AJ
4221 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4222 goto illegal_op;
0af10c86 4223 val = cpu_ldub_code(env, s->pc++);
d3eb5eae
BS
4224 sse_fn_epp = sse_op_table5[val];
4225 if (!sse_fn_epp) {
a35f3ec7 4226 goto illegal_op;
c4baa050 4227 }
5af45186
FB
4228 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4229 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4230 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
a35f3ec7 4231 break;
664e0f19
FB
4232 case 0x70: /* pshufx insn */
4233 case 0xc6: /* pshufx insn */
0af10c86 4234 val = cpu_ldub_code(env, s->pc++);
5af45186
FB
4235 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4236 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
c4baa050 4237 /* XXX: introduce a new table? */
d3eb5eae 4238 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
c4baa050 4239 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
664e0f19
FB
4240 break;
4241 case 0xc2:
4242 /* compare insns */
0af10c86 4243 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
4244 if (val >= 8)
4245 goto illegal_op;
d3eb5eae 4246 sse_fn_epp = sse_op_table4[val][b1];
c4baa050 4247
5af45186
FB
4248 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4249 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4250 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19 4251 break;
b8b6a50b
FB
4252 case 0xf7:
4253 /* maskmov : we must prepare A0 */
4254 if (mod != 3)
4255 goto illegal_op;
1d71ddb1
RH
4256 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
4257 gen_extu(s->aflag, cpu_A0);
b8b6a50b
FB
4258 gen_add_A0_ds_seg(s);
4259
4260 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4261 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
c4baa050 4262 /* XXX: introduce a new table? */
d3eb5eae
BS
4263 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4264 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
b8b6a50b 4265 break;
664e0f19 4266 default:
5af45186
FB
4267 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4268 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4269 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
4270 break;
4271 }
4272 if (b == 0x2e || b == 0x2f) {
3ca51d07 4273 set_cc_op(s, CC_OP_EFLAGS);
664e0f19
FB
4274 }
4275 }
4276}
4277
2c0262af
FB
4278/* convert one instruction. s->is_jmp is set if the translation must
4279 be stopped. Return the next pc value */
0af10c86
BS
4280static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4281 target_ulong pc_start)
2c0262af 4282{
ab4e4aec 4283 int b, prefixes;
d67dc9e6 4284 int shift;
ab4e4aec 4285 TCGMemOp ot, aflag, dflag;
4eeb3939 4286 int modrm, reg, rm, mod, op, opreg, val;
14ce26e7
FB
4287 target_ulong next_eip, tval;
4288 int rex_w, rex_r;
2c0262af
FB
4289
4290 s->pc = pc_start;
4291 prefixes = 0;
2c0262af 4292 s->override = -1;
14ce26e7
FB
4293 rex_w = -1;
4294 rex_r = 0;
4295#ifdef TARGET_X86_64
4296 s->rex_x = 0;
4297 s->rex_b = 0;
5fafdf24 4298 x86_64_hregs = 0;
14ce26e7
FB
4299#endif
4300 s->rip_offset = 0; /* for relative ip address */
701ed211
RH
4301 s->vex_l = 0;
4302 s->vex_v = 0;
2c0262af 4303 next_byte:
0af10c86 4304 b = cpu_ldub_code(env, s->pc);
2c0262af 4305 s->pc++;
4a6fd938
RH
4306 /* Collect prefixes. */
4307 switch (b) {
4308 case 0xf3:
4309 prefixes |= PREFIX_REPZ;
4310 goto next_byte;
4311 case 0xf2:
4312 prefixes |= PREFIX_REPNZ;
4313 goto next_byte;
4314 case 0xf0:
4315 prefixes |= PREFIX_LOCK;
4316 goto next_byte;
4317 case 0x2e:
4318 s->override = R_CS;
4319 goto next_byte;
4320 case 0x36:
4321 s->override = R_SS;
4322 goto next_byte;
4323 case 0x3e:
4324 s->override = R_DS;
4325 goto next_byte;
4326 case 0x26:
4327 s->override = R_ES;
4328 goto next_byte;
4329 case 0x64:
4330 s->override = R_FS;
4331 goto next_byte;
4332 case 0x65:
4333 s->override = R_GS;
4334 goto next_byte;
4335 case 0x66:
4336 prefixes |= PREFIX_DATA;
4337 goto next_byte;
4338 case 0x67:
4339 prefixes |= PREFIX_ADR;
4340 goto next_byte;
14ce26e7 4341#ifdef TARGET_X86_64
4a6fd938
RH
4342 case 0x40 ... 0x4f:
4343 if (CODE64(s)) {
14ce26e7
FB
4344 /* REX prefix */
4345 rex_w = (b >> 3) & 1;
4346 rex_r = (b & 0x4) << 1;
4347 s->rex_x = (b & 0x2) << 2;
4348 REX_B(s) = (b & 0x1) << 3;
4349 x86_64_hregs = 1; /* select uniform byte register addressing */
4350 goto next_byte;
4351 }
4a6fd938
RH
4352 break;
4353#endif
701ed211
RH
4354 case 0xc5: /* 2-byte VEX */
4355 case 0xc4: /* 3-byte VEX */
4356 /* VEX prefixes cannot be used except in 32-bit mode.
4357 Otherwise the instruction is LES or LDS. */
4358 if (s->code32 && !s->vm86) {
4359 static const int pp_prefix[4] = {
4360 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4361 };
4362 int vex3, vex2 = cpu_ldub_code(env, s->pc);
4363
4364 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4365 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4366 otherwise the instruction is LES or LDS. */
4367 break;
4368 }
4369 s->pc++;
4370
085d8134 4371 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
701ed211
RH
4372 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4373 | PREFIX_LOCK | PREFIX_DATA)) {
4374 goto illegal_op;
4375 }
4376#ifdef TARGET_X86_64
4377 if (x86_64_hregs) {
4378 goto illegal_op;
4379 }
4380#endif
4381 rex_r = (~vex2 >> 4) & 8;
4382 if (b == 0xc5) {
4383 vex3 = vex2;
4384 b = cpu_ldub_code(env, s->pc++);
4385 } else {
4386#ifdef TARGET_X86_64
4387 s->rex_x = (~vex2 >> 3) & 8;
4388 s->rex_b = (~vex2 >> 2) & 8;
4389#endif
4390 vex3 = cpu_ldub_code(env, s->pc++);
4391 rex_w = (vex3 >> 7) & 1;
4392 switch (vex2 & 0x1f) {
4393 case 0x01: /* Implied 0f leading opcode bytes. */
4394 b = cpu_ldub_code(env, s->pc++) | 0x100;
4395 break;
4396 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4397 b = 0x138;
4398 break;
4399 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4400 b = 0x13a;
4401 break;
4402 default: /* Reserved for future use. */
4403 goto illegal_op;
4404 }
4405 }
4406 s->vex_v = (~vex3 >> 3) & 0xf;
4407 s->vex_l = (vex3 >> 2) & 1;
4408 prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4409 }
4410 break;
4a6fd938
RH
4411 }
4412
4413 /* Post-process prefixes. */
4a6fd938 4414 if (CODE64(s)) {
dec3fc96
RH
4415 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4416 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4417 over 0x66 if both are present. */
ab4e4aec 4418 dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
dec3fc96 4419 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
1d71ddb1 4420 aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
dec3fc96
RH
4421 } else {
4422 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
ab4e4aec
RH
4423 if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
4424 dflag = MO_32;
4425 } else {
4426 dflag = MO_16;
14ce26e7 4427 }
dec3fc96 4428 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
1d71ddb1
RH
4429 if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
4430 aflag = MO_32;
4431 } else {
4432 aflag = MO_16;
14ce26e7 4433 }
2c0262af
FB
4434 }
4435
2c0262af
FB
4436 s->prefix = prefixes;
4437 s->aflag = aflag;
4438 s->dflag = dflag;
4439
4440 /* lock generation */
4441 if (prefixes & PREFIX_LOCK)
a7812ae4 4442 gen_helper_lock();
2c0262af
FB
4443
4444 /* now check op code */
4445 reswitch:
4446 switch(b) {
4447 case 0x0f:
4448 /**************************/
4449 /* extended op code */
0af10c86 4450 b = cpu_ldub_code(env, s->pc++) | 0x100;
2c0262af 4451 goto reswitch;
3b46e624 4452
2c0262af
FB
4453 /**************************/
4454 /* arith & logic */
4455 case 0x00 ... 0x05:
4456 case 0x08 ... 0x0d:
4457 case 0x10 ... 0x15:
4458 case 0x18 ... 0x1d:
4459 case 0x20 ... 0x25:
4460 case 0x28 ... 0x2d:
4461 case 0x30 ... 0x35:
4462 case 0x38 ... 0x3d:
4463 {
4464 int op, f, val;
4465 op = (b >> 3) & 7;
4466 f = (b >> 1) & 3;
4467
ab4e4aec 4468 ot = mo_b_d(b, dflag);
3b46e624 4469
2c0262af
FB
4470 switch(f) {
4471 case 0: /* OP Ev, Gv */
0af10c86 4472 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 4473 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 4474 mod = (modrm >> 6) & 3;
14ce26e7 4475 rm = (modrm & 7) | REX_B(s);
2c0262af 4476 if (mod != 3) {
4eeb3939 4477 gen_lea_modrm(env, s, modrm);
2c0262af
FB
4478 opreg = OR_TMP0;
4479 } else if (op == OP_XORL && rm == reg) {
4480 xor_zero:
4481 /* xor reg, reg optimisation */
436ff2d2 4482 set_cc_op(s, CC_OP_CLR);
1d1cc4d0
RH
4483 tcg_gen_movi_tl(cpu_T0, 0);
4484 gen_op_mov_reg_v(ot, reg, cpu_T0);
2c0262af
FB
4485 break;
4486 } else {
4487 opreg = rm;
4488 }
1d1cc4d0 4489 gen_op_mov_v_reg(ot, cpu_T1, reg);
2c0262af
FB
4490 gen_op(s, op, ot, opreg);
4491 break;
4492 case 1: /* OP Gv, Ev */
0af10c86 4493 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4494 mod = (modrm >> 6) & 3;
14ce26e7
FB
4495 reg = ((modrm >> 3) & 7) | rex_r;
4496 rm = (modrm & 7) | REX_B(s);
2c0262af 4497 if (mod != 3) {
4eeb3939 4498 gen_lea_modrm(env, s, modrm);
1d1cc4d0 4499 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
2c0262af
FB
4500 } else if (op == OP_XORL && rm == reg) {
4501 goto xor_zero;
4502 } else {
1d1cc4d0 4503 gen_op_mov_v_reg(ot, cpu_T1, rm);
2c0262af
FB
4504 }
4505 gen_op(s, op, ot, reg);
4506 break;
4507 case 2: /* OP A, Iv */
0af10c86 4508 val = insn_get(env, s, ot);
1d1cc4d0 4509 tcg_gen_movi_tl(cpu_T1, val);
2c0262af
FB
4510 gen_op(s, op, ot, OR_EAX);
4511 break;
4512 }
4513 }
4514 break;
4515
ec9d6075
FB
4516 case 0x82:
4517 if (CODE64(s))
4518 goto illegal_op;
2c0262af
FB
4519 case 0x80: /* GRP1 */
4520 case 0x81:
4521 case 0x83:
4522 {
4523 int val;
4524
ab4e4aec 4525 ot = mo_b_d(b, dflag);
3b46e624 4526
0af10c86 4527 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4528 mod = (modrm >> 6) & 3;
14ce26e7 4529 rm = (modrm & 7) | REX_B(s);
2c0262af 4530 op = (modrm >> 3) & 7;
3b46e624 4531
2c0262af 4532 if (mod != 3) {
14ce26e7
FB
4533 if (b == 0x83)
4534 s->rip_offset = 1;
4535 else
4536 s->rip_offset = insn_const_size(ot);
4eeb3939 4537 gen_lea_modrm(env, s, modrm);
2c0262af
FB
4538 opreg = OR_TMP0;
4539 } else {
14ce26e7 4540 opreg = rm;
2c0262af
FB
4541 }
4542
4543 switch(b) {
4544 default:
4545 case 0x80:
4546 case 0x81:
d64477af 4547 case 0x82:
0af10c86 4548 val = insn_get(env, s, ot);
2c0262af
FB
4549 break;
4550 case 0x83:
4ba9938c 4551 val = (int8_t)insn_get(env, s, MO_8);
2c0262af
FB
4552 break;
4553 }
1d1cc4d0 4554 tcg_gen_movi_tl(cpu_T1, val);
2c0262af
FB
4555 gen_op(s, op, ot, opreg);
4556 }
4557 break;
4558
4559 /**************************/
4560 /* inc, dec, and other misc arith */
4561 case 0x40 ... 0x47: /* inc Gv */
ab4e4aec 4562 ot = dflag;
2c0262af
FB
4563 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4564 break;
4565 case 0x48 ... 0x4f: /* dec Gv */
ab4e4aec 4566 ot = dflag;
2c0262af
FB
4567 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4568 break;
4569 case 0xf6: /* GRP3 */
4570 case 0xf7:
ab4e4aec 4571 ot = mo_b_d(b, dflag);
2c0262af 4572
0af10c86 4573 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4574 mod = (modrm >> 6) & 3;
14ce26e7 4575 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
4576 op = (modrm >> 3) & 7;
4577 if (mod != 3) {
14ce26e7
FB
4578 if (op == 0)
4579 s->rip_offset = insn_const_size(ot);
4eeb3939 4580 gen_lea_modrm(env, s, modrm);
1d1cc4d0 4581 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
2c0262af 4582 } else {
1d1cc4d0 4583 gen_op_mov_v_reg(ot, cpu_T0, rm);
2c0262af
FB
4584 }
4585
4586 switch(op) {
4587 case 0: /* test */
0af10c86 4588 val = insn_get(env, s, ot);
1d1cc4d0 4589 tcg_gen_movi_tl(cpu_T1, val);
2c0262af 4590 gen_op_testl_T0_T1_cc();
3ca51d07 4591 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af
FB
4592 break;
4593 case 2: /* not */
1d1cc4d0 4594 tcg_gen_not_tl(cpu_T0, cpu_T0);
2c0262af 4595 if (mod != 3) {
1d1cc4d0 4596 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
2c0262af 4597 } else {
1d1cc4d0 4598 gen_op_mov_reg_v(ot, rm, cpu_T0);
2c0262af
FB
4599 }
4600 break;
4601 case 3: /* neg */
1d1cc4d0 4602 tcg_gen_neg_tl(cpu_T0, cpu_T0);
2c0262af 4603 if (mod != 3) {
1d1cc4d0 4604 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
2c0262af 4605 } else {
1d1cc4d0 4606 gen_op_mov_reg_v(ot, rm, cpu_T0);
2c0262af
FB
4607 }
4608 gen_op_update_neg_cc();
3ca51d07 4609 set_cc_op(s, CC_OP_SUBB + ot);
2c0262af
FB
4610 break;
4611 case 4: /* mul */
4612 switch(ot) {
4ba9938c 4613 case MO_8:
1d1cc4d0
RH
4614 gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
4615 tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
4616 tcg_gen_ext8u_tl(cpu_T1, cpu_T1);
0211e5af 4617 /* XXX: use 32 bit mul which could be faster */
1d1cc4d0
RH
4618 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4619 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4620 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4621 tcg_gen_andi_tl(cpu_cc_src, cpu_T0, 0xff00);
3ca51d07 4622 set_cc_op(s, CC_OP_MULB);
2c0262af 4623 break;
4ba9938c 4624 case MO_16:
1d1cc4d0
RH
4625 gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
4626 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
4627 tcg_gen_ext16u_tl(cpu_T1, cpu_T1);
0211e5af 4628 /* XXX: use 32 bit mul which could be faster */
1d1cc4d0
RH
4629 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4630 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4631 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4632 tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
4633 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
4634 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
3ca51d07 4635 set_cc_op(s, CC_OP_MULW);
2c0262af
FB
4636 break;
4637 default:
4ba9938c 4638 case MO_32:
1d1cc4d0 4639 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
a4bcea3d
RH
4640 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4641 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4642 cpu_tmp2_i32, cpu_tmp3_i32);
4643 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4644 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4645 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4646 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 4647 set_cc_op(s, CC_OP_MULL);
2c0262af 4648 break;
14ce26e7 4649#ifdef TARGET_X86_64
4ba9938c 4650 case MO_64:
a4bcea3d 4651 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
1d1cc4d0 4652 cpu_T0, cpu_regs[R_EAX]);
a4bcea3d
RH
4653 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4654 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 4655 set_cc_op(s, CC_OP_MULQ);
14ce26e7
FB
4656 break;
4657#endif
2c0262af 4658 }
2c0262af
FB
4659 break;
4660 case 5: /* imul */
4661 switch(ot) {
4ba9938c 4662 case MO_8:
1d1cc4d0
RH
4663 gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
4664 tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
4665 tcg_gen_ext8s_tl(cpu_T1, cpu_T1);
0211e5af 4666 /* XXX: use 32 bit mul which could be faster */
1d1cc4d0
RH
4667 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4668 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4669 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4670 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T0);
4671 tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
3ca51d07 4672 set_cc_op(s, CC_OP_MULB);
2c0262af 4673 break;
4ba9938c 4674 case MO_16:
1d1cc4d0
RH
4675 gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
4676 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
4677 tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
0211e5af 4678 /* XXX: use 32 bit mul which could be faster */
1d1cc4d0
RH
4679 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4680 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4681 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4682 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
4683 tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
4684 tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
4685 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
3ca51d07 4686 set_cc_op(s, CC_OP_MULW);
2c0262af
FB
4687 break;
4688 default:
4ba9938c 4689 case MO_32:
1d1cc4d0 4690 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
a4bcea3d
RH
4691 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4692 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4693 cpu_tmp2_i32, cpu_tmp3_i32);
4694 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4695 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4696 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
4697 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4698 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
4699 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
3ca51d07 4700 set_cc_op(s, CC_OP_MULL);
2c0262af 4701 break;
14ce26e7 4702#ifdef TARGET_X86_64
4ba9938c 4703 case MO_64:
a4bcea3d 4704 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
1d1cc4d0 4705 cpu_T0, cpu_regs[R_EAX]);
a4bcea3d
RH
4706 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4707 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
4708 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 4709 set_cc_op(s, CC_OP_MULQ);
14ce26e7
FB
4710 break;
4711#endif
2c0262af 4712 }
2c0262af
FB
4713 break;
4714 case 6: /* div */
4715 switch(ot) {
4ba9938c 4716 case MO_8:
1d1cc4d0 4717 gen_helper_divb_AL(cpu_env, cpu_T0);
2c0262af 4718 break;
4ba9938c 4719 case MO_16:
1d1cc4d0 4720 gen_helper_divw_AX(cpu_env, cpu_T0);
2c0262af
FB
4721 break;
4722 default:
4ba9938c 4723 case MO_32:
1d1cc4d0 4724 gen_helper_divl_EAX(cpu_env, cpu_T0);
14ce26e7
FB
4725 break;
4726#ifdef TARGET_X86_64
4ba9938c 4727 case MO_64:
1d1cc4d0 4728 gen_helper_divq_EAX(cpu_env, cpu_T0);
2c0262af 4729 break;
14ce26e7 4730#endif
2c0262af
FB
4731 }
4732 break;
4733 case 7: /* idiv */
4734 switch(ot) {
4ba9938c 4735 case MO_8:
1d1cc4d0 4736 gen_helper_idivb_AL(cpu_env, cpu_T0);
2c0262af 4737 break;
4ba9938c 4738 case MO_16:
1d1cc4d0 4739 gen_helper_idivw_AX(cpu_env, cpu_T0);
2c0262af
FB
4740 break;
4741 default:
4ba9938c 4742 case MO_32:
1d1cc4d0 4743 gen_helper_idivl_EAX(cpu_env, cpu_T0);
14ce26e7
FB
4744 break;
4745#ifdef TARGET_X86_64
4ba9938c 4746 case MO_64:
1d1cc4d0 4747 gen_helper_idivq_EAX(cpu_env, cpu_T0);
2c0262af 4748 break;
14ce26e7 4749#endif
2c0262af
FB
4750 }
4751 break;
4752 default:
4753 goto illegal_op;
4754 }
4755 break;
4756
4757 case 0xfe: /* GRP4 */
4758 case 0xff: /* GRP5 */
ab4e4aec 4759 ot = mo_b_d(b, dflag);
2c0262af 4760
0af10c86 4761 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4762 mod = (modrm >> 6) & 3;
14ce26e7 4763 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
4764 op = (modrm >> 3) & 7;
4765 if (op >= 2 && b == 0xfe) {
4766 goto illegal_op;
4767 }
14ce26e7 4768 if (CODE64(s)) {
aba9d61e 4769 if (op == 2 || op == 4) {
14ce26e7 4770 /* operand size for jumps is 64 bit */
4ba9938c 4771 ot = MO_64;
aba9d61e 4772 } else if (op == 3 || op == 5) {
ab4e4aec 4773 ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
14ce26e7
FB
4774 } else if (op == 6) {
4775 /* default push size is 64 bit */
ab4e4aec 4776 ot = mo_pushpop(s, dflag);
14ce26e7
FB
4777 }
4778 }
2c0262af 4779 if (mod != 3) {
4eeb3939 4780 gen_lea_modrm(env, s, modrm);
2c0262af 4781 if (op >= 2 && op != 3 && op != 5)
1d1cc4d0 4782 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
2c0262af 4783 } else {
1d1cc4d0 4784 gen_op_mov_v_reg(ot, cpu_T0, rm);
2c0262af
FB
4785 }
4786
4787 switch(op) {
4788 case 0: /* inc Ev */
4789 if (mod != 3)
4790 opreg = OR_TMP0;
4791 else
4792 opreg = rm;
4793 gen_inc(s, ot, opreg, 1);
4794 break;
4795 case 1: /* dec Ev */
4796 if (mod != 3)
4797 opreg = OR_TMP0;
4798 else
4799 opreg = rm;
4800 gen_inc(s, ot, opreg, -1);
4801 break;
4802 case 2: /* call Ev */
4f31916f 4803 /* XXX: optimize if memory (no 'and' is necessary) */
ab4e4aec 4804 if (dflag == MO_16) {
1d1cc4d0 4805 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
40b90233 4806 }
2c0262af 4807 next_eip = s->pc - s->cs_base;
1d1cc4d0
RH
4808 tcg_gen_movi_tl(cpu_T1, next_eip);
4809 gen_push_v(s, cpu_T1);
4810 gen_op_jmp_v(cpu_T0);
7d117ce8 4811 gen_bnd_jmp(s);
2c0262af
FB
4812 gen_eob(s);
4813 break;
61382a50 4814 case 3: /* lcall Ev */
1d1cc4d0 4815 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
830a19a4 4816 gen_add_A0_im(s, 1 << ot);
1d1cc4d0 4817 gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
2c0262af
FB
4818 do_lcall:
4819 if (s->pe && !s->vm86) {
1d1cc4d0
RH
4820 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4821 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
ab4e4aec 4822 tcg_const_i32(dflag - 1),
100ec099 4823 tcg_const_tl(s->pc - s->cs_base));
2c0262af 4824 } else {
1d1cc4d0
RH
4825 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4826 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1,
ab4e4aec 4827 tcg_const_i32(dflag - 1),
a7812ae4 4828 tcg_const_i32(s->pc - s->cs_base));
2c0262af
FB
4829 }
4830 gen_eob(s);
4831 break;
4832 case 4: /* jmp Ev */
ab4e4aec 4833 if (dflag == MO_16) {
1d1cc4d0 4834 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
40b90233 4835 }
1d1cc4d0 4836 gen_op_jmp_v(cpu_T0);
7d117ce8 4837 gen_bnd_jmp(s);
2c0262af
FB
4838 gen_eob(s);
4839 break;
4840 case 5: /* ljmp Ev */
1d1cc4d0 4841 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
830a19a4 4842 gen_add_A0_im(s, 1 << ot);
1d1cc4d0 4843 gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
2c0262af
FB
4844 do_ljmp:
4845 if (s->pe && !s->vm86) {
1d1cc4d0
RH
4846 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4847 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
100ec099 4848 tcg_const_tl(s->pc - s->cs_base));
2c0262af 4849 } else {
3bd7da9e 4850 gen_op_movl_seg_T0_vm(R_CS);
1d1cc4d0 4851 gen_op_jmp_v(cpu_T1);
2c0262af
FB
4852 }
4853 gen_eob(s);
4854 break;
4855 case 6: /* push Ev */
1d1cc4d0 4856 gen_push_v(s, cpu_T0);
2c0262af
FB
4857 break;
4858 default:
4859 goto illegal_op;
4860 }
4861 break;
4862
4863 case 0x84: /* test Ev, Gv */
5fafdf24 4864 case 0x85:
ab4e4aec 4865 ot = mo_b_d(b, dflag);
2c0262af 4866
0af10c86 4867 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 4868 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 4869
0af10c86 4870 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
1d1cc4d0 4871 gen_op_mov_v_reg(ot, cpu_T1, reg);
2c0262af 4872 gen_op_testl_T0_T1_cc();
3ca51d07 4873 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af 4874 break;
3b46e624 4875
2c0262af
FB
4876 case 0xa8: /* test eAX, Iv */
4877 case 0xa9:
ab4e4aec 4878 ot = mo_b_d(b, dflag);
0af10c86 4879 val = insn_get(env, s, ot);
2c0262af 4880
1d1cc4d0
RH
4881 gen_op_mov_v_reg(ot, cpu_T0, OR_EAX);
4882 tcg_gen_movi_tl(cpu_T1, val);
2c0262af 4883 gen_op_testl_T0_T1_cc();
3ca51d07 4884 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af 4885 break;
3b46e624 4886
2c0262af 4887 case 0x98: /* CWDE/CBW */
ab4e4aec 4888 switch (dflag) {
14ce26e7 4889#ifdef TARGET_X86_64
ab4e4aec 4890 case MO_64:
1d1cc4d0
RH
4891 gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
4892 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
4893 gen_op_mov_reg_v(MO_64, R_EAX, cpu_T0);
ab4e4aec 4894 break;
14ce26e7 4895#endif
ab4e4aec 4896 case MO_32:
1d1cc4d0
RH
4897 gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
4898 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
4899 gen_op_mov_reg_v(MO_32, R_EAX, cpu_T0);
ab4e4aec
RH
4900 break;
4901 case MO_16:
1d1cc4d0
RH
4902 gen_op_mov_v_reg(MO_8, cpu_T0, R_EAX);
4903 tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
4904 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
ab4e4aec
RH
4905 break;
4906 default:
4907 tcg_abort();
e108dd01 4908 }
2c0262af
FB
4909 break;
4910 case 0x99: /* CDQ/CWD */
ab4e4aec 4911 switch (dflag) {
14ce26e7 4912#ifdef TARGET_X86_64
ab4e4aec 4913 case MO_64:
1d1cc4d0
RH
4914 gen_op_mov_v_reg(MO_64, cpu_T0, R_EAX);
4915 tcg_gen_sari_tl(cpu_T0, cpu_T0, 63);
4916 gen_op_mov_reg_v(MO_64, R_EDX, cpu_T0);
ab4e4aec 4917 break;
14ce26e7 4918#endif
ab4e4aec 4919 case MO_32:
1d1cc4d0
RH
4920 gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
4921 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
4922 tcg_gen_sari_tl(cpu_T0, cpu_T0, 31);
4923 gen_op_mov_reg_v(MO_32, R_EDX, cpu_T0);
ab4e4aec
RH
4924 break;
4925 case MO_16:
1d1cc4d0
RH
4926 gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
4927 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
4928 tcg_gen_sari_tl(cpu_T0, cpu_T0, 15);
4929 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
ab4e4aec
RH
4930 break;
4931 default:
4932 tcg_abort();
e108dd01 4933 }
2c0262af
FB
4934 break;
4935 case 0x1af: /* imul Gv, Ev */
4936 case 0x69: /* imul Gv, Ev, I */
4937 case 0x6b:
ab4e4aec 4938 ot = dflag;
0af10c86 4939 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
4940 reg = ((modrm >> 3) & 7) | rex_r;
4941 if (b == 0x69)
4942 s->rip_offset = insn_const_size(ot);
4943 else if (b == 0x6b)
4944 s->rip_offset = 1;
0af10c86 4945 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2c0262af 4946 if (b == 0x69) {
0af10c86 4947 val = insn_get(env, s, ot);
1d1cc4d0 4948 tcg_gen_movi_tl(cpu_T1, val);
2c0262af 4949 } else if (b == 0x6b) {
4ba9938c 4950 val = (int8_t)insn_get(env, s, MO_8);
1d1cc4d0 4951 tcg_gen_movi_tl(cpu_T1, val);
2c0262af 4952 } else {
1d1cc4d0 4953 gen_op_mov_v_reg(ot, cpu_T1, reg);
2c0262af 4954 }
a4bcea3d 4955 switch (ot) {
0211e5af 4956#ifdef TARGET_X86_64
4ba9938c 4957 case MO_64:
1d1cc4d0 4958 tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, cpu_T0, cpu_T1);
a4bcea3d
RH
4959 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
4960 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
1d1cc4d0 4961 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1);
a4bcea3d 4962 break;
0211e5af 4963#endif
4ba9938c 4964 case MO_32:
1d1cc4d0
RH
4965 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4966 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
a4bcea3d
RH
4967 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4968 cpu_tmp2_i32, cpu_tmp3_i32);
4969 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
4970 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
4971 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
4972 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
4973 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
4974 break;
4975 default:
1d1cc4d0
RH
4976 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
4977 tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
0211e5af 4978 /* XXX: use 32 bit mul which could be faster */
1d1cc4d0
RH
4979 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4980 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4981 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
4982 tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
4983 gen_op_mov_reg_v(ot, reg, cpu_T0);
a4bcea3d 4984 break;
2c0262af 4985 }
3ca51d07 4986 set_cc_op(s, CC_OP_MULB + ot);
2c0262af
FB
4987 break;
4988 case 0x1c0:
4989 case 0x1c1: /* xadd Ev, Gv */
ab4e4aec 4990 ot = mo_b_d(b, dflag);
0af10c86 4991 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 4992 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
4993 mod = (modrm >> 6) & 3;
4994 if (mod == 3) {
14ce26e7 4995 rm = (modrm & 7) | REX_B(s);
1d1cc4d0
RH
4996 gen_op_mov_v_reg(ot, cpu_T0, reg);
4997 gen_op_mov_v_reg(ot, cpu_T1, rm);
4998 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
4999 gen_op_mov_reg_v(ot, reg, cpu_T1);
5000 gen_op_mov_reg_v(ot, rm, cpu_T0);
2c0262af 5001 } else {
4eeb3939 5002 gen_lea_modrm(env, s, modrm);
1d1cc4d0
RH
5003 gen_op_mov_v_reg(ot, cpu_T0, reg);
5004 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
5005 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
5006 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
5007 gen_op_mov_reg_v(ot, reg, cpu_T1);
2c0262af
FB
5008 }
5009 gen_op_update2_cc();
3ca51d07 5010 set_cc_op(s, CC_OP_ADDB + ot);
2c0262af
FB
5011 break;
5012 case 0x1b0:
5013 case 0x1b1: /* cmpxchg Ev, Gv */
cad3a37d 5014 {
42a268c2 5015 TCGLabel *label1, *label2;
1e4840bf 5016 TCGv t0, t1, t2, a0;
cad3a37d 5017
ab4e4aec 5018 ot = mo_b_d(b, dflag);
0af10c86 5019 modrm = cpu_ldub_code(env, s->pc++);
cad3a37d
FB
5020 reg = ((modrm >> 3) & 7) | rex_r;
5021 mod = (modrm >> 6) & 3;
a7812ae4
PB
5022 t0 = tcg_temp_local_new();
5023 t1 = tcg_temp_local_new();
5024 t2 = tcg_temp_local_new();
5025 a0 = tcg_temp_local_new();
1e4840bf 5026 gen_op_mov_v_reg(ot, t1, reg);
cad3a37d
FB
5027 if (mod == 3) {
5028 rm = (modrm & 7) | REX_B(s);
1e4840bf 5029 gen_op_mov_v_reg(ot, t0, rm);
cad3a37d 5030 } else {
4eeb3939 5031 gen_lea_modrm(env, s, modrm);
1e4840bf 5032 tcg_gen_mov_tl(a0, cpu_A0);
323d1876 5033 gen_op_ld_v(s, ot, t0, a0);
cad3a37d
FB
5034 rm = 0; /* avoid warning */
5035 }
5036 label1 = gen_new_label();
a3251186
RH
5037 tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5038 gen_extu(ot, t0);
1e4840bf 5039 gen_extu(ot, t2);
a3251186 5040 tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
f7e80adf 5041 label2 = gen_new_label();
cad3a37d 5042 if (mod == 3) {
1e4840bf 5043 gen_op_mov_reg_v(ot, R_EAX, t0);
1130328e
FB
5044 tcg_gen_br(label2);
5045 gen_set_label(label1);
1e4840bf 5046 gen_op_mov_reg_v(ot, rm, t1);
cad3a37d 5047 } else {
f7e80adf
AG
5048 /* perform no-op store cycle like physical cpu; must be
5049 before changing accumulator to ensure idempotency if
5050 the store faults and the instruction is restarted */
323d1876 5051 gen_op_st_v(s, ot, t0, a0);
1e4840bf 5052 gen_op_mov_reg_v(ot, R_EAX, t0);
f7e80adf 5053 tcg_gen_br(label2);
1130328e 5054 gen_set_label(label1);
323d1876 5055 gen_op_st_v(s, ot, t1, a0);
cad3a37d 5056 }
f7e80adf 5057 gen_set_label(label2);
1e4840bf 5058 tcg_gen_mov_tl(cpu_cc_src, t0);
a3251186
RH
5059 tcg_gen_mov_tl(cpu_cc_srcT, t2);
5060 tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
3ca51d07 5061 set_cc_op(s, CC_OP_SUBB + ot);
1e4840bf
FB
5062 tcg_temp_free(t0);
5063 tcg_temp_free(t1);
5064 tcg_temp_free(t2);
5065 tcg_temp_free(a0);
2c0262af 5066 }
2c0262af
FB
5067 break;
5068 case 0x1c7: /* cmpxchg8b */
0af10c86 5069 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5070 mod = (modrm >> 6) & 3;
71c3558e 5071 if ((mod == 3) || ((modrm & 0x38) != 0x8))
2c0262af 5072 goto illegal_op;
1b9d9ebb 5073#ifdef TARGET_X86_64
ab4e4aec 5074 if (dflag == MO_64) {
1b9d9ebb
FB
5075 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5076 goto illegal_op;
4eeb3939 5077 gen_lea_modrm(env, s, modrm);
92fc4b58 5078 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
1b9d9ebb
FB
5079 } else
5080#endif
5081 {
5082 if (!(s->cpuid_features & CPUID_CX8))
5083 goto illegal_op;
4eeb3939 5084 gen_lea_modrm(env, s, modrm);
92fc4b58 5085 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
1b9d9ebb 5086 }
3ca51d07 5087 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 5088 break;
3b46e624 5089
2c0262af
FB
5090 /**************************/
5091 /* push/pop */
5092 case 0x50 ... 0x57: /* push */
1d1cc4d0
RH
5093 gen_op_mov_v_reg(MO_32, cpu_T0, (b & 7) | REX_B(s));
5094 gen_push_v(s, cpu_T0);
2c0262af
FB
5095 break;
5096 case 0x58 ... 0x5f: /* pop */
8e31d234 5097 ot = gen_pop_T0(s);
77729c24 5098 /* NOTE: order is important for pop %sp */
8e31d234 5099 gen_pop_update(s, ot);
1d1cc4d0 5100 gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T0);
2c0262af
FB
5101 break;
5102 case 0x60: /* pusha */
14ce26e7
FB
5103 if (CODE64(s))
5104 goto illegal_op;
2c0262af
FB
5105 gen_pusha(s);
5106 break;
5107 case 0x61: /* popa */
14ce26e7
FB
5108 if (CODE64(s))
5109 goto illegal_op;
2c0262af
FB
5110 gen_popa(s);
5111 break;
5112 case 0x68: /* push Iv */
5113 case 0x6a:
ab4e4aec 5114 ot = mo_pushpop(s, dflag);
2c0262af 5115 if (b == 0x68)
0af10c86 5116 val = insn_get(env, s, ot);
2c0262af 5117 else
4ba9938c 5118 val = (int8_t)insn_get(env, s, MO_8);
1d1cc4d0
RH
5119 tcg_gen_movi_tl(cpu_T0, val);
5120 gen_push_v(s, cpu_T0);
2c0262af
FB
5121 break;
5122 case 0x8f: /* pop Ev */
0af10c86 5123 modrm = cpu_ldub_code(env, s->pc++);
77729c24 5124 mod = (modrm >> 6) & 3;
8e31d234 5125 ot = gen_pop_T0(s);
77729c24
FB
5126 if (mod == 3) {
5127 /* NOTE: order is important for pop %sp */
8e31d234 5128 gen_pop_update(s, ot);
14ce26e7 5129 rm = (modrm & 7) | REX_B(s);
1d1cc4d0 5130 gen_op_mov_reg_v(ot, rm, cpu_T0);
77729c24
FB
5131 } else {
5132 /* NOTE: order is important too for MMU exceptions */
14ce26e7 5133 s->popl_esp_hack = 1 << ot;
0af10c86 5134 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
77729c24 5135 s->popl_esp_hack = 0;
8e31d234 5136 gen_pop_update(s, ot);
77729c24 5137 }
2c0262af
FB
5138 break;
5139 case 0xc8: /* enter */
5140 {
5141 int level;
0af10c86 5142 val = cpu_lduw_code(env, s->pc);
2c0262af 5143 s->pc += 2;
0af10c86 5144 level = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5145 gen_enter(s, val, level);
5146 }
5147 break;
5148 case 0xc9: /* leave */
2045f04c 5149 gen_leave(s);
2c0262af
FB
5150 break;
5151 case 0x06: /* push es */
5152 case 0x0e: /* push cs */
5153 case 0x16: /* push ss */
5154 case 0x1e: /* push ds */
14ce26e7
FB
5155 if (CODE64(s))
5156 goto illegal_op;
2c0262af 5157 gen_op_movl_T0_seg(b >> 3);
1d1cc4d0 5158 gen_push_v(s, cpu_T0);
2c0262af
FB
5159 break;
5160 case 0x1a0: /* push fs */
5161 case 0x1a8: /* push gs */
5162 gen_op_movl_T0_seg((b >> 3) & 7);
1d1cc4d0 5163 gen_push_v(s, cpu_T0);
2c0262af
FB
5164 break;
5165 case 0x07: /* pop es */
5166 case 0x17: /* pop ss */
5167 case 0x1f: /* pop ds */
14ce26e7
FB
5168 if (CODE64(s))
5169 goto illegal_op;
2c0262af 5170 reg = b >> 3;
8e31d234 5171 ot = gen_pop_T0(s);
100ec099 5172 gen_movl_seg_T0(s, reg);
8e31d234 5173 gen_pop_update(s, ot);
2c0262af 5174 if (reg == R_SS) {
a2cc3b24
FB
5175 /* if reg == SS, inhibit interrupts/trace. */
5176 /* If several instructions disable interrupts, only the
5177 _first_ does it */
7f0b7141 5178 gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2c0262af
FB
5179 s->tf = 0;
5180 }
5181 if (s->is_jmp) {
14ce26e7 5182 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5183 gen_eob(s);
5184 }
5185 break;
5186 case 0x1a1: /* pop fs */
5187 case 0x1a9: /* pop gs */
8e31d234 5188 ot = gen_pop_T0(s);
100ec099 5189 gen_movl_seg_T0(s, (b >> 3) & 7);
8e31d234 5190 gen_pop_update(s, ot);
2c0262af 5191 if (s->is_jmp) {
14ce26e7 5192 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5193 gen_eob(s);
5194 }
5195 break;
5196
5197 /**************************/
5198 /* mov */
5199 case 0x88:
5200 case 0x89: /* mov Gv, Ev */
ab4e4aec 5201 ot = mo_b_d(b, dflag);
0af10c86 5202 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5203 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5204
2c0262af 5205 /* generate a generic store */
0af10c86 5206 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
2c0262af
FB
5207 break;
5208 case 0xc6:
5209 case 0xc7: /* mov Ev, Iv */
ab4e4aec 5210 ot = mo_b_d(b, dflag);
0af10c86 5211 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5212 mod = (modrm >> 6) & 3;
14ce26e7
FB
5213 if (mod != 3) {
5214 s->rip_offset = insn_const_size(ot);
4eeb3939 5215 gen_lea_modrm(env, s, modrm);
14ce26e7 5216 }
0af10c86 5217 val = insn_get(env, s, ot);
1d1cc4d0 5218 tcg_gen_movi_tl(cpu_T0, val);
fd8ca9f6 5219 if (mod != 3) {
1d1cc4d0 5220 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
fd8ca9f6 5221 } else {
1d1cc4d0 5222 gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0);
fd8ca9f6 5223 }
2c0262af
FB
5224 break;
5225 case 0x8a:
5226 case 0x8b: /* mov Ev, Gv */
ab4e4aec 5227 ot = mo_b_d(b, dflag);
0af10c86 5228 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5229 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5230
0af10c86 5231 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
1d1cc4d0 5232 gen_op_mov_reg_v(ot, reg, cpu_T0);
2c0262af
FB
5233 break;
5234 case 0x8e: /* mov seg, Gv */
0af10c86 5235 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5236 reg = (modrm >> 3) & 7;
5237 if (reg >= 6 || reg == R_CS)
5238 goto illegal_op;
4ba9938c 5239 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
100ec099 5240 gen_movl_seg_T0(s, reg);
2c0262af
FB
5241 if (reg == R_SS) {
5242 /* if reg == SS, inhibit interrupts/trace */
a2cc3b24
FB
5243 /* If several instructions disable interrupts, only the
5244 _first_ does it */
7f0b7141 5245 gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2c0262af
FB
5246 s->tf = 0;
5247 }
5248 if (s->is_jmp) {
14ce26e7 5249 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5250 gen_eob(s);
5251 }
5252 break;
5253 case 0x8c: /* mov Gv, seg */
0af10c86 5254 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5255 reg = (modrm >> 3) & 7;
5256 mod = (modrm >> 6) & 3;
5257 if (reg >= 6)
5258 goto illegal_op;
5259 gen_op_movl_T0_seg(reg);
ab4e4aec 5260 ot = mod == 3 ? dflag : MO_16;
0af10c86 5261 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
5262 break;
5263
5264 case 0x1b6: /* movzbS Gv, Eb */
5265 case 0x1b7: /* movzwS Gv, Eb */
5266 case 0x1be: /* movsbS Gv, Eb */
5267 case 0x1bf: /* movswS Gv, Eb */
5268 {
c8fbc479
RH
5269 TCGMemOp d_ot;
5270 TCGMemOp s_ot;
5271
2c0262af 5272 /* d_ot is the size of destination */
ab4e4aec 5273 d_ot = dflag;
2c0262af 5274 /* ot is the size of source */
4ba9938c 5275 ot = (b & 1) + MO_8;
c8fbc479
RH
5276 /* s_ot is the sign+size of source */
5277 s_ot = b & 8 ? MO_SIGN | ot : ot;
5278
0af10c86 5279 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5280 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 5281 mod = (modrm >> 6) & 3;
14ce26e7 5282 rm = (modrm & 7) | REX_B(s);
3b46e624 5283
2c0262af 5284 if (mod == 3) {
1d1cc4d0 5285 gen_op_mov_v_reg(ot, cpu_T0, rm);
c8fbc479
RH
5286 switch (s_ot) {
5287 case MO_UB:
1d1cc4d0 5288 tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
2c0262af 5289 break;
c8fbc479 5290 case MO_SB:
1d1cc4d0 5291 tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
2c0262af 5292 break;
c8fbc479 5293 case MO_UW:
1d1cc4d0 5294 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
2c0262af
FB
5295 break;
5296 default:
c8fbc479 5297 case MO_SW:
1d1cc4d0 5298 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
2c0262af
FB
5299 break;
5300 }
1d1cc4d0 5301 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
2c0262af 5302 } else {
4eeb3939 5303 gen_lea_modrm(env, s, modrm);
1d1cc4d0
RH
5304 gen_op_ld_v(s, s_ot, cpu_T0, cpu_A0);
5305 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
2c0262af
FB
5306 }
5307 }
5308 break;
5309
5310 case 0x8d: /* lea */
0af10c86 5311 modrm = cpu_ldub_code(env, s->pc++);
3a1d9b8b
FB
5312 mod = (modrm >> 6) & 3;
5313 if (mod == 3)
5314 goto illegal_op;
14ce26e7 5315 reg = ((modrm >> 3) & 7) | rex_r;
a074ce42
RH
5316 {
5317 AddressParts a = gen_lea_modrm_0(env, s, modrm);
5318 TCGv ea = gen_lea_modrm_1(a);
5319 gen_op_mov_reg_v(dflag, reg, ea);
5320 }
2c0262af 5321 break;
3b46e624 5322
2c0262af
FB
5323 case 0xa0: /* mov EAX, Ov */
5324 case 0xa1:
5325 case 0xa2: /* mov Ov, EAX */
5326 case 0xa3:
2c0262af 5327 {
14ce26e7
FB
5328 target_ulong offset_addr;
5329
ab4e4aec 5330 ot = mo_b_d(b, dflag);
1d71ddb1 5331 switch (s->aflag) {
14ce26e7 5332#ifdef TARGET_X86_64
1d71ddb1 5333 case MO_64:
0af10c86 5334 offset_addr = cpu_ldq_code(env, s->pc);
14ce26e7 5335 s->pc += 8;
1d71ddb1 5336 break;
14ce26e7 5337#endif
1d71ddb1
RH
5338 default:
5339 offset_addr = insn_get(env, s, s->aflag);
5340 break;
14ce26e7 5341 }
3250cff8 5342 tcg_gen_movi_tl(cpu_A0, offset_addr);
664e0f19 5343 gen_add_A0_ds_seg(s);
14ce26e7 5344 if ((b & 2) == 0) {
1d1cc4d0
RH
5345 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
5346 gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
14ce26e7 5347 } else {
1d1cc4d0
RH
5348 gen_op_mov_v_reg(ot, cpu_T0, R_EAX);
5349 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
2c0262af
FB
5350 }
5351 }
2c0262af
FB
5352 break;
5353 case 0xd7: /* xlat */
1d71ddb1 5354 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
1d1cc4d0
RH
5355 tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]);
5356 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T0);
1d71ddb1 5357 gen_extu(s->aflag, cpu_A0);
664e0f19 5358 gen_add_A0_ds_seg(s);
1d1cc4d0
RH
5359 gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0);
5360 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
2c0262af
FB
5361 break;
5362 case 0xb0 ... 0xb7: /* mov R, Ib */
4ba9938c 5363 val = insn_get(env, s, MO_8);
1d1cc4d0
RH
5364 tcg_gen_movi_tl(cpu_T0, val);
5365 gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T0);
2c0262af
FB
5366 break;
5367 case 0xb8 ... 0xbf: /* mov R, Iv */
14ce26e7 5368#ifdef TARGET_X86_64
ab4e4aec 5369 if (dflag == MO_64) {
14ce26e7
FB
5370 uint64_t tmp;
5371 /* 64 bit case */
0af10c86 5372 tmp = cpu_ldq_code(env, s->pc);
14ce26e7
FB
5373 s->pc += 8;
5374 reg = (b & 7) | REX_B(s);
1d1cc4d0
RH
5375 tcg_gen_movi_tl(cpu_T0, tmp);
5376 gen_op_mov_reg_v(MO_64, reg, cpu_T0);
5fafdf24 5377 } else
14ce26e7
FB
5378#endif
5379 {
ab4e4aec 5380 ot = dflag;
0af10c86 5381 val = insn_get(env, s, ot);
14ce26e7 5382 reg = (b & 7) | REX_B(s);
1d1cc4d0
RH
5383 tcg_gen_movi_tl(cpu_T0, val);
5384 gen_op_mov_reg_v(ot, reg, cpu_T0);
14ce26e7 5385 }
2c0262af
FB
5386 break;
5387
5388 case 0x91 ... 0x97: /* xchg R, EAX */
7418027e 5389 do_xchg_reg_eax:
ab4e4aec 5390 ot = dflag;
14ce26e7 5391 reg = (b & 7) | REX_B(s);
2c0262af
FB
5392 rm = R_EAX;
5393 goto do_xchg_reg;
5394 case 0x86:
5395 case 0x87: /* xchg Ev, Gv */
ab4e4aec 5396 ot = mo_b_d(b, dflag);
0af10c86 5397 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5398 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5399 mod = (modrm >> 6) & 3;
5400 if (mod == 3) {
14ce26e7 5401 rm = (modrm & 7) | REX_B(s);
2c0262af 5402 do_xchg_reg:
1d1cc4d0
RH
5403 gen_op_mov_v_reg(ot, cpu_T0, reg);
5404 gen_op_mov_v_reg(ot, cpu_T1, rm);
5405 gen_op_mov_reg_v(ot, rm, cpu_T0);
5406 gen_op_mov_reg_v(ot, reg, cpu_T1);
2c0262af 5407 } else {
4eeb3939 5408 gen_lea_modrm(env, s, modrm);
1d1cc4d0 5409 gen_op_mov_v_reg(ot, cpu_T0, reg);
2c0262af
FB
5410 /* for xchg, lock is implicit */
5411 if (!(prefixes & PREFIX_LOCK))
a7812ae4 5412 gen_helper_lock();
1d1cc4d0
RH
5413 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
5414 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
2c0262af 5415 if (!(prefixes & PREFIX_LOCK))
a7812ae4 5416 gen_helper_unlock();
1d1cc4d0 5417 gen_op_mov_reg_v(ot, reg, cpu_T1);
2c0262af
FB
5418 }
5419 break;
5420 case 0xc4: /* les Gv */
701ed211 5421 /* In CODE64 this is VEX3; see above. */
2c0262af
FB
5422 op = R_ES;
5423 goto do_lxx;
5424 case 0xc5: /* lds Gv */
701ed211 5425 /* In CODE64 this is VEX2; see above. */
2c0262af
FB
5426 op = R_DS;
5427 goto do_lxx;
5428 case 0x1b2: /* lss Gv */
5429 op = R_SS;
5430 goto do_lxx;
5431 case 0x1b4: /* lfs Gv */
5432 op = R_FS;
5433 goto do_lxx;
5434 case 0x1b5: /* lgs Gv */
5435 op = R_GS;
5436 do_lxx:
ab4e4aec 5437 ot = dflag != MO_16 ? MO_32 : MO_16;
0af10c86 5438 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5439 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5440 mod = (modrm >> 6) & 3;
5441 if (mod == 3)
5442 goto illegal_op;
4eeb3939 5443 gen_lea_modrm(env, s, modrm);
1d1cc4d0 5444 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
830a19a4 5445 gen_add_A0_im(s, 1 << ot);
2c0262af 5446 /* load the segment first to handle exceptions properly */
1d1cc4d0 5447 gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
100ec099 5448 gen_movl_seg_T0(s, op);
2c0262af 5449 /* then put the data */
1d1cc4d0 5450 gen_op_mov_reg_v(ot, reg, cpu_T1);
2c0262af 5451 if (s->is_jmp) {
14ce26e7 5452 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5453 gen_eob(s);
5454 }
5455 break;
3b46e624 5456
2c0262af
FB
5457 /************************/
5458 /* shifts */
5459 case 0xc0:
5460 case 0xc1:
5461 /* shift Ev,Ib */
5462 shift = 2;
5463 grp2:
5464 {
ab4e4aec 5465 ot = mo_b_d(b, dflag);
0af10c86 5466 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5467 mod = (modrm >> 6) & 3;
2c0262af 5468 op = (modrm >> 3) & 7;
3b46e624 5469
2c0262af 5470 if (mod != 3) {
14ce26e7
FB
5471 if (shift == 2) {
5472 s->rip_offset = 1;
5473 }
4eeb3939 5474 gen_lea_modrm(env, s, modrm);
2c0262af
FB
5475 opreg = OR_TMP0;
5476 } else {
14ce26e7 5477 opreg = (modrm & 7) | REX_B(s);
2c0262af
FB
5478 }
5479
5480 /* simpler op */
5481 if (shift == 0) {
5482 gen_shift(s, op, ot, opreg, OR_ECX);
5483 } else {
5484 if (shift == 2) {
0af10c86 5485 shift = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5486 }
5487 gen_shifti(s, op, ot, opreg, shift);
5488 }
5489 }
5490 break;
5491 case 0xd0:
5492 case 0xd1:
5493 /* shift Ev,1 */
5494 shift = 1;
5495 goto grp2;
5496 case 0xd2:
5497 case 0xd3:
5498 /* shift Ev,cl */
5499 shift = 0;
5500 goto grp2;
5501
5502 case 0x1a4: /* shld imm */
5503 op = 0;
5504 shift = 1;
5505 goto do_shiftd;
5506 case 0x1a5: /* shld cl */
5507 op = 0;
5508 shift = 0;
5509 goto do_shiftd;
5510 case 0x1ac: /* shrd imm */
5511 op = 1;
5512 shift = 1;
5513 goto do_shiftd;
5514 case 0x1ad: /* shrd cl */
5515 op = 1;
5516 shift = 0;
5517 do_shiftd:
ab4e4aec 5518 ot = dflag;
0af10c86 5519 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5520 mod = (modrm >> 6) & 3;
14ce26e7
FB
5521 rm = (modrm & 7) | REX_B(s);
5522 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 5523 if (mod != 3) {
4eeb3939 5524 gen_lea_modrm(env, s, modrm);
b6abf97d 5525 opreg = OR_TMP0;
2c0262af 5526 } else {
b6abf97d 5527 opreg = rm;
2c0262af 5528 }
1d1cc4d0 5529 gen_op_mov_v_reg(ot, cpu_T1, reg);
3b46e624 5530
2c0262af 5531 if (shift) {
3b9d3cf1
PB
5532 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5533 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5534 tcg_temp_free(imm);
2c0262af 5535 } else {
3b9d3cf1 5536 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
2c0262af
FB
5537 }
5538 break;
5539
5540 /************************/
5541 /* floats */
5fafdf24 5542 case 0xd8 ... 0xdf:
7eee2a50
FB
5543 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5544 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5545 /* XXX: what to do if illegal op ? */
5546 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5547 break;
5548 }
0af10c86 5549 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5550 mod = (modrm >> 6) & 3;
5551 rm = modrm & 7;
5552 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
2c0262af
FB
5553 if (mod != 3) {
5554 /* memory op */
4eeb3939 5555 gen_lea_modrm(env, s, modrm);
2c0262af
FB
5556 switch(op) {
5557 case 0x00 ... 0x07: /* fxxxs */
5558 case 0x10 ... 0x17: /* fixxxl */
5559 case 0x20 ... 0x27: /* fxxxl */
5560 case 0x30 ... 0x37: /* fixxx */
5561 {
5562 int op1;
5563 op1 = op & 7;
5564
5565 switch(op >> 4) {
5566 case 0:
80b02013
RH
5567 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5568 s->mem_index, MO_LEUL);
d3eb5eae 5569 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5570 break;
5571 case 1:
80b02013
RH
5572 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5573 s->mem_index, MO_LEUL);
d3eb5eae 5574 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5575 break;
5576 case 2:
3c5f4116
RH
5577 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5578 s->mem_index, MO_LEQ);
d3eb5eae 5579 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
5580 break;
5581 case 3:
5582 default:
80b02013
RH
5583 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5584 s->mem_index, MO_LESW);
d3eb5eae 5585 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5586 break;
5587 }
3b46e624 5588
a7812ae4 5589 gen_helper_fp_arith_ST0_FT0(op1);
2c0262af
FB
5590 if (op1 == 3) {
5591 /* fcomp needs pop */
d3eb5eae 5592 gen_helper_fpop(cpu_env);
2c0262af
FB
5593 }
5594 }
5595 break;
5596 case 0x08: /* flds */
5597 case 0x0a: /* fsts */
5598 case 0x0b: /* fstps */
465e9838
FB
5599 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5600 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5601 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
2c0262af
FB
5602 switch(op & 7) {
5603 case 0:
5604 switch(op >> 4) {
5605 case 0:
80b02013
RH
5606 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5607 s->mem_index, MO_LEUL);
d3eb5eae 5608 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5609 break;
5610 case 1:
80b02013
RH
5611 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5612 s->mem_index, MO_LEUL);
d3eb5eae 5613 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5614 break;
5615 case 2:
3c5f4116
RH
5616 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5617 s->mem_index, MO_LEQ);
d3eb5eae 5618 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
5619 break;
5620 case 3:
5621 default:
80b02013
RH
5622 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5623 s->mem_index, MO_LESW);
d3eb5eae 5624 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5625 break;
5626 }
5627 break;
465e9838 5628 case 1:
19e6c4b8 5629 /* XXX: the corresponding CPUID bit must be tested ! */
465e9838
FB
5630 switch(op >> 4) {
5631 case 1:
d3eb5eae 5632 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5633 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5634 s->mem_index, MO_LEUL);
465e9838
FB
5635 break;
5636 case 2:
d3eb5eae 5637 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
3523e4bd
RH
5638 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5639 s->mem_index, MO_LEQ);
465e9838
FB
5640 break;
5641 case 3:
5642 default:
d3eb5eae 5643 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5644 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5645 s->mem_index, MO_LEUW);
19e6c4b8 5646 break;
465e9838 5647 }
d3eb5eae 5648 gen_helper_fpop(cpu_env);
465e9838 5649 break;
2c0262af
FB
5650 default:
5651 switch(op >> 4) {
5652 case 0:
d3eb5eae 5653 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5654 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5655 s->mem_index, MO_LEUL);
2c0262af
FB
5656 break;
5657 case 1:
d3eb5eae 5658 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5659 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5660 s->mem_index, MO_LEUL);
2c0262af
FB
5661 break;
5662 case 2:
d3eb5eae 5663 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
3523e4bd
RH
5664 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5665 s->mem_index, MO_LEQ);
2c0262af
FB
5666 break;
5667 case 3:
5668 default:
d3eb5eae 5669 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5670 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5671 s->mem_index, MO_LEUW);
2c0262af
FB
5672 break;
5673 }
5674 if ((op & 7) == 3)
d3eb5eae 5675 gen_helper_fpop(cpu_env);
2c0262af
FB
5676 break;
5677 }
5678 break;
5679 case 0x0c: /* fldenv mem */
ab4e4aec 5680 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
2c0262af
FB
5681 break;
5682 case 0x0d: /* fldcw mem */
80b02013
RH
5683 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5684 s->mem_index, MO_LEUW);
d3eb5eae 5685 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5686 break;
5687 case 0x0e: /* fnstenv mem */
ab4e4aec 5688 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
2c0262af
FB
5689 break;
5690 case 0x0f: /* fnstcw mem */
d3eb5eae 5691 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5692 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5693 s->mem_index, MO_LEUW);
2c0262af
FB
5694 break;
5695 case 0x1d: /* fldt mem */
d3eb5eae 5696 gen_helper_fldt_ST0(cpu_env, cpu_A0);
2c0262af
FB
5697 break;
5698 case 0x1f: /* fstpt mem */
d3eb5eae
BS
5699 gen_helper_fstt_ST0(cpu_env, cpu_A0);
5700 gen_helper_fpop(cpu_env);
2c0262af
FB
5701 break;
5702 case 0x2c: /* frstor mem */
ab4e4aec 5703 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
2c0262af
FB
5704 break;
5705 case 0x2e: /* fnsave mem */
ab4e4aec 5706 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
2c0262af
FB
5707 break;
5708 case 0x2f: /* fnstsw mem */
d3eb5eae 5709 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5710 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5711 s->mem_index, MO_LEUW);
2c0262af
FB
5712 break;
5713 case 0x3c: /* fbld */
d3eb5eae 5714 gen_helper_fbld_ST0(cpu_env, cpu_A0);
2c0262af
FB
5715 break;
5716 case 0x3e: /* fbstp */
d3eb5eae
BS
5717 gen_helper_fbst_ST0(cpu_env, cpu_A0);
5718 gen_helper_fpop(cpu_env);
2c0262af
FB
5719 break;
5720 case 0x3d: /* fildll */
3c5f4116 5721 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
d3eb5eae 5722 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
5723 break;
5724 case 0x3f: /* fistpll */
d3eb5eae 5725 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
3523e4bd 5726 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
d3eb5eae 5727 gen_helper_fpop(cpu_env);
2c0262af
FB
5728 break;
5729 default:
5730 goto illegal_op;
5731 }
5732 } else {
5733 /* register float ops */
5734 opreg = rm;
5735
5736 switch(op) {
5737 case 0x08: /* fld sti */
d3eb5eae
BS
5738 gen_helper_fpush(cpu_env);
5739 gen_helper_fmov_ST0_STN(cpu_env,
5740 tcg_const_i32((opreg + 1) & 7));
2c0262af
FB
5741 break;
5742 case 0x09: /* fxchg sti */
c169c906
FB
5743 case 0x29: /* fxchg4 sti, undocumented op */
5744 case 0x39: /* fxchg7 sti, undocumented op */
d3eb5eae 5745 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
2c0262af
FB
5746 break;
5747 case 0x0a: /* grp d9/2 */
5748 switch(rm) {
5749 case 0: /* fnop */
023fe10d 5750 /* check exceptions (FreeBSD FPU probe) */
d3eb5eae 5751 gen_helper_fwait(cpu_env);
2c0262af
FB
5752 break;
5753 default:
5754 goto illegal_op;
5755 }
5756 break;
5757 case 0x0c: /* grp d9/4 */
5758 switch(rm) {
5759 case 0: /* fchs */
d3eb5eae 5760 gen_helper_fchs_ST0(cpu_env);
2c0262af
FB
5761 break;
5762 case 1: /* fabs */
d3eb5eae 5763 gen_helper_fabs_ST0(cpu_env);
2c0262af
FB
5764 break;
5765 case 4: /* ftst */
d3eb5eae
BS
5766 gen_helper_fldz_FT0(cpu_env);
5767 gen_helper_fcom_ST0_FT0(cpu_env);
2c0262af
FB
5768 break;
5769 case 5: /* fxam */
d3eb5eae 5770 gen_helper_fxam_ST0(cpu_env);
2c0262af
FB
5771 break;
5772 default:
5773 goto illegal_op;
5774 }
5775 break;
5776 case 0x0d: /* grp d9/5 */
5777 {
5778 switch(rm) {
5779 case 0:
d3eb5eae
BS
5780 gen_helper_fpush(cpu_env);
5781 gen_helper_fld1_ST0(cpu_env);
2c0262af
FB
5782 break;
5783 case 1:
d3eb5eae
BS
5784 gen_helper_fpush(cpu_env);
5785 gen_helper_fldl2t_ST0(cpu_env);
2c0262af
FB
5786 break;
5787 case 2:
d3eb5eae
BS
5788 gen_helper_fpush(cpu_env);
5789 gen_helper_fldl2e_ST0(cpu_env);
2c0262af
FB
5790 break;
5791 case 3:
d3eb5eae
BS
5792 gen_helper_fpush(cpu_env);
5793 gen_helper_fldpi_ST0(cpu_env);
2c0262af
FB
5794 break;
5795 case 4:
d3eb5eae
BS
5796 gen_helper_fpush(cpu_env);
5797 gen_helper_fldlg2_ST0(cpu_env);
2c0262af
FB
5798 break;
5799 case 5:
d3eb5eae
BS
5800 gen_helper_fpush(cpu_env);
5801 gen_helper_fldln2_ST0(cpu_env);
2c0262af
FB
5802 break;
5803 case 6:
d3eb5eae
BS
5804 gen_helper_fpush(cpu_env);
5805 gen_helper_fldz_ST0(cpu_env);
2c0262af
FB
5806 break;
5807 default:
5808 goto illegal_op;
5809 }
5810 }
5811 break;
5812 case 0x0e: /* grp d9/6 */
5813 switch(rm) {
5814 case 0: /* f2xm1 */
d3eb5eae 5815 gen_helper_f2xm1(cpu_env);
2c0262af
FB
5816 break;
5817 case 1: /* fyl2x */
d3eb5eae 5818 gen_helper_fyl2x(cpu_env);
2c0262af
FB
5819 break;
5820 case 2: /* fptan */
d3eb5eae 5821 gen_helper_fptan(cpu_env);
2c0262af
FB
5822 break;
5823 case 3: /* fpatan */
d3eb5eae 5824 gen_helper_fpatan(cpu_env);
2c0262af
FB
5825 break;
5826 case 4: /* fxtract */
d3eb5eae 5827 gen_helper_fxtract(cpu_env);
2c0262af
FB
5828 break;
5829 case 5: /* fprem1 */
d3eb5eae 5830 gen_helper_fprem1(cpu_env);
2c0262af
FB
5831 break;
5832 case 6: /* fdecstp */
d3eb5eae 5833 gen_helper_fdecstp(cpu_env);
2c0262af
FB
5834 break;
5835 default:
5836 case 7: /* fincstp */
d3eb5eae 5837 gen_helper_fincstp(cpu_env);
2c0262af
FB
5838 break;
5839 }
5840 break;
5841 case 0x0f: /* grp d9/7 */
5842 switch(rm) {
5843 case 0: /* fprem */
d3eb5eae 5844 gen_helper_fprem(cpu_env);
2c0262af
FB
5845 break;
5846 case 1: /* fyl2xp1 */
d3eb5eae 5847 gen_helper_fyl2xp1(cpu_env);
2c0262af
FB
5848 break;
5849 case 2: /* fsqrt */
d3eb5eae 5850 gen_helper_fsqrt(cpu_env);
2c0262af
FB
5851 break;
5852 case 3: /* fsincos */
d3eb5eae 5853 gen_helper_fsincos(cpu_env);
2c0262af
FB
5854 break;
5855 case 5: /* fscale */
d3eb5eae 5856 gen_helper_fscale(cpu_env);
2c0262af
FB
5857 break;
5858 case 4: /* frndint */
d3eb5eae 5859 gen_helper_frndint(cpu_env);
2c0262af
FB
5860 break;
5861 case 6: /* fsin */
d3eb5eae 5862 gen_helper_fsin(cpu_env);
2c0262af
FB
5863 break;
5864 default:
5865 case 7: /* fcos */
d3eb5eae 5866 gen_helper_fcos(cpu_env);
2c0262af
FB
5867 break;
5868 }
5869 break;
5870 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5871 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5872 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5873 {
5874 int op1;
3b46e624 5875
2c0262af
FB
5876 op1 = op & 7;
5877 if (op >= 0x20) {
a7812ae4 5878 gen_helper_fp_arith_STN_ST0(op1, opreg);
2c0262af 5879 if (op >= 0x30)
d3eb5eae 5880 gen_helper_fpop(cpu_env);
2c0262af 5881 } else {
d3eb5eae 5882 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
a7812ae4 5883 gen_helper_fp_arith_ST0_FT0(op1);
2c0262af
FB
5884 }
5885 }
5886 break;
5887 case 0x02: /* fcom */
c169c906 5888 case 0x22: /* fcom2, undocumented op */
d3eb5eae
BS
5889 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5890 gen_helper_fcom_ST0_FT0(cpu_env);
2c0262af
FB
5891 break;
5892 case 0x03: /* fcomp */
c169c906
FB
5893 case 0x23: /* fcomp3, undocumented op */
5894 case 0x32: /* fcomp5, undocumented op */
d3eb5eae
BS
5895 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5896 gen_helper_fcom_ST0_FT0(cpu_env);
5897 gen_helper_fpop(cpu_env);
2c0262af
FB
5898 break;
5899 case 0x15: /* da/5 */
5900 switch(rm) {
5901 case 1: /* fucompp */
d3eb5eae
BS
5902 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
5903 gen_helper_fucom_ST0_FT0(cpu_env);
5904 gen_helper_fpop(cpu_env);
5905 gen_helper_fpop(cpu_env);
2c0262af
FB
5906 break;
5907 default:
5908 goto illegal_op;
5909 }
5910 break;
5911 case 0x1c:
5912 switch(rm) {
5913 case 0: /* feni (287 only, just do nop here) */
5914 break;
5915 case 1: /* fdisi (287 only, just do nop here) */
5916 break;
5917 case 2: /* fclex */
d3eb5eae 5918 gen_helper_fclex(cpu_env);
2c0262af
FB
5919 break;
5920 case 3: /* fninit */
d3eb5eae 5921 gen_helper_fninit(cpu_env);
2c0262af
FB
5922 break;
5923 case 4: /* fsetpm (287 only, just do nop here) */
5924 break;
5925 default:
5926 goto illegal_op;
5927 }
5928 break;
5929 case 0x1d: /* fucomi */
bff93281
PM
5930 if (!(s->cpuid_features & CPUID_CMOV)) {
5931 goto illegal_op;
5932 }
773cdfcc 5933 gen_update_cc_op(s);
d3eb5eae
BS
5934 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5935 gen_helper_fucomi_ST0_FT0(cpu_env);
3ca51d07 5936 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
5937 break;
5938 case 0x1e: /* fcomi */
bff93281
PM
5939 if (!(s->cpuid_features & CPUID_CMOV)) {
5940 goto illegal_op;
5941 }
773cdfcc 5942 gen_update_cc_op(s);
d3eb5eae
BS
5943 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5944 gen_helper_fcomi_ST0_FT0(cpu_env);
3ca51d07 5945 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 5946 break;
658c8bda 5947 case 0x28: /* ffree sti */
d3eb5eae 5948 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
5fafdf24 5949 break;
2c0262af 5950 case 0x2a: /* fst sti */
d3eb5eae 5951 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
2c0262af
FB
5952 break;
5953 case 0x2b: /* fstp sti */
c169c906
FB
5954 case 0x0b: /* fstp1 sti, undocumented op */
5955 case 0x3a: /* fstp8 sti, undocumented op */
5956 case 0x3b: /* fstp9 sti, undocumented op */
d3eb5eae
BS
5957 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
5958 gen_helper_fpop(cpu_env);
2c0262af
FB
5959 break;
5960 case 0x2c: /* fucom st(i) */
d3eb5eae
BS
5961 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5962 gen_helper_fucom_ST0_FT0(cpu_env);
2c0262af
FB
5963 break;
5964 case 0x2d: /* fucomp st(i) */
d3eb5eae
BS
5965 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5966 gen_helper_fucom_ST0_FT0(cpu_env);
5967 gen_helper_fpop(cpu_env);
2c0262af
FB
5968 break;
5969 case 0x33: /* de/3 */
5970 switch(rm) {
5971 case 1: /* fcompp */
d3eb5eae
BS
5972 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
5973 gen_helper_fcom_ST0_FT0(cpu_env);
5974 gen_helper_fpop(cpu_env);
5975 gen_helper_fpop(cpu_env);
2c0262af
FB
5976 break;
5977 default:
5978 goto illegal_op;
5979 }
5980 break;
c169c906 5981 case 0x38: /* ffreep sti, undocumented op */
d3eb5eae
BS
5982 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
5983 gen_helper_fpop(cpu_env);
c169c906 5984 break;
2c0262af
FB
5985 case 0x3c: /* df/4 */
5986 switch(rm) {
5987 case 0:
d3eb5eae 5988 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
1d1cc4d0
RH
5989 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
5990 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
2c0262af
FB
5991 break;
5992 default:
5993 goto illegal_op;
5994 }
5995 break;
5996 case 0x3d: /* fucomip */
bff93281
PM
5997 if (!(s->cpuid_features & CPUID_CMOV)) {
5998 goto illegal_op;
5999 }
773cdfcc 6000 gen_update_cc_op(s);
d3eb5eae
BS
6001 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6002 gen_helper_fucomi_ST0_FT0(cpu_env);
6003 gen_helper_fpop(cpu_env);
3ca51d07 6004 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6005 break;
6006 case 0x3e: /* fcomip */
bff93281
PM
6007 if (!(s->cpuid_features & CPUID_CMOV)) {
6008 goto illegal_op;
6009 }
773cdfcc 6010 gen_update_cc_op(s);
d3eb5eae
BS
6011 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6012 gen_helper_fcomi_ST0_FT0(cpu_env);
6013 gen_helper_fpop(cpu_env);
3ca51d07 6014 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 6015 break;
a2cc3b24
FB
6016 case 0x10 ... 0x13: /* fcmovxx */
6017 case 0x18 ... 0x1b:
6018 {
42a268c2
RH
6019 int op1;
6020 TCGLabel *l1;
d70040bc 6021 static const uint8_t fcmov_cc[8] = {
a2cc3b24
FB
6022 (JCC_B << 1),
6023 (JCC_Z << 1),
6024 (JCC_BE << 1),
6025 (JCC_P << 1),
6026 };
bff93281
PM
6027
6028 if (!(s->cpuid_features & CPUID_CMOV)) {
6029 goto illegal_op;
6030 }
1e4840bf 6031 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
19e6c4b8 6032 l1 = gen_new_label();
dc259201 6033 gen_jcc1_noeob(s, op1, l1);
d3eb5eae 6034 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
19e6c4b8 6035 gen_set_label(l1);
a2cc3b24
FB
6036 }
6037 break;
2c0262af
FB
6038 default:
6039 goto illegal_op;
6040 }
6041 }
6042 break;
6043 /************************/
6044 /* string ops */
6045
6046 case 0xa4: /* movsS */
6047 case 0xa5:
ab4e4aec 6048 ot = mo_b_d(b, dflag);
2c0262af
FB
6049 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6050 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6051 } else {
6052 gen_movs(s, ot);
6053 }
6054 break;
3b46e624 6055
2c0262af
FB
6056 case 0xaa: /* stosS */
6057 case 0xab:
ab4e4aec 6058 ot = mo_b_d(b, dflag);
2c0262af
FB
6059 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6060 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6061 } else {
6062 gen_stos(s, ot);
6063 }
6064 break;
6065 case 0xac: /* lodsS */
6066 case 0xad:
ab4e4aec 6067 ot = mo_b_d(b, dflag);
2c0262af
FB
6068 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6069 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6070 } else {
6071 gen_lods(s, ot);
6072 }
6073 break;
6074 case 0xae: /* scasS */
6075 case 0xaf:
ab4e4aec 6076 ot = mo_b_d(b, dflag);
2c0262af
FB
6077 if (prefixes & PREFIX_REPNZ) {
6078 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6079 } else if (prefixes & PREFIX_REPZ) {
6080 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6081 } else {
6082 gen_scas(s, ot);
2c0262af
FB
6083 }
6084 break;
6085
6086 case 0xa6: /* cmpsS */
6087 case 0xa7:
ab4e4aec 6088 ot = mo_b_d(b, dflag);
2c0262af
FB
6089 if (prefixes & PREFIX_REPNZ) {
6090 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6091 } else if (prefixes & PREFIX_REPZ) {
6092 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6093 } else {
6094 gen_cmps(s, ot);
2c0262af
FB
6095 }
6096 break;
6097 case 0x6c: /* insS */
6098 case 0x6d:
ab4e4aec 6099 ot = mo_b_d32(b, dflag);
1d1cc4d0 6100 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
b8b6a50b
FB
6101 gen_check_io(s, ot, pc_start - s->cs_base,
6102 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
f115e911
FB
6103 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6104 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
2c0262af 6105 } else {
f115e911 6106 gen_ins(s, ot);
bd79255d 6107 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef
PB
6108 gen_jmp(s, s->pc - s->cs_base);
6109 }
2c0262af
FB
6110 }
6111 break;
6112 case 0x6e: /* outsS */
6113 case 0x6f:
ab4e4aec 6114 ot = mo_b_d32(b, dflag);
1d1cc4d0 6115 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
b8b6a50b
FB
6116 gen_check_io(s, ot, pc_start - s->cs_base,
6117 svm_is_rep(prefixes) | 4);
f115e911
FB
6118 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6119 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
2c0262af 6120 } else {
f115e911 6121 gen_outs(s, ot);
bd79255d 6122 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef
PB
6123 gen_jmp(s, s->pc - s->cs_base);
6124 }
2c0262af
FB
6125 }
6126 break;
6127
6128 /************************/
6129 /* port I/O */
0573fbfc 6130
2c0262af
FB
6131 case 0xe4:
6132 case 0xe5:
ab4e4aec 6133 ot = mo_b_d32(b, dflag);
0af10c86 6134 val = cpu_ldub_code(env, s->pc++);
1d1cc4d0 6135 tcg_gen_movi_tl(cpu_T0, val);
b8b6a50b
FB
6136 gen_check_io(s, ot, pc_start - s->cs_base,
6137 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
bd79255d 6138 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef 6139 gen_io_start();
bd79255d 6140 }
1b90d56e 6141 tcg_gen_movi_i32(cpu_tmp2_i32, val);
1d1cc4d0
RH
6142 gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
6143 gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
5223a942 6144 gen_bpt_io(s, cpu_tmp2_i32, ot);
bd79255d 6145 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef
PB
6146 gen_io_end();
6147 gen_jmp(s, s->pc - s->cs_base);
6148 }
2c0262af
FB
6149 break;
6150 case 0xe6:
6151 case 0xe7:
ab4e4aec 6152 ot = mo_b_d32(b, dflag);
0af10c86 6153 val = cpu_ldub_code(env, s->pc++);
1d1cc4d0 6154 tcg_gen_movi_tl(cpu_T0, val);
b8b6a50b
FB
6155 gen_check_io(s, ot, pc_start - s->cs_base,
6156 svm_is_rep(prefixes));
1d1cc4d0 6157 gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
b8b6a50b 6158
bd79255d 6159 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef 6160 gen_io_start();
bd79255d 6161 }
1b90d56e 6162 tcg_gen_movi_i32(cpu_tmp2_i32, val);
1d1cc4d0 6163 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
a7812ae4 6164 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
5223a942 6165 gen_bpt_io(s, cpu_tmp2_i32, ot);
bd79255d 6166 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef
PB
6167 gen_io_end();
6168 gen_jmp(s, s->pc - s->cs_base);
6169 }
2c0262af
FB
6170 break;
6171 case 0xec:
6172 case 0xed:
ab4e4aec 6173 ot = mo_b_d32(b, dflag);
1d1cc4d0 6174 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
b8b6a50b
FB
6175 gen_check_io(s, ot, pc_start - s->cs_base,
6176 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
bd79255d 6177 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef 6178 gen_io_start();
bd79255d 6179 }
1d1cc4d0
RH
6180 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
6181 gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
6182 gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
5223a942 6183 gen_bpt_io(s, cpu_tmp2_i32, ot);
bd79255d 6184 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef
PB
6185 gen_io_end();
6186 gen_jmp(s, s->pc - s->cs_base);
6187 }
2c0262af
FB
6188 break;
6189 case 0xee:
6190 case 0xef:
ab4e4aec 6191 ot = mo_b_d32(b, dflag);
1d1cc4d0 6192 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
b8b6a50b
FB
6193 gen_check_io(s, ot, pc_start - s->cs_base,
6194 svm_is_rep(prefixes));
1d1cc4d0 6195 gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
b8b6a50b 6196
bd79255d 6197 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef 6198 gen_io_start();
bd79255d 6199 }
1d1cc4d0
RH
6200 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
6201 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
a7812ae4 6202 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
5223a942 6203 gen_bpt_io(s, cpu_tmp2_i32, ot);
bd79255d 6204 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef
PB
6205 gen_io_end();
6206 gen_jmp(s, s->pc - s->cs_base);
6207 }
2c0262af
FB
6208 break;
6209
6210 /************************/
6211 /* control */
6212 case 0xc2: /* ret im */
0af10c86 6213 val = cpu_ldsw_code(env, s->pc);
2c0262af 6214 s->pc += 2;
8e31d234
RH
6215 ot = gen_pop_T0(s);
6216 gen_stack_update(s, val + (1 << ot));
6217 /* Note that gen_pop_T0 uses a zero-extending load. */
1d1cc4d0 6218 gen_op_jmp_v(cpu_T0);
7d117ce8 6219 gen_bnd_jmp(s);
2c0262af
FB
6220 gen_eob(s);
6221 break;
6222 case 0xc3: /* ret */
8e31d234
RH
6223 ot = gen_pop_T0(s);
6224 gen_pop_update(s, ot);
6225 /* Note that gen_pop_T0 uses a zero-extending load. */
1d1cc4d0 6226 gen_op_jmp_v(cpu_T0);
7d117ce8 6227 gen_bnd_jmp(s);
2c0262af
FB
6228 gen_eob(s);
6229 break;
6230 case 0xca: /* lret im */
0af10c86 6231 val = cpu_ldsw_code(env, s->pc);
2c0262af
FB
6232 s->pc += 2;
6233 do_lret:
6234 if (s->pe && !s->vm86) {
773cdfcc 6235 gen_update_cc_op(s);
14ce26e7 6236 gen_jmp_im(pc_start - s->cs_base);
ab4e4aec 6237 gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
a7812ae4 6238 tcg_const_i32(val));
2c0262af
FB
6239 } else {
6240 gen_stack_A0(s);
6241 /* pop offset */
1d1cc4d0 6242 gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
2c0262af
FB
6243 /* NOTE: keeping EIP updated is not a problem in case of
6244 exception */
1d1cc4d0 6245 gen_op_jmp_v(cpu_T0);
2c0262af 6246 /* pop selector */
4e85057b 6247 gen_add_A0_im(s, 1 << dflag);
1d1cc4d0 6248 gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
3bd7da9e 6249 gen_op_movl_seg_T0_vm(R_CS);
2c0262af 6250 /* add stack offset */
ab4e4aec 6251 gen_stack_update(s, val + (2 << dflag));
2c0262af
FB
6252 }
6253 gen_eob(s);
6254 break;
6255 case 0xcb: /* lret */
6256 val = 0;
6257 goto do_lret;
6258 case 0xcf: /* iret */
872929aa 6259 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
2c0262af
FB
6260 if (!s->pe) {
6261 /* real mode */
ab4e4aec 6262 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
3ca51d07 6263 set_cc_op(s, CC_OP_EFLAGS);
f115e911
FB
6264 } else if (s->vm86) {
6265 if (s->iopl != 3) {
6266 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6267 } else {
ab4e4aec 6268 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
3ca51d07 6269 set_cc_op(s, CC_OP_EFLAGS);
f115e911 6270 }
2c0262af 6271 } else {
ab4e4aec 6272 gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
a7812ae4 6273 tcg_const_i32(s->pc - s->cs_base));
3ca51d07 6274 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6275 }
6276 gen_eob(s);
6277 break;
6278 case 0xe8: /* call im */
6279 {
ab4e4aec 6280 if (dflag != MO_16) {
4ba9938c 6281 tval = (int32_t)insn_get(env, s, MO_32);
ab4e4aec 6282 } else {
4ba9938c 6283 tval = (int16_t)insn_get(env, s, MO_16);
ab4e4aec 6284 }
2c0262af 6285 next_eip = s->pc - s->cs_base;
14ce26e7 6286 tval += next_eip;
ab4e4aec 6287 if (dflag == MO_16) {
14ce26e7 6288 tval &= 0xffff;
ab4e4aec 6289 } else if (!CODE64(s)) {
99596385 6290 tval &= 0xffffffff;
ab4e4aec 6291 }
1d1cc4d0
RH
6292 tcg_gen_movi_tl(cpu_T0, next_eip);
6293 gen_push_v(s, cpu_T0);
7d117ce8 6294 gen_bnd_jmp(s);
14ce26e7 6295 gen_jmp(s, tval);
2c0262af
FB
6296 }
6297 break;
6298 case 0x9a: /* lcall im */
6299 {
6300 unsigned int selector, offset;
3b46e624 6301
14ce26e7
FB
6302 if (CODE64(s))
6303 goto illegal_op;
ab4e4aec 6304 ot = dflag;
0af10c86 6305 offset = insn_get(env, s, ot);
4ba9938c 6306 selector = insn_get(env, s, MO_16);
3b46e624 6307
1d1cc4d0
RH
6308 tcg_gen_movi_tl(cpu_T0, selector);
6309 tcg_gen_movi_tl(cpu_T1, offset);
2c0262af
FB
6310 }
6311 goto do_lcall;
ecada8a2 6312 case 0xe9: /* jmp im */
ab4e4aec 6313 if (dflag != MO_16) {
4ba9938c 6314 tval = (int32_t)insn_get(env, s, MO_32);
ab4e4aec 6315 } else {
4ba9938c 6316 tval = (int16_t)insn_get(env, s, MO_16);
ab4e4aec 6317 }
14ce26e7 6318 tval += s->pc - s->cs_base;
ab4e4aec 6319 if (dflag == MO_16) {
14ce26e7 6320 tval &= 0xffff;
ab4e4aec 6321 } else if (!CODE64(s)) {
32938e12 6322 tval &= 0xffffffff;
ab4e4aec 6323 }
7d117ce8 6324 gen_bnd_jmp(s);
14ce26e7 6325 gen_jmp(s, tval);
2c0262af
FB
6326 break;
6327 case 0xea: /* ljmp im */
6328 {
6329 unsigned int selector, offset;
6330
14ce26e7
FB
6331 if (CODE64(s))
6332 goto illegal_op;
ab4e4aec 6333 ot = dflag;
0af10c86 6334 offset = insn_get(env, s, ot);
4ba9938c 6335 selector = insn_get(env, s, MO_16);
3b46e624 6336
1d1cc4d0
RH
6337 tcg_gen_movi_tl(cpu_T0, selector);
6338 tcg_gen_movi_tl(cpu_T1, offset);
2c0262af
FB
6339 }
6340 goto do_ljmp;
6341 case 0xeb: /* jmp Jb */
4ba9938c 6342 tval = (int8_t)insn_get(env, s, MO_8);
14ce26e7 6343 tval += s->pc - s->cs_base;
ab4e4aec 6344 if (dflag == MO_16) {
14ce26e7 6345 tval &= 0xffff;
ab4e4aec 6346 }
14ce26e7 6347 gen_jmp(s, tval);
2c0262af
FB
6348 break;
6349 case 0x70 ... 0x7f: /* jcc Jb */
4ba9938c 6350 tval = (int8_t)insn_get(env, s, MO_8);
2c0262af
FB
6351 goto do_jcc;
6352 case 0x180 ... 0x18f: /* jcc Jv */
ab4e4aec 6353 if (dflag != MO_16) {
4ba9938c 6354 tval = (int32_t)insn_get(env, s, MO_32);
2c0262af 6355 } else {
4ba9938c 6356 tval = (int16_t)insn_get(env, s, MO_16);
2c0262af
FB
6357 }
6358 do_jcc:
6359 next_eip = s->pc - s->cs_base;
14ce26e7 6360 tval += next_eip;
ab4e4aec 6361 if (dflag == MO_16) {
14ce26e7 6362 tval &= 0xffff;
ab4e4aec 6363 }
7d117ce8 6364 gen_bnd_jmp(s);
14ce26e7 6365 gen_jcc(s, b, tval, next_eip);
2c0262af
FB
6366 break;
6367
6368 case 0x190 ... 0x19f: /* setcc Gv */
0af10c86 6369 modrm = cpu_ldub_code(env, s->pc++);
1d1cc4d0 6370 gen_setcc1(s, b, cpu_T0);
4ba9938c 6371 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
2c0262af
FB
6372 break;
6373 case 0x140 ... 0x14f: /* cmov Gv, Ev */
bff93281
PM
6374 if (!(s->cpuid_features & CPUID_CMOV)) {
6375 goto illegal_op;
6376 }
ab4e4aec 6377 ot = dflag;
f32d3781
PB
6378 modrm = cpu_ldub_code(env, s->pc++);
6379 reg = ((modrm >> 3) & 7) | rex_r;
6380 gen_cmovcc1(env, s, ot, b, modrm, reg);
2c0262af 6381 break;
3b46e624 6382
2c0262af
FB
6383 /************************/
6384 /* flags */
6385 case 0x9c: /* pushf */
872929aa 6386 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
2c0262af
FB
6387 if (s->vm86 && s->iopl != 3) {
6388 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6389 } else {
773cdfcc 6390 gen_update_cc_op(s);
1d1cc4d0
RH
6391 gen_helper_read_eflags(cpu_T0, cpu_env);
6392 gen_push_v(s, cpu_T0);
2c0262af
FB
6393 }
6394 break;
6395 case 0x9d: /* popf */
872929aa 6396 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
2c0262af
FB
6397 if (s->vm86 && s->iopl != 3) {
6398 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6399 } else {
8e31d234 6400 ot = gen_pop_T0(s);
2c0262af 6401 if (s->cpl == 0) {
ab4e4aec 6402 if (dflag != MO_16) {
1d1cc4d0 6403 gen_helper_write_eflags(cpu_env, cpu_T0,
f0967a1a
BS
6404 tcg_const_i32((TF_MASK | AC_MASK |
6405 ID_MASK | NT_MASK |
6406 IF_MASK |
6407 IOPL_MASK)));
2c0262af 6408 } else {
1d1cc4d0 6409 gen_helper_write_eflags(cpu_env, cpu_T0,
f0967a1a
BS
6410 tcg_const_i32((TF_MASK | AC_MASK |
6411 ID_MASK | NT_MASK |
6412 IF_MASK | IOPL_MASK)
6413 & 0xffff));
2c0262af
FB
6414 }
6415 } else {
4136f33c 6416 if (s->cpl <= s->iopl) {
ab4e4aec 6417 if (dflag != MO_16) {
1d1cc4d0 6418 gen_helper_write_eflags(cpu_env, cpu_T0,
f0967a1a
BS
6419 tcg_const_i32((TF_MASK |
6420 AC_MASK |
6421 ID_MASK |
6422 NT_MASK |
6423 IF_MASK)));
4136f33c 6424 } else {
1d1cc4d0 6425 gen_helper_write_eflags(cpu_env, cpu_T0,
f0967a1a
BS
6426 tcg_const_i32((TF_MASK |
6427 AC_MASK |
6428 ID_MASK |
6429 NT_MASK |
6430 IF_MASK)
6431 & 0xffff));
4136f33c 6432 }
2c0262af 6433 } else {
ab4e4aec 6434 if (dflag != MO_16) {
1d1cc4d0 6435 gen_helper_write_eflags(cpu_env, cpu_T0,
f0967a1a
BS
6436 tcg_const_i32((TF_MASK | AC_MASK |
6437 ID_MASK | NT_MASK)));
4136f33c 6438 } else {
1d1cc4d0 6439 gen_helper_write_eflags(cpu_env, cpu_T0,
f0967a1a
BS
6440 tcg_const_i32((TF_MASK | AC_MASK |
6441 ID_MASK | NT_MASK)
6442 & 0xffff));
4136f33c 6443 }
2c0262af
FB
6444 }
6445 }
8e31d234 6446 gen_pop_update(s, ot);
3ca51d07 6447 set_cc_op(s, CC_OP_EFLAGS);
a9321a4d 6448 /* abort translation because TF/AC flag may change */
14ce26e7 6449 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
6450 gen_eob(s);
6451 }
6452 break;
6453 case 0x9e: /* sahf */
12e26b75 6454 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
14ce26e7 6455 goto illegal_op;
1d1cc4d0 6456 gen_op_mov_v_reg(MO_8, cpu_T0, R_AH);
d229edce 6457 gen_compute_eflags(s);
bd7a7b33 6458 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
1d1cc4d0
RH
6459 tcg_gen_andi_tl(cpu_T0, cpu_T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
6460 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T0);
2c0262af
FB
6461 break;
6462 case 0x9f: /* lahf */
12e26b75 6463 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
14ce26e7 6464 goto illegal_op;
d229edce 6465 gen_compute_eflags(s);
bd7a7b33 6466 /* Note: gen_compute_eflags() only gives the condition codes */
1d1cc4d0
RH
6467 tcg_gen_ori_tl(cpu_T0, cpu_cc_src, 0x02);
6468 gen_op_mov_reg_v(MO_8, R_AH, cpu_T0);
2c0262af
FB
6469 break;
6470 case 0xf5: /* cmc */
d229edce 6471 gen_compute_eflags(s);
bd7a7b33 6472 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
2c0262af
FB
6473 break;
6474 case 0xf8: /* clc */
d229edce 6475 gen_compute_eflags(s);
bd7a7b33 6476 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
2c0262af
FB
6477 break;
6478 case 0xf9: /* stc */
d229edce 6479 gen_compute_eflags(s);
bd7a7b33 6480 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
2c0262af
FB
6481 break;
6482 case 0xfc: /* cld */
b6abf97d 6483 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
317ac620 6484 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
2c0262af
FB
6485 break;
6486 case 0xfd: /* std */
b6abf97d 6487 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
317ac620 6488 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
2c0262af
FB
6489 break;
6490
6491 /************************/
6492 /* bit operations */
6493 case 0x1ba: /* bt/bts/btr/btc Gv, im */
ab4e4aec 6494 ot = dflag;
0af10c86 6495 modrm = cpu_ldub_code(env, s->pc++);
33698e5f 6496 op = (modrm >> 3) & 7;
2c0262af 6497 mod = (modrm >> 6) & 3;
14ce26e7 6498 rm = (modrm & 7) | REX_B(s);
2c0262af 6499 if (mod != 3) {
14ce26e7 6500 s->rip_offset = 1;
4eeb3939 6501 gen_lea_modrm(env, s, modrm);
1d1cc4d0 6502 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
2c0262af 6503 } else {
1d1cc4d0 6504 gen_op_mov_v_reg(ot, cpu_T0, rm);
2c0262af
FB
6505 }
6506 /* load shift */
0af10c86 6507 val = cpu_ldub_code(env, s->pc++);
1d1cc4d0 6508 tcg_gen_movi_tl(cpu_T1, val);
2c0262af
FB
6509 if (op < 4)
6510 goto illegal_op;
6511 op -= 4;
f484d386 6512 goto bt_op;
2c0262af
FB
6513 case 0x1a3: /* bt Gv, Ev */
6514 op = 0;
6515 goto do_btx;
6516 case 0x1ab: /* bts */
6517 op = 1;
6518 goto do_btx;
6519 case 0x1b3: /* btr */
6520 op = 2;
6521 goto do_btx;
6522 case 0x1bb: /* btc */
6523 op = 3;
6524 do_btx:
ab4e4aec 6525 ot = dflag;
0af10c86 6526 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 6527 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 6528 mod = (modrm >> 6) & 3;
14ce26e7 6529 rm = (modrm & 7) | REX_B(s);
1d1cc4d0 6530 gen_op_mov_v_reg(MO_32, cpu_T1, reg);
2c0262af 6531 if (mod != 3) {
4eeb3939 6532 gen_lea_modrm(env, s, modrm);
2c0262af 6533 /* specific case: we need to add a displacement */
1d1cc4d0
RH
6534 gen_exts(ot, cpu_T1);
6535 tcg_gen_sari_tl(cpu_tmp0, cpu_T1, 3 + ot);
f484d386
FB
6536 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6537 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
1d1cc4d0 6538 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
2c0262af 6539 } else {
1d1cc4d0 6540 gen_op_mov_v_reg(ot, cpu_T0, rm);
2c0262af 6541 }
f484d386 6542 bt_op:
1d1cc4d0
RH
6543 tcg_gen_andi_tl(cpu_T1, cpu_T1, (1 << (3 + ot)) - 1);
6544 tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
f484d386
FB
6545 switch(op) {
6546 case 0:
f484d386
FB
6547 break;
6548 case 1:
f484d386 6549 tcg_gen_movi_tl(cpu_tmp0, 1);
1d1cc4d0
RH
6550 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
6551 tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
f484d386
FB
6552 break;
6553 case 2:
f484d386 6554 tcg_gen_movi_tl(cpu_tmp0, 1);
1d1cc4d0
RH
6555 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
6556 tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_tmp0);
f484d386
FB
6557 break;
6558 default:
6559 case 3:
f484d386 6560 tcg_gen_movi_tl(cpu_tmp0, 1);
1d1cc4d0
RH
6561 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
6562 tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_tmp0);
f484d386
FB
6563 break;
6564 }
2c0262af 6565 if (op != 0) {
fd8ca9f6 6566 if (mod != 3) {
1d1cc4d0 6567 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
fd8ca9f6 6568 } else {
1d1cc4d0 6569 gen_op_mov_reg_v(ot, rm, cpu_T0);
fd8ca9f6 6570 }
dc1823ce
RH
6571 }
6572
6573 /* Delay all CC updates until after the store above. Note that
6574 C is the result of the test, Z is unchanged, and the others
6575 are all undefined. */
6576 switch (s->cc_op) {
6577 case CC_OP_MULB ... CC_OP_MULQ:
6578 case CC_OP_ADDB ... CC_OP_ADDQ:
6579 case CC_OP_ADCB ... CC_OP_ADCQ:
6580 case CC_OP_SUBB ... CC_OP_SUBQ:
6581 case CC_OP_SBBB ... CC_OP_SBBQ:
6582 case CC_OP_LOGICB ... CC_OP_LOGICQ:
6583 case CC_OP_INCB ... CC_OP_INCQ:
6584 case CC_OP_DECB ... CC_OP_DECQ:
6585 case CC_OP_SHLB ... CC_OP_SHLQ:
6586 case CC_OP_SARB ... CC_OP_SARQ:
6587 case CC_OP_BMILGB ... CC_OP_BMILGQ:
6588 /* Z was going to be computed from the non-zero status of CC_DST.
6589 We can get that same Z value (and the new C value) by leaving
6590 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
6591 same width. */
f484d386 6592 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
dc1823ce
RH
6593 set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
6594 break;
6595 default:
6596 /* Otherwise, generate EFLAGS and replace the C bit. */
6597 gen_compute_eflags(s);
6598 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
6599 ctz32(CC_C), 1);
6600 break;
2c0262af
FB
6601 }
6602 break;
321c5351
RH
6603 case 0x1bc: /* bsf / tzcnt */
6604 case 0x1bd: /* bsr / lzcnt */
ab4e4aec 6605 ot = dflag;
321c5351
RH
6606 modrm = cpu_ldub_code(env, s->pc++);
6607 reg = ((modrm >> 3) & 7) | rex_r;
6608 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
1d1cc4d0 6609 gen_extu(ot, cpu_T0);
321c5351
RH
6610
6611 /* Note that lzcnt and tzcnt are in different extensions. */
6612 if ((prefixes & PREFIX_REPZ)
6613 && (b & 1
6614 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
6615 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
6616 int size = 8 << ot;
1d1cc4d0 6617 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
321c5351
RH
6618 if (b & 1) {
6619 /* For lzcnt, reduce the target_ulong result by the
6620 number of zeros that we expect to find at the top. */
1d1cc4d0
RH
6621 gen_helper_clz(cpu_T0, cpu_T0);
6622 tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size);
6191b059 6623 } else {
321c5351
RH
6624 /* For tzcnt, a zero input must return the operand size:
6625 force all bits outside the operand size to 1. */
6626 target_ulong mask = (target_ulong)-2 << (size - 1);
1d1cc4d0
RH
6627 tcg_gen_ori_tl(cpu_T0, cpu_T0, mask);
6628 gen_helper_ctz(cpu_T0, cpu_T0);
6191b059 6629 }
321c5351
RH
6630 /* For lzcnt/tzcnt, C and Z bits are defined and are
6631 related to the result. */
6632 gen_op_update1_cc();
6633 set_cc_op(s, CC_OP_BMILGB + ot);
6634 } else {
6635 /* For bsr/bsf, only the Z bit is defined and it is related
6636 to the input and not the result. */
1d1cc4d0 6637 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
321c5351
RH
6638 set_cc_op(s, CC_OP_LOGICB + ot);
6639 if (b & 1) {
6640 /* For bsr, return the bit index of the first 1 bit,
6641 not the count of leading zeros. */
1d1cc4d0
RH
6642 gen_helper_clz(cpu_T0, cpu_T0);
6643 tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1);
321c5351 6644 } else {
1d1cc4d0 6645 gen_helper_ctz(cpu_T0, cpu_T0);
321c5351
RH
6646 }
6647 /* ??? The manual says that the output is undefined when the
6648 input is zero, but real hardware leaves it unchanged, and
6649 real programs appear to depend on that. */
6650 tcg_gen_movi_tl(cpu_tmp0, 0);
1d1cc4d0
RH
6651 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T0, cpu_cc_dst, cpu_tmp0,
6652 cpu_regs[reg], cpu_T0);
6191b059 6653 }
1d1cc4d0 6654 gen_op_mov_reg_v(ot, reg, cpu_T0);
2c0262af
FB
6655 break;
6656 /************************/
6657 /* bcd */
6658 case 0x27: /* daa */
14ce26e7
FB
6659 if (CODE64(s))
6660 goto illegal_op;
773cdfcc 6661 gen_update_cc_op(s);
7923057b 6662 gen_helper_daa(cpu_env);
3ca51d07 6663 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6664 break;
6665 case 0x2f: /* das */
14ce26e7
FB
6666 if (CODE64(s))
6667 goto illegal_op;
773cdfcc 6668 gen_update_cc_op(s);
7923057b 6669 gen_helper_das(cpu_env);
3ca51d07 6670 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6671 break;
6672 case 0x37: /* aaa */
14ce26e7
FB
6673 if (CODE64(s))
6674 goto illegal_op;
773cdfcc 6675 gen_update_cc_op(s);
7923057b 6676 gen_helper_aaa(cpu_env);
3ca51d07 6677 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6678 break;
6679 case 0x3f: /* aas */
14ce26e7
FB
6680 if (CODE64(s))
6681 goto illegal_op;
773cdfcc 6682 gen_update_cc_op(s);
7923057b 6683 gen_helper_aas(cpu_env);
3ca51d07 6684 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6685 break;
6686 case 0xd4: /* aam */
14ce26e7
FB
6687 if (CODE64(s))
6688 goto illegal_op;
0af10c86 6689 val = cpu_ldub_code(env, s->pc++);
b6d7c3db
TS
6690 if (val == 0) {
6691 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6692 } else {
7923057b 6693 gen_helper_aam(cpu_env, tcg_const_i32(val));
3ca51d07 6694 set_cc_op(s, CC_OP_LOGICB);
b6d7c3db 6695 }
2c0262af
FB
6696 break;
6697 case 0xd5: /* aad */
14ce26e7
FB
6698 if (CODE64(s))
6699 goto illegal_op;
0af10c86 6700 val = cpu_ldub_code(env, s->pc++);
7923057b 6701 gen_helper_aad(cpu_env, tcg_const_i32(val));
3ca51d07 6702 set_cc_op(s, CC_OP_LOGICB);
2c0262af
FB
6703 break;
6704 /************************/
6705 /* misc */
6706 case 0x90: /* nop */
ab1f142b 6707 /* XXX: correct lock test for all insn */
7418027e 6708 if (prefixes & PREFIX_LOCK) {
ab1f142b 6709 goto illegal_op;
7418027e
RH
6710 }
6711 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6712 if (REX_B(s)) {
6713 goto do_xchg_reg_eax;
6714 }
0573fbfc 6715 if (prefixes & PREFIX_REPZ) {
81f3053b
PB
6716 gen_update_cc_op(s);
6717 gen_jmp_im(pc_start - s->cs_base);
6718 gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
6719 s->is_jmp = DISAS_TB_JUMP;
0573fbfc 6720 }
2c0262af
FB
6721 break;
6722 case 0x9b: /* fwait */
5fafdf24 6723 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
7eee2a50
FB
6724 (HF_MP_MASK | HF_TS_MASK)) {
6725 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
2ee73ac3 6726 } else {
d3eb5eae 6727 gen_helper_fwait(cpu_env);
7eee2a50 6728 }
2c0262af
FB
6729 break;
6730 case 0xcc: /* int3 */
6731 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6732 break;
6733 case 0xcd: /* int N */
0af10c86 6734 val = cpu_ldub_code(env, s->pc++);
f115e911 6735 if (s->vm86 && s->iopl != 3) {
5fafdf24 6736 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
f115e911
FB
6737 } else {
6738 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6739 }
2c0262af
FB
6740 break;
6741 case 0xce: /* into */
14ce26e7
FB
6742 if (CODE64(s))
6743 goto illegal_op;
773cdfcc 6744 gen_update_cc_op(s);
a8ede8ba 6745 gen_jmp_im(pc_start - s->cs_base);
4a7443be 6746 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
2c0262af 6747 break;
0b97134b 6748#ifdef WANT_ICEBP
2c0262af 6749 case 0xf1: /* icebp (undocumented, exits to external debugger) */
872929aa 6750 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
aba9d61e 6751#if 1
2c0262af 6752 gen_debug(s, pc_start - s->cs_base);
aba9d61e
FB
6753#else
6754 /* start debug */
bbd77c18 6755 tb_flush(CPU(x86_env_get_cpu(env)));
24537a01 6756 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
aba9d61e 6757#endif
2c0262af 6758 break;
0b97134b 6759#endif
2c0262af
FB
6760 case 0xfa: /* cli */
6761 if (!s->vm86) {
6762 if (s->cpl <= s->iopl) {
f0967a1a 6763 gen_helper_cli(cpu_env);
2c0262af
FB
6764 } else {
6765 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6766 }
6767 } else {
6768 if (s->iopl == 3) {
f0967a1a 6769 gen_helper_cli(cpu_env);
2c0262af
FB
6770 } else {
6771 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6772 }
6773 }
6774 break;
6775 case 0xfb: /* sti */
6776 if (!s->vm86) {
6777 if (s->cpl <= s->iopl) {
6778 gen_sti:
f0967a1a 6779 gen_helper_sti(cpu_env);
2c0262af 6780 /* interruptions are enabled only the first insn after sti */
a2cc3b24
FB
6781 /* If several instructions disable interrupts, only the
6782 _first_ does it */
7f0b7141 6783 gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2c0262af 6784 /* give a chance to handle pending irqs */
14ce26e7 6785 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
6786 gen_eob(s);
6787 } else {
6788 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6789 }
6790 } else {
6791 if (s->iopl == 3) {
6792 goto gen_sti;
6793 } else {
6794 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6795 }
6796 }
6797 break;
6798 case 0x62: /* bound */
14ce26e7
FB
6799 if (CODE64(s))
6800 goto illegal_op;
ab4e4aec 6801 ot = dflag;
0af10c86 6802 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
6803 reg = (modrm >> 3) & 7;
6804 mod = (modrm >> 6) & 3;
6805 if (mod == 3)
6806 goto illegal_op;
1d1cc4d0 6807 gen_op_mov_v_reg(ot, cpu_T0, reg);
4eeb3939 6808 gen_lea_modrm(env, s, modrm);
1d1cc4d0 6809 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4ba9938c 6810 if (ot == MO_16) {
92fc4b58
BS
6811 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
6812 } else {
6813 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
6814 }
2c0262af
FB
6815 break;
6816 case 0x1c8 ... 0x1cf: /* bswap reg */
14ce26e7
FB
6817 reg = (b & 7) | REX_B(s);
6818#ifdef TARGET_X86_64
ab4e4aec 6819 if (dflag == MO_64) {
1d1cc4d0
RH
6820 gen_op_mov_v_reg(MO_64, cpu_T0, reg);
6821 tcg_gen_bswap64_i64(cpu_T0, cpu_T0);
6822 gen_op_mov_reg_v(MO_64, reg, cpu_T0);
5fafdf24 6823 } else
8777643e 6824#endif
57fec1fe 6825 {
1d1cc4d0
RH
6826 gen_op_mov_v_reg(MO_32, cpu_T0, reg);
6827 tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
6828 tcg_gen_bswap32_tl(cpu_T0, cpu_T0);
6829 gen_op_mov_reg_v(MO_32, reg, cpu_T0);
14ce26e7 6830 }
2c0262af
FB
6831 break;
6832 case 0xd6: /* salc */
14ce26e7
FB
6833 if (CODE64(s))
6834 goto illegal_op;
1d1cc4d0
RH
6835 gen_compute_eflags_c(s, cpu_T0);
6836 tcg_gen_neg_tl(cpu_T0, cpu_T0);
6837 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
2c0262af
FB
6838 break;
6839 case 0xe0: /* loopnz */
6840 case 0xe1: /* loopz */
2c0262af
FB
6841 case 0xe2: /* loop */
6842 case 0xe3: /* jecxz */
14ce26e7 6843 {
42a268c2 6844 TCGLabel *l1, *l2, *l3;
14ce26e7 6845
4ba9938c 6846 tval = (int8_t)insn_get(env, s, MO_8);
14ce26e7
FB
6847 next_eip = s->pc - s->cs_base;
6848 tval += next_eip;
ab4e4aec 6849 if (dflag == MO_16) {
14ce26e7 6850 tval &= 0xffff;
ab4e4aec 6851 }
3b46e624 6852
14ce26e7
FB
6853 l1 = gen_new_label();
6854 l2 = gen_new_label();
6e0d8677 6855 l3 = gen_new_label();
14ce26e7 6856 b &= 3;
6e0d8677
FB
6857 switch(b) {
6858 case 0: /* loopnz */
6859 case 1: /* loopz */
1d71ddb1
RH
6860 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6861 gen_op_jz_ecx(s->aflag, l3);
5bdb91b0 6862 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
6e0d8677
FB
6863 break;
6864 case 2: /* loop */
1d71ddb1
RH
6865 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6866 gen_op_jnz_ecx(s->aflag, l1);
6e0d8677
FB
6867 break;
6868 default:
6869 case 3: /* jcxz */
1d71ddb1 6870 gen_op_jz_ecx(s->aflag, l1);
6e0d8677 6871 break;
14ce26e7
FB
6872 }
6873
6e0d8677 6874 gen_set_label(l3);
14ce26e7 6875 gen_jmp_im(next_eip);
8e1c85e3 6876 tcg_gen_br(l2);
6e0d8677 6877
14ce26e7
FB
6878 gen_set_label(l1);
6879 gen_jmp_im(tval);
6880 gen_set_label(l2);
6881 gen_eob(s);
6882 }
2c0262af
FB
6883 break;
6884 case 0x130: /* wrmsr */
6885 case 0x132: /* rdmsr */
6886 if (s->cpl != 0) {
6887 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6888 } else {
773cdfcc 6889 gen_update_cc_op(s);
872929aa 6890 gen_jmp_im(pc_start - s->cs_base);
0573fbfc 6891 if (b & 2) {
4a7443be 6892 gen_helper_rdmsr(cpu_env);
0573fbfc 6893 } else {
4a7443be 6894 gen_helper_wrmsr(cpu_env);
0573fbfc 6895 }
2c0262af
FB
6896 }
6897 break;
6898 case 0x131: /* rdtsc */
773cdfcc 6899 gen_update_cc_op(s);
ecada8a2 6900 gen_jmp_im(pc_start - s->cs_base);
bd79255d 6901 if (s->tb->cflags & CF_USE_ICOUNT) {
efade670 6902 gen_io_start();
bd79255d 6903 }
4a7443be 6904 gen_helper_rdtsc(cpu_env);
bd79255d 6905 if (s->tb->cflags & CF_USE_ICOUNT) {
efade670
PB
6906 gen_io_end();
6907 gen_jmp(s, s->pc - s->cs_base);
6908 }
2c0262af 6909 break;
df01e0fc 6910 case 0x133: /* rdpmc */
773cdfcc 6911 gen_update_cc_op(s);
df01e0fc 6912 gen_jmp_im(pc_start - s->cs_base);
4a7443be 6913 gen_helper_rdpmc(cpu_env);
df01e0fc 6914 break;
023fe10d 6915 case 0x134: /* sysenter */
2436b61a 6916 /* For Intel SYSENTER is valid on 64-bit */
0af10c86 6917 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
14ce26e7 6918 goto illegal_op;
023fe10d
FB
6919 if (!s->pe) {
6920 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6921 } else {
2999a0b2 6922 gen_helper_sysenter(cpu_env);
023fe10d
FB
6923 gen_eob(s);
6924 }
6925 break;
6926 case 0x135: /* sysexit */
2436b61a 6927 /* For Intel SYSEXIT is valid on 64-bit */
0af10c86 6928 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
14ce26e7 6929 goto illegal_op;
023fe10d
FB
6930 if (!s->pe) {
6931 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6932 } else {
ab4e4aec 6933 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
023fe10d
FB
6934 gen_eob(s);
6935 }
6936 break;
14ce26e7
FB
6937#ifdef TARGET_X86_64
6938 case 0x105: /* syscall */
6939 /* XXX: is it usable in real mode ? */
728d803b 6940 gen_update_cc_op(s);
14ce26e7 6941 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 6942 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
14ce26e7
FB
6943 gen_eob(s);
6944 break;
6945 case 0x107: /* sysret */
6946 if (!s->pe) {
6947 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6948 } else {
ab4e4aec 6949 gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
aba9d61e 6950 /* condition codes are modified only in long mode */
3ca51d07
RH
6951 if (s->lma) {
6952 set_cc_op(s, CC_OP_EFLAGS);
6953 }
14ce26e7
FB
6954 gen_eob(s);
6955 }
6956 break;
6957#endif
2c0262af 6958 case 0x1a2: /* cpuid */
773cdfcc 6959 gen_update_cc_op(s);
9575cb94 6960 gen_jmp_im(pc_start - s->cs_base);
4a7443be 6961 gen_helper_cpuid(cpu_env);
2c0262af
FB
6962 break;
6963 case 0xf4: /* hlt */
6964 if (s->cpl != 0) {
6965 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6966 } else {
773cdfcc 6967 gen_update_cc_op(s);
94451178 6968 gen_jmp_im(pc_start - s->cs_base);
4a7443be 6969 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
5779406a 6970 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
6971 }
6972 break;
6973 case 0x100:
0af10c86 6974 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
6975 mod = (modrm >> 6) & 3;
6976 op = (modrm >> 3) & 7;
6977 switch(op) {
6978 case 0: /* sldt */
f115e911
FB
6979 if (!s->pe || s->vm86)
6980 goto illegal_op;
872929aa 6981 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
1d1cc4d0
RH
6982 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
6983 offsetof(CPUX86State, ldt.selector));
ab4e4aec 6984 ot = mod == 3 ? dflag : MO_16;
0af10c86 6985 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
6986 break;
6987 case 2: /* lldt */
f115e911
FB
6988 if (!s->pe || s->vm86)
6989 goto illegal_op;
2c0262af
FB
6990 if (s->cpl != 0) {
6991 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6992 } else {
872929aa 6993 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
4ba9938c 6994 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
1d1cc4d0 6995 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
2999a0b2 6996 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6997 }
6998 break;
6999 case 1: /* str */
f115e911
FB
7000 if (!s->pe || s->vm86)
7001 goto illegal_op;
872929aa 7002 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
1d1cc4d0
RH
7003 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
7004 offsetof(CPUX86State, tr.selector));
ab4e4aec 7005 ot = mod == 3 ? dflag : MO_16;
0af10c86 7006 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
7007 break;
7008 case 3: /* ltr */
f115e911
FB
7009 if (!s->pe || s->vm86)
7010 goto illegal_op;
2c0262af
FB
7011 if (s->cpl != 0) {
7012 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7013 } else {
872929aa 7014 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
4ba9938c 7015 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
1d1cc4d0 7016 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
2999a0b2 7017 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
2c0262af
FB
7018 }
7019 break;
7020 case 4: /* verr */
7021 case 5: /* verw */
f115e911
FB
7022 if (!s->pe || s->vm86)
7023 goto illegal_op;
4ba9938c 7024 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
773cdfcc 7025 gen_update_cc_op(s);
2999a0b2 7026 if (op == 4) {
1d1cc4d0 7027 gen_helper_verr(cpu_env, cpu_T0);
2999a0b2 7028 } else {
1d1cc4d0 7029 gen_helper_verw(cpu_env, cpu_T0);
2999a0b2 7030 }
3ca51d07 7031 set_cc_op(s, CC_OP_EFLAGS);
f115e911 7032 break;
2c0262af
FB
7033 default:
7034 goto illegal_op;
7035 }
7036 break;
1906b2af 7037
2c0262af 7038 case 0x101:
0af10c86 7039 modrm = cpu_ldub_code(env, s->pc++);
1906b2af
RH
7040 switch (modrm) {
7041 CASE_MEM_OP(0): /* sgdt */
872929aa 7042 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
4eeb3939 7043 gen_lea_modrm(env, s, modrm);
1d1cc4d0
RH
7044 tcg_gen_ld32u_tl(cpu_T0,
7045 cpu_env, offsetof(CPUX86State, gdt.limit));
7046 gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
aba9d61e 7047 gen_add_A0_im(s, 2);
1d1cc4d0 7048 tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
ab4e4aec 7049 if (dflag == MO_16) {
1d1cc4d0 7050 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
f0706f0c 7051 }
1d1cc4d0 7052 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
2c0262af 7053 break;
1906b2af
RH
7054
7055 case 0xc8: /* monitor */
7056 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
7057 goto illegal_op;
3d7374c5 7058 }
1906b2af
RH
7059 gen_update_cc_op(s);
7060 gen_jmp_im(pc_start - s->cs_base);
7061 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
7062 gen_extu(s->aflag, cpu_A0);
7063 gen_add_A0_ds_seg(s);
7064 gen_helper_monitor(cpu_env, cpu_A0);
3d7374c5 7065 break;
1906b2af
RH
7066
7067 case 0xc9: /* mwait */
7068 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
7069 goto illegal_op;
7070 }
7071 gen_update_cc_op(s);
7072 gen_jmp_im(pc_start - s->cs_base);
7073 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
7074 gen_eob(s);
7075 break;
7076
7077 case 0xca: /* clac */
7078 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
7079 || s->cpl != 0) {
7080 goto illegal_op;
7081 }
7082 gen_helper_clac(cpu_env);
7083 gen_jmp_im(s->pc - s->cs_base);
7084 gen_eob(s);
7085 break;
7086
7087 case 0xcb: /* stac */
7088 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
7089 || s->cpl != 0) {
7090 goto illegal_op;
7091 }
7092 gen_helper_stac(cpu_env);
7093 gen_jmp_im(s->pc - s->cs_base);
7094 gen_eob(s);
7095 break;
7096
7097 CASE_MEM_OP(1): /* sidt */
7098 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7099 gen_lea_modrm(env, s, modrm);
7100 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit));
7101 gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
7102 gen_add_A0_im(s, 2);
7103 tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
7104 if (dflag == MO_16) {
7105 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7106 }
7107 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7108 break;
7109
19dc85db
RH
7110 case 0xd0: /* xgetbv */
7111 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7112 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
7113 | PREFIX_REPZ | PREFIX_REPNZ))) {
7114 goto illegal_op;
7115 }
7116 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
7117 gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
7118 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
7119 break;
7120
7121 case 0xd1: /* xsetbv */
7122 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7123 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
7124 | PREFIX_REPZ | PREFIX_REPNZ))) {
7125 goto illegal_op;
7126 }
7127 if (s->cpl != 0) {
7128 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7129 break;
7130 }
7131 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
7132 cpu_regs[R_EDX]);
7133 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
7134 gen_helper_xsetbv(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
7135 /* End TB because translation flags may change. */
7136 gen_jmp_im(s->pc - pc_start);
7137 gen_eob(s);
7138 break;
7139
1906b2af
RH
7140 case 0xd8: /* VMRUN */
7141 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7142 goto illegal_op;
7143 }
7144 if (s->cpl != 0) {
2c0262af 7145 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
1906b2af 7146 break;
2c0262af 7147 }
1906b2af
RH
7148 gen_update_cc_op(s);
7149 gen_jmp_im(pc_start - s->cs_base);
7150 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
7151 tcg_const_i32(s->pc - pc_start));
7152 tcg_gen_exit_tb(0);
7153 s->is_jmp = DISAS_TB_JUMP;
2c0262af 7154 break;
1906b2af
RH
7155
7156 case 0xd9: /* VMMCALL */
7157 if (!(s->flags & HF_SVME_MASK)) {
7158 goto illegal_op;
7159 }
7160 gen_update_cc_op(s);
7161 gen_jmp_im(pc_start - s->cs_base);
7162 gen_helper_vmmcall(cpu_env);
7163 break;
7164
7165 case 0xda: /* VMLOAD */
7166 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7167 goto illegal_op;
7168 }
7169 if (s->cpl != 0) {
7170 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7171 break;
7172 }
7173 gen_update_cc_op(s);
7174 gen_jmp_im(pc_start - s->cs_base);
7175 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
7176 break;
7177
7178 case 0xdb: /* VMSAVE */
7179 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7180 goto illegal_op;
7181 }
7182 if (s->cpl != 0) {
7183 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7184 break;
7185 }
7186 gen_update_cc_op(s);
7187 gen_jmp_im(pc_start - s->cs_base);
7188 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
7189 break;
7190
7191 case 0xdc: /* STGI */
7192 if ((!(s->flags & HF_SVME_MASK)
7193 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
7194 || !s->pe) {
7195 goto illegal_op;
7196 }
7197 if (s->cpl != 0) {
7198 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7199 break;
7200 }
7201 gen_update_cc_op(s);
7202 gen_jmp_im(pc_start - s->cs_base);
7203 gen_helper_stgi(cpu_env);
7204 break;
7205
7206 case 0xdd: /* CLGI */
7207 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7208 goto illegal_op;
7209 }
7210 if (s->cpl != 0) {
7211 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7212 break;
7213 }
7214 gen_update_cc_op(s);
7215 gen_jmp_im(pc_start - s->cs_base);
7216 gen_helper_clgi(cpu_env);
7217 break;
7218
7219 case 0xde: /* SKINIT */
7220 if ((!(s->flags & HF_SVME_MASK)
7221 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
7222 || !s->pe) {
7223 goto illegal_op;
7224 }
7225 gen_update_cc_op(s);
7226 gen_jmp_im(pc_start - s->cs_base);
7227 gen_helper_skinit(cpu_env);
7228 break;
7229
7230 case 0xdf: /* INVLPGA */
7231 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7232 goto illegal_op;
7233 }
7234 if (s->cpl != 0) {
7235 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7236 break;
7237 }
7238 gen_update_cc_op(s);
7239 gen_jmp_im(pc_start - s->cs_base);
7240 gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1));
7241 break;
7242
7243 CASE_MEM_OP(2): /* lgdt */
7244 if (s->cpl != 0) {
7245 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7246 break;
7247 }
7248 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
7249 gen_lea_modrm(env, s, modrm);
7250 gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
7251 gen_add_A0_im(s, 2);
7252 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7253 if (dflag == MO_16) {
7254 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7255 }
7256 tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
7257 tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit));
7258 break;
7259
7260 CASE_MEM_OP(3): /* lidt */
7261 if (s->cpl != 0) {
7262 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7263 break;
7264 }
7265 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
7266 gen_lea_modrm(env, s, modrm);
7267 gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
7268 gen_add_A0_im(s, 2);
7269 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7270 if (dflag == MO_16) {
7271 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7272 }
7273 tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
7274 tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit));
7275 break;
7276
7277 CASE_MEM_OP(4): /* smsw */
872929aa 7278 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
e2542fe2 7279#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
1d1cc4d0 7280 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]) + 4);
f60d2728 7281#else
1d1cc4d0 7282 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]));
f60d2728 7283#endif
4ba9938c 7284 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 1);
2c0262af 7285 break;
1906b2af
RH
7286
7287 CASE_MEM_OP(6): /* lmsw */
2c0262af
FB
7288 if (s->cpl != 0) {
7289 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
1906b2af 7290 break;
2c0262af 7291 }
1906b2af
RH
7292 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7293 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7294 gen_helper_lmsw(cpu_env, cpu_T0);
7295 gen_jmp_im(s->pc - s->cs_base);
7296 gen_eob(s);
2c0262af 7297 break;
1906b2af
RH
7298
7299 CASE_MEM_OP(7): /* invlpg */
7300 if (s->cpl != 0) {
7301 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7302 break;
7303 }
7304 gen_update_cc_op(s);
7305 gen_jmp_im(pc_start - s->cs_base);
7306 gen_lea_modrm(env, s, modrm);
7307 gen_helper_invlpg(cpu_env, cpu_A0);
7308 gen_jmp_im(s->pc - s->cs_base);
7309 gen_eob(s);
7310 break;
7311
7312 case 0xf8: /* swapgs */
7313#ifdef TARGET_X86_64
7314 if (CODE64(s)) {
1b050077
AP
7315 if (s->cpl != 0) {
7316 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7317 } else {
1906b2af
RH
7318 tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]);
7319 tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
7320 offsetof(CPUX86State, kernelgsbase));
7321 tcg_gen_st_tl(cpu_T0, cpu_env,
7322 offsetof(CPUX86State, kernelgsbase));
1b050077 7323 }
1906b2af
RH
7324 break;
7325 }
3558f805 7326#endif
1906b2af
RH
7327 goto illegal_op;
7328
7329 case 0xf9: /* rdtscp */
7330 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
7331 goto illegal_op;
7332 }
7333 gen_update_cc_op(s);
7334 gen_jmp_im(pc_start - s->cs_base);
7335 if (s->tb->cflags & CF_USE_ICOUNT) {
7336 gen_io_start();
7337 }
7338 gen_helper_rdtscp(cpu_env);
7339 if (s->tb->cflags & CF_USE_ICOUNT) {
7340 gen_io_end();
7341 gen_jmp(s, s->pc - s->cs_base);
2c0262af
FB
7342 }
7343 break;
1906b2af 7344
2c0262af
FB
7345 default:
7346 goto illegal_op;
7347 }
7348 break;
1906b2af 7349
3415a4dd
FB
7350 case 0x108: /* invd */
7351 case 0x109: /* wbinvd */
7352 if (s->cpl != 0) {
7353 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7354 } else {
872929aa 7355 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
3415a4dd
FB
7356 /* nothing to do */
7357 }
7358 break;
14ce26e7
FB
7359 case 0x63: /* arpl or movslS (x86_64) */
7360#ifdef TARGET_X86_64
7361 if (CODE64(s)) {
7362 int d_ot;
7363 /* d_ot is the size of destination */
ab4e4aec 7364 d_ot = dflag;
14ce26e7 7365
0af10c86 7366 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
7367 reg = ((modrm >> 3) & 7) | rex_r;
7368 mod = (modrm >> 6) & 3;
7369 rm = (modrm & 7) | REX_B(s);
3b46e624 7370
14ce26e7 7371 if (mod == 3) {
1d1cc4d0 7372 gen_op_mov_v_reg(MO_32, cpu_T0, rm);
14ce26e7 7373 /* sign extend */
4ba9938c 7374 if (d_ot == MO_64) {
1d1cc4d0 7375 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
4ba9938c 7376 }
1d1cc4d0 7377 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
14ce26e7 7378 } else {
4eeb3939 7379 gen_lea_modrm(env, s, modrm);
1d1cc4d0
RH
7380 gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0);
7381 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
14ce26e7 7382 }
5fafdf24 7383 } else
14ce26e7
FB
7384#endif
7385 {
42a268c2 7386 TCGLabel *label1;
49d9fdcc 7387 TCGv t0, t1, t2, a0;
1e4840bf 7388
14ce26e7
FB
7389 if (!s->pe || s->vm86)
7390 goto illegal_op;
a7812ae4
PB
7391 t0 = tcg_temp_local_new();
7392 t1 = tcg_temp_local_new();
7393 t2 = tcg_temp_local_new();
4ba9938c 7394 ot = MO_16;
0af10c86 7395 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
7396 reg = (modrm >> 3) & 7;
7397 mod = (modrm >> 6) & 3;
7398 rm = modrm & 7;
7399 if (mod != 3) {
4eeb3939 7400 gen_lea_modrm(env, s, modrm);
323d1876 7401 gen_op_ld_v(s, ot, t0, cpu_A0);
49d9fdcc
LD
7402 a0 = tcg_temp_local_new();
7403 tcg_gen_mov_tl(a0, cpu_A0);
14ce26e7 7404 } else {
1e4840bf 7405 gen_op_mov_v_reg(ot, t0, rm);
49d9fdcc 7406 TCGV_UNUSED(a0);
14ce26e7 7407 }
1e4840bf
FB
7408 gen_op_mov_v_reg(ot, t1, reg);
7409 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7410 tcg_gen_andi_tl(t1, t1, 3);
7411 tcg_gen_movi_tl(t2, 0);
3bd7da9e 7412 label1 = gen_new_label();
1e4840bf
FB
7413 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7414 tcg_gen_andi_tl(t0, t0, ~3);
7415 tcg_gen_or_tl(t0, t0, t1);
7416 tcg_gen_movi_tl(t2, CC_Z);
3bd7da9e 7417 gen_set_label(label1);
14ce26e7 7418 if (mod != 3) {
323d1876 7419 gen_op_st_v(s, ot, t0, a0);
49d9fdcc
LD
7420 tcg_temp_free(a0);
7421 } else {
1e4840bf 7422 gen_op_mov_reg_v(ot, rm, t0);
14ce26e7 7423 }
d229edce 7424 gen_compute_eflags(s);
3bd7da9e 7425 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
1e4840bf 7426 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
1e4840bf
FB
7427 tcg_temp_free(t0);
7428 tcg_temp_free(t1);
7429 tcg_temp_free(t2);
f115e911 7430 }
f115e911 7431 break;
2c0262af
FB
7432 case 0x102: /* lar */
7433 case 0x103: /* lsl */
cec6843e 7434 {
42a268c2 7435 TCGLabel *label1;
1e4840bf 7436 TCGv t0;
cec6843e
FB
7437 if (!s->pe || s->vm86)
7438 goto illegal_op;
ab4e4aec 7439 ot = dflag != MO_16 ? MO_32 : MO_16;
0af10c86 7440 modrm = cpu_ldub_code(env, s->pc++);
cec6843e 7441 reg = ((modrm >> 3) & 7) | rex_r;
4ba9938c 7442 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
a7812ae4 7443 t0 = tcg_temp_local_new();
773cdfcc 7444 gen_update_cc_op(s);
2999a0b2 7445 if (b == 0x102) {
1d1cc4d0 7446 gen_helper_lar(t0, cpu_env, cpu_T0);
2999a0b2 7447 } else {
1d1cc4d0 7448 gen_helper_lsl(t0, cpu_env, cpu_T0);
2999a0b2 7449 }
cec6843e
FB
7450 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7451 label1 = gen_new_label();
cb63669a 7452 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
1e4840bf 7453 gen_op_mov_reg_v(ot, reg, t0);
cec6843e 7454 gen_set_label(label1);
3ca51d07 7455 set_cc_op(s, CC_OP_EFLAGS);
1e4840bf 7456 tcg_temp_free(t0);
cec6843e 7457 }
2c0262af
FB
7458 break;
7459 case 0x118:
0af10c86 7460 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7461 mod = (modrm >> 6) & 3;
7462 op = (modrm >> 3) & 7;
7463 switch(op) {
7464 case 0: /* prefetchnta */
7465 case 1: /* prefetchnt0 */
7466 case 2: /* prefetchnt0 */
7467 case 3: /* prefetchnt0 */
7468 if (mod == 3)
7469 goto illegal_op;
4eeb3939 7470 gen_lea_modrm(env, s, modrm);
2c0262af
FB
7471 /* nothing more to do */
7472 break;
e17a36ce 7473 default: /* nop (multi byte) */
0af10c86 7474 gen_nop_modrm(env, s, modrm);
e17a36ce 7475 break;
2c0262af
FB
7476 }
7477 break;
62b58ba5
RH
7478 case 0x11a:
7479 modrm = cpu_ldub_code(env, s->pc++);
7480 if (s->flags & HF_MPX_EN_MASK) {
7481 mod = (modrm >> 6) & 3;
7482 reg = ((modrm >> 3) & 7) | rex_r;
523e28d7
RH
7483 if (prefixes & PREFIX_REPZ) {
7484 /* bndcl */
7485 if (reg >= 4
7486 || (prefixes & PREFIX_LOCK)
7487 || s->aflag == MO_16) {
7488 goto illegal_op;
7489 }
7490 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
7491 } else if (prefixes & PREFIX_REPNZ) {
7492 /* bndcu */
7493 if (reg >= 4
7494 || (prefixes & PREFIX_LOCK)
7495 || s->aflag == MO_16) {
7496 goto illegal_op;
7497 }
7498 TCGv_i64 notu = tcg_temp_new_i64();
7499 tcg_gen_not_i64(notu, cpu_bndu[reg]);
7500 gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
7501 tcg_temp_free_i64(notu);
7502 } else if (prefixes & PREFIX_DATA) {
62b58ba5
RH
7503 /* bndmov -- from reg/mem */
7504 if (reg >= 4 || s->aflag == MO_16) {
7505 goto illegal_op;
7506 }
7507 if (mod == 3) {
7508 int reg2 = (modrm & 7) | REX_B(s);
7509 if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
7510 goto illegal_op;
7511 }
7512 if (s->flags & HF_MPX_IU_MASK) {
7513 tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
7514 tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
7515 }
7516 } else {
7517 gen_lea_modrm(env, s, modrm);
7518 if (CODE64(s)) {
7519 tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
7520 s->mem_index, MO_LEQ);
7521 tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
7522 tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
7523 s->mem_index, MO_LEQ);
7524 } else {
7525 tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
7526 s->mem_index, MO_LEUL);
7527 tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
7528 tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
7529 s->mem_index, MO_LEUL);
7530 }
7531 /* bnd registers are now in-use */
7532 gen_set_hflag(s, HF_MPX_IU_MASK);
7533 }
bdd87b3b
RH
7534 } else if (mod != 3) {
7535 /* bndldx */
7536 AddressParts a = gen_lea_modrm_0(env, s, modrm);
7537 if (reg >= 4
7538 || (prefixes & PREFIX_LOCK)
7539 || s->aflag == MO_16
7540 || a.base < -1) {
7541 goto illegal_op;
7542 }
7543 if (a.base >= 0) {
7544 tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
7545 } else {
7546 tcg_gen_movi_tl(cpu_A0, 0);
7547 }
7548 gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
7549 if (a.index >= 0) {
7550 tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
7551 } else {
7552 tcg_gen_movi_tl(cpu_T0, 0);
7553 }
7554 if (CODE64(s)) {
7555 gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0);
7556 tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
7557 offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
7558 } else {
7559 gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0);
7560 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
7561 tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
7562 }
7563 gen_set_hflag(s, HF_MPX_IU_MASK);
62b58ba5
RH
7564 }
7565 }
7566 gen_nop_modrm(env, s, modrm);
7567 break;
149b427b
RH
7568 case 0x11b:
7569 modrm = cpu_ldub_code(env, s->pc++);
7570 if (s->flags & HF_MPX_EN_MASK) {
7571 mod = (modrm >> 6) & 3;
7572 reg = ((modrm >> 3) & 7) | rex_r;
7573 if (mod != 3 && (prefixes & PREFIX_REPZ)) {
7574 /* bndmk */
7575 if (reg >= 4
7576 || (prefixes & PREFIX_LOCK)
7577 || s->aflag == MO_16) {
7578 goto illegal_op;
7579 }
7580 AddressParts a = gen_lea_modrm_0(env, s, modrm);
7581 if (a.base >= 0) {
7582 tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
7583 if (!CODE64(s)) {
7584 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
7585 }
7586 } else if (a.base == -1) {
7587 /* no base register has lower bound of 0 */
7588 tcg_gen_movi_i64(cpu_bndl[reg], 0);
7589 } else {
7590 /* rip-relative generates #ud */
7591 goto illegal_op;
7592 }
7593 tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a));
7594 if (!CODE64(s)) {
7595 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
7596 }
7597 tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0);
7598 /* bnd registers are now in-use */
7599 gen_set_hflag(s, HF_MPX_IU_MASK);
7600 break;
523e28d7
RH
7601 } else if (prefixes & PREFIX_REPNZ) {
7602 /* bndcn */
7603 if (reg >= 4
7604 || (prefixes & PREFIX_LOCK)
7605 || s->aflag == MO_16) {
7606 goto illegal_op;
7607 }
7608 gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
62b58ba5
RH
7609 } else if (prefixes & PREFIX_DATA) {
7610 /* bndmov -- to reg/mem */
7611 if (reg >= 4 || s->aflag == MO_16) {
7612 goto illegal_op;
7613 }
7614 if (mod == 3) {
7615 int reg2 = (modrm & 7) | REX_B(s);
7616 if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
7617 goto illegal_op;
7618 }
7619 if (s->flags & HF_MPX_IU_MASK) {
7620 tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
7621 tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
7622 }
7623 } else {
7624 gen_lea_modrm(env, s, modrm);
7625 if (CODE64(s)) {
7626 tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
7627 s->mem_index, MO_LEQ);
7628 tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
7629 tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
7630 s->mem_index, MO_LEQ);
7631 } else {
7632 tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
7633 s->mem_index, MO_LEUL);
7634 tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
7635 tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
7636 s->mem_index, MO_LEUL);
7637 }
7638 }
bdd87b3b
RH
7639 } else if (mod != 3) {
7640 /* bndstx */
7641 AddressParts a = gen_lea_modrm_0(env, s, modrm);
7642 if (reg >= 4
7643 || (prefixes & PREFIX_LOCK)
7644 || s->aflag == MO_16
7645 || a.base < -1) {
7646 goto illegal_op;
7647 }
7648 if (a.base >= 0) {
7649 tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
7650 } else {
7651 tcg_gen_movi_tl(cpu_A0, 0);
7652 }
7653 gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
7654 if (a.index >= 0) {
7655 tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
7656 } else {
7657 tcg_gen_movi_tl(cpu_T0, 0);
7658 }
7659 if (CODE64(s)) {
7660 gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0,
7661 cpu_bndl[reg], cpu_bndu[reg]);
7662 } else {
7663 gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0,
7664 cpu_bndl[reg], cpu_bndu[reg]);
7665 }
149b427b
RH
7666 }
7667 }
7668 gen_nop_modrm(env, s, modrm);
7669 break;
62b58ba5 7670 case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
0af10c86
BS
7671 modrm = cpu_ldub_code(env, s->pc++);
7672 gen_nop_modrm(env, s, modrm);
e17a36ce 7673 break;
2c0262af
FB
7674 case 0x120: /* mov reg, crN */
7675 case 0x122: /* mov crN, reg */
7676 if (s->cpl != 0) {
7677 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7678 } else {
0af10c86 7679 modrm = cpu_ldub_code(env, s->pc++);
5c73b757
MO
7680 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7681 * AMD documentation (24594.pdf) and testing of
7682 * intel 386 and 486 processors all show that the mod bits
7683 * are assumed to be 1's, regardless of actual values.
7684 */
14ce26e7
FB
7685 rm = (modrm & 7) | REX_B(s);
7686 reg = ((modrm >> 3) & 7) | rex_r;
7687 if (CODE64(s))
4ba9938c 7688 ot = MO_64;
14ce26e7 7689 else
4ba9938c 7690 ot = MO_32;
ccd59d09
AP
7691 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7692 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7693 reg = 8;
7694 }
2c0262af
FB
7695 switch(reg) {
7696 case 0:
7697 case 2:
7698 case 3:
7699 case 4:
9230e66e 7700 case 8:
773cdfcc 7701 gen_update_cc_op(s);
872929aa 7702 gen_jmp_im(pc_start - s->cs_base);
2c0262af 7703 if (b & 2) {
1d1cc4d0 7704 gen_op_mov_v_reg(ot, cpu_T0, rm);
4a7443be 7705 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
1d1cc4d0 7706 cpu_T0);
14ce26e7 7707 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
7708 gen_eob(s);
7709 } else {
1d1cc4d0
RH
7710 gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
7711 gen_op_mov_reg_v(ot, rm, cpu_T0);
2c0262af
FB
7712 }
7713 break;
7714 default:
7715 goto illegal_op;
7716 }
7717 }
7718 break;
7719 case 0x121: /* mov reg, drN */
7720 case 0x123: /* mov drN, reg */
7721 if (s->cpl != 0) {
7722 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7723 } else {
0af10c86 7724 modrm = cpu_ldub_code(env, s->pc++);
5c73b757
MO
7725 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7726 * AMD documentation (24594.pdf) and testing of
7727 * intel 386 and 486 processors all show that the mod bits
7728 * are assumed to be 1's, regardless of actual values.
7729 */
14ce26e7
FB
7730 rm = (modrm & 7) | REX_B(s);
7731 reg = ((modrm >> 3) & 7) | rex_r;
7732 if (CODE64(s))
4ba9938c 7733 ot = MO_64;
14ce26e7 7734 else
4ba9938c 7735 ot = MO_32;
d0052339 7736 if (reg >= 8) {
2c0262af 7737 goto illegal_op;
d0052339 7738 }
2c0262af 7739 if (b & 2) {
0573fbfc 7740 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
1d1cc4d0 7741 gen_op_mov_v_reg(ot, cpu_T0, rm);
d0052339 7742 tcg_gen_movi_i32(cpu_tmp2_i32, reg);
1d1cc4d0 7743 gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0);
14ce26e7 7744 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
7745 gen_eob(s);
7746 } else {
0573fbfc 7747 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
d0052339 7748 tcg_gen_movi_i32(cpu_tmp2_i32, reg);
1d1cc4d0
RH
7749 gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32);
7750 gen_op_mov_reg_v(ot, rm, cpu_T0);
2c0262af
FB
7751 }
7752 }
7753 break;
7754 case 0x106: /* clts */
7755 if (s->cpl != 0) {
7756 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7757 } else {
0573fbfc 7758 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
f0967a1a 7759 gen_helper_clts(cpu_env);
7eee2a50 7760 /* abort block because static cpu state changed */
14ce26e7 7761 gen_jmp_im(s->pc - s->cs_base);
7eee2a50 7762 gen_eob(s);
2c0262af
FB
7763 }
7764 break;
222a3336 7765 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
664e0f19
FB
7766 case 0x1c3: /* MOVNTI reg, mem */
7767 if (!(s->cpuid_features & CPUID_SSE2))
14ce26e7 7768 goto illegal_op;
ab4e4aec 7769 ot = mo_64_32(dflag);
0af10c86 7770 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
7771 mod = (modrm >> 6) & 3;
7772 if (mod == 3)
7773 goto illegal_op;
7774 reg = ((modrm >> 3) & 7) | rex_r;
7775 /* generate a generic store */
0af10c86 7776 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
14ce26e7 7777 break;
664e0f19 7778 case 0x1ae:
0af10c86 7779 modrm = cpu_ldub_code(env, s->pc++);
121f3157
RH
7780 switch (modrm) {
7781 CASE_MEM_OP(0): /* fxsave */
7782 if (!(s->cpuid_features & CPUID_FXSR)
7783 || (prefixes & PREFIX_LOCK)) {
14ce26e7 7784 goto illegal_op;
121f3157 7785 }
09d85fb8 7786 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
0fd14b72
FB
7787 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7788 break;
7789 }
4eeb3939 7790 gen_lea_modrm(env, s, modrm);
64dbaff0 7791 gen_helper_fxsave(cpu_env, cpu_A0);
664e0f19 7792 break;
121f3157
RH
7793
7794 CASE_MEM_OP(1): /* fxrstor */
7795 if (!(s->cpuid_features & CPUID_FXSR)
7796 || (prefixes & PREFIX_LOCK)) {
14ce26e7 7797 goto illegal_op;
121f3157 7798 }
09d85fb8 7799 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
0fd14b72
FB
7800 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7801 break;
7802 }
4eeb3939 7803 gen_lea_modrm(env, s, modrm);
64dbaff0 7804 gen_helper_fxrstor(cpu_env, cpu_A0);
664e0f19 7805 break;
121f3157
RH
7806
7807 CASE_MEM_OP(2): /* ldmxcsr */
7808 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
7809 goto illegal_op;
7810 }
664e0f19
FB
7811 if (s->flags & HF_TS_MASK) {
7812 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7813 break;
14ce26e7 7814 }
4eeb3939 7815 gen_lea_modrm(env, s, modrm);
121f3157
RH
7816 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL);
7817 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
664e0f19 7818 break;
121f3157
RH
7819
7820 CASE_MEM_OP(3): /* stmxcsr */
7821 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
664e0f19 7822 goto illegal_op;
121f3157
RH
7823 }
7824 if (s->flags & HF_TS_MASK) {
7825 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7826 break;
7827 }
7828 gen_lea_modrm(env, s, modrm);
7829 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr));
7830 gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
664e0f19 7831 break;
121f3157 7832
19dc85db
RH
7833 CASE_MEM_OP(4): /* xsave */
7834 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7835 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
7836 | PREFIX_REPZ | PREFIX_REPNZ))) {
7837 goto illegal_op;
7838 }
7839 gen_lea_modrm(env, s, modrm);
7840 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
7841 cpu_regs[R_EDX]);
7842 gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64);
7843 break;
7844
7845 CASE_MEM_OP(5): /* xrstor */
7846 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7847 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
7848 | PREFIX_REPZ | PREFIX_REPNZ))) {
7849 goto illegal_op;
7850 }
7851 gen_lea_modrm(env, s, modrm);
7852 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
7853 cpu_regs[R_EDX]);
7854 gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64);
f4f1110e
RH
7855 /* XRSTOR is how MPX is enabled, which changes how
7856 we translate. Thus we need to end the TB. */
7857 gen_update_cc_op(s);
7858 gen_jmp_im(s->pc - s->cs_base);
7859 gen_eob(s);
19dc85db
RH
7860 break;
7861
c9cfe8f9 7862 CASE_MEM_OP(6): /* xsaveopt / clwb */
121f3157
RH
7863 if (prefixes & PREFIX_LOCK) {
7864 goto illegal_op;
7865 }
7866 if (prefixes & PREFIX_DATA) {
5e1fac2d 7867 /* clwb */
121f3157 7868 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
5e1fac2d 7869 goto illegal_op;
121f3157 7870 }
5e1fac2d 7871 gen_nop_modrm(env, s, modrm);
c9cfe8f9
RH
7872 } else {
7873 /* xsaveopt */
7874 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7875 || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
7876 || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
7877 goto illegal_op;
7878 }
7879 gen_lea_modrm(env, s, modrm);
7880 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
7881 cpu_regs[R_EDX]);
7882 gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64);
121f3157 7883 }
c9cfe8f9 7884 break;
121f3157
RH
7885
7886 CASE_MEM_OP(7): /* clflush / clflushopt */
7887 if (prefixes & PREFIX_LOCK) {
7888 goto illegal_op;
7889 }
7890 if (prefixes & PREFIX_DATA) {
7891 /* clflushopt */
7892 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
7893 goto illegal_op;
7894 }
5e1fac2d 7895 } else {
121f3157
RH
7896 /* clflush */
7897 if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
7898 || !(s->cpuid_features & CPUID_CLFLUSH)) {
5e1fac2d 7899 goto illegal_op;
121f3157 7900 }
5e1fac2d 7901 }
121f3157 7902 gen_nop_modrm(env, s, modrm);
5e1fac2d 7903 break;
121f3157 7904
07929f2a
RH
7905 case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
7906 case 0xc8 ... 0xc8: /* rdgsbase (f3 0f ae /1) */
7907 case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
7908 case 0xd8 ... 0xd8: /* wrgsbase (f3 0f ae /3) */
7909 if (CODE64(s)
7910 && (prefixes & PREFIX_REPZ)
7911 && !(prefixes & PREFIX_LOCK)
7912 && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
7913 TCGv base, treg, src, dst;
7914
7915 /* Preserve hflags bits by testing CR4 at runtime. */
7916 tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK);
7917 gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32);
7918
7919 base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
7920 treg = cpu_regs[(modrm & 7) | REX_B(s)];
7921
7922 if (modrm & 0x10) {
7923 /* wr*base */
7924 dst = base, src = treg;
7925 } else {
7926 /* rd*base */
7927 dst = treg, src = base;
7928 }
7929
7930 if (s->dflag == MO_32) {
7931 tcg_gen_ext32u_tl(dst, src);
7932 } else {
7933 tcg_gen_mov_tl(dst, src);
7934 }
7935 break;
7936 }
7937 goto illegal_op;
7938
121f3157
RH
7939 case 0xf8: /* sfence / pcommit */
7940 if (prefixes & PREFIX_DATA) {
7941 /* pcommit */
7942 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
7943 || (prefixes & PREFIX_LOCK)) {
7944 goto illegal_op;
891bc821 7945 }
121f3157
RH
7946 break;
7947 }
7948 /* fallthru */
7949 case 0xf9 ... 0xff: /* sfence */
7950 case 0xe8 ... 0xef: /* lfence */
7951 case 0xf0 ... 0xf7: /* mfence */
7952 if (!(s->cpuid_features & CPUID_SSE2)
7953 || (prefixes & PREFIX_LOCK)) {
7954 goto illegal_op;
8f091a59
FB
7955 }
7956 break;
121f3157 7957
664e0f19 7958 default:
14ce26e7
FB
7959 goto illegal_op;
7960 }
7961 break;
121f3157 7962
a35f3ec7 7963 case 0x10d: /* 3DNow! prefetch(w) */
0af10c86 7964 modrm = cpu_ldub_code(env, s->pc++);
a35f3ec7
AJ
7965 mod = (modrm >> 6) & 3;
7966 if (mod == 3)
7967 goto illegal_op;
4eeb3939 7968 gen_lea_modrm(env, s, modrm);
8f091a59
FB
7969 /* ignore for now */
7970 break;
3b21e03e 7971 case 0x1aa: /* rsm */
872929aa 7972 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
3b21e03e
FB
7973 if (!(s->flags & HF_SMM_MASK))
7974 goto illegal_op;
728d803b 7975 gen_update_cc_op(s);
3b21e03e 7976 gen_jmp_im(s->pc - s->cs_base);
608badfc 7977 gen_helper_rsm(cpu_env);
3b21e03e
FB
7978 gen_eob(s);
7979 break;
222a3336
AZ
7980 case 0x1b8: /* SSE4.2 popcnt */
7981 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7982 PREFIX_REPZ)
7983 goto illegal_op;
7984 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7985 goto illegal_op;
7986
0af10c86 7987 modrm = cpu_ldub_code(env, s->pc++);
8b4a3df8 7988 reg = ((modrm >> 3) & 7) | rex_r;
222a3336 7989
ab4e4aec 7990 if (s->prefix & PREFIX_DATA) {
4ba9938c 7991 ot = MO_16;
ab4e4aec
RH
7992 } else {
7993 ot = mo_64_32(dflag);
7994 }
222a3336 7995
0af10c86 7996 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
1d1cc4d0
RH
7997 gen_helper_popcnt(cpu_T0, cpu_env, cpu_T0, tcg_const_i32(ot));
7998 gen_op_mov_reg_v(ot, reg, cpu_T0);
fdb0d09d 7999
3ca51d07 8000 set_cc_op(s, CC_OP_EFLAGS);
222a3336 8001 break;
a35f3ec7
AJ
8002 case 0x10e ... 0x10f:
8003 /* 3DNow! instructions, ignore prefixes */
8004 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
664e0f19
FB
8005 case 0x110 ... 0x117:
8006 case 0x128 ... 0x12f:
4242b1bd 8007 case 0x138 ... 0x13a:
d9f4bb27 8008 case 0x150 ... 0x179:
664e0f19
FB
8009 case 0x17c ... 0x17f:
8010 case 0x1c2:
8011 case 0x1c4 ... 0x1c6:
8012 case 0x1d0 ... 0x1fe:
0af10c86 8013 gen_sse(env, s, b, pc_start, rex_r);
664e0f19 8014 break;
2c0262af
FB
8015 default:
8016 goto illegal_op;
8017 }
8018 /* lock generation */
8019 if (s->prefix & PREFIX_LOCK)
a7812ae4 8020 gen_helper_unlock();
2c0262af
FB
8021 return s->pc;
8022 illegal_op:
ab1f142b 8023 if (s->prefix & PREFIX_LOCK)
a7812ae4 8024 gen_helper_unlock();
2c0262af
FB
8025 /* XXX: ensure that no lock was generated */
8026 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
8027 return s->pc;
8028}
8029
63618b4e 8030void tcg_x86_init(void)
2c0262af 8031{
fac0aff9
RH
8032 static const char reg_names[CPU_NB_REGS][4] = {
8033#ifdef TARGET_X86_64
8034 [R_EAX] = "rax",
8035 [R_EBX] = "rbx",
8036 [R_ECX] = "rcx",
8037 [R_EDX] = "rdx",
8038 [R_ESI] = "rsi",
8039 [R_EDI] = "rdi",
8040 [R_EBP] = "rbp",
8041 [R_ESP] = "rsp",
8042 [8] = "r8",
8043 [9] = "r9",
8044 [10] = "r10",
8045 [11] = "r11",
8046 [12] = "r12",
8047 [13] = "r13",
8048 [14] = "r14",
8049 [15] = "r15",
8050#else
8051 [R_EAX] = "eax",
8052 [R_EBX] = "ebx",
8053 [R_ECX] = "ecx",
8054 [R_EDX] = "edx",
8055 [R_ESI] = "esi",
8056 [R_EDI] = "edi",
8057 [R_EBP] = "ebp",
8058 [R_ESP] = "esp",
8059#endif
8060 };
3558f805
RH
8061 static const char seg_base_names[6][8] = {
8062 [R_CS] = "cs_base",
8063 [R_DS] = "ds_base",
8064 [R_ES] = "es_base",
8065 [R_FS] = "fs_base",
8066 [R_GS] = "gs_base",
8067 [R_SS] = "ss_base",
8068 };
149b427b
RH
8069 static const char bnd_regl_names[4][8] = {
8070 "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
8071 };
8072 static const char bnd_regu_names[4][8] = {
8073 "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
8074 };
fac0aff9
RH
8075 int i;
8076
a7812ae4 8077 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
e1ccc054 8078 cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
317ac620 8079 offsetof(CPUX86State, cc_op), "cc_op");
e1ccc054 8080 cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
a7812ae4 8081 "cc_dst");
e1ccc054 8082 cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
a3251186 8083 "cc_src");
e1ccc054 8084 cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
988c3eb0 8085 "cc_src2");
437a88a5 8086
fac0aff9 8087 for (i = 0; i < CPU_NB_REGS; ++i) {
e1ccc054 8088 cpu_regs[i] = tcg_global_mem_new(cpu_env,
fac0aff9
RH
8089 offsetof(CPUX86State, regs[i]),
8090 reg_names[i]);
8091 }
677ef623 8092
3558f805
RH
8093 for (i = 0; i < 6; ++i) {
8094 cpu_seg_base[i]
8095 = tcg_global_mem_new(cpu_env,
8096 offsetof(CPUX86State, segs[i].base),
8097 seg_base_names[i]);
8098 }
8099
149b427b
RH
8100 for (i = 0; i < 4; ++i) {
8101 cpu_bndl[i]
8102 = tcg_global_mem_new_i64(cpu_env,
8103 offsetof(CPUX86State, bnd_regs[i].lb),
8104 bnd_regl_names[i]);
8105 cpu_bndu[i]
8106 = tcg_global_mem_new_i64(cpu_env,
8107 offsetof(CPUX86State, bnd_regs[i].ub),
8108 bnd_regu_names[i]);
8109 }
8110
677ef623 8111 helper_lock_init();
2c0262af
FB
8112}
8113
/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_ptr;
    uint64_t flags;
    target_ulong pc_start;
    target_ulong cs_base;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;

    /* Decode the static CPU state that was baked into tb->flags at
       TB lookup time into individual DisasContext fields.  */
    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->f_st = 0;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->cs_base = cs_base;
    dc->tb = tb;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = 0;
    if (flags & HF_SOFTMMU_MASK) {
        dc->mem_index = cpu_mmu_index(env, false);
    }
    /* CPUID feature bits consulted while decoding, cached locally.  */
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->flags = flags;
    /* Direct-jump chaining is disabled under TF, single-step or an
       interrupt-inhibit window, where every instruction must trap out.  */
    dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK)
#ifndef CONFIG_SOFTMMU
                    || (flags & HF_SOFTMMU_MASK)
#endif
                    );
    /* Do not optimize repz jumps at all in icount mode, because
       rep movsS instructions are executed with different paths
       in !repz_opt and repz_opt modes. The first one was used
       always except single step mode. And this setting
       disables jumps optimization and control paths become
       equivalent in run and single step modes.
       Now there will be no jump optimization for repz in
       record/replay modes and there will always be an
       additional step for ecx=0 when icount is enabled.
     */
    dc->repz_opt = !dc->jmp_opt && !(tb->cflags & CF_USE_ICOUNT);
#if 0
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
#endif

    /* Allocate the per-TB TCG temporaries used by disas_insn.  */
    cpu_T0 = tcg_temp_new();
    cpu_T1 = tcg_temp_new();
    cpu_A0 = tcg_temp_new();

    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();
    /* Local temp: must survive across the branches generated for
       condition-code handling.  */
    cpu_cc_srcT = tcg_temp_local_new();

    dc->is_jmp = DISAS_NEXT;
    pc_ptr = pc_start;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    /* Main translation loop: one iteration per guest instruction.  */
    for(;;) {
        /* Record (pc, cc_op) so restore_state_to_opc can rebuild the
           CPU state from the middle of the TB.  */
        tcg_gen_insn_start(pc_ptr, dc->cc_op);
        num_insns++;

        /* If RF is set, suppress an internally generated breakpoint. */
        if (unlikely(cpu_breakpoint_test(cs, pc_ptr,
                                         tb->flags & HF_RF_MASK
                                         ? BP_GDB : BP_ANY))) {
            gen_debug(dc, pc_ptr - dc->cs_base);
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
            pc_ptr += 1;
            goto done_generating;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        pc_ptr = disas_insn(env, dc, pc_ptr);
        /* stop translation if indicated */
        if (dc->is_jmp)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen */
        if (dc->tf || dc->singlestep_enabled ||
            (flags & HF_INHIBIT_IRQ_MASK)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* Do not cross the boundary of the pages in icount mode,
           it can cause an exception. Do it only when boundary is
           crossed by the first instruction in the block.
           If current instruction already crossed the bound - it's ok,
           because an exception hasn't stopped this code.
         */
        if ((tb->cflags & CF_USE_ICOUNT)
            && ((pc_ptr & TARGET_PAGE_MASK)
                != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
                || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* if too long translation, stop generation too */
        if (tcg_op_buf_full() ||
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
            num_insns >= max_insns) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* -singlestep command line option: one guest insn per TB.  */
        if (singlestep) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        int disas_flags;
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
        if (dc->code64)
            disas_flags = 2;
        else
#endif
            disas_flags = !dc->code32;
        log_target_disas(cs, pc_start, pc_ptr - pc_start, disas_flags);
        qemu_log("\n");
    }
#endif

    tb->size = pc_ptr - pc_start;
    tb->icount = num_insns;
}
8298
bad729e2
RH
8299void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
8300 target_ulong *data)
d2856f1a 8301{
bad729e2
RH
8302 int cc_op = data[1];
8303 env->eip = data[0] - tb->cs_base;
8304 if (cc_op != CC_OP_DYNAMIC) {
d2856f1a 8305 env->cc_op = cc_op;
bad729e2 8306 }
d2856f1a 8307}