/*
 * i386 translation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#define PREFIX_REPZ 0x01
#define PREFIX_REPNZ 0x02
#define PREFIX_LOCK 0x04
#define PREFIX_DATA 0x08
#define PREFIX_ADR 0x10
#define PREFIX_VEX 0x20

#ifdef TARGET_X86_64
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
#else
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif

#ifdef TARGET_X86_64
# define ctztl ctz64
# define clztl clz64
#else
# define ctztl ctz32
# define clztl clz32
#endif

//#define MACRO_TEST 1

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
/* local temps */
static TCGv cpu_T[2];
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

#include "exec/gen-icount.h"

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif

typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    int aflag, dflag;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
#endif
    int vex_l;  /* vex vector length */
    int vex_v;  /* vex vvvv register, without 1's complement. */
    int ss32;   /* 32 bit stack segment */
    CCOp cc_op; /* current CC operation */
    bool cc_op_dirty;
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
} DisasContext;

static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, int ot, int d);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX, /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16, /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST = 1,
    USES_CC_SRC = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X. */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
};

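/* Note (added for clarity): the translator evaluates EFLAGS lazily.  Each
   flag-setting operation only records its operands in cpu_cc_dst/cpu_cc_src/
   cpu_cc_src2/cpu_cc_srcT together with a CC_OP_* code describing how the
   flags would be derived from them; the flags themselves are materialized
   only when a consumer asks for them (see gen_compute_eflags and friends
   below).  The table above tells set_cc_op() which of those globals the new
   CC_OP still needs, so the rest can be discarded.  */
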
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used. */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(cpu_cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

static inline void gen_op_movl_T1_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_T1_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_A0_im(uint32_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_im(int64_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}
#endif

static inline void gen_movtl_T0_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_movtl_T1_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_andl_T0_ffff(void)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
}

static inline void gen_op_andl_T0_im(uint32_t val)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
}

static inline void gen_op_movl_T0_T1(void)
{
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
}

static inline void gen_op_andl_A0_ffff(void)
{
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

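/* Note (added): throughout this file the operand-size parameter "ot" uses
   the TCGMemOp constants MO_8..MO_64 (assumed here to be 0..3), which is
   what makes size arithmetic such as "CC_OP_ADDB + ot" and "idx | MO_LE"
   work.  */
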
#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
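/* Example (added): with no REX prefix, encodings 4..7 name AH, CH, DH and
 * BH; in 64-bit mode, once any REX prefix has been seen (x86_64_hregs set),
 * the same encodings name SPL, BPL, SIL and DIL instead.
 */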
static inline bool byte_reg_is_xH(int reg)
{
    if (reg < 4) {
        return false;
    }
#ifdef TARGET_X86_64
    if (reg >= 8 || x86_64_hregs) {
        return false;
    }
#endif
    return true;
}

1e4840bf 350static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
351{
352 switch(ot) {
4ba9938c 353 case MO_8:
96d7073f 354 if (!byte_reg_is_xH(reg)) {
c832e3de 355 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
57fec1fe 356 } else {
c832e3de 357 tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
358 }
359 break;
4ba9938c 360 case MO_16:
c832e3de 361 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
57fec1fe 362 break;
cc739bb0 363 default: /* XXX this shouldn't be reached; abort? */
4ba9938c 364 case MO_32:
365 /* For x86_64, this sets the higher half of register to zero.
366 For i386, this is equivalent to a mov. */
367 tcg_gen_ext32u_tl(cpu_regs[reg], t0);
57fec1fe 368 break;
cc739bb0 369#ifdef TARGET_X86_64
4ba9938c 370 case MO_64:
cc739bb0 371 tcg_gen_mov_tl(cpu_regs[reg], t0);
57fec1fe 372 break;
14ce26e7 373#endif
374 }
375}
2c0262af 376
377static inline void gen_op_mov_reg_T0(int ot, int reg)
378{
1e4840bf 379 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
380}
381
382static inline void gen_op_mov_reg_T1(int ot, int reg)
383{
1e4840bf 384 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
385}
386
387static inline void gen_op_mov_reg_A0(int size, int reg)
388{
389 switch(size) {
4ba9938c 390 case MO_8:
c832e3de 391 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
57fec1fe 392 break;
cc739bb0 393 default: /* XXX this shouldn't be reached; abort? */
4ba9938c 394 case MO_16:
395 /* For x86_64, this sets the higher half of register to zero.
396 For i386, this is equivalent to a mov. */
397 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
57fec1fe 398 break;
cc739bb0 399#ifdef TARGET_X86_64
4ba9938c 400 case MO_32:
cc739bb0 401 tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
57fec1fe 402 break;
14ce26e7 403#endif
404 }
405}
406
1e4840bf 407static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
57fec1fe 408{
4ba9938c 409 if (ot == MO_8 && byte_reg_is_xH(reg)) {
410 tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
411 tcg_gen_ext8u_tl(t0, t0);
412 } else {
cc739bb0 413 tcg_gen_mov_tl(t0, cpu_regs[reg]);
414 }
415}
416
417static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
418{
419 gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
420}
421
422static inline void gen_op_movl_A0_reg(int reg)
423{
cc739bb0 424 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
425}
426
427static inline void gen_op_addl_A0_im(int32_t val)
428{
429 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
14ce26e7 430#ifdef TARGET_X86_64
57fec1fe 431 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
14ce26e7 432#endif
57fec1fe 433}
2c0262af 434
14ce26e7 435#ifdef TARGET_X86_64
436static inline void gen_op_addq_A0_im(int64_t val)
437{
438 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
439}
14ce26e7 440#endif
441
442static void gen_add_A0_im(DisasContext *s, int val)
443{
444#ifdef TARGET_X86_64
445 if (CODE64(s))
446 gen_op_addq_A0_im(val);
447 else
448#endif
449 gen_op_addl_A0_im(val);
450}
2c0262af 451
57fec1fe 452static inline void gen_op_addl_T0_T1(void)
2c0262af 453{
454 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
455}
456
457static inline void gen_op_jmp_T0(void)
458{
317ac620 459 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
460}
461
6e0d8677 462static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
57fec1fe 463{
6e0d8677 464 switch(size) {
4ba9938c 465 case MO_8:
cc739bb0 466 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
c832e3de 467 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
6e0d8677 468 break;
4ba9938c 469 case MO_16:
cc739bb0
LD
470 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
471 /* For x86_64, this sets the higher half of register to zero.
472 For i386, this is equivalent to a nop. */
473 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
474 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
6e0d8677
FB
475 break;
476#ifdef TARGET_X86_64
4ba9938c 477 case MO_32:
cc739bb0 478 tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
6e0d8677
FB
479 break;
480#endif
481 }
57fec1fe
FB
482}
483
6e0d8677 484static inline void gen_op_add_reg_T0(int size, int reg)
57fec1fe 485{
6e0d8677 486 switch(size) {
4ba9938c 487 case MO_8:
cc739bb0 488 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
c832e3de 489 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
6e0d8677 490 break;
4ba9938c 491 case MO_16:
cc739bb0
LD
492 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
493 /* For x86_64, this sets the higher half of register to zero.
494 For i386, this is equivalent to a nop. */
495 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
496 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
6e0d8677 497 break;
14ce26e7 498#ifdef TARGET_X86_64
4ba9938c 499 case MO_32:
cc739bb0 500 tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
6e0d8677 501 break;
14ce26e7 502#endif
6e0d8677
FB
503 }
504}
57fec1fe 505
57fec1fe
FB
506static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
507{
cc739bb0
LD
508 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
509 if (shift != 0)
57fec1fe
FB
510 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
511 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
cc739bb0
LD
512 /* For x86_64, this sets the higher half of register to zero.
513 For i386, this is equivalent to a nop. */
514 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
57fec1fe 515}
2c0262af 516
57fec1fe
FB
517static inline void gen_op_movl_A0_seg(int reg)
518{
317ac620 519 tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
57fec1fe 520}
2c0262af 521
7162ab21 522static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
57fec1fe 523{
317ac620 524 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
57fec1fe 525#ifdef TARGET_X86_64
7162ab21
VC
526 if (CODE64(s)) {
527 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
528 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
529 } else {
530 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
531 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
532 }
533#else
534 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
57fec1fe
FB
535#endif
536}
2c0262af 537
14ce26e7 538#ifdef TARGET_X86_64
57fec1fe
FB
539static inline void gen_op_movq_A0_seg(int reg)
540{
317ac620 541 tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
57fec1fe 542}
14ce26e7 543
57fec1fe
FB
544static inline void gen_op_addq_A0_seg(int reg)
545{
317ac620 546 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
57fec1fe
FB
547 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
548}
549
550static inline void gen_op_movq_A0_reg(int reg)
551{
cc739bb0 552 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
57fec1fe
FB
553}
554
555static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
556{
cc739bb0
LD
557 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
558 if (shift != 0)
57fec1fe
FB
559 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
560 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
561}
14ce26e7
FB
562#endif
563
323d1876 564static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
57fec1fe 565{
3c5f4116 566 tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
57fec1fe 567}
2c0262af 568
323d1876 569static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
57fec1fe 570{
3523e4bd 571 tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
57fec1fe 572}
4f31916f 573
d4faa3e0
RH
574static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
575{
576 if (d == OR_TMP0) {
fd8ca9f6 577 gen_op_st_v(s, idx, cpu_T[0], cpu_A0);
d4faa3e0
RH
578 } else {
579 gen_op_mov_reg_T0(idx, d);
580 }
581}
582
14ce26e7
FB
583static inline void gen_jmp_im(target_ulong pc)
584{
57fec1fe 585 tcg_gen_movi_tl(cpu_tmp0, pc);
317ac620 586 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
14ce26e7
FB
587}
588
2c0262af
FB
589static inline void gen_string_movl_A0_ESI(DisasContext *s)
590{
591 int override;
592
593 override = s->override;
14ce26e7
FB
594#ifdef TARGET_X86_64
595 if (s->aflag == 2) {
596 if (override >= 0) {
57fec1fe
FB
597 gen_op_movq_A0_seg(override);
598 gen_op_addq_A0_reg_sN(0, R_ESI);
14ce26e7 599 } else {
57fec1fe 600 gen_op_movq_A0_reg(R_ESI);
14ce26e7
FB
601 }
602 } else
603#endif
2c0262af
FB
604 if (s->aflag) {
605 /* 32 bit address */
606 if (s->addseg && override < 0)
607 override = R_DS;
608 if (override >= 0) {
57fec1fe
FB
609 gen_op_movl_A0_seg(override);
610 gen_op_addl_A0_reg_sN(0, R_ESI);
2c0262af 611 } else {
57fec1fe 612 gen_op_movl_A0_reg(R_ESI);
2c0262af
FB
613 }
614 } else {
        /* 16 bit address, always override */
616 if (override < 0)
617 override = R_DS;
57fec1fe 618 gen_op_movl_A0_reg(R_ESI);
2c0262af 619 gen_op_andl_A0_ffff();
7162ab21 620 gen_op_addl_A0_seg(s, override);
2c0262af
FB
621 }
622}
623
624static inline void gen_string_movl_A0_EDI(DisasContext *s)
625{
14ce26e7
FB
626#ifdef TARGET_X86_64
627 if (s->aflag == 2) {
57fec1fe 628 gen_op_movq_A0_reg(R_EDI);
14ce26e7
FB
629 } else
630#endif
2c0262af
FB
631 if (s->aflag) {
632 if (s->addseg) {
57fec1fe
FB
633 gen_op_movl_A0_seg(R_ES);
634 gen_op_addl_A0_reg_sN(0, R_EDI);
2c0262af 635 } else {
57fec1fe 636 gen_op_movl_A0_reg(R_EDI);
2c0262af
FB
637 }
638 } else {
57fec1fe 639 gen_op_movl_A0_reg(R_EDI);
2c0262af 640 gen_op_andl_A0_ffff();
7162ab21 641 gen_op_addl_A0_seg(s, R_ES);
2c0262af
FB
642 }
643}
644
6e0d8677
FB
645static inline void gen_op_movl_T0_Dshift(int ot)
646{
317ac620 647 tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
6e0d8677 648 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
2c0262af
FB
}
650
d824df34 651static TCGv gen_ext_tl(TCGv dst, TCGv src, int size, bool sign)
6e0d8677 652{
d824df34 653 switch (size) {
4ba9938c 654 case MO_8:
d824df34
PB
655 if (sign) {
656 tcg_gen_ext8s_tl(dst, src);
657 } else {
658 tcg_gen_ext8u_tl(dst, src);
659 }
660 return dst;
4ba9938c 661 case MO_16:
d824df34
PB
662 if (sign) {
663 tcg_gen_ext16s_tl(dst, src);
664 } else {
665 tcg_gen_ext16u_tl(dst, src);
666 }
667 return dst;
668#ifdef TARGET_X86_64
4ba9938c 669 case MO_32:
d824df34
PB
670 if (sign) {
671 tcg_gen_ext32s_tl(dst, src);
672 } else {
673 tcg_gen_ext32u_tl(dst, src);
674 }
675 return dst;
676#endif
6e0d8677 677 default:
d824df34 678 return src;
6e0d8677
FB
679 }
680}
3b46e624 681
d824df34
PB
682static void gen_extu(int ot, TCGv reg)
683{
684 gen_ext_tl(reg, reg, ot, false);
685}
686
6e0d8677
FB
687static void gen_exts(int ot, TCGv reg)
688{
d824df34 689 gen_ext_tl(reg, reg, ot, true);
6e0d8677 690}
2c0262af 691
6e0d8677
FB
692static inline void gen_op_jnz_ecx(int size, int label1)
693{
cc739bb0 694 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
6e0d8677 695 gen_extu(size + 1, cpu_tmp0);
cb63669a 696 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
6e0d8677
FB
697}
698
699static inline void gen_op_jz_ecx(int size, int label1)
700{
cc739bb0 701 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
6e0d8677 702 gen_extu(size + 1, cpu_tmp0);
cb63669a 703 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
6e0d8677 704}
2c0262af 705
a7812ae4
PB
706static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
707{
708 switch (ot) {
4ba9938c 709 case MO_8:
93ab25d7
PB
710 gen_helper_inb(v, n);
711 break;
4ba9938c 712 case MO_16:
93ab25d7
PB
713 gen_helper_inw(v, n);
714 break;
4ba9938c 715 case MO_32:
93ab25d7
PB
716 gen_helper_inl(v, n);
717 break;
a7812ae4 718 }
a7812ae4 719}
2c0262af 720
a7812ae4
PB
721static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
722{
723 switch (ot) {
4ba9938c 724 case MO_8:
93ab25d7
PB
725 gen_helper_outb(v, n);
726 break;
4ba9938c 727 case MO_16:
93ab25d7
PB
728 gen_helper_outw(v, n);
729 break;
4ba9938c 730 case MO_32:
93ab25d7
PB
731 gen_helper_outl(v, n);
732 break;
a7812ae4 733 }
a7812ae4 734}
f115e911 735
b8b6a50b
FB
736static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
737 uint32_t svm_flags)
f115e911 738{
b8b6a50b
FB
739 int state_saved;
740 target_ulong next_eip;
741
742 state_saved = 0;
f115e911 743 if (s->pe && (s->cpl > s->iopl || s->vm86)) {
773cdfcc 744 gen_update_cc_op(s);
14ce26e7 745 gen_jmp_im(cur_eip);
b8b6a50b 746 state_saved = 1;
b6abf97d 747 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
a7812ae4 748 switch (ot) {
4ba9938c 749 case MO_8:
4a7443be
BS
750 gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
751 break;
4ba9938c 752 case MO_16:
4a7443be
BS
753 gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
754 break;
4ba9938c 755 case MO_32:
4a7443be
BS
756 gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
757 break;
a7812ae4 758 }
b8b6a50b 759 }
872929aa 760 if(s->flags & HF_SVMI_MASK) {
b8b6a50b 761 if (!state_saved) {
773cdfcc 762 gen_update_cc_op(s);
b8b6a50b 763 gen_jmp_im(cur_eip);
b8b6a50b
FB
764 }
765 svm_flags |= (1 << (4 + ot));
766 next_eip = s->pc - s->cs_base;
b6abf97d 767 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
052e80d5
BS
768 gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
769 tcg_const_i32(svm_flags),
a7812ae4 770 tcg_const_i32(next_eip - cur_eip));
f115e911
FB
771 }
772}
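
/* Summary (added): gen_check_io() covers two cases that both require EIP and
   cc_op to be synced first (hence the state_saved bookkeeping): the TSS I/O
   permission-bitmap check when CPL > IOPL or in vm86 mode, and the SVM I/O
   intercept when the guest runs with HF_SVMI_MASK set.  */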
773
2c0262af
FB
774static inline void gen_movs(DisasContext *s, int ot)
775{
776 gen_string_movl_A0_ESI(s);
909be183 777 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 778 gen_string_movl_A0_EDI(s);
fd8ca9f6 779 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
6e0d8677
FB
780 gen_op_movl_T0_Dshift(ot);
781 gen_op_add_reg_T0(s->aflag, R_ESI);
782 gen_op_add_reg_T0(s->aflag, R_EDI);
2c0262af
FB
783}
784
b6abf97d
FB
785static void gen_op_update1_cc(void)
786{
b6abf97d
FB
787 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
788}
789
790static void gen_op_update2_cc(void)
791{
792 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
793 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
794}
795
988c3eb0
RH
796static void gen_op_update3_cc(TCGv reg)
797{
798 tcg_gen_mov_tl(cpu_cc_src2, reg);
799 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
800 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
801}
802
803static inline void gen_op_testl_T0_T1_cc(void)
804{
b6abf97d
FB
805 tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
806}
807
808static void gen_op_update_neg_cc(void)
809{
b6abf97d 810 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
a3251186
RH
811 tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
812 tcg_gen_movi_tl(cpu_cc_srcT, 0);
b6abf97d
FB
813}
814
d229edce
RH
815/* compute all eflags to cc_src */
816static void gen_compute_eflags(DisasContext *s)
8e1c85e3 817{
988c3eb0 818 TCGv zero, dst, src1, src2;
db9f2597
RH
819 int live, dead;
820
d229edce
RH
821 if (s->cc_op == CC_OP_EFLAGS) {
822 return;
823 }
436ff2d2
RH
824 if (s->cc_op == CC_OP_CLR) {
825 tcg_gen_movi_tl(cpu_cc_src, CC_Z);
826 set_cc_op(s, CC_OP_EFLAGS);
827 return;
828 }
db9f2597
RH
829
830 TCGV_UNUSED(zero);
831 dst = cpu_cc_dst;
832 src1 = cpu_cc_src;
988c3eb0 833 src2 = cpu_cc_src2;
db9f2597
RH
834
835 /* Take care to not read values that are not live. */
836 live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
988c3eb0 837 dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
db9f2597
RH
838 if (dead) {
839 zero = tcg_const_tl(0);
840 if (dead & USES_CC_DST) {
841 dst = zero;
842 }
843 if (dead & USES_CC_SRC) {
844 src1 = zero;
845 }
988c3eb0
RH
846 if (dead & USES_CC_SRC2) {
847 src2 = zero;
848 }
db9f2597
RH
849 }
850
773cdfcc 851 gen_update_cc_op(s);
988c3eb0 852 gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
d229edce 853 set_cc_op(s, CC_OP_EFLAGS);
db9f2597
RH
854
855 if (dead) {
856 tcg_temp_free(zero);
857 }
8e1c85e3
FB
858}
859
860typedef struct CCPrepare {
861 TCGCond cond;
862 TCGv reg;
863 TCGv reg2;
864 target_ulong imm;
865 target_ulong mask;
866 bool use_reg2;
867 bool no_setcond;
868} CCPrepare;
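/* Interpretation (added): a CCPrepare describes how to test a condition
   without forcing a full EFLAGS computation.  The condition holds when
   cond(reg & mask, imm) is true, or cond(reg, reg2) when use_reg2 is set;
   with no_setcond, reg already holds the result value directly.  See
   gen_setcc1() and gen_jcc1_noeob() for the consumers.  */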
869
06847f1f 870/* compute eflags.C to reg */
bec93d72 871static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
06847f1f
RH
872{
873 TCGv t0, t1;
bec93d72 874 int size, shift;
06847f1f
RH
875
876 switch (s->cc_op) {
877 case CC_OP_SUBB ... CC_OP_SUBQ:
a3251186 878 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
06847f1f
RH
879 size = s->cc_op - CC_OP_SUBB;
880 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
881 /* If no temporary was used, be careful not to alias t1 and t0. */
882 t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
a3251186 883 tcg_gen_mov_tl(t0, cpu_cc_srcT);
06847f1f
RH
884 gen_extu(size, t0);
885 goto add_sub;
886
887 case CC_OP_ADDB ... CC_OP_ADDQ:
888 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
889 size = s->cc_op - CC_OP_ADDB;
890 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
891 t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
892 add_sub:
bec93d72
RH
893 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
894 .reg2 = t1, .mask = -1, .use_reg2 = true };
06847f1f 895
06847f1f 896 case CC_OP_LOGICB ... CC_OP_LOGICQ:
436ff2d2 897 case CC_OP_CLR:
bec93d72 898 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
06847f1f
RH
899
900 case CC_OP_INCB ... CC_OP_INCQ:
901 case CC_OP_DECB ... CC_OP_DECQ:
bec93d72
RH
902 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
903 .mask = -1, .no_setcond = true };
06847f1f
RH
904
905 case CC_OP_SHLB ... CC_OP_SHLQ:
906 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
907 size = s->cc_op - CC_OP_SHLB;
bec93d72
RH
908 shift = (8 << size) - 1;
909 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
910 .mask = (target_ulong)1 << shift };
06847f1f
RH
911
912 case CC_OP_MULB ... CC_OP_MULQ:
bec93d72
RH
913 return (CCPrepare) { .cond = TCG_COND_NE,
914 .reg = cpu_cc_src, .mask = -1 };
06847f1f 915
bc4b43dc
RH
916 case CC_OP_BMILGB ... CC_OP_BMILGQ:
917 size = s->cc_op - CC_OP_BMILGB;
918 t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
919 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
920
cd7f97ca
RH
921 case CC_OP_ADCX:
922 case CC_OP_ADCOX:
923 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
924 .mask = -1, .no_setcond = true };
925
06847f1f
RH
926 case CC_OP_EFLAGS:
927 case CC_OP_SARB ... CC_OP_SARQ:
928 /* CC_SRC & 1 */
bec93d72
RH
929 return (CCPrepare) { .cond = TCG_COND_NE,
930 .reg = cpu_cc_src, .mask = CC_C };
06847f1f
RH
931
932 default:
933 /* The need to compute only C from CC_OP_DYNAMIC is important
934 in efficiently implementing e.g. INC at the start of a TB. */
935 gen_update_cc_op(s);
988c3eb0
RH
936 gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
937 cpu_cc_src2, cpu_cc_op);
bec93d72
RH
938 return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
939 .mask = -1, .no_setcond = true };
06847f1f
RH
940 }
941}
942
1608ecca 943/* compute eflags.P to reg */
bec93d72 944static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
1608ecca 945{
d229edce 946 gen_compute_eflags(s);
bec93d72
RH
947 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
948 .mask = CC_P };
1608ecca
PB
949}
950
951/* compute eflags.S to reg */
bec93d72 952static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
1608ecca 953{
086c4077
RH
954 switch (s->cc_op) {
955 case CC_OP_DYNAMIC:
956 gen_compute_eflags(s);
957 /* FALLTHRU */
958 case CC_OP_EFLAGS:
cd7f97ca
RH
959 case CC_OP_ADCX:
960 case CC_OP_ADOX:
961 case CC_OP_ADCOX:
bec93d72
RH
962 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
963 .mask = CC_S };
436ff2d2
RH
964 case CC_OP_CLR:
965 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
086c4077
RH
966 default:
967 {
968 int size = (s->cc_op - CC_OP_ADDB) & 3;
969 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
bec93d72 970 return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
086c4077 971 }
086c4077 972 }
1608ecca
PB
973}
974
975/* compute eflags.O to reg */
bec93d72 976static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
1608ecca 977{
cd7f97ca
RH
978 switch (s->cc_op) {
979 case CC_OP_ADOX:
980 case CC_OP_ADCOX:
981 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
982 .mask = -1, .no_setcond = true };
436ff2d2
RH
983 case CC_OP_CLR:
984 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
cd7f97ca
RH
985 default:
986 gen_compute_eflags(s);
987 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
988 .mask = CC_O };
989 }
1608ecca
PB
990}
991
992/* compute eflags.Z to reg */
bec93d72 993static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
1608ecca 994{
086c4077
RH
995 switch (s->cc_op) {
996 case CC_OP_DYNAMIC:
997 gen_compute_eflags(s);
998 /* FALLTHRU */
999 case CC_OP_EFLAGS:
cd7f97ca
RH
1000 case CC_OP_ADCX:
1001 case CC_OP_ADOX:
1002 case CC_OP_ADCOX:
bec93d72
RH
1003 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1004 .mask = CC_Z };
436ff2d2
RH
1005 case CC_OP_CLR:
1006 return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
086c4077
RH
1007 default:
1008 {
1009 int size = (s->cc_op - CC_OP_ADDB) & 3;
1010 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
bec93d72 1011 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
086c4077 1012 }
bec93d72
RH
1013 }
1014}
1015
c365395e
PB
/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
276e6b5f 1018static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
8e1c85e3 1019{
c365395e 1020 int inv, jcc_op, size, cond;
276e6b5f 1021 CCPrepare cc;
c365395e
PB
1022 TCGv t0;
1023
1024 inv = b & 1;
8e1c85e3 1025 jcc_op = (b >> 1) & 7;
c365395e
PB
1026
1027 switch (s->cc_op) {
69d1aa31
RH
1028 case CC_OP_SUBB ... CC_OP_SUBQ:
1029 /* We optimize relational operators for the cmp/jcc case. */
c365395e
PB
1030 size = s->cc_op - CC_OP_SUBB;
1031 switch (jcc_op) {
1032 case JCC_BE:
a3251186 1033 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
c365395e
PB
1034 gen_extu(size, cpu_tmp4);
1035 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
276e6b5f
RH
1036 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
1037 .reg2 = t0, .mask = -1, .use_reg2 = true };
c365395e 1038 break;
8e1c85e3 1039
c365395e 1040 case JCC_L:
276e6b5f 1041 cond = TCG_COND_LT;
c365395e
PB
1042 goto fast_jcc_l;
1043 case JCC_LE:
276e6b5f 1044 cond = TCG_COND_LE;
c365395e 1045 fast_jcc_l:
a3251186 1046 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
c365395e
PB
1047 gen_exts(size, cpu_tmp4);
1048 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
276e6b5f
RH
1049 cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
1050 .reg2 = t0, .mask = -1, .use_reg2 = true };
c365395e 1051 break;
8e1c85e3 1052
c365395e 1053 default:
8e1c85e3 1054 goto slow_jcc;
c365395e 1055 }
8e1c85e3 1056 break;
c365395e 1057
8e1c85e3
FB
1058 default:
1059 slow_jcc:
69d1aa31
RH
1060 /* This actually generates good code for JC, JZ and JS. */
1061 switch (jcc_op) {
1062 case JCC_O:
1063 cc = gen_prepare_eflags_o(s, reg);
1064 break;
1065 case JCC_B:
1066 cc = gen_prepare_eflags_c(s, reg);
1067 break;
1068 case JCC_Z:
1069 cc = gen_prepare_eflags_z(s, reg);
1070 break;
1071 case JCC_BE:
1072 gen_compute_eflags(s);
1073 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1074 .mask = CC_Z | CC_C };
1075 break;
1076 case JCC_S:
1077 cc = gen_prepare_eflags_s(s, reg);
1078 break;
1079 case JCC_P:
1080 cc = gen_prepare_eflags_p(s, reg);
1081 break;
1082 case JCC_L:
1083 gen_compute_eflags(s);
1084 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1085 reg = cpu_tmp0;
1086 }
1087 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1088 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1089 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1090 .mask = CC_S };
1091 break;
1092 default:
1093 case JCC_LE:
1094 gen_compute_eflags(s);
1095 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1096 reg = cpu_tmp0;
1097 }
1098 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1099 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1100 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1101 .mask = CC_S | CC_Z };
1102 break;
1103 }
c365395e 1104 break;
8e1c85e3 1105 }
276e6b5f
RH
1106
1107 if (inv) {
1108 cc.cond = tcg_invert_cond(cc.cond);
1109 }
1110 return cc;
8e1c85e3
FB
1111}
1112
cc8b6f5b
PB
1113static void gen_setcc1(DisasContext *s, int b, TCGv reg)
1114{
1115 CCPrepare cc = gen_prepare_cc(s, b, reg);
1116
1117 if (cc.no_setcond) {
1118 if (cc.cond == TCG_COND_EQ) {
1119 tcg_gen_xori_tl(reg, cc.reg, 1);
1120 } else {
1121 tcg_gen_mov_tl(reg, cc.reg);
1122 }
1123 return;
1124 }
1125
1126 if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
1127 cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
1128 tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
1129 tcg_gen_andi_tl(reg, reg, 1);
1130 return;
1131 }
1132 if (cc.mask != -1) {
1133 tcg_gen_andi_tl(reg, cc.reg, cc.mask);
1134 cc.reg = reg;
1135 }
1136 if (cc.use_reg2) {
1137 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1138 } else {
1139 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1140 }
1141}
1142
1143static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1144{
1145 gen_setcc1(s, JCC_B << 1, reg);
1146}
276e6b5f 1147
8e1c85e3
FB
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
dc259201
RH
1150static inline void gen_jcc1_noeob(DisasContext *s, int b, int l1)
1151{
1152 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
1153
1154 if (cc.mask != -1) {
1155 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1156 cc.reg = cpu_T[0];
1157 }
1158 if (cc.use_reg2) {
1159 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1160 } else {
1161 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1162 }
1163}
1164
/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon. */
b27fc131 1168static inline void gen_jcc1(DisasContext *s, int b, int l1)
8e1c85e3 1169{
943131ca 1170 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
8e1c85e3 1171
dc259201 1172 gen_update_cc_op(s);
943131ca
PB
1173 if (cc.mask != -1) {
1174 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1175 cc.reg = cpu_T[0];
1176 }
dc259201 1177 set_cc_op(s, CC_OP_DYNAMIC);
943131ca
PB
1178 if (cc.use_reg2) {
1179 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1180 } else {
1181 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
8e1c85e3
FB
1182 }
1183}
1184
14ce26e7
FB
1185/* XXX: does not work with gdbstub "ice" single step - not a
1186 serious problem */
1187static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
2c0262af 1188{
14ce26e7
FB
1189 int l1, l2;
1190
1191 l1 = gen_new_label();
1192 l2 = gen_new_label();
6e0d8677 1193 gen_op_jnz_ecx(s->aflag, l1);
14ce26e7
FB
1194 gen_set_label(l2);
1195 gen_jmp_tb(s, next_eip, 1);
1196 gen_set_label(l1);
1197 return l2;
2c0262af
FB
1198}
1199
1200static inline void gen_stos(DisasContext *s, int ot)
1201{
4ba9938c 1202 gen_op_mov_TN_reg(MO_32, 0, R_EAX);
2c0262af 1203 gen_string_movl_A0_EDI(s);
fd8ca9f6 1204 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
6e0d8677
FB
1205 gen_op_movl_T0_Dshift(ot);
1206 gen_op_add_reg_T0(s->aflag, R_EDI);
2c0262af
FB
1207}
1208
1209static inline void gen_lods(DisasContext *s, int ot)
1210{
1211 gen_string_movl_A0_ESI(s);
909be183 1212 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
57fec1fe 1213 gen_op_mov_reg_T0(ot, R_EAX);
6e0d8677
FB
1214 gen_op_movl_T0_Dshift(ot);
1215 gen_op_add_reg_T0(s->aflag, R_ESI);
2c0262af
FB
1216}
1217
1218static inline void gen_scas(DisasContext *s, int ot)
1219{
2c0262af 1220 gen_string_movl_A0_EDI(s);
0f712e10 1221 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
63633fe6 1222 gen_op(s, OP_CMPL, ot, R_EAX);
6e0d8677
FB
1223 gen_op_movl_T0_Dshift(ot);
1224 gen_op_add_reg_T0(s->aflag, R_EDI);
2c0262af
FB
1225}
1226
1227static inline void gen_cmps(DisasContext *s, int ot)
1228{
2c0262af 1229 gen_string_movl_A0_EDI(s);
0f712e10 1230 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
63633fe6
RH
1231 gen_string_movl_A0_ESI(s);
1232 gen_op(s, OP_CMPL, ot, OR_TMP0);
6e0d8677
FB
1233 gen_op_movl_T0_Dshift(ot);
1234 gen_op_add_reg_T0(s->aflag, R_ESI);
1235 gen_op_add_reg_T0(s->aflag, R_EDI);
2c0262af
FB
1236}
1237
1238static inline void gen_ins(DisasContext *s, int ot)
1239{
2e70f6ef
PB
1240 if (use_icount)
1241 gen_io_start();
2c0262af 1242 gen_string_movl_A0_EDI(s);
6e0d8677
FB
1243 /* Note: we must do this dummy write first to be restartable in
1244 case of page fault. */
97212c88 1245 tcg_gen_movi_tl(cpu_T[0], 0);
fd8ca9f6 1246 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
24b9c00f 1247 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
b6abf97d 1248 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
a7812ae4 1249 gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
fd8ca9f6 1250 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
6e0d8677
FB
1251 gen_op_movl_T0_Dshift(ot);
1252 gen_op_add_reg_T0(s->aflag, R_EDI);
2e70f6ef
PB
1253 if (use_icount)
1254 gen_io_end();
2c0262af
FB
1255}
1256
1257static inline void gen_outs(DisasContext *s, int ot)
1258{
2e70f6ef
PB
1259 if (use_icount)
1260 gen_io_start();
2c0262af 1261 gen_string_movl_A0_ESI(s);
909be183 1262 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
b8b6a50b 1263
24b9c00f 1264 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
b6abf97d
FB
1265 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1266 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
a7812ae4 1267 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
b8b6a50b 1268
6e0d8677
FB
1269 gen_op_movl_T0_Dshift(ot);
1270 gen_op_add_reg_T0(s->aflag, R_ESI);
2e70f6ef
PB
1271 if (use_icount)
1272 gen_io_end();
2c0262af
FB
1273}
1274
/* same method as Valgrind: we generate jumps to current or next
   instruction */
1277#define GEN_REPZ(op) \
1278static inline void gen_repz_ ## op(DisasContext *s, int ot, \
14ce26e7 1279 target_ulong cur_eip, target_ulong next_eip) \
2c0262af 1280{ \
14ce26e7 1281 int l2;\
2c0262af 1282 gen_update_cc_op(s); \
14ce26e7 1283 l2 = gen_jz_ecx_string(s, next_eip); \
2c0262af 1284 gen_ ## op(s, ot); \
6e0d8677 1285 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
2c0262af
FB
1286 /* a loop would cause two single step exceptions if ECX = 1 \
1287 before rep string_insn */ \
1288 if (!s->jmp_opt) \
6e0d8677 1289 gen_op_jz_ecx(s->aflag, l2); \
2c0262af
FB
1290 gen_jmp(s, cur_eip); \
1291}
1292
1293#define GEN_REPZ2(op) \
1294static inline void gen_repz_ ## op(DisasContext *s, int ot, \
14ce26e7
FB
1295 target_ulong cur_eip, \
1296 target_ulong next_eip, \
2c0262af
FB
1297 int nz) \
1298{ \
14ce26e7 1299 int l2;\
2c0262af 1300 gen_update_cc_op(s); \
14ce26e7 1301 l2 = gen_jz_ecx_string(s, next_eip); \
2c0262af 1302 gen_ ## op(s, ot); \
6e0d8677 1303 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
773cdfcc 1304 gen_update_cc_op(s); \
b27fc131 1305 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
2c0262af 1306 if (!s->jmp_opt) \
6e0d8677 1307 gen_op_jz_ecx(s->aflag, l2); \
2c0262af
FB
1308 gen_jmp(s, cur_eip); \
1309}
1310
1311GEN_REPZ(movs)
1312GEN_REPZ(stos)
1313GEN_REPZ(lods)
1314GEN_REPZ(ins)
1315GEN_REPZ(outs)
1316GEN_REPZ2(scas)
1317GEN_REPZ2(cmps)
1318
a7812ae4
PB
1319static void gen_helper_fp_arith_ST0_FT0(int op)
1320{
1321 switch (op) {
d3eb5eae
BS
1322 case 0:
1323 gen_helper_fadd_ST0_FT0(cpu_env);
1324 break;
1325 case 1:
1326 gen_helper_fmul_ST0_FT0(cpu_env);
1327 break;
1328 case 2:
1329 gen_helper_fcom_ST0_FT0(cpu_env);
1330 break;
1331 case 3:
1332 gen_helper_fcom_ST0_FT0(cpu_env);
1333 break;
1334 case 4:
1335 gen_helper_fsub_ST0_FT0(cpu_env);
1336 break;
1337 case 5:
1338 gen_helper_fsubr_ST0_FT0(cpu_env);
1339 break;
1340 case 6:
1341 gen_helper_fdiv_ST0_FT0(cpu_env);
1342 break;
1343 case 7:
1344 gen_helper_fdivr_ST0_FT0(cpu_env);
1345 break;
a7812ae4
PB
1346 }
1347}
2c0262af
FB
1348
1349/* NOTE the exception in "r" op ordering */
a7812ae4
PB
1350static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1351{
1352 TCGv_i32 tmp = tcg_const_i32(opreg);
1353 switch (op) {
d3eb5eae
BS
1354 case 0:
1355 gen_helper_fadd_STN_ST0(cpu_env, tmp);
1356 break;
1357 case 1:
1358 gen_helper_fmul_STN_ST0(cpu_env, tmp);
1359 break;
1360 case 4:
1361 gen_helper_fsubr_STN_ST0(cpu_env, tmp);
1362 break;
1363 case 5:
1364 gen_helper_fsub_STN_ST0(cpu_env, tmp);
1365 break;
1366 case 6:
1367 gen_helper_fdivr_STN_ST0(cpu_env, tmp);
1368 break;
1369 case 7:
1370 gen_helper_fdiv_STN_ST0(cpu_env, tmp);
1371 break;
a7812ae4
PB
1372 }
1373}
2c0262af
FB
1374
1375/* if d == OR_TMP0, it means memory operand (address in A0) */
1376static void gen_op(DisasContext *s1, int op, int ot, int d)
1377{
2c0262af 1378 if (d != OR_TMP0) {
57fec1fe 1379 gen_op_mov_TN_reg(ot, 0, d);
2c0262af 1380 } else {
909be183 1381 gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
2c0262af
FB
1382 }
1383 switch(op) {
1384 case OP_ADCL:
cc8b6f5b 1385 gen_compute_eflags_c(s1, cpu_tmp4);
cad3a37d
FB
1386 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1387 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
d4faa3e0 1388 gen_op_st_rm_T0_A0(s1, ot, d);
988c3eb0
RH
1389 gen_op_update3_cc(cpu_tmp4);
1390 set_cc_op(s1, CC_OP_ADCB + ot);
cad3a37d 1391 break;
2c0262af 1392 case OP_SBBL:
cc8b6f5b 1393 gen_compute_eflags_c(s1, cpu_tmp4);
cad3a37d
FB
1394 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1395 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
d4faa3e0 1396 gen_op_st_rm_T0_A0(s1, ot, d);
988c3eb0
RH
1397 gen_op_update3_cc(cpu_tmp4);
1398 set_cc_op(s1, CC_OP_SBBB + ot);
cad3a37d 1399 break;
2c0262af
FB
1400 case OP_ADDL:
1401 gen_op_addl_T0_T1();
d4faa3e0 1402 gen_op_st_rm_T0_A0(s1, ot, d);
cad3a37d 1403 gen_op_update2_cc();
3ca51d07 1404 set_cc_op(s1, CC_OP_ADDB + ot);
2c0262af
FB
1405 break;
1406 case OP_SUBL:
a3251186 1407 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
57fec1fe 1408 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
d4faa3e0 1409 gen_op_st_rm_T0_A0(s1, ot, d);
cad3a37d 1410 gen_op_update2_cc();
3ca51d07 1411 set_cc_op(s1, CC_OP_SUBB + ot);
2c0262af
FB
1412 break;
1413 default:
1414 case OP_ANDL:
57fec1fe 1415 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
d4faa3e0 1416 gen_op_st_rm_T0_A0(s1, ot, d);
cad3a37d 1417 gen_op_update1_cc();
3ca51d07 1418 set_cc_op(s1, CC_OP_LOGICB + ot);
57fec1fe 1419 break;
2c0262af 1420 case OP_ORL:
57fec1fe 1421 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
d4faa3e0 1422 gen_op_st_rm_T0_A0(s1, ot, d);
cad3a37d 1423 gen_op_update1_cc();
3ca51d07 1424 set_cc_op(s1, CC_OP_LOGICB + ot);
57fec1fe 1425 break;
2c0262af 1426 case OP_XORL:
57fec1fe 1427 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
d4faa3e0 1428 gen_op_st_rm_T0_A0(s1, ot, d);
cad3a37d 1429 gen_op_update1_cc();
3ca51d07 1430 set_cc_op(s1, CC_OP_LOGICB + ot);
2c0262af
FB
1431 break;
1432 case OP_CMPL:
63633fe6 1433 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
a3251186 1434 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
63633fe6 1435 tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
3ca51d07 1436 set_cc_op(s1, CC_OP_SUBB + ot);
2c0262af
FB
1437 break;
1438 }
b6abf97d
FB
1439}
1440
2c0262af
FB
1441/* if d == OR_TMP0, it means memory operand (address in A0) */
1442static void gen_inc(DisasContext *s1, int ot, int d, int c)
1443{
909be183 1444 if (d != OR_TMP0) {
57fec1fe 1445 gen_op_mov_TN_reg(ot, 0, d);
909be183
RH
1446 } else {
1447 gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
1448 }
cc8b6f5b 1449 gen_compute_eflags_c(s1, cpu_cc_src);
2c0262af 1450 if (c > 0) {
b6abf97d 1451 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
3ca51d07 1452 set_cc_op(s1, CC_OP_INCB + ot);
2c0262af 1453 } else {
b6abf97d 1454 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
3ca51d07 1455 set_cc_op(s1, CC_OP_DECB + ot);
2c0262af 1456 }
d4faa3e0 1457 gen_op_st_rm_T0_A0(s1, ot, d);
cd31fefa 1458 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
2c0262af
FB
1459}
1460
f437d0a3
RH
1461static void gen_shift_flags(DisasContext *s, int ot, TCGv result, TCGv shm1,
1462 TCGv count, bool is_right)
1463{
1464 TCGv_i32 z32, s32, oldop;
1465 TCGv z_tl;
1466
1467 /* Store the results into the CC variables. If we know that the
1468 variable must be dead, store unconditionally. Otherwise we'll
1469 need to not disrupt the current contents. */
1470 z_tl = tcg_const_tl(0);
1471 if (cc_op_live[s->cc_op] & USES_CC_DST) {
1472 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
1473 result, cpu_cc_dst);
1474 } else {
1475 tcg_gen_mov_tl(cpu_cc_dst, result);
1476 }
1477 if (cc_op_live[s->cc_op] & USES_CC_SRC) {
1478 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
1479 shm1, cpu_cc_src);
1480 } else {
1481 tcg_gen_mov_tl(cpu_cc_src, shm1);
1482 }
1483 tcg_temp_free(z_tl);
1484
1485 /* Get the two potential CC_OP values into temporaries. */
1486 tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1487 if (s->cc_op == CC_OP_DYNAMIC) {
1488 oldop = cpu_cc_op;
1489 } else {
1490 tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
1491 oldop = cpu_tmp3_i32;
1492 }
1493
1494 /* Conditionally store the CC_OP value. */
1495 z32 = tcg_const_i32(0);
1496 s32 = tcg_temp_new_i32();
1497 tcg_gen_trunc_tl_i32(s32, count);
1498 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
1499 tcg_temp_free_i32(z32);
1500 tcg_temp_free_i32(s32);
1501
1502 /* The CC_OP value is no longer predictable. */
1503 set_cc_op(s, CC_OP_DYNAMIC);
1504}
1505
b6abf97d
FB
1506static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
1507 int is_right, int is_arith)
2c0262af 1508{
4ba9938c 1509 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
3b46e624 1510
b6abf97d 1511 /* load */
82786041 1512 if (op1 == OR_TMP0) {
909be183 1513 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
82786041 1514 } else {
b6abf97d 1515 gen_op_mov_TN_reg(ot, 0, op1);
82786041 1516 }
b6abf97d 1517
a41f62f5
RH
1518 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
1519 tcg_gen_subi_tl(cpu_tmp0, cpu_T[1], 1);
b6abf97d
FB
1520
1521 if (is_right) {
1522 if (is_arith) {
f484d386 1523 gen_exts(ot, cpu_T[0]);
a41f62f5
RH
1524 tcg_gen_sar_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1525 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
b6abf97d 1526 } else {
cad3a37d 1527 gen_extu(ot, cpu_T[0]);
a41f62f5
RH
1528 tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1529 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
b6abf97d
FB
1530 }
1531 } else {
a41f62f5
RH
1532 tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1533 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
b6abf97d
FB
1534 }
1535
1536 /* store */
d4faa3e0 1537 gen_op_st_rm_T0_A0(s, ot, op1);
82786041 1538
f437d0a3 1539 gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, cpu_T[1], is_right);
b6abf97d
FB
1540}
1541
c1c37968
FB
1542static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
1543 int is_right, int is_arith)
1544{
4ba9938c 1545 int mask = (ot == MO_64 ? 0x3f : 0x1f);
c1c37968
FB
1546
1547 /* load */
1548 if (op1 == OR_TMP0)
909be183 1549 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
c1c37968
FB
1550 else
1551 gen_op_mov_TN_reg(ot, 0, op1);
1552
1553 op2 &= mask;
1554 if (op2 != 0) {
1555 if (is_right) {
1556 if (is_arith) {
1557 gen_exts(ot, cpu_T[0]);
2a449d14 1558 tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
c1c37968
FB
1559 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
1560 } else {
1561 gen_extu(ot, cpu_T[0]);
2a449d14 1562 tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
c1c37968
FB
1563 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
1564 }
1565 } else {
2a449d14 1566 tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
c1c37968
FB
1567 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
1568 }
1569 }
1570
1571 /* store */
d4faa3e0
RH
1572 gen_op_st_rm_T0_A0(s, ot, op1);
1573
c1c37968
FB
1574 /* update eflags if non zero shift */
1575 if (op2 != 0) {
2a449d14 1576 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
c1c37968 1577 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
3ca51d07 1578 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
c1c37968
FB
1579 }
1580}
1581
b6abf97d
FB
1582static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
1583{
1584 if (arg2 >= 0)
1585 tcg_gen_shli_tl(ret, arg1, arg2);
1586 else
1587 tcg_gen_shri_tl(ret, arg1, -arg2);
1588}
1589
34d80a55 1590static void gen_rot_rm_T1(DisasContext *s, int ot, int op1, int is_right)
b6abf97d 1591{
4ba9938c 1592 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
34d80a55 1593 TCGv_i32 t0, t1;
b6abf97d
FB
1594
1595 /* load */
1e4840bf 1596 if (op1 == OR_TMP0) {
909be183 1597 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1e4840bf 1598 } else {
34d80a55 1599 gen_op_mov_TN_reg(ot, 0, op1);
1e4840bf 1600 }
b6abf97d 1601
34d80a55 1602 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
b6abf97d 1603
34d80a55 1604 switch (ot) {
4ba9938c 1605 case MO_8:
34d80a55
RH
1606 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1607 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
1608 tcg_gen_muli_tl(cpu_T[0], cpu_T[0], 0x01010101);
1609 goto do_long;
4ba9938c 1610 case MO_16:
34d80a55
RH
1611 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1612 tcg_gen_deposit_tl(cpu_T[0], cpu_T[0], cpu_T[0], 16, 16);
1613 goto do_long;
1614 do_long:
1615#ifdef TARGET_X86_64
4ba9938c 1616 case MO_32:
34d80a55
RH
1617 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
1618 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
1619 if (is_right) {
1620 tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1621 } else {
1622 tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1623 }
1624 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
1625 break;
1626#endif
1627 default:
1628 if (is_right) {
1629 tcg_gen_rotr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1630 } else {
1631 tcg_gen_rotl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1632 }
1633 break;
b6abf97d 1634 }
b6abf97d 1635
b6abf97d 1636 /* store */
d4faa3e0 1637 gen_op_st_rm_T0_A0(s, ot, op1);
b6abf97d 1638
34d80a55
RH
1639 /* We'll need the flags computed into CC_SRC. */
1640 gen_compute_eflags(s);
b6abf97d 1641
34d80a55
RH
1642 /* The value that was "rotated out" is now present at the other end
1643 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1644 since we've computed the flags into CC_SRC, these variables are
1645 currently dead. */
b6abf97d 1646 if (is_right) {
34d80a55
RH
1647 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
1648 tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
089305ac 1649 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
34d80a55
RH
1650 } else {
1651 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
1652 tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
b6abf97d 1653 }
34d80a55
RH
1654 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1655 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1656
    /* Now conditionally store the new CC_OP value.  If the shift count
       is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
       Otherwise reuse CC_OP_ADCOX which has the C and O flags split out
       exactly as we computed above.  */
1661 t0 = tcg_const_i32(0);
1662 t1 = tcg_temp_new_i32();
1663 tcg_gen_trunc_tl_i32(t1, cpu_T[1]);
1664 tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
1665 tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
1666 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
1667 cpu_tmp2_i32, cpu_tmp3_i32);
1668 tcg_temp_free_i32(t0);
1669 tcg_temp_free_i32(t1);
1670
1671 /* The CC_OP value is no longer predictable. */
1672 set_cc_op(s, CC_OP_DYNAMIC);
b6abf97d
FB
1673}
1674
8cd6345d 1675static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
1676 int is_right)
1677{
4ba9938c 1678 int mask = (ot == MO_64 ? 0x3f : 0x1f);
34d80a55 1679 int shift;
8cd6345d 1680
1681 /* load */
1682 if (op1 == OR_TMP0) {
909be183 1683 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
8cd6345d 1684 } else {
34d80a55 1685 gen_op_mov_TN_reg(ot, 0, op1);
8cd6345d 1686 }
1687
8cd6345d 1688 op2 &= mask;
8cd6345d 1689 if (op2 != 0) {
34d80a55
RH
1690 switch (ot) {
1691#ifdef TARGET_X86_64
4ba9938c 1692 case MO_32:
34d80a55
RH
1693 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
1694 if (is_right) {
1695 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1696 } else {
1697 tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1698 }
1699 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
1700 break;
1701#endif
1702 default:
1703 if (is_right) {
1704 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], op2);
1705 } else {
1706 tcg_gen_rotli_tl(cpu_T[0], cpu_T[0], op2);
1707 }
1708 break;
4ba9938c 1709 case MO_8:
34d80a55
RH
1710 mask = 7;
1711 goto do_shifts;
4ba9938c 1712 case MO_16:
34d80a55
RH
1713 mask = 15;
1714 do_shifts:
1715 shift = op2 & mask;
1716 if (is_right) {
1717 shift = mask + 1 - shift;
1718 }
1719 gen_extu(ot, cpu_T[0]);
1720 tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], shift);
1721 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], mask + 1 - shift);
1722 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
1723 break;
8cd6345d 1724 }
8cd6345d 1725 }
1726
1727 /* store */
d4faa3e0 1728 gen_op_st_rm_T0_A0(s, ot, op1);
8cd6345d 1729
1730 if (op2 != 0) {
34d80a55 1731 /* Compute the flags into CC_SRC. */
d229edce 1732 gen_compute_eflags(s);
0ff6addd 1733
34d80a55
RH
1734 /* The value that was "rotated out" is now present at the other end
1735 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1736 since we've computed the flags into CC_SRC, these variables are
1737 currently dead. */
8cd6345d 1738 if (is_right) {
34d80a55
RH
1739 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
1740 tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
38ebb396 1741 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
34d80a55
RH
1742 } else {
1743 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
1744 tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
8cd6345d 1745 }
34d80a55
RH
1746 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1747 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1748 set_cc_op(s, CC_OP_ADCOX);
8cd6345d 1749 }
8cd6345d 1750}
1751
b6abf97d
FB
1752/* XXX: add faster immediate = 1 case */
1753static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
1754 int is_right)
1755{
d229edce 1756 gen_compute_eflags(s);
c7b3c873 1757 assert(s->cc_op == CC_OP_EFLAGS);
b6abf97d
FB
1758
1759 /* load */
1760 if (op1 == OR_TMP0)
909be183 1761 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
b6abf97d
FB
1762 else
1763 gen_op_mov_TN_reg(ot, 0, op1);
1764
a7812ae4
PB
1765 if (is_right) {
1766 switch (ot) {
4ba9938c 1767 case MO_8:
7923057b
BS
1768 gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1769 break;
4ba9938c 1770 case MO_16:
7923057b
BS
1771 gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1772 break;
4ba9938c 1773 case MO_32:
7923057b
BS
1774 gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1775 break;
a7812ae4 1776#ifdef TARGET_X86_64
4ba9938c 1777 case MO_64:
7923057b
BS
1778 gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1779 break;
a7812ae4
PB
1780#endif
1781 }
1782 } else {
1783 switch (ot) {
4ba9938c 1784 case MO_8:
7923057b
BS
1785 gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1786 break;
4ba9938c 1787 case MO_16:
7923057b
BS
1788 gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1789 break;
4ba9938c 1790 case MO_32:
7923057b
BS
1791 gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1792 break;
a7812ae4 1793#ifdef TARGET_X86_64
4ba9938c 1794 case MO_64:
7923057b
BS
1795 gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1796 break;
a7812ae4
PB
1797#endif
1798 }
1799 }
b6abf97d 1800 /* store */
d4faa3e0 1801 gen_op_st_rm_T0_A0(s, ot, op1);
b6abf97d
FB
1802}
1803
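/* Editorial note, not part of the upstream file: gen_shiftd_rm_T1 below
 * implements SHLD/SHRD (double-width shifts) with cpu_T[0] as the
 * destination and cpu_T[1] as the fill operand.  For 16-bit operands the
 * count is still masked with 0x1f, and counts above 16 follow the Intel
 * behaviour: the deposit builds a 32-bit T1:T0 value so that, in the
 * notation of the comment below, "shrdw C, B, A" shifts A:B:A right by C.
 * In the generic path the final movcond zeroes the T1 contribution when the
 * masked count is 0, leaving the destination unchanged in that case. */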
1804/* XXX: add faster immediate case */
3b9d3cf1 1805static void gen_shiftd_rm_T1(DisasContext *s, int ot, int op1,
f437d0a3 1806 bool is_right, TCGv count_in)
b6abf97d 1807{
4ba9938c 1808 target_ulong mask = (ot == MO_64 ? 63 : 31);
f437d0a3 1809 TCGv count;
b6abf97d
FB
1810
1811 /* load */
1e4840bf 1812 if (op1 == OR_TMP0) {
909be183 1813 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1e4840bf 1814 } else {
f437d0a3 1815 gen_op_mov_TN_reg(ot, 0, op1);
1e4840bf 1816 }
b6abf97d 1817
f437d0a3
RH
1818 count = tcg_temp_new();
1819 tcg_gen_andi_tl(count, count_in, mask);
1e4840bf 1820
f437d0a3 1821 switch (ot) {
4ba9938c 1822 case MO_16:
f437d0a3
RH
1823 /* Note: we implement the Intel behaviour for shift count > 16.
1824 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1825 portion by constructing it as a 32-bit value. */
b6abf97d 1826 if (is_right) {
f437d0a3
RH
1827 tcg_gen_deposit_tl(cpu_tmp0, cpu_T[0], cpu_T[1], 16, 16);
1828 tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
1829 tcg_gen_mov_tl(cpu_T[0], cpu_tmp0);
b6abf97d 1830 } else {
f437d0a3 1831 tcg_gen_deposit_tl(cpu_T[1], cpu_T[0], cpu_T[1], 16, 16);
b6abf97d 1832 }
f437d0a3
RH
1833 /* FALLTHRU */
1834#ifdef TARGET_X86_64
4ba9938c 1835 case MO_32:
f437d0a3
RH
1836 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1837 tcg_gen_subi_tl(cpu_tmp0, count, 1);
b6abf97d 1838 if (is_right) {
f437d0a3
RH
1839 tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
1840 tcg_gen_shr_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
1841 tcg_gen_shr_i64(cpu_T[0], cpu_T[0], count);
1842 } else {
1843 tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[1], cpu_T[0]);
1844 tcg_gen_shl_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
1845 tcg_gen_shl_i64(cpu_T[0], cpu_T[0], count);
1846 tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
1847 tcg_gen_shri_i64(cpu_T[0], cpu_T[0], 32);
1848 }
1849 break;
1850#endif
1851 default:
1852 tcg_gen_subi_tl(cpu_tmp0, count, 1);
1853 if (is_right) {
1854 tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
b6abf97d 1855
f437d0a3
RH
1856 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1857 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], count);
1858 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
b6abf97d 1859 } else {
f437d0a3 1860 tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
4ba9938c 1861 if (ot == MO_16) {
f437d0a3
RH
1862 /* Only needed if count > 16, for Intel behaviour. */
1863 tcg_gen_subfi_tl(cpu_tmp4, 33, count);
1864 tcg_gen_shr_tl(cpu_tmp4, cpu_T[1], cpu_tmp4);
1865 tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
1866 }
1867
1868 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1869 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], count);
1870 tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
b6abf97d 1871 }
f437d0a3
RH
1872 tcg_gen_movi_tl(cpu_tmp4, 0);
1873 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[1], count, cpu_tmp4,
1874 cpu_tmp4, cpu_T[1]);
1875 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1876 break;
b6abf97d 1877 }
b6abf97d 1878
b6abf97d 1879 /* store */
d4faa3e0 1880 gen_op_st_rm_T0_A0(s, ot, op1);
1e4840bf 1881
f437d0a3
RH
1882 gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, count, is_right);
1883 tcg_temp_free(count);
b6abf97d
FB
1884}
1885
1886static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
1887{
1888 if (s != OR_TMP1)
1889 gen_op_mov_TN_reg(ot, 1, s);
1890 switch(op) {
1891 case OP_ROL:
1892 gen_rot_rm_T1(s1, ot, d, 0);
1893 break;
1894 case OP_ROR:
1895 gen_rot_rm_T1(s1, ot, d, 1);
1896 break;
1897 case OP_SHL:
1898 case OP_SHL1:
1899 gen_shift_rm_T1(s1, ot, d, 0, 0);
1900 break;
1901 case OP_SHR:
1902 gen_shift_rm_T1(s1, ot, d, 1, 0);
1903 break;
1904 case OP_SAR:
1905 gen_shift_rm_T1(s1, ot, d, 1, 1);
1906 break;
1907 case OP_RCL:
1908 gen_rotc_rm_T1(s1, ot, d, 0);
1909 break;
1910 case OP_RCR:
1911 gen_rotc_rm_T1(s1, ot, d, 1);
1912 break;
1913 }
2c0262af
FB
1914}
1915
1916static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
1917{
c1c37968 1918 switch(op) {
8cd6345d 1919 case OP_ROL:
1920 gen_rot_rm_im(s1, ot, d, c, 0);
1921 break;
1922 case OP_ROR:
1923 gen_rot_rm_im(s1, ot, d, c, 1);
1924 break;
c1c37968
FB
1925 case OP_SHL:
1926 case OP_SHL1:
1927 gen_shift_rm_im(s1, ot, d, c, 0, 0);
1928 break;
1929 case OP_SHR:
1930 gen_shift_rm_im(s1, ot, d, c, 1, 0);
1931 break;
1932 case OP_SAR:
1933 gen_shift_rm_im(s1, ot, d, c, 1, 1);
1934 break;
1935 default:
1936 /* currently not optimized */
1937 gen_op_movl_T1_im(c);
1938 gen_shift(s1, op, ot, d, OR_TMP1);
1939 break;
1940 }
2c0262af
FB
1941}
1942
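/* Illustrative usage, not from the upstream file: the decoder in
 * disas_insn() reaches the two dispatchers above roughly as follows for an
 * immediate shift such as "shl $3, %eax" (group 2, /4, ib):
 *
 *     gen_shifti(s, OP_SHL, MO_32, R_EAX, 3);
 *
 * ROL/ROR/SHL/SHR/SAR with an immediate count take the dedicated *_im
 * paths; OP_RCL/OP_RCR fall through the default branch, load the count into
 * cpu_T[1] and reuse the generic gen_shift()/gen_rotc_rm_T1 code. */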
4eeb3939 1943static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
2c0262af 1944{
14ce26e7 1945 target_long disp;
2c0262af 1946 int havesib;
14ce26e7 1947 int base;
2c0262af
FB
1948 int index;
1949 int scale;
2c0262af 1950 int mod, rm, code, override, must_add_seg;
7865eec4 1951 TCGv sum;
2c0262af
FB
1952
1953 override = s->override;
1954 must_add_seg = s->addseg;
1955 if (override >= 0)
1956 must_add_seg = 1;
1957 mod = (modrm >> 6) & 3;
1958 rm = modrm & 7;
1959
1960 if (s->aflag) {
2c0262af
FB
1961 havesib = 0;
1962 base = rm;
7865eec4 1963 index = -1;
2c0262af 1964 scale = 0;
3b46e624 1965
2c0262af
FB
1966 if (base == 4) {
1967 havesib = 1;
0af10c86 1968 code = cpu_ldub_code(env, s->pc++);
2c0262af 1969 scale = (code >> 6) & 3;
14ce26e7 1970 index = ((code >> 3) & 7) | REX_X(s);
7865eec4
RH
1971 if (index == 4) {
1972 index = -1; /* no index */
1973 }
14ce26e7 1974 base = (code & 7);
2c0262af 1975 }
14ce26e7 1976 base |= REX_B(s);
2c0262af
FB
1977
1978 switch (mod) {
1979 case 0:
14ce26e7 1980 if ((base & 7) == 5) {
2c0262af 1981 base = -1;
0af10c86 1982 disp = (int32_t)cpu_ldl_code(env, s->pc);
2c0262af 1983 s->pc += 4;
14ce26e7
FB
1984 if (CODE64(s) && !havesib) {
1985 disp += s->pc + s->rip_offset;
1986 }
2c0262af
FB
1987 } else {
1988 disp = 0;
1989 }
1990 break;
1991 case 1:
0af10c86 1992 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2c0262af
FB
1993 break;
1994 default:
1995 case 2:
0af10c86 1996 disp = (int32_t)cpu_ldl_code(env, s->pc);
2c0262af
FB
1997 s->pc += 4;
1998 break;
1999 }
3b46e624 2000
7865eec4
RH
2001 /* For correct popl handling with esp. */
2002 if (base == R_ESP && s->popl_esp_hack) {
2003 disp += s->popl_esp_hack;
2004 }
2005
2006 /* Compute the address, with a minimum number of TCG ops. */
2007 TCGV_UNUSED(sum);
2008 if (index >= 0) {
2009 if (scale == 0) {
2010 sum = cpu_regs[index];
2011 } else {
2012 tcg_gen_shli_tl(cpu_A0, cpu_regs[index], scale);
2013 sum = cpu_A0;
14ce26e7 2014 }
7865eec4
RH
2015 if (base >= 0) {
2016 tcg_gen_add_tl(cpu_A0, sum, cpu_regs[base]);
2017 sum = cpu_A0;
14ce26e7 2018 }
7865eec4
RH
2019 } else if (base >= 0) {
2020 sum = cpu_regs[base];
2c0262af 2021 }
7865eec4
RH
2022 if (TCGV_IS_UNUSED(sum)) {
2023 tcg_gen_movi_tl(cpu_A0, disp);
2024 } else {
2025 tcg_gen_addi_tl(cpu_A0, sum, disp);
2c0262af 2026 }
7865eec4 2027
2c0262af
FB
2028 if (must_add_seg) {
2029 if (override < 0) {
7865eec4 2030 if (base == R_EBP || base == R_ESP) {
2c0262af 2031 override = R_SS;
7865eec4 2032 } else {
2c0262af 2033 override = R_DS;
7865eec4 2034 }
2c0262af 2035 }
7865eec4
RH
2036
2037 tcg_gen_ld_tl(cpu_tmp0, cpu_env,
2038 offsetof(CPUX86State, segs[override].base));
2039 if (CODE64(s)) {
2040 if (s->aflag != 2) {
2041 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
2042 }
2043 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
4eeb3939 2044 return;
14ce26e7 2045 }
7865eec4
RH
2046
2047 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
2048 }
2049
2050 if (s->aflag != 2) {
2051 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
2c0262af
FB
2052 }
2053 } else {
2054 switch (mod) {
2055 case 0:
2056 if (rm == 6) {
0af10c86 2057 disp = cpu_lduw_code(env, s->pc);
2c0262af
FB
2058 s->pc += 2;
2059 gen_op_movl_A0_im(disp);
2060 rm = 0; /* avoid SS override */
2061 goto no_rm;
2062 } else {
2063 disp = 0;
2064 }
2065 break;
2066 case 1:
0af10c86 2067 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2c0262af
FB
2068 break;
2069 default:
2070 case 2:
0af10c86 2071 disp = cpu_lduw_code(env, s->pc);
2c0262af
FB
2072 s->pc += 2;
2073 break;
2074 }
2075 switch(rm) {
2076 case 0:
57fec1fe
FB
2077 gen_op_movl_A0_reg(R_EBX);
2078 gen_op_addl_A0_reg_sN(0, R_ESI);
2c0262af
FB
2079 break;
2080 case 1:
57fec1fe
FB
2081 gen_op_movl_A0_reg(R_EBX);
2082 gen_op_addl_A0_reg_sN(0, R_EDI);
2c0262af
FB
2083 break;
2084 case 2:
57fec1fe
FB
2085 gen_op_movl_A0_reg(R_EBP);
2086 gen_op_addl_A0_reg_sN(0, R_ESI);
2c0262af
FB
2087 break;
2088 case 3:
57fec1fe
FB
2089 gen_op_movl_A0_reg(R_EBP);
2090 gen_op_addl_A0_reg_sN(0, R_EDI);
2c0262af
FB
2091 break;
2092 case 4:
57fec1fe 2093 gen_op_movl_A0_reg(R_ESI);
2c0262af
FB
2094 break;
2095 case 5:
57fec1fe 2096 gen_op_movl_A0_reg(R_EDI);
2c0262af
FB
2097 break;
2098 case 6:
57fec1fe 2099 gen_op_movl_A0_reg(R_EBP);
2c0262af
FB
2100 break;
2101 default:
2102 case 7:
57fec1fe 2103 gen_op_movl_A0_reg(R_EBX);
2c0262af
FB
2104 break;
2105 }
2106 if (disp != 0)
2107 gen_op_addl_A0_im(disp);
2108 gen_op_andl_A0_ffff();
2109 no_rm:
2110 if (must_add_seg) {
2111 if (override < 0) {
2112 if (rm == 2 || rm == 3 || rm == 6)
2113 override = R_SS;
2114 else
2115 override = R_DS;
2116 }
7162ab21 2117 gen_op_addl_A0_seg(s, override);
2c0262af
FB
2118 }
2119 }
2c0262af
FB
2120}
2121
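/* Editorial sketch, not from the upstream file: for a 32-bit SIB form such
 * as "lea 0x10(%eax,%ecx,4),%edx" (modrm 0x54, sib 0x88, disp8 0x10) the
 * function above emits, roughly:
 *
 *     tcg_gen_shli_tl(cpu_A0, cpu_regs[R_ECX], 2);      // index << scale
 *     tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_regs[R_EAX]);  // + base
 *     tcg_gen_addi_tl(cpu_A0, cpu_A0, 0x10);            // + displacement
 *
 * followed, when addseg requires it, by an add of the segment base and, for
 * 32-bit addressing, a final zero-extension of cpu_A0 to 32 bits. */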
0af10c86 2122static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
e17a36ce
FB
2123{
2124 int mod, rm, base, code;
2125
2126 mod = (modrm >> 6) & 3;
2127 if (mod == 3)
2128 return;
2129 rm = modrm & 7;
2130
2131 if (s->aflag) {
2132
2133 base = rm;
3b46e624 2134
e17a36ce 2135 if (base == 4) {
0af10c86 2136 code = cpu_ldub_code(env, s->pc++);
e17a36ce
FB
2137 base = (code & 7);
2138 }
3b46e624 2139
e17a36ce
FB
2140 switch (mod) {
2141 case 0:
2142 if (base == 5) {
2143 s->pc += 4;
2144 }
2145 break;
2146 case 1:
2147 s->pc++;
2148 break;
2149 default:
2150 case 2:
2151 s->pc += 4;
2152 break;
2153 }
2154 } else {
2155 switch (mod) {
2156 case 0:
2157 if (rm == 6) {
2158 s->pc += 2;
2159 }
2160 break;
2161 case 1:
2162 s->pc++;
2163 break;
2164 default:
2165 case 2:
2166 s->pc += 2;
2167 break;
2168 }
2169 }
2170}
2171
664e0f19
FB
2172/* used for LEA and MOV AX, mem */
2173static void gen_add_A0_ds_seg(DisasContext *s)
2174{
2175 int override, must_add_seg;
2176 must_add_seg = s->addseg;
2177 override = R_DS;
2178 if (s->override >= 0) {
2179 override = s->override;
2180 must_add_seg = 1;
664e0f19
FB
2181 }
2182 if (must_add_seg) {
8f091a59
FB
2183#ifdef TARGET_X86_64
2184 if (CODE64(s)) {
57fec1fe 2185 gen_op_addq_A0_seg(override);
5fafdf24 2186 } else
8f091a59
FB
2187#endif
2188 {
7162ab21 2189 gen_op_addl_A0_seg(s, override);
8f091a59 2190 }
664e0f19
FB
2191 }
2192}
2193
222a3336 2194/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2c0262af 2195 OR_TMP0 */
0af10c86
BS
2196static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2197 int ot, int reg, int is_store)
2c0262af 2198{
4eeb3939 2199 int mod, rm;
2c0262af
FB
2200
2201 mod = (modrm >> 6) & 3;
14ce26e7 2202 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
2203 if (mod == 3) {
2204 if (is_store) {
2205 if (reg != OR_TMP0)
57fec1fe
FB
2206 gen_op_mov_TN_reg(ot, 0, reg);
2207 gen_op_mov_reg_T0(ot, rm);
2c0262af 2208 } else {
57fec1fe 2209 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af 2210 if (reg != OR_TMP0)
57fec1fe 2211 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
2212 }
2213 } else {
4eeb3939 2214 gen_lea_modrm(env, s, modrm);
2c0262af
FB
2215 if (is_store) {
2216 if (reg != OR_TMP0)
57fec1fe 2217 gen_op_mov_TN_reg(ot, 0, reg);
fd8ca9f6 2218 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 2219 } else {
909be183 2220 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 2221 if (reg != OR_TMP0)
57fec1fe 2222 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
2223 }
2224 }
2225}
2226
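/* Illustrative usage, not from the upstream file: most two-operand
 * instructions fetch their r/m operand with
 *
 *     gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);   // load into cpu_T[0]
 *
 * which either copies a register or runs gen_lea_modrm() plus a memory
 * load, and then write the result back with is_store = 1.  Passing a real
 * register index instead of OR_TMP0 copies that register to or from the
 * r/m operand (via cpu_T[0]). */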
0af10c86 2227static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, int ot)
2c0262af
FB
2228{
2229 uint32_t ret;
2230
2231 switch(ot) {
4ba9938c 2232 case MO_8:
0af10c86 2233 ret = cpu_ldub_code(env, s->pc);
2c0262af
FB
2234 s->pc++;
2235 break;
4ba9938c 2236 case MO_16:
0af10c86 2237 ret = cpu_lduw_code(env, s->pc);
2c0262af
FB
2238 s->pc += 2;
2239 break;
2240 default:
4ba9938c 2241 case MO_32:
0af10c86 2242 ret = cpu_ldl_code(env, s->pc);
2c0262af
FB
2243 s->pc += 4;
2244 break;
2245 }
2246 return ret;
2247}
2248
14ce26e7
FB
2249static inline int insn_const_size(unsigned int ot)
2250{
4ba9938c 2251 if (ot <= MO_32) {
14ce26e7 2252 return 1 << ot;
4ba9938c 2253 } else {
14ce26e7 2254 return 4;
4ba9938c 2255 }
14ce26e7
FB
2256}
2257
6e256c93
FB
2258static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2259{
2260 TranslationBlock *tb;
2261 target_ulong pc;
2262
2263 pc = s->cs_base + eip;
2264 tb = s->tb;
2265 /* NOTE: we handle the case where the TB spans two pages here */
2266 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2267 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2268 /* jump to same page: we can use a direct jump */
57fec1fe 2269 tcg_gen_goto_tb(tb_num);
6e256c93 2270 gen_jmp_im(eip);
8cfd0495 2271 tcg_gen_exit_tb((uintptr_t)tb + tb_num);
6e256c93
FB
2272 } else {
2273 /* jump to another page: currently not optimized */
2274 gen_jmp_im(eip);
2275 gen_eob(s);
2276 }
2277}
2278
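/* Editorial note, not part of the upstream file: gen_goto_tb chains
 * translation blocks.  When the target eip stays on the same guest page as
 * the current TB (or the page the current instruction ends on), it emits
 * tcg_gen_goto_tb() + tcg_gen_exit_tb((uintptr_t)tb + tb_num), which lets
 * the epilogue patch a direct jump to the successor TB.  Cross-page targets
 * fall back to gen_jmp_im() + gen_eob(), returning to the main loop so the
 * new page can be checked and translated. */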
5fafdf24 2279static inline void gen_jcc(DisasContext *s, int b,
14ce26e7 2280 target_ulong val, target_ulong next_eip)
2c0262af 2281{
b27fc131 2282 int l1, l2;
3b46e624 2283
2c0262af 2284 if (s->jmp_opt) {
14ce26e7 2285 l1 = gen_new_label();
b27fc131 2286 gen_jcc1(s, b, l1);
dc259201 2287
6e256c93 2288 gen_goto_tb(s, 0, next_eip);
14ce26e7
FB
2289
2290 gen_set_label(l1);
6e256c93 2291 gen_goto_tb(s, 1, val);
5779406a 2292 s->is_jmp = DISAS_TB_JUMP;
2c0262af 2293 } else {
14ce26e7
FB
2294 l1 = gen_new_label();
2295 l2 = gen_new_label();
b27fc131 2296 gen_jcc1(s, b, l1);
8e1c85e3 2297
14ce26e7 2298 gen_jmp_im(next_eip);
8e1c85e3
FB
2299 tcg_gen_br(l2);
2300
14ce26e7
FB
2301 gen_set_label(l1);
2302 gen_jmp_im(val);
2303 gen_set_label(l2);
2c0262af
FB
2304 gen_eob(s);
2305 }
2306}
2307
f32d3781
PB
2308static void gen_cmovcc1(CPUX86State *env, DisasContext *s, int ot, int b,
2309 int modrm, int reg)
2310{
57eb0cc8 2311 CCPrepare cc;
f32d3781 2312
57eb0cc8 2313 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
f32d3781 2314
57eb0cc8
RH
2315 cc = gen_prepare_cc(s, b, cpu_T[1]);
2316 if (cc.mask != -1) {
2317 TCGv t0 = tcg_temp_new();
2318 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2319 cc.reg = t0;
2320 }
2321 if (!cc.use_reg2) {
2322 cc.reg2 = tcg_const_tl(cc.imm);
f32d3781
PB
2323 }
2324
57eb0cc8
RH
2325 tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
2326 cpu_T[0], cpu_regs[reg]);
2327 gen_op_mov_reg_T0(ot, reg);
2328
2329 if (cc.mask != -1) {
2330 tcg_temp_free(cc.reg);
2331 }
2332 if (!cc.use_reg2) {
2333 tcg_temp_free(cc.reg2);
2334 }
f32d3781
PB
2335}
2336
3bd7da9e
FB
2337static inline void gen_op_movl_T0_seg(int seg_reg)
2338{
2339 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2340 offsetof(CPUX86State,segs[seg_reg].selector));
2341}
2342
2343static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2344{
2345 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2346 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2347 offsetof(CPUX86State,segs[seg_reg].selector));
2348 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2349 tcg_gen_st_tl(cpu_T[0], cpu_env,
2350 offsetof(CPUX86State,segs[seg_reg].base));
2351}
2352
2c0262af
FB
2353/* move T0 to seg_reg and compute if the CPU state may change. Never
2354 call this function with seg_reg == R_CS */
14ce26e7 2355static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2c0262af 2356{
3415a4dd
FB
2357 if (s->pe && !s->vm86) {
2358 /* XXX: optimize by finding processor state dynamically */
773cdfcc 2359 gen_update_cc_op(s);
14ce26e7 2360 gen_jmp_im(cur_eip);
b6abf97d 2361 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 2362 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
dc196a57
FB
2363 /* abort translation because the addseg value may change or
2364 because ss32 may change. For R_SS, translation must always
2365 stop as a special handling must be done to disable hardware
2366 interrupts for the next instruction */
2367 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
5779406a 2368 s->is_jmp = DISAS_TB_JUMP;
3415a4dd 2369 } else {
3bd7da9e 2370 gen_op_movl_seg_T0_vm(seg_reg);
dc196a57 2371 if (seg_reg == R_SS)
5779406a 2372 s->is_jmp = DISAS_TB_JUMP;
3415a4dd 2373 }
2c0262af
FB
2374}
2375
0573fbfc
TS
2376static inline int svm_is_rep(int prefixes)
2377{
2378 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2379}
2380
872929aa 2381static inline void
0573fbfc 2382gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
b8b6a50b 2383 uint32_t type, uint64_t param)
0573fbfc 2384{
872929aa
FB
2385 /* no SVM activated; fast case */
2386 if (likely(!(s->flags & HF_SVMI_MASK)))
2387 return;
773cdfcc 2388 gen_update_cc_op(s);
872929aa 2389 gen_jmp_im(pc_start - s->cs_base);
052e80d5 2390 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
a7812ae4 2391 tcg_const_i64(param));
0573fbfc
TS
2392}
2393
872929aa 2394static inline void
0573fbfc
TS
2395gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2396{
872929aa 2397 gen_svm_check_intercept_param(s, pc_start, type, 0);
0573fbfc
TS
2398}
2399
4f31916f
FB
2400static inline void gen_stack_update(DisasContext *s, int addend)
2401{
14ce26e7
FB
2402#ifdef TARGET_X86_64
2403 if (CODE64(s)) {
6e0d8677 2404 gen_op_add_reg_im(2, R_ESP, addend);
14ce26e7
FB
2405 } else
2406#endif
4f31916f 2407 if (s->ss32) {
6e0d8677 2408 gen_op_add_reg_im(1, R_ESP, addend);
4f31916f 2409 } else {
6e0d8677 2410 gen_op_add_reg_im(0, R_ESP, addend);
4f31916f
FB
2411 }
2412}
2413
2c0262af
FB
2414/* generate a push. It depends on ss32, addseg and dflag */
2415static void gen_push_T0(DisasContext *s)
2416{
14ce26e7
FB
2417#ifdef TARGET_X86_64
2418 if (CODE64(s)) {
57fec1fe 2419 gen_op_movq_A0_reg(R_ESP);
8f091a59 2420 if (s->dflag) {
57fec1fe 2421 gen_op_addq_A0_im(-8);
fd8ca9f6 2422 gen_op_st_v(s, MO_64, cpu_T[0], cpu_A0);
8f091a59 2423 } else {
57fec1fe 2424 gen_op_addq_A0_im(-2);
fd8ca9f6 2425 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
8f091a59 2426 }
57fec1fe 2427 gen_op_mov_reg_A0(2, R_ESP);
5fafdf24 2428 } else
14ce26e7
FB
2429#endif
2430 {
57fec1fe 2431 gen_op_movl_A0_reg(R_ESP);
14ce26e7 2432 if (!s->dflag)
57fec1fe 2433 gen_op_addl_A0_im(-2);
14ce26e7 2434 else
57fec1fe 2435 gen_op_addl_A0_im(-4);
14ce26e7
FB
2436 if (s->ss32) {
2437 if (s->addseg) {
bbf662ee 2438 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
7162ab21 2439 gen_op_addl_A0_seg(s, R_SS);
14ce26e7
FB
2440 }
2441 } else {
2442 gen_op_andl_A0_ffff();
bbf662ee 2443 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
7162ab21 2444 gen_op_addl_A0_seg(s, R_SS);
2c0262af 2445 }
fd8ca9f6 2446 gen_op_st_v(s, s->dflag + 1, cpu_T[0], cpu_A0);
14ce26e7 2447 if (s->ss32 && !s->addseg)
57fec1fe 2448 gen_op_mov_reg_A0(1, R_ESP);
14ce26e7 2449 else
57fec1fe 2450 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
2c0262af
FB
2451 }
2452}
2453
4f31916f
FB
2454/* generate a push. It depends on ss32, addseg and dflag */
2455/* slower version for T1, only used for call Ev */
2456static void gen_push_T1(DisasContext *s)
2c0262af 2457{
14ce26e7
FB
2458#ifdef TARGET_X86_64
2459 if (CODE64(s)) {
57fec1fe 2460 gen_op_movq_A0_reg(R_ESP);
8f091a59 2461 if (s->dflag) {
57fec1fe 2462 gen_op_addq_A0_im(-8);
b5afc104 2463 gen_op_st_v(s, MO_64, cpu_T[1], cpu_A0);
8f091a59 2464 } else {
57fec1fe 2465 gen_op_addq_A0_im(-2);
ee3138da 2466 gen_op_st_v(s, MO_16, cpu_T[1], cpu_A0);
8f091a59 2467 }
57fec1fe 2468 gen_op_mov_reg_A0(2, R_ESP);
5fafdf24 2469 } else
14ce26e7
FB
2470#endif
2471 {
57fec1fe 2472 gen_op_movl_A0_reg(R_ESP);
14ce26e7 2473 if (!s->dflag)
57fec1fe 2474 gen_op_addl_A0_im(-2);
14ce26e7 2475 else
57fec1fe 2476 gen_op_addl_A0_im(-4);
14ce26e7
FB
2477 if (s->ss32) {
2478 if (s->addseg) {
7162ab21 2479 gen_op_addl_A0_seg(s, R_SS);
14ce26e7
FB
2480 }
2481 } else {
2482 gen_op_andl_A0_ffff();
7162ab21 2483 gen_op_addl_A0_seg(s, R_SS);
2c0262af 2484 }
b5afc104 2485 gen_op_st_v(s, s->dflag + 1, cpu_T[1], cpu_A0);
3b46e624 2486
14ce26e7 2487 if (s->ss32 && !s->addseg)
57fec1fe 2488 gen_op_mov_reg_A0(1, R_ESP);
14ce26e7
FB
2489 else
2490 gen_stack_update(s, (-2) << s->dflag);
2c0262af
FB
2491 }
2492}
2493
4f31916f
FB
2494/* two step pop is necessary for precise exceptions */
2495static void gen_pop_T0(DisasContext *s)
2c0262af 2496{
14ce26e7
FB
2497#ifdef TARGET_X86_64
2498 if (CODE64(s)) {
57fec1fe 2499 gen_op_movq_A0_reg(R_ESP);
909be183 2500 gen_op_ld_v(s, s->dflag ? MO_64 : MO_16, cpu_T[0], cpu_A0);
5fafdf24 2501 } else
14ce26e7
FB
2502#endif
2503 {
57fec1fe 2504 gen_op_movl_A0_reg(R_ESP);
14ce26e7
FB
2505 if (s->ss32) {
2506 if (s->addseg)
7162ab21 2507 gen_op_addl_A0_seg(s, R_SS);
14ce26e7
FB
2508 } else {
2509 gen_op_andl_A0_ffff();
7162ab21 2510 gen_op_addl_A0_seg(s, R_SS);
14ce26e7 2511 }
909be183 2512 gen_op_ld_v(s, s->dflag + 1, cpu_T[0], cpu_A0);
2c0262af
FB
2513 }
2514}
2515
2516static void gen_pop_update(DisasContext *s)
2517{
14ce26e7 2518#ifdef TARGET_X86_64
8f091a59 2519 if (CODE64(s) && s->dflag) {
14ce26e7
FB
2520 gen_stack_update(s, 8);
2521 } else
2522#endif
2523 {
2524 gen_stack_update(s, 2 << s->dflag);
2525 }
2c0262af
FB
2526}
2527
2528static void gen_stack_A0(DisasContext *s)
2529{
57fec1fe 2530 gen_op_movl_A0_reg(R_ESP);
2c0262af
FB
2531 if (!s->ss32)
2532 gen_op_andl_A0_ffff();
bbf662ee 2533 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2c0262af 2534 if (s->addseg)
7162ab21 2535 gen_op_addl_A0_seg(s, R_SS);
2c0262af
FB
2536}
2537
2538/* NOTE: wrap around in 16 bit not fully handled */
2539static void gen_pusha(DisasContext *s)
2540{
2541 int i;
57fec1fe 2542 gen_op_movl_A0_reg(R_ESP);
2c0262af
FB
2543 gen_op_addl_A0_im(-16 << s->dflag);
2544 if (!s->ss32)
2545 gen_op_andl_A0_ffff();
bbf662ee 2546 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2c0262af 2547 if (s->addseg)
7162ab21 2548 gen_op_addl_A0_seg(s, R_SS);
2c0262af 2549 for(i = 0;i < 8; i++) {
4ba9938c 2550 gen_op_mov_TN_reg(MO_32, 0, 7 - i);
fd8ca9f6 2551 gen_op_st_v(s, MO_16 + s->dflag, cpu_T[0], cpu_A0);
2c0262af
FB
2552 gen_op_addl_A0_im(2 << s->dflag);
2553 }
4ba9938c 2554 gen_op_mov_reg_T1(MO_16 + s->ss32, R_ESP);
2c0262af
FB
2555}
2556
2557/* NOTE: wrap around in 16 bit not fully handled */
2558static void gen_popa(DisasContext *s)
2559{
2560 int i;
57fec1fe 2561 gen_op_movl_A0_reg(R_ESP);
2c0262af
FB
2562 if (!s->ss32)
2563 gen_op_andl_A0_ffff();
bbf662ee
FB
2564 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2565 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2c0262af 2566 if (s->addseg)
7162ab21 2567 gen_op_addl_A0_seg(s, R_SS);
2c0262af
FB
2568 for(i = 0;i < 8; i++) {
2569 /* ESP is not reloaded */
2570 if (i != 3) {
909be183 2571 gen_op_ld_v(s, MO_16 + s->dflag, cpu_T[0], cpu_A0);
4ba9938c 2572 gen_op_mov_reg_T0(MO_16 + s->dflag, 7 - i);
2c0262af
FB
2573 }
2574 gen_op_addl_A0_im(2 << s->dflag);
2575 }
4ba9938c 2576 gen_op_mov_reg_T1(MO_16 + s->ss32, R_ESP);
2c0262af
FB
2577}
2578
2c0262af
FB
2579static void gen_enter(DisasContext *s, int esp_addend, int level)
2580{
61a8c4ec 2581 int ot, opsize;
2c0262af 2582
2c0262af 2583 level &= 0x1f;
8f091a59
FB
2584#ifdef TARGET_X86_64
2585 if (CODE64(s)) {
4ba9938c 2586 ot = s->dflag ? MO_64 : MO_16;
8f091a59 2587 opsize = 1 << ot;
3b46e624 2588
57fec1fe 2589 gen_op_movl_A0_reg(R_ESP);
8f091a59 2590 gen_op_addq_A0_im(-opsize);
bbf662ee 2591 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
8f091a59
FB
2592
2593 /* push bp */
4ba9938c 2594 gen_op_mov_TN_reg(MO_32, 0, R_EBP);
fd8ca9f6 2595 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
8f091a59 2596 if (level) {
b5b38f61 2597 /* XXX: must save state */
2999a0b2 2598 gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
4ba9938c 2599 tcg_const_i32((ot == MO_64)),
a7812ae4 2600 cpu_T[1]);
8f091a59 2601 }
57fec1fe 2602 gen_op_mov_reg_T1(ot, R_EBP);
bbf662ee 2603 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
4ba9938c 2604 gen_op_mov_reg_T1(MO_64, R_ESP);
5fafdf24 2605 } else
8f091a59
FB
2606#endif
2607 {
4ba9938c 2608 ot = s->dflag + MO_16;
8f091a59 2609 opsize = 2 << s->dflag;
3b46e624 2610
57fec1fe 2611 gen_op_movl_A0_reg(R_ESP);
8f091a59
FB
2612 gen_op_addl_A0_im(-opsize);
2613 if (!s->ss32)
2614 gen_op_andl_A0_ffff();
bbf662ee 2615 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
8f091a59 2616 if (s->addseg)
7162ab21 2617 gen_op_addl_A0_seg(s, R_SS);
8f091a59 2618 /* push bp */
4ba9938c 2619 gen_op_mov_TN_reg(MO_32, 0, R_EBP);
fd8ca9f6 2620 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
8f091a59 2621 if (level) {
b5b38f61 2622 /* XXX: must save state */
2999a0b2 2623 gen_helper_enter_level(cpu_env, tcg_const_i32(level),
a7812ae4
PB
2624 tcg_const_i32(s->dflag),
2625 cpu_T[1]);
8f091a59 2626 }
57fec1fe 2627 gen_op_mov_reg_T1(ot, R_EBP);
bbf662ee 2628 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
4ba9938c 2629 gen_op_mov_reg_T1(MO_16 + s->ss32, R_ESP);
2c0262af 2630 }
2c0262af
FB
2631}
2632
14ce26e7 2633static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2c0262af 2634{
773cdfcc 2635 gen_update_cc_op(s);
14ce26e7 2636 gen_jmp_im(cur_eip);
77b2bc2c 2637 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
5779406a 2638 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2639}
2640
2641/* an interrupt is different from an exception because of the
7f75ffd3 2642 privilege checks */
5fafdf24 2643static void gen_interrupt(DisasContext *s, int intno,
14ce26e7 2644 target_ulong cur_eip, target_ulong next_eip)
2c0262af 2645{
773cdfcc 2646 gen_update_cc_op(s);
14ce26e7 2647 gen_jmp_im(cur_eip);
77b2bc2c 2648 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
a7812ae4 2649 tcg_const_i32(next_eip - cur_eip));
5779406a 2650 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2651}
2652
14ce26e7 2653static void gen_debug(DisasContext *s, target_ulong cur_eip)
2c0262af 2654{
773cdfcc 2655 gen_update_cc_op(s);
14ce26e7 2656 gen_jmp_im(cur_eip);
4a7443be 2657 gen_helper_debug(cpu_env);
5779406a 2658 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2659}
2660
2661/* generate a generic end of block. Trace exception is also generated
2662 if needed */
2663static void gen_eob(DisasContext *s)
2664{
773cdfcc 2665 gen_update_cc_op(s);
a2cc3b24 2666 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
f0967a1a 2667 gen_helper_reset_inhibit_irq(cpu_env);
a2cc3b24 2668 }
a2397807 2669 if (s->tb->flags & HF_RF_MASK) {
f0967a1a 2670 gen_helper_reset_rf(cpu_env);
a2397807 2671 }
34865134 2672 if (s->singlestep_enabled) {
4a7443be 2673 gen_helper_debug(cpu_env);
34865134 2674 } else if (s->tf) {
4a7443be 2675 gen_helper_single_step(cpu_env);
2c0262af 2676 } else {
57fec1fe 2677 tcg_gen_exit_tb(0);
2c0262af 2678 }
5779406a 2679 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2680}
2681
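/* Editorial note, not part of the upstream file: gen_eob is the common
 * "end of translation block" tail.  It flushes the lazy cc_op state, drops
 * the interrupt-inhibit and RF flags if the block was entered with them
 * set, and then either calls the debug/single-step helpers (gdbstub
 * single-stepping or TF) or exits with tcg_gen_exit_tb(0), i.e. without
 * chaining to a successor block. */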
2682/* generate a jump to eip. No segment change must happen before as a
2683 direct call to the next block may occur */
14ce26e7 2684static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2c0262af 2685{
a3251186
RH
2686 gen_update_cc_op(s);
2687 set_cc_op(s, CC_OP_DYNAMIC);
2c0262af 2688 if (s->jmp_opt) {
6e256c93 2689 gen_goto_tb(s, tb_num, eip);
5779406a 2690 s->is_jmp = DISAS_TB_JUMP;
2c0262af 2691 } else {
14ce26e7 2692 gen_jmp_im(eip);
2c0262af
FB
2693 gen_eob(s);
2694 }
2695}
2696
14ce26e7
FB
2697static void gen_jmp(DisasContext *s, target_ulong eip)
2698{
2699 gen_jmp_tb(s, eip, 0);
2700}
2701
323d1876 2702static inline void gen_ldq_env_A0(DisasContext *s, int offset)
8686c490 2703{
3c5f4116 2704 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
b6abf97d 2705 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
8686c490 2706}
664e0f19 2707
323d1876 2708static inline void gen_stq_env_A0(DisasContext *s, int offset)
8686c490 2709{
b6abf97d 2710 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
3523e4bd 2711 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
8686c490 2712}
664e0f19 2713
323d1876 2714static inline void gen_ldo_env_A0(DisasContext *s, int offset)
8686c490 2715{
5c42a7cd 2716 int mem_index = s->mem_index;
3c5f4116 2717 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
b6abf97d 2718 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
8686c490 2719 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
3c5f4116 2720 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
b6abf97d 2721 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
8686c490 2722}
14ce26e7 2723
323d1876 2724static inline void gen_sto_env_A0(DisasContext *s, int offset)
8686c490 2725{
5c42a7cd 2726 int mem_index = s->mem_index;
b6abf97d 2727 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
3523e4bd 2728 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
8686c490 2729 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
b6abf97d 2730 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
3523e4bd 2731 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
8686c490 2732}
14ce26e7 2733
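/* Editorial note, not part of the upstream file: the helpers above move
 * MMX/SSE data through cpu_tmp1_i64.  gen_ldq/gen_stq transfer one 64-bit
 * quantity; gen_ldo/gen_sto transfer a full 128-bit XMM register as two
 * little-endian 64-bit accesses, low half first (XMM_Q(0)) and high half at
 * cpu_A0 + 8 (XMM_Q(1)).  No atomicity is implied by the split. */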
5af45186
FB
2734static inline void gen_op_movo(int d_offset, int s_offset)
2735{
b6abf97d
FB
2736 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2737 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2738 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2739 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
5af45186
FB
2740}
2741
2742static inline void gen_op_movq(int d_offset, int s_offset)
2743{
b6abf97d
FB
2744 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2745 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
5af45186
FB
2746}
2747
2748static inline void gen_op_movl(int d_offset, int s_offset)
2749{
b6abf97d
FB
2750 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2751 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
5af45186
FB
2752}
2753
2754static inline void gen_op_movq_env_0(int d_offset)
2755{
b6abf97d
FB
2756 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2757 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
5af45186 2758}
664e0f19 2759
d3eb5eae
BS
2760typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2761typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2762typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2763typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2764typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2765typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2766 TCGv_i32 val);
c4baa050 2767typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
d3eb5eae
BS
2768typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2769 TCGv val);
c4baa050 2770
5af45186
FB
2771#define SSE_SPECIAL ((void *)1)
2772#define SSE_DUMMY ((void *)2)
664e0f19 2773
a7812ae4
PB
2774#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2775#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2776 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
5af45186 2777
d3eb5eae 2778static const SSEFunc_0_epp sse_op_table1[256][4] = {
a35f3ec7
AJ
2779 /* 3DNow! extensions */
2780 [0x0e] = { SSE_DUMMY }, /* femms */
2781 [0x0f] = { SSE_DUMMY }, /* pf... */
664e0f19
FB
2782 /* pure SSE operations */
2783 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2784 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
465e9838 2785 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
664e0f19 2786 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
a7812ae4
PB
2787 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2788 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
664e0f19
FB
2789 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2790 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2791
2792 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2793 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2794 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
d9f4bb27 2795 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
664e0f19
FB
2796 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2797 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
a7812ae4
PB
2798 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2799 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
664e0f19
FB
2800 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2801 [0x51] = SSE_FOP(sqrt),
a7812ae4
PB
2802 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2803 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2804 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2805 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2806 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2807 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
664e0f19
FB
2808 [0x58] = SSE_FOP(add),
2809 [0x59] = SSE_FOP(mul),
a7812ae4
PB
2810 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2811 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2812 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
664e0f19
FB
2813 [0x5c] = SSE_FOP(sub),
2814 [0x5d] = SSE_FOP(min),
2815 [0x5e] = SSE_FOP(div),
2816 [0x5f] = SSE_FOP(max),
2817
2818 [0xc2] = SSE_FOP(cmpeq),
d3eb5eae
BS
2819 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2820 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
664e0f19 2821
7073fbad
RH
2822 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2823 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2824 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
4242b1bd 2825
664e0f19
FB
2826 /* MMX ops and their SSE extensions */
2827 [0x60] = MMX_OP2(punpcklbw),
2828 [0x61] = MMX_OP2(punpcklwd),
2829 [0x62] = MMX_OP2(punpckldq),
2830 [0x63] = MMX_OP2(packsswb),
2831 [0x64] = MMX_OP2(pcmpgtb),
2832 [0x65] = MMX_OP2(pcmpgtw),
2833 [0x66] = MMX_OP2(pcmpgtl),
2834 [0x67] = MMX_OP2(packuswb),
2835 [0x68] = MMX_OP2(punpckhbw),
2836 [0x69] = MMX_OP2(punpckhwd),
2837 [0x6a] = MMX_OP2(punpckhdq),
2838 [0x6b] = MMX_OP2(packssdw),
a7812ae4
PB
2839 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2840 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
664e0f19
FB
2841 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
 2842 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
d3eb5eae
BS
2843 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2844 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2845 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2846 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
664e0f19
FB
2847 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2848 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2849 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2850 [0x74] = MMX_OP2(pcmpeqb),
2851 [0x75] = MMX_OP2(pcmpeqw),
2852 [0x76] = MMX_OP2(pcmpeql),
a35f3ec7 2853 [0x77] = { SSE_DUMMY }, /* emms */
d9f4bb27
AP
2854 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2855 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
a7812ae4
PB
2856 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2857 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
664e0f19
FB
2858 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
2859 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2860 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2861 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
a7812ae4 2862 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
664e0f19
FB
2863 [0xd1] = MMX_OP2(psrlw),
2864 [0xd2] = MMX_OP2(psrld),
2865 [0xd3] = MMX_OP2(psrlq),
2866 [0xd4] = MMX_OP2(paddq),
2867 [0xd5] = MMX_OP2(pmullw),
2868 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2869 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2870 [0xd8] = MMX_OP2(psubusb),
2871 [0xd9] = MMX_OP2(psubusw),
2872 [0xda] = MMX_OP2(pminub),
2873 [0xdb] = MMX_OP2(pand),
2874 [0xdc] = MMX_OP2(paddusb),
2875 [0xdd] = MMX_OP2(paddusw),
2876 [0xde] = MMX_OP2(pmaxub),
2877 [0xdf] = MMX_OP2(pandn),
2878 [0xe0] = MMX_OP2(pavgb),
2879 [0xe1] = MMX_OP2(psraw),
2880 [0xe2] = MMX_OP2(psrad),
2881 [0xe3] = MMX_OP2(pavgw),
2882 [0xe4] = MMX_OP2(pmulhuw),
2883 [0xe5] = MMX_OP2(pmulhw),
a7812ae4 2884 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
664e0f19
FB
 2885 [0xe7] = { SSE_SPECIAL , SSE_SPECIAL }, /* movntq, movntdq */
2886 [0xe8] = MMX_OP2(psubsb),
2887 [0xe9] = MMX_OP2(psubsw),
2888 [0xea] = MMX_OP2(pminsw),
2889 [0xeb] = MMX_OP2(por),
2890 [0xec] = MMX_OP2(paddsb),
2891 [0xed] = MMX_OP2(paddsw),
2892 [0xee] = MMX_OP2(pmaxsw),
2893 [0xef] = MMX_OP2(pxor),
465e9838 2894 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
664e0f19
FB
2895 [0xf1] = MMX_OP2(psllw),
2896 [0xf2] = MMX_OP2(pslld),
2897 [0xf3] = MMX_OP2(psllq),
2898 [0xf4] = MMX_OP2(pmuludq),
2899 [0xf5] = MMX_OP2(pmaddwd),
2900 [0xf6] = MMX_OP2(psadbw),
d3eb5eae
BS
2901 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
2902 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
664e0f19
FB
2903 [0xf8] = MMX_OP2(psubb),
2904 [0xf9] = MMX_OP2(psubw),
2905 [0xfa] = MMX_OP2(psubl),
2906 [0xfb] = MMX_OP2(psubq),
2907 [0xfc] = MMX_OP2(paddb),
2908 [0xfd] = MMX_OP2(paddw),
2909 [0xfe] = MMX_OP2(paddl),
2910};
2911
d3eb5eae 2912static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
664e0f19
FB
2913 [0 + 2] = MMX_OP2(psrlw),
2914 [0 + 4] = MMX_OP2(psraw),
2915 [0 + 6] = MMX_OP2(psllw),
2916 [8 + 2] = MMX_OP2(psrld),
2917 [8 + 4] = MMX_OP2(psrad),
2918 [8 + 6] = MMX_OP2(pslld),
2919 [16 + 2] = MMX_OP2(psrlq),
a7812ae4 2920 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
664e0f19 2921 [16 + 6] = MMX_OP2(psllq),
a7812ae4 2922 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
664e0f19
FB
2923};
2924
d3eb5eae 2925static const SSEFunc_0_epi sse_op_table3ai[] = {
a7812ae4 2926 gen_helper_cvtsi2ss,
11f8cdbc 2927 gen_helper_cvtsi2sd
c4baa050 2928};
a7812ae4 2929
11f8cdbc 2930#ifdef TARGET_X86_64
d3eb5eae 2931static const SSEFunc_0_epl sse_op_table3aq[] = {
11f8cdbc
SW
2932 gen_helper_cvtsq2ss,
2933 gen_helper_cvtsq2sd
2934};
2935#endif
2936
d3eb5eae 2937static const SSEFunc_i_ep sse_op_table3bi[] = {
a7812ae4 2938 gen_helper_cvttss2si,
a7812ae4 2939 gen_helper_cvtss2si,
bedc2ac1 2940 gen_helper_cvttsd2si,
11f8cdbc 2941 gen_helper_cvtsd2si
664e0f19 2942};
3b46e624 2943
11f8cdbc 2944#ifdef TARGET_X86_64
d3eb5eae 2945static const SSEFunc_l_ep sse_op_table3bq[] = {
11f8cdbc 2946 gen_helper_cvttss2sq,
11f8cdbc 2947 gen_helper_cvtss2sq,
bedc2ac1 2948 gen_helper_cvttsd2sq,
11f8cdbc
SW
2949 gen_helper_cvtsd2sq
2950};
2951#endif
2952
d3eb5eae 2953static const SSEFunc_0_epp sse_op_table4[8][4] = {
664e0f19
FB
2954 SSE_FOP(cmpeq),
2955 SSE_FOP(cmplt),
2956 SSE_FOP(cmple),
2957 SSE_FOP(cmpunord),
2958 SSE_FOP(cmpneq),
2959 SSE_FOP(cmpnlt),
2960 SSE_FOP(cmpnle),
2961 SSE_FOP(cmpord),
2962};
3b46e624 2963
d3eb5eae 2964static const SSEFunc_0_epp sse_op_table5[256] = {
a7812ae4
PB
2965 [0x0c] = gen_helper_pi2fw,
2966 [0x0d] = gen_helper_pi2fd,
2967 [0x1c] = gen_helper_pf2iw,
2968 [0x1d] = gen_helper_pf2id,
2969 [0x8a] = gen_helper_pfnacc,
2970 [0x8e] = gen_helper_pfpnacc,
2971 [0x90] = gen_helper_pfcmpge,
2972 [0x94] = gen_helper_pfmin,
2973 [0x96] = gen_helper_pfrcp,
2974 [0x97] = gen_helper_pfrsqrt,
2975 [0x9a] = gen_helper_pfsub,
2976 [0x9e] = gen_helper_pfadd,
2977 [0xa0] = gen_helper_pfcmpgt,
2978 [0xa4] = gen_helper_pfmax,
2979 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2980 [0xa7] = gen_helper_movq, /* pfrsqit1 */
2981 [0xaa] = gen_helper_pfsubr,
2982 [0xae] = gen_helper_pfacc,
2983 [0xb0] = gen_helper_pfcmpeq,
2984 [0xb4] = gen_helper_pfmul,
2985 [0xb6] = gen_helper_movq, /* pfrcpit2 */
2986 [0xb7] = gen_helper_pmulhrw_mmx,
2987 [0xbb] = gen_helper_pswapd,
2988 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
a35f3ec7
AJ
2989};
2990
d3eb5eae
BS
2991struct SSEOpHelper_epp {
2992 SSEFunc_0_epp op[2];
c4baa050
BS
2993 uint32_t ext_mask;
2994};
2995
d3eb5eae
BS
2996struct SSEOpHelper_eppi {
2997 SSEFunc_0_eppi op[2];
c4baa050 2998 uint32_t ext_mask;
222a3336 2999};
c4baa050 3000
222a3336 3001#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
a7812ae4
PB
3002#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3003#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
222a3336 3004#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
e71827bc
AJ
3005#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
3006 CPUID_EXT_PCLMULQDQ }
d640045a 3007#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
c4baa050 3008
d3eb5eae 3009static const struct SSEOpHelper_epp sse_op_table6[256] = {
222a3336
AZ
3010 [0x00] = SSSE3_OP(pshufb),
3011 [0x01] = SSSE3_OP(phaddw),
3012 [0x02] = SSSE3_OP(phaddd),
3013 [0x03] = SSSE3_OP(phaddsw),
3014 [0x04] = SSSE3_OP(pmaddubsw),
3015 [0x05] = SSSE3_OP(phsubw),
3016 [0x06] = SSSE3_OP(phsubd),
3017 [0x07] = SSSE3_OP(phsubsw),
3018 [0x08] = SSSE3_OP(psignb),
3019 [0x09] = SSSE3_OP(psignw),
3020 [0x0a] = SSSE3_OP(psignd),
3021 [0x0b] = SSSE3_OP(pmulhrsw),
3022 [0x10] = SSE41_OP(pblendvb),
3023 [0x14] = SSE41_OP(blendvps),
3024 [0x15] = SSE41_OP(blendvpd),
3025 [0x17] = SSE41_OP(ptest),
3026 [0x1c] = SSSE3_OP(pabsb),
3027 [0x1d] = SSSE3_OP(pabsw),
3028 [0x1e] = SSSE3_OP(pabsd),
3029 [0x20] = SSE41_OP(pmovsxbw),
3030 [0x21] = SSE41_OP(pmovsxbd),
3031 [0x22] = SSE41_OP(pmovsxbq),
3032 [0x23] = SSE41_OP(pmovsxwd),
3033 [0x24] = SSE41_OP(pmovsxwq),
3034 [0x25] = SSE41_OP(pmovsxdq),
3035 [0x28] = SSE41_OP(pmuldq),
3036 [0x29] = SSE41_OP(pcmpeqq),
 3037 [0x2a] = SSE41_SPECIAL, /* movntdqa */
3038 [0x2b] = SSE41_OP(packusdw),
3039 [0x30] = SSE41_OP(pmovzxbw),
3040 [0x31] = SSE41_OP(pmovzxbd),
3041 [0x32] = SSE41_OP(pmovzxbq),
3042 [0x33] = SSE41_OP(pmovzxwd),
3043 [0x34] = SSE41_OP(pmovzxwq),
3044 [0x35] = SSE41_OP(pmovzxdq),
3045 [0x37] = SSE42_OP(pcmpgtq),
3046 [0x38] = SSE41_OP(pminsb),
3047 [0x39] = SSE41_OP(pminsd),
3048 [0x3a] = SSE41_OP(pminuw),
3049 [0x3b] = SSE41_OP(pminud),
3050 [0x3c] = SSE41_OP(pmaxsb),
3051 [0x3d] = SSE41_OP(pmaxsd),
3052 [0x3e] = SSE41_OP(pmaxuw),
3053 [0x3f] = SSE41_OP(pmaxud),
3054 [0x40] = SSE41_OP(pmulld),
3055 [0x41] = SSE41_OP(phminposuw),
d640045a
AJ
3056 [0xdb] = AESNI_OP(aesimc),
3057 [0xdc] = AESNI_OP(aesenc),
3058 [0xdd] = AESNI_OP(aesenclast),
3059 [0xde] = AESNI_OP(aesdec),
3060 [0xdf] = AESNI_OP(aesdeclast),
4242b1bd
AZ
3061};
3062
d3eb5eae 3063static const struct SSEOpHelper_eppi sse_op_table7[256] = {
222a3336
AZ
3064 [0x08] = SSE41_OP(roundps),
3065 [0x09] = SSE41_OP(roundpd),
3066 [0x0a] = SSE41_OP(roundss),
3067 [0x0b] = SSE41_OP(roundsd),
3068 [0x0c] = SSE41_OP(blendps),
3069 [0x0d] = SSE41_OP(blendpd),
3070 [0x0e] = SSE41_OP(pblendw),
3071 [0x0f] = SSSE3_OP(palignr),
3072 [0x14] = SSE41_SPECIAL, /* pextrb */
3073 [0x15] = SSE41_SPECIAL, /* pextrw */
3074 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
3075 [0x17] = SSE41_SPECIAL, /* extractps */
3076 [0x20] = SSE41_SPECIAL, /* pinsrb */
3077 [0x21] = SSE41_SPECIAL, /* insertps */
3078 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
3079 [0x40] = SSE41_OP(dpps),
3080 [0x41] = SSE41_OP(dppd),
3081 [0x42] = SSE41_OP(mpsadbw),
e71827bc 3082 [0x44] = PCLMULQDQ_OP(pclmulqdq),
222a3336
AZ
3083 [0x60] = SSE42_OP(pcmpestrm),
3084 [0x61] = SSE42_OP(pcmpestri),
3085 [0x62] = SSE42_OP(pcmpistrm),
3086 [0x63] = SSE42_OP(pcmpistri),
d640045a 3087 [0xdf] = AESNI_OP(aeskeygenassist),
4242b1bd
AZ
3088};
3089
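/* Editorial sketch, not from the upstream file: gen_sse() below first maps
 * the mandatory prefix to the table column used throughout the SSE tables:
 *
 *     (none)        -> b1 = 0     0f 58        addps
 *     0x66          -> b1 = 1     66 0f 58     addpd
 *     0xf3 (repz)   -> b1 = 2     f3 0f 58     addss
 *     0xf2 (repnz)  -> b1 = 3     f2 0f 58     addsd
 *
 * It then checks TS/EM/OSFXSR, handles femms/emms early, and either runs
 * the open-coded SSE_SPECIAL cases or calls the selected helper with
 * pointers to the source and destination register-file slots. */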
0af10c86
BS
3090static void gen_sse(CPUX86State *env, DisasContext *s, int b,
3091 target_ulong pc_start, int rex_r)
664e0f19
FB
3092{
3093 int b1, op1_offset, op2_offset, is_xmm, val, ot;
4eeb3939 3094 int modrm, mod, rm, reg;
d3eb5eae
BS
3095 SSEFunc_0_epp sse_fn_epp;
3096 SSEFunc_0_eppi sse_fn_eppi;
c4baa050 3097 SSEFunc_0_ppi sse_fn_ppi;
d3eb5eae 3098 SSEFunc_0_eppt sse_fn_eppt;
664e0f19
FB
3099
3100 b &= 0xff;
5fafdf24 3101 if (s->prefix & PREFIX_DATA)
664e0f19 3102 b1 = 1;
5fafdf24 3103 else if (s->prefix & PREFIX_REPZ)
664e0f19 3104 b1 = 2;
5fafdf24 3105 else if (s->prefix & PREFIX_REPNZ)
664e0f19
FB
3106 b1 = 3;
3107 else
3108 b1 = 0;
d3eb5eae
BS
3109 sse_fn_epp = sse_op_table1[b][b1];
3110 if (!sse_fn_epp) {
664e0f19 3111 goto illegal_op;
c4baa050 3112 }
a35f3ec7 3113 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
664e0f19
FB
3114 is_xmm = 1;
3115 } else {
3116 if (b1 == 0) {
3117 /* MMX case */
3118 is_xmm = 0;
3119 } else {
3120 is_xmm = 1;
3121 }
3122 }
3123 /* simple MMX/SSE operation */
3124 if (s->flags & HF_TS_MASK) {
3125 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3126 return;
3127 }
3128 if (s->flags & HF_EM_MASK) {
3129 illegal_op:
3130 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3131 return;
3132 }
3133 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
4242b1bd
AZ
3134 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3135 goto illegal_op;
e771edab
AJ
3136 if (b == 0x0e) {
3137 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3138 goto illegal_op;
3139 /* femms */
d3eb5eae 3140 gen_helper_emms(cpu_env);
e771edab
AJ
3141 return;
3142 }
3143 if (b == 0x77) {
3144 /* emms */
d3eb5eae 3145 gen_helper_emms(cpu_env);
664e0f19
FB
3146 return;
3147 }
3148 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3149 the static cpu state) */
3150 if (!is_xmm) {
d3eb5eae 3151 gen_helper_enter_mmx(cpu_env);
664e0f19
FB
3152 }
3153
0af10c86 3154 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3155 reg = ((modrm >> 3) & 7);
3156 if (is_xmm)
3157 reg |= rex_r;
3158 mod = (modrm >> 6) & 3;
d3eb5eae 3159 if (sse_fn_epp == SSE_SPECIAL) {
664e0f19
FB
3160 b |= (b1 << 8);
3161 switch(b) {
3162 case 0x0e7: /* movntq */
5fafdf24 3163 if (mod == 3)
664e0f19 3164 goto illegal_op;
4eeb3939 3165 gen_lea_modrm(env, s, modrm);
323d1876 3166 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
664e0f19
FB
3167 break;
3168 case 0x1e7: /* movntdq */
3169 case 0x02b: /* movntps */
3170 case 0x12b: /* movntps */
2e21e749
T
3171 if (mod == 3)
3172 goto illegal_op;
4eeb3939 3173 gen_lea_modrm(env, s, modrm);
323d1876 3174 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
2e21e749 3175 break;
465e9838
FB
3176 case 0x3f0: /* lddqu */
3177 if (mod == 3)
664e0f19 3178 goto illegal_op;
4eeb3939 3179 gen_lea_modrm(env, s, modrm);
323d1876 3180 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19 3181 break;
d9f4bb27
AP
3182 case 0x22b: /* movntss */
3183 case 0x32b: /* movntsd */
3184 if (mod == 3)
3185 goto illegal_op;
4eeb3939 3186 gen_lea_modrm(env, s, modrm);
d9f4bb27 3187 if (b1 & 1) {
323d1876 3188 gen_stq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
d9f4bb27
AP
3189 } else {
3190 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3191 xmm_regs[reg].XMM_L(0)));
fd8ca9f6 3192 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
d9f4bb27
AP
3193 }
3194 break;
664e0f19 3195 case 0x6e: /* movd mm, ea */
dabd98dd
FB
3196#ifdef TARGET_X86_64
3197 if (s->dflag == 2) {
4ba9938c 3198 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
5af45186 3199 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
5fafdf24 3200 } else
dabd98dd
FB
3201#endif
3202 {
4ba9938c 3203 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
5af45186
FB
3204 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3205 offsetof(CPUX86State,fpregs[reg].mmx));
a7812ae4
PB
3206 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3207 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
dabd98dd 3208 }
664e0f19
FB
3209 break;
3210 case 0x16e: /* movd xmm, ea */
dabd98dd
FB
3211#ifdef TARGET_X86_64
3212 if (s->dflag == 2) {
4ba9938c 3213 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
5af45186
FB
3214 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3215 offsetof(CPUX86State,xmm_regs[reg]));
a7812ae4 3216 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
5fafdf24 3217 } else
dabd98dd
FB
3218#endif
3219 {
4ba9938c 3220 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
5af45186
FB
3221 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3222 offsetof(CPUX86State,xmm_regs[reg]));
b6abf97d 3223 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
a7812ae4 3224 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
dabd98dd 3225 }
664e0f19
FB
3226 break;
3227 case 0x6f: /* movq mm, ea */
3228 if (mod != 3) {
4eeb3939 3229 gen_lea_modrm(env, s, modrm);
323d1876 3230 gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
664e0f19
FB
3231 } else {
3232 rm = (modrm & 7);
b6abf97d 3233 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
5af45186 3234 offsetof(CPUX86State,fpregs[rm].mmx));
b6abf97d 3235 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
5af45186 3236 offsetof(CPUX86State,fpregs[reg].mmx));
664e0f19
FB
3237 }
3238 break;
3239 case 0x010: /* movups */
3240 case 0x110: /* movupd */
3241 case 0x028: /* movaps */
3242 case 0x128: /* movapd */
3243 case 0x16f: /* movdqa xmm, ea */
3244 case 0x26f: /* movdqu xmm, ea */
3245 if (mod != 3) {
4eeb3939 3246 gen_lea_modrm(env, s, modrm);
323d1876 3247 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19
FB
3248 } else {
3249 rm = (modrm & 7) | REX_B(s);
3250 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3251 offsetof(CPUX86State,xmm_regs[rm]));
3252 }
3253 break;
3254 case 0x210: /* movss xmm, ea */
3255 if (mod != 3) {
4eeb3939 3256 gen_lea_modrm(env, s, modrm);
909be183 3257 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
651ba608 3258 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
97212c88 3259 tcg_gen_movi_tl(cpu_T[0], 0);
651ba608
FB
3260 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3261 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3262 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
664e0f19
FB
3263 } else {
3264 rm = (modrm & 7) | REX_B(s);
3265 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3266 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3267 }
3268 break;
3269 case 0x310: /* movsd xmm, ea */
3270 if (mod != 3) {
4eeb3939 3271 gen_lea_modrm(env, s, modrm);
323d1876
RH
3272 gen_ldq_env_A0(s, offsetof(CPUX86State,
3273 xmm_regs[reg].XMM_Q(0)));
97212c88 3274 tcg_gen_movi_tl(cpu_T[0], 0);
651ba608
FB
3275 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3276 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
664e0f19
FB
3277 } else {
3278 rm = (modrm & 7) | REX_B(s);
3279 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3280 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3281 }
3282 break;
3283 case 0x012: /* movlps */
3284 case 0x112: /* movlpd */
3285 if (mod != 3) {
4eeb3939 3286 gen_lea_modrm(env, s, modrm);
323d1876
RH
3287 gen_ldq_env_A0(s, offsetof(CPUX86State,
3288 xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3289 } else {
3290 /* movhlps */
3291 rm = (modrm & 7) | REX_B(s);
3292 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3293 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3294 }
3295 break;
465e9838
FB
3296 case 0x212: /* movsldup */
3297 if (mod != 3) {
4eeb3939 3298 gen_lea_modrm(env, s, modrm);
323d1876 3299 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
465e9838
FB
3300 } else {
3301 rm = (modrm & 7) | REX_B(s);
3302 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3303 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3304 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3305 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3306 }
3307 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3308 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3309 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3310 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3311 break;
3312 case 0x312: /* movddup */
3313 if (mod != 3) {
4eeb3939 3314 gen_lea_modrm(env, s, modrm);
323d1876
RH
3315 gen_ldq_env_A0(s, offsetof(CPUX86State,
3316 xmm_regs[reg].XMM_Q(0)));
465e9838
FB
3317 } else {
3318 rm = (modrm & 7) | REX_B(s);
3319 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3320 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3321 }
3322 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
ba6526df 3323 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
465e9838 3324 break;
664e0f19
FB
3325 case 0x016: /* movhps */
3326 case 0x116: /* movhpd */
3327 if (mod != 3) {
4eeb3939 3328 gen_lea_modrm(env, s, modrm);
323d1876
RH
3329 gen_ldq_env_A0(s, offsetof(CPUX86State,
3330 xmm_regs[reg].XMM_Q(1)));
664e0f19
FB
3331 } else {
3332 /* movlhps */
3333 rm = (modrm & 7) | REX_B(s);
3334 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3335 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3336 }
3337 break;
3338 case 0x216: /* movshdup */
3339 if (mod != 3) {
4eeb3939 3340 gen_lea_modrm(env, s, modrm);
323d1876 3341 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19
FB
3342 } else {
3343 rm = (modrm & 7) | REX_B(s);
3344 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3345 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3346 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3347 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3348 }
3349 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3350 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3351 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3352 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3353 break;
d9f4bb27
AP
3354 case 0x178:
3355 case 0x378:
3356 {
3357 int bit_index, field_length;
3358
3359 if (b1 == 1 && reg != 0)
3360 goto illegal_op;
0af10c86
BS
3361 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3362 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
d9f4bb27
AP
3363 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3364 offsetof(CPUX86State,xmm_regs[reg]));
3365 if (b1 == 1)
d3eb5eae
BS
3366 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3367 tcg_const_i32(bit_index),
3368 tcg_const_i32(field_length));
d9f4bb27 3369 else
d3eb5eae
BS
3370 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3371 tcg_const_i32(bit_index),
3372 tcg_const_i32(field_length));
d9f4bb27
AP
3373 }
3374 break;
664e0f19 3375 case 0x7e: /* movd ea, mm */
dabd98dd
FB
3376#ifdef TARGET_X86_64
3377 if (s->dflag == 2) {
5af45186
FB
3378 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3379 offsetof(CPUX86State,fpregs[reg].mmx));
4ba9938c 3380 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
5fafdf24 3381 } else
dabd98dd
FB
3382#endif
3383 {
5af45186
FB
3384 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3385 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
4ba9938c 3386 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
dabd98dd 3387 }
664e0f19
FB
3388 break;
3389 case 0x17e: /* movd ea, xmm */
dabd98dd
FB
3390#ifdef TARGET_X86_64
3391 if (s->dflag == 2) {
5af45186
FB
3392 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3393 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
4ba9938c 3394 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
5fafdf24 3395 } else
dabd98dd
FB
3396#endif
3397 {
5af45186
FB
3398 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3399 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
4ba9938c 3400 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
dabd98dd 3401 }
664e0f19
FB
3402 break;
3403 case 0x27e: /* movq xmm, ea */
3404 if (mod != 3) {
4eeb3939 3405 gen_lea_modrm(env, s, modrm);
323d1876
RH
3406 gen_ldq_env_A0(s, offsetof(CPUX86State,
3407 xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3408 } else {
3409 rm = (modrm & 7) | REX_B(s);
3410 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3411 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3412 }
3413 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3414 break;
3415 case 0x7f: /* movq ea, mm */
3416 if (mod != 3) {
4eeb3939 3417 gen_lea_modrm(env, s, modrm);
323d1876 3418 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
664e0f19
FB
3419 } else {
3420 rm = (modrm & 7);
3421 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3422 offsetof(CPUX86State,fpregs[reg].mmx));
3423 }
3424 break;
3425 case 0x011: /* movups */
3426 case 0x111: /* movupd */
3427 case 0x029: /* movaps */
3428 case 0x129: /* movapd */
3429 case 0x17f: /* movdqa ea, xmm */
3430 case 0x27f: /* movdqu ea, xmm */
3431 if (mod != 3) {
4eeb3939 3432 gen_lea_modrm(env, s, modrm);
323d1876 3433 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19
FB
3434 } else {
3435 rm = (modrm & 7) | REX_B(s);
3436 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3437 offsetof(CPUX86State,xmm_regs[reg]));
3438 }
3439 break;
3440 case 0x211: /* movss ea, xmm */
3441 if (mod != 3) {
4eeb3939 3442 gen_lea_modrm(env, s, modrm);
651ba608 3443 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
fd8ca9f6 3444 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
664e0f19
FB
3445 } else {
3446 rm = (modrm & 7) | REX_B(s);
3447 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3448 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3449 }
3450 break;
3451 case 0x311: /* movsd ea, xmm */
3452 if (mod != 3) {
4eeb3939 3453 gen_lea_modrm(env, s, modrm);
323d1876
RH
3454 gen_stq_env_A0(s, offsetof(CPUX86State,
3455 xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3456 } else {
3457 rm = (modrm & 7) | REX_B(s);
3458 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3459 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3460 }
3461 break;
3462 case 0x013: /* movlps */
3463 case 0x113: /* movlpd */
3464 if (mod != 3) {
4eeb3939 3465 gen_lea_modrm(env, s, modrm);
323d1876
RH
3466 gen_stq_env_A0(s, offsetof(CPUX86State,
3467 xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3468 } else {
3469 goto illegal_op;
3470 }
3471 break;
3472 case 0x017: /* movhps */
3473 case 0x117: /* movhpd */
3474 if (mod != 3) {
4eeb3939 3475 gen_lea_modrm(env, s, modrm);
323d1876
RH
3476 gen_stq_env_A0(s, offsetof(CPUX86State,
3477 xmm_regs[reg].XMM_Q(1)));
664e0f19
FB
3478 } else {
3479 goto illegal_op;
3480 }
3481 break;
3482 case 0x71: /* shift mm, im */
3483 case 0x72:
3484 case 0x73:
3485 case 0x171: /* shift xmm, im */
3486 case 0x172:
3487 case 0x173:
c045af25
AK
3488 if (b1 >= 2) {
3489 goto illegal_op;
3490 }
0af10c86 3491 val = cpu_ldub_code(env, s->pc++);
664e0f19 3492 if (is_xmm) {
1b90d56e 3493 tcg_gen_movi_tl(cpu_T[0], val);
651ba608 3494 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
97212c88 3495 tcg_gen_movi_tl(cpu_T[0], 0);
651ba608 3496 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
664e0f19
FB
3497 op1_offset = offsetof(CPUX86State,xmm_t0);
3498 } else {
1b90d56e 3499 tcg_gen_movi_tl(cpu_T[0], val);
651ba608 3500 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
97212c88 3501 tcg_gen_movi_tl(cpu_T[0], 0);
651ba608 3502 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
664e0f19
FB
3503 op1_offset = offsetof(CPUX86State,mmx_t0);
3504 }
d3eb5eae
BS
3505 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3506 (((modrm >> 3)) & 7)][b1];
3507 if (!sse_fn_epp) {
664e0f19 3508 goto illegal_op;
c4baa050 3509 }
664e0f19
FB
3510 if (is_xmm) {
3511 rm = (modrm & 7) | REX_B(s);
3512 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3513 } else {
3514 rm = (modrm & 7);
3515 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3516 }
5af45186
FB
3517 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3518 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
d3eb5eae 3519 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3520 break;
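/* Illustrative note (not from the original source): for this immediate-count
   shift group the count byte is spilled into the low 32 bits of xmm_t0 or
   mmx_t0 above and the /r field of the modrm byte selects the operation
   through sse_op_table2; e.g. 0f 71 /2 is psrlw, /4 psraw, /6 psllw, 0f 72
   covers the dword forms and 0f 73 the qword (and, with a 66 prefix, the
   whole-register psrldq/pslldq) forms. */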
3521 case 0x050: /* movmskps */
664e0f19 3522 rm = (modrm & 7) | REX_B(s);
5af45186
FB
3523 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3524 offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3525 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
a7fbcbe5 3526 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
664e0f19
FB
3527 break;
3528 case 0x150: /* movmskpd */
664e0f19 3529 rm = (modrm & 7) | REX_B(s);
5af45186
FB
3530 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3531 offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3532 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
a7fbcbe5 3533 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
664e0f19
FB
3534 break;
3535 case 0x02a: /* cvtpi2ps */
3536 case 0x12a: /* cvtpi2pd */
d3eb5eae 3537 gen_helper_enter_mmx(cpu_env);
664e0f19 3538 if (mod != 3) {
4eeb3939 3539 gen_lea_modrm(env, s, modrm);
664e0f19 3540 op2_offset = offsetof(CPUX86State,mmx_t0);
323d1876 3541 gen_ldq_env_A0(s, op2_offset);
664e0f19
FB
3542 } else {
3543 rm = (modrm & 7);
3544 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3545 }
3546 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
5af45186
FB
3547 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3548 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
664e0f19
FB
3549 switch(b >> 8) {
3550 case 0x0:
d3eb5eae 3551 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3552 break;
3553 default:
3554 case 0x1:
d3eb5eae 3555 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3556 break;
3557 }
3558 break;
3559 case 0x22a: /* cvtsi2ss */
3560 case 0x32a: /* cvtsi2sd */
4ba9938c 3561 ot = (s->dflag == 2) ? MO_64 : MO_32;
0af10c86 3562 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
664e0f19 3563 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
5af45186 3564 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4ba9938c 3565 if (ot == MO_32) {
d3eb5eae 3566 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
28e10711 3567 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 3568 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
28e10711 3569 } else {
11f8cdbc 3570#ifdef TARGET_X86_64
d3eb5eae
BS
3571 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3572 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
11f8cdbc
SW
3573#else
3574 goto illegal_op;
3575#endif
28e10711 3576 }
664e0f19
FB
3577 break;
3578 case 0x02c: /* cvttps2pi */
3579 case 0x12c: /* cvttpd2pi */
3580 case 0x02d: /* cvtps2pi */
3581 case 0x12d: /* cvtpd2pi */
d3eb5eae 3582 gen_helper_enter_mmx(cpu_env);
664e0f19 3583 if (mod != 3) {
4eeb3939 3584 gen_lea_modrm(env, s, modrm);
664e0f19 3585 op2_offset = offsetof(CPUX86State,xmm_t0);
323d1876 3586 gen_ldo_env_A0(s, op2_offset);
664e0f19
FB
3587 } else {
3588 rm = (modrm & 7) | REX_B(s);
3589 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3590 }
3591 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
5af45186
FB
3592 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3593 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
664e0f19
FB
3594 switch(b) {
3595 case 0x02c:
d3eb5eae 3596 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3597 break;
3598 case 0x12c:
d3eb5eae 3599 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3600 break;
3601 case 0x02d:
d3eb5eae 3602 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3603 break;
3604 case 0x12d:
d3eb5eae 3605 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3606 break;
3607 }
3608 break;
3609 case 0x22c: /* cvttss2si */
3610 case 0x32c: /* cvttsd2si */
3611 case 0x22d: /* cvtss2si */
3612 case 0x32d: /* cvtsd2si */
4ba9938c 3613 ot = (s->dflag == 2) ? MO_64 : MO_32;
31313213 3614 if (mod != 3) {
4eeb3939 3615 gen_lea_modrm(env, s, modrm);
31313213 3616 if ((b >> 8) & 1) {
323d1876 3617 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_Q(0)));
31313213 3618 } else {
909be183 3619 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
651ba608 3620 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
31313213
FB
3621 }
3622 op2_offset = offsetof(CPUX86State,xmm_t0);
3623 } else {
3624 rm = (modrm & 7) | REX_B(s);
3625 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3626 }
5af45186 3627 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
4ba9938c 3628 if (ot == MO_32) {
d3eb5eae 3629 SSEFunc_i_ep sse_fn_i_ep =
bedc2ac1 3630 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
d3eb5eae 3631 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
b6abf97d 3632 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5af45186 3633 } else {
11f8cdbc 3634#ifdef TARGET_X86_64
d3eb5eae 3635 SSEFunc_l_ep sse_fn_l_ep =
bedc2ac1 3636 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
d3eb5eae 3637 sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
11f8cdbc
SW
3638#else
3639 goto illegal_op;
3640#endif
5af45186 3641 }
57fec1fe 3642 gen_op_mov_reg_T0(ot, reg);
664e0f19
FB
3643 break;
3644 case 0xc4: /* pinsrw */
5fafdf24 3645 case 0x1c4:
d1e42c5c 3646 s->rip_offset = 1;
4ba9938c 3647 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
0af10c86 3648 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3649 if (b1) {
3650 val &= 7;
5af45186
FB
3651 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3652 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
664e0f19
FB
3653 } else {
3654 val &= 3;
5af45186
FB
3655 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3656 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
664e0f19
FB
3657 }
3658 break;
3659 case 0xc5: /* pextrw */
5fafdf24 3660 case 0x1c5:
664e0f19
FB
3661 if (mod != 3)
3662 goto illegal_op;
4ba9938c 3663 ot = (s->dflag == 2) ? MO_64 : MO_32;
0af10c86 3664 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3665 if (b1) {
3666 val &= 7;
3667 rm = (modrm & 7) | REX_B(s);
5af45186
FB
3668 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3669 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
664e0f19
FB
3670 } else {
3671 val &= 3;
3672 rm = (modrm & 7);
5af45186
FB
3673 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3674 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
664e0f19
FB
3675 }
3676 reg = ((modrm >> 3) & 7) | rex_r;
6dc2d0da 3677 gen_op_mov_reg_T0(ot, reg);
664e0f19
FB
3678 break;
3679 case 0x1d6: /* movq ea, xmm */
3680 if (mod != 3) {
4eeb3939 3681 gen_lea_modrm(env, s, modrm);
323d1876
RH
3682 gen_stq_env_A0(s, offsetof(CPUX86State,
3683 xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3684 } else {
3685 rm = (modrm & 7) | REX_B(s);
3686 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3687 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3688 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3689 }
3690 break;
3691 case 0x2d6: /* movq2dq */
d3eb5eae 3692 gen_helper_enter_mmx(cpu_env);
480c1cdb
FB
3693 rm = (modrm & 7);
3694 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3695 offsetof(CPUX86State,fpregs[rm].mmx));
3696 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
664e0f19
FB
3697 break;
3698 case 0x3d6: /* movdq2q */
d3eb5eae 3699 gen_helper_enter_mmx(cpu_env);
480c1cdb
FB
3700 rm = (modrm & 7) | REX_B(s);
3701 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3702 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
664e0f19
FB
3703 break;
3704 case 0xd7: /* pmovmskb */
3705 case 0x1d7:
3706 if (mod != 3)
3707 goto illegal_op;
3708 if (b1) {
3709 rm = (modrm & 7) | REX_B(s);
5af45186 3710 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3711 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
664e0f19
FB
3712 } else {
3713 rm = (modrm & 7);
5af45186 3714 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
d3eb5eae 3715 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
664e0f19
FB
3716 }
3717 reg = ((modrm >> 3) & 7) | rex_r;
a7fbcbe5 3718 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
664e0f19 3719 break;
111994ee 3720
4242b1bd 3721 case 0x138:
000cacf6 3722 case 0x038:
4242b1bd 3723 b = modrm;
111994ee
RH
3724 if ((b & 0xf0) == 0xf0) {
3725 goto do_0f_38_fx;
3726 }
0af10c86 3727 modrm = cpu_ldub_code(env, s->pc++);
4242b1bd
AZ
3728 rm = modrm & 7;
3729 reg = ((modrm >> 3) & 7) | rex_r;
3730 mod = (modrm >> 6) & 3;
c045af25
AK
3731 if (b1 >= 2) {
3732 goto illegal_op;
3733 }
4242b1bd 3734
d3eb5eae
BS
3735 sse_fn_epp = sse_op_table6[b].op[b1];
3736 if (!sse_fn_epp) {
4242b1bd 3737 goto illegal_op;
c4baa050 3738 }
222a3336
AZ
3739 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3740 goto illegal_op;
4242b1bd
AZ
3741
3742 if (b1) {
3743 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3744 if (mod == 3) {
3745 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3746 } else {
3747 op2_offset = offsetof(CPUX86State,xmm_t0);
4eeb3939 3748 gen_lea_modrm(env, s, modrm);
222a3336
AZ
3749 switch (b) {
3750 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3751 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3752 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
323d1876 3753 gen_ldq_env_A0(s, op2_offset +
222a3336
AZ
3754 offsetof(XMMReg, XMM_Q(0)));
3755 break;
3756 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3757 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3c5f4116
RH
3758 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
3759 s->mem_index, MO_LEUL);
222a3336
AZ
3760 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3761 offsetof(XMMReg, XMM_L(0)));
3762 break;
3763 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3c5f4116
RH
3764 tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
3765 s->mem_index, MO_LEUW);
222a3336
AZ
3766 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3767 offsetof(XMMReg, XMM_W(0)));
3768 break;
3769 case 0x2a: /* movntdqa */
323d1876 3770 gen_ldo_env_A0(s, op1_offset);
222a3336
AZ
3771 return;
3772 default:
323d1876 3773 gen_ldo_env_A0(s, op2_offset);
222a3336 3774 }
4242b1bd
AZ
3775 }
3776 } else {
3777 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3778 if (mod == 3) {
3779 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3780 } else {
3781 op2_offset = offsetof(CPUX86State,mmx_t0);
4eeb3939 3782 gen_lea_modrm(env, s, modrm);
323d1876 3783 gen_ldq_env_A0(s, op2_offset);
4242b1bd
AZ
3784 }
3785 }
d3eb5eae 3786 if (sse_fn_epp == SSE_SPECIAL) {
222a3336 3787 goto illegal_op;
c4baa050 3788 }
222a3336 3789
4242b1bd
AZ
3790 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3791 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 3792 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
222a3336 3793
3ca51d07
RH
3794 if (b == 0x17) {
3795 set_cc_op(s, CC_OP_EFLAGS);
3796 }
4242b1bd 3797 break;
111994ee
RH
3798
3799 case 0x238:
3800 case 0x338:
3801 do_0f_38_fx:
3802 /* Various integer extensions at 0f 38 f[0-f]. */
3803 b = modrm | (b1 << 8);
0af10c86 3804 modrm = cpu_ldub_code(env, s->pc++);
222a3336
AZ
3805 reg = ((modrm >> 3) & 7) | rex_r;
3806
111994ee
RH
3807 switch (b) {
3808 case 0x3f0: /* crc32 Gd,Eb */
3809 case 0x3f1: /* crc32 Gd,Ey */
3810 do_crc32:
3811 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3812 goto illegal_op;
3813 }
3814 if ((b & 0xff) == 0xf0) {
4ba9938c 3815 ot = MO_8;
111994ee 3816 } else if (s->dflag != 2) {
4ba9938c 3817 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
111994ee 3818 } else {
4ba9938c 3819 ot = MO_64;
111994ee 3820 }
4242b1bd 3821
24b9c00f 3822 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
111994ee
RH
3823 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3824 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3825 cpu_T[0], tcg_const_i32(8 << ot));
222a3336 3826
4ba9938c 3827 ot = (s->dflag == 2) ? MO_64 : MO_32;
111994ee
RH
3828 gen_op_mov_reg_T0(ot, reg);
3829 break;
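/* Illustrative note (not from the original source): the tcg_const_i32(8 << ot)
   argument passes the source operand width in bits, i.e. 8, 16, 32 or 64,
   so one crc32 step folds exactly one operand's worth of data into the
   accumulator held in the destination register. */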
222a3336 3830
111994ee
RH
3831 case 0x1f0: /* crc32 or movbe */
3832 case 0x1f1:
3833 /* For these insns, the f3 prefix is supposed to take priority
3834 over the 66 prefix, but that is not how b1 was computed
3835 above. */
3836 if (s->prefix & PREFIX_REPNZ) {
3837 goto do_crc32;
3838 }
3839 /* FALLTHRU */
3840 case 0x0f0: /* movbe Gy,My */
3841 case 0x0f1: /* movbe My,Gy */
3842 if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3843 goto illegal_op;
3844 }
3845 if (s->dflag != 2) {
4ba9938c 3846 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
111994ee 3847 } else {
4ba9938c 3848 ot = MO_64;
111994ee
RH
3849 }
3850
3655a19f 3851 gen_lea_modrm(env, s, modrm);
111994ee 3852 if ((b & 1) == 0) {
3655a19f
RH
3853 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
3854 s->mem_index, ot | MO_BE);
111994ee
RH
3855 gen_op_mov_reg_T0(ot, reg);
3856 } else {
3655a19f
RH
3857 tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
3858 s->mem_index, ot | MO_BE);
111994ee
RH
3859 }
3860 break;
3861
7073fbad
RH
3862 case 0x0f2: /* andn Gy, By, Ey */
3863 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3864 || !(s->prefix & PREFIX_VEX)
3865 || s->vex_l != 0) {
3866 goto illegal_op;
3867 }
4ba9938c 3868 ot = s->dflag == 2 ? MO_64 : MO_32;
7073fbad
RH
3869 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3870 tcg_gen_andc_tl(cpu_T[0], cpu_regs[s->vex_v], cpu_T[0]);
3871 gen_op_mov_reg_T0(ot, reg);
3872 gen_op_update1_cc();
3873 set_cc_op(s, CC_OP_LOGICB + ot);
3874 break;
3875
c7ab7565
RH
3876 case 0x0f7: /* bextr Gy, Ey, By */
3877 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3878 || !(s->prefix & PREFIX_VEX)
3879 || s->vex_l != 0) {
3880 goto illegal_op;
3881 }
4ba9938c 3882 ot = s->dflag == 2 ? MO_64 : MO_32;
c7ab7565
RH
3883 {
3884 TCGv bound, zero;
3885
3886 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3887 /* Extract START, and shift the operand.
3888 Shifts larger than operand size get zeros. */
3889 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
3890 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_A0);
3891
4ba9938c 3892 bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
c7ab7565
RH
3893 zero = tcg_const_tl(0);
3894 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T[0], cpu_A0, bound,
3895 cpu_T[0], zero);
3896 tcg_temp_free(zero);
3897
3898 /* Extract the LEN into a mask. Lengths larger than
3899 operand size get all ones. */
3900 tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
3901 tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
3902 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
3903 cpu_A0, bound);
3904 tcg_temp_free(bound);
3905 tcg_gen_movi_tl(cpu_T[1], 1);
3906 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_A0);
3907 tcg_gen_subi_tl(cpu_T[1], cpu_T[1], 1);
3908 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3909
3910 gen_op_mov_reg_T0(ot, reg);
3911 gen_op_update1_cc();
3912 set_cc_op(s, CC_OP_LOGICB + ot);
3913 }
3914 break;
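/* Worked example (not from the original source): for bextr with a source of
   0x12345678 and a control word of 0x0804 (LEN = 8 in bits 15:8, START = 4
   in bits 7:0) the code above computes (0x12345678 >> 4) & 0xff = 0x67;
   out-of-range START and LEN values are clamped by the two movcond
   operations. */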
3915
02ea1e6b
RH
3916 case 0x0f5: /* bzhi Gy, Ey, By */
3917 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3918 || !(s->prefix & PREFIX_VEX)
3919 || s->vex_l != 0) {
3920 goto illegal_op;
3921 }
4ba9938c 3922 ot = s->dflag == 2 ? MO_64 : MO_32;
02ea1e6b
RH
3923 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3924 tcg_gen_ext8u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3925 {
4ba9938c 3926 TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
02ea1e6b
RH
3927 /* Note that since we're using BMILG (in order to get O
3928 cleared) we need to store the inverse into C. */
3929 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
3930 cpu_T[1], bound);
3931 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T[1], cpu_T[1],
3932 bound, bound, cpu_T[1]);
3933 tcg_temp_free(bound);
3934 }
3935 tcg_gen_movi_tl(cpu_A0, -1);
3936 tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T[1]);
3937 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_A0);
3938 gen_op_mov_reg_T0(ot, reg);
3939 gen_op_update1_cc();
3940 set_cc_op(s, CC_OP_BMILGB + ot);
3941 break;
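/* Worked example (not from the original source): bzhi clears the bits of the
   source from the given index upward, so with a source of 0xff and an index
   of 4 in the low byte of the second operand the result is
   0xff & ~(-1 << 4) = 0x0f. */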
3942
5f1f4b17
RH
3943 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3944 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3945 || !(s->prefix & PREFIX_VEX)
3946 || s->vex_l != 0) {
3947 goto illegal_op;
3948 }
4ba9938c 3949 ot = s->dflag == 2 ? MO_64 : MO_32;
5f1f4b17
RH
3950 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3951 switch (ot) {
5f1f4b17 3952 default:
a4bcea3d
RH
3953 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3954 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
3955 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
3956 cpu_tmp2_i32, cpu_tmp3_i32);
3957 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
3958 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
5f1f4b17
RH
3959 break;
3960#ifdef TARGET_X86_64
4ba9938c 3961 case MO_64:
a4bcea3d
RH
3962 tcg_gen_mulu2_i64(cpu_regs[s->vex_v], cpu_regs[reg],
3963 cpu_T[0], cpu_regs[R_EDX]);
5f1f4b17
RH
3964 break;
3965#endif
3966 }
3967 break;
3968
0592f74a
RH
3969 case 0x3f5: /* pdep Gy, By, Ey */
3970 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3971 || !(s->prefix & PREFIX_VEX)
3972 || s->vex_l != 0) {
3973 goto illegal_op;
3974 }
4ba9938c 3975 ot = s->dflag == 2 ? MO_64 : MO_32;
0592f74a
RH
3976 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3977 /* Note that by zero-extending the mask operand, we
3978 automatically handle zero-extending the result. */
3979 if (s->dflag == 2) {
3980 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
3981 } else {
3982 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3983 }
3984 gen_helper_pdep(cpu_regs[reg], cpu_T[0], cpu_T[1]);
3985 break;
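/* Worked example (not from the original source): pdep scatters the low-order
   bits of one source into the set-bit positions of the mask operand, so
   depositing the bits 0b101 into the mask 0b11010 yields 0b10010. */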
3986
3987 case 0x2f5: /* pext Gy, By, Ey */
3988 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3989 || !(s->prefix & PREFIX_VEX)
3990 || s->vex_l != 0) {
3991 goto illegal_op;
3992 }
4ba9938c 3993 ot = s->dflag == 2 ? MO_64 : MO_32;
0592f74a
RH
3994 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3995 /* Note that by zero-extending the mask operand, we
3996 automatically handle zero-extending the result. */
3997 if (s->dflag == 2) {
3998 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
3999 } else {
4000 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
4001 }
4002 gen_helper_pext(cpu_regs[reg], cpu_T[0], cpu_T[1]);
4003 break;
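/* Worked example (not from the original source): pext is the inverse gather;
   extracting the bits selected by the mask 0b11010 from the value 0b10010
   packs them into the low bits as 0b101. */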
4004
cd7f97ca
RH
4005 case 0x1f6: /* adcx Gy, Ey */
4006 case 0x2f6: /* adox Gy, Ey */
4007 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
4008 goto illegal_op;
4009 } else {
76f13133 4010 TCGv carry_in, carry_out, zero;
cd7f97ca
RH
4011 int end_op;
4012
4ba9938c 4013 ot = (s->dflag == 2 ? MO_64 : MO_32);
cd7f97ca
RH
4014 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4015
4016 /* Re-use the carry-out from a previous round. */
4017 TCGV_UNUSED(carry_in);
4018 carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
4019 switch (s->cc_op) {
4020 case CC_OP_ADCX:
4021 if (b == 0x1f6) {
4022 carry_in = cpu_cc_dst;
4023 end_op = CC_OP_ADCX;
4024 } else {
4025 end_op = CC_OP_ADCOX;
4026 }
4027 break;
4028 case CC_OP_ADOX:
4029 if (b == 0x1f6) {
4030 end_op = CC_OP_ADCOX;
4031 } else {
4032 carry_in = cpu_cc_src2;
4033 end_op = CC_OP_ADOX;
4034 }
4035 break;
4036 case CC_OP_ADCOX:
4037 end_op = CC_OP_ADCOX;
4038 carry_in = carry_out;
4039 break;
4040 default:
c53de1a2 4041 end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
cd7f97ca
RH
4042 break;
4043 }
4044 /* If we can't reuse carry-out, get it out of EFLAGS. */
4045 if (TCGV_IS_UNUSED(carry_in)) {
4046 if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
4047 gen_compute_eflags(s);
4048 }
4049 carry_in = cpu_tmp0;
4050 tcg_gen_shri_tl(carry_in, cpu_cc_src,
4051 ctz32(b == 0x1f6 ? CC_C : CC_O));
4052 tcg_gen_andi_tl(carry_in, carry_in, 1);
4053 }
4054
4055 switch (ot) {
4056#ifdef TARGET_X86_64
4ba9938c 4057 case MO_32:
cd7f97ca
RH
4058 /* If we know TL is 64-bit, and we want a 32-bit
4059 result, just do everything in 64-bit arithmetic. */
4060 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
4061 tcg_gen_ext32u_i64(cpu_T[0], cpu_T[0]);
4062 tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_regs[reg]);
4063 tcg_gen_add_i64(cpu_T[0], cpu_T[0], carry_in);
4064 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T[0]);
4065 tcg_gen_shri_i64(carry_out, cpu_T[0], 32);
4066 break;
4067#endif
4068 default:
4069 /* Otherwise compute the carry-out in two steps. */
76f13133
RH
4070 zero = tcg_const_tl(0);
4071 tcg_gen_add2_tl(cpu_T[0], carry_out,
4072 cpu_T[0], zero,
4073 carry_in, zero);
4074 tcg_gen_add2_tl(cpu_regs[reg], carry_out,
4075 cpu_regs[reg], carry_out,
4076 cpu_T[0], zero);
4077 tcg_temp_free(zero);
cd7f97ca
RH
4078 break;
4079 }
cd7f97ca
RH
4080 set_cc_op(s, end_op);
4081 }
4082 break;
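/* Illustrative note (not from the original source): adcx is an add-with-carry
   that updates only CF, and adox only OF, so the bookkeeping above tracks
   which carry chain is live (CC_OP_ADCX, CC_OP_ADOX or both as CC_OP_ADCOX)
   and only falls back to materialising EFLAGS when the incoming carry cannot
   be reused from the previous instruction. */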
4083
4a554890
RH
4084 case 0x1f7: /* shlx Gy, Ey, By */
4085 case 0x2f7: /* sarx Gy, Ey, By */
4086 case 0x3f7: /* shrx Gy, Ey, By */
4087 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4088 || !(s->prefix & PREFIX_VEX)
4089 || s->vex_l != 0) {
4090 goto illegal_op;
4091 }
4ba9938c 4092 ot = (s->dflag == 2 ? MO_64 : MO_32);
4a554890 4093 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4ba9938c 4094 if (ot == MO_64) {
4a554890
RH
4095 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 63);
4096 } else {
4097 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 31);
4098 }
4099 if (b == 0x1f7) {
4100 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4101 } else if (b == 0x2f7) {
4ba9938c 4102 if (ot != MO_64) {
4a554890
RH
4103 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4104 }
4105 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4106 } else {
4ba9938c 4107 if (ot != MO_64) {
4a554890
RH
4108 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4109 }
4110 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4111 }
4112 gen_op_mov_reg_T0(ot, reg);
4113 break;
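/* Illustrative note (not from the original source): as with the legacy
   shifts, the count is masked to 6 bits for 64-bit and 5 bits for 32-bit
   operands, and for the 32-bit sarx/shrx forms the operand is first sign- or
   zero-extended so the result is well defined when the target long is
   64 bits wide. */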
4114
bc4b43dc
RH
4115 case 0x0f3:
4116 case 0x1f3:
4117 case 0x2f3:
4118 case 0x3f3: /* Group 17 */
4119 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4120 || !(s->prefix & PREFIX_VEX)
4121 || s->vex_l != 0) {
4122 goto illegal_op;
4123 }
4ba9938c 4124 ot = s->dflag == 2 ? MO_64 : MO_32;
bc4b43dc
RH
4125 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4126
4127 switch (reg & 7) {
4128 case 1: /* blsr By,Ey */
4129 tcg_gen_neg_tl(cpu_T[1], cpu_T[0]);
4130 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4131 gen_op_mov_reg_T0(ot, s->vex_v);
4132 gen_op_update2_cc();
4133 set_cc_op(s, CC_OP_BMILGB + ot);
4134 break;
4135
4136 case 2: /* blsmsk By,Ey */
4137 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4138 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4139 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4140 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4141 set_cc_op(s, CC_OP_BMILGB + ot);
4142 break;
4143
4144 case 3: /* blsi By, Ey */
4145 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4146 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4147 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4148 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4149 set_cc_op(s, CC_OP_BMILGB + ot);
4150 break;
4151
4152 default:
4153 goto illegal_op;
4154 }
4155 break;
4156
111994ee
RH
4157 default:
4158 goto illegal_op;
4159 }
222a3336 4160 break;
111994ee 4161
222a3336
AZ
4162 case 0x03a:
4163 case 0x13a:
4242b1bd 4164 b = modrm;
0af10c86 4165 modrm = cpu_ldub_code(env, s->pc++);
4242b1bd
AZ
4166 rm = modrm & 7;
4167 reg = ((modrm >> 3) & 7) | rex_r;
4168 mod = (modrm >> 6) & 3;
c045af25
AK
4169 if (b1 >= 2) {
4170 goto illegal_op;
4171 }
4242b1bd 4172
d3eb5eae
BS
4173 sse_fn_eppi = sse_op_table7[b].op[b1];
4174 if (!sse_fn_eppi) {
4242b1bd 4175 goto illegal_op;
c4baa050 4176 }
222a3336
AZ
4177 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
4178 goto illegal_op;
4179
d3eb5eae 4180 if (sse_fn_eppi == SSE_SPECIAL) {
4ba9938c 4181 ot = (s->dflag == 2) ? MO_64 : MO_32;
222a3336
AZ
4182 rm = (modrm & 7) | REX_B(s);
4183 if (mod != 3)
4eeb3939 4184 gen_lea_modrm(env, s, modrm);
222a3336 4185 reg = ((modrm >> 3) & 7) | rex_r;
0af10c86 4186 val = cpu_ldub_code(env, s->pc++);
222a3336
AZ
4187 switch (b) {
4188 case 0x14: /* pextrb */
4189 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4190 xmm_regs[reg].XMM_B(val & 15)));
3523e4bd 4191 if (mod == 3) {
222a3336 4192 gen_op_mov_reg_T0(ot, rm);
3523e4bd
RH
4193 } else {
4194 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4195 s->mem_index, MO_UB);
4196 }
222a3336
AZ
4197 break;
4198 case 0x15: /* pextrw */
4199 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4200 xmm_regs[reg].XMM_W(val & 7)));
3523e4bd 4201 if (mod == 3) {
222a3336 4202 gen_op_mov_reg_T0(ot, rm);
3523e4bd
RH
4203 } else {
4204 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4205 s->mem_index, MO_LEUW);
4206 }
222a3336
AZ
4207 break;
4208 case 0x16:
4ba9938c 4209 if (ot == MO_32) { /* pextrd */
222a3336
AZ
4210 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4211 offsetof(CPUX86State,
4212 xmm_regs[reg].XMM_L(val & 3)));
3523e4bd 4213 if (mod == 3) {
a7fbcbe5 4214 tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
3523e4bd 4215 } else {
d5601ad0
RH
4216 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
4217 s->mem_index, MO_LEUL);
3523e4bd 4218 }
222a3336 4219 } else { /* pextrq */
a7812ae4 4220#ifdef TARGET_X86_64
222a3336
AZ
4221 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4222 offsetof(CPUX86State,
4223 xmm_regs[reg].XMM_Q(val & 1)));
3523e4bd 4224 if (mod == 3) {
a7fbcbe5 4225 tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
3523e4bd
RH
4226 } else {
4227 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
4228 s->mem_index, MO_LEQ);
4229 }
a7812ae4
PB
4230#else
4231 goto illegal_op;
4232#endif
222a3336
AZ
4233 }
4234 break;
4235 case 0x17: /* extractps */
4236 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4237 xmm_regs[reg].XMM_L(val & 3)));
3523e4bd 4238 if (mod == 3) {
222a3336 4239 gen_op_mov_reg_T0(ot, rm);
3523e4bd
RH
4240 } else {
4241 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4242 s->mem_index, MO_LEUL);
4243 }
222a3336
AZ
4244 break;
4245 case 0x20: /* pinsrb */
3c5f4116 4246 if (mod == 3) {
4ba9938c 4247 gen_op_mov_TN_reg(MO_32, 0, rm);
3c5f4116
RH
4248 } else {
4249 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
4250 s->mem_index, MO_UB);
4251 }
34c6addd 4252 tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
222a3336
AZ
4253 xmm_regs[reg].XMM_B(val & 15)));
4254 break;
4255 case 0x21: /* insertps */
a7812ae4 4256 if (mod == 3) {
222a3336
AZ
4257 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4258 offsetof(CPUX86State,xmm_regs[rm]
4259 .XMM_L((val >> 6) & 3)));
a7812ae4 4260 } else {
3c5f4116
RH
4261 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4262 s->mem_index, MO_LEUL);
a7812ae4 4263 }
222a3336
AZ
4264 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4265 offsetof(CPUX86State,xmm_regs[reg]
4266 .XMM_L((val >> 4) & 3)));
4267 if ((val >> 0) & 1)
4268 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4269 cpu_env, offsetof(CPUX86State,
4270 xmm_regs[reg].XMM_L(0)));
4271 if ((val >> 1) & 1)
4272 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4273 cpu_env, offsetof(CPUX86State,
4274 xmm_regs[reg].XMM_L(1)));
4275 if ((val >> 2) & 1)
4276 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4277 cpu_env, offsetof(CPUX86State,
4278 xmm_regs[reg].XMM_L(2)));
4279 if ((val >> 3) & 1)
4280 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4281 cpu_env, offsetof(CPUX86State,
4282 xmm_regs[reg].XMM_L(3)));
4283 break;
4284 case 0x22:
4ba9938c 4285 if (ot == MO_32) { /* pinsrd */
3c5f4116 4286 if (mod == 3) {
80b02013 4287 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
3c5f4116 4288 } else {
80b02013
RH
4289 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4290 s->mem_index, MO_LEUL);
3c5f4116 4291 }
222a3336
AZ
4292 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4293 offsetof(CPUX86State,
4294 xmm_regs[reg].XMM_L(val & 3)));
4295 } else { /* pinsrq */
a7812ae4 4296#ifdef TARGET_X86_64
3c5f4116 4297 if (mod == 3) {
222a3336 4298 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
3c5f4116
RH
4299 } else {
4300 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
4301 s->mem_index, MO_LEQ);
4302 }
222a3336
AZ
4303 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4304 offsetof(CPUX86State,
4305 xmm_regs[reg].XMM_Q(val & 1)));
a7812ae4
PB
4306#else
4307 goto illegal_op;
4308#endif
222a3336
AZ
4309 }
4310 break;
4311 }
4312 return;
4313 }
4242b1bd
AZ
4314
4315 if (b1) {
4316 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4317 if (mod == 3) {
4318 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4319 } else {
4320 op2_offset = offsetof(CPUX86State,xmm_t0);
4eeb3939 4321 gen_lea_modrm(env, s, modrm);
323d1876 4322 gen_ldo_env_A0(s, op2_offset);
4242b1bd
AZ
4323 }
4324 } else {
4325 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4326 if (mod == 3) {
4327 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4328 } else {
4329 op2_offset = offsetof(CPUX86State,mmx_t0);
4eeb3939 4330 gen_lea_modrm(env, s, modrm);
323d1876 4331 gen_ldq_env_A0(s, op2_offset);
4242b1bd
AZ
4332 }
4333 }
0af10c86 4334 val = cpu_ldub_code(env, s->pc++);
4242b1bd 4335
222a3336 4336 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
3ca51d07 4337 set_cc_op(s, CC_OP_EFLAGS);
222a3336
AZ
4338
4339 if (s->dflag == 2)
4340 /* The helper must use entire 64-bit gp registers */
4341 val |= 1 << 8;
4342 }
4343
4242b1bd
AZ
4344 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4345 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4346 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4242b1bd 4347 break;
e2c3c2c5
RH
4348
4349 case 0x33a:
4350 /* Various integer extensions at 0f 3a f[0-f]. */
4351 b = modrm | (b1 << 8);
4352 modrm = cpu_ldub_code(env, s->pc++);
4353 reg = ((modrm >> 3) & 7) | rex_r;
4354
4355 switch (b) {
4356 case 0x3f0: /* rorx Gy,Ey, Ib */
4357 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4358 || !(s->prefix & PREFIX_VEX)
4359 || s->vex_l != 0) {
4360 goto illegal_op;
4361 }
4ba9938c 4362 ot = s->dflag == 2 ? MO_64 : MO_32;
e2c3c2c5
RH
4363 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4364 b = cpu_ldub_code(env, s->pc++);
4ba9938c 4365 if (ot == MO_64) {
e2c3c2c5
RH
4366 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], b & 63);
4367 } else {
4368 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4369 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
4370 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4371 }
4372 gen_op_mov_reg_T0(ot, reg);
4373 break;
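/* Worked example (not from the original source): rorx rotates right by an
   immediate without touching the flags; a 32-bit rorx of 0x80000001 by 1
   gives 0xc0000000, computed above in a 32-bit temporary so the upper half
   of a 64-bit target long ends up cleared. */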
4374
4375 default:
4376 goto illegal_op;
4377 }
4378 break;
4379
664e0f19
FB
4380 default:
4381 goto illegal_op;
4382 }
4383 } else {
4384 /* generic MMX or SSE operation */
d1e42c5c 4385 switch(b) {
d1e42c5c
FB
4386 case 0x70: /* pshufx insn */
4387 case 0xc6: /* pshufx insn */
4388 case 0xc2: /* compare insns */
4389 s->rip_offset = 1;
4390 break;
4391 default:
4392 break;
664e0f19
FB
4393 }
4394 if (is_xmm) {
4395 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4396 if (mod != 3) {
4eeb3939 4397 gen_lea_modrm(env, s, modrm);
664e0f19 4398 op2_offset = offsetof(CPUX86State,xmm_t0);
480c1cdb 4399 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
664e0f19
FB
4400 b == 0xc2)) {
4401 /* specific case for SSE single instructions */
4402 if (b1 == 2) {
4403 /* 32 bit access */
909be183 4404 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
651ba608 4405 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
664e0f19
FB
4406 } else {
4407 /* 64 bit access */
323d1876
RH
4408 gen_ldq_env_A0(s, offsetof(CPUX86State,
4409 xmm_t0.XMM_D(0)));
664e0f19
FB
4410 }
4411 } else {
323d1876 4412 gen_ldo_env_A0(s, op2_offset);
664e0f19
FB
4413 }
4414 } else {
4415 rm = (modrm & 7) | REX_B(s);
4416 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4417 }
4418 } else {
4419 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4420 if (mod != 3) {
4eeb3939 4421 gen_lea_modrm(env, s, modrm);
664e0f19 4422 op2_offset = offsetof(CPUX86State,mmx_t0);
323d1876 4423 gen_ldq_env_A0(s, op2_offset);
664e0f19
FB
4424 } else {
4425 rm = (modrm & 7);
4426 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4427 }
4428 }
4429 switch(b) {
a35f3ec7 4430 case 0x0f: /* 3DNow! data insns */
e771edab
AJ
4431 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4432 goto illegal_op;
0af10c86 4433 val = cpu_ldub_code(env, s->pc++);
d3eb5eae
BS
4434 sse_fn_epp = sse_op_table5[val];
4435 if (!sse_fn_epp) {
a35f3ec7 4436 goto illegal_op;
c4baa050 4437 }
5af45186
FB
4438 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4439 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4440 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
a35f3ec7 4441 break;
664e0f19
FB
4442 case 0x70: /* pshufx insn */
4443 case 0xc6: /* pshufx insn */
0af10c86 4444 val = cpu_ldub_code(env, s->pc++);
5af45186
FB
4445 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4446 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
c4baa050 4447 /* XXX: introduce a new table? */
d3eb5eae 4448 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
c4baa050 4449 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
664e0f19
FB
4450 break;
4451 case 0xc2:
4452 /* compare insns */
0af10c86 4453 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
4454 if (val >= 8)
4455 goto illegal_op;
d3eb5eae 4456 sse_fn_epp = sse_op_table4[val][b1];
c4baa050 4457
5af45186
FB
4458 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4459 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4460 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19 4461 break;
b8b6a50b
FB
4462 case 0xf7:
4463 /* maskmov: we must prepare A0 */
4464 if (mod != 3)
4465 goto illegal_op;
4466#ifdef TARGET_X86_64
4467 if (s->aflag == 2) {
4468 gen_op_movq_A0_reg(R_EDI);
4469 } else
4470#endif
4471 {
4472 gen_op_movl_A0_reg(R_EDI);
4473 if (s->aflag == 0)
4474 gen_op_andl_A0_ffff();
4475 }
4476 gen_add_A0_ds_seg(s);
4477
4478 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4479 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
c4baa050 4480 /* XXX: introduce a new table? */
d3eb5eae
BS
4481 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4482 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
b8b6a50b 4483 break;
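/* Illustrative note (not from the original source): maskmovq/maskmovdqu take
   no modrm memory operand; they implicitly store through (E)DI/RDI in the
   DS segment, which is why A0 is built from EDI with the current address
   size and segment base before the helper is called. */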
664e0f19 4484 default:
5af45186
FB
4485 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4486 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4487 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
4488 break;
4489 }
4490 if (b == 0x2e || b == 0x2f) {
3ca51d07 4491 set_cc_op(s, CC_OP_EFLAGS);
664e0f19
FB
4492 }
4493 }
4494}
4495
2c0262af
FB
4496/* convert one instruction. s->is_jmp is set if the translation must
4497 be stopped. Return the next pc value */
0af10c86
BS
4498static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4499 target_ulong pc_start)
2c0262af
FB
4500{
4501 int b, prefixes, aflag, dflag;
4502 int shift, ot;
4eeb3939 4503 int modrm, reg, rm, mod, op, opreg, val;
14ce26e7
FB
4504 target_ulong next_eip, tval;
4505 int rex_w, rex_r;
2c0262af 4506
fdefe51c 4507 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
70cff25e 4508 tcg_gen_debug_insn_start(pc_start);
fdefe51c 4509 }
2c0262af
FB
4510 s->pc = pc_start;
4511 prefixes = 0;
2c0262af 4512 s->override = -1;
14ce26e7
FB
4513 rex_w = -1;
4514 rex_r = 0;
4515#ifdef TARGET_X86_64
4516 s->rex_x = 0;
4517 s->rex_b = 0;
5fafdf24 4518 x86_64_hregs = 0;
14ce26e7
FB
4519#endif
4520 s->rip_offset = 0; /* for relative ip address */
701ed211
RH
4521 s->vex_l = 0;
4522 s->vex_v = 0;
2c0262af 4523 next_byte:
0af10c86 4524 b = cpu_ldub_code(env, s->pc);
2c0262af 4525 s->pc++;
4a6fd938
RH
4526 /* Collect prefixes. */
4527 switch (b) {
4528 case 0xf3:
4529 prefixes |= PREFIX_REPZ;
4530 goto next_byte;
4531 case 0xf2:
4532 prefixes |= PREFIX_REPNZ;
4533 goto next_byte;
4534 case 0xf0:
4535 prefixes |= PREFIX_LOCK;
4536 goto next_byte;
4537 case 0x2e:
4538 s->override = R_CS;
4539 goto next_byte;
4540 case 0x36:
4541 s->override = R_SS;
4542 goto next_byte;
4543 case 0x3e:
4544 s->override = R_DS;
4545 goto next_byte;
4546 case 0x26:
4547 s->override = R_ES;
4548 goto next_byte;
4549 case 0x64:
4550 s->override = R_FS;
4551 goto next_byte;
4552 case 0x65:
4553 s->override = R_GS;
4554 goto next_byte;
4555 case 0x66:
4556 prefixes |= PREFIX_DATA;
4557 goto next_byte;
4558 case 0x67:
4559 prefixes |= PREFIX_ADR;
4560 goto next_byte;
14ce26e7 4561#ifdef TARGET_X86_64
4a6fd938
RH
4562 case 0x40 ... 0x4f:
4563 if (CODE64(s)) {
14ce26e7
FB
4564 /* REX prefix */
4565 rex_w = (b >> 3) & 1;
4566 rex_r = (b & 0x4) << 1;
4567 s->rex_x = (b & 0x2) << 2;
4568 REX_B(s) = (b & 0x1) << 3;
4569 x86_64_hregs = 1; /* select uniform byte register addressing */
4570 goto next_byte;
4571 }
4a6fd938
RH
4572 break;
4573#endif
701ed211
RH
4574 case 0xc5: /* 2-byte VEX */
4575 case 0xc4: /* 3-byte VEX */
4576 /* VEX prefixes cannot be used except in 32-bit mode.
4577 Otherwise the instruction is LES or LDS. */
4578 if (s->code32 && !s->vm86) {
4579 static const int pp_prefix[4] = {
4580 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4581 };
4582 int vex3, vex2 = cpu_ldub_code(env, s->pc);
4583
4584 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4585 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4586 otherwise the instruction is LES or LDS. */
4587 break;
4588 }
4589 s->pc++;
4590
085d8134 4591 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
701ed211
RH
4592 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4593 | PREFIX_LOCK | PREFIX_DATA)) {
4594 goto illegal_op;
4595 }
4596#ifdef TARGET_X86_64
4597 if (x86_64_hregs) {
4598 goto illegal_op;
4599 }
4600#endif
4601 rex_r = (~vex2 >> 4) & 8;
4602 if (b == 0xc5) {
4603 vex3 = vex2;
4604 b = cpu_ldub_code(env, s->pc++);
4605 } else {
4606#ifdef TARGET_X86_64
4607 s->rex_x = (~vex2 >> 3) & 8;
4608 s->rex_b = (~vex2 >> 2) & 8;
4609#endif
4610 vex3 = cpu_ldub_code(env, s->pc++);
4611 rex_w = (vex3 >> 7) & 1;
4612 switch (vex2 & 0x1f) {
4613 case 0x01: /* Implied 0f leading opcode bytes. */
4614 b = cpu_ldub_code(env, s->pc++) | 0x100;
4615 break;
4616 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4617 b = 0x138;
4618 break;
4619 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4620 b = 0x13a;
4621 break;
4622 default: /* Reserved for future use. */
4623 goto illegal_op;
4624 }
4625 }
4626 s->vex_v = (~vex3 >> 3) & 0xf;
4627 s->vex_l = (vex3 >> 2) & 1;
4628 prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4629 }
4630 break;
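/* Illustrative note (not from the original source): the fields pulled apart
   above follow the VEX layout: the 2-byte c5 form carries R, vvvv, L and pp
   in a single byte, while the 3-byte c4 form adds X, B and a map-select
   field (01 = 0f, 02 = 0f 38, 03 = 0f 3a) in its first payload byte and
   W, vvvv, L, pp in its second; R, X, B and vvvv are stored inverted, hence
   the ~vex2 and ~vex3 extractions. */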
4a6fd938
RH
4631 }
4632
4633 /* Post-process prefixes. */
4a6fd938 4634 if (CODE64(s)) {
dec3fc96
RH
4635 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4636 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4637 over 0x66 if both are present. */
4638 dflag = (rex_w > 0 ? 2 : prefixes & PREFIX_DATA ? 0 : 1);
4639 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4640 aflag = (prefixes & PREFIX_ADR ? 1 : 2);
4641 } else {
4642 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4643 dflag = s->code32;
4644 if (prefixes & PREFIX_DATA) {
4645 dflag ^= 1;
14ce26e7 4646 }
dec3fc96
RH
4647 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4648 aflag = s->code32;
4649 if (prefixes & PREFIX_ADR) {
4650 aflag ^= 1;
14ce26e7 4651 }
2c0262af
FB
4652 }
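/* Illustrative note (not from the original source): with this encoding dflag
   is 0, 1 or 2 for 16-, 32- or 64-bit data.  In 64-bit mode rex.w forces
   dflag = 2 even when a 66 prefix is also present, while 66 alone selects
   16-bit data; outside 64-bit mode the 66 and 67 prefixes simply toggle the
   default sizes implied by the code segment. */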
4653
2c0262af
FB
4654 s->prefix = prefixes;
4655 s->aflag = aflag;
4656 s->dflag = dflag;
4657
4658 /* lock generation */
4659 if (prefixes & PREFIX_LOCK)
a7812ae4 4660 gen_helper_lock();
2c0262af
FB
4661
4662 /* now check op code */
4663 reswitch:
4664 switch(b) {
4665 case 0x0f:
4666 /**************************/
4667 /* extended op code */
0af10c86 4668 b = cpu_ldub_code(env, s->pc++) | 0x100;
2c0262af 4669 goto reswitch;
3b46e624 4670
2c0262af
FB
4671 /**************************/
4672 /* arith & logic */
4673 case 0x00 ... 0x05:
4674 case 0x08 ... 0x0d:
4675 case 0x10 ... 0x15:
4676 case 0x18 ... 0x1d:
4677 case 0x20 ... 0x25:
4678 case 0x28 ... 0x2d:
4679 case 0x30 ... 0x35:
4680 case 0x38 ... 0x3d:
4681 {
4682 int op, f, val;
4683 op = (b >> 3) & 7;
4684 f = (b >> 1) & 3;
4685
4686 if ((b & 1) == 0)
4ba9938c 4687 ot = MO_8;
2c0262af 4688 else
4ba9938c 4689 ot = dflag + MO_16;
3b46e624 4690
2c0262af
FB
4691 switch(f) {
4692 case 0: /* OP Ev, Gv */
0af10c86 4693 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 4694 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 4695 mod = (modrm >> 6) & 3;
14ce26e7 4696 rm = (modrm & 7) | REX_B(s);
2c0262af 4697 if (mod != 3) {
4eeb3939 4698 gen_lea_modrm(env, s, modrm);
2c0262af
FB
4699 opreg = OR_TMP0;
4700 } else if (op == OP_XORL && rm == reg) {
4701 xor_zero:
4702 /* xor reg, reg optimisation */
436ff2d2 4703 set_cc_op(s, CC_OP_CLR);
97212c88 4704 tcg_gen_movi_tl(cpu_T[0], 0);
57fec1fe 4705 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
4706 break;
4707 } else {
4708 opreg = rm;
4709 }
57fec1fe 4710 gen_op_mov_TN_reg(ot, 1, reg);
2c0262af
FB
4711 gen_op(s, op, ot, opreg);
4712 break;
4713 case 1: /* OP Gv, Ev */
0af10c86 4714 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4715 mod = (modrm >> 6) & 3;
14ce26e7
FB
4716 reg = ((modrm >> 3) & 7) | rex_r;
4717 rm = (modrm & 7) | REX_B(s);
2c0262af 4718 if (mod != 3) {
4eeb3939 4719 gen_lea_modrm(env, s, modrm);
0f712e10 4720 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
2c0262af
FB
4721 } else if (op == OP_XORL && rm == reg) {
4722 goto xor_zero;
4723 } else {
57fec1fe 4724 gen_op_mov_TN_reg(ot, 1, rm);
2c0262af
FB
4725 }
4726 gen_op(s, op, ot, reg);
4727 break;
4728 case 2: /* OP A, Iv */
0af10c86 4729 val = insn_get(env, s, ot);
2c0262af
FB
4730 gen_op_movl_T1_im(val);
4731 gen_op(s, op, ot, OR_EAX);
4732 break;
4733 }
4734 }
4735 break;
4736
ec9d6075
FB
4737 case 0x82:
4738 if (CODE64(s))
4739 goto illegal_op;
2c0262af
FB
4740 case 0x80: /* GRP1 */
4741 case 0x81:
4742 case 0x83:
4743 {
4744 int val;
4745
4746 if ((b & 1) == 0)
4ba9938c 4747 ot = MO_8;
2c0262af 4748 else
4ba9938c 4749 ot = dflag + MO_16;
3b46e624 4750
0af10c86 4751 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4752 mod = (modrm >> 6) & 3;
14ce26e7 4753 rm = (modrm & 7) | REX_B(s);
2c0262af 4754 op = (modrm >> 3) & 7;
3b46e624 4755
2c0262af 4756 if (mod != 3) {
14ce26e7
FB
4757 if (b == 0x83)
4758 s->rip_offset = 1;
4759 else
4760 s->rip_offset = insn_const_size(ot);
4eeb3939 4761 gen_lea_modrm(env, s, modrm);
2c0262af
FB
4762 opreg = OR_TMP0;
4763 } else {
14ce26e7 4764 opreg = rm;
2c0262af
FB
4765 }
4766
4767 switch(b) {
4768 default:
4769 case 0x80:
4770 case 0x81:
d64477af 4771 case 0x82:
0af10c86 4772 val = insn_get(env, s, ot);
2c0262af
FB
4773 break;
4774 case 0x83:
4ba9938c 4775 val = (int8_t)insn_get(env, s, MO_8);
2c0262af
FB
4776 break;
4777 }
4778 gen_op_movl_T1_im(val);
4779 gen_op(s, op, ot, opreg);
4780 }
4781 break;
4782
4783 /**************************/
4784 /* inc, dec, and other misc arith */
4785 case 0x40 ... 0x47: /* inc Gv */
4ba9938c 4786 ot = dflag ? MO_32 : MO_16;
2c0262af
FB
4787 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4788 break;
4789 case 0x48 ... 0x4f: /* dec Gv */
4ba9938c 4790 ot = dflag ? MO_32 : MO_16;
2c0262af
FB
4791 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4792 break;
4793 case 0xf6: /* GRP3 */
4794 case 0xf7:
4795 if ((b & 1) == 0)
4ba9938c 4796 ot = MO_8;
2c0262af 4797 else
4ba9938c 4798 ot = dflag + MO_16;
2c0262af 4799
0af10c86 4800 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4801 mod = (modrm >> 6) & 3;
14ce26e7 4802 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
4803 op = (modrm >> 3) & 7;
4804 if (mod != 3) {
14ce26e7
FB
4805 if (op == 0)
4806 s->rip_offset = insn_const_size(ot);
4eeb3939 4807 gen_lea_modrm(env, s, modrm);
909be183 4808 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 4809 } else {
57fec1fe 4810 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af
FB
4811 }
4812
4813 switch(op) {
4814 case 0: /* test */
0af10c86 4815 val = insn_get(env, s, ot);
2c0262af
FB
4816 gen_op_movl_T1_im(val);
4817 gen_op_testl_T0_T1_cc();
3ca51d07 4818 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af
FB
4819 break;
4820 case 2: /* not */
b6abf97d 4821 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
2c0262af 4822 if (mod != 3) {
fd8ca9f6 4823 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 4824 } else {
57fec1fe 4825 gen_op_mov_reg_T0(ot, rm);
2c0262af
FB
4826 }
4827 break;
4828 case 3: /* neg */
b6abf97d 4829 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
2c0262af 4830 if (mod != 3) {
fd8ca9f6 4831 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 4832 } else {
57fec1fe 4833 gen_op_mov_reg_T0(ot, rm);
2c0262af
FB
4834 }
4835 gen_op_update_neg_cc();
3ca51d07 4836 set_cc_op(s, CC_OP_SUBB + ot);
2c0262af
FB
4837 break;
4838 case 4: /* mul */
4839 switch(ot) {
4ba9938c
RH
4840 case MO_8:
4841 gen_op_mov_TN_reg(MO_8, 1, R_EAX);
0211e5af
FB
4842 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4843 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4844 /* XXX: use 32 bit mul which could be faster */
4845 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4ba9938c 4846 gen_op_mov_reg_T0(MO_16, R_EAX);
0211e5af
FB
4847 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4848 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
3ca51d07 4849 set_cc_op(s, CC_OP_MULB);
2c0262af 4850 break;
4ba9938c
RH
4851 case MO_16:
4852 gen_op_mov_TN_reg(MO_16, 1, R_EAX);
0211e5af
FB
4853 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4854 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4855 /* XXX: use 32 bit mul which could be faster */
4856 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4ba9938c 4857 gen_op_mov_reg_T0(MO_16, R_EAX);
0211e5af
FB
4858 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4859 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4ba9938c 4860 gen_op_mov_reg_T0(MO_16, R_EDX);
0211e5af 4861 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
3ca51d07 4862 set_cc_op(s, CC_OP_MULW);
2c0262af
FB
4863 break;
4864 default:
4ba9938c 4865 case MO_32:
a4bcea3d
RH
4866 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4867 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4868 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4869 cpu_tmp2_i32, cpu_tmp3_i32);
4870 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4871 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4872 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4873 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 4874 set_cc_op(s, CC_OP_MULL);
2c0262af 4875 break;
14ce26e7 4876#ifdef TARGET_X86_64
4ba9938c 4877 case MO_64:
a4bcea3d
RH
4878 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4879 cpu_T[0], cpu_regs[R_EAX]);
4880 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4881 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 4882 set_cc_op(s, CC_OP_MULQ);
14ce26e7
FB
4883 break;
4884#endif
2c0262af 4885 }
2c0262af
FB
4886 break;
4887 case 5: /* imul */
4888 switch(ot) {
4ba9938c
RH
4889 case MO_8:
4890 gen_op_mov_TN_reg(MO_8, 1, R_EAX);
0211e5af
FB
4891 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4892 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4893 /* XXX: use 32 bit mul which could be faster */
4894 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4ba9938c 4895 gen_op_mov_reg_T0(MO_16, R_EAX);
0211e5af
FB
4896 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4897 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4898 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
3ca51d07 4899 set_cc_op(s, CC_OP_MULB);
2c0262af 4900 break;
4ba9938c
RH
4901 case MO_16:
4902 gen_op_mov_TN_reg(MO_16, 1, R_EAX);
0211e5af
FB
4903 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4904 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4905 /* XXX: use 32 bit mul which could be faster */
4906 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4ba9938c 4907 gen_op_mov_reg_T0(MO_16, R_EAX);
0211e5af
FB
4908 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4909 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4910 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4911 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4ba9938c 4912 gen_op_mov_reg_T0(MO_16, R_EDX);
3ca51d07 4913 set_cc_op(s, CC_OP_MULW);
2c0262af
FB
4914 break;
4915 default:
4ba9938c 4916 case MO_32:
a4bcea3d
RH
4917 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4918 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4919 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4920 cpu_tmp2_i32, cpu_tmp3_i32);
4921 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4922 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4923 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
4924 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4925 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
4926 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
3ca51d07 4927 set_cc_op(s, CC_OP_MULL);
2c0262af 4928 break;
14ce26e7 4929#ifdef TARGET_X86_64
4ba9938c 4930 case MO_64:
a4bcea3d
RH
4931 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4932 cpu_T[0], cpu_regs[R_EAX]);
4933 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4934 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
4935 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 4936 set_cc_op(s, CC_OP_MULQ);
14ce26e7
FB
4937 break;
4938#endif
2c0262af 4939 }
2c0262af
FB
4940 break;
4941 case 6: /* div */
4942 switch(ot) {
4ba9938c 4943 case MO_8:
14ce26e7 4944 gen_jmp_im(pc_start - s->cs_base);
7923057b 4945 gen_helper_divb_AL(cpu_env, cpu_T[0]);
2c0262af 4946 break;
4ba9938c 4947 case MO_16:
14ce26e7 4948 gen_jmp_im(pc_start - s->cs_base);
7923057b 4949 gen_helper_divw_AX(cpu_env, cpu_T[0]);
2c0262af
FB
4950 break;
4951 default:
4ba9938c 4952 case MO_32:
14ce26e7 4953 gen_jmp_im(pc_start - s->cs_base);
7923057b 4954 gen_helper_divl_EAX(cpu_env, cpu_T[0]);
14ce26e7
FB
4955 break;
4956#ifdef TARGET_X86_64
4ba9938c 4957 case MO_64:
14ce26e7 4958 gen_jmp_im(pc_start - s->cs_base);
7923057b 4959 gen_helper_divq_EAX(cpu_env, cpu_T[0]);
2c0262af 4960 break;
14ce26e7 4961#endif
2c0262af
FB
4962 }
4963 break;
4964 case 7: /* idiv */
4965 switch(ot) {
4ba9938c 4966 case MO_8:
14ce26e7 4967 gen_jmp_im(pc_start - s->cs_base);
7923057b 4968 gen_helper_idivb_AL(cpu_env, cpu_T[0]);
2c0262af 4969 break;
4ba9938c 4970 case MO_16:
14ce26e7 4971 gen_jmp_im(pc_start - s->cs_base);
7923057b 4972 gen_helper_idivw_AX(cpu_env, cpu_T[0]);
2c0262af
FB
4973 break;
4974 default:
4ba9938c 4975 case MO_32:
14ce26e7 4976 gen_jmp_im(pc_start - s->cs_base);
7923057b 4977 gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
14ce26e7
FB
4978 break;
4979#ifdef TARGET_X86_64
4ba9938c 4980 case MO_64:
14ce26e7 4981 gen_jmp_im(pc_start - s->cs_base);
7923057b 4982 gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
2c0262af 4983 break;
14ce26e7 4984#endif
2c0262af
FB
4985 }
4986 break;
4987 default:
4988 goto illegal_op;
4989 }
4990 break;
4991
4992 case 0xfe: /* GRP4 */
4993 case 0xff: /* GRP5 */
4994 if ((b & 1) == 0)
4ba9938c 4995 ot = MO_8;
2c0262af 4996 else
4ba9938c 4997 ot = dflag + MO_16;
2c0262af 4998
0af10c86 4999 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5000 mod = (modrm >> 6) & 3;
14ce26e7 5001 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
5002 op = (modrm >> 3) & 7;
5003 if (op >= 2 && b == 0xfe) {
5004 goto illegal_op;
5005 }
14ce26e7 5006 if (CODE64(s)) {
aba9d61e 5007 if (op == 2 || op == 4) {
14ce26e7 5008 /* operand size for jumps is 64 bit */
4ba9938c 5009 ot = MO_64;
aba9d61e 5010 } else if (op == 3 || op == 5) {
4ba9938c 5011 ot = dflag ? MO_32 + (rex_w == 1) : MO_16;
14ce26e7
FB
5012 } else if (op == 6) {
5013 /* default push size is 64 bit */
4ba9938c 5014 ot = dflag ? MO_64 : MO_16;
14ce26e7
FB
5015 }
5016 }
2c0262af 5017 if (mod != 3) {
4eeb3939 5018 gen_lea_modrm(env, s, modrm);
2c0262af 5019 if (op >= 2 && op != 3 && op != 5)
909be183 5020 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 5021 } else {
57fec1fe 5022 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af
FB
5023 }
5024
5025 switch(op) {
5026 case 0: /* inc Ev */
5027 if (mod != 3)
5028 opreg = OR_TMP0;
5029 else
5030 opreg = rm;
5031 gen_inc(s, ot, opreg, 1);
5032 break;
5033 case 1: /* dec Ev */
5034 if (mod != 3)
5035 opreg = OR_TMP0;
5036 else
5037 opreg = rm;
5038 gen_inc(s, ot, opreg, -1);
5039 break;
5040 case 2: /* call Ev */
4f31916f 5041 /* XXX: optimize if memory (no 'and' is necessary) */
2c0262af
FB
5042 if (s->dflag == 0)
5043 gen_op_andl_T0_ffff();
2c0262af 5044 next_eip = s->pc - s->cs_base;
1ef38687 5045 gen_movtl_T1_im(next_eip);
4f31916f
FB
5046 gen_push_T1(s);
5047 gen_op_jmp_T0();
2c0262af
FB
5048 gen_eob(s);
5049 break;
61382a50 5050 case 3: /* lcall Ev */
0f712e10 5051 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4ba9938c 5052 gen_add_A0_im(s, 1 << (ot - MO_16 + 1));
cc1a80df 5053 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
2c0262af
FB
5054 do_lcall:
5055 if (s->pe && !s->vm86) {
773cdfcc 5056 gen_update_cc_op(s);
14ce26e7 5057 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 5058 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2
BS
5059 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
5060 tcg_const_i32(dflag),
a7812ae4 5061 tcg_const_i32(s->pc - pc_start));
2c0262af 5062 } else {
b6abf97d 5063 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2
BS
5064 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
5065 tcg_const_i32(dflag),
a7812ae4 5066 tcg_const_i32(s->pc - s->cs_base));
2c0262af
FB
5067 }
5068 gen_eob(s);
5069 break;
5070 case 4: /* jmp Ev */
5071 if (s->dflag == 0)
5072 gen_op_andl_T0_ffff();
5073 gen_op_jmp_T0();
5074 gen_eob(s);
5075 break;
5076 case 5: /* ljmp Ev */
0f712e10 5077 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4ba9938c 5078 gen_add_A0_im(s, 1 << (ot - MO_16 + 1));
cc1a80df 5079 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
2c0262af
FB
5080 do_ljmp:
5081 if (s->pe && !s->vm86) {
773cdfcc 5082 gen_update_cc_op(s);
14ce26e7 5083 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 5084 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 5085 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
a7812ae4 5086 tcg_const_i32(s->pc - pc_start));
2c0262af 5087 } else {
3bd7da9e 5088 gen_op_movl_seg_T0_vm(R_CS);
2c0262af
FB
5089 gen_op_movl_T0_T1();
5090 gen_op_jmp_T0();
5091 }
5092 gen_eob(s);
5093 break;
5094 case 6: /* push Ev */
5095 gen_push_T0(s);
5096 break;
5097 default:
5098 goto illegal_op;
5099 }
5100 break;
5101
5102 case 0x84: /* test Ev, Gv */
5fafdf24 5103 case 0x85:
2c0262af 5104 if ((b & 1) == 0)
4ba9938c 5105 ot = MO_8;
2c0262af 5106 else
4ba9938c 5107 ot = dflag + MO_16;
2c0262af 5108
0af10c86 5109 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5110 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5111
0af10c86 5112 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
57fec1fe 5113 gen_op_mov_TN_reg(ot, 1, reg);
2c0262af 5114 gen_op_testl_T0_T1_cc();
3ca51d07 5115 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af 5116 break;
3b46e624 5117
2c0262af
FB
5118 case 0xa8: /* test eAX, Iv */
5119 case 0xa9:
5120 if ((b & 1) == 0)
4ba9938c 5121 ot = MO_8;
2c0262af 5122 else
4ba9938c 5123 ot = dflag + MO_16;
0af10c86 5124 val = insn_get(env, s, ot);
2c0262af 5125
57fec1fe 5126 gen_op_mov_TN_reg(ot, 0, OR_EAX);
2c0262af
FB
5127 gen_op_movl_T1_im(val);
5128 gen_op_testl_T0_T1_cc();
3ca51d07 5129 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af 5130 break;
3b46e624 5131
2c0262af 5132 case 0x98: /* CWDE/CBW */
14ce26e7
FB
5133#ifdef TARGET_X86_64
5134 if (dflag == 2) {
4ba9938c 5135 gen_op_mov_TN_reg(MO_32, 0, R_EAX);
e108dd01 5136 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4ba9938c 5137 gen_op_mov_reg_T0(MO_64, R_EAX);
14ce26e7
FB
5138 } else
5139#endif
e108dd01 5140 if (dflag == 1) {
4ba9938c 5141 gen_op_mov_TN_reg(MO_16, 0, R_EAX);
e108dd01 5142 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4ba9938c 5143 gen_op_mov_reg_T0(MO_32, R_EAX);
e108dd01 5144 } else {
4ba9938c 5145 gen_op_mov_TN_reg(MO_8, 0, R_EAX);
e108dd01 5146 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4ba9938c 5147 gen_op_mov_reg_T0(MO_16, R_EAX);
e108dd01 5148 }
2c0262af
FB
5149 break;
5150 case 0x99: /* CDQ/CWD */
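            /* Replicate the sign bit of AX/EAX/RAX into DX/EDX/RDX. */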
14ce26e7
FB
5151#ifdef TARGET_X86_64
5152 if (dflag == 2) {
4ba9938c 5153 gen_op_mov_TN_reg(MO_64, 0, R_EAX);
e108dd01 5154 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
4ba9938c 5155 gen_op_mov_reg_T0(MO_64, R_EDX);
14ce26e7
FB
5156 } else
5157#endif
e108dd01 5158 if (dflag == 1) {
4ba9938c 5159 gen_op_mov_TN_reg(MO_32, 0, R_EAX);
e108dd01
FB
5160 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5161 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
4ba9938c 5162 gen_op_mov_reg_T0(MO_32, R_EDX);
e108dd01 5163 } else {
4ba9938c 5164 gen_op_mov_TN_reg(MO_16, 0, R_EAX);
e108dd01
FB
5165 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5166 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
4ba9938c 5167 gen_op_mov_reg_T0(MO_16, R_EDX);
e108dd01 5168 }
2c0262af
FB
5169 break;
5170 case 0x1af: /* imul Gv, Ev */
5171 case 0x69: /* imul Gv, Ev, I */
5172 case 0x6b:
4ba9938c 5173 ot = dflag + MO_16;
0af10c86 5174 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
5175 reg = ((modrm >> 3) & 7) | rex_r;
5176 if (b == 0x69)
5177 s->rip_offset = insn_const_size(ot);
5178 else if (b == 0x6b)
5179 s->rip_offset = 1;
0af10c86 5180 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2c0262af 5181 if (b == 0x69) {
0af10c86 5182 val = insn_get(env, s, ot);
2c0262af
FB
5183 gen_op_movl_T1_im(val);
5184 } else if (b == 0x6b) {
4ba9938c 5185 val = (int8_t)insn_get(env, s, MO_8);
2c0262af
FB
5186 gen_op_movl_T1_im(val);
5187 } else {
57fec1fe 5188 gen_op_mov_TN_reg(ot, 1, reg);
2c0262af 5189 }
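            /* cc_dst receives the truncated product and cc_src is computed so that
               it is non-zero exactly when the signed result does not fit in the
               destination; CC_OP_MULB + ot then turns that into CF/OF. */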
a4bcea3d 5190 switch (ot) {
0211e5af 5191#ifdef TARGET_X86_64
4ba9938c 5192 case MO_64:
a4bcea3d
RH
5193 tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]);
5194 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5195 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
5196 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
5197 break;
0211e5af 5198#endif
4ba9938c 5199 case MO_32:
a4bcea3d
RH
5200 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5201 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5202 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5203 cpu_tmp2_i32, cpu_tmp3_i32);
5204 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
5205 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5206 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5207 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5208 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
5209 break;
5210 default:
0211e5af
FB
5211 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5212 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5213 /* XXX: use 32 bit mul which could be faster */
5214 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5215 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5216 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5217 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
a4bcea3d
RH
5218 gen_op_mov_reg_T0(ot, reg);
5219 break;
2c0262af 5220 }
3ca51d07 5221 set_cc_op(s, CC_OP_MULB + ot);
2c0262af
FB
5222 break;
5223 case 0x1c0:
5224 case 0x1c1: /* xadd Ev, Gv */
5225 if ((b & 1) == 0)
4ba9938c 5226 ot = MO_8;
2c0262af 5227 else
4ba9938c 5228 ot = dflag + MO_16;
0af10c86 5229 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5230 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5231 mod = (modrm >> 6) & 3;
5232 if (mod == 3) {
14ce26e7 5233 rm = (modrm & 7) | REX_B(s);
57fec1fe
FB
5234 gen_op_mov_TN_reg(ot, 0, reg);
5235 gen_op_mov_TN_reg(ot, 1, rm);
2c0262af 5236 gen_op_addl_T0_T1();
57fec1fe
FB
5237 gen_op_mov_reg_T1(ot, reg);
5238 gen_op_mov_reg_T0(ot, rm);
2c0262af 5239 } else {
4eeb3939 5240 gen_lea_modrm(env, s, modrm);
57fec1fe 5241 gen_op_mov_TN_reg(ot, 0, reg);
0f712e10 5242 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
2c0262af 5243 gen_op_addl_T0_T1();
fd8ca9f6 5244 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
57fec1fe 5245 gen_op_mov_reg_T1(ot, reg);
2c0262af
FB
5246 }
5247 gen_op_update2_cc();
3ca51d07 5248 set_cc_op(s, CC_OP_ADDB + ot);
2c0262af
FB
5249 break;
5250 case 0x1b0:
5251 case 0x1b1: /* cmpxchg Ev, Gv */
cad3a37d 5252 {
1130328e 5253 int label1, label2;
1e4840bf 5254 TCGv t0, t1, t2, a0;
cad3a37d
FB
5255
5256 if ((b & 1) == 0)
4ba9938c 5257 ot = MO_8;
cad3a37d 5258 else
4ba9938c 5259 ot = dflag + MO_16;
0af10c86 5260 modrm = cpu_ldub_code(env, s->pc++);
cad3a37d
FB
5261 reg = ((modrm >> 3) & 7) | rex_r;
5262 mod = (modrm >> 6) & 3;
a7812ae4
PB
5263 t0 = tcg_temp_local_new();
5264 t1 = tcg_temp_local_new();
5265 t2 = tcg_temp_local_new();
5266 a0 = tcg_temp_local_new();
1e4840bf 5267 gen_op_mov_v_reg(ot, t1, reg);
cad3a37d
FB
5268 if (mod == 3) {
5269 rm = (modrm & 7) | REX_B(s);
1e4840bf 5270 gen_op_mov_v_reg(ot, t0, rm);
cad3a37d 5271 } else {
4eeb3939 5272 gen_lea_modrm(env, s, modrm);
1e4840bf 5273 tcg_gen_mov_tl(a0, cpu_A0);
323d1876 5274 gen_op_ld_v(s, ot, t0, a0);
cad3a37d
FB
5275 rm = 0; /* avoid warning */
5276 }
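                /* CMPXCHG: compare the accumulator (t2, a copy of rAX) with the
                   destination (t0); if they are equal the source (t1) is stored to
                   the destination, otherwise the destination is loaded into rAX.
                   Flags are set as for CMP rAX, dest. */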
5277 label1 = gen_new_label();
a3251186
RH
5278 tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5279 gen_extu(ot, t0);
1e4840bf 5280 gen_extu(ot, t2);
a3251186 5281 tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
f7e80adf 5282 label2 = gen_new_label();
cad3a37d 5283 if (mod == 3) {
1e4840bf 5284 gen_op_mov_reg_v(ot, R_EAX, t0);
1130328e
FB
5285 tcg_gen_br(label2);
5286 gen_set_label(label1);
1e4840bf 5287 gen_op_mov_reg_v(ot, rm, t1);
cad3a37d 5288 } else {
f7e80adf
AG
5289 /* perform no-op store cycle like physical cpu; must be
5290 before changing accumulator to ensure idempotency if
5291 the store faults and the instruction is restarted */
323d1876 5292 gen_op_st_v(s, ot, t0, a0);
1e4840bf 5293 gen_op_mov_reg_v(ot, R_EAX, t0);
f7e80adf 5294 tcg_gen_br(label2);
1130328e 5295 gen_set_label(label1);
323d1876 5296 gen_op_st_v(s, ot, t1, a0);
cad3a37d 5297 }
f7e80adf 5298 gen_set_label(label2);
1e4840bf 5299 tcg_gen_mov_tl(cpu_cc_src, t0);
a3251186
RH
5300 tcg_gen_mov_tl(cpu_cc_srcT, t2);
5301 tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
3ca51d07 5302 set_cc_op(s, CC_OP_SUBB + ot);
1e4840bf
FB
5303 tcg_temp_free(t0);
5304 tcg_temp_free(t1);
5305 tcg_temp_free(t2);
5306 tcg_temp_free(a0);
2c0262af 5307 }
2c0262af
FB
5308 break;
5309 case 0x1c7: /* cmpxchg8b */
0af10c86 5310 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5311 mod = (modrm >> 6) & 3;
71c3558e 5312 if ((mod == 3) || ((modrm & 0x38) != 0x8))
2c0262af 5313 goto illegal_op;
1b9d9ebb
FB
5314#ifdef TARGET_X86_64
5315 if (dflag == 2) {
5316 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5317 goto illegal_op;
5318 gen_jmp_im(pc_start - s->cs_base);
773cdfcc 5319 gen_update_cc_op(s);
4eeb3939 5320 gen_lea_modrm(env, s, modrm);
92fc4b58 5321 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
1b9d9ebb
FB
5322 } else
5323#endif
5324 {
5325 if (!(s->cpuid_features & CPUID_CX8))
5326 goto illegal_op;
5327 gen_jmp_im(pc_start - s->cs_base);
773cdfcc 5328 gen_update_cc_op(s);
4eeb3939 5329 gen_lea_modrm(env, s, modrm);
92fc4b58 5330 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
1b9d9ebb 5331 }
3ca51d07 5332 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 5333 break;
3b46e624 5334
2c0262af
FB
5335 /**************************/
5336 /* push/pop */
5337 case 0x50 ... 0x57: /* push */
4ba9938c 5338 gen_op_mov_TN_reg(MO_32, 0, (b & 7) | REX_B(s));
2c0262af
FB
5339 gen_push_T0(s);
5340 break;
5341 case 0x58 ... 0x5f: /* pop */
14ce26e7 5342 if (CODE64(s)) {
4ba9938c 5343 ot = dflag ? MO_64 : MO_16;
14ce26e7 5344 } else {
4ba9938c 5345 ot = dflag + MO_16;
14ce26e7 5346 }
2c0262af 5347 gen_pop_T0(s);
77729c24 5348 /* NOTE: order is important for pop %sp */
2c0262af 5349 gen_pop_update(s);
57fec1fe 5350 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
2c0262af
FB
5351 break;
5352 case 0x60: /* pusha */
14ce26e7
FB
5353 if (CODE64(s))
5354 goto illegal_op;
2c0262af
FB
5355 gen_pusha(s);
5356 break;
5357 case 0x61: /* popa */
14ce26e7
FB
5358 if (CODE64(s))
5359 goto illegal_op;
2c0262af
FB
5360 gen_popa(s);
5361 break;
5362 case 0x68: /* push Iv */
5363 case 0x6a:
14ce26e7 5364 if (CODE64(s)) {
4ba9938c 5365 ot = dflag ? MO_64 : MO_16;
14ce26e7 5366 } else {
4ba9938c 5367 ot = dflag + MO_16;
14ce26e7 5368 }
2c0262af 5369 if (b == 0x68)
0af10c86 5370 val = insn_get(env, s, ot);
2c0262af 5371 else
4ba9938c 5372 val = (int8_t)insn_get(env, s, MO_8);
1b90d56e 5373 tcg_gen_movi_tl(cpu_T[0], val);
2c0262af
FB
5374 gen_push_T0(s);
5375 break;
5376 case 0x8f: /* pop Ev */
14ce26e7 5377 if (CODE64(s)) {
4ba9938c 5378 ot = dflag ? MO_64 : MO_16;
14ce26e7 5379 } else {
4ba9938c 5380 ot = dflag + MO_16;
14ce26e7 5381 }
0af10c86 5382 modrm = cpu_ldub_code(env, s->pc++);
77729c24 5383 mod = (modrm >> 6) & 3;
2c0262af 5384 gen_pop_T0(s);
77729c24
FB
5385 if (mod == 3) {
5386 /* NOTE: order is important for pop %sp */
5387 gen_pop_update(s);
14ce26e7 5388 rm = (modrm & 7) | REX_B(s);
57fec1fe 5389 gen_op_mov_reg_T0(ot, rm);
77729c24
FB
5390 } else {
5391 /* NOTE: order is important too for MMU exceptions */
14ce26e7 5392 s->popl_esp_hack = 1 << ot;
0af10c86 5393 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
77729c24
FB
5394 s->popl_esp_hack = 0;
5395 gen_pop_update(s);
5396 }
2c0262af
FB
5397 break;
5398 case 0xc8: /* enter */
5399 {
5400 int level;
0af10c86 5401 val = cpu_lduw_code(env, s->pc);
2c0262af 5402 s->pc += 2;
0af10c86 5403 level = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5404 gen_enter(s, val, level);
5405 }
5406 break;
5407 case 0xc9: /* leave */
5408 /* XXX: exception not precise (ESP is updated before potential exception) */
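            /* LEAVE: copy rBP into rSP (using the stack-segment width), then pop rBP. */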
14ce26e7 5409 if (CODE64(s)) {
4ba9938c
RH
5410 gen_op_mov_TN_reg(MO_64, 0, R_EBP);
5411 gen_op_mov_reg_T0(MO_64, R_ESP);
14ce26e7 5412 } else if (s->ss32) {
4ba9938c
RH
5413 gen_op_mov_TN_reg(MO_32, 0, R_EBP);
5414 gen_op_mov_reg_T0(MO_32, R_ESP);
2c0262af 5415 } else {
4ba9938c
RH
5416 gen_op_mov_TN_reg(MO_16, 0, R_EBP);
5417 gen_op_mov_reg_T0(MO_16, R_ESP);
2c0262af
FB
5418 }
5419 gen_pop_T0(s);
14ce26e7 5420 if (CODE64(s)) {
4ba9938c 5421 ot = dflag ? MO_64 : MO_16;
14ce26e7 5422 } else {
4ba9938c 5423 ot = dflag + MO_16;
14ce26e7 5424 }
57fec1fe 5425 gen_op_mov_reg_T0(ot, R_EBP);
2c0262af
FB
5426 gen_pop_update(s);
5427 break;
5428 case 0x06: /* push es */
5429 case 0x0e: /* push cs */
5430 case 0x16: /* push ss */
5431 case 0x1e: /* push ds */
14ce26e7
FB
5432 if (CODE64(s))
5433 goto illegal_op;
2c0262af
FB
5434 gen_op_movl_T0_seg(b >> 3);
5435 gen_push_T0(s);
5436 break;
5437 case 0x1a0: /* push fs */
5438 case 0x1a8: /* push gs */
5439 gen_op_movl_T0_seg((b >> 3) & 7);
5440 gen_push_T0(s);
5441 break;
5442 case 0x07: /* pop es */
5443 case 0x17: /* pop ss */
5444 case 0x1f: /* pop ds */
14ce26e7
FB
5445 if (CODE64(s))
5446 goto illegal_op;
2c0262af
FB
5447 reg = b >> 3;
5448 gen_pop_T0(s);
5449 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5450 gen_pop_update(s);
5451 if (reg == R_SS) {
a2cc3b24
FB
5452 /* if reg == SS, inhibit interrupts/trace. */
5453 /* If several instructions disable interrupts, only the
5454 _first_ does it */
5455 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
f0967a1a 5456 gen_helper_set_inhibit_irq(cpu_env);
2c0262af
FB
5457 s->tf = 0;
5458 }
5459 if (s->is_jmp) {
14ce26e7 5460 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5461 gen_eob(s);
5462 }
5463 break;
5464 case 0x1a1: /* pop fs */
5465 case 0x1a9: /* pop gs */
5466 gen_pop_T0(s);
5467 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5468 gen_pop_update(s);
5469 if (s->is_jmp) {
14ce26e7 5470 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5471 gen_eob(s);
5472 }
5473 break;
5474
5475 /**************************/
5476 /* mov */
5477 case 0x88:
5478 case 0x89: /* mov Gv, Ev */
5479 if ((b & 1) == 0)
4ba9938c 5480 ot = MO_8;
2c0262af 5481 else
4ba9938c 5482 ot = dflag + MO_16;
0af10c86 5483 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5484 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5485
2c0262af 5486 /* generate a generic store */
0af10c86 5487 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
2c0262af
FB
5488 break;
5489 case 0xc6:
5490 case 0xc7: /* mov Ev, Iv */
5491 if ((b & 1) == 0)
4ba9938c 5492 ot = MO_8;
2c0262af 5493 else
4ba9938c 5494 ot = dflag + MO_16;
0af10c86 5495 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5496 mod = (modrm >> 6) & 3;
14ce26e7
FB
5497 if (mod != 3) {
5498 s->rip_offset = insn_const_size(ot);
4eeb3939 5499 gen_lea_modrm(env, s, modrm);
14ce26e7 5500 }
0af10c86 5501 val = insn_get(env, s, ot);
1b90d56e 5502 tcg_gen_movi_tl(cpu_T[0], val);
fd8ca9f6
RH
5503 if (mod != 3) {
5504 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5505 } else {
57fec1fe 5506 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
fd8ca9f6 5507 }
2c0262af
FB
5508 break;
5509 case 0x8a:
5510 case 0x8b: /* mov Ev, Gv */
5511 if ((b & 1) == 0)
4ba9938c 5512 ot = MO_8;
2c0262af 5513 else
4ba9938c 5514 ot = MO_16 + dflag;
0af10c86 5515 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5516 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5517
0af10c86 5518 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
57fec1fe 5519 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
5520 break;
5521 case 0x8e: /* mov seg, Gv */
0af10c86 5522 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5523 reg = (modrm >> 3) & 7;
5524 if (reg >= 6 || reg == R_CS)
5525 goto illegal_op;
4ba9938c 5526 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
2c0262af
FB
5527 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5528 if (reg == R_SS) {
5529 /* if reg == SS, inhibit interrupts/trace */
a2cc3b24
FB
5530 /* If several instructions disable interrupts, only the
5531 _first_ does it */
5532 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
f0967a1a 5533 gen_helper_set_inhibit_irq(cpu_env);
2c0262af
FB
5534 s->tf = 0;
5535 }
5536 if (s->is_jmp) {
14ce26e7 5537 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5538 gen_eob(s);
5539 }
5540 break;
5541 case 0x8c: /* mov Gv, seg */
0af10c86 5542 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5543 reg = (modrm >> 3) & 7;
5544 mod = (modrm >> 6) & 3;
5545 if (reg >= 6)
5546 goto illegal_op;
5547 gen_op_movl_T0_seg(reg);
14ce26e7 5548 if (mod == 3)
4ba9938c 5549 ot = MO_16 + dflag;
14ce26e7 5550 else
4ba9938c 5551 ot = MO_16;
0af10c86 5552 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
5553 break;
5554
5555 case 0x1b6: /* movzbS Gv, Eb */
5556 case 0x1b7: /* movzwS Gv, Eb */
5557 case 0x1be: /* movsbS Gv, Eb */
5558 case 0x1bf: /* movswS Gv, Eb */
5559 {
c8fbc479
RH
5560 TCGMemOp d_ot;
5561 TCGMemOp s_ot;
5562
2c0262af 5563 /* d_ot is the size of destination */
4ba9938c 5564 d_ot = dflag + MO_16;
2c0262af 5565 /* ot is the size of source */
4ba9938c 5566 ot = (b & 1) + MO_8;
c8fbc479
RH
5567 /* s_ot is the sign+size of source */
5568 s_ot = b & 8 ? MO_SIGN | ot : ot;
5569
0af10c86 5570 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5571 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 5572 mod = (modrm >> 6) & 3;
14ce26e7 5573 rm = (modrm & 7) | REX_B(s);
3b46e624 5574
2c0262af 5575 if (mod == 3) {
57fec1fe 5576 gen_op_mov_TN_reg(ot, 0, rm);
c8fbc479
RH
5577 switch (s_ot) {
5578 case MO_UB:
e108dd01 5579 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
2c0262af 5580 break;
c8fbc479 5581 case MO_SB:
e108dd01 5582 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
2c0262af 5583 break;
c8fbc479 5584 case MO_UW:
e108dd01 5585 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
2c0262af
FB
5586 break;
5587 default:
c8fbc479 5588 case MO_SW:
e108dd01 5589 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
2c0262af
FB
5590 break;
5591 }
57fec1fe 5592 gen_op_mov_reg_T0(d_ot, reg);
2c0262af 5593 } else {
4eeb3939 5594 gen_lea_modrm(env, s, modrm);
c8fbc479 5595 gen_op_ld_v(s, s_ot, cpu_T[0], cpu_A0);
57fec1fe 5596 gen_op_mov_reg_T0(d_ot, reg);
2c0262af
FB
5597 }
5598 }
5599 break;
5600
5601 case 0x8d: /* lea */
4ba9938c 5602 ot = dflag + MO_16;
0af10c86 5603 modrm = cpu_ldub_code(env, s->pc++);
3a1d9b8b
FB
5604 mod = (modrm >> 6) & 3;
5605 if (mod == 3)
5606 goto illegal_op;
14ce26e7 5607 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5608 /* we must ensure that no segment is added */
5609 s->override = -1;
5610 val = s->addseg;
5611 s->addseg = 0;
4eeb3939 5612 gen_lea_modrm(env, s, modrm);
2c0262af 5613 s->addseg = val;
4ba9938c 5614 gen_op_mov_reg_A0(ot - MO_16, reg);
2c0262af 5615 break;
3b46e624 5616
2c0262af
FB
5617 case 0xa0: /* mov EAX, Ov */
5618 case 0xa1:
5619 case 0xa2: /* mov Ov, EAX */
5620 case 0xa3:
2c0262af 5621 {
14ce26e7
FB
5622 target_ulong offset_addr;
5623
5624 if ((b & 1) == 0)
4ba9938c 5625 ot = MO_8;
14ce26e7 5626 else
4ba9938c 5627 ot = dflag + MO_16;
14ce26e7 5628#ifdef TARGET_X86_64
8f091a59 5629 if (s->aflag == 2) {
0af10c86 5630 offset_addr = cpu_ldq_code(env, s->pc);
14ce26e7 5631 s->pc += 8;
57fec1fe 5632 gen_op_movq_A0_im(offset_addr);
5fafdf24 5633 } else
14ce26e7
FB
5634#endif
5635 {
5636 if (s->aflag) {
4ba9938c 5637 offset_addr = insn_get(env, s, MO_32);
14ce26e7 5638 } else {
4ba9938c 5639 offset_addr = insn_get(env, s, MO_16);
14ce26e7
FB
5640 }
5641 gen_op_movl_A0_im(offset_addr);
5642 }
664e0f19 5643 gen_add_A0_ds_seg(s);
14ce26e7 5644 if ((b & 2) == 0) {
909be183 5645 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
57fec1fe 5646 gen_op_mov_reg_T0(ot, R_EAX);
14ce26e7 5647 } else {
57fec1fe 5648 gen_op_mov_TN_reg(ot, 0, R_EAX);
fd8ca9f6 5649 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2c0262af
FB
5650 }
5651 }
2c0262af
FB
5652 break;
5653 case 0xd7: /* xlat */
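            /* XLAT: load AL from rBX plus zero-extended AL in the DS segment (or
               the overriding segment); the sum is truncated to the current address
               size before the segment base is added. */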
14ce26e7 5654#ifdef TARGET_X86_64
8f091a59 5655 if (s->aflag == 2) {
57fec1fe 5656 gen_op_movq_A0_reg(R_EBX);
4ba9938c 5657 gen_op_mov_TN_reg(MO_64, 0, R_EAX);
bbf662ee
FB
5658 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5659 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5fafdf24 5660 } else
14ce26e7
FB
5661#endif
5662 {
57fec1fe 5663 gen_op_movl_A0_reg(R_EBX);
4ba9938c 5664 gen_op_mov_TN_reg(MO_32, 0, R_EAX);
bbf662ee
FB
5665 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5666 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
14ce26e7
FB
5667 if (s->aflag == 0)
5668 gen_op_andl_A0_ffff();
bbf662ee
FB
5669 else
5670 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
14ce26e7 5671 }
664e0f19 5672 gen_add_A0_ds_seg(s);
cc1a80df 5673 gen_op_ld_v(s, MO_8, cpu_T[0], cpu_A0);
4ba9938c 5674 gen_op_mov_reg_T0(MO_8, R_EAX);
2c0262af
FB
5675 break;
5676 case 0xb0 ... 0xb7: /* mov R, Ib */
4ba9938c 5677 val = insn_get(env, s, MO_8);
1b90d56e 5678 tcg_gen_movi_tl(cpu_T[0], val);
4ba9938c 5679 gen_op_mov_reg_T0(MO_8, (b & 7) | REX_B(s));
2c0262af
FB
5680 break;
5681 case 0xb8 ... 0xbf: /* mov R, Iv */
14ce26e7
FB
5682#ifdef TARGET_X86_64
5683 if (dflag == 2) {
5684 uint64_t tmp;
5685 /* 64 bit case */
0af10c86 5686 tmp = cpu_ldq_code(env, s->pc);
14ce26e7
FB
5687 s->pc += 8;
5688 reg = (b & 7) | REX_B(s);
5689 gen_movtl_T0_im(tmp);
4ba9938c 5690 gen_op_mov_reg_T0(MO_64, reg);
5fafdf24 5691 } else
14ce26e7
FB
5692#endif
5693 {
4ba9938c 5694 ot = dflag ? MO_32 : MO_16;
0af10c86 5695 val = insn_get(env, s, ot);
14ce26e7 5696 reg = (b & 7) | REX_B(s);
1b90d56e 5697 tcg_gen_movi_tl(cpu_T[0], val);
57fec1fe 5698 gen_op_mov_reg_T0(ot, reg);
14ce26e7 5699 }
2c0262af
FB
5700 break;
5701
5702 case 0x91 ... 0x97: /* xchg R, EAX */
7418027e 5703 do_xchg_reg_eax:
4ba9938c 5704 ot = dflag + MO_16;
14ce26e7 5705 reg = (b & 7) | REX_B(s);
2c0262af
FB
5706 rm = R_EAX;
5707 goto do_xchg_reg;
5708 case 0x86:
5709 case 0x87: /* xchg Ev, Gv */
5710 if ((b & 1) == 0)
4ba9938c 5711 ot = MO_8;
2c0262af 5712 else
4ba9938c 5713 ot = dflag + MO_16;
0af10c86 5714 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5715 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5716 mod = (modrm >> 6) & 3;
5717 if (mod == 3) {
14ce26e7 5718 rm = (modrm & 7) | REX_B(s);
2c0262af 5719 do_xchg_reg:
57fec1fe
FB
5720 gen_op_mov_TN_reg(ot, 0, reg);
5721 gen_op_mov_TN_reg(ot, 1, rm);
5722 gen_op_mov_reg_T0(ot, rm);
5723 gen_op_mov_reg_T1(ot, reg);
2c0262af 5724 } else {
4eeb3939 5725 gen_lea_modrm(env, s, modrm);
57fec1fe 5726 gen_op_mov_TN_reg(ot, 0, reg);
2c0262af
FB
5727 /* for xchg, lock is implicit */
5728 if (!(prefixes & PREFIX_LOCK))
a7812ae4 5729 gen_helper_lock();
0f712e10 5730 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
fd8ca9f6 5731 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 5732 if (!(prefixes & PREFIX_LOCK))
a7812ae4 5733 gen_helper_unlock();
57fec1fe 5734 gen_op_mov_reg_T1(ot, reg);
2c0262af
FB
5735 }
5736 break;
5737 case 0xc4: /* les Gv */
701ed211 5738 /* In CODE64 this is VEX3; see above. */
2c0262af
FB
5739 op = R_ES;
5740 goto do_lxx;
5741 case 0xc5: /* lds Gv */
701ed211 5742 /* In CODE64 this is VEX2; see above. */
2c0262af
FB
5743 op = R_DS;
5744 goto do_lxx;
5745 case 0x1b2: /* lss Gv */
5746 op = R_SS;
5747 goto do_lxx;
5748 case 0x1b4: /* lfs Gv */
5749 op = R_FS;
5750 goto do_lxx;
5751 case 0x1b5: /* lgs Gv */
5752 op = R_GS;
5753 do_lxx:
4ba9938c 5754 ot = dflag ? MO_32 : MO_16;
0af10c86 5755 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5756 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5757 mod = (modrm >> 6) & 3;
5758 if (mod == 3)
5759 goto illegal_op;
4eeb3939 5760 gen_lea_modrm(env, s, modrm);
0f712e10 5761 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4ba9938c 5762 gen_add_A0_im(s, 1 << (ot - MO_16 + 1));
2c0262af 5763 /* load the segment first to handle exceptions properly */
cc1a80df 5764 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
2c0262af
FB
5765 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5766 /* then put the data */
57fec1fe 5767 gen_op_mov_reg_T1(ot, reg);
2c0262af 5768 if (s->is_jmp) {
14ce26e7 5769 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5770 gen_eob(s);
5771 }
5772 break;
3b46e624 5773
2c0262af
FB
5774 /************************/
5775 /* shifts */
5776 case 0xc0:
5777 case 0xc1:
5778 /* shift Ev,Ib */
5779 shift = 2;
5780 grp2:
5781 {
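            /* Group 2 shifts/rotates: shift encodes the count source
               (0 = CL, 1 = the constant 1, 2 = an immediate byte). */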
5782 if ((b & 1) == 0)
4ba9938c 5783 ot = MO_8;
2c0262af 5784 else
4ba9938c 5785 ot = dflag + MO_16;
3b46e624 5786
0af10c86 5787 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5788 mod = (modrm >> 6) & 3;
2c0262af 5789 op = (modrm >> 3) & 7;
3b46e624 5790
2c0262af 5791 if (mod != 3) {
14ce26e7
FB
5792 if (shift == 2) {
5793 s->rip_offset = 1;
5794 }
4eeb3939 5795 gen_lea_modrm(env, s, modrm);
2c0262af
FB
5796 opreg = OR_TMP0;
5797 } else {
14ce26e7 5798 opreg = (modrm & 7) | REX_B(s);
2c0262af
FB
5799 }
5800
5801 /* simpler op */
5802 if (shift == 0) {
5803 gen_shift(s, op, ot, opreg, OR_ECX);
5804 } else {
5805 if (shift == 2) {
0af10c86 5806 shift = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5807 }
5808 gen_shifti(s, op, ot, opreg, shift);
5809 }
5810 }
5811 break;
5812 case 0xd0:
5813 case 0xd1:
5814 /* shift Ev,1 */
5815 shift = 1;
5816 goto grp2;
5817 case 0xd2:
5818 case 0xd3:
5819 /* shift Ev,cl */
5820 shift = 0;
5821 goto grp2;
5822
5823 case 0x1a4: /* shld imm */
5824 op = 0;
5825 shift = 1;
5826 goto do_shiftd;
5827 case 0x1a5: /* shld cl */
5828 op = 0;
5829 shift = 0;
5830 goto do_shiftd;
5831 case 0x1ac: /* shrd imm */
5832 op = 1;
5833 shift = 1;
5834 goto do_shiftd;
5835 case 0x1ad: /* shrd cl */
5836 op = 1;
5837 shift = 0;
5838 do_shiftd:
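            /* SHLD/SHRD: op selects the shift direction and shift selects the
               count source (immediate byte versus CL); T1 holds the register that
               supplies the bits shifted in. */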
4ba9938c 5839 ot = dflag + MO_16;
0af10c86 5840 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5841 mod = (modrm >> 6) & 3;
14ce26e7
FB
5842 rm = (modrm & 7) | REX_B(s);
5843 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 5844 if (mod != 3) {
4eeb3939 5845 gen_lea_modrm(env, s, modrm);
b6abf97d 5846 opreg = OR_TMP0;
2c0262af 5847 } else {
b6abf97d 5848 opreg = rm;
2c0262af 5849 }
57fec1fe 5850 gen_op_mov_TN_reg(ot, 1, reg);
3b46e624 5851
2c0262af 5852 if (shift) {
3b9d3cf1
PB
5853 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5854 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5855 tcg_temp_free(imm);
2c0262af 5856 } else {
3b9d3cf1 5857 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
2c0262af
FB
5858 }
5859 break;
5860
5861 /************************/
5862 /* floats */
5fafdf24 5863 case 0xd8 ... 0xdf:
7eee2a50
FB
5864 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5865 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5866 /* XXX: what to do if illegal op ? */
5867 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5868 break;
5869 }
0af10c86 5870 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5871 mod = (modrm >> 6) & 3;
5872 rm = modrm & 7;
5873 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
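        /* op packs the low three bits of the D8..DF opcode byte with the reg
           field of the modrm byte, giving a 6-bit index that selects the x87
           operation in the switches below. */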
2c0262af
FB
5874 if (mod != 3) {
5875 /* memory op */
4eeb3939 5876 gen_lea_modrm(env, s, modrm);
2c0262af
FB
5877 switch(op) {
5878 case 0x00 ... 0x07: /* fxxxs */
5879 case 0x10 ... 0x17: /* fixxxl */
5880 case 0x20 ... 0x27: /* fxxxl */
5881 case 0x30 ... 0x37: /* fixxx */
5882 {
5883 int op1;
5884 op1 = op & 7;
5885
5886 switch(op >> 4) {
5887 case 0:
80b02013
RH
5888 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5889 s->mem_index, MO_LEUL);
d3eb5eae 5890 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5891 break;
5892 case 1:
80b02013
RH
5893 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5894 s->mem_index, MO_LEUL);
d3eb5eae 5895 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5896 break;
5897 case 2:
3c5f4116
RH
5898 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5899 s->mem_index, MO_LEQ);
d3eb5eae 5900 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
5901 break;
5902 case 3:
5903 default:
80b02013
RH
5904 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5905 s->mem_index, MO_LESW);
d3eb5eae 5906 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5907 break;
5908 }
3b46e624 5909
a7812ae4 5910 gen_helper_fp_arith_ST0_FT0(op1);
2c0262af
FB
5911 if (op1 == 3) {
5912 /* fcomp needs pop */
d3eb5eae 5913 gen_helper_fpop(cpu_env);
2c0262af
FB
5914 }
5915 }
5916 break;
5917 case 0x08: /* flds */
5918 case 0x0a: /* fsts */
5919 case 0x0b: /* fstps */
465e9838
FB
5920 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5921 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5922 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
2c0262af
FB
5923 switch(op & 7) {
5924 case 0:
5925 switch(op >> 4) {
5926 case 0:
80b02013
RH
5927 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5928 s->mem_index, MO_LEUL);
d3eb5eae 5929 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5930 break;
5931 case 1:
80b02013
RH
5932 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5933 s->mem_index, MO_LEUL);
d3eb5eae 5934 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5935 break;
5936 case 2:
3c5f4116
RH
5937 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5938 s->mem_index, MO_LEQ);
d3eb5eae 5939 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
5940 break;
5941 case 3:
5942 default:
80b02013
RH
5943 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5944 s->mem_index, MO_LESW);
d3eb5eae 5945 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5946 break;
5947 }
5948 break;
465e9838 5949 case 1:
19e6c4b8 5950 /* XXX: the corresponding CPUID bit must be tested! */
465e9838
FB
5951 switch(op >> 4) {
5952 case 1:
d3eb5eae 5953 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5954 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5955 s->mem_index, MO_LEUL);
465e9838
FB
5956 break;
5957 case 2:
d3eb5eae 5958 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
3523e4bd
RH
5959 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5960 s->mem_index, MO_LEQ);
465e9838
FB
5961 break;
5962 case 3:
5963 default:
d3eb5eae 5964 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5965 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5966 s->mem_index, MO_LEUW);
19e6c4b8 5967 break;
465e9838 5968 }
d3eb5eae 5969 gen_helper_fpop(cpu_env);
465e9838 5970 break;
2c0262af
FB
5971 default:
5972 switch(op >> 4) {
5973 case 0:
d3eb5eae 5974 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5975 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5976 s->mem_index, MO_LEUL);
2c0262af
FB
5977 break;
5978 case 1:
d3eb5eae 5979 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5980 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5981 s->mem_index, MO_LEUL);
2c0262af
FB
5982 break;
5983 case 2:
d3eb5eae 5984 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
3523e4bd
RH
5985 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5986 s->mem_index, MO_LEQ);
2c0262af
FB
5987 break;
5988 case 3:
5989 default:
d3eb5eae 5990 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5991 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5992 s->mem_index, MO_LEUW);
2c0262af
FB
5993 break;
5994 }
5995 if ((op & 7) == 3)
d3eb5eae 5996 gen_helper_fpop(cpu_env);
2c0262af
FB
5997 break;
5998 }
5999 break;
6000 case 0x0c: /* fldenv mem */
773cdfcc 6001 gen_update_cc_op(s);
19e6c4b8 6002 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6003 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
2c0262af
FB
6004 break;
6005 case 0x0d: /* fldcw mem */
80b02013
RH
6006 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
6007 s->mem_index, MO_LEUW);
d3eb5eae 6008 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6009 break;
6010 case 0x0e: /* fnstenv mem */
773cdfcc 6011 gen_update_cc_op(s);
19e6c4b8 6012 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6013 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
2c0262af
FB
6014 break;
6015 case 0x0f: /* fnstcw mem */
d3eb5eae 6016 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
6017 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
6018 s->mem_index, MO_LEUW);
2c0262af
FB
6019 break;
6020 case 0x1d: /* fldt mem */
773cdfcc 6021 gen_update_cc_op(s);
19e6c4b8 6022 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6023 gen_helper_fldt_ST0(cpu_env, cpu_A0);
2c0262af
FB
6024 break;
6025 case 0x1f: /* fstpt mem */
773cdfcc 6026 gen_update_cc_op(s);
19e6c4b8 6027 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae
BS
6028 gen_helper_fstt_ST0(cpu_env, cpu_A0);
6029 gen_helper_fpop(cpu_env);
2c0262af
FB
6030 break;
6031 case 0x2c: /* frstor mem */
773cdfcc 6032 gen_update_cc_op(s);
19e6c4b8 6033 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6034 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
2c0262af
FB
6035 break;
6036 case 0x2e: /* fnsave mem */
773cdfcc 6037 gen_update_cc_op(s);
19e6c4b8 6038 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6039 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
2c0262af
FB
6040 break;
6041 case 0x2f: /* fnstsw mem */
d3eb5eae 6042 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
6043 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
6044 s->mem_index, MO_LEUW);
2c0262af
FB
6045 break;
6046 case 0x3c: /* fbld */
773cdfcc 6047 gen_update_cc_op(s);
19e6c4b8 6048 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6049 gen_helper_fbld_ST0(cpu_env, cpu_A0);
2c0262af
FB
6050 break;
6051 case 0x3e: /* fbstp */
773cdfcc 6052 gen_update_cc_op(s);
19e6c4b8 6053 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae
BS
6054 gen_helper_fbst_ST0(cpu_env, cpu_A0);
6055 gen_helper_fpop(cpu_env);
2c0262af
FB
6056 break;
6057 case 0x3d: /* fildll */
3c5f4116 6058 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
d3eb5eae 6059 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
6060 break;
6061 case 0x3f: /* fistpll */
d3eb5eae 6062 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
3523e4bd 6063 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
d3eb5eae 6064 gen_helper_fpop(cpu_env);
2c0262af
FB
6065 break;
6066 default:
6067 goto illegal_op;
6068 }
6069 } else {
6070 /* register float ops */
6071 opreg = rm;
6072
6073 switch(op) {
6074 case 0x08: /* fld sti */
d3eb5eae
BS
6075 gen_helper_fpush(cpu_env);
6076 gen_helper_fmov_ST0_STN(cpu_env,
6077 tcg_const_i32((opreg + 1) & 7));
2c0262af
FB
6078 break;
6079 case 0x09: /* fxchg sti */
c169c906
FB
6080 case 0x29: /* fxchg4 sti, undocumented op */
6081 case 0x39: /* fxchg7 sti, undocumented op */
d3eb5eae 6082 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
2c0262af
FB
6083 break;
6084 case 0x0a: /* grp d9/2 */
6085 switch(rm) {
6086 case 0: /* fnop */
023fe10d 6087 /* check exceptions (FreeBSD FPU probe) */
773cdfcc 6088 gen_update_cc_op(s);
14ce26e7 6089 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6090 gen_helper_fwait(cpu_env);
2c0262af
FB
6091 break;
6092 default:
6093 goto illegal_op;
6094 }
6095 break;
6096 case 0x0c: /* grp d9/4 */
6097 switch(rm) {
6098 case 0: /* fchs */
d3eb5eae 6099 gen_helper_fchs_ST0(cpu_env);
2c0262af
FB
6100 break;
6101 case 1: /* fabs */
d3eb5eae 6102 gen_helper_fabs_ST0(cpu_env);
2c0262af
FB
6103 break;
6104 case 4: /* ftst */
d3eb5eae
BS
6105 gen_helper_fldz_FT0(cpu_env);
6106 gen_helper_fcom_ST0_FT0(cpu_env);
2c0262af
FB
6107 break;
6108 case 5: /* fxam */
d3eb5eae 6109 gen_helper_fxam_ST0(cpu_env);
2c0262af
FB
6110 break;
6111 default:
6112 goto illegal_op;
6113 }
6114 break;
6115 case 0x0d: /* grp d9/5 */
6116 {
6117 switch(rm) {
6118 case 0:
d3eb5eae
BS
6119 gen_helper_fpush(cpu_env);
6120 gen_helper_fld1_ST0(cpu_env);
2c0262af
FB
6121 break;
6122 case 1:
d3eb5eae
BS
6123 gen_helper_fpush(cpu_env);
6124 gen_helper_fldl2t_ST0(cpu_env);
2c0262af
FB
6125 break;
6126 case 2:
d3eb5eae
BS
6127 gen_helper_fpush(cpu_env);
6128 gen_helper_fldl2e_ST0(cpu_env);
2c0262af
FB
6129 break;
6130 case 3:
d3eb5eae
BS
6131 gen_helper_fpush(cpu_env);
6132 gen_helper_fldpi_ST0(cpu_env);
2c0262af
FB
6133 break;
6134 case 4:
d3eb5eae
BS
6135 gen_helper_fpush(cpu_env);
6136 gen_helper_fldlg2_ST0(cpu_env);
2c0262af
FB
6137 break;
6138 case 5:
d3eb5eae
BS
6139 gen_helper_fpush(cpu_env);
6140 gen_helper_fldln2_ST0(cpu_env);
2c0262af
FB
6141 break;
6142 case 6:
d3eb5eae
BS
6143 gen_helper_fpush(cpu_env);
6144 gen_helper_fldz_ST0(cpu_env);
2c0262af
FB
6145 break;
6146 default:
6147 goto illegal_op;
6148 }
6149 }
6150 break;
6151 case 0x0e: /* grp d9/6 */
6152 switch(rm) {
6153 case 0: /* f2xm1 */
d3eb5eae 6154 gen_helper_f2xm1(cpu_env);
2c0262af
FB
6155 break;
6156 case 1: /* fyl2x */
d3eb5eae 6157 gen_helper_fyl2x(cpu_env);
2c0262af
FB
6158 break;
6159 case 2: /* fptan */
d3eb5eae 6160 gen_helper_fptan(cpu_env);
2c0262af
FB
6161 break;
6162 case 3: /* fpatan */
d3eb5eae 6163 gen_helper_fpatan(cpu_env);
2c0262af
FB
6164 break;
6165 case 4: /* fxtract */
d3eb5eae 6166 gen_helper_fxtract(cpu_env);
2c0262af
FB
6167 break;
6168 case 5: /* fprem1 */
d3eb5eae 6169 gen_helper_fprem1(cpu_env);
2c0262af
FB
6170 break;
6171 case 6: /* fdecstp */
d3eb5eae 6172 gen_helper_fdecstp(cpu_env);
2c0262af
FB
6173 break;
6174 default:
6175 case 7: /* fincstp */
d3eb5eae 6176 gen_helper_fincstp(cpu_env);
2c0262af
FB
6177 break;
6178 }
6179 break;
6180 case 0x0f: /* grp d9/7 */
6181 switch(rm) {
6182 case 0: /* fprem */
d3eb5eae 6183 gen_helper_fprem(cpu_env);
2c0262af
FB
6184 break;
6185 case 1: /* fyl2xp1 */
d3eb5eae 6186 gen_helper_fyl2xp1(cpu_env);
2c0262af
FB
6187 break;
6188 case 2: /* fsqrt */
d3eb5eae 6189 gen_helper_fsqrt(cpu_env);
2c0262af
FB
6190 break;
6191 case 3: /* fsincos */
d3eb5eae 6192 gen_helper_fsincos(cpu_env);
2c0262af
FB
6193 break;
6194 case 5: /* fscale */
d3eb5eae 6195 gen_helper_fscale(cpu_env);
2c0262af
FB
6196 break;
6197 case 4: /* frndint */
d3eb5eae 6198 gen_helper_frndint(cpu_env);
2c0262af
FB
6199 break;
6200 case 6: /* fsin */
d3eb5eae 6201 gen_helper_fsin(cpu_env);
2c0262af
FB
6202 break;
6203 default:
6204 case 7: /* fcos */
d3eb5eae 6205 gen_helper_fcos(cpu_env);
2c0262af
FB
6206 break;
6207 }
6208 break;
6209 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6210 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6211 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6212 {
6213 int op1;
3b46e624 6214
2c0262af
FB
6215 op1 = op & 7;
6216 if (op >= 0x20) {
a7812ae4 6217 gen_helper_fp_arith_STN_ST0(op1, opreg);
2c0262af 6218 if (op >= 0x30)
d3eb5eae 6219 gen_helper_fpop(cpu_env);
2c0262af 6220 } else {
d3eb5eae 6221 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
a7812ae4 6222 gen_helper_fp_arith_ST0_FT0(op1);
2c0262af
FB
6223 }
6224 }
6225 break;
6226 case 0x02: /* fcom */
c169c906 6227 case 0x22: /* fcom2, undocumented op */
d3eb5eae
BS
6228 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6229 gen_helper_fcom_ST0_FT0(cpu_env);
2c0262af
FB
6230 break;
6231 case 0x03: /* fcomp */
c169c906
FB
6232 case 0x23: /* fcomp3, undocumented op */
6233 case 0x32: /* fcomp5, undocumented op */
d3eb5eae
BS
6234 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6235 gen_helper_fcom_ST0_FT0(cpu_env);
6236 gen_helper_fpop(cpu_env);
2c0262af
FB
6237 break;
6238 case 0x15: /* da/5 */
6239 switch(rm) {
6240 case 1: /* fucompp */
d3eb5eae
BS
6241 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6242 gen_helper_fucom_ST0_FT0(cpu_env);
6243 gen_helper_fpop(cpu_env);
6244 gen_helper_fpop(cpu_env);
2c0262af
FB
6245 break;
6246 default:
6247 goto illegal_op;
6248 }
6249 break;
6250 case 0x1c:
6251 switch(rm) {
6252 case 0: /* feni (287 only, just do nop here) */
6253 break;
6254 case 1: /* fdisi (287 only, just do nop here) */
6255 break;
6256 case 2: /* fclex */
d3eb5eae 6257 gen_helper_fclex(cpu_env);
2c0262af
FB
6258 break;
6259 case 3: /* fninit */
d3eb5eae 6260 gen_helper_fninit(cpu_env);
2c0262af
FB
6261 break;
6262 case 4: /* fsetpm (287 only, just do nop here) */
6263 break;
6264 default:
6265 goto illegal_op;
6266 }
6267 break;
6268 case 0x1d: /* fucomi */
bff93281
PM
6269 if (!(s->cpuid_features & CPUID_CMOV)) {
6270 goto illegal_op;
6271 }
773cdfcc 6272 gen_update_cc_op(s);
d3eb5eae
BS
6273 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6274 gen_helper_fucomi_ST0_FT0(cpu_env);
3ca51d07 6275 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6276 break;
6277 case 0x1e: /* fcomi */
bff93281
PM
6278 if (!(s->cpuid_features & CPUID_CMOV)) {
6279 goto illegal_op;
6280 }
773cdfcc 6281 gen_update_cc_op(s);
d3eb5eae
BS
6282 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6283 gen_helper_fcomi_ST0_FT0(cpu_env);
3ca51d07 6284 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 6285 break;
658c8bda 6286 case 0x28: /* ffree sti */
d3eb5eae 6287 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
5fafdf24 6288 break;
2c0262af 6289 case 0x2a: /* fst sti */
d3eb5eae 6290 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
2c0262af
FB
6291 break;
6292 case 0x2b: /* fstp sti */
c169c906
FB
6293 case 0x0b: /* fstp1 sti, undocumented op */
6294 case 0x3a: /* fstp8 sti, undocumented op */
6295 case 0x3b: /* fstp9 sti, undocumented op */
d3eb5eae
BS
6296 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6297 gen_helper_fpop(cpu_env);
2c0262af
FB
6298 break;
6299 case 0x2c: /* fucom st(i) */
d3eb5eae
BS
6300 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6301 gen_helper_fucom_ST0_FT0(cpu_env);
2c0262af
FB
6302 break;
6303 case 0x2d: /* fucomp st(i) */
d3eb5eae
BS
6304 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6305 gen_helper_fucom_ST0_FT0(cpu_env);
6306 gen_helper_fpop(cpu_env);
2c0262af
FB
6307 break;
6308 case 0x33: /* de/3 */
6309 switch(rm) {
6310 case 1: /* fcompp */
d3eb5eae
BS
6311 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6312 gen_helper_fcom_ST0_FT0(cpu_env);
6313 gen_helper_fpop(cpu_env);
6314 gen_helper_fpop(cpu_env);
2c0262af
FB
6315 break;
6316 default:
6317 goto illegal_op;
6318 }
6319 break;
c169c906 6320 case 0x38: /* ffreep sti, undocumented op */
d3eb5eae
BS
6321 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6322 gen_helper_fpop(cpu_env);
c169c906 6323 break;
2c0262af
FB
6324 case 0x3c: /* df/4 */
6325 switch(rm) {
6326 case 0:
d3eb5eae 6327 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
b6abf97d 6328 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4ba9938c 6329 gen_op_mov_reg_T0(MO_16, R_EAX);
2c0262af
FB
6330 break;
6331 default:
6332 goto illegal_op;
6333 }
6334 break;
6335 case 0x3d: /* fucomip */
bff93281
PM
6336 if (!(s->cpuid_features & CPUID_CMOV)) {
6337 goto illegal_op;
6338 }
773cdfcc 6339 gen_update_cc_op(s);
d3eb5eae
BS
6340 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6341 gen_helper_fucomi_ST0_FT0(cpu_env);
6342 gen_helper_fpop(cpu_env);
3ca51d07 6343 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6344 break;
6345 case 0x3e: /* fcomip */
bff93281
PM
6346 if (!(s->cpuid_features & CPUID_CMOV)) {
6347 goto illegal_op;
6348 }
773cdfcc 6349 gen_update_cc_op(s);
d3eb5eae
BS
6350 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6351 gen_helper_fcomi_ST0_FT0(cpu_env);
6352 gen_helper_fpop(cpu_env);
3ca51d07 6353 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 6354 break;
a2cc3b24
FB
6355 case 0x10 ... 0x13: /* fcmovxx */
6356 case 0x18 ... 0x1b:
6357 {
19e6c4b8 6358 int op1, l1;
d70040bc 6359 static const uint8_t fcmov_cc[8] = {
a2cc3b24
FB
6360 (JCC_B << 1),
6361 (JCC_Z << 1),
6362 (JCC_BE << 1),
6363 (JCC_P << 1),
6364 };
bff93281
PM
6365
6366 if (!(s->cpuid_features & CPUID_CMOV)) {
6367 goto illegal_op;
6368 }
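                    /* The low two opcode bits select the B/E/BE/U condition and
                       bit 3 selects the negated forms; gen_jcc1_noeob branches over
                       the register move when the fcmov condition is false. */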
1e4840bf 6369 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
19e6c4b8 6370 l1 = gen_new_label();
dc259201 6371 gen_jcc1_noeob(s, op1, l1);
d3eb5eae 6372 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
19e6c4b8 6373 gen_set_label(l1);
a2cc3b24
FB
6374 }
6375 break;
2c0262af
FB
6376 default:
6377 goto illegal_op;
6378 }
6379 }
6380 break;
6381 /************************/
6382 /* string ops */
6383
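        /* For each string instruction a REP/REPZ/REPNZ prefix selects a helper
           that loops on rCX (and additionally tests ZF for CMPS/SCAS); without a
           prefix a single iteration is generated inline. */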
6384 case 0xa4: /* movsS */
6385 case 0xa5:
6386 if ((b & 1) == 0)
4ba9938c 6387 ot = MO_8;
2c0262af 6388 else
4ba9938c 6389 ot = dflag + MO_16;
2c0262af
FB
6390
6391 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6392 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6393 } else {
6394 gen_movs(s, ot);
6395 }
6396 break;
3b46e624 6397
2c0262af
FB
6398 case 0xaa: /* stosS */
6399 case 0xab:
6400 if ((b & 1) == 0)
4ba9938c 6401 ot = MO_8;
2c0262af 6402 else
4ba9938c 6403 ot = dflag + MO_16;
2c0262af
FB
6404
6405 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6406 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6407 } else {
6408 gen_stos(s, ot);
6409 }
6410 break;
6411 case 0xac: /* lodsS */
6412 case 0xad:
6413 if ((b & 1) == 0)
4ba9938c 6414 ot = MO_8;
2c0262af 6415 else
4ba9938c 6416 ot = dflag + MO_16;
2c0262af
FB
6417 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6418 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6419 } else {
6420 gen_lods(s, ot);
6421 }
6422 break;
6423 case 0xae: /* scasS */
6424 case 0xaf:
6425 if ((b & 1) == 0)
4ba9938c 6426 ot = MO_8;
2c0262af 6427 else
4ba9938c 6428 ot = dflag + MO_16;
2c0262af
FB
6429 if (prefixes & PREFIX_REPNZ) {
6430 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6431 } else if (prefixes & PREFIX_REPZ) {
6432 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6433 } else {
6434 gen_scas(s, ot);
2c0262af
FB
6435 }
6436 break;
6437
6438 case 0xa6: /* cmpsS */
6439 case 0xa7:
6440 if ((b & 1) == 0)
4ba9938c 6441 ot = MO_8;
2c0262af 6442 else
4ba9938c 6443 ot = dflag + MO_16;
2c0262af
FB
6444 if (prefixes & PREFIX_REPNZ) {
6445 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6446 } else if (prefixes & PREFIX_REPZ) {
6447 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6448 } else {
6449 gen_cmps(s, ot);
2c0262af
FB
6450 }
6451 break;
6452 case 0x6c: /* insS */
6453 case 0x6d:
f115e911 6454 if ((b & 1) == 0)
4ba9938c 6455 ot = MO_8;
f115e911 6456 else
4ba9938c
RH
6457 ot = dflag ? MO_32 : MO_16;
6458 gen_op_mov_TN_reg(MO_16, 0, R_EDX);
0573fbfc 6459 gen_op_andl_T0_ffff();
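            /* gen_check_io validates the port against the TSS I/O permission
               bitmap when needed and handles SVM I/O intercepts before the access
               is performed. */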
b8b6a50b
FB
6460 gen_check_io(s, ot, pc_start - s->cs_base,
6461 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
f115e911
FB
6462 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6463 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
2c0262af 6464 } else {
f115e911 6465 gen_ins(s, ot);
2e70f6ef
PB
6466 if (use_icount) {
6467 gen_jmp(s, s->pc - s->cs_base);
6468 }
2c0262af
FB
6469 }
6470 break;
6471 case 0x6e: /* outsS */
6472 case 0x6f:
f115e911 6473 if ((b & 1) == 0)
4ba9938c 6474 ot = MO_8;
f115e911 6475 else
4ba9938c
RH
6476 ot = dflag ? MO_32 : MO_16;
6477 gen_op_mov_TN_reg(MO_16, 0, R_EDX);
0573fbfc 6478 gen_op_andl_T0_ffff();
b8b6a50b
FB
6479 gen_check_io(s, ot, pc_start - s->cs_base,
6480 svm_is_rep(prefixes) | 4);
f115e911
FB
6481 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6482 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
2c0262af 6483 } else {
f115e911 6484 gen_outs(s, ot);
2e70f6ef
PB
6485 if (use_icount) {
6486 gen_jmp(s, s->pc - s->cs_base);
6487 }
2c0262af
FB
6488 }
6489 break;
6490
6491 /************************/
6492 /* port I/O */
0573fbfc 6493
2c0262af
FB
6494 case 0xe4:
6495 case 0xe5:
f115e911 6496 if ((b & 1) == 0)
4ba9938c 6497 ot = MO_8;
f115e911 6498 else
4ba9938c 6499 ot = dflag ? MO_32 : MO_16;
0af10c86 6500 val = cpu_ldub_code(env, s->pc++);
b8b6a50b
FB
6501 gen_check_io(s, ot, pc_start - s->cs_base,
6502 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
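            /* In icount mode the port access is bracketed with gen_io_start and
               gen_io_end and the TB is ended right afterwards, keeping the
               instruction count exact around the device access. */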
2e70f6ef
PB
6503 if (use_icount)
6504 gen_io_start();
1b90d56e 6505 tcg_gen_movi_i32(cpu_tmp2_i32, val);
a7812ae4 6506 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
57fec1fe 6507 gen_op_mov_reg_T1(ot, R_EAX);
2e70f6ef
PB
6508 if (use_icount) {
6509 gen_io_end();
6510 gen_jmp(s, s->pc - s->cs_base);
6511 }
2c0262af
FB
6512 break;
6513 case 0xe6:
6514 case 0xe7:
f115e911 6515 if ((b & 1) == 0)
4ba9938c 6516 ot = MO_8;
f115e911 6517 else
4ba9938c 6518 ot = dflag ? MO_32 : MO_16;
0af10c86 6519 val = cpu_ldub_code(env, s->pc++);
b8b6a50b
FB
6520 gen_check_io(s, ot, pc_start - s->cs_base,
6521 svm_is_rep(prefixes));
57fec1fe 6522 gen_op_mov_TN_reg(ot, 1, R_EAX);
b8b6a50b 6523
2e70f6ef
PB
6524 if (use_icount)
6525 gen_io_start();
1b90d56e 6526 tcg_gen_movi_i32(cpu_tmp2_i32, val);
b6abf97d 6527 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
a7812ae4 6528 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
2e70f6ef
PB
6529 if (use_icount) {
6530 gen_io_end();
6531 gen_jmp(s, s->pc - s->cs_base);
6532 }
2c0262af
FB
6533 break;
6534 case 0xec:
6535 case 0xed:
f115e911 6536 if ((b & 1) == 0)
4ba9938c 6537 ot = MO_8;
f115e911 6538 else
4ba9938c
RH
6539 ot = dflag ? MO_32 : MO_16;
6540 gen_op_mov_TN_reg(MO_16, 0, R_EDX);
4f31916f 6541 gen_op_andl_T0_ffff();
b8b6a50b
FB
6542 gen_check_io(s, ot, pc_start - s->cs_base,
6543 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
2e70f6ef
PB
6544 if (use_icount)
6545 gen_io_start();
b6abf97d 6546 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
a7812ae4 6547 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
57fec1fe 6548 gen_op_mov_reg_T1(ot, R_EAX);
2e70f6ef
PB
6549 if (use_icount) {
6550 gen_io_end();
6551 gen_jmp(s, s->pc - s->cs_base);
6552 }
2c0262af
FB
6553 break;
6554 case 0xee:
6555 case 0xef:
f115e911 6556 if ((b & 1) == 0)
4ba9938c 6557 ot = MO_8;
f115e911 6558 else
4ba9938c
RH
6559 ot = dflag ? MO_32 : MO_16;
6560 gen_op_mov_TN_reg(MO_16, 0, R_EDX);
4f31916f 6561 gen_op_andl_T0_ffff();
b8b6a50b
FB
6562 gen_check_io(s, ot, pc_start - s->cs_base,
6563 svm_is_rep(prefixes));
57fec1fe 6564 gen_op_mov_TN_reg(ot, 1, R_EAX);
b8b6a50b 6565
2e70f6ef
PB
6566 if (use_icount)
6567 gen_io_start();
b6abf97d 6568 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
b6abf97d 6569 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
a7812ae4 6570 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
2e70f6ef
PB
6571 if (use_icount) {
6572 gen_io_end();
6573 gen_jmp(s, s->pc - s->cs_base);
6574 }
2c0262af
FB
6575 break;
6576
6577 /************************/
6578 /* control */
6579 case 0xc2: /* ret im */
0af10c86 6580 val = cpu_ldsw_code(env, s->pc);
2c0262af
FB
6581 s->pc += 2;
6582 gen_pop_T0(s);
8f091a59
FB
6583 if (CODE64(s) && s->dflag)
6584 s->dflag = 2;
2c0262af
FB
6585 gen_stack_update(s, val + (2 << s->dflag));
6586 if (s->dflag == 0)
6587 gen_op_andl_T0_ffff();
6588 gen_op_jmp_T0();
6589 gen_eob(s);
6590 break;
6591 case 0xc3: /* ret */
6592 gen_pop_T0(s);
6593 gen_pop_update(s);
6594 if (s->dflag == 0)
6595 gen_op_andl_T0_ffff();
6596 gen_op_jmp_T0();
6597 gen_eob(s);
6598 break;
6599 case 0xca: /* lret im */
0af10c86 6600 val = cpu_ldsw_code(env, s->pc);
2c0262af
FB
6601 s->pc += 2;
6602 do_lret:
6603 if (s->pe && !s->vm86) {
773cdfcc 6604 gen_update_cc_op(s);
14ce26e7 6605 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 6606 gen_helper_lret_protected(cpu_env, tcg_const_i32(s->dflag),
a7812ae4 6607 tcg_const_i32(val));
2c0262af
FB
6608 } else {
6609 gen_stack_A0(s);
6610 /* pop offset */
909be183 6611 gen_op_ld_v(s, 1 + s->dflag, cpu_T[0], cpu_A0);
2c0262af
FB
6612 if (s->dflag == 0)
6613 gen_op_andl_T0_ffff();
6614 /* NOTE: keeping EIP updated is not a problem in case of
6615 exception */
6616 gen_op_jmp_T0();
6617 /* pop selector */
6618 gen_op_addl_A0_im(2 << s->dflag);
909be183 6619 gen_op_ld_v(s, 1 + s->dflag, cpu_T[0], cpu_A0);
3bd7da9e 6620 gen_op_movl_seg_T0_vm(R_CS);
2c0262af
FB
6621 /* add stack offset */
6622 gen_stack_update(s, val + (4 << s->dflag));
6623 }
6624 gen_eob(s);
6625 break;
6626 case 0xcb: /* lret */
6627 val = 0;
6628 goto do_lret;
6629 case 0xcf: /* iret */
872929aa 6630 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
2c0262af
FB
6631 if (!s->pe) {
6632 /* real mode */
2999a0b2 6633 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
3ca51d07 6634 set_cc_op(s, CC_OP_EFLAGS);
f115e911
FB
6635 } else if (s->vm86) {
6636 if (s->iopl != 3) {
6637 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6638 } else {
2999a0b2 6639 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
3ca51d07 6640 set_cc_op(s, CC_OP_EFLAGS);
f115e911 6641 }
2c0262af 6642 } else {
773cdfcc 6643 gen_update_cc_op(s);
14ce26e7 6644 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 6645 gen_helper_iret_protected(cpu_env, tcg_const_i32(s->dflag),
a7812ae4 6646 tcg_const_i32(s->pc - s->cs_base));
3ca51d07 6647 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6648 }
6649 gen_eob(s);
6650 break;
6651 case 0xe8: /* call im */
6652 {
14ce26e7 6653 if (dflag)
4ba9938c 6654 tval = (int32_t)insn_get(env, s, MO_32);
14ce26e7 6655 else
4ba9938c 6656 tval = (int16_t)insn_get(env, s, MO_16);
2c0262af 6657 next_eip = s->pc - s->cs_base;
14ce26e7 6658 tval += next_eip;
2c0262af 6659 if (s->dflag == 0)
14ce26e7 6660 tval &= 0xffff;
99596385
AJ
6661 else if(!CODE64(s))
6662 tval &= 0xffffffff;
14ce26e7 6663 gen_movtl_T0_im(next_eip);
2c0262af 6664 gen_push_T0(s);
14ce26e7 6665 gen_jmp(s, tval);
2c0262af
FB
6666 }
6667 break;
6668 case 0x9a: /* lcall im */
6669 {
6670 unsigned int selector, offset;
3b46e624 6671
14ce26e7
FB
6672 if (CODE64(s))
6673 goto illegal_op;
4ba9938c 6674 ot = dflag ? MO_32 : MO_16;
0af10c86 6675 offset = insn_get(env, s, ot);
4ba9938c 6676 selector = insn_get(env, s, MO_16);
3b46e624 6677
1b90d56e 6678 tcg_gen_movi_tl(cpu_T[0], selector);
14ce26e7 6679 gen_op_movl_T1_imu(offset);
2c0262af
FB
6680 }
6681 goto do_lcall;
ecada8a2 6682 case 0xe9: /* jmp im */
14ce26e7 6683 if (dflag)
4ba9938c 6684 tval = (int32_t)insn_get(env, s, MO_32);
14ce26e7 6685 else
4ba9938c 6686 tval = (int16_t)insn_get(env, s, MO_16);
14ce26e7 6687 tval += s->pc - s->cs_base;
2c0262af 6688 if (s->dflag == 0)
14ce26e7 6689 tval &= 0xffff;
32938e12
AJ
6690 else if(!CODE64(s))
6691 tval &= 0xffffffff;
14ce26e7 6692 gen_jmp(s, tval);
2c0262af
FB
6693 break;
6694 case 0xea: /* ljmp im */
6695 {
6696 unsigned int selector, offset;
6697
14ce26e7
FB
6698 if (CODE64(s))
6699 goto illegal_op;
4ba9938c 6700 ot = dflag ? MO_32 : MO_16;
0af10c86 6701 offset = insn_get(env, s, ot);
4ba9938c 6702 selector = insn_get(env, s, MO_16);
3b46e624 6703
1b90d56e 6704 tcg_gen_movi_tl(cpu_T[0], selector);
14ce26e7 6705 gen_op_movl_T1_imu(offset);
2c0262af
FB
6706 }
6707 goto do_ljmp;
6708 case 0xeb: /* jmp Jb */
4ba9938c 6709 tval = (int8_t)insn_get(env, s, MO_8);
14ce26e7 6710 tval += s->pc - s->cs_base;
2c0262af 6711 if (s->dflag == 0)
14ce26e7
FB
6712 tval &= 0xffff;
6713 gen_jmp(s, tval);
2c0262af
FB
6714 break;
6715 case 0x70 ... 0x7f: /* jcc Jb */
4ba9938c 6716 tval = (int8_t)insn_get(env, s, MO_8);
2c0262af
FB
6717 goto do_jcc;
6718 case 0x180 ... 0x18f: /* jcc Jv */
6719 if (dflag) {
4ba9938c 6720 tval = (int32_t)insn_get(env, s, MO_32);
2c0262af 6721 } else {
4ba9938c 6722 tval = (int16_t)insn_get(env, s, MO_16);
2c0262af
FB
6723 }
6724 do_jcc:
6725 next_eip = s->pc - s->cs_base;
14ce26e7 6726 tval += next_eip;
2c0262af 6727 if (s->dflag == 0)
14ce26e7
FB
6728 tval &= 0xffff;
6729 gen_jcc(s, b, tval, next_eip);
2c0262af
FB
6730 break;
6731
6732 case 0x190 ... 0x19f: /* setcc Gv */
0af10c86 6733 modrm = cpu_ldub_code(env, s->pc++);
cc8b6f5b 6734 gen_setcc1(s, b, cpu_T[0]);
4ba9938c 6735 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
2c0262af
FB
6736 break;
6737 case 0x140 ... 0x14f: /* cmov Gv, Ev */
bff93281
PM
6738 if (!(s->cpuid_features & CPUID_CMOV)) {
6739 goto illegal_op;
6740 }
4ba9938c 6741 ot = dflag + MO_16;
f32d3781
PB
6742 modrm = cpu_ldub_code(env, s->pc++);
6743 reg = ((modrm >> 3) & 7) | rex_r;
6744 gen_cmovcc1(env, s, ot, b, modrm, reg);
2c0262af 6745 break;
3b46e624 6746
2c0262af
FB
6747 /************************/
6748 /* flags */
6749 case 0x9c: /* pushf */
872929aa 6750 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
2c0262af
FB
6751 if (s->vm86 && s->iopl != 3) {
6752 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6753 } else {
773cdfcc 6754 gen_update_cc_op(s);
f0967a1a 6755 gen_helper_read_eflags(cpu_T[0], cpu_env);
2c0262af
FB
6756 gen_push_T0(s);
6757 }
6758 break;
6759 case 0x9d: /* popf */
872929aa 6760 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
2c0262af
FB
6761 if (s->vm86 && s->iopl != 3) {
6762 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6763 } else {
6764 gen_pop_T0(s);
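            /* Which EFLAGS bits POPF may modify depends on privilege: CPL 0 may
               change IF and IOPL, CPL <= IOPL may change IF but not IOPL, and any
               other code may change neither; with a 16-bit operand only the low
               word of EFLAGS is written. */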
6765 if (s->cpl == 0) {
6766 if (s->dflag) {
f0967a1a
BS
6767 gen_helper_write_eflags(cpu_env, cpu_T[0],
6768 tcg_const_i32((TF_MASK | AC_MASK |
6769 ID_MASK | NT_MASK |
6770 IF_MASK |
6771 IOPL_MASK)));
2c0262af 6772 } else {
f0967a1a
BS
6773 gen_helper_write_eflags(cpu_env, cpu_T[0],
6774 tcg_const_i32((TF_MASK | AC_MASK |
6775 ID_MASK | NT_MASK |
6776 IF_MASK | IOPL_MASK)
6777 & 0xffff));
2c0262af
FB
6778 }
6779 } else {
4136f33c
FB
6780 if (s->cpl <= s->iopl) {
6781 if (s->dflag) {
f0967a1a
BS
6782 gen_helper_write_eflags(cpu_env, cpu_T[0],
6783 tcg_const_i32((TF_MASK |
6784 AC_MASK |
6785 ID_MASK |
6786 NT_MASK |
6787 IF_MASK)));
4136f33c 6788 } else {
f0967a1a
BS
6789 gen_helper_write_eflags(cpu_env, cpu_T[0],
6790 tcg_const_i32((TF_MASK |
6791 AC_MASK |
6792 ID_MASK |
6793 NT_MASK |
6794 IF_MASK)
6795 & 0xffff));
4136f33c 6796 }
2c0262af 6797 } else {
4136f33c 6798 if (s->dflag) {
f0967a1a
BS
6799 gen_helper_write_eflags(cpu_env, cpu_T[0],
6800 tcg_const_i32((TF_MASK | AC_MASK |
6801 ID_MASK | NT_MASK)));
4136f33c 6802 } else {
f0967a1a
BS
6803 gen_helper_write_eflags(cpu_env, cpu_T[0],
6804 tcg_const_i32((TF_MASK | AC_MASK |
6805 ID_MASK | NT_MASK)
6806 & 0xffff));
4136f33c 6807 }
2c0262af
FB
6808 }
6809 }
6810 gen_pop_update(s);
3ca51d07 6811 set_cc_op(s, CC_OP_EFLAGS);
a9321a4d 6812 /* abort translation because the TF/AC flags may change */
14ce26e7 6813 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
6814 gen_eob(s);
6815 }
6816 break;
6817 case 0x9e: /* sahf */
12e26b75 6818 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
14ce26e7 6819 goto illegal_op;
4ba9938c 6820 gen_op_mov_TN_reg(MO_8, 0, R_AH);
d229edce 6821 gen_compute_eflags(s);
bd7a7b33
FB
6822 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6823 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6824 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
2c0262af
FB
6825 break;
6826 case 0x9f: /* lahf */
12e26b75 6827 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
14ce26e7 6828 goto illegal_op;
d229edce 6829 gen_compute_eflags(s);
bd7a7b33 6830 /* Note: gen_compute_eflags() only gives the condition codes */
d229edce 6831 tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
4ba9938c 6832 gen_op_mov_reg_T0(MO_8, R_AH);
2c0262af
FB
6833 break;
6834 case 0xf5: /* cmc */
d229edce 6835 gen_compute_eflags(s);
bd7a7b33 6836 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
2c0262af
FB
6837 break;
6838 case 0xf8: /* clc */
d229edce 6839 gen_compute_eflags(s);
bd7a7b33 6840 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
2c0262af
FB
6841 break;
6842 case 0xf9: /* stc */
d229edce 6843 gen_compute_eflags(s);
bd7a7b33 6844 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
2c0262af
FB
6845 break;
6846 case 0xfc: /* cld */
b6abf97d 6847 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
317ac620 6848 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
2c0262af
FB
6849 break;
6850 case 0xfd: /* std */
b6abf97d 6851 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
317ac620 6852 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
2c0262af
FB
6853 break;
6854
6855 /************************/
6856 /* bit operations */
6857 case 0x1ba: /* bt/bts/btr/btc Gv, im */
4ba9938c 6858 ot = dflag + MO_16;
0af10c86 6859 modrm = cpu_ldub_code(env, s->pc++);
33698e5f 6860 op = (modrm >> 3) & 7;
2c0262af 6861 mod = (modrm >> 6) & 3;
14ce26e7 6862 rm = (modrm & 7) | REX_B(s);
2c0262af 6863 if (mod != 3) {
14ce26e7 6864 s->rip_offset = 1;
4eeb3939 6865 gen_lea_modrm(env, s, modrm);
909be183 6866 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 6867 } else {
57fec1fe 6868 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af
FB
6869 }
6870 /* load shift */
0af10c86 6871 val = cpu_ldub_code(env, s->pc++);
2c0262af
FB
6872 gen_op_movl_T1_im(val);
6873 if (op < 4)
6874 goto illegal_op;
6875 op -= 4;
f484d386 6876 goto bt_op;
2c0262af
FB
6877 case 0x1a3: /* bt Gv, Ev */
6878 op = 0;
6879 goto do_btx;
6880 case 0x1ab: /* bts */
6881 op = 1;
6882 goto do_btx;
6883 case 0x1b3: /* btr */
6884 op = 2;
6885 goto do_btx;
6886 case 0x1bb: /* btc */
6887 op = 3;
6888 do_btx:
4ba9938c 6889 ot = dflag + MO_16;
0af10c86 6890 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 6891 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 6892 mod = (modrm >> 6) & 3;
14ce26e7 6893 rm = (modrm & 7) | REX_B(s);
4ba9938c 6894 gen_op_mov_TN_reg(MO_32, 1, reg);
2c0262af 6895 if (mod != 3) {
4eeb3939 6896 gen_lea_modrm(env, s, modrm);
2c0262af 6897 /* specific case: we need to add a displacement */
f484d386
FB
6898 gen_exts(ot, cpu_T[1]);
6899 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6900 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6901 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
909be183 6902 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 6903 } else {
57fec1fe 6904 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af 6905 }
f484d386
FB
6906 bt_op:
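        /* Generic bit-test lowering: cpu_T[1] is first reduced to the bit
           index within the operand, the selected bit is shifted down into
           bit 0 of cpu_cc_src (via cpu_tmp4 for the writing forms), and CF
           is later derived from it through CC_OP_SARB + ot. */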
6907 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6908 switch(op) {
6909 case 0:
6910 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
6911 tcg_gen_movi_tl(cpu_cc_dst, 0);
6912 break;
6913 case 1:
6914 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6915 tcg_gen_movi_tl(cpu_tmp0, 1);
6916 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6917 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6918 break;
6919 case 2:
6920 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6921 tcg_gen_movi_tl(cpu_tmp0, 1);
6922 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6923 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6924 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6925 break;
6926 default:
6927 case 3:
6928 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6929 tcg_gen_movi_tl(cpu_tmp0, 1);
6930 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6931 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6932 break;
6933 }
3ca51d07 6934 set_cc_op(s, CC_OP_SARB + ot);
2c0262af 6935 if (op != 0) {
fd8ca9f6
RH
6936 if (mod != 3) {
6937 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
6938 } else {
57fec1fe 6939 gen_op_mov_reg_T0(ot, rm);
fd8ca9f6 6940 }
f484d386
FB
6941 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6942 tcg_gen_movi_tl(cpu_cc_dst, 0);
2c0262af
FB
6943 }
6944 break;
321c5351
RH
6945 case 0x1bc: /* bsf / tzcnt */
6946 case 0x1bd: /* bsr / lzcnt */
4ba9938c 6947 ot = dflag + MO_16;
321c5351
RH
6948 modrm = cpu_ldub_code(env, s->pc++);
6949 reg = ((modrm >> 3) & 7) | rex_r;
6950 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6951 gen_extu(ot, cpu_T[0]);
6952
6953 /* Note that lzcnt and tzcnt are in different extensions. */
6954 if ((prefixes & PREFIX_REPZ)
6955 && (b & 1
6956 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
6957 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
6958 int size = 8 << ot;
6959 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
6960 if (b & 1) {
6961 /* For lzcnt, reduce the target_ulong result by the
6962 number of zeros that we expect to find at the top. */
6963 gen_helper_clz(cpu_T[0], cpu_T[0]);
6964 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
6191b059 6965 } else {
321c5351
RH
6966 /* For tzcnt, a zero input must return the operand size:
6967 force all bits outside the operand size to 1. */
6968 target_ulong mask = (target_ulong)-2 << (size - 1);
6969 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
6970 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6191b059 6971 }
321c5351
RH
6972 /* For lzcnt/tzcnt, C and Z bits are defined and are
6973 related to the result. */
6974 gen_op_update1_cc();
6975 set_cc_op(s, CC_OP_BMILGB + ot);
6976 } else {
6977 /* For bsr/bsf, only the Z bit is defined and it is related
6978 to the input and not the result. */
6979 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
6980 set_cc_op(s, CC_OP_LOGICB + ot);
6981 if (b & 1) {
6982 /* For bsr, return the bit index of the first 1 bit,
6983 not the count of leading zeros. */
6984 gen_helper_clz(cpu_T[0], cpu_T[0]);
6985 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
6986 } else {
6987 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6988 }
6989 /* ??? The manual says that the output is undefined when the
6990 input is zero, but real hardware leaves it unchanged, and
6991 real programs appear to depend on that. */
6992 tcg_gen_movi_tl(cpu_tmp0, 0);
6993 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
6994 cpu_regs[reg], cpu_T[0]);
6191b059 6995 }
321c5351 6996 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
6997 break;
6998 /************************/
6999 /* bcd */
7000 case 0x27: /* daa */
14ce26e7
FB
7001 if (CODE64(s))
7002 goto illegal_op;
773cdfcc 7003 gen_update_cc_op(s);
7923057b 7004 gen_helper_daa(cpu_env);
3ca51d07 7005 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
7006 break;
7007 case 0x2f: /* das */
14ce26e7
FB
7008 if (CODE64(s))
7009 goto illegal_op;
773cdfcc 7010 gen_update_cc_op(s);
7923057b 7011 gen_helper_das(cpu_env);
3ca51d07 7012 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
7013 break;
7014 case 0x37: /* aaa */
14ce26e7
FB
7015 if (CODE64(s))
7016 goto illegal_op;
773cdfcc 7017 gen_update_cc_op(s);
7923057b 7018 gen_helper_aaa(cpu_env);
3ca51d07 7019 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
7020 break;
7021 case 0x3f: /* aas */
14ce26e7
FB
7022 if (CODE64(s))
7023 goto illegal_op;
773cdfcc 7024 gen_update_cc_op(s);
7923057b 7025 gen_helper_aas(cpu_env);
3ca51d07 7026 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
7027 break;
7028 case 0xd4: /* aam */
14ce26e7
FB
7029 if (CODE64(s))
7030 goto illegal_op;
0af10c86 7031 val = cpu_ldub_code(env, s->pc++);
b6d7c3db
TS
7032 if (val == 0) {
7033 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
7034 } else {
7923057b 7035 gen_helper_aam(cpu_env, tcg_const_i32(val));
3ca51d07 7036 set_cc_op(s, CC_OP_LOGICB);
b6d7c3db 7037 }
2c0262af
FB
7038 break;
7039 case 0xd5: /* aad */
14ce26e7
FB
7040 if (CODE64(s))
7041 goto illegal_op;
0af10c86 7042 val = cpu_ldub_code(env, s->pc++);
7923057b 7043 gen_helper_aad(cpu_env, tcg_const_i32(val));
3ca51d07 7044 set_cc_op(s, CC_OP_LOGICB);
2c0262af
FB
7045 break;
7046 /************************/
7047 /* misc */
7048 case 0x90: /* nop */
ab1f142b 7049 /* XXX: correct lock test for all insn */
7418027e 7050 if (prefixes & PREFIX_LOCK) {
ab1f142b 7051 goto illegal_op;
7418027e
RH
7052 }
7053 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
7054 if (REX_B(s)) {
7055 goto do_xchg_reg_eax;
7056 }
0573fbfc 7057 if (prefixes & PREFIX_REPZ) {
81f3053b
PB
7058 gen_update_cc_op(s);
7059 gen_jmp_im(pc_start - s->cs_base);
7060 gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
7061 s->is_jmp = DISAS_TB_JUMP;
0573fbfc 7062 }
2c0262af
FB
7063 break;
7064 case 0x9b: /* fwait */
5fafdf24 7065 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
7eee2a50
FB
7066 (HF_MP_MASK | HF_TS_MASK)) {
7067 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
2ee73ac3 7068 } else {
773cdfcc 7069 gen_update_cc_op(s);
14ce26e7 7070 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 7071 gen_helper_fwait(cpu_env);
7eee2a50 7072 }
2c0262af
FB
7073 break;
7074 case 0xcc: /* int3 */
7075 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
7076 break;
7077 case 0xcd: /* int N */
0af10c86 7078 val = cpu_ldub_code(env, s->pc++);
f115e911 7079 if (s->vm86 && s->iopl != 3) {
5fafdf24 7080 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
f115e911
FB
7081 } else {
7082 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
7083 }
2c0262af
FB
7084 break;
7085 case 0xce: /* into */
14ce26e7
FB
7086 if (CODE64(s))
7087 goto illegal_op;
773cdfcc 7088 gen_update_cc_op(s);
a8ede8ba 7089 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7090 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
2c0262af 7091 break;
0b97134b 7092#ifdef WANT_ICEBP
2c0262af 7093 case 0xf1: /* icebp (undocumented, exits to external debugger) */
872929aa 7094 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
aba9d61e 7095#if 1
2c0262af 7096 gen_debug(s, pc_start - s->cs_base);
aba9d61e
FB
7097#else
7098 /* start debug */
0af10c86 7099 tb_flush(env);
24537a01 7100 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
aba9d61e 7101#endif
2c0262af 7102 break;
0b97134b 7103#endif
2c0262af
FB
7104 case 0xfa: /* cli */
7105 if (!s->vm86) {
7106 if (s->cpl <= s->iopl) {
f0967a1a 7107 gen_helper_cli(cpu_env);
2c0262af
FB
7108 } else {
7109 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7110 }
7111 } else {
7112 if (s->iopl == 3) {
f0967a1a 7113 gen_helper_cli(cpu_env);
2c0262af
FB
7114 } else {
7115 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7116 }
7117 }
7118 break;
7119 case 0xfb: /* sti */
7120 if (!s->vm86) {
7121 if (s->cpl <= s->iopl) {
7122 gen_sti:
f0967a1a 7123 gen_helper_sti(cpu_env);
2c0262af 7124 /* interrupts are enabled only after the first insn following sti */
a2cc3b24
FB
7125 /* If several instructions disable interrupts, only the
7126 _first_ does it */
7127 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
f0967a1a 7128 gen_helper_set_inhibit_irq(cpu_env);
2c0262af 7129 /* give a chance to handle pending irqs */
14ce26e7 7130 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
7131 gen_eob(s);
7132 } else {
7133 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7134 }
7135 } else {
7136 if (s->iopl == 3) {
7137 goto gen_sti;
7138 } else {
7139 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7140 }
7141 }
7142 break;
7143 case 0x62: /* bound */
14ce26e7
FB
7144 if (CODE64(s))
7145 goto illegal_op;
4ba9938c 7146 ot = dflag ? MO_32 : MO_16;
0af10c86 7147 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7148 reg = (modrm >> 3) & 7;
7149 mod = (modrm >> 6) & 3;
7150 if (mod == 3)
7151 goto illegal_op;
57fec1fe 7152 gen_op_mov_TN_reg(ot, 0, reg);
4eeb3939 7153 gen_lea_modrm(env, s, modrm);
14ce26e7 7154 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 7155 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4ba9938c 7156 if (ot == MO_16) {
92fc4b58
BS
7157 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
7158 } else {
7159 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
7160 }
2c0262af
FB
7161 break;
7162 case 0x1c8 ... 0x1cf: /* bswap reg */
14ce26e7
FB
7163 reg = (b & 7) | REX_B(s);
7164#ifdef TARGET_X86_64
7165 if (dflag == 2) {
4ba9938c 7166 gen_op_mov_TN_reg(MO_64, 0, reg);
66896cb8 7167 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
4ba9938c 7168 gen_op_mov_reg_T0(MO_64, reg);
5fafdf24 7169 } else
8777643e 7170#endif
57fec1fe 7171 {
4ba9938c 7172 gen_op_mov_TN_reg(MO_32, 0, reg);
8777643e
AJ
7173 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
7174 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
4ba9938c 7175 gen_op_mov_reg_T0(MO_32, reg);
14ce26e7 7176 }
2c0262af
FB
7177 break;
7178 case 0xd6: /* salc */
14ce26e7
FB
7179 if (CODE64(s))
7180 goto illegal_op;
cc8b6f5b 7181 gen_compute_eflags_c(s, cpu_T[0]);
bd7a7b33 7182 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4ba9938c 7183 gen_op_mov_reg_T0(MO_8, R_EAX);
2c0262af
FB
7184 break;
7185 case 0xe0: /* loopnz */
7186 case 0xe1: /* loopz */
2c0262af
FB
7187 case 0xe2: /* loop */
7188 case 0xe3: /* jecxz */
14ce26e7 7189 {
6e0d8677 7190 int l1, l2, l3;
14ce26e7 7191
4ba9938c 7192 tval = (int8_t)insn_get(env, s, MO_8);
14ce26e7
FB
7193 next_eip = s->pc - s->cs_base;
7194 tval += next_eip;
7195 if (s->dflag == 0)
7196 tval &= 0xffff;
3b46e624 7197
14ce26e7
FB
7198 l1 = gen_new_label();
7199 l2 = gen_new_label();
6e0d8677 7200 l3 = gen_new_label();
14ce26e7 7201 b &= 3;
6e0d8677
FB
7202 switch(b) {
7203 case 0: /* loopnz */
7204 case 1: /* loopz */
6e0d8677
FB
7205 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7206 gen_op_jz_ecx(s->aflag, l3);
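                /* the loop is taken (branch to l1) only if ZF also matches
                   the loopz/loopnz variant being decoded */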
5bdb91b0 7207 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
6e0d8677
FB
7208 break;
7209 case 2: /* loop */
7210 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7211 gen_op_jnz_ecx(s->aflag, l1);
7212 break;
7213 default:
7214 case 3: /* jcxz */
7215 gen_op_jz_ecx(s->aflag, l1);
7216 break;
14ce26e7
FB
7217 }
7218
6e0d8677 7219 gen_set_label(l3);
14ce26e7 7220 gen_jmp_im(next_eip);
8e1c85e3 7221 tcg_gen_br(l2);
6e0d8677 7222
14ce26e7
FB
7223 gen_set_label(l1);
7224 gen_jmp_im(tval);
7225 gen_set_label(l2);
7226 gen_eob(s);
7227 }
2c0262af
FB
7228 break;
7229 case 0x130: /* wrmsr */
7230 case 0x132: /* rdmsr */
7231 if (s->cpl != 0) {
7232 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7233 } else {
773cdfcc 7234 gen_update_cc_op(s);
872929aa 7235 gen_jmp_im(pc_start - s->cs_base);
0573fbfc 7236 if (b & 2) {
4a7443be 7237 gen_helper_rdmsr(cpu_env);
0573fbfc 7238 } else {
4a7443be 7239 gen_helper_wrmsr(cpu_env);
0573fbfc 7240 }
2c0262af
FB
7241 }
7242 break;
7243 case 0x131: /* rdtsc */
773cdfcc 7244 gen_update_cc_op(s);
ecada8a2 7245 gen_jmp_im(pc_start - s->cs_base);
efade670
PB
7246 if (use_icount)
7247 gen_io_start();
4a7443be 7248 gen_helper_rdtsc(cpu_env);
efade670
PB
7249 if (use_icount) {
7250 gen_io_end();
7251 gen_jmp(s, s->pc - s->cs_base);
7252 }
2c0262af 7253 break;
df01e0fc 7254 case 0x133: /* rdpmc */
773cdfcc 7255 gen_update_cc_op(s);
df01e0fc 7256 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7257 gen_helper_rdpmc(cpu_env);
df01e0fc 7258 break;
023fe10d 7259 case 0x134: /* sysenter */
2436b61a 7260 /* For Intel, SYSENTER is valid in 64-bit mode */
0af10c86 7261 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
14ce26e7 7262 goto illegal_op;
023fe10d
FB
7263 if (!s->pe) {
7264 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7265 } else {
728d803b 7266 gen_update_cc_op(s);
14ce26e7 7267 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7268 gen_helper_sysenter(cpu_env);
023fe10d
FB
7269 gen_eob(s);
7270 }
7271 break;
7272 case 0x135: /* sysexit */
2436b61a 7273 /* For Intel, SYSEXIT is valid in 64-bit mode */
0af10c86 7274 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
14ce26e7 7275 goto illegal_op;
023fe10d
FB
7276 if (!s->pe) {
7277 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7278 } else {
728d803b 7279 gen_update_cc_op(s);
14ce26e7 7280 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7281 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag));
023fe10d
FB
7282 gen_eob(s);
7283 }
7284 break;
14ce26e7
FB
7285#ifdef TARGET_X86_64
7286 case 0x105: /* syscall */
7287 /* XXX: is it usable in real mode ? */
728d803b 7288 gen_update_cc_op(s);
14ce26e7 7289 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7290 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
14ce26e7
FB
7291 gen_eob(s);
7292 break;
7293 case 0x107: /* sysret */
7294 if (!s->pe) {
7295 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7296 } else {
728d803b 7297 gen_update_cc_op(s);
14ce26e7 7298 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7299 gen_helper_sysret(cpu_env, tcg_const_i32(s->dflag));
aba9d61e 7300 /* condition codes are modified only in long mode */
3ca51d07
RH
7301 if (s->lma) {
7302 set_cc_op(s, CC_OP_EFLAGS);
7303 }
14ce26e7
FB
7304 gen_eob(s);
7305 }
7306 break;
7307#endif
2c0262af 7308 case 0x1a2: /* cpuid */
773cdfcc 7309 gen_update_cc_op(s);
9575cb94 7310 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7311 gen_helper_cpuid(cpu_env);
2c0262af
FB
7312 break;
7313 case 0xf4: /* hlt */
7314 if (s->cpl != 0) {
7315 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7316 } else {
773cdfcc 7317 gen_update_cc_op(s);
94451178 7318 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7319 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
5779406a 7320 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
7321 }
7322 break;
7323 case 0x100:
0af10c86 7324 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7325 mod = (modrm >> 6) & 3;
7326 op = (modrm >> 3) & 7;
7327 switch(op) {
7328 case 0: /* sldt */
f115e911
FB
7329 if (!s->pe || s->vm86)
7330 goto illegal_op;
872929aa 7331 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
651ba608 7332 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
4ba9938c 7333 ot = MO_16;
2c0262af
FB
7334 if (mod == 3)
7335 ot += s->dflag;
0af10c86 7336 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
7337 break;
7338 case 2: /* lldt */
f115e911
FB
7339 if (!s->pe || s->vm86)
7340 goto illegal_op;
2c0262af
FB
7341 if (s->cpl != 0) {
7342 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7343 } else {
872929aa 7344 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
4ba9938c 7345 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
14ce26e7 7346 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 7347 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 7348 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
2c0262af
FB
7349 }
7350 break;
7351 case 1: /* str */
f115e911
FB
7352 if (!s->pe || s->vm86)
7353 goto illegal_op;
872929aa 7354 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
651ba608 7355 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
4ba9938c 7356 ot = MO_16;
2c0262af
FB
7357 if (mod == 3)
7358 ot += s->dflag;
0af10c86 7359 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
7360 break;
7361 case 3: /* ltr */
f115e911
FB
7362 if (!s->pe || s->vm86)
7363 goto illegal_op;
2c0262af
FB
7364 if (s->cpl != 0) {
7365 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7366 } else {
872929aa 7367 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
4ba9938c 7368 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
14ce26e7 7369 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 7370 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 7371 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
2c0262af
FB
7372 }
7373 break;
7374 case 4: /* verr */
7375 case 5: /* verw */
f115e911
FB
7376 if (!s->pe || s->vm86)
7377 goto illegal_op;
4ba9938c 7378 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
773cdfcc 7379 gen_update_cc_op(s);
2999a0b2
BS
7380 if (op == 4) {
7381 gen_helper_verr(cpu_env, cpu_T[0]);
7382 } else {
7383 gen_helper_verw(cpu_env, cpu_T[0]);
7384 }
3ca51d07 7385 set_cc_op(s, CC_OP_EFLAGS);
f115e911 7386 break;
2c0262af
FB
7387 default:
7388 goto illegal_op;
7389 }
7390 break;
7391 case 0x101:
0af10c86 7392 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7393 mod = (modrm >> 6) & 3;
7394 op = (modrm >> 3) & 7;
3d7374c5 7395 rm = modrm & 7;
2c0262af
FB
7396 switch(op) {
7397 case 0: /* sgdt */
2c0262af
FB
7398 if (mod == 3)
7399 goto illegal_op;
872929aa 7400 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
4eeb3939 7401 gen_lea_modrm(env, s, modrm);
651ba608 7402 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
fd8ca9f6 7403 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
aba9d61e 7404 gen_add_A0_im(s, 2);
651ba608 7405 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
2c0262af
FB
7406 if (!s->dflag)
7407 gen_op_andl_T0_im(0xffffff);
fd8ca9f6 7408 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
2c0262af 7409 break;
3d7374c5
FB
7410 case 1:
7411 if (mod == 3) {
7412 switch (rm) {
7413 case 0: /* monitor */
7414 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7415 s->cpl != 0)
7416 goto illegal_op;
773cdfcc 7417 gen_update_cc_op(s);
3d7374c5
FB
7418 gen_jmp_im(pc_start - s->cs_base);
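                    /* the monitored address is taken from rAX, truncated to
                       the current address size and offset by a data-segment
                       base via gen_add_A0_ds_seg() */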
7419#ifdef TARGET_X86_64
7420 if (s->aflag == 2) {
bbf662ee 7421 gen_op_movq_A0_reg(R_EAX);
5fafdf24 7422 } else
3d7374c5
FB
7423#endif
7424 {
bbf662ee 7425 gen_op_movl_A0_reg(R_EAX);
3d7374c5
FB
7426 if (s->aflag == 0)
7427 gen_op_andl_A0_ffff();
7428 }
7429 gen_add_A0_ds_seg(s);
4a7443be 7430 gen_helper_monitor(cpu_env, cpu_A0);
3d7374c5
FB
7431 break;
7432 case 1: /* mwait */
7433 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7434 s->cpl != 0)
7435 goto illegal_op;
728d803b 7436 gen_update_cc_op(s);
94451178 7437 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7438 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
3d7374c5
FB
7439 gen_eob(s);
7440 break;
a9321a4d
PA
7441 case 2: /* clac */
7442 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7443 s->cpl != 0) {
7444 goto illegal_op;
7445 }
7446 gen_helper_clac(cpu_env);
7447 gen_jmp_im(s->pc - s->cs_base);
7448 gen_eob(s);
7449 break;
7450 case 3: /* stac */
7451 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7452 s->cpl != 0) {
7453 goto illegal_op;
7454 }
7455 gen_helper_stac(cpu_env);
7456 gen_jmp_im(s->pc - s->cs_base);
7457 gen_eob(s);
7458 break;
3d7374c5
FB
7459 default:
7460 goto illegal_op;
7461 }
7462 } else { /* sidt */
872929aa 7463 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
4eeb3939 7464 gen_lea_modrm(env, s, modrm);
651ba608 7465 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
fd8ca9f6 7466 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
3d7374c5 7467 gen_add_A0_im(s, 2);
651ba608 7468 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
3d7374c5
FB
7469 if (!s->dflag)
7470 gen_op_andl_T0_im(0xffffff);
fd8ca9f6 7471 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
3d7374c5
FB
7472 }
7473 break;
2c0262af
FB
7474 case 2: /* lgdt */
7475 case 3: /* lidt */
0573fbfc 7476 if (mod == 3) {
773cdfcc 7477 gen_update_cc_op(s);
872929aa 7478 gen_jmp_im(pc_start - s->cs_base);
0573fbfc
TS
7479 switch(rm) {
7480 case 0: /* VMRUN */
872929aa
FB
7481 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7482 goto illegal_op;
7483 if (s->cpl != 0) {
7484 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
0573fbfc 7485 break;
872929aa 7486 } else {
052e80d5 7487 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag),
a7812ae4 7488 tcg_const_i32(s->pc - pc_start));
db620f46 7489 tcg_gen_exit_tb(0);
5779406a 7490 s->is_jmp = DISAS_TB_JUMP;
872929aa 7491 }
0573fbfc
TS
7492 break;
7493 case 1: /* VMMCALL */
872929aa
FB
7494 if (!(s->flags & HF_SVME_MASK))
7495 goto illegal_op;
052e80d5 7496 gen_helper_vmmcall(cpu_env);
0573fbfc
TS
7497 break;
7498 case 2: /* VMLOAD */
872929aa
FB
7499 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7500 goto illegal_op;
7501 if (s->cpl != 0) {
7502 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7503 break;
7504 } else {
052e80d5 7505 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag));
872929aa 7506 }
0573fbfc
TS
7507 break;
7508 case 3: /* VMSAVE */
872929aa
FB
7509 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7510 goto illegal_op;
7511 if (s->cpl != 0) {
7512 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7513 break;
7514 } else {
052e80d5 7515 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag));
872929aa 7516 }
0573fbfc
TS
7517 break;
7518 case 4: /* STGI */
872929aa
FB
7519 if ((!(s->flags & HF_SVME_MASK) &&
7520 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7521 !s->pe)
7522 goto illegal_op;
7523 if (s->cpl != 0) {
7524 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7525 break;
7526 } else {
052e80d5 7527 gen_helper_stgi(cpu_env);
872929aa 7528 }
0573fbfc
TS
7529 break;
7530 case 5: /* CLGI */
872929aa
FB
7531 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7532 goto illegal_op;
7533 if (s->cpl != 0) {
7534 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7535 break;
7536 } else {
052e80d5 7537 gen_helper_clgi(cpu_env);
872929aa 7538 }
0573fbfc
TS
7539 break;
7540 case 6: /* SKINIT */
872929aa
FB
7541 if ((!(s->flags & HF_SVME_MASK) &&
7542 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7543 !s->pe)
7544 goto illegal_op;
052e80d5 7545 gen_helper_skinit(cpu_env);
0573fbfc
TS
7546 break;
7547 case 7: /* INVLPGA */
872929aa
FB
7548 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7549 goto illegal_op;
7550 if (s->cpl != 0) {
7551 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7552 break;
7553 } else {
052e80d5 7554 gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag));
872929aa 7555 }
0573fbfc
TS
7556 break;
7557 default:
7558 goto illegal_op;
7559 }
7560 } else if (s->cpl != 0) {
2c0262af
FB
7561 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7562 } else {
872929aa
FB
7563 gen_svm_check_intercept(s, pc_start,
7564 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
4eeb3939 7565 gen_lea_modrm(env, s, modrm);
0f712e10 7566 gen_op_ld_v(s, MO_16, cpu_T[1], cpu_A0);
aba9d61e 7567 gen_add_A0_im(s, 2);
909be183 7568 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
2c0262af
FB
7569 if (!s->dflag)
7570 gen_op_andl_T0_im(0xffffff);
7571 if (op == 2) {
651ba608
FB
7572 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7573 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
2c0262af 7574 } else {
651ba608
FB
7575 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7576 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
2c0262af
FB
7577 }
7578 }
7579 break;
7580 case 4: /* smsw */
872929aa 7581 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
e2542fe2 7582#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
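            /* on a big-endian host the low 32 bits of the 64-bit cr[0]
               live at offset +4 */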
f60d2728 7583 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7584#else
651ba608 7585 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
f60d2728 7586#endif
4ba9938c 7587 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 1);
2c0262af
FB
7588 break;
7589 case 6: /* lmsw */
7590 if (s->cpl != 0) {
7591 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7592 } else {
872929aa 7593 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
4ba9938c 7594 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
4a7443be 7595 gen_helper_lmsw(cpu_env, cpu_T[0]);
14ce26e7 7596 gen_jmp_im(s->pc - s->cs_base);
d71b9a8b 7597 gen_eob(s);
2c0262af
FB
7598 }
7599 break;
1b050077
AP
7600 case 7:
7601 if (mod != 3) { /* invlpg */
7602 if (s->cpl != 0) {
7603 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7604 } else {
773cdfcc 7605 gen_update_cc_op(s);
1b050077 7606 gen_jmp_im(pc_start - s->cs_base);
4eeb3939 7607 gen_lea_modrm(env, s, modrm);
4a7443be 7608 gen_helper_invlpg(cpu_env, cpu_A0);
1b050077
AP
7609 gen_jmp_im(s->pc - s->cs_base);
7610 gen_eob(s);
7611 }
2c0262af 7612 } else {
1b050077
AP
7613 switch (rm) {
7614 case 0: /* swapgs */
14ce26e7 7615#ifdef TARGET_X86_64
1b050077
AP
7616 if (CODE64(s)) {
7617 if (s->cpl != 0) {
7618 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7619 } else {
7620 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7621 offsetof(CPUX86State,segs[R_GS].base));
7622 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7623 offsetof(CPUX86State,kernelgsbase));
7624 tcg_gen_st_tl(cpu_T[1], cpu_env,
7625 offsetof(CPUX86State,segs[R_GS].base));
7626 tcg_gen_st_tl(cpu_T[0], cpu_env,
7627 offsetof(CPUX86State,kernelgsbase));
7628 }
5fafdf24 7629 } else
14ce26e7
FB
7630#endif
7631 {
7632 goto illegal_op;
7633 }
1b050077
AP
7634 break;
7635 case 1: /* rdtscp */
7636 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7637 goto illegal_op;
773cdfcc 7638 gen_update_cc_op(s);
9575cb94 7639 gen_jmp_im(pc_start - s->cs_base);
1b050077
AP
7640 if (use_icount)
7641 gen_io_start();
4a7443be 7642 gen_helper_rdtscp(cpu_env);
1b050077
AP
7643 if (use_icount) {
7644 gen_io_end();
7645 gen_jmp(s, s->pc - s->cs_base);
7646 }
7647 break;
7648 default:
7649 goto illegal_op;
14ce26e7 7650 }
2c0262af
FB
7651 }
7652 break;
7653 default:
7654 goto illegal_op;
7655 }
7656 break;
3415a4dd
FB
7657 case 0x108: /* invd */
7658 case 0x109: /* wbinvd */
7659 if (s->cpl != 0) {
7660 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7661 } else {
872929aa 7662 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
3415a4dd
FB
7663 /* nothing to do */
7664 }
7665 break;
14ce26e7
FB
7666 case 0x63: /* arpl or movslS (x86_64) */
7667#ifdef TARGET_X86_64
7668 if (CODE64(s)) {
7669 int d_ot;
7670 /* d_ot is the size of destination */
4ba9938c 7671 d_ot = dflag + MO_16;
14ce26e7 7672
0af10c86 7673 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
7674 reg = ((modrm >> 3) & 7) | rex_r;
7675 mod = (modrm >> 6) & 3;
7676 rm = (modrm & 7) | REX_B(s);
3b46e624 7677
14ce26e7 7678 if (mod == 3) {
4ba9938c 7679 gen_op_mov_TN_reg(MO_32, 0, rm);
14ce26e7 7680 /* sign extend */
4ba9938c 7681 if (d_ot == MO_64) {
e108dd01 7682 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4ba9938c 7683 }
57fec1fe 7684 gen_op_mov_reg_T0(d_ot, reg);
14ce26e7 7685 } else {
4eeb3939 7686 gen_lea_modrm(env, s, modrm);
4b1fe067 7687 gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T[0], cpu_A0);
57fec1fe 7688 gen_op_mov_reg_T0(d_ot, reg);
14ce26e7 7689 }
5fafdf24 7690 } else
14ce26e7
FB
7691#endif
7692 {
3bd7da9e 7693 int label1;
49d9fdcc 7694 TCGv t0, t1, t2, a0;
1e4840bf 7695
14ce26e7
FB
7696 if (!s->pe || s->vm86)
7697 goto illegal_op;
a7812ae4
PB
7698 t0 = tcg_temp_local_new();
7699 t1 = tcg_temp_local_new();
7700 t2 = tcg_temp_local_new();
4ba9938c 7701 ot = MO_16;
0af10c86 7702 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
7703 reg = (modrm >> 3) & 7;
7704 mod = (modrm >> 6) & 3;
7705 rm = modrm & 7;
7706 if (mod != 3) {
4eeb3939 7707 gen_lea_modrm(env, s, modrm);
323d1876 7708 gen_op_ld_v(s, ot, t0, cpu_A0);
49d9fdcc
LD
7709 a0 = tcg_temp_local_new();
7710 tcg_gen_mov_tl(a0, cpu_A0);
14ce26e7 7711 } else {
1e4840bf 7712 gen_op_mov_v_reg(ot, t0, rm);
49d9fdcc 7713 TCGV_UNUSED(a0);
14ce26e7 7714 }
1e4840bf
FB
7715 gen_op_mov_v_reg(ot, t1, reg);
7716 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7717 tcg_gen_andi_tl(t1, t1, 3);
7718 tcg_gen_movi_tl(t2, 0);
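            /* t2 accumulates the new ZF value: it stays 0 when the
               destination RPL is already >= the source RPL, and becomes
               CC_Z when the RPL is raised below */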
3bd7da9e 7719 label1 = gen_new_label();
1e4840bf
FB
7720 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7721 tcg_gen_andi_tl(t0, t0, ~3);
7722 tcg_gen_or_tl(t0, t0, t1);
7723 tcg_gen_movi_tl(t2, CC_Z);
3bd7da9e 7724 gen_set_label(label1);
14ce26e7 7725 if (mod != 3) {
323d1876 7726 gen_op_st_v(s, ot, t0, a0);
49d9fdcc
LD
7727 tcg_temp_free(a0);
7728 } else {
1e4840bf 7729 gen_op_mov_reg_v(ot, rm, t0);
14ce26e7 7730 }
d229edce 7731 gen_compute_eflags(s);
3bd7da9e 7732 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
1e4840bf 7733 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
1e4840bf
FB
7734 tcg_temp_free(t0);
7735 tcg_temp_free(t1);
7736 tcg_temp_free(t2);
f115e911 7737 }
f115e911 7738 break;
2c0262af
FB
7739 case 0x102: /* lar */
7740 case 0x103: /* lsl */
cec6843e
FB
7741 {
7742 int label1;
1e4840bf 7743 TCGv t0;
cec6843e
FB
7744 if (!s->pe || s->vm86)
7745 goto illegal_op;
4ba9938c 7746 ot = dflag ? MO_32 : MO_16;
0af10c86 7747 modrm = cpu_ldub_code(env, s->pc++);
cec6843e 7748 reg = ((modrm >> 3) & 7) | rex_r;
4ba9938c 7749 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
a7812ae4 7750 t0 = tcg_temp_local_new();
773cdfcc 7751 gen_update_cc_op(s);
2999a0b2
BS
7752 if (b == 0x102) {
7753 gen_helper_lar(t0, cpu_env, cpu_T[0]);
7754 } else {
7755 gen_helper_lsl(t0, cpu_env, cpu_T[0]);
7756 }
cec6843e
FB
7757 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7758 label1 = gen_new_label();
cb63669a 7759 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
1e4840bf 7760 gen_op_mov_reg_v(ot, reg, t0);
cec6843e 7761 gen_set_label(label1);
3ca51d07 7762 set_cc_op(s, CC_OP_EFLAGS);
1e4840bf 7763 tcg_temp_free(t0);
cec6843e 7764 }
2c0262af
FB
7765 break;
7766 case 0x118:
0af10c86 7767 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7768 mod = (modrm >> 6) & 3;
7769 op = (modrm >> 3) & 7;
7770 switch(op) {
7771 case 0: /* prefetchnta */
7772 case 1: /* prefetcht0 */
7773 case 2: /* prefetcht1 */
7774 case 3: /* prefetcht2 */
7775 if (mod == 3)
7776 goto illegal_op;
4eeb3939 7777 gen_lea_modrm(env, s, modrm);
2c0262af
FB
7778 /* nothing more to do */
7779 break;
e17a36ce 7780 default: /* nop (multi byte) */
0af10c86 7781 gen_nop_modrm(env, s, modrm);
e17a36ce 7782 break;
2c0262af
FB
7783 }
7784 break;
e17a36ce 7785 case 0x119 ... 0x11f: /* nop (multi byte) */
0af10c86
BS
7786 modrm = cpu_ldub_code(env, s->pc++);
7787 gen_nop_modrm(env, s, modrm);
e17a36ce 7788 break;
2c0262af
FB
7789 case 0x120: /* mov reg, crN */
7790 case 0x122: /* mov crN, reg */
7791 if (s->cpl != 0) {
7792 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7793 } else {
0af10c86 7794 modrm = cpu_ldub_code(env, s->pc++);
5c73b757
MO
7795 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7796 * AMD documentation (24594.pdf) and testing of
7797 * intel 386 and 486 processors all show that the mod bits
7798 * are assumed to be 1's, regardless of actual values.
7799 */
14ce26e7
FB
7800 rm = (modrm & 7) | REX_B(s);
7801 reg = ((modrm >> 3) & 7) | rex_r;
7802 if (CODE64(s))
4ba9938c 7803 ot = MO_64;
14ce26e7 7804 else
4ba9938c 7805 ot = MO_32;
ccd59d09
AP
7806 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7807 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7808 reg = 8;
7809 }
2c0262af
FB
7810 switch(reg) {
7811 case 0:
7812 case 2:
7813 case 3:
7814 case 4:
9230e66e 7815 case 8:
773cdfcc 7816 gen_update_cc_op(s);
872929aa 7817 gen_jmp_im(pc_start - s->cs_base);
2c0262af 7818 if (b & 2) {
57fec1fe 7819 gen_op_mov_TN_reg(ot, 0, rm);
4a7443be
BS
7820 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7821 cpu_T[0]);
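                    /* writing a control register can change paging or mode
                       state, so stop translating and resume at the next insn */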
14ce26e7 7822 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
7823 gen_eob(s);
7824 } else {
4a7443be 7825 gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
57fec1fe 7826 gen_op_mov_reg_T0(ot, rm);
2c0262af
FB
7827 }
7828 break;
7829 default:
7830 goto illegal_op;
7831 }
7832 }
7833 break;
7834 case 0x121: /* mov reg, drN */
7835 case 0x123: /* mov drN, reg */
7836 if (s->cpl != 0) {
7837 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7838 } else {
0af10c86 7839 modrm = cpu_ldub_code(env, s->pc++);
5c73b757
MO
7840 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7841 * AMD documentation (24594.pdf) and testing of
7842 * intel 386 and 486 processors all show that the mod bits
7843 * are assumed to be 1's, regardless of actual values.
7844 */
14ce26e7
FB
7845 rm = (modrm & 7) | REX_B(s);
7846 reg = ((modrm >> 3) & 7) | rex_r;
7847 if (CODE64(s))
4ba9938c 7848 ot = MO_64;
14ce26e7 7849 else
4ba9938c 7850 ot = MO_32;
2c0262af 7851 /* XXX: do it dynamically with CR4.DE bit */
14ce26e7 7852 if (reg == 4 || reg == 5 || reg >= 8)
2c0262af
FB
7853 goto illegal_op;
7854 if (b & 2) {
0573fbfc 7855 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
57fec1fe 7856 gen_op_mov_TN_reg(ot, 0, rm);
4a7443be 7857 gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
14ce26e7 7858 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
7859 gen_eob(s);
7860 } else {
0573fbfc 7861 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
651ba608 7862 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
57fec1fe 7863 gen_op_mov_reg_T0(ot, rm);
2c0262af
FB
7864 }
7865 }
7866 break;
7867 case 0x106: /* clts */
7868 if (s->cpl != 0) {
7869 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7870 } else {
0573fbfc 7871 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
f0967a1a 7872 gen_helper_clts(cpu_env);
7eee2a50 7873 /* abort block because static cpu state changed */
14ce26e7 7874 gen_jmp_im(s->pc - s->cs_base);
7eee2a50 7875 gen_eob(s);
2c0262af
FB
7876 }
7877 break;
222a3336 7878 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
664e0f19
FB
7879 case 0x1c3: /* MOVNTI reg, mem */
7880 if (!(s->cpuid_features & CPUID_SSE2))
14ce26e7 7881 goto illegal_op;
4ba9938c 7882 ot = s->dflag == 2 ? MO_64 : MO_32;
0af10c86 7883 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
7884 mod = (modrm >> 6) & 3;
7885 if (mod == 3)
7886 goto illegal_op;
7887 reg = ((modrm >> 3) & 7) | rex_r;
7888 /* generate a generic store */
0af10c86 7889 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
14ce26e7 7890 break;
664e0f19 7891 case 0x1ae:
0af10c86 7892 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
7893 mod = (modrm >> 6) & 3;
7894 op = (modrm >> 3) & 7;
7895 switch(op) {
7896 case 0: /* fxsave */
5fafdf24 7897 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
09d85fb8 7898 (s->prefix & PREFIX_LOCK))
14ce26e7 7899 goto illegal_op;
09d85fb8 7900 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
0fd14b72
FB
7901 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7902 break;
7903 }
4eeb3939 7904 gen_lea_modrm(env, s, modrm);
773cdfcc 7905 gen_update_cc_op(s);
19e6c4b8 7906 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 7907 gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32((s->dflag == 2)));
664e0f19
FB
7908 break;
7909 case 1: /* fxrstor */
5fafdf24 7910 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
09d85fb8 7911 (s->prefix & PREFIX_LOCK))
14ce26e7 7912 goto illegal_op;
09d85fb8 7913 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
0fd14b72
FB
7914 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7915 break;
7916 }
4eeb3939 7917 gen_lea_modrm(env, s, modrm);
773cdfcc 7918 gen_update_cc_op(s);
19e6c4b8 7919 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae
BS
7920 gen_helper_fxrstor(cpu_env, cpu_A0,
7921 tcg_const_i32((s->dflag == 2)));
664e0f19
FB
7922 break;
7923 case 2: /* ldmxcsr */
7924 case 3: /* stmxcsr */
7925 if (s->flags & HF_TS_MASK) {
7926 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7927 break;
14ce26e7 7928 }
664e0f19
FB
7929 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7930 mod == 3)
14ce26e7 7931 goto illegal_op;
4eeb3939 7932 gen_lea_modrm(env, s, modrm);
664e0f19 7933 if (op == 2) {
80b02013
RH
7934 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
7935 s->mem_index, MO_LEUL);
d3eb5eae 7936 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
14ce26e7 7937 } else {
651ba608 7938 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
fd8ca9f6 7939 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
14ce26e7 7940 }
664e0f19
FB
7941 break;
7942 case 5: /* lfence */
7943 case 6: /* mfence */
8001c294 7944 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
664e0f19
FB
7945 goto illegal_op;
7946 break;
8f091a59
FB
7947 case 7: /* sfence / clflush */
7948 if ((modrm & 0xc7) == 0xc0) {
7949 /* sfence */
a35f3ec7 7950 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
8f091a59
FB
7951 if (!(s->cpuid_features & CPUID_SSE))
7952 goto illegal_op;
7953 } else {
7954 /* clflush */
7955 if (!(s->cpuid_features & CPUID_CLFLUSH))
7956 goto illegal_op;
4eeb3939 7957 gen_lea_modrm(env, s, modrm);
8f091a59
FB
7958 }
7959 break;
664e0f19 7960 default:
14ce26e7
FB
7961 goto illegal_op;
7962 }
7963 break;
a35f3ec7 7964 case 0x10d: /* 3DNow! prefetch(w) */
0af10c86 7965 modrm = cpu_ldub_code(env, s->pc++);
a35f3ec7
AJ
7966 mod = (modrm >> 6) & 3;
7967 if (mod == 3)
7968 goto illegal_op;
4eeb3939 7969 gen_lea_modrm(env, s, modrm);
8f091a59
FB
7970 /* ignore for now */
7971 break;
3b21e03e 7972 case 0x1aa: /* rsm */
872929aa 7973 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
3b21e03e
FB
7974 if (!(s->flags & HF_SMM_MASK))
7975 goto illegal_op;
728d803b 7976 gen_update_cc_op(s);
3b21e03e 7977 gen_jmp_im(s->pc - s->cs_base);
608badfc 7978 gen_helper_rsm(cpu_env);
3b21e03e
FB
7979 gen_eob(s);
7980 break;
222a3336
AZ
7981 case 0x1b8: /* SSE4.2 popcnt */
7982 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7983 PREFIX_REPZ)
7984 goto illegal_op;
7985 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7986 goto illegal_op;
7987
0af10c86 7988 modrm = cpu_ldub_code(env, s->pc++);
8b4a3df8 7989 reg = ((modrm >> 3) & 7) | rex_r;
222a3336
AZ
7990
7991 if (s->prefix & PREFIX_DATA)
4ba9938c 7992 ot = MO_16;
222a3336 7993 else if (s->dflag != 2)
4ba9938c 7994 ot = MO_32;
222a3336 7995 else
4ba9938c 7996 ot = MO_64;
222a3336 7997
0af10c86 7998 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
d3eb5eae 7999 gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
222a3336 8000 gen_op_mov_reg_T0(ot, reg);
fdb0d09d 8001
3ca51d07 8002 set_cc_op(s, CC_OP_EFLAGS);
222a3336 8003 break;
a35f3ec7
AJ
8004 case 0x10e ... 0x10f:
8005 /* 3DNow! instructions, ignore prefixes */
8006 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
664e0f19
FB
8007 case 0x110 ... 0x117:
8008 case 0x128 ... 0x12f:
4242b1bd 8009 case 0x138 ... 0x13a:
d9f4bb27 8010 case 0x150 ... 0x179:
664e0f19
FB
8011 case 0x17c ... 0x17f:
8012 case 0x1c2:
8013 case 0x1c4 ... 0x1c6:
8014 case 0x1d0 ... 0x1fe:
0af10c86 8015 gen_sse(env, s, b, pc_start, rex_r);
664e0f19 8016 break;
2c0262af
FB
8017 default:
8018 goto illegal_op;
8019 }
8020 /* lock generation */
8021 if (s->prefix & PREFIX_LOCK)
a7812ae4 8022 gen_helper_unlock();
2c0262af
FB
8023 return s->pc;
8024 illegal_op:
ab1f142b 8025 if (s->prefix & PREFIX_LOCK)
a7812ae4 8026 gen_helper_unlock();
2c0262af
FB
8027 /* XXX: ensure that no lock was generated */
8028 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
8029 return s->pc;
8030}
8031
2c0262af
FB
8032void optimize_flags_init(void)
8033{
a7812ae4
PB
8034 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
8035 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8036 offsetof(CPUX86State, cc_op), "cc_op");
317ac620 8037 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
a7812ae4 8038 "cc_dst");
a3251186
RH
8039 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
8040 "cc_src");
988c3eb0
RH
8041 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src2),
8042 "cc_src2");
437a88a5 8043
cc739bb0
LD
8044#ifdef TARGET_X86_64
8045 cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8046 offsetof(CPUX86State, regs[R_EAX]), "rax");
cc739bb0 8047 cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8048 offsetof(CPUX86State, regs[R_ECX]), "rcx");
cc739bb0 8049 cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8050 offsetof(CPUX86State, regs[R_EDX]), "rdx");
cc739bb0 8051 cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8052 offsetof(CPUX86State, regs[R_EBX]), "rbx");
cc739bb0 8053 cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8054 offsetof(CPUX86State, regs[R_ESP]), "rsp");
cc739bb0 8055 cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8056 offsetof(CPUX86State, regs[R_EBP]), "rbp");
cc739bb0 8057 cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8058 offsetof(CPUX86State, regs[R_ESI]), "rsi");
cc739bb0 8059 cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8060 offsetof(CPUX86State, regs[R_EDI]), "rdi");
cc739bb0 8061 cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8062 offsetof(CPUX86State, regs[8]), "r8");
cc739bb0 8063 cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8064 offsetof(CPUX86State, regs[9]), "r9");
cc739bb0 8065 cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8066 offsetof(CPUX86State, regs[10]), "r10");
cc739bb0 8067 cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8068 offsetof(CPUX86State, regs[11]), "r11");
cc739bb0 8069 cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8070 offsetof(CPUX86State, regs[12]), "r12");
cc739bb0 8071 cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8072 offsetof(CPUX86State, regs[13]), "r13");
cc739bb0 8073 cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8074 offsetof(CPUX86State, regs[14]), "r14");
cc739bb0 8075 cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8076 offsetof(CPUX86State, regs[15]), "r15");
cc739bb0
LD
8077#else
8078 cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8079 offsetof(CPUX86State, regs[R_EAX]), "eax");
cc739bb0 8080 cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8081 offsetof(CPUX86State, regs[R_ECX]), "ecx");
cc739bb0 8082 cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8083 offsetof(CPUX86State, regs[R_EDX]), "edx");
cc739bb0 8084 cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8085 offsetof(CPUX86State, regs[R_EBX]), "ebx");
cc739bb0 8086 cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8087 offsetof(CPUX86State, regs[R_ESP]), "esp");
cc739bb0 8088 cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8089 offsetof(CPUX86State, regs[R_EBP]), "ebp");
cc739bb0 8090 cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8091 offsetof(CPUX86State, regs[R_ESI]), "esi");
cc739bb0 8092 cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8093 offsetof(CPUX86State, regs[R_EDI]), "edi");
cc739bb0 8094#endif
2c0262af
FB
8095}
8096
8097/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8098 basic block 'tb'. If search_pc is TRUE, also generate PC
8099 information for each intermediate instruction. */
467215c2 8100static inline void gen_intermediate_code_internal(X86CPU *cpu,
2cfc5f17 8101 TranslationBlock *tb,
467215c2 8102 bool search_pc)
2c0262af 8103{
ed2803da 8104 CPUState *cs = CPU(cpu);
467215c2 8105 CPUX86State *env = &cpu->env;
2c0262af 8106 DisasContext dc1, *dc = &dc1;
14ce26e7 8107 target_ulong pc_ptr;
2c0262af 8108 uint16_t *gen_opc_end;
a1d1bb31 8109 CPUBreakpoint *bp;
7f5b7d3e 8110 int j, lj;
c068688b 8111 uint64_t flags;
14ce26e7
FB
8112 target_ulong pc_start;
8113 target_ulong cs_base;
2e70f6ef
PB
8114 int num_insns;
8115 int max_insns;
3b46e624 8116
2c0262af 8117 /* generate intermediate code */
14ce26e7
FB
8118 pc_start = tb->pc;
8119 cs_base = tb->cs_base;
2c0262af 8120 flags = tb->flags;
3a1d9b8b 8121
4f31916f 8122 dc->pe = (flags >> HF_PE_SHIFT) & 1;
2c0262af
FB
8123 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
8124 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
8125 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
8126 dc->f_st = 0;
8127 dc->vm86 = (flags >> VM_SHIFT) & 1;
8128 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
8129 dc->iopl = (flags >> IOPL_SHIFT) & 3;
8130 dc->tf = (flags >> TF_SHIFT) & 1;
ed2803da 8131 dc->singlestep_enabled = cs->singlestep_enabled;
2c0262af 8132 dc->cc_op = CC_OP_DYNAMIC;
e207582f 8133 dc->cc_op_dirty = false;
2c0262af
FB
8134 dc->cs_base = cs_base;
8135 dc->tb = tb;
8136 dc->popl_esp_hack = 0;
8137 /* select memory access functions */
8138 dc->mem_index = 0;
8139 if (flags & HF_SOFTMMU_MASK) {
5c42a7cd 8140 dc->mem_index = cpu_mmu_index(env);
2c0262af 8141 }
0514ef2f
EH
8142 dc->cpuid_features = env->features[FEAT_1_EDX];
8143 dc->cpuid_ext_features = env->features[FEAT_1_ECX];
8144 dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
8145 dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
8146 dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
14ce26e7
FB
8147#ifdef TARGET_X86_64
8148 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
8149 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
8150#endif
7eee2a50 8151 dc->flags = flags;
ed2803da 8152 dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
a2cc3b24 8153 (flags & HF_INHIBIT_IRQ_MASK)
415fa2ea 8154#ifndef CONFIG_SOFTMMU
2c0262af
FB
8155 || (flags & HF_SOFTMMU_MASK)
8156#endif
8157 );
4f31916f
FB
8158#if 0
8159 /* check addseg logic */
dc196a57 8160 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
4f31916f
FB
8161 printf("ERROR addseg\n");
8162#endif
8163
a7812ae4
PB
8164 cpu_T[0] = tcg_temp_new();
8165 cpu_T[1] = tcg_temp_new();
8166 cpu_A0 = tcg_temp_new();
a7812ae4
PB
8167
8168 cpu_tmp0 = tcg_temp_new();
8169 cpu_tmp1_i64 = tcg_temp_new_i64();
8170 cpu_tmp2_i32 = tcg_temp_new_i32();
8171 cpu_tmp3_i32 = tcg_temp_new_i32();
8172 cpu_tmp4 = tcg_temp_new();
a7812ae4
PB
8173 cpu_ptr0 = tcg_temp_new_ptr();
8174 cpu_ptr1 = tcg_temp_new_ptr();
a3251186 8175 cpu_cc_srcT = tcg_temp_local_new();
57fec1fe 8176
92414b31 8177 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8178
8179 dc->is_jmp = DISAS_NEXT;
8180 pc_ptr = pc_start;
8181 lj = -1;
2e70f6ef
PB
8182 num_insns = 0;
8183 max_insns = tb->cflags & CF_COUNT_MASK;
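    /* a zero count means the TB carries no icount budget, so allow the
       maximum number of insns */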
8184 if (max_insns == 0)
8185 max_insns = CF_COUNT_MASK;
2c0262af 8186
806f352d 8187 gen_tb_start();
2c0262af 8188 for(;;) {
72cf2d4f
BS
8189 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
8190 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a2397807
JK
8191 if (bp->pc == pc_ptr &&
8192 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
2c0262af
FB
8193 gen_debug(dc, pc_ptr - dc->cs_base);
8194 break;
8195 }
8196 }
8197 }
8198 if (search_pc) {
92414b31 8199 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
8200 if (lj < j) {
8201 lj++;
8202 while (lj < j)
ab1103de 8203 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 8204 }
25983cad 8205 tcg_ctx.gen_opc_pc[lj] = pc_ptr;
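            /* also record the lazy cc_op so that restore_state_to_opc() can
               restore it when the guest state is rebuilt after a fault */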
2c0262af 8206 gen_opc_cc_op[lj] = dc->cc_op;
ab1103de 8207 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 8208 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 8209 }
2e70f6ef
PB
8210 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8211 gen_io_start();
8212
0af10c86 8213 pc_ptr = disas_insn(env, dc, pc_ptr);
2e70f6ef 8214 num_insns++;
2c0262af
FB
8215 /* stop translation if indicated */
8216 if (dc->is_jmp)
8217 break;
8218 /* in single-step mode, we generate only one instruction and
8219 then raise a debug exception */
a2cc3b24
FB
8220 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8221 the flag and abort the translation to give the irqs a
8222 chance to be delivered */
5fafdf24 8223 if (dc->tf || dc->singlestep_enabled ||
2e70f6ef 8224 (flags & HF_INHIBIT_IRQ_MASK)) {
14ce26e7 8225 gen_jmp_im(pc_ptr - dc->cs_base);
2c0262af
FB
8226 gen_eob(dc);
8227 break;
8228 }
8229 /* if the translation becomes too long, stop generation as well */
efd7f486 8230 if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
2e70f6ef
PB
8231 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
8232 num_insns >= max_insns) {
14ce26e7 8233 gen_jmp_im(pc_ptr - dc->cs_base);
2c0262af
FB
8234 gen_eob(dc);
8235 break;
8236 }
1b530a6d
AJ
8237 if (singlestep) {
8238 gen_jmp_im(pc_ptr - dc->cs_base);
8239 gen_eob(dc);
8240 break;
8241 }
2c0262af 8242 }
2e70f6ef
PB
8243 if (tb->cflags & CF_LAST_IO)
8244 gen_io_end();
806f352d 8245 gen_tb_end(tb, num_insns);
efd7f486 8246 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
8247 /* don't forget to fill in the last values */
8248 if (search_pc) {
92414b31 8249 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
8250 lj++;
8251 while (lj <= j)
ab1103de 8252 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 8253 }
3b46e624 8254
2c0262af 8255#ifdef DEBUG_DISAS
8fec2b8c 8256 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
14ce26e7 8257 int disas_flags;
93fcfe39
AL
8258 qemu_log("----------------\n");
8259 qemu_log("IN: %s\n", lookup_symbol(pc_start));
14ce26e7
FB
8260#ifdef TARGET_X86_64
8261 if (dc->code64)
8262 disas_flags = 2;
8263 else
8264#endif
8265 disas_flags = !dc->code32;
f4359b9f 8266 log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
93fcfe39 8267 qemu_log("\n");
2c0262af
FB
8268 }
8269#endif
8270
2e70f6ef 8271 if (!search_pc) {
2c0262af 8272 tb->size = pc_ptr - pc_start;
2e70f6ef
PB
8273 tb->icount = num_insns;
8274 }
2c0262af
FB
8275}
8276
317ac620 8277void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
2c0262af 8278{
467215c2 8279 gen_intermediate_code_internal(x86_env_get_cpu(env), tb, false);
2c0262af
FB
8280}
8281
317ac620 8282void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
2c0262af 8283{
467215c2 8284 gen_intermediate_code_internal(x86_env_get_cpu(env), tb, true);
2c0262af
FB
8285}
8286
317ac620 8287void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
d2856f1a
AJ
8288{
8289 int cc_op;
8290#ifdef DEBUG_DISAS
8fec2b8c 8291 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
d2856f1a 8292 int i;
93fcfe39 8293 qemu_log("RESTORE:\n");
d2856f1a 8294 for(i = 0;i <= pc_pos; i++) {
ab1103de 8295 if (tcg_ctx.gen_opc_instr_start[i]) {
25983cad
EV
8296 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
8297 tcg_ctx.gen_opc_pc[i]);
d2856f1a
AJ
8298 }
8299 }
e87b7cb0 8300 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
25983cad 8301 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
d2856f1a
AJ
8302 (uint32_t)tb->cs_base);
8303 }
8304#endif
25983cad 8305 env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
d2856f1a
AJ
8306 cc_op = gen_opc_cc_op[pc_pos];
8307 if (cc_op != CC_OP_DYNAMIC)
8308 env->cc_op = cc_op;
8309}