git.proxmox.com Git - mirror_qemu.git/blame - target-i386/translate.c
target-i386: Remove gen_op_movl_T0_T1
2c0262af
FB
1/*
2 * i386 translation
5fafdf24 3 *
2c0262af
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
18 */
19#include <stdarg.h>
20#include <stdlib.h>
21#include <stdio.h>
22#include <string.h>
23#include <inttypes.h>
24#include <signal.h>
2c0262af 25
bec93d72 26#include "qemu/host-utils.h"
2c0262af 27#include "cpu.h"
76cad711 28#include "disas/disas.h"
57fec1fe 29#include "tcg-op.h"
2c0262af 30
a7812ae4
PB
31#include "helper.h"
32#define GEN_HELPER 1
33#include "helper.h"
34
2c0262af
FB
35#define PREFIX_REPZ 0x01
36#define PREFIX_REPNZ 0x02
37#define PREFIX_LOCK 0x04
38#define PREFIX_DATA 0x08
39#define PREFIX_ADR 0x10
701ed211 40#define PREFIX_VEX 0x20
2c0262af 41
14ce26e7 42#ifdef TARGET_X86_64
14ce26e7
FB
43#define CODE64(s) ((s)->code64)
44#define REX_X(s) ((s)->rex_x)
45#define REX_B(s) ((s)->rex_b)
14ce26e7 46#else
14ce26e7
FB
47#define CODE64(s) 0
48#define REX_X(s) 0
49#define REX_B(s) 0
50#endif
51
bec93d72
RH
52#ifdef TARGET_X86_64
53# define ctztl ctz64
54# define clztl clz64
55#else
56# define ctztl ctz32
57# define clztl clz32
58#endif
59
57fec1fe
FB
60//#define MACRO_TEST 1
61
57fec1fe 62/* global register indexes */
a7812ae4 63static TCGv_ptr cpu_env;
a3251186 64static TCGv cpu_A0;
988c3eb0 65static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
a7812ae4 66static TCGv_i32 cpu_cc_op;
cc739bb0 67static TCGv cpu_regs[CPU_NB_REGS];
1e4840bf 68/* local temps */
3b9d3cf1 69static TCGv cpu_T[2];
57fec1fe 70/* local register indexes (only used inside old micro ops) */
a7812ae4
PB
71static TCGv cpu_tmp0, cpu_tmp4;
72static TCGv_ptr cpu_ptr0, cpu_ptr1;
73static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
74static TCGv_i64 cpu_tmp1_i64;
57fec1fe 75
1a7ff922
PB
76static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
77
022c62cb 78#include "exec/gen-icount.h"
2e70f6ef 79
57fec1fe
FB
80#ifdef TARGET_X86_64
81static int x86_64_hregs;
ae063a68
FB
82#endif
83
2c0262af
FB
84typedef struct DisasContext {
85 /* current insn context */
86 int override; /* -1 if no override */
87 int prefix;
88 int aflag, dflag;
14ce26e7 89 target_ulong pc; /* pc = eip + cs_base */
2c0262af
FB
90 int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
91 static state change (stop translation) */
92 /* current block context */
14ce26e7 93 target_ulong cs_base; /* base of CS segment */
2c0262af
FB
94 int pe; /* protected mode */
95 int code32; /* 32 bit code segment */
14ce26e7
FB
96#ifdef TARGET_X86_64
97 int lma; /* long mode active */
98 int code64; /* 64 bit code segment */
99 int rex_x, rex_b;
100#endif
701ed211
RH
101 int vex_l; /* vex vector length */
102 int vex_v; /* vex vvvv register, without 1's complement. */
2c0262af 103 int ss32; /* 32 bit stack segment */
fee71888 104 CCOp cc_op; /* current CC operation */
e207582f 105 bool cc_op_dirty;
2c0262af
FB
106 int addseg; /* non zero if either DS/ES/SS have a non zero base */
107 int f_st; /* currently unused */
108 int vm86; /* vm86 mode */
109 int cpl;
110 int iopl;
111 int tf; /* TF cpu flag */
34865134 112 int singlestep_enabled; /* "hardware" single step enabled */
2c0262af
FB
113 int jmp_opt; /* use direct block chaining for direct jumps */
114 int mem_index; /* select memory access functions */
c068688b 115 uint64_t flags; /* all execution flags */
2c0262af
FB
116 struct TranslationBlock *tb;
117 int popl_esp_hack; /* for correct popl with esp base handling */
14ce26e7
FB
118 int rip_offset; /* only used in x86_64, but left for simplicity */
119 int cpuid_features;
3d7374c5 120 int cpuid_ext_features;
e771edab 121 int cpuid_ext2_features;
12e26b75 122 int cpuid_ext3_features;
a9321a4d 123 int cpuid_7_0_ebx_features;
2c0262af
FB
124} DisasContext;
125
126static void gen_eob(DisasContext *s);
14ce26e7
FB
127static void gen_jmp(DisasContext *s, target_ulong eip);
128static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
63633fe6 129static void gen_op(DisasContext *s1, int op, int ot, int d);
2c0262af
FB
130
131/* i386 arith/logic operations */
132enum {
5fafdf24
TS
133 OP_ADDL,
134 OP_ORL,
135 OP_ADCL,
2c0262af 136 OP_SBBL,
5fafdf24
TS
137 OP_ANDL,
138 OP_SUBL,
139 OP_XORL,
2c0262af
FB
140 OP_CMPL,
141};
142
143/* i386 shift ops */
144enum {
5fafdf24
TS
145 OP_ROL,
146 OP_ROR,
147 OP_RCL,
148 OP_RCR,
149 OP_SHL,
150 OP_SHR,
2c0262af
FB
151 OP_SHL1, /* undocumented */
152 OP_SAR = 7,
153};
154
8e1c85e3
FB
155enum {
156 JCC_O,
157 JCC_B,
158 JCC_Z,
159 JCC_BE,
160 JCC_S,
161 JCC_P,
162 JCC_L,
163 JCC_LE,
164};
165
2c0262af
FB
166enum {
167 /* I386 int registers */
168 OR_EAX, /* MUST be even numbered */
169 OR_ECX,
170 OR_EDX,
171 OR_EBX,
172 OR_ESP,
173 OR_EBP,
174 OR_ESI,
175 OR_EDI,
14ce26e7
FB
176
177 OR_TMP0 = 16, /* temporary operand register */
2c0262af
FB
178 OR_TMP1,
179 OR_A0, /* temporary register used when doing address evaluation */
2c0262af
FB
180};
181
b666265b 182enum {
a3251186
RH
183 USES_CC_DST = 1,
184 USES_CC_SRC = 2,
988c3eb0
RH
185 USES_CC_SRC2 = 4,
186 USES_CC_SRCT = 8,
b666265b
RH
187};
188
189/* Bit set if the global variable is live after setting CC_OP to X. */
190static const uint8_t cc_op_live[CC_OP_NB] = {
988c3eb0 191 [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
b666265b
RH
192 [CC_OP_EFLAGS] = USES_CC_SRC,
193 [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
194 [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
988c3eb0 195 [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
a3251186 196 [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
988c3eb0 197 [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
b666265b
RH
198 [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
199 [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
200 [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
201 [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
202 [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
bc4b43dc 203 [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
cd7f97ca
RH
204 [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
205 [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
206 [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
436ff2d2 207 [CC_OP_CLR] = 0,
b666265b
RH
208};
209
e207582f 210static void set_cc_op(DisasContext *s, CCOp op)
3ca51d07 211{
b666265b
RH
212 int dead;
213
214 if (s->cc_op == op) {
215 return;
216 }
217
218 /* Discard CC computation that will no longer be used. */
219 dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
220 if (dead & USES_CC_DST) {
221 tcg_gen_discard_tl(cpu_cc_dst);
e207582f 222 }
b666265b
RH
223 if (dead & USES_CC_SRC) {
224 tcg_gen_discard_tl(cpu_cc_src);
225 }
988c3eb0
RH
226 if (dead & USES_CC_SRC2) {
227 tcg_gen_discard_tl(cpu_cc_src2);
228 }
a3251186
RH
229 if (dead & USES_CC_SRCT) {
230 tcg_gen_discard_tl(cpu_cc_srcT);
231 }
b666265b 232
e2f515cf
RH
233 if (op == CC_OP_DYNAMIC) {
234 /* The DYNAMIC setting is translator only, and should never be
235 stored. Thus we always consider it clean. */
236 s->cc_op_dirty = false;
237 } else {
238 /* Discard any computed CC_OP value (see shifts). */
239 if (s->cc_op == CC_OP_DYNAMIC) {
240 tcg_gen_discard_i32(cpu_cc_op);
241 }
242 s->cc_op_dirty = true;
243 }
b666265b 244 s->cc_op = op;
e207582f
RH
245}
246
e207582f
RH
247static void gen_update_cc_op(DisasContext *s)
248{
249 if (s->cc_op_dirty) {
773cdfcc 250 tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
e207582f
RH
251 s->cc_op_dirty = false;
252 }
3ca51d07
RH
253}
254
57fec1fe
FB
255static inline void gen_op_andl_A0_ffff(void)
256{
257 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
258}
259
14ce26e7
FB
260#ifdef TARGET_X86_64
261
262#define NB_OP_SIZES 4
263
14ce26e7
FB
264#else /* !TARGET_X86_64 */
265
266#define NB_OP_SIZES 3
267
14ce26e7
FB
268#endif /* !TARGET_X86_64 */
269
e2542fe2 270#if defined(HOST_WORDS_BIGENDIAN)
57fec1fe
FB
271#define REG_B_OFFSET (sizeof(target_ulong) - 1)
272#define REG_H_OFFSET (sizeof(target_ulong) - 2)
273#define REG_W_OFFSET (sizeof(target_ulong) - 2)
274#define REG_L_OFFSET (sizeof(target_ulong) - 4)
275#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
14ce26e7 276#else
57fec1fe
FB
277#define REG_B_OFFSET 0
278#define REG_H_OFFSET 1
279#define REG_W_OFFSET 0
280#define REG_L_OFFSET 0
281#define REG_LH_OFFSET 4
14ce26e7 282#endif
57fec1fe 283
96d7073f
PM
284/* In instruction encodings for byte register accesses the
285 * register number usually indicates "low 8 bits of register N";
286 * however there are some special cases where N 4..7 indicates
287 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
288 * true for this special case, false otherwise.
289 */
290static inline bool byte_reg_is_xH(int reg)
291{
292 if (reg < 4) {
293 return false;
294 }
295#ifdef TARGET_X86_64
296 if (reg >= 8 || x86_64_hregs) {
297 return false;
298 }
299#endif
300 return true;
301}
302
1e4840bf 303static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
57fec1fe
FB
304{
305 switch(ot) {
4ba9938c 306 case MO_8:
96d7073f 307 if (!byte_reg_is_xH(reg)) {
c832e3de 308 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
57fec1fe 309 } else {
c832e3de 310 tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
57fec1fe
FB
311 }
312 break;
4ba9938c 313 case MO_16:
c832e3de 314 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
57fec1fe 315 break;
cc739bb0 316 default: /* XXX this shouldn't be reached; abort? */
4ba9938c 317 case MO_32:
cc739bb0
LD
318 /* For x86_64, this sets the higher half of register to zero.
319 For i386, this is equivalent to a mov. */
320 tcg_gen_ext32u_tl(cpu_regs[reg], t0);
57fec1fe 321 break;
cc739bb0 322#ifdef TARGET_X86_64
4ba9938c 323 case MO_64:
cc739bb0 324 tcg_gen_mov_tl(cpu_regs[reg], t0);
57fec1fe 325 break;
14ce26e7 326#endif
57fec1fe
FB
327 }
328}
2c0262af 329
57fec1fe
FB
330static inline void gen_op_mov_reg_T0(int ot, int reg)
331{
1e4840bf 332 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
57fec1fe
FB
333}
334
335static inline void gen_op_mov_reg_T1(int ot, int reg)
336{
1e4840bf 337 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
57fec1fe
FB
338}
339
340static inline void gen_op_mov_reg_A0(int size, int reg)
341{
342 switch(size) {
4ba9938c 343 case MO_8:
c832e3de 344 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
57fec1fe 345 break;
cc739bb0 346 default: /* XXX this shouldn't be reached; abort? */
4ba9938c 347 case MO_16:
cc739bb0
LD
348 /* For x86_64, this sets the higher half of register to zero.
349 For i386, this is equivalent to a mov. */
350 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
57fec1fe 351 break;
cc739bb0 352#ifdef TARGET_X86_64
4ba9938c 353 case MO_32:
cc739bb0 354 tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
57fec1fe 355 break;
14ce26e7 356#endif
57fec1fe
FB
357 }
358}
359
1e4840bf 360static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
57fec1fe 361{
4ba9938c 362 if (ot == MO_8 && byte_reg_is_xH(reg)) {
96d7073f
PM
363 tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
364 tcg_gen_ext8u_tl(t0, t0);
365 } else {
cc739bb0 366 tcg_gen_mov_tl(t0, cpu_regs[reg]);
57fec1fe
FB
367 }
368}
369
1e4840bf
FB
370static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
371{
372 gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
373}
374
57fec1fe
FB
375static inline void gen_op_movl_A0_reg(int reg)
376{
cc739bb0 377 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
57fec1fe
FB
378}
379
380static inline void gen_op_addl_A0_im(int32_t val)
381{
382 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
14ce26e7 383#ifdef TARGET_X86_64
57fec1fe 384 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
14ce26e7 385#endif
57fec1fe 386}
2c0262af 387
14ce26e7 388#ifdef TARGET_X86_64
57fec1fe
FB
389static inline void gen_op_addq_A0_im(int64_t val)
390{
391 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
392}
14ce26e7 393#endif
57fec1fe
FB
394
395static void gen_add_A0_im(DisasContext *s, int val)
396{
397#ifdef TARGET_X86_64
398 if (CODE64(s))
399 gen_op_addq_A0_im(val);
400 else
401#endif
402 gen_op_addl_A0_im(val);
403}
2c0262af 404
57fec1fe 405static inline void gen_op_addl_T0_T1(void)
2c0262af 406{
57fec1fe
FB
407 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
408}
409
410static inline void gen_op_jmp_T0(void)
411{
317ac620 412 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
57fec1fe
FB
413}
414
6e0d8677 415static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
57fec1fe 416{
6e0d8677 417 switch(size) {
4ba9938c 418 case MO_8:
cc739bb0 419 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
c832e3de 420 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
6e0d8677 421 break;
4ba9938c 422 case MO_16:
cc739bb0
LD
423 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
424 /* For x86_64, this sets the higher half of register to zero.
425 For i386, this is equivalent to a nop. */
426 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
427 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
6e0d8677
FB
428 break;
429#ifdef TARGET_X86_64
4ba9938c 430 case MO_32:
cc739bb0 431 tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
6e0d8677
FB
432 break;
433#endif
434 }
57fec1fe
FB
435}
436
6e0d8677 437static inline void gen_op_add_reg_T0(int size, int reg)
57fec1fe 438{
6e0d8677 439 switch(size) {
4ba9938c 440 case MO_8:
cc739bb0 441 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
c832e3de 442 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
6e0d8677 443 break;
4ba9938c 444 case MO_16:
cc739bb0
LD
445 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
446 /* For x86_64, this sets the higher half of register to zero.
447 For i386, this is equivalent to a nop. */
448 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
449 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
6e0d8677 450 break;
14ce26e7 451#ifdef TARGET_X86_64
4ba9938c 452 case MO_32:
cc739bb0 453 tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
6e0d8677 454 break;
14ce26e7 455#endif
6e0d8677
FB
456 }
457}
57fec1fe 458
57fec1fe
FB
459static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
460{
cc739bb0
LD
461 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
462 if (shift != 0)
57fec1fe
FB
463 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
464 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
cc739bb0
LD
465 /* For x86_64, this sets the higher half of register to zero.
466 For i386, this is equivalent to a nop. */
467 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
57fec1fe 468}
2c0262af 469
57fec1fe
FB
470static inline void gen_op_movl_A0_seg(int reg)
471{
317ac620 472 tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
57fec1fe 473}
2c0262af 474
7162ab21 475static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
57fec1fe 476{
317ac620 477 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
57fec1fe 478#ifdef TARGET_X86_64
7162ab21
VC
479 if (CODE64(s)) {
480 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
481 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
482 } else {
483 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
484 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
485 }
486#else
487 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
57fec1fe
FB
488#endif
489}
2c0262af 490
14ce26e7 491#ifdef TARGET_X86_64
57fec1fe
FB
492static inline void gen_op_movq_A0_seg(int reg)
493{
317ac620 494 tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
57fec1fe 495}
14ce26e7 496
57fec1fe
FB
497static inline void gen_op_addq_A0_seg(int reg)
498{
317ac620 499 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
57fec1fe
FB
500 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
501}
502
503static inline void gen_op_movq_A0_reg(int reg)
504{
cc739bb0 505 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
57fec1fe
FB
506}
507
508static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
509{
cc739bb0
LD
510 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
511 if (shift != 0)
57fec1fe
FB
512 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
513 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
514}
14ce26e7
FB
515#endif
516
323d1876 517static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
57fec1fe 518{
3c5f4116 519 tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
57fec1fe 520}
2c0262af 521
323d1876 522static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
57fec1fe 523{
3523e4bd 524 tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
57fec1fe 525}
4f31916f 526
d4faa3e0
RH
527static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
528{
529 if (d == OR_TMP0) {
fd8ca9f6 530 gen_op_st_v(s, idx, cpu_T[0], cpu_A0);
d4faa3e0
RH
531 } else {
532 gen_op_mov_reg_T0(idx, d);
533 }
534}
535
14ce26e7
FB
536static inline void gen_jmp_im(target_ulong pc)
537{
57fec1fe 538 tcg_gen_movi_tl(cpu_tmp0, pc);
317ac620 539 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
14ce26e7
FB
540}
541
2c0262af
FB
542static inline void gen_string_movl_A0_ESI(DisasContext *s)
543{
544 int override;
545
546 override = s->override;
14ce26e7
FB
547#ifdef TARGET_X86_64
548 if (s->aflag == 2) {
549 if (override >= 0) {
57fec1fe
FB
550 gen_op_movq_A0_seg(override);
551 gen_op_addq_A0_reg_sN(0, R_ESI);
14ce26e7 552 } else {
57fec1fe 553 gen_op_movq_A0_reg(R_ESI);
14ce26e7
FB
554 }
555 } else
556#endif
2c0262af
FB
557 if (s->aflag) {
558 /* 32 bit address */
559 if (s->addseg && override < 0)
560 override = R_DS;
561 if (override >= 0) {
57fec1fe
FB
562 gen_op_movl_A0_seg(override);
563 gen_op_addl_A0_reg_sN(0, R_ESI);
2c0262af 564 } else {
57fec1fe 565 gen_op_movl_A0_reg(R_ESI);
2c0262af
FB
566 }
567 } else {
568 /* 16 address, always override */
569 if (override < 0)
570 override = R_DS;
57fec1fe 571 gen_op_movl_A0_reg(R_ESI);
2c0262af 572 gen_op_andl_A0_ffff();
7162ab21 573 gen_op_addl_A0_seg(s, override);
2c0262af
FB
574 }
575}
576
577static inline void gen_string_movl_A0_EDI(DisasContext *s)
578{
14ce26e7
FB
579#ifdef TARGET_X86_64
580 if (s->aflag == 2) {
57fec1fe 581 gen_op_movq_A0_reg(R_EDI);
14ce26e7
FB
582 } else
583#endif
2c0262af
FB
584 if (s->aflag) {
585 if (s->addseg) {
57fec1fe
FB
586 gen_op_movl_A0_seg(R_ES);
587 gen_op_addl_A0_reg_sN(0, R_EDI);
2c0262af 588 } else {
57fec1fe 589 gen_op_movl_A0_reg(R_EDI);
2c0262af
FB
590 }
591 } else {
57fec1fe 592 gen_op_movl_A0_reg(R_EDI);
2c0262af 593 gen_op_andl_A0_ffff();
7162ab21 594 gen_op_addl_A0_seg(s, R_ES);
2c0262af
FB
595 }
596}
597
6e0d8677
FB
598static inline void gen_op_movl_T0_Dshift(int ot)
599{
317ac620 600 tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
6e0d8677 601 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
2c0262af
FB
602};
603
d824df34 604static TCGv gen_ext_tl(TCGv dst, TCGv src, int size, bool sign)
6e0d8677 605{
d824df34 606 switch (size) {
4ba9938c 607 case MO_8:
d824df34
PB
608 if (sign) {
609 tcg_gen_ext8s_tl(dst, src);
610 } else {
611 tcg_gen_ext8u_tl(dst, src);
612 }
613 return dst;
4ba9938c 614 case MO_16:
d824df34
PB
615 if (sign) {
616 tcg_gen_ext16s_tl(dst, src);
617 } else {
618 tcg_gen_ext16u_tl(dst, src);
619 }
620 return dst;
621#ifdef TARGET_X86_64
4ba9938c 622 case MO_32:
d824df34
PB
623 if (sign) {
624 tcg_gen_ext32s_tl(dst, src);
625 } else {
626 tcg_gen_ext32u_tl(dst, src);
627 }
628 return dst;
629#endif
6e0d8677 630 default:
d824df34 631 return src;
6e0d8677
FB
632 }
633}
3b46e624 634
d824df34
PB
635static void gen_extu(int ot, TCGv reg)
636{
637 gen_ext_tl(reg, reg, ot, false);
638}
639
6e0d8677
FB
640static void gen_exts(int ot, TCGv reg)
641{
d824df34 642 gen_ext_tl(reg, reg, ot, true);
6e0d8677 643}
2c0262af 644
6e0d8677
FB
645static inline void gen_op_jnz_ecx(int size, int label1)
646{
cc739bb0 647 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
6e0d8677 648 gen_extu(size + 1, cpu_tmp0);
cb63669a 649 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
6e0d8677
FB
650}
651
652static inline void gen_op_jz_ecx(int size, int label1)
653{
cc739bb0 654 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
6e0d8677 655 gen_extu(size + 1, cpu_tmp0);
cb63669a 656 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
6e0d8677 657}
2c0262af 658
a7812ae4
PB
659static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
660{
661 switch (ot) {
4ba9938c 662 case MO_8:
93ab25d7
PB
663 gen_helper_inb(v, n);
664 break;
4ba9938c 665 case MO_16:
93ab25d7
PB
666 gen_helper_inw(v, n);
667 break;
4ba9938c 668 case MO_32:
93ab25d7
PB
669 gen_helper_inl(v, n);
670 break;
a7812ae4 671 }
a7812ae4 672}
2c0262af 673
a7812ae4
PB
674static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
675{
676 switch (ot) {
4ba9938c 677 case MO_8:
93ab25d7
PB
678 gen_helper_outb(v, n);
679 break;
4ba9938c 680 case MO_16:
93ab25d7
PB
681 gen_helper_outw(v, n);
682 break;
4ba9938c 683 case MO_32:
93ab25d7
PB
684 gen_helper_outl(v, n);
685 break;
a7812ae4 686 }
a7812ae4 687}
f115e911 688
b8b6a50b
FB
689static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
690 uint32_t svm_flags)
f115e911 691{
b8b6a50b
FB
692 int state_saved;
693 target_ulong next_eip;
694
695 state_saved = 0;
f115e911 696 if (s->pe && (s->cpl > s->iopl || s->vm86)) {
773cdfcc 697 gen_update_cc_op(s);
14ce26e7 698 gen_jmp_im(cur_eip);
b8b6a50b 699 state_saved = 1;
b6abf97d 700 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
a7812ae4 701 switch (ot) {
4ba9938c 702 case MO_8:
4a7443be
BS
703 gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
704 break;
4ba9938c 705 case MO_16:
4a7443be
BS
706 gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
707 break;
4ba9938c 708 case MO_32:
4a7443be
BS
709 gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
710 break;
a7812ae4 711 }
b8b6a50b 712 }
872929aa 713 if(s->flags & HF_SVMI_MASK) {
b8b6a50b 714 if (!state_saved) {
773cdfcc 715 gen_update_cc_op(s);
b8b6a50b 716 gen_jmp_im(cur_eip);
b8b6a50b
FB
717 }
718 svm_flags |= (1 << (4 + ot));
719 next_eip = s->pc - s->cs_base;
b6abf97d 720 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
052e80d5
BS
721 gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
722 tcg_const_i32(svm_flags),
a7812ae4 723 tcg_const_i32(next_eip - cur_eip));
f115e911
FB
724 }
725}
726
2c0262af
FB
727static inline void gen_movs(DisasContext *s, int ot)
728{
729 gen_string_movl_A0_ESI(s);
909be183 730 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 731 gen_string_movl_A0_EDI(s);
fd8ca9f6 732 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
6e0d8677
FB
733 gen_op_movl_T0_Dshift(ot);
734 gen_op_add_reg_T0(s->aflag, R_ESI);
735 gen_op_add_reg_T0(s->aflag, R_EDI);
2c0262af
FB
736}
737
b6abf97d
FB
738static void gen_op_update1_cc(void)
739{
b6abf97d
FB
740 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
741}
742
743static void gen_op_update2_cc(void)
744{
745 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
746 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
747}
748
988c3eb0
RH
749static void gen_op_update3_cc(TCGv reg)
750{
751 tcg_gen_mov_tl(cpu_cc_src2, reg);
752 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
753 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
754}
755
b6abf97d
FB
756static inline void gen_op_testl_T0_T1_cc(void)
757{
b6abf97d
FB
758 tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
759}
760
761static void gen_op_update_neg_cc(void)
762{
b6abf97d 763 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
a3251186
RH
764 tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
765 tcg_gen_movi_tl(cpu_cc_srcT, 0);
b6abf97d
FB
766}
767
d229edce
RH
768/* compute all eflags to cc_src */
769static void gen_compute_eflags(DisasContext *s)
8e1c85e3 770{
988c3eb0 771 TCGv zero, dst, src1, src2;
db9f2597
RH
772 int live, dead;
773
d229edce
RH
774 if (s->cc_op == CC_OP_EFLAGS) {
775 return;
776 }
436ff2d2
RH
777 if (s->cc_op == CC_OP_CLR) {
778 tcg_gen_movi_tl(cpu_cc_src, CC_Z);
779 set_cc_op(s, CC_OP_EFLAGS);
780 return;
781 }
db9f2597
RH
782
783 TCGV_UNUSED(zero);
784 dst = cpu_cc_dst;
785 src1 = cpu_cc_src;
988c3eb0 786 src2 = cpu_cc_src2;
db9f2597
RH
787
788 /* Take care to not read values that are not live. */
789 live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
988c3eb0 790 dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
db9f2597
RH
791 if (dead) {
792 zero = tcg_const_tl(0);
793 if (dead & USES_CC_DST) {
794 dst = zero;
795 }
796 if (dead & USES_CC_SRC) {
797 src1 = zero;
798 }
988c3eb0
RH
799 if (dead & USES_CC_SRC2) {
800 src2 = zero;
801 }
db9f2597
RH
802 }
803
773cdfcc 804 gen_update_cc_op(s);
988c3eb0 805 gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
d229edce 806 set_cc_op(s, CC_OP_EFLAGS);
db9f2597
RH
807
808 if (dead) {
809 tcg_temp_free(zero);
810 }
8e1c85e3
FB
811}
812
bec93d72
RH
813typedef struct CCPrepare {
814 TCGCond cond;
815 TCGv reg;
816 TCGv reg2;
817 target_ulong imm;
818 target_ulong mask;
819 bool use_reg2;
820 bool no_setcond;
821} CCPrepare;
822
06847f1f 823/* compute eflags.C to reg */
bec93d72 824static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
06847f1f
RH
825{
826 TCGv t0, t1;
bec93d72 827 int size, shift;
06847f1f
RH
828
829 switch (s->cc_op) {
830 case CC_OP_SUBB ... CC_OP_SUBQ:
a3251186 831 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
06847f1f
RH
832 size = s->cc_op - CC_OP_SUBB;
833 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
834 /* If no temporary was used, be careful not to alias t1 and t0. */
835 t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
a3251186 836 tcg_gen_mov_tl(t0, cpu_cc_srcT);
06847f1f
RH
837 gen_extu(size, t0);
838 goto add_sub;
839
840 case CC_OP_ADDB ... CC_OP_ADDQ:
841 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
842 size = s->cc_op - CC_OP_ADDB;
843 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
844 t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
845 add_sub:
bec93d72
RH
846 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
847 .reg2 = t1, .mask = -1, .use_reg2 = true };
06847f1f 848
06847f1f 849 case CC_OP_LOGICB ... CC_OP_LOGICQ:
436ff2d2 850 case CC_OP_CLR:
bec93d72 851 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
06847f1f
RH
852
853 case CC_OP_INCB ... CC_OP_INCQ:
854 case CC_OP_DECB ... CC_OP_DECQ:
bec93d72
RH
855 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
856 .mask = -1, .no_setcond = true };
06847f1f
RH
857
858 case CC_OP_SHLB ... CC_OP_SHLQ:
859 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
860 size = s->cc_op - CC_OP_SHLB;
bec93d72
RH
861 shift = (8 << size) - 1;
862 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
863 .mask = (target_ulong)1 << shift };
06847f1f
RH
864
865 case CC_OP_MULB ... CC_OP_MULQ:
bec93d72
RH
866 return (CCPrepare) { .cond = TCG_COND_NE,
867 .reg = cpu_cc_src, .mask = -1 };
06847f1f 868
bc4b43dc
RH
869 case CC_OP_BMILGB ... CC_OP_BMILGQ:
870 size = s->cc_op - CC_OP_BMILGB;
871 t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
872 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
873
cd7f97ca
RH
874 case CC_OP_ADCX:
875 case CC_OP_ADCOX:
876 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
877 .mask = -1, .no_setcond = true };
878
06847f1f
RH
879 case CC_OP_EFLAGS:
880 case CC_OP_SARB ... CC_OP_SARQ:
881 /* CC_SRC & 1 */
bec93d72
RH
882 return (CCPrepare) { .cond = TCG_COND_NE,
883 .reg = cpu_cc_src, .mask = CC_C };
06847f1f
RH
884
885 default:
886 /* The need to compute only C from CC_OP_DYNAMIC is important
887 in efficiently implementing e.g. INC at the start of a TB. */
888 gen_update_cc_op(s);
988c3eb0
RH
889 gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
890 cpu_cc_src2, cpu_cc_op);
bec93d72
RH
891 return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
892 .mask = -1, .no_setcond = true };
06847f1f
RH
893 }
894}
895
1608ecca 896/* compute eflags.P to reg */
bec93d72 897static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
1608ecca 898{
d229edce 899 gen_compute_eflags(s);
bec93d72
RH
900 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
901 .mask = CC_P };
1608ecca
PB
902}
903
904/* compute eflags.S to reg */
bec93d72 905static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
1608ecca 906{
086c4077
RH
907 switch (s->cc_op) {
908 case CC_OP_DYNAMIC:
909 gen_compute_eflags(s);
910 /* FALLTHRU */
911 case CC_OP_EFLAGS:
cd7f97ca
RH
912 case CC_OP_ADCX:
913 case CC_OP_ADOX:
914 case CC_OP_ADCOX:
bec93d72
RH
915 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
916 .mask = CC_S };
436ff2d2
RH
917 case CC_OP_CLR:
918 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
086c4077
RH
919 default:
920 {
921 int size = (s->cc_op - CC_OP_ADDB) & 3;
922 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
bec93d72 923 return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
086c4077 924 }
086c4077 925 }
1608ecca
PB
926}
927
928/* compute eflags.O to reg */
bec93d72 929static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
1608ecca 930{
cd7f97ca
RH
931 switch (s->cc_op) {
932 case CC_OP_ADOX:
933 case CC_OP_ADCOX:
934 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
935 .mask = -1, .no_setcond = true };
436ff2d2
RH
936 case CC_OP_CLR:
937 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
cd7f97ca
RH
938 default:
939 gen_compute_eflags(s);
940 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
941 .mask = CC_O };
942 }
1608ecca
PB
943}
944
945/* compute eflags.Z to reg */
bec93d72 946static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
1608ecca 947{
086c4077
RH
948 switch (s->cc_op) {
949 case CC_OP_DYNAMIC:
950 gen_compute_eflags(s);
951 /* FALLTHRU */
952 case CC_OP_EFLAGS:
cd7f97ca
RH
953 case CC_OP_ADCX:
954 case CC_OP_ADOX:
955 case CC_OP_ADCOX:
bec93d72
RH
956 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
957 .mask = CC_Z };
436ff2d2
RH
958 case CC_OP_CLR:
959 return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
086c4077
RH
960 default:
961 {
962 int size = (s->cc_op - CC_OP_ADDB) & 3;
963 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
bec93d72 964 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
086c4077 965 }
bec93d72
RH
966 }
967}
968
c365395e
PB
969/* perform a conditional store into register 'reg' according to jump opcode
970 value 'b'. In the fast case, T0 is guaranteed not to be used. */
276e6b5f 971static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
8e1c85e3 972{
c365395e 973 int inv, jcc_op, size, cond;
276e6b5f 974 CCPrepare cc;
c365395e
PB
975 TCGv t0;
976
977 inv = b & 1;
8e1c85e3 978 jcc_op = (b >> 1) & 7;
c365395e
PB
979
980 switch (s->cc_op) {
69d1aa31
RH
981 case CC_OP_SUBB ... CC_OP_SUBQ:
982 /* We optimize relational operators for the cmp/jcc case. */
c365395e
PB
983 size = s->cc_op - CC_OP_SUBB;
984 switch (jcc_op) {
985 case JCC_BE:
a3251186 986 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
c365395e
PB
987 gen_extu(size, cpu_tmp4);
988 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
276e6b5f
RH
989 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
990 .reg2 = t0, .mask = -1, .use_reg2 = true };
c365395e 991 break;
8e1c85e3 992
c365395e 993 case JCC_L:
276e6b5f 994 cond = TCG_COND_LT;
c365395e
PB
995 goto fast_jcc_l;
996 case JCC_LE:
276e6b5f 997 cond = TCG_COND_LE;
c365395e 998 fast_jcc_l:
a3251186 999 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
c365395e
PB
1000 gen_exts(size, cpu_tmp4);
1001 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
276e6b5f
RH
1002 cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
1003 .reg2 = t0, .mask = -1, .use_reg2 = true };
c365395e 1004 break;
8e1c85e3 1005
c365395e 1006 default:
8e1c85e3 1007 goto slow_jcc;
c365395e 1008 }
8e1c85e3 1009 break;
c365395e 1010
8e1c85e3
FB
1011 default:
1012 slow_jcc:
69d1aa31
RH
1013 /* This actually generates good code for JC, JZ and JS. */
1014 switch (jcc_op) {
1015 case JCC_O:
1016 cc = gen_prepare_eflags_o(s, reg);
1017 break;
1018 case JCC_B:
1019 cc = gen_prepare_eflags_c(s, reg);
1020 break;
1021 case JCC_Z:
1022 cc = gen_prepare_eflags_z(s, reg);
1023 break;
1024 case JCC_BE:
1025 gen_compute_eflags(s);
1026 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1027 .mask = CC_Z | CC_C };
1028 break;
1029 case JCC_S:
1030 cc = gen_prepare_eflags_s(s, reg);
1031 break;
1032 case JCC_P:
1033 cc = gen_prepare_eflags_p(s, reg);
1034 break;
1035 case JCC_L:
1036 gen_compute_eflags(s);
1037 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1038 reg = cpu_tmp0;
1039 }
1040 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1041 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1042 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1043 .mask = CC_S };
1044 break;
1045 default:
1046 case JCC_LE:
1047 gen_compute_eflags(s);
1048 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1049 reg = cpu_tmp0;
1050 }
1051 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1052 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1053 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1054 .mask = CC_S | CC_Z };
1055 break;
1056 }
c365395e 1057 break;
8e1c85e3 1058 }
276e6b5f
RH
1059
1060 if (inv) {
1061 cc.cond = tcg_invert_cond(cc.cond);
1062 }
1063 return cc;
8e1c85e3
FB
1064}
1065
cc8b6f5b
PB
1066static void gen_setcc1(DisasContext *s, int b, TCGv reg)
1067{
1068 CCPrepare cc = gen_prepare_cc(s, b, reg);
1069
1070 if (cc.no_setcond) {
1071 if (cc.cond == TCG_COND_EQ) {
1072 tcg_gen_xori_tl(reg, cc.reg, 1);
1073 } else {
1074 tcg_gen_mov_tl(reg, cc.reg);
1075 }
1076 return;
1077 }
1078
1079 if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
1080 cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
1081 tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
1082 tcg_gen_andi_tl(reg, reg, 1);
1083 return;
1084 }
1085 if (cc.mask != -1) {
1086 tcg_gen_andi_tl(reg, cc.reg, cc.mask);
1087 cc.reg = reg;
1088 }
1089 if (cc.use_reg2) {
1090 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1091 } else {
1092 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1093 }
1094}
1095
1096static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1097{
1098 gen_setcc1(s, JCC_B << 1, reg);
1099}
276e6b5f 1100
8e1c85e3
FB
1101/* generate a conditional jump to label 'l1' according to jump opcode
1102 value 'b'. In the fast case, T0 is guaranteed not to be used. */
dc259201
RH
1103static inline void gen_jcc1_noeob(DisasContext *s, int b, int l1)
1104{
1105 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
1106
1107 if (cc.mask != -1) {
1108 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1109 cc.reg = cpu_T[0];
1110 }
1111 if (cc.use_reg2) {
1112 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1113 } else {
1114 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1115 }
1116}
1117
1118/* Generate a conditional jump to label 'l1' according to jump opcode
1119 value 'b'. In the fast case, T0 is guaranteed not to be used.
1120 A translation block must end soon. */
b27fc131 1121static inline void gen_jcc1(DisasContext *s, int b, int l1)
8e1c85e3 1122{
943131ca 1123 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
8e1c85e3 1124
dc259201 1125 gen_update_cc_op(s);
943131ca
PB
1126 if (cc.mask != -1) {
1127 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1128 cc.reg = cpu_T[0];
1129 }
dc259201 1130 set_cc_op(s, CC_OP_DYNAMIC);
943131ca
PB
1131 if (cc.use_reg2) {
1132 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1133 } else {
1134 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
8e1c85e3
FB
1135 }
1136}
1137
14ce26e7
FB
1138/* XXX: does not work with gdbstub "ice" single step - not a
1139 serious problem */
1140static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
2c0262af 1141{
14ce26e7
FB
1142 int l1, l2;
1143
1144 l1 = gen_new_label();
1145 l2 = gen_new_label();
6e0d8677 1146 gen_op_jnz_ecx(s->aflag, l1);
14ce26e7
FB
1147 gen_set_label(l2);
1148 gen_jmp_tb(s, next_eip, 1);
1149 gen_set_label(l1);
1150 return l2;
2c0262af
FB
1151}
1152
1153static inline void gen_stos(DisasContext *s, int ot)
1154{
4ba9938c 1155 gen_op_mov_TN_reg(MO_32, 0, R_EAX);
2c0262af 1156 gen_string_movl_A0_EDI(s);
fd8ca9f6 1157 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
6e0d8677
FB
1158 gen_op_movl_T0_Dshift(ot);
1159 gen_op_add_reg_T0(s->aflag, R_EDI);
2c0262af
FB
1160}
1161
1162static inline void gen_lods(DisasContext *s, int ot)
1163{
1164 gen_string_movl_A0_ESI(s);
909be183 1165 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
57fec1fe 1166 gen_op_mov_reg_T0(ot, R_EAX);
6e0d8677
FB
1167 gen_op_movl_T0_Dshift(ot);
1168 gen_op_add_reg_T0(s->aflag, R_ESI);
2c0262af
FB
1169}
1170
1171static inline void gen_scas(DisasContext *s, int ot)
1172{
2c0262af 1173 gen_string_movl_A0_EDI(s);
0f712e10 1174 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
63633fe6 1175 gen_op(s, OP_CMPL, ot, R_EAX);
6e0d8677
FB
1176 gen_op_movl_T0_Dshift(ot);
1177 gen_op_add_reg_T0(s->aflag, R_EDI);
2c0262af
FB
1178}
1179
1180static inline void gen_cmps(DisasContext *s, int ot)
1181{
2c0262af 1182 gen_string_movl_A0_EDI(s);
0f712e10 1183 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
63633fe6
RH
1184 gen_string_movl_A0_ESI(s);
1185 gen_op(s, OP_CMPL, ot, OR_TMP0);
6e0d8677
FB
1186 gen_op_movl_T0_Dshift(ot);
1187 gen_op_add_reg_T0(s->aflag, R_ESI);
1188 gen_op_add_reg_T0(s->aflag, R_EDI);
2c0262af
FB
1189}
1190
1191static inline void gen_ins(DisasContext *s, int ot)
1192{
2e70f6ef
PB
1193 if (use_icount)
1194 gen_io_start();
2c0262af 1195 gen_string_movl_A0_EDI(s);
6e0d8677
FB
1196 /* Note: we must do this dummy write first to be restartable in
1197 case of page fault. */
97212c88 1198 tcg_gen_movi_tl(cpu_T[0], 0);
fd8ca9f6 1199 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
24b9c00f 1200 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
b6abf97d 1201 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
a7812ae4 1202 gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
fd8ca9f6 1203 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
6e0d8677
FB
1204 gen_op_movl_T0_Dshift(ot);
1205 gen_op_add_reg_T0(s->aflag, R_EDI);
2e70f6ef
PB
1206 if (use_icount)
1207 gen_io_end();
2c0262af
FB
1208}
1209
1210static inline void gen_outs(DisasContext *s, int ot)
1211{
2e70f6ef
PB
1212 if (use_icount)
1213 gen_io_start();
2c0262af 1214 gen_string_movl_A0_ESI(s);
909be183 1215 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
b8b6a50b 1216
24b9c00f 1217 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
b6abf97d
FB
1218 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1219 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
a7812ae4 1220 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
b8b6a50b 1221
6e0d8677
FB
1222 gen_op_movl_T0_Dshift(ot);
1223 gen_op_add_reg_T0(s->aflag, R_ESI);
2e70f6ef
PB
1224 if (use_icount)
1225 gen_io_end();
2c0262af
FB
1226}
1227
1228/* same method as Valgrind : we generate jumps to current or next
1229 instruction */
1230#define GEN_REPZ(op) \
1231static inline void gen_repz_ ## op(DisasContext *s, int ot, \
14ce26e7 1232 target_ulong cur_eip, target_ulong next_eip) \
2c0262af 1233{ \
14ce26e7 1234 int l2;\
2c0262af 1235 gen_update_cc_op(s); \
14ce26e7 1236 l2 = gen_jz_ecx_string(s, next_eip); \
2c0262af 1237 gen_ ## op(s, ot); \
6e0d8677 1238 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
2c0262af
FB
1239 /* a loop would cause two single step exceptions if ECX = 1 \
1240 before rep string_insn */ \
1241 if (!s->jmp_opt) \
6e0d8677 1242 gen_op_jz_ecx(s->aflag, l2); \
2c0262af
FB
1243 gen_jmp(s, cur_eip); \
1244}
1245
1246#define GEN_REPZ2(op) \
1247static inline void gen_repz_ ## op(DisasContext *s, int ot, \
14ce26e7
FB
1248 target_ulong cur_eip, \
1249 target_ulong next_eip, \
2c0262af
FB
1250 int nz) \
1251{ \
14ce26e7 1252 int l2;\
2c0262af 1253 gen_update_cc_op(s); \
14ce26e7 1254 l2 = gen_jz_ecx_string(s, next_eip); \
2c0262af 1255 gen_ ## op(s, ot); \
6e0d8677 1256 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
773cdfcc 1257 gen_update_cc_op(s); \
b27fc131 1258 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
2c0262af 1259 if (!s->jmp_opt) \
6e0d8677 1260 gen_op_jz_ecx(s->aflag, l2); \
2c0262af
FB
1261 gen_jmp(s, cur_eip); \
1262}
1263
1264GEN_REPZ(movs)
1265GEN_REPZ(stos)
1266GEN_REPZ(lods)
1267GEN_REPZ(ins)
1268GEN_REPZ(outs)
1269GEN_REPZ2(scas)
1270GEN_REPZ2(cmps)
1271
a7812ae4
PB
1272static void gen_helper_fp_arith_ST0_FT0(int op)
1273{
1274 switch (op) {
d3eb5eae
BS
1275 case 0:
1276 gen_helper_fadd_ST0_FT0(cpu_env);
1277 break;
1278 case 1:
1279 gen_helper_fmul_ST0_FT0(cpu_env);
1280 break;
1281 case 2:
1282 gen_helper_fcom_ST0_FT0(cpu_env);
1283 break;
1284 case 3:
1285 gen_helper_fcom_ST0_FT0(cpu_env);
1286 break;
1287 case 4:
1288 gen_helper_fsub_ST0_FT0(cpu_env);
1289 break;
1290 case 5:
1291 gen_helper_fsubr_ST0_FT0(cpu_env);
1292 break;
1293 case 6:
1294 gen_helper_fdiv_ST0_FT0(cpu_env);
1295 break;
1296 case 7:
1297 gen_helper_fdivr_ST0_FT0(cpu_env);
1298 break;
a7812ae4
PB
1299 }
1300}
2c0262af
FB
1301
1302/* NOTE the exception in "r" op ordering */
a7812ae4
PB
1303static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1304{
1305 TCGv_i32 tmp = tcg_const_i32(opreg);
1306 switch (op) {
d3eb5eae
BS
1307 case 0:
1308 gen_helper_fadd_STN_ST0(cpu_env, tmp);
1309 break;
1310 case 1:
1311 gen_helper_fmul_STN_ST0(cpu_env, tmp);
1312 break;
1313 case 4:
1314 gen_helper_fsubr_STN_ST0(cpu_env, tmp);
1315 break;
1316 case 5:
1317 gen_helper_fsub_STN_ST0(cpu_env, tmp);
1318 break;
1319 case 6:
1320 gen_helper_fdivr_STN_ST0(cpu_env, tmp);
1321 break;
1322 case 7:
1323 gen_helper_fdiv_STN_ST0(cpu_env, tmp);
1324 break;
a7812ae4
PB
1325 }
1326}
2c0262af
FB
1327
1328/* if d == OR_TMP0, it means memory operand (address in A0) */
1329static void gen_op(DisasContext *s1, int op, int ot, int d)
1330{
2c0262af 1331 if (d != OR_TMP0) {
57fec1fe 1332 gen_op_mov_TN_reg(ot, 0, d);
2c0262af 1333 } else {
909be183 1334 gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
2c0262af
FB
1335 }
1336 switch(op) {
1337 case OP_ADCL:
cc8b6f5b 1338 gen_compute_eflags_c(s1, cpu_tmp4);
cad3a37d
FB
1339 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1340 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
d4faa3e0 1341 gen_op_st_rm_T0_A0(s1, ot, d);
988c3eb0
RH
1342 gen_op_update3_cc(cpu_tmp4);
1343 set_cc_op(s1, CC_OP_ADCB + ot);
cad3a37d 1344 break;
2c0262af 1345 case OP_SBBL:
cc8b6f5b 1346 gen_compute_eflags_c(s1, cpu_tmp4);
cad3a37d
FB
1347 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1348 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
d4faa3e0 1349 gen_op_st_rm_T0_A0(s1, ot, d);
988c3eb0
RH
1350 gen_op_update3_cc(cpu_tmp4);
1351 set_cc_op(s1, CC_OP_SBBB + ot);
cad3a37d 1352 break;
2c0262af
FB
1353 case OP_ADDL:
1354 gen_op_addl_T0_T1();
d4faa3e0 1355 gen_op_st_rm_T0_A0(s1, ot, d);
cad3a37d 1356 gen_op_update2_cc();
3ca51d07 1357 set_cc_op(s1, CC_OP_ADDB + ot);
2c0262af
FB
1358 break;
1359 case OP_SUBL:
a3251186 1360 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
57fec1fe 1361 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
d4faa3e0 1362 gen_op_st_rm_T0_A0(s1, ot, d);
cad3a37d 1363 gen_op_update2_cc();
3ca51d07 1364 set_cc_op(s1, CC_OP_SUBB + ot);
2c0262af
FB
1365 break;
1366 default:
1367 case OP_ANDL:
57fec1fe 1368 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
d4faa3e0 1369 gen_op_st_rm_T0_A0(s1, ot, d);
cad3a37d 1370 gen_op_update1_cc();
3ca51d07 1371 set_cc_op(s1, CC_OP_LOGICB + ot);
57fec1fe 1372 break;
2c0262af 1373 case OP_ORL:
57fec1fe 1374 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
d4faa3e0 1375 gen_op_st_rm_T0_A0(s1, ot, d);
cad3a37d 1376 gen_op_update1_cc();
3ca51d07 1377 set_cc_op(s1, CC_OP_LOGICB + ot);
57fec1fe 1378 break;
2c0262af 1379 case OP_XORL:
57fec1fe 1380 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
d4faa3e0 1381 gen_op_st_rm_T0_A0(s1, ot, d);
cad3a37d 1382 gen_op_update1_cc();
3ca51d07 1383 set_cc_op(s1, CC_OP_LOGICB + ot);
2c0262af
FB
1384 break;
1385 case OP_CMPL:
63633fe6 1386 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
a3251186 1387 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
63633fe6 1388 tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
3ca51d07 1389 set_cc_op(s1, CC_OP_SUBB + ot);
2c0262af
FB
1390 break;
1391 }
b6abf97d
FB
1392}
1393
2c0262af
FB
1394/* if d == OR_TMP0, it means memory operand (address in A0) */
1395static void gen_inc(DisasContext *s1, int ot, int d, int c)
1396{
909be183 1397 if (d != OR_TMP0) {
57fec1fe 1398 gen_op_mov_TN_reg(ot, 0, d);
909be183
RH
1399 } else {
1400 gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
1401 }
cc8b6f5b 1402 gen_compute_eflags_c(s1, cpu_cc_src);
2c0262af 1403 if (c > 0) {
b6abf97d 1404 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
3ca51d07 1405 set_cc_op(s1, CC_OP_INCB + ot);
2c0262af 1406 } else {
b6abf97d 1407 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
3ca51d07 1408 set_cc_op(s1, CC_OP_DECB + ot);
2c0262af 1409 }
d4faa3e0 1410 gen_op_st_rm_T0_A0(s1, ot, d);
cd31fefa 1411 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
2c0262af
FB
1412}
1413
f437d0a3
RH
1414static void gen_shift_flags(DisasContext *s, int ot, TCGv result, TCGv shm1,
1415 TCGv count, bool is_right)
1416{
1417 TCGv_i32 z32, s32, oldop;
1418 TCGv z_tl;
1419
1420 /* Store the results into the CC variables. If we know that the
1421 variable must be dead, store unconditionally. Otherwise we'll
1422 need to not disrupt the current contents. */
1423 z_tl = tcg_const_tl(0);
1424 if (cc_op_live[s->cc_op] & USES_CC_DST) {
1425 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
1426 result, cpu_cc_dst);
1427 } else {
1428 tcg_gen_mov_tl(cpu_cc_dst, result);
1429 }
1430 if (cc_op_live[s->cc_op] & USES_CC_SRC) {
1431 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
1432 shm1, cpu_cc_src);
1433 } else {
1434 tcg_gen_mov_tl(cpu_cc_src, shm1);
1435 }
1436 tcg_temp_free(z_tl);
1437
1438 /* Get the two potential CC_OP values into temporaries. */
1439 tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1440 if (s->cc_op == CC_OP_DYNAMIC) {
1441 oldop = cpu_cc_op;
1442 } else {
1443 tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
1444 oldop = cpu_tmp3_i32;
1445 }
1446
1447 /* Conditionally store the CC_OP value. */
1448 z32 = tcg_const_i32(0);
1449 s32 = tcg_temp_new_i32();
1450 tcg_gen_trunc_tl_i32(s32, count);
1451 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
1452 tcg_temp_free_i32(z32);
1453 tcg_temp_free_i32(s32);
1454
1455 /* The CC_OP value is no longer predictable. */
1456 set_cc_op(s, CC_OP_DYNAMIC);
1457}
1458
b6abf97d
FB
1459static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
1460 int is_right, int is_arith)
2c0262af 1461{
4ba9938c 1462 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
3b46e624 1463
b6abf97d 1464 /* load */
82786041 1465 if (op1 == OR_TMP0) {
909be183 1466 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
82786041 1467 } else {
b6abf97d 1468 gen_op_mov_TN_reg(ot, 0, op1);
82786041 1469 }
b6abf97d 1470
a41f62f5
RH
1471 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
1472 tcg_gen_subi_tl(cpu_tmp0, cpu_T[1], 1);
b6abf97d
FB
1473
1474 if (is_right) {
1475 if (is_arith) {
f484d386 1476 gen_exts(ot, cpu_T[0]);
a41f62f5
RH
1477 tcg_gen_sar_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1478 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
b6abf97d 1479 } else {
cad3a37d 1480 gen_extu(ot, cpu_T[0]);
a41f62f5
RH
1481 tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1482 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
b6abf97d
FB
1483 }
1484 } else {
a41f62f5
RH
1485 tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1486 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
b6abf97d
FB
1487 }
1488
1489 /* store */
d4faa3e0 1490 gen_op_st_rm_T0_A0(s, ot, op1);
82786041 1491
f437d0a3 1492 gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, cpu_T[1], is_right);
b6abf97d
FB
1493}
1494
c1c37968
FB
1495static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
1496 int is_right, int is_arith)
1497{
4ba9938c 1498 int mask = (ot == MO_64 ? 0x3f : 0x1f);
c1c37968
FB
1499
1500 /* load */
1501 if (op1 == OR_TMP0)
909be183 1502 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
c1c37968
FB
1503 else
1504 gen_op_mov_TN_reg(ot, 0, op1);
1505
1506 op2 &= mask;
1507 if (op2 != 0) {
1508 if (is_right) {
1509 if (is_arith) {
1510 gen_exts(ot, cpu_T[0]);
2a449d14 1511 tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
c1c37968
FB
1512 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
1513 } else {
1514 gen_extu(ot, cpu_T[0]);
2a449d14 1515 tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
c1c37968
FB
1516 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
1517 }
1518 } else {
2a449d14 1519 tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
c1c37968
FB
1520 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
1521 }
1522 }
1523
1524 /* store */
d4faa3e0
RH
1525 gen_op_st_rm_T0_A0(s, ot, op1);
1526
c1c37968
FB
1527 /* update eflags if non zero shift */
1528 if (op2 != 0) {
2a449d14 1529 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
c1c37968 1530 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
3ca51d07 1531 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
c1c37968
FB
1532 }
1533}
1534
b6abf97d
FB
1535static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
1536{
1537 if (arg2 >= 0)
1538 tcg_gen_shli_tl(ret, arg1, arg2);
1539 else
1540 tcg_gen_shri_tl(ret, arg1, -arg2);
1541}
1542
34d80a55 1543static void gen_rot_rm_T1(DisasContext *s, int ot, int op1, int is_right)
b6abf97d 1544{
4ba9938c 1545 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
34d80a55 1546 TCGv_i32 t0, t1;
b6abf97d
FB
1547
1548 /* load */
1e4840bf 1549 if (op1 == OR_TMP0) {
909be183 1550 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1e4840bf 1551 } else {
34d80a55 1552 gen_op_mov_TN_reg(ot, 0, op1);
1e4840bf 1553 }
b6abf97d 1554
34d80a55 1555 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
b6abf97d 1556
34d80a55 1557 switch (ot) {
4ba9938c 1558 case MO_8:
34d80a55
RH
1559 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1560 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
1561 tcg_gen_muli_tl(cpu_T[0], cpu_T[0], 0x01010101);
1562 goto do_long;
4ba9938c 1563 case MO_16:
34d80a55
RH
1564 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1565 tcg_gen_deposit_tl(cpu_T[0], cpu_T[0], cpu_T[0], 16, 16);
1566 goto do_long;
1567 do_long:
1568#ifdef TARGET_X86_64
4ba9938c 1569 case MO_32:
34d80a55
RH
1570 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
1571 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
1572 if (is_right) {
1573 tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1574 } else {
1575 tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1576 }
1577 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
1578 break;
1579#endif
1580 default:
1581 if (is_right) {
1582 tcg_gen_rotr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1583 } else {
1584 tcg_gen_rotl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1585 }
1586 break;
b6abf97d 1587 }
b6abf97d 1588
b6abf97d 1589 /* store */
d4faa3e0 1590 gen_op_st_rm_T0_A0(s, ot, op1);
b6abf97d 1591
34d80a55
RH
1592 /* We'll need the flags computed into CC_SRC. */
1593 gen_compute_eflags(s);
b6abf97d 1594
34d80a55
RH
1595 /* The value that was "rotated out" is now present at the other end
1596 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1597 since we've computed the flags into CC_SRC, these variables are
1598 currently dead. */
b6abf97d 1599 if (is_right) {
34d80a55
RH
1600 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
1601 tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
089305ac 1602 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
34d80a55
RH
1603 } else {
1604 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
1605 tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
b6abf97d 1606 }
34d80a55
RH
1607 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1608 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1609
1610 /* Now conditionally store the new CC_OP value. If the shift count
1611 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1612 Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
1613 exactly as we computed above. */
1614 t0 = tcg_const_i32(0);
1615 t1 = tcg_temp_new_i32();
1616 tcg_gen_trunc_tl_i32(t1, cpu_T[1]);
1617 tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
1618 tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
1619 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
1620 cpu_tmp2_i32, cpu_tmp3_i32);
1621 tcg_temp_free_i32(t0);
1622 tcg_temp_free_i32(t1);
1623
1624 /* The CC_OP value is no longer predictable. */
1625 set_cc_op(s, CC_OP_DYNAMIC);
b6abf97d
FB
1626}
1627
8cd6345d 1628static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
1629 int is_right)
1630{
4ba9938c 1631 int mask = (ot == MO_64 ? 0x3f : 0x1f);
34d80a55 1632 int shift;
8cd6345d 1633
1634 /* load */
1635 if (op1 == OR_TMP0) {
909be183 1636 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
8cd6345d 1637 } else {
34d80a55 1638 gen_op_mov_TN_reg(ot, 0, op1);
8cd6345d 1639 }
1640
8cd6345d 1641 op2 &= mask;
8cd6345d 1642 if (op2 != 0) {
34d80a55
RH
1643 switch (ot) {
1644#ifdef TARGET_X86_64
4ba9938c 1645 case MO_32:
34d80a55
RH
1646 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
1647 if (is_right) {
1648 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1649 } else {
1650 tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1651 }
1652 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
1653 break;
1654#endif
1655 default:
1656 if (is_right) {
1657 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], op2);
1658 } else {
1659 tcg_gen_rotli_tl(cpu_T[0], cpu_T[0], op2);
1660 }
1661 break;
4ba9938c 1662 case MO_8:
34d80a55
RH
1663 mask = 7;
1664 goto do_shifts;
4ba9938c 1665 case MO_16:
34d80a55
RH
1666 mask = 15;
1667 do_shifts:
1668 shift = op2 & mask;
1669 if (is_right) {
1670 shift = mask + 1 - shift;
1671 }
1672 gen_extu(ot, cpu_T[0]);
1673 tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], shift);
1674 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], mask + 1 - shift);
1675 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
1676 break;
8cd6345d 1677 }
8cd6345d 1678 }
1679
1680 /* store */
d4faa3e0 1681 gen_op_st_rm_T0_A0(s, ot, op1);
8cd6345d 1682
1683 if (op2 != 0) {
34d80a55 1684 /* Compute the flags into CC_SRC. */
d229edce 1685 gen_compute_eflags(s);
0ff6addd 1686
34d80a55
RH
1687 /* The value that was "rotated out" is now present at the other end
1688 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1689 since we've computed the flags into CC_SRC, these variables are
1690 currently dead. */
8cd6345d 1691 if (is_right) {
34d80a55
RH
1692 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
1693 tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
38ebb396 1694 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
34d80a55
RH
1695 } else {
1696 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
1697 tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
8cd6345d 1698 }
34d80a55
RH
1699 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1700 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1701 set_cc_op(s, CC_OP_ADCOX);
8cd6345d 1702 }
8cd6345d 1703}
1704
b6abf97d
FB
1705/* XXX: add faster immediate = 1 case */
1706static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
1707 int is_right)
1708{
d229edce 1709 gen_compute_eflags(s);
c7b3c873 1710 assert(s->cc_op == CC_OP_EFLAGS);
b6abf97d
FB
1711
1712 /* load */
1713 if (op1 == OR_TMP0)
909be183 1714 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
b6abf97d
FB
1715 else
1716 gen_op_mov_TN_reg(ot, 0, op1);
1717
a7812ae4
PB
1718 if (is_right) {
1719 switch (ot) {
4ba9938c 1720 case MO_8:
7923057b
BS
1721 gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1722 break;
4ba9938c 1723 case MO_16:
7923057b
BS
1724 gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1725 break;
4ba9938c 1726 case MO_32:
7923057b
BS
1727 gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1728 break;
a7812ae4 1729#ifdef TARGET_X86_64
4ba9938c 1730 case MO_64:
7923057b
BS
1731 gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1732 break;
a7812ae4
PB
1733#endif
1734 }
1735 } else {
1736 switch (ot) {
4ba9938c 1737 case MO_8:
7923057b
BS
1738 gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1739 break;
4ba9938c 1740 case MO_16:
7923057b
BS
1741 gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1742 break;
4ba9938c 1743 case MO_32:
7923057b
BS
1744 gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1745 break;
a7812ae4 1746#ifdef TARGET_X86_64
4ba9938c 1747 case MO_64:
7923057b
BS
1748 gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1749 break;
a7812ae4
PB
1750#endif
1751 }
1752 }
b6abf97d 1753 /* store */
d4faa3e0 1754 gen_op_st_rm_T0_A0(s, ot, op1);
b6abf97d
FB
1755}
1756
1757/* XXX: add faster immediate case */
3b9d3cf1 1758static void gen_shiftd_rm_T1(DisasContext *s, int ot, int op1,
f437d0a3 1759 bool is_right, TCGv count_in)
b6abf97d 1760{
4ba9938c 1761 target_ulong mask = (ot == MO_64 ? 63 : 31);
f437d0a3 1762 TCGv count;
b6abf97d
FB
1763
1764 /* load */
1e4840bf 1765 if (op1 == OR_TMP0) {
909be183 1766 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1e4840bf 1767 } else {
f437d0a3 1768 gen_op_mov_TN_reg(ot, 0, op1);
1e4840bf 1769 }
b6abf97d 1770
f437d0a3
RH
1771 count = tcg_temp_new();
1772 tcg_gen_andi_tl(count, count_in, mask);
1e4840bf 1773
f437d0a3 1774 switch (ot) {
4ba9938c 1775 case MO_16:
f437d0a3
RH
1776 /* Note: we implement the Intel behaviour for shift count > 16.
1777 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1778 portion by constructing it as a 32-bit value. */
b6abf97d 1779 if (is_right) {
f437d0a3
RH
1780 tcg_gen_deposit_tl(cpu_tmp0, cpu_T[0], cpu_T[1], 16, 16);
1781 tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
1782 tcg_gen_mov_tl(cpu_T[0], cpu_tmp0);
b6abf97d 1783 } else {
f437d0a3 1784 tcg_gen_deposit_tl(cpu_T[1], cpu_T[0], cpu_T[1], 16, 16);
b6abf97d 1785 }
f437d0a3
RH
1786 /* FALLTHRU */
1787#ifdef TARGET_X86_64
4ba9938c 1788 case MO_32:
f437d0a3
RH
1789 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1790 tcg_gen_subi_tl(cpu_tmp0, count, 1);
b6abf97d 1791 if (is_right) {
f437d0a3
RH
1792 tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
1793 tcg_gen_shr_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
1794 tcg_gen_shr_i64(cpu_T[0], cpu_T[0], count);
1795 } else {
1796 tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[1], cpu_T[0]);
1797 tcg_gen_shl_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
1798 tcg_gen_shl_i64(cpu_T[0], cpu_T[0], count);
1799 tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
1800 tcg_gen_shri_i64(cpu_T[0], cpu_T[0], 32);
1801 }
1802 break;
1803#endif
1804 default:
1805 tcg_gen_subi_tl(cpu_tmp0, count, 1);
1806 if (is_right) {
1807 tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
b6abf97d 1808
f437d0a3
RH
1809 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1810 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], count);
1811 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
b6abf97d 1812 } else {
f437d0a3 1813 tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
4ba9938c 1814 if (ot == MO_16) {
f437d0a3
RH
1815 /* Only needed if count > 16, for Intel behaviour. */
1816 tcg_gen_subfi_tl(cpu_tmp4, 33, count);
1817 tcg_gen_shr_tl(cpu_tmp4, cpu_T[1], cpu_tmp4);
1818 tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
1819 }
1820
1821 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1822 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], count);
1823 tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
b6abf97d 1824 }
f437d0a3
RH
1825 tcg_gen_movi_tl(cpu_tmp4, 0);
1826 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[1], count, cpu_tmp4,
1827 cpu_tmp4, cpu_T[1]);
1828 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1829 break;
b6abf97d 1830 }
b6abf97d 1831
b6abf97d 1832 /* store */
d4faa3e0 1833 gen_op_st_rm_T0_A0(s, ot, op1);
1e4840bf 1834
f437d0a3
RH
1835 gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, count, is_right);
1836 tcg_temp_free(count);
b6abf97d
FB
1837}
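/* Note on the 16-bit SHLD/SHRD handling above: with the Intel behaviour,
   a count larger than 16 pulls bits of the destination back into the
   result, because the quantity shifted is the A:B:A concatenation
   described in the comment.  The deposit into a 32-bit value, plus the
   extra "count > 16" term on the left-shift path, model exactly that
   case. */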
1838
1839static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
1840{
1841 if (s != OR_TMP1)
1842 gen_op_mov_TN_reg(ot, 1, s);
1843 switch(op) {
1844 case OP_ROL:
1845 gen_rot_rm_T1(s1, ot, d, 0);
1846 break;
1847 case OP_ROR:
1848 gen_rot_rm_T1(s1, ot, d, 1);
1849 break;
1850 case OP_SHL:
1851 case OP_SHL1:
1852 gen_shift_rm_T1(s1, ot, d, 0, 0);
1853 break;
1854 case OP_SHR:
1855 gen_shift_rm_T1(s1, ot, d, 1, 0);
1856 break;
1857 case OP_SAR:
1858 gen_shift_rm_T1(s1, ot, d, 1, 1);
1859 break;
1860 case OP_RCL:
1861 gen_rotc_rm_T1(s1, ot, d, 0);
1862 break;
1863 case OP_RCR:
1864 gen_rotc_rm_T1(s1, ot, d, 1);
1865 break;
1866 }
2c0262af
FB
1867}
1868
1869static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
1870{
c1c37968 1871 switch(op) {
8cd6345d 1872 case OP_ROL:
1873 gen_rot_rm_im(s1, ot, d, c, 0);
1874 break;
1875 case OP_ROR:
1876 gen_rot_rm_im(s1, ot, d, c, 1);
1877 break;
c1c37968
FB
1878 case OP_SHL:
1879 case OP_SHL1:
1880 gen_shift_rm_im(s1, ot, d, c, 0, 0);
1881 break;
1882 case OP_SHR:
1883 gen_shift_rm_im(s1, ot, d, c, 1, 0);
1884 break;
1885 case OP_SAR:
1886 gen_shift_rm_im(s1, ot, d, c, 1, 1);
1887 break;
1888 default:
1889 /* currently not optimized */
0ae657b1 1890 tcg_gen_movi_tl(cpu_T[1], c);
c1c37968
FB
1891 gen_shift(s1, op, ot, d, OR_TMP1);
1892 break;
1893 }
2c0262af
FB
1894}
1895
4eeb3939 1896static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
2c0262af 1897{
14ce26e7 1898 target_long disp;
2c0262af 1899 int havesib;
14ce26e7 1900 int base;
2c0262af
FB
1901 int index;
1902 int scale;
2c0262af 1903 int mod, rm, code, override, must_add_seg;
7865eec4 1904 TCGv sum;
2c0262af
FB
1905
1906 override = s->override;
1907 must_add_seg = s->addseg;
1908 if (override >= 0)
1909 must_add_seg = 1;
1910 mod = (modrm >> 6) & 3;
1911 rm = modrm & 7;
1912
1913 if (s->aflag) {
2c0262af
FB
1914 havesib = 0;
1915 base = rm;
7865eec4 1916 index = -1;
2c0262af 1917 scale = 0;
3b46e624 1918
2c0262af
FB
1919 if (base == 4) {
1920 havesib = 1;
0af10c86 1921 code = cpu_ldub_code(env, s->pc++);
2c0262af 1922 scale = (code >> 6) & 3;
14ce26e7 1923 index = ((code >> 3) & 7) | REX_X(s);
7865eec4
RH
1924 if (index == 4) {
1925 index = -1; /* no index */
1926 }
14ce26e7 1927 base = (code & 7);
2c0262af 1928 }
14ce26e7 1929 base |= REX_B(s);
2c0262af
FB
1930
1931 switch (mod) {
1932 case 0:
14ce26e7 1933 if ((base & 7) == 5) {
2c0262af 1934 base = -1;
0af10c86 1935 disp = (int32_t)cpu_ldl_code(env, s->pc);
2c0262af 1936 s->pc += 4;
14ce26e7
FB
1937 if (CODE64(s) && !havesib) {
1938 disp += s->pc + s->rip_offset;
1939 }
2c0262af
FB
1940 } else {
1941 disp = 0;
1942 }
1943 break;
1944 case 1:
0af10c86 1945 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2c0262af
FB
1946 break;
1947 default:
1948 case 2:
0af10c86 1949 disp = (int32_t)cpu_ldl_code(env, s->pc);
2c0262af
FB
1950 s->pc += 4;
1951 break;
1952 }
3b46e624 1953
7865eec4
RH
1954 /* For correct popl handling with esp. */
1955 if (base == R_ESP && s->popl_esp_hack) {
1956 disp += s->popl_esp_hack;
1957 }
1958
1959 /* Compute the address, with a minimum number of TCG ops. */
1960 TCGV_UNUSED(sum);
1961 if (index >= 0) {
1962 if (scale == 0) {
1963 sum = cpu_regs[index];
1964 } else {
1965 tcg_gen_shli_tl(cpu_A0, cpu_regs[index], scale);
1966 sum = cpu_A0;
14ce26e7 1967 }
7865eec4
RH
1968 if (base >= 0) {
1969 tcg_gen_add_tl(cpu_A0, sum, cpu_regs[base]);
1970 sum = cpu_A0;
14ce26e7 1971 }
7865eec4
RH
1972 } else if (base >= 0) {
1973 sum = cpu_regs[base];
2c0262af 1974 }
7865eec4
RH
1975 if (TCGV_IS_UNUSED(sum)) {
1976 tcg_gen_movi_tl(cpu_A0, disp);
1977 } else {
1978 tcg_gen_addi_tl(cpu_A0, sum, disp);
2c0262af 1979 }
7865eec4 1980
2c0262af
FB
1981 if (must_add_seg) {
1982 if (override < 0) {
7865eec4 1983 if (base == R_EBP || base == R_ESP) {
2c0262af 1984 override = R_SS;
7865eec4 1985 } else {
2c0262af 1986 override = R_DS;
7865eec4 1987 }
2c0262af 1988 }
7865eec4
RH
1989
1990 tcg_gen_ld_tl(cpu_tmp0, cpu_env,
1991 offsetof(CPUX86State, segs[override].base));
1992 if (CODE64(s)) {
1993 if (s->aflag != 2) {
1994 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
1995 }
1996 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
4eeb3939 1997 return;
14ce26e7 1998 }
7865eec4
RH
1999
2000 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
2001 }
2002
2003 if (s->aflag != 2) {
2004 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
2c0262af
FB
2005 }
2006 } else {
2007 switch (mod) {
2008 case 0:
2009 if (rm == 6) {
0af10c86 2010 disp = cpu_lduw_code(env, s->pc);
2c0262af 2011 s->pc += 2;
3250cff8 2012 tcg_gen_movi_tl(cpu_A0, disp);
2c0262af
FB
2013 rm = 0; /* avoid SS override */
2014 goto no_rm;
2015 } else {
2016 disp = 0;
2017 }
2018 break;
2019 case 1:
0af10c86 2020 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2c0262af
FB
2021 break;
2022 default:
2023 case 2:
0af10c86 2024 disp = cpu_lduw_code(env, s->pc);
2c0262af
FB
2025 s->pc += 2;
2026 break;
2027 }
2028 switch(rm) {
2029 case 0:
57fec1fe
FB
2030 gen_op_movl_A0_reg(R_EBX);
2031 gen_op_addl_A0_reg_sN(0, R_ESI);
2c0262af
FB
2032 break;
2033 case 1:
57fec1fe
FB
2034 gen_op_movl_A0_reg(R_EBX);
2035 gen_op_addl_A0_reg_sN(0, R_EDI);
2c0262af
FB
2036 break;
2037 case 2:
57fec1fe
FB
2038 gen_op_movl_A0_reg(R_EBP);
2039 gen_op_addl_A0_reg_sN(0, R_ESI);
2c0262af
FB
2040 break;
2041 case 3:
57fec1fe
FB
2042 gen_op_movl_A0_reg(R_EBP);
2043 gen_op_addl_A0_reg_sN(0, R_EDI);
2c0262af
FB
2044 break;
2045 case 4:
57fec1fe 2046 gen_op_movl_A0_reg(R_ESI);
2c0262af
FB
2047 break;
2048 case 5:
57fec1fe 2049 gen_op_movl_A0_reg(R_EDI);
2c0262af
FB
2050 break;
2051 case 6:
57fec1fe 2052 gen_op_movl_A0_reg(R_EBP);
2c0262af
FB
2053 break;
2054 default:
2055 case 7:
57fec1fe 2056 gen_op_movl_A0_reg(R_EBX);
2c0262af
FB
2057 break;
2058 }
2059 if (disp != 0)
2060 gen_op_addl_A0_im(disp);
2061 gen_op_andl_A0_ffff();
2062 no_rm:
2063 if (must_add_seg) {
2064 if (override < 0) {
2065 if (rm == 2 || rm == 3 || rm == 6)
2066 override = R_SS;
2067 else
2068 override = R_DS;
2069 }
7162ab21 2070 gen_op_addl_A0_seg(s, override);
2c0262af
FB
2071 }
2072 }
2c0262af
FB
2073}
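/* For the 16-bit addressing path above, rm selects the classic 8086
   base/index combinations:
     rm=0 BX+SI   rm=1 BX+DI   rm=2 BP+SI   rm=3 BP+DI
     rm=4 SI      rm=5 DI      rm=6 BP (disp16 only when mod == 0)   rm=7 BX
   The BP-based forms default to the SS segment and the others to DS,
   which is what the "rm == 2 || rm == 3 || rm == 6" test encodes. */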
2074
0af10c86 2075static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
e17a36ce
FB
2076{
2077 int mod, rm, base, code;
2078
2079 mod = (modrm >> 6) & 3;
2080 if (mod == 3)
2081 return;
2082 rm = modrm & 7;
2083
2084 if (s->aflag) {
2085
2086 base = rm;
3b46e624 2087
e17a36ce 2088 if (base == 4) {
0af10c86 2089 code = cpu_ldub_code(env, s->pc++);
e17a36ce
FB
2090 base = (code & 7);
2091 }
3b46e624 2092
e17a36ce
FB
2093 switch (mod) {
2094 case 0:
2095 if (base == 5) {
2096 s->pc += 4;
2097 }
2098 break;
2099 case 1:
2100 s->pc++;
2101 break;
2102 default:
2103 case 2:
2104 s->pc += 4;
2105 break;
2106 }
2107 } else {
2108 switch (mod) {
2109 case 0:
2110 if (rm == 6) {
2111 s->pc += 2;
2112 }
2113 break;
2114 case 1:
2115 s->pc++;
2116 break;
2117 default:
2118 case 2:
2119 s->pc += 2;
2120 break;
2121 }
2122 }
2123}
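/* gen_nop_modrm() above only advances s->pc past the SIB and
   displacement bytes of a modrm operand without generating any code; it
   is used when the memory operand must be decoded but is never actually
   accessed (such as the multi-byte NOP forms). */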
2124
664e0f19
FB
2125/* used for LEA and MOV AX, mem */
2126static void gen_add_A0_ds_seg(DisasContext *s)
2127{
2128 int override, must_add_seg;
2129 must_add_seg = s->addseg;
2130 override = R_DS;
2131 if (s->override >= 0) {
2132 override = s->override;
2133 must_add_seg = 1;
664e0f19
FB
2134 }
2135 if (must_add_seg) {
8f091a59
FB
2136#ifdef TARGET_X86_64
2137 if (CODE64(s)) {
57fec1fe 2138 gen_op_addq_A0_seg(override);
5fafdf24 2139 } else
8f091a59
FB
2140#endif
2141 {
7162ab21 2142 gen_op_addl_A0_seg(s, override);
8f091a59 2143 }
664e0f19
FB
2144 }
2145}
2146
222a3336 2147/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2c0262af 2148 OR_TMP0 */
0af10c86
BS
2149static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2150 int ot, int reg, int is_store)
2c0262af 2151{
4eeb3939 2152 int mod, rm;
2c0262af
FB
2153
2154 mod = (modrm >> 6) & 3;
14ce26e7 2155 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
2156 if (mod == 3) {
2157 if (is_store) {
2158 if (reg != OR_TMP0)
57fec1fe
FB
2159 gen_op_mov_TN_reg(ot, 0, reg);
2160 gen_op_mov_reg_T0(ot, rm);
2c0262af 2161 } else {
57fec1fe 2162 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af 2163 if (reg != OR_TMP0)
57fec1fe 2164 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
2165 }
2166 } else {
4eeb3939 2167 gen_lea_modrm(env, s, modrm);
2c0262af
FB
2168 if (is_store) {
2169 if (reg != OR_TMP0)
57fec1fe 2170 gen_op_mov_TN_reg(ot, 0, reg);
fd8ca9f6 2171 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 2172 } else {
909be183 2173 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 2174 if (reg != OR_TMP0)
57fec1fe 2175 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
2176 }
2177 }
2178}
2179
0af10c86 2180static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, int ot)
2c0262af
FB
2181{
2182 uint32_t ret;
2183
2184 switch(ot) {
4ba9938c 2185 case MO_8:
0af10c86 2186 ret = cpu_ldub_code(env, s->pc);
2c0262af
FB
2187 s->pc++;
2188 break;
4ba9938c 2189 case MO_16:
0af10c86 2190 ret = cpu_lduw_code(env, s->pc);
2c0262af
FB
2191 s->pc += 2;
2192 break;
2193 default:
4ba9938c 2194 case MO_32:
0af10c86 2195 ret = cpu_ldl_code(env, s->pc);
2c0262af
FB
2196 s->pc += 4;
2197 break;
2198 }
2199 return ret;
2200}
2201
14ce26e7
FB
2202static inline int insn_const_size(unsigned int ot)
2203{
4ba9938c 2204 if (ot <= MO_32) {
14ce26e7 2205 return 1 << ot;
4ba9938c 2206 } else {
14ce26e7 2207 return 4;
4ba9938c 2208 }
14ce26e7
FB
2209}
2210
6e256c93
FB
2211static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2212{
2213 TranslationBlock *tb;
2214 target_ulong pc;
2215
2216 pc = s->cs_base + eip;
2217 tb = s->tb;
2218 /* NOTE: we handle the case where the TB spans two pages here */
2219 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2220 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2221 /* jump to same page: we can use a direct jump */
57fec1fe 2222 tcg_gen_goto_tb(tb_num);
6e256c93 2223 gen_jmp_im(eip);
8cfd0495 2224 tcg_gen_exit_tb((uintptr_t)tb + tb_num);
6e256c93
FB
2225 } else {
2226 /* jump to another page: currently not optimized */
2227 gen_jmp_im(eip);
2228 gen_eob(s);
2229 }
2230}
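/* The page check above restricts direct TB chaining (tcg_gen_goto_tb) to
   targets inside a page the current TB already spans; any other target
   falls back to gen_jmp_im()/gen_eob() and a fresh TB lookup, presumably
   so that invalidating those pages also takes the chained link with
   it. */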
2231
5fafdf24 2232static inline void gen_jcc(DisasContext *s, int b,
14ce26e7 2233 target_ulong val, target_ulong next_eip)
2c0262af 2234{
b27fc131 2235 int l1, l2;
3b46e624 2236
2c0262af 2237 if (s->jmp_opt) {
14ce26e7 2238 l1 = gen_new_label();
b27fc131 2239 gen_jcc1(s, b, l1);
dc259201 2240
6e256c93 2241 gen_goto_tb(s, 0, next_eip);
14ce26e7
FB
2242
2243 gen_set_label(l1);
6e256c93 2244 gen_goto_tb(s, 1, val);
5779406a 2245 s->is_jmp = DISAS_TB_JUMP;
2c0262af 2246 } else {
14ce26e7
FB
2247 l1 = gen_new_label();
2248 l2 = gen_new_label();
b27fc131 2249 gen_jcc1(s, b, l1);
8e1c85e3 2250
14ce26e7 2251 gen_jmp_im(next_eip);
8e1c85e3
FB
2252 tcg_gen_br(l2);
2253
14ce26e7
FB
2254 gen_set_label(l1);
2255 gen_jmp_im(val);
2256 gen_set_label(l2);
2c0262af
FB
2257 gen_eob(s);
2258 }
2259}
2260
f32d3781
PB
2261static void gen_cmovcc1(CPUX86State *env, DisasContext *s, int ot, int b,
2262 int modrm, int reg)
2263{
57eb0cc8 2264 CCPrepare cc;
f32d3781 2265
57eb0cc8 2266 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
f32d3781 2267
57eb0cc8
RH
2268 cc = gen_prepare_cc(s, b, cpu_T[1]);
2269 if (cc.mask != -1) {
2270 TCGv t0 = tcg_temp_new();
2271 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2272 cc.reg = t0;
2273 }
2274 if (!cc.use_reg2) {
2275 cc.reg2 = tcg_const_tl(cc.imm);
f32d3781
PB
2276 }
2277
57eb0cc8
RH
2278 tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
2279 cpu_T[0], cpu_regs[reg]);
2280 gen_op_mov_reg_T0(ot, reg);
2281
2282 if (cc.mask != -1) {
2283 tcg_temp_free(cc.reg);
2284 }
2285 if (!cc.use_reg2) {
2286 tcg_temp_free(cc.reg2);
2287 }
f32d3781
PB
2288}
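/* Note that the movcond above always stores a value into the destination
   register, selecting either the loaded operand or the register's
   previous contents.  This matches CMOVcc semantics, where the
   destination is written even when the condition is false -- visible on
   x86-64, since a 32-bit CMOVcc still zero-extends the destination. */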
2289
3bd7da9e
FB
2290static inline void gen_op_movl_T0_seg(int seg_reg)
2291{
2292 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2293 offsetof(CPUX86State,segs[seg_reg].selector));
2294}
2295
2296static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2297{
2298 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2299 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2300 offsetof(CPUX86State,segs[seg_reg].selector));
2301 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2302 tcg_gen_st_tl(cpu_T[0], cpu_env,
2303 offsetof(CPUX86State,segs[seg_reg].base));
2304}
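/* In real and VM86 mode the segment base is simply the selector shifted
   left by four, so loading e.g. 0x1234 into a segment register yields a
   base of 0x12340; the masked store plus the shift above implement
   exactly that. */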
2305
2c0262af
FB
2306/* move T0 to seg_reg and compute if the CPU state may change. Never
2307 call this function with seg_reg == R_CS */
14ce26e7 2308static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2c0262af 2309{
3415a4dd
FB
2310 if (s->pe && !s->vm86) {
2311 /* XXX: optimize by finding processor state dynamically */
773cdfcc 2312 gen_update_cc_op(s);
14ce26e7 2313 gen_jmp_im(cur_eip);
b6abf97d 2314 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 2315 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
dc196a57
FB
2316 /* abort translation because the addseg value may change or
2317 because ss32 may change. For R_SS, translation must always
2318 stop, as special handling is needed to disable hardware
2319 interrupts for the next instruction */
2320 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
5779406a 2321 s->is_jmp = DISAS_TB_JUMP;
3415a4dd 2322 } else {
3bd7da9e 2323 gen_op_movl_seg_T0_vm(seg_reg);
dc196a57 2324 if (seg_reg == R_SS)
5779406a 2325 s->is_jmp = DISAS_TB_JUMP;
3415a4dd 2326 }
2c0262af
FB
2327}
2328
0573fbfc
TS
2329static inline int svm_is_rep(int prefixes)
2330{
2331 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2332}
2333
872929aa 2334static inline void
0573fbfc 2335gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
b8b6a50b 2336 uint32_t type, uint64_t param)
0573fbfc 2337{
872929aa
FB
2338 /* no SVM activated; fast case */
2339 if (likely(!(s->flags & HF_SVMI_MASK)))
2340 return;
773cdfcc 2341 gen_update_cc_op(s);
872929aa 2342 gen_jmp_im(pc_start - s->cs_base);
052e80d5 2343 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
a7812ae4 2344 tcg_const_i64(param));
0573fbfc
TS
2345}
2346
872929aa 2347static inline void
0573fbfc
TS
2348gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2349{
872929aa 2350 gen_svm_check_intercept_param(s, pc_start, type, 0);
0573fbfc
TS
2351}
2352
4f31916f
FB
2353static inline void gen_stack_update(DisasContext *s, int addend)
2354{
14ce26e7
FB
2355#ifdef TARGET_X86_64
2356 if (CODE64(s)) {
6e0d8677 2357 gen_op_add_reg_im(2, R_ESP, addend);
14ce26e7
FB
2358 } else
2359#endif
4f31916f 2360 if (s->ss32) {
6e0d8677 2361 gen_op_add_reg_im(1, R_ESP, addend);
4f31916f 2362 } else {
6e0d8677 2363 gen_op_add_reg_im(0, R_ESP, addend);
4f31916f
FB
2364 }
2365}
2366
2c0262af
FB
2367/* generate a push. It depends on ss32, addseg and dflag */
2368static void gen_push_T0(DisasContext *s)
2369{
14ce26e7
FB
2370#ifdef TARGET_X86_64
2371 if (CODE64(s)) {
57fec1fe 2372 gen_op_movq_A0_reg(R_ESP);
8f091a59 2373 if (s->dflag) {
57fec1fe 2374 gen_op_addq_A0_im(-8);
fd8ca9f6 2375 gen_op_st_v(s, MO_64, cpu_T[0], cpu_A0);
8f091a59 2376 } else {
57fec1fe 2377 gen_op_addq_A0_im(-2);
fd8ca9f6 2378 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
8f091a59 2379 }
57fec1fe 2380 gen_op_mov_reg_A0(2, R_ESP);
5fafdf24 2381 } else
14ce26e7
FB
2382#endif
2383 {
57fec1fe 2384 gen_op_movl_A0_reg(R_ESP);
14ce26e7 2385 if (!s->dflag)
57fec1fe 2386 gen_op_addl_A0_im(-2);
14ce26e7 2387 else
57fec1fe 2388 gen_op_addl_A0_im(-4);
14ce26e7
FB
2389 if (s->ss32) {
2390 if (s->addseg) {
bbf662ee 2391 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
7162ab21 2392 gen_op_addl_A0_seg(s, R_SS);
14ce26e7
FB
2393 }
2394 } else {
2395 gen_op_andl_A0_ffff();
bbf662ee 2396 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
7162ab21 2397 gen_op_addl_A0_seg(s, R_SS);
2c0262af 2398 }
fd8ca9f6 2399 gen_op_st_v(s, s->dflag + 1, cpu_T[0], cpu_A0);
14ce26e7 2400 if (s->ss32 && !s->addseg)
57fec1fe 2401 gen_op_mov_reg_A0(1, R_ESP);
14ce26e7 2402 else
57fec1fe 2403 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
2c0262af
FB
2404 }
2405}
2406
4f31916f
FB
2407/* generate a push. It depends on ss32, addseg and dflag */
2408/* slower version for T1, only used for call Ev */
2409static void gen_push_T1(DisasContext *s)
2c0262af 2410{
14ce26e7
FB
2411#ifdef TARGET_X86_64
2412 if (CODE64(s)) {
57fec1fe 2413 gen_op_movq_A0_reg(R_ESP);
8f091a59 2414 if (s->dflag) {
57fec1fe 2415 gen_op_addq_A0_im(-8);
b5afc104 2416 gen_op_st_v(s, MO_64, cpu_T[1], cpu_A0);
8f091a59 2417 } else {
57fec1fe 2418 gen_op_addq_A0_im(-2);
ee3138da 2419 gen_op_st_v(s, MO_16, cpu_T[1], cpu_A0);
8f091a59 2420 }
57fec1fe 2421 gen_op_mov_reg_A0(2, R_ESP);
5fafdf24 2422 } else
14ce26e7
FB
2423#endif
2424 {
57fec1fe 2425 gen_op_movl_A0_reg(R_ESP);
14ce26e7 2426 if (!s->dflag)
57fec1fe 2427 gen_op_addl_A0_im(-2);
14ce26e7 2428 else
57fec1fe 2429 gen_op_addl_A0_im(-4);
14ce26e7
FB
2430 if (s->ss32) {
2431 if (s->addseg) {
7162ab21 2432 gen_op_addl_A0_seg(s, R_SS);
14ce26e7
FB
2433 }
2434 } else {
2435 gen_op_andl_A0_ffff();
7162ab21 2436 gen_op_addl_A0_seg(s, R_SS);
2c0262af 2437 }
b5afc104 2438 gen_op_st_v(s, s->dflag + 1, cpu_T[1], cpu_A0);
3b46e624 2439
14ce26e7 2440 if (s->ss32 && !s->addseg)
57fec1fe 2441 gen_op_mov_reg_A0(1, R_ESP);
14ce26e7
FB
2442 else
2443 gen_stack_update(s, (-2) << s->dflag);
2c0262af
FB
2444 }
2445}
2446
4f31916f
FB
2447/* two step pop is necessary for precise exceptions */
2448static void gen_pop_T0(DisasContext *s)
2c0262af 2449{
14ce26e7
FB
2450#ifdef TARGET_X86_64
2451 if (CODE64(s)) {
57fec1fe 2452 gen_op_movq_A0_reg(R_ESP);
909be183 2453 gen_op_ld_v(s, s->dflag ? MO_64 : MO_16, cpu_T[0], cpu_A0);
5fafdf24 2454 } else
14ce26e7
FB
2455#endif
2456 {
57fec1fe 2457 gen_op_movl_A0_reg(R_ESP);
14ce26e7
FB
2458 if (s->ss32) {
2459 if (s->addseg)
7162ab21 2460 gen_op_addl_A0_seg(s, R_SS);
14ce26e7
FB
2461 } else {
2462 gen_op_andl_A0_ffff();
7162ab21 2463 gen_op_addl_A0_seg(s, R_SS);
14ce26e7 2464 }
909be183 2465 gen_op_ld_v(s, s->dflag + 1, cpu_T[0], cpu_A0);
2c0262af
FB
2466 }
2467}
2468
2469static void gen_pop_update(DisasContext *s)
2470{
14ce26e7 2471#ifdef TARGET_X86_64
8f091a59 2472 if (CODE64(s) && s->dflag) {
14ce26e7
FB
2473 gen_stack_update(s, 8);
2474 } else
2475#endif
2476 {
2477 gen_stack_update(s, 2 << s->dflag);
2478 }
2c0262af
FB
2479}
2480
2481static void gen_stack_A0(DisasContext *s)
2482{
57fec1fe 2483 gen_op_movl_A0_reg(R_ESP);
2c0262af
FB
2484 if (!s->ss32)
2485 gen_op_andl_A0_ffff();
bbf662ee 2486 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2c0262af 2487 if (s->addseg)
7162ab21 2488 gen_op_addl_A0_seg(s, R_SS);
2c0262af
FB
2489}
2490
2491/* NOTE: wrap-around in 16-bit mode is not fully handled */
2492static void gen_pusha(DisasContext *s)
2493{
2494 int i;
57fec1fe 2495 gen_op_movl_A0_reg(R_ESP);
2c0262af
FB
2496 gen_op_addl_A0_im(-16 << s->dflag);
2497 if (!s->ss32)
2498 gen_op_andl_A0_ffff();
bbf662ee 2499 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2c0262af 2500 if (s->addseg)
7162ab21 2501 gen_op_addl_A0_seg(s, R_SS);
2c0262af 2502 for(i = 0;i < 8; i++) {
4ba9938c 2503 gen_op_mov_TN_reg(MO_32, 0, 7 - i);
fd8ca9f6 2504 gen_op_st_v(s, MO_16 + s->dflag, cpu_T[0], cpu_A0);
2c0262af
FB
2505 gen_op_addl_A0_im(2 << s->dflag);
2506 }
4ba9938c 2507 gen_op_mov_reg_T1(MO_16 + s->ss32, R_ESP);
2c0262af
FB
2508}
2509
2510/* NOTE: wrap-around in 16-bit mode is not fully handled */
2511static void gen_popa(DisasContext *s)
2512{
2513 int i;
57fec1fe 2514 gen_op_movl_A0_reg(R_ESP);
2c0262af
FB
2515 if (!s->ss32)
2516 gen_op_andl_A0_ffff();
bbf662ee
FB
2517 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2518 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2c0262af 2519 if (s->addseg)
7162ab21 2520 gen_op_addl_A0_seg(s, R_SS);
2c0262af
FB
2521 for(i = 0;i < 8; i++) {
2522 /* ESP is not reloaded */
2523 if (i != 3) {
909be183 2524 gen_op_ld_v(s, MO_16 + s->dflag, cpu_T[0], cpu_A0);
4ba9938c 2525 gen_op_mov_reg_T0(MO_16 + s->dflag, 7 - i);
2c0262af
FB
2526 }
2527 gen_op_addl_A0_im(2 << s->dflag);
2528 }
4ba9938c 2529 gen_op_mov_reg_T1(MO_16 + s->ss32, R_ESP);
2c0262af
FB
2530}
2531
2c0262af
FB
2532static void gen_enter(DisasContext *s, int esp_addend, int level)
2533{
61a8c4ec 2534 int ot, opsize;
2c0262af 2535
2c0262af 2536 level &= 0x1f;
8f091a59
FB
2537#ifdef TARGET_X86_64
2538 if (CODE64(s)) {
4ba9938c 2539 ot = s->dflag ? MO_64 : MO_16;
8f091a59 2540 opsize = 1 << ot;
3b46e624 2541
57fec1fe 2542 gen_op_movl_A0_reg(R_ESP);
8f091a59 2543 gen_op_addq_A0_im(-opsize);
bbf662ee 2544 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
8f091a59
FB
2545
2546 /* push bp */
4ba9938c 2547 gen_op_mov_TN_reg(MO_32, 0, R_EBP);
fd8ca9f6 2548 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
8f091a59 2549 if (level) {
b5b38f61 2550 /* XXX: must save state */
2999a0b2 2551 gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
4ba9938c 2552 tcg_const_i32((ot == MO_64)),
a7812ae4 2553 cpu_T[1]);
8f091a59 2554 }
57fec1fe 2555 gen_op_mov_reg_T1(ot, R_EBP);
bbf662ee 2556 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
4ba9938c 2557 gen_op_mov_reg_T1(MO_64, R_ESP);
5fafdf24 2558 } else
8f091a59
FB
2559#endif
2560 {
4ba9938c 2561 ot = s->dflag + MO_16;
8f091a59 2562 opsize = 2 << s->dflag;
3b46e624 2563
57fec1fe 2564 gen_op_movl_A0_reg(R_ESP);
8f091a59
FB
2565 gen_op_addl_A0_im(-opsize);
2566 if (!s->ss32)
2567 gen_op_andl_A0_ffff();
bbf662ee 2568 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
8f091a59 2569 if (s->addseg)
7162ab21 2570 gen_op_addl_A0_seg(s, R_SS);
8f091a59 2571 /* push bp */
4ba9938c 2572 gen_op_mov_TN_reg(MO_32, 0, R_EBP);
fd8ca9f6 2573 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
8f091a59 2574 if (level) {
b5b38f61 2575 /* XXX: must save state */
2999a0b2 2576 gen_helper_enter_level(cpu_env, tcg_const_i32(level),
a7812ae4
PB
2577 tcg_const_i32(s->dflag),
2578 cpu_T[1]);
8f091a59 2579 }
57fec1fe 2580 gen_op_mov_reg_T1(ot, R_EBP);
bbf662ee 2581 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
4ba9938c 2582 gen_op_mov_reg_T1(MO_16 + s->ss32, R_ESP);
2c0262af 2583 }
2c0262af
FB
2584}
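/* As a reference for the code above: "enter N, 0" behaves like
   push (e)bp; mov (e)bp, (e)sp; sub (e)sp, N.  Non-zero nesting levels
   additionally maintain a display of outer frame pointers, which is
   delegated to the enter_level / enter64_level helpers. */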
2585
14ce26e7 2586static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2c0262af 2587{
773cdfcc 2588 gen_update_cc_op(s);
14ce26e7 2589 gen_jmp_im(cur_eip);
77b2bc2c 2590 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
5779406a 2591 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2592}
2593
2594/* an interrupt is different from an exception because of the
7f75ffd3 2595 privilege checks */
5fafdf24 2596static void gen_interrupt(DisasContext *s, int intno,
14ce26e7 2597 target_ulong cur_eip, target_ulong next_eip)
2c0262af 2598{
773cdfcc 2599 gen_update_cc_op(s);
14ce26e7 2600 gen_jmp_im(cur_eip);
77b2bc2c 2601 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
a7812ae4 2602 tcg_const_i32(next_eip - cur_eip));
5779406a 2603 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2604}
2605
14ce26e7 2606static void gen_debug(DisasContext *s, target_ulong cur_eip)
2c0262af 2607{
773cdfcc 2608 gen_update_cc_op(s);
14ce26e7 2609 gen_jmp_im(cur_eip);
4a7443be 2610 gen_helper_debug(cpu_env);
5779406a 2611 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2612}
2613
2614/* generate a generic end of block. Trace exception is also generated
2615 if needed */
2616static void gen_eob(DisasContext *s)
2617{
773cdfcc 2618 gen_update_cc_op(s);
a2cc3b24 2619 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
f0967a1a 2620 gen_helper_reset_inhibit_irq(cpu_env);
a2cc3b24 2621 }
a2397807 2622 if (s->tb->flags & HF_RF_MASK) {
f0967a1a 2623 gen_helper_reset_rf(cpu_env);
a2397807 2624 }
34865134 2625 if (s->singlestep_enabled) {
4a7443be 2626 gen_helper_debug(cpu_env);
34865134 2627 } else if (s->tf) {
4a7443be 2628 gen_helper_single_step(cpu_env);
2c0262af 2629 } else {
57fec1fe 2630 tcg_gen_exit_tb(0);
2c0262af 2631 }
5779406a 2632 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2633}
2634
2635/* generate a jump to eip. No segment change must happen before this,
2636 as a direct jump to the next block may occur */
14ce26e7 2637static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2c0262af 2638{
a3251186
RH
2639 gen_update_cc_op(s);
2640 set_cc_op(s, CC_OP_DYNAMIC);
2c0262af 2641 if (s->jmp_opt) {
6e256c93 2642 gen_goto_tb(s, tb_num, eip);
5779406a 2643 s->is_jmp = DISAS_TB_JUMP;
2c0262af 2644 } else {
14ce26e7 2645 gen_jmp_im(eip);
2c0262af
FB
2646 gen_eob(s);
2647 }
2648}
2649
14ce26e7
FB
2650static void gen_jmp(DisasContext *s, target_ulong eip)
2651{
2652 gen_jmp_tb(s, eip, 0);
2653}
2654
323d1876 2655static inline void gen_ldq_env_A0(DisasContext *s, int offset)
8686c490 2656{
3c5f4116 2657 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
b6abf97d 2658 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
8686c490 2659}
664e0f19 2660
323d1876 2661static inline void gen_stq_env_A0(DisasContext *s, int offset)
8686c490 2662{
b6abf97d 2663 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
3523e4bd 2664 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
8686c490 2665}
664e0f19 2666
323d1876 2667static inline void gen_ldo_env_A0(DisasContext *s, int offset)
8686c490 2668{
5c42a7cd 2669 int mem_index = s->mem_index;
3c5f4116 2670 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
b6abf97d 2671 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
8686c490 2672 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
3c5f4116 2673 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
b6abf97d 2674 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
8686c490 2675}
14ce26e7 2676
323d1876 2677static inline void gen_sto_env_A0(DisasContext *s, int offset)
8686c490 2678{
5c42a7cd 2679 int mem_index = s->mem_index;
b6abf97d 2680 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
3523e4bd 2681 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
8686c490 2682 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
b6abf97d 2683 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
3523e4bd 2684 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
8686c490 2685}
14ce26e7 2686
5af45186
FB
2687static inline void gen_op_movo(int d_offset, int s_offset)
2688{
b6abf97d
FB
2689 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2690 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2691 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2692 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
5af45186
FB
2693}
2694
2695static inline void gen_op_movq(int d_offset, int s_offset)
2696{
b6abf97d
FB
2697 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2698 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
5af45186
FB
2699}
2700
2701static inline void gen_op_movl(int d_offset, int s_offset)
2702{
b6abf97d
FB
2703 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2704 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
5af45186
FB
2705}
2706
2707static inline void gen_op_movq_env_0(int d_offset)
2708{
b6abf97d
FB
2709 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2710 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
5af45186 2711}
664e0f19 2712
d3eb5eae
BS
2713typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2714typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2715typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2716typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2717typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2718typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2719 TCGv_i32 val);
c4baa050 2720typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
d3eb5eae
BS
2721typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2722 TCGv val);
c4baa050 2723
5af45186
FB
2724#define SSE_SPECIAL ((void *)1)
2725#define SSE_DUMMY ((void *)2)
664e0f19 2726
a7812ae4
PB
2727#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2728#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2729 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
5af45186 2730
d3eb5eae 2731static const SSEFunc_0_epp sse_op_table1[256][4] = {
a35f3ec7
AJ
2732 /* 3DNow! extensions */
2733 [0x0e] = { SSE_DUMMY }, /* femms */
2734 [0x0f] = { SSE_DUMMY }, /* pf... */
664e0f19
FB
2735 /* pure SSE operations */
2736 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2737 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
465e9838 2738 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
664e0f19 2739 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
a7812ae4
PB
2740 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2741 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
664e0f19
FB
2742 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2743 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2744
2745 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2746 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2747 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
d9f4bb27 2748 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
664e0f19
FB
2749 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2750 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
a7812ae4
PB
2751 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2752 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
664e0f19
FB
2753 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2754 [0x51] = SSE_FOP(sqrt),
a7812ae4
PB
2755 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2756 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2757 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2758 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2759 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2760 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
664e0f19
FB
2761 [0x58] = SSE_FOP(add),
2762 [0x59] = SSE_FOP(mul),
a7812ae4
PB
2763 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2764 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2765 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
664e0f19
FB
2766 [0x5c] = SSE_FOP(sub),
2767 [0x5d] = SSE_FOP(min),
2768 [0x5e] = SSE_FOP(div),
2769 [0x5f] = SSE_FOP(max),
2770
2771 [0xc2] = SSE_FOP(cmpeq),
d3eb5eae
BS
2772 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2773 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
664e0f19 2774
7073fbad
RH
2775 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2776 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2777 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
4242b1bd 2778
664e0f19
FB
2779 /* MMX ops and their SSE extensions */
2780 [0x60] = MMX_OP2(punpcklbw),
2781 [0x61] = MMX_OP2(punpcklwd),
2782 [0x62] = MMX_OP2(punpckldq),
2783 [0x63] = MMX_OP2(packsswb),
2784 [0x64] = MMX_OP2(pcmpgtb),
2785 [0x65] = MMX_OP2(pcmpgtw),
2786 [0x66] = MMX_OP2(pcmpgtl),
2787 [0x67] = MMX_OP2(packuswb),
2788 [0x68] = MMX_OP2(punpckhbw),
2789 [0x69] = MMX_OP2(punpckhwd),
2790 [0x6a] = MMX_OP2(punpckhdq),
2791 [0x6b] = MMX_OP2(packssdw),
a7812ae4
PB
2792 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2793 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
664e0f19
FB
2794 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2795 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
d3eb5eae
BS
2796 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2797 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2798 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2799 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
664e0f19
FB
2800 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2801 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2802 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2803 [0x74] = MMX_OP2(pcmpeqb),
2804 [0x75] = MMX_OP2(pcmpeqw),
2805 [0x76] = MMX_OP2(pcmpeql),
a35f3ec7 2806 [0x77] = { SSE_DUMMY }, /* emms */
d9f4bb27
AP
2807 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2808 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
a7812ae4
PB
2809 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2810 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
664e0f19
FB
2811 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2812 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2813 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2814 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
a7812ae4 2815 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
664e0f19
FB
2816 [0xd1] = MMX_OP2(psrlw),
2817 [0xd2] = MMX_OP2(psrld),
2818 [0xd3] = MMX_OP2(psrlq),
2819 [0xd4] = MMX_OP2(paddq),
2820 [0xd5] = MMX_OP2(pmullw),
2821 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2822 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2823 [0xd8] = MMX_OP2(psubusb),
2824 [0xd9] = MMX_OP2(psubusw),
2825 [0xda] = MMX_OP2(pminub),
2826 [0xdb] = MMX_OP2(pand),
2827 [0xdc] = MMX_OP2(paddusb),
2828 [0xdd] = MMX_OP2(paddusw),
2829 [0xde] = MMX_OP2(pmaxub),
2830 [0xdf] = MMX_OP2(pandn),
2831 [0xe0] = MMX_OP2(pavgb),
2832 [0xe1] = MMX_OP2(psraw),
2833 [0xe2] = MMX_OP2(psrad),
2834 [0xe3] = MMX_OP2(pavgw),
2835 [0xe4] = MMX_OP2(pmulhuw),
2836 [0xe5] = MMX_OP2(pmulhw),
a7812ae4 2837 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
664e0f19
FB
2838 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2839 [0xe8] = MMX_OP2(psubsb),
2840 [0xe9] = MMX_OP2(psubsw),
2841 [0xea] = MMX_OP2(pminsw),
2842 [0xeb] = MMX_OP2(por),
2843 [0xec] = MMX_OP2(paddsb),
2844 [0xed] = MMX_OP2(paddsw),
2845 [0xee] = MMX_OP2(pmaxsw),
2846 [0xef] = MMX_OP2(pxor),
465e9838 2847 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
664e0f19
FB
2848 [0xf1] = MMX_OP2(psllw),
2849 [0xf2] = MMX_OP2(pslld),
2850 [0xf3] = MMX_OP2(psllq),
2851 [0xf4] = MMX_OP2(pmuludq),
2852 [0xf5] = MMX_OP2(pmaddwd),
2853 [0xf6] = MMX_OP2(psadbw),
d3eb5eae
BS
2854 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
2855 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
664e0f19
FB
2856 [0xf8] = MMX_OP2(psubb),
2857 [0xf9] = MMX_OP2(psubw),
2858 [0xfa] = MMX_OP2(psubl),
2859 [0xfb] = MMX_OP2(psubq),
2860 [0xfc] = MMX_OP2(paddb),
2861 [0xfd] = MMX_OP2(paddw),
2862 [0xfe] = MMX_OP2(paddl),
2863};
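/* The second index of sse_op_table1 is derived from the mandatory prefix
   in gen_sse() below: 0 = no prefix (MMX / ps forms), 1 = 0x66 (pd),
   2 = 0xF3 (ss), 3 = 0xF2 (sd), matching the operand order used by the
   SSE_FOP() entries. */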
2864
d3eb5eae 2865static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
664e0f19
FB
2866 [0 + 2] = MMX_OP2(psrlw),
2867 [0 + 4] = MMX_OP2(psraw),
2868 [0 + 6] = MMX_OP2(psllw),
2869 [8 + 2] = MMX_OP2(psrld),
2870 [8 + 4] = MMX_OP2(psrad),
2871 [8 + 6] = MMX_OP2(pslld),
2872 [16 + 2] = MMX_OP2(psrlq),
a7812ae4 2873 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
664e0f19 2874 [16 + 6] = MMX_OP2(psllq),
a7812ae4 2875 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
664e0f19
FB
2876};
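/* sse_op_table2 is indexed as ((b - 1) & 3) * 8 + reg (see gen_sse
   below): rows 0/8/16 correspond to the 0x71/0x72/0x73 shift-by-immediate
   groups and the column is the /r field of the modrm byte, i.e. 2 =
   logical right, 4 = arithmetic right, 6 = left, with 3 and 7 used only
   for the xmm psrldq/pslldq forms. */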
2877
d3eb5eae 2878static const SSEFunc_0_epi sse_op_table3ai[] = {
a7812ae4 2879 gen_helper_cvtsi2ss,
11f8cdbc 2880 gen_helper_cvtsi2sd
c4baa050 2881};
a7812ae4 2882
11f8cdbc 2883#ifdef TARGET_X86_64
d3eb5eae 2884static const SSEFunc_0_epl sse_op_table3aq[] = {
11f8cdbc
SW
2885 gen_helper_cvtsq2ss,
2886 gen_helper_cvtsq2sd
2887};
2888#endif
2889
d3eb5eae 2890static const SSEFunc_i_ep sse_op_table3bi[] = {
a7812ae4 2891 gen_helper_cvttss2si,
a7812ae4 2892 gen_helper_cvtss2si,
bedc2ac1 2893 gen_helper_cvttsd2si,
11f8cdbc 2894 gen_helper_cvtsd2si
664e0f19 2895};
3b46e624 2896
11f8cdbc 2897#ifdef TARGET_X86_64
d3eb5eae 2898static const SSEFunc_l_ep sse_op_table3bq[] = {
11f8cdbc 2899 gen_helper_cvttss2sq,
11f8cdbc 2900 gen_helper_cvtss2sq,
bedc2ac1 2901 gen_helper_cvttsd2sq,
11f8cdbc
SW
2902 gen_helper_cvtsd2sq
2903};
2904#endif
2905
d3eb5eae 2906static const SSEFunc_0_epp sse_op_table4[8][4] = {
664e0f19
FB
2907 SSE_FOP(cmpeq),
2908 SSE_FOP(cmplt),
2909 SSE_FOP(cmple),
2910 SSE_FOP(cmpunord),
2911 SSE_FOP(cmpneq),
2912 SSE_FOP(cmpnlt),
2913 SSE_FOP(cmpnle),
2914 SSE_FOP(cmpord),
2915};
3b46e624 2916
d3eb5eae 2917static const SSEFunc_0_epp sse_op_table5[256] = {
a7812ae4
PB
2918 [0x0c] = gen_helper_pi2fw,
2919 [0x0d] = gen_helper_pi2fd,
2920 [0x1c] = gen_helper_pf2iw,
2921 [0x1d] = gen_helper_pf2id,
2922 [0x8a] = gen_helper_pfnacc,
2923 [0x8e] = gen_helper_pfpnacc,
2924 [0x90] = gen_helper_pfcmpge,
2925 [0x94] = gen_helper_pfmin,
2926 [0x96] = gen_helper_pfrcp,
2927 [0x97] = gen_helper_pfrsqrt,
2928 [0x9a] = gen_helper_pfsub,
2929 [0x9e] = gen_helper_pfadd,
2930 [0xa0] = gen_helper_pfcmpgt,
2931 [0xa4] = gen_helper_pfmax,
2932 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2933 [0xa7] = gen_helper_movq, /* pfrsqit1 */
2934 [0xaa] = gen_helper_pfsubr,
2935 [0xae] = gen_helper_pfacc,
2936 [0xb0] = gen_helper_pfcmpeq,
2937 [0xb4] = gen_helper_pfmul,
2938 [0xb6] = gen_helper_movq, /* pfrcpit2 */
2939 [0xb7] = gen_helper_pmulhrw_mmx,
2940 [0xbb] = gen_helper_pswapd,
2941 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
a35f3ec7
AJ
2942};
2943
d3eb5eae
BS
2944struct SSEOpHelper_epp {
2945 SSEFunc_0_epp op[2];
c4baa050
BS
2946 uint32_t ext_mask;
2947};
2948
d3eb5eae
BS
2949struct SSEOpHelper_eppi {
2950 SSEFunc_0_eppi op[2];
c4baa050 2951 uint32_t ext_mask;
222a3336 2952};
c4baa050 2953
222a3336 2954#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
a7812ae4
PB
2955#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
2956#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
222a3336 2957#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
e71827bc
AJ
2958#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
2959 CPUID_EXT_PCLMULQDQ }
d640045a 2960#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
c4baa050 2961
d3eb5eae 2962static const struct SSEOpHelper_epp sse_op_table6[256] = {
222a3336
AZ
2963 [0x00] = SSSE3_OP(pshufb),
2964 [0x01] = SSSE3_OP(phaddw),
2965 [0x02] = SSSE3_OP(phaddd),
2966 [0x03] = SSSE3_OP(phaddsw),
2967 [0x04] = SSSE3_OP(pmaddubsw),
2968 [0x05] = SSSE3_OP(phsubw),
2969 [0x06] = SSSE3_OP(phsubd),
2970 [0x07] = SSSE3_OP(phsubsw),
2971 [0x08] = SSSE3_OP(psignb),
2972 [0x09] = SSSE3_OP(psignw),
2973 [0x0a] = SSSE3_OP(psignd),
2974 [0x0b] = SSSE3_OP(pmulhrsw),
2975 [0x10] = SSE41_OP(pblendvb),
2976 [0x14] = SSE41_OP(blendvps),
2977 [0x15] = SSE41_OP(blendvpd),
2978 [0x17] = SSE41_OP(ptest),
2979 [0x1c] = SSSE3_OP(pabsb),
2980 [0x1d] = SSSE3_OP(pabsw),
2981 [0x1e] = SSSE3_OP(pabsd),
2982 [0x20] = SSE41_OP(pmovsxbw),
2983 [0x21] = SSE41_OP(pmovsxbd),
2984 [0x22] = SSE41_OP(pmovsxbq),
2985 [0x23] = SSE41_OP(pmovsxwd),
2986 [0x24] = SSE41_OP(pmovsxwq),
2987 [0x25] = SSE41_OP(pmovsxdq),
2988 [0x28] = SSE41_OP(pmuldq),
2989 [0x29] = SSE41_OP(pcmpeqq),
2990 [0x2a] = SSE41_SPECIAL, /* movntdqa */
2991 [0x2b] = SSE41_OP(packusdw),
2992 [0x30] = SSE41_OP(pmovzxbw),
2993 [0x31] = SSE41_OP(pmovzxbd),
2994 [0x32] = SSE41_OP(pmovzxbq),
2995 [0x33] = SSE41_OP(pmovzxwd),
2996 [0x34] = SSE41_OP(pmovzxwq),
2997 [0x35] = SSE41_OP(pmovzxdq),
2998 [0x37] = SSE42_OP(pcmpgtq),
2999 [0x38] = SSE41_OP(pminsb),
3000 [0x39] = SSE41_OP(pminsd),
3001 [0x3a] = SSE41_OP(pminuw),
3002 [0x3b] = SSE41_OP(pminud),
3003 [0x3c] = SSE41_OP(pmaxsb),
3004 [0x3d] = SSE41_OP(pmaxsd),
3005 [0x3e] = SSE41_OP(pmaxuw),
3006 [0x3f] = SSE41_OP(pmaxud),
3007 [0x40] = SSE41_OP(pmulld),
3008 [0x41] = SSE41_OP(phminposuw),
d640045a
AJ
3009 [0xdb] = AESNI_OP(aesimc),
3010 [0xdc] = AESNI_OP(aesenc),
3011 [0xdd] = AESNI_OP(aesenclast),
3012 [0xde] = AESNI_OP(aesdec),
3013 [0xdf] = AESNI_OP(aesdeclast),
4242b1bd
AZ
3014};
3015
d3eb5eae 3016static const struct SSEOpHelper_eppi sse_op_table7[256] = {
222a3336
AZ
3017 [0x08] = SSE41_OP(roundps),
3018 [0x09] = SSE41_OP(roundpd),
3019 [0x0a] = SSE41_OP(roundss),
3020 [0x0b] = SSE41_OP(roundsd),
3021 [0x0c] = SSE41_OP(blendps),
3022 [0x0d] = SSE41_OP(blendpd),
3023 [0x0e] = SSE41_OP(pblendw),
3024 [0x0f] = SSSE3_OP(palignr),
3025 [0x14] = SSE41_SPECIAL, /* pextrb */
3026 [0x15] = SSE41_SPECIAL, /* pextrw */
3027 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
3028 [0x17] = SSE41_SPECIAL, /* extractps */
3029 [0x20] = SSE41_SPECIAL, /* pinsrb */
3030 [0x21] = SSE41_SPECIAL, /* insertps */
3031 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
3032 [0x40] = SSE41_OP(dpps),
3033 [0x41] = SSE41_OP(dppd),
3034 [0x42] = SSE41_OP(mpsadbw),
e71827bc 3035 [0x44] = PCLMULQDQ_OP(pclmulqdq),
222a3336
AZ
3036 [0x60] = SSE42_OP(pcmpestrm),
3037 [0x61] = SSE42_OP(pcmpestri),
3038 [0x62] = SSE42_OP(pcmpistrm),
3039 [0x63] = SSE42_OP(pcmpistri),
d640045a 3040 [0xdf] = AESNI_OP(aeskeygenassist),
4242b1bd
AZ
3041};
3042
0af10c86
BS
3043static void gen_sse(CPUX86State *env, DisasContext *s, int b,
3044 target_ulong pc_start, int rex_r)
664e0f19
FB
3045{
3046 int b1, op1_offset, op2_offset, is_xmm, val, ot;
4eeb3939 3047 int modrm, mod, rm, reg;
d3eb5eae
BS
3048 SSEFunc_0_epp sse_fn_epp;
3049 SSEFunc_0_eppi sse_fn_eppi;
c4baa050 3050 SSEFunc_0_ppi sse_fn_ppi;
d3eb5eae 3051 SSEFunc_0_eppt sse_fn_eppt;
664e0f19
FB
3052
3053 b &= 0xff;
5fafdf24 3054 if (s->prefix & PREFIX_DATA)
664e0f19 3055 b1 = 1;
5fafdf24 3056 else if (s->prefix & PREFIX_REPZ)
664e0f19 3057 b1 = 2;
5fafdf24 3058 else if (s->prefix & PREFIX_REPNZ)
664e0f19
FB
3059 b1 = 3;
3060 else
3061 b1 = 0;
d3eb5eae
BS
3062 sse_fn_epp = sse_op_table1[b][b1];
3063 if (!sse_fn_epp) {
664e0f19 3064 goto illegal_op;
c4baa050 3065 }
a35f3ec7 3066 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
664e0f19
FB
3067 is_xmm = 1;
3068 } else {
3069 if (b1 == 0) {
3070 /* MMX case */
3071 is_xmm = 0;
3072 } else {
3073 is_xmm = 1;
3074 }
3075 }
3076 /* simple MMX/SSE operation */
3077 if (s->flags & HF_TS_MASK) {
3078 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3079 return;
3080 }
3081 if (s->flags & HF_EM_MASK) {
3082 illegal_op:
3083 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3084 return;
3085 }
3086 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
4242b1bd
AZ
3087 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3088 goto illegal_op;
e771edab
AJ
3089 if (b == 0x0e) {
3090 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3091 goto illegal_op;
3092 /* femms */
d3eb5eae 3093 gen_helper_emms(cpu_env);
e771edab
AJ
3094 return;
3095 }
3096 if (b == 0x77) {
3097 /* emms */
d3eb5eae 3098 gen_helper_emms(cpu_env);
664e0f19
FB
3099 return;
3100 }
3101 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3102 the static cpu state) */
3103 if (!is_xmm) {
d3eb5eae 3104 gen_helper_enter_mmx(cpu_env);
664e0f19
FB
3105 }
3106
0af10c86 3107 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3108 reg = ((modrm >> 3) & 7);
3109 if (is_xmm)
3110 reg |= rex_r;
3111 mod = (modrm >> 6) & 3;
d3eb5eae 3112 if (sse_fn_epp == SSE_SPECIAL) {
664e0f19
FB
3113 b |= (b1 << 8);
3114 switch(b) {
3115 case 0x0e7: /* movntq */
5fafdf24 3116 if (mod == 3)
664e0f19 3117 goto illegal_op;
4eeb3939 3118 gen_lea_modrm(env, s, modrm);
323d1876 3119 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
664e0f19
FB
3120 break;
3121 case 0x1e7: /* movntdq */
3122 case 0x02b: /* movntps */
3123 case 0x12b: /* movntpd */
2e21e749
T
3124 if (mod == 3)
3125 goto illegal_op;
4eeb3939 3126 gen_lea_modrm(env, s, modrm);
323d1876 3127 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
2e21e749 3128 break;
465e9838
FB
3129 case 0x3f0: /* lddqu */
3130 if (mod == 3)
664e0f19 3131 goto illegal_op;
4eeb3939 3132 gen_lea_modrm(env, s, modrm);
323d1876 3133 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19 3134 break;
d9f4bb27
AP
3135 case 0x22b: /* movntss */
3136 case 0x32b: /* movntsd */
3137 if (mod == 3)
3138 goto illegal_op;
4eeb3939 3139 gen_lea_modrm(env, s, modrm);
d9f4bb27 3140 if (b1 & 1) {
323d1876 3141 gen_stq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
d9f4bb27
AP
3142 } else {
3143 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3144 xmm_regs[reg].XMM_L(0)));
fd8ca9f6 3145 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
d9f4bb27
AP
3146 }
3147 break;
664e0f19 3148 case 0x6e: /* movd mm, ea */
dabd98dd
FB
3149#ifdef TARGET_X86_64
3150 if (s->dflag == 2) {
4ba9938c 3151 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
5af45186 3152 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
5fafdf24 3153 } else
dabd98dd
FB
3154#endif
3155 {
4ba9938c 3156 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
5af45186
FB
3157 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3158 offsetof(CPUX86State,fpregs[reg].mmx));
a7812ae4
PB
3159 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3160 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
dabd98dd 3161 }
664e0f19
FB
3162 break;
3163 case 0x16e: /* movd xmm, ea */
dabd98dd
FB
3164#ifdef TARGET_X86_64
3165 if (s->dflag == 2) {
4ba9938c 3166 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
5af45186
FB
3167 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3168 offsetof(CPUX86State,xmm_regs[reg]));
a7812ae4 3169 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
5fafdf24 3170 } else
dabd98dd
FB
3171#endif
3172 {
4ba9938c 3173 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
5af45186
FB
3174 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3175 offsetof(CPUX86State,xmm_regs[reg]));
b6abf97d 3176 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
a7812ae4 3177 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
dabd98dd 3178 }
664e0f19
FB
3179 break;
3180 case 0x6f: /* movq mm, ea */
3181 if (mod != 3) {
4eeb3939 3182 gen_lea_modrm(env, s, modrm);
323d1876 3183 gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
664e0f19
FB
3184 } else {
3185 rm = (modrm & 7);
b6abf97d 3186 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
5af45186 3187 offsetof(CPUX86State,fpregs[rm].mmx));
b6abf97d 3188 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
5af45186 3189 offsetof(CPUX86State,fpregs[reg].mmx));
664e0f19
FB
3190 }
3191 break;
3192 case 0x010: /* movups */
3193 case 0x110: /* movupd */
3194 case 0x028: /* movaps */
3195 case 0x128: /* movapd */
3196 case 0x16f: /* movdqa xmm, ea */
3197 case 0x26f: /* movdqu xmm, ea */
3198 if (mod != 3) {
4eeb3939 3199 gen_lea_modrm(env, s, modrm);
323d1876 3200 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19
FB
3201 } else {
3202 rm = (modrm & 7) | REX_B(s);
3203 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3204 offsetof(CPUX86State,xmm_regs[rm]));
3205 }
3206 break;
3207 case 0x210: /* movss xmm, ea */
3208 if (mod != 3) {
4eeb3939 3209 gen_lea_modrm(env, s, modrm);
909be183 3210 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
651ba608 3211 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
97212c88 3212 tcg_gen_movi_tl(cpu_T[0], 0);
651ba608
FB
3213 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3214 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3215 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
664e0f19
FB
3216 } else {
3217 rm = (modrm & 7) | REX_B(s);
3218 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3219 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3220 }
3221 break;
3222 case 0x310: /* movsd xmm, ea */
3223 if (mod != 3) {
4eeb3939 3224 gen_lea_modrm(env, s, modrm);
323d1876
RH
3225 gen_ldq_env_A0(s, offsetof(CPUX86State,
3226 xmm_regs[reg].XMM_Q(0)));
97212c88 3227 tcg_gen_movi_tl(cpu_T[0], 0);
651ba608
FB
3228 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3229 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
664e0f19
FB
3230 } else {
3231 rm = (modrm & 7) | REX_B(s);
3232 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3233 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3234 }
3235 break;
3236 case 0x012: /* movlps */
3237 case 0x112: /* movlpd */
3238 if (mod != 3) {
4eeb3939 3239 gen_lea_modrm(env, s, modrm);
323d1876
RH
3240 gen_ldq_env_A0(s, offsetof(CPUX86State,
3241 xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3242 } else {
3243 /* movhlps */
3244 rm = (modrm & 7) | REX_B(s);
3245 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3246 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3247 }
3248 break;
465e9838
FB
3249 case 0x212: /* movsldup */
3250 if (mod != 3) {
4eeb3939 3251 gen_lea_modrm(env, s, modrm);
323d1876 3252 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
465e9838
FB
3253 } else {
3254 rm = (modrm & 7) | REX_B(s);
3255 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3256 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3257 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3258 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3259 }
3260 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3261 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3262 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3263 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3264 break;
3265 case 0x312: /* movddup */
3266 if (mod != 3) {
4eeb3939 3267 gen_lea_modrm(env, s, modrm);
323d1876
RH
3268 gen_ldq_env_A0(s, offsetof(CPUX86State,
3269 xmm_regs[reg].XMM_Q(0)));
465e9838
FB
3270 } else {
3271 rm = (modrm & 7) | REX_B(s);
3272 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3273 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3274 }
3275 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
ba6526df 3276 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
465e9838 3277 break;
664e0f19
FB
3278 case 0x016: /* movhps */
3279 case 0x116: /* movhpd */
3280 if (mod != 3) {
4eeb3939 3281 gen_lea_modrm(env, s, modrm);
323d1876
RH
3282 gen_ldq_env_A0(s, offsetof(CPUX86State,
3283 xmm_regs[reg].XMM_Q(1)));
664e0f19
FB
3284 } else {
3285 /* movlhps */
3286 rm = (modrm & 7) | REX_B(s);
3287 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3288 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3289 }
3290 break;
3291 case 0x216: /* movshdup */
3292 if (mod != 3) {
4eeb3939 3293 gen_lea_modrm(env, s, modrm);
323d1876 3294 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19
FB
3295 } else {
3296 rm = (modrm & 7) | REX_B(s);
3297 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3298 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3299 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3300 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3301 }
3302 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3303 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3304 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3305 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3306 break;
d9f4bb27
AP
3307 case 0x178:
3308 case 0x378:
3309 {
3310 int bit_index, field_length;
3311
3312 if (b1 == 1 && reg != 0)
3313 goto illegal_op;
0af10c86
BS
3314 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3315 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
d9f4bb27
AP
3316 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3317 offsetof(CPUX86State,xmm_regs[reg]));
3318 if (b1 == 1)
d3eb5eae
BS
3319 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3320 tcg_const_i32(bit_index),
3321 tcg_const_i32(field_length));
d9f4bb27 3322 else
d3eb5eae
BS
3323 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3324 tcg_const_i32(bit_index),
3325 tcg_const_i32(field_length));
d9f4bb27
AP
3326 }
3327 break;
664e0f19 3328 case 0x7e: /* movd ea, mm */
dabd98dd
FB
3329#ifdef TARGET_X86_64
3330 if (s->dflag == 2) {
5af45186
FB
3331 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3332 offsetof(CPUX86State,fpregs[reg].mmx));
4ba9938c 3333 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
5fafdf24 3334 } else
dabd98dd
FB
3335#endif
3336 {
5af45186
FB
3337 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3338 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
4ba9938c 3339 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
dabd98dd 3340 }
664e0f19
FB
3341 break;
3342 case 0x17e: /* movd ea, xmm */
dabd98dd
FB
3343#ifdef TARGET_X86_64
3344 if (s->dflag == 2) {
5af45186
FB
3345 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3346 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
4ba9938c 3347 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
5fafdf24 3348 } else
dabd98dd
FB
3349#endif
3350 {
5af45186
FB
3351 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3352 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
4ba9938c 3353 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
dabd98dd 3354 }
664e0f19
FB
3355 break;
3356 case 0x27e: /* movq xmm, ea */
3357 if (mod != 3) {
4eeb3939 3358 gen_lea_modrm(env, s, modrm);
323d1876
RH
3359 gen_ldq_env_A0(s, offsetof(CPUX86State,
3360 xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3361 } else {
3362 rm = (modrm & 7) | REX_B(s);
3363 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3364 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3365 }
3366 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3367 break;
3368 case 0x7f: /* movq ea, mm */
3369 if (mod != 3) {
4eeb3939 3370 gen_lea_modrm(env, s, modrm);
323d1876 3371 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
664e0f19
FB
3372 } else {
3373 rm = (modrm & 7);
3374 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3375 offsetof(CPUX86State,fpregs[reg].mmx));
3376 }
3377 break;
3378 case 0x011: /* movups */
3379 case 0x111: /* movupd */
3380 case 0x029: /* movaps */
3381 case 0x129: /* movapd */
3382 case 0x17f: /* movdqa ea, xmm */
3383 case 0x27f: /* movdqu ea, xmm */
3384 if (mod != 3) {
4eeb3939 3385 gen_lea_modrm(env, s, modrm);
323d1876 3386 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19
FB
3387 } else {
3388 rm = (modrm & 7) | REX_B(s);
3389 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3390 offsetof(CPUX86State,xmm_regs[reg]));
3391 }
3392 break;
3393 case 0x211: /* movss ea, xmm */
3394 if (mod != 3) {
4eeb3939 3395 gen_lea_modrm(env, s, modrm);
651ba608 3396 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
fd8ca9f6 3397 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
664e0f19
FB
3398 } else {
3399 rm = (modrm & 7) | REX_B(s);
3400 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3401 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3402 }
3403 break;
3404 case 0x311: /* movsd ea, xmm */
3405 if (mod != 3) {
4eeb3939 3406 gen_lea_modrm(env, s, modrm);
323d1876
RH
3407 gen_stq_env_A0(s, offsetof(CPUX86State,
3408 xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3409 } else {
3410 rm = (modrm & 7) | REX_B(s);
3411 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3412 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3413 }
3414 break;
3415 case 0x013: /* movlps */
3416 case 0x113: /* movlpd */
3417 if (mod != 3) {
4eeb3939 3418 gen_lea_modrm(env, s, modrm);
323d1876
RH
3419 gen_stq_env_A0(s, offsetof(CPUX86State,
3420 xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3421 } else {
3422 goto illegal_op;
3423 }
3424 break;
3425 case 0x017: /* movhps */
3426 case 0x117: /* movhpd */
3427 if (mod != 3) {
4eeb3939 3428 gen_lea_modrm(env, s, modrm);
323d1876
RH
3429 gen_stq_env_A0(s, offsetof(CPUX86State,
3430 xmm_regs[reg].XMM_Q(1)));
664e0f19
FB
3431 } else {
3432 goto illegal_op;
3433 }
3434 break;
3435 case 0x71: /* shift mm, im */
3436 case 0x72:
3437 case 0x73:
3438 case 0x171: /* shift xmm, im */
3439 case 0x172:
3440 case 0x173:
c045af25
AK
3441 if (b1 >= 2) {
3442 goto illegal_op;
3443 }
0af10c86 3444 val = cpu_ldub_code(env, s->pc++);
664e0f19 3445 if (is_xmm) {
1b90d56e 3446 tcg_gen_movi_tl(cpu_T[0], val);
651ba608 3447 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
97212c88 3448 tcg_gen_movi_tl(cpu_T[0], 0);
651ba608 3449 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
664e0f19
FB
3450 op1_offset = offsetof(CPUX86State,xmm_t0);
3451 } else {
1b90d56e 3452 tcg_gen_movi_tl(cpu_T[0], val);
651ba608 3453 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
97212c88 3454 tcg_gen_movi_tl(cpu_T[0], 0);
651ba608 3455 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
664e0f19
FB
3456 op1_offset = offsetof(CPUX86State,mmx_t0);
3457 }
d3eb5eae
BS
3458 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3459 (((modrm >> 3)) & 7)][b1];
3460 if (!sse_fn_epp) {
664e0f19 3461 goto illegal_op;
c4baa050 3462 }
664e0f19
FB
3463 if (is_xmm) {
3464 rm = (modrm & 7) | REX_B(s);
3465 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3466 } else {
3467 rm = (modrm & 7);
3468 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3469 }
5af45186
FB
3470 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3471 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
d3eb5eae 3472 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3473 break;
3474 case 0x050: /* movmskps */
664e0f19 3475 rm = (modrm & 7) | REX_B(s);
5af45186
FB
3476 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3477 offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3478 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
a7fbcbe5 3479 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
664e0f19
FB
3480 break;
3481 case 0x150: /* movmskpd */
664e0f19 3482 rm = (modrm & 7) | REX_B(s);
5af45186
FB
3483 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3484 offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3485 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
a7fbcbe5 3486 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
664e0f19
FB
3487 break;
3488 case 0x02a: /* cvtpi2ps */
3489 case 0x12a: /* cvtpi2pd */
d3eb5eae 3490 gen_helper_enter_mmx(cpu_env);
664e0f19 3491 if (mod != 3) {
4eeb3939 3492 gen_lea_modrm(env, s, modrm);
664e0f19 3493 op2_offset = offsetof(CPUX86State,mmx_t0);
323d1876 3494 gen_ldq_env_A0(s, op2_offset);
664e0f19
FB
3495 } else {
3496 rm = (modrm & 7);
3497 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3498 }
3499 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
5af45186
FB
3500 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3501 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
664e0f19
FB
3502 switch(b >> 8) {
3503 case 0x0:
d3eb5eae 3504 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3505 break;
3506 default:
3507 case 0x1:
d3eb5eae 3508 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3509 break;
3510 }
3511 break;
3512 case 0x22a: /* cvtsi2ss */
3513 case 0x32a: /* cvtsi2sd */
4ba9938c 3514 ot = (s->dflag == 2) ? MO_64 : MO_32;
0af10c86 3515 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
664e0f19 3516 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
5af45186 3517 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4ba9938c 3518 if (ot == MO_32) {
d3eb5eae 3519 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
28e10711 3520 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 3521 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
28e10711 3522 } else {
11f8cdbc 3523#ifdef TARGET_X86_64
d3eb5eae
BS
3524 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3525 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
11f8cdbc
SW
3526#else
3527 goto illegal_op;
3528#endif
28e10711 3529 }
664e0f19
FB
3530 break;
3531 case 0x02c: /* cvttps2pi */
3532 case 0x12c: /* cvttpd2pi */
3533 case 0x02d: /* cvtps2pi */
3534 case 0x12d: /* cvtpd2pi */
d3eb5eae 3535 gen_helper_enter_mmx(cpu_env);
664e0f19 3536 if (mod != 3) {
4eeb3939 3537 gen_lea_modrm(env, s, modrm);
664e0f19 3538 op2_offset = offsetof(CPUX86State,xmm_t0);
323d1876 3539 gen_ldo_env_A0(s, op2_offset);
664e0f19
FB
3540 } else {
3541 rm = (modrm & 7) | REX_B(s);
3542 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3543 }
3544 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
5af45186
FB
3545 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3546 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
664e0f19
FB
3547 switch(b) {
3548 case 0x02c:
d3eb5eae 3549 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3550 break;
3551 case 0x12c:
d3eb5eae 3552 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3553 break;
3554 case 0x02d:
d3eb5eae 3555 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3556 break;
3557 case 0x12d:
d3eb5eae 3558 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3559 break;
3560 }
3561 break;
3562 case 0x22c: /* cvttss2si */
3563 case 0x32c: /* cvttsd2si */
3564 case 0x22d: /* cvtss2si */
3565 case 0x32d: /* cvtsd2si */
4ba9938c 3566 ot = (s->dflag == 2) ? MO_64 : MO_32;
31313213 3567 if (mod != 3) {
4eeb3939 3568 gen_lea_modrm(env, s, modrm);
31313213 3569 if ((b >> 8) & 1) {
323d1876 3570 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_Q(0)));
31313213 3571 } else {
909be183 3572 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
651ba608 3573 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
31313213
FB
3574 }
3575 op2_offset = offsetof(CPUX86State,xmm_t0);
3576 } else {
3577 rm = (modrm & 7) | REX_B(s);
3578 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3579 }
5af45186 3580 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
4ba9938c 3581 if (ot == MO_32) {
d3eb5eae 3582 SSEFunc_i_ep sse_fn_i_ep =
bedc2ac1 3583 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
d3eb5eae 3584 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
b6abf97d 3585 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5af45186 3586 } else {
11f8cdbc 3587#ifdef TARGET_X86_64
d3eb5eae 3588 SSEFunc_l_ep sse_fn_l_ep =
bedc2ac1 3589 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
d3eb5eae 3590 sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
11f8cdbc
SW
3591#else
3592 goto illegal_op;
3593#endif
5af45186 3594 }
57fec1fe 3595 gen_op_mov_reg_T0(ot, reg);
664e0f19
FB
3596 break;
3597 case 0xc4: /* pinsrw */
5fafdf24 3598 case 0x1c4:
d1e42c5c 3599 s->rip_offset = 1;
4ba9938c 3600 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
0af10c86 3601 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3602 if (b1) {
3603 val &= 7;
5af45186
FB
3604 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3605 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
664e0f19
FB
3606 } else {
3607 val &= 3;
5af45186
FB
3608 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3609 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
664e0f19
FB
3610 }
3611 break;
3612 case 0xc5: /* pextrw */
5fafdf24 3613 case 0x1c5:
664e0f19
FB
3614 if (mod != 3)
3615 goto illegal_op;
4ba9938c 3616 ot = (s->dflag == 2) ? MO_64 : MO_32;
0af10c86 3617 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3618 if (b1) {
3619 val &= 7;
3620 rm = (modrm & 7) | REX_B(s);
5af45186
FB
3621 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3622 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
664e0f19
FB
3623 } else {
3624 val &= 3;
3625 rm = (modrm & 7);
5af45186
FB
3626 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3627 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
664e0f19
FB
3628 }
3629 reg = ((modrm >> 3) & 7) | rex_r;
6dc2d0da 3630 gen_op_mov_reg_T0(ot, reg);
664e0f19
FB
3631 break;
3632 case 0x1d6: /* movq ea, xmm */
3633 if (mod != 3) {
4eeb3939 3634 gen_lea_modrm(env, s, modrm);
323d1876
RH
3635 gen_stq_env_A0(s, offsetof(CPUX86State,
3636 xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3637 } else {
3638 rm = (modrm & 7) | REX_B(s);
3639 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3640 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3641 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3642 }
3643 break;
3644 case 0x2d6: /* movq2dq */
d3eb5eae 3645 gen_helper_enter_mmx(cpu_env);
480c1cdb
FB
3646 rm = (modrm & 7);
3647 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3648 offsetof(CPUX86State,fpregs[rm].mmx));
3649 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
664e0f19
FB
3650 break;
3651 case 0x3d6: /* movdq2q */
d3eb5eae 3652 gen_helper_enter_mmx(cpu_env);
480c1cdb
FB
3653 rm = (modrm & 7) | REX_B(s);
3654 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3655 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
664e0f19
FB
3656 break;
3657 case 0xd7: /* pmovmskb */
3658 case 0x1d7:
3659 if (mod != 3)
3660 goto illegal_op;
3661 if (b1) {
3662 rm = (modrm & 7) | REX_B(s);
5af45186 3663 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3664 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
664e0f19
FB
3665 } else {
3666 rm = (modrm & 7);
5af45186 3667 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
d3eb5eae 3668 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
664e0f19
FB
3669 }
3670 reg = ((modrm >> 3) & 7) | rex_r;
a7fbcbe5 3671 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
664e0f19 3672 break;
111994ee 3673
4242b1bd 3674 case 0x138:
000cacf6 3675 case 0x038:
4242b1bd 3676 b = modrm;
111994ee
RH
3677 if ((b & 0xf0) == 0xf0) {
3678 goto do_0f_38_fx;
3679 }
0af10c86 3680 modrm = cpu_ldub_code(env, s->pc++);
4242b1bd
AZ
3681 rm = modrm & 7;
3682 reg = ((modrm >> 3) & 7) | rex_r;
3683 mod = (modrm >> 6) & 3;
c045af25
AK
3684 if (b1 >= 2) {
3685 goto illegal_op;
3686 }
4242b1bd 3687
d3eb5eae
BS
3688 sse_fn_epp = sse_op_table6[b].op[b1];
3689 if (!sse_fn_epp) {
4242b1bd 3690 goto illegal_op;
c4baa050 3691 }
222a3336
AZ
3692 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3693 goto illegal_op;
4242b1bd
AZ
3694
3695 if (b1) {
3696 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3697 if (mod == 3) {
3698 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3699 } else {
3700 op2_offset = offsetof(CPUX86State,xmm_t0);
4eeb3939 3701 gen_lea_modrm(env, s, modrm);
222a3336
AZ
3702 switch (b) {
3703 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3704 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3705 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
323d1876 3706 gen_ldq_env_A0(s, op2_offset +
222a3336
AZ
3707 offsetof(XMMReg, XMM_Q(0)));
3708 break;
3709 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3710 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3c5f4116
RH
3711 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
3712 s->mem_index, MO_LEUL);
222a3336
AZ
3713 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3714 offsetof(XMMReg, XMM_L(0)));
3715 break;
3716 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3c5f4116
RH
3717 tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
3718 s->mem_index, MO_LEUW);
222a3336
AZ
3719 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3720 offsetof(XMMReg, XMM_W(0)));
3721 break;
3722 case 0x2a: /* movntdqa */
323d1876 3723 gen_ldo_env_A0(s, op1_offset);
222a3336
AZ
3724 return;
3725 default:
323d1876 3726 gen_ldo_env_A0(s, op2_offset);
222a3336 3727 }
4242b1bd
AZ
3728 }
3729 } else {
3730 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3731 if (mod == 3) {
3732 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3733 } else {
3734 op2_offset = offsetof(CPUX86State,mmx_t0);
4eeb3939 3735 gen_lea_modrm(env, s, modrm);
323d1876 3736 gen_ldq_env_A0(s, op2_offset);
4242b1bd
AZ
3737 }
3738 }
d3eb5eae 3739 if (sse_fn_epp == SSE_SPECIAL) {
222a3336 3740 goto illegal_op;
c4baa050 3741 }
222a3336 3742
4242b1bd
AZ
3743 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3744 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 3745 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
222a3336 3746
3ca51d07
RH
3747 if (b == 0x17) {
3748 set_cc_op(s, CC_OP_EFLAGS);
3749 }
4242b1bd 3750 break;
111994ee
RH
3751
3752 case 0x238:
3753 case 0x338:
3754 do_0f_38_fx:
3755 /* Various integer extensions at 0f 38 f[0-f]. */
3756 b = modrm | (b1 << 8);
0af10c86 3757 modrm = cpu_ldub_code(env, s->pc++);
222a3336
AZ
3758 reg = ((modrm >> 3) & 7) | rex_r;
3759
111994ee
RH
3760 switch (b) {
3761 case 0x3f0: /* crc32 Gd,Eb */
3762 case 0x3f1: /* crc32 Gd,Ey */
3763 do_crc32:
3764 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3765 goto illegal_op;
3766 }
3767 if ((b & 0xff) == 0xf0) {
4ba9938c 3768 ot = MO_8;
111994ee 3769 } else if (s->dflag != 2) {
4ba9938c 3770 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
111994ee 3771 } else {
4ba9938c 3772 ot = MO_64;
111994ee 3773 }
4242b1bd 3774
24b9c00f 3775 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
111994ee
RH
3776 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3777 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3778 cpu_T[0], tcg_const_i32(8 << ot));
222a3336 3779
4ba9938c 3780 ot = (s->dflag == 2) ? MO_64 : MO_32;
111994ee
RH
3781 gen_op_mov_reg_T0(ot, reg);
3782 break;
222a3336 3783
111994ee
RH
3784 case 0x1f0: /* crc32 or movbe */
3785 case 0x1f1:
3786 /* For these insns, the f3 prefix is supposed to take priority
3787 over the 66 prefix, but that is not what the computation of b1
3788 above implements. */
3789 if (s->prefix & PREFIX_REPNZ) {
3790 goto do_crc32;
3791 }
3792 /* FALLTHRU */
3793 case 0x0f0: /* movbe Gy,My */
3794 case 0x0f1: /* movbe My,Gy */
3795 if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3796 goto illegal_op;
3797 }
3798 if (s->dflag != 2) {
4ba9938c 3799 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
111994ee 3800 } else {
4ba9938c 3801 ot = MO_64;
111994ee
RH
3802 }
3803
3655a19f 3804 gen_lea_modrm(env, s, modrm);
111994ee 3805 if ((b & 1) == 0) {
3655a19f
RH
3806 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
3807 s->mem_index, ot | MO_BE);
111994ee
RH
3808 gen_op_mov_reg_T0(ot, reg);
3809 } else {
3655a19f
RH
3810 tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
3811 s->mem_index, ot | MO_BE);
111994ee
RH
3812 }
3813 break;
3814
7073fbad
RH
3815 case 0x0f2: /* andn Gy, By, Ey */
3816 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3817 || !(s->prefix & PREFIX_VEX)
3818 || s->vex_l != 0) {
3819 goto illegal_op;
3820 }
4ba9938c 3821 ot = s->dflag == 2 ? MO_64 : MO_32;
7073fbad
RH
3822 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3823 tcg_gen_andc_tl(cpu_T[0], cpu_regs[s->vex_v], cpu_T[0]);
3824 gen_op_mov_reg_T0(ot, reg);
3825 gen_op_update1_cc();
3826 set_cc_op(s, CC_OP_LOGICB + ot);
3827 break;
3828
c7ab7565
RH
3829 case 0x0f7: /* bextr Gy, Ey, By */
3830 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3831 || !(s->prefix & PREFIX_VEX)
3832 || s->vex_l != 0) {
3833 goto illegal_op;
3834 }
4ba9938c 3835 ot = s->dflag == 2 ? MO_64 : MO_32;
c7ab7565
RH
3836 {
3837 TCGv bound, zero;
3838
3839 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3840 /* Extract START, and shift the operand.
3841 Shifts larger than operand size get zeros. */
3842 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
3843 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_A0);
3844
4ba9938c 3845 bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
c7ab7565
RH
3846 zero = tcg_const_tl(0);
3847 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T[0], cpu_A0, bound,
3848 cpu_T[0], zero);
3849 tcg_temp_free(zero);
3850
3851 /* Extract the LEN into a mask. Lengths larger than
3852 operand size get all ones. */
3853 tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
3854 tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
3855 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
3856 cpu_A0, bound);
3857 tcg_temp_free(bound);
3858 tcg_gen_movi_tl(cpu_T[1], 1);
3859 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_A0);
3860 tcg_gen_subi_tl(cpu_T[1], cpu_T[1], 1);
3861 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3862
3863 gen_op_mov_reg_T0(ot, reg);
3864 gen_op_update1_cc();
3865 set_cc_op(s, CC_OP_LOGICB + ot);
3866 }
3867 break;
3868
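            /* Editorial sketch, not part of the original source.  For in-range
               control values (start, len < operand size) BEXTR computes
                   start = ctrl & 0xff;            (low byte of the vvvv reg)
                   len   = (ctrl >> 8) & 0xff;     (second byte)
                   dst   = (src >> start) & (((target_ulong)1 << len) - 1);
               the movcond sequences above additionally clamp out-of-range
               start/len values. */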
02ea1e6b
RH
3869 case 0x0f5: /* bzhi Gy, Ey, By */
3870 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3871 || !(s->prefix & PREFIX_VEX)
3872 || s->vex_l != 0) {
3873 goto illegal_op;
3874 }
4ba9938c 3875 ot = s->dflag == 2 ? MO_64 : MO_32;
02ea1e6b
RH
3876 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3877 tcg_gen_ext8u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3878 {
4ba9938c 3879 TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
02ea1e6b
RH
3880 /* Note that since we're using BMILG (in order to get O
3881 cleared) we need to store the inverse into C. */
3882 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
3883 cpu_T[1], bound);
3884 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T[1], cpu_T[1],
3885 bound, bound, cpu_T[1]);
3886 tcg_temp_free(bound);
3887 }
3888 tcg_gen_movi_tl(cpu_A0, -1);
3889 tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T[1]);
3890 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_A0);
3891 gen_op_mov_reg_T0(ot, reg);
3892 gen_op_update1_cc();
3893 set_cc_op(s, CC_OP_BMILGB + ot);
3894 break;
3895
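            /* Editorial sketch, not part of the original source.  BZHI keeps
               the low n = src2[7:0] bits of the first operand:
                   dst = (n < opsize) ? src & (((target_ulong)1 << n) - 1) : src;
                   CF  = (n > opsize - 1);
               roughly what the setcond/movcond pair above arranges; the
               stored carry is inverted, as the comment notes. */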
5f1f4b17
RH
3896 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3897 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3898 || !(s->prefix & PREFIX_VEX)
3899 || s->vex_l != 0) {
3900 goto illegal_op;
3901 }
4ba9938c 3902 ot = s->dflag == 2 ? MO_64 : MO_32;
5f1f4b17
RH
3903 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3904 switch (ot) {
5f1f4b17 3905 default:
a4bcea3d
RH
3906 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3907 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
3908 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
3909 cpu_tmp2_i32, cpu_tmp3_i32);
3910 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
3911 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
5f1f4b17
RH
3912 break;
3913#ifdef TARGET_X86_64
4ba9938c 3914 case MO_64:
a4bcea3d
RH
3915 tcg_gen_mulu2_i64(cpu_regs[s->vex_v], cpu_regs[reg],
3916 cpu_T[0], cpu_regs[R_EDX]);
5f1f4b17
RH
3917 break;
3918#endif
3919 }
3920 break;
3921
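            /* Editorial sketch, not part of the original source.  MULX is an
               unsigned widening multiply that leaves EFLAGS untouched:
                   product = (double width) Ey * (double width) RDX/EDX;
                   By (vvvv register) = low half;  Gy (reg) = high half;
               which is what the mulu2 expansions above produce. */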
0592f74a
RH
3922 case 0x3f5: /* pdep Gy, By, Ey */
3923 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3924 || !(s->prefix & PREFIX_VEX)
3925 || s->vex_l != 0) {
3926 goto illegal_op;
3927 }
4ba9938c 3928 ot = s->dflag == 2 ? MO_64 : MO_32;
0592f74a
RH
3929 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3930 /* Note that by zero-extending the mask operand, we
3931 automatically handle zero-extending the result. */
3932 if (s->dflag == 2) {
3933 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
3934 } else {
3935 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3936 }
3937 gen_helper_pdep(cpu_regs[reg], cpu_T[0], cpu_T[1]);
3938 break;
3939
3940 case 0x2f5: /* pext Gy, By, Ey */
3941 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3942 || !(s->prefix & PREFIX_VEX)
3943 || s->vex_l != 0) {
3944 goto illegal_op;
3945 }
4ba9938c 3946 ot = s->dflag == 2 ? MO_64 : MO_32;
0592f74a
RH
3947 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3948 /* Note that by zero-extending the mask operand, we
3949 automatically handle zero-extending the result. */
3950 if (s->dflag == 2) {
3951 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
3952 } else {
3953 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3954 }
3955 gen_helper_pext(cpu_regs[reg], cpu_T[0], cpu_T[1]);
3956 break;
3957
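            /* Editorial sketch, not part of the original source.  PDEP
               scatters the low-order bits of the source into the bit
               positions selected by the mask, and PEXT gathers them back;
               for example, with mask 0b11010:
                   pdep(0b101,   0b11010) == 0b10010
                   pext(0b10010, 0b11010) == 0b101                       */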
cd7f97ca
RH
3958 case 0x1f6: /* adcx Gy, Ey */
3959 case 0x2f6: /* adox Gy, Ey */
3960 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
3961 goto illegal_op;
3962 } else {
76f13133 3963 TCGv carry_in, carry_out, zero;
cd7f97ca
RH
3964 int end_op;
3965
4ba9938c 3966 ot = (s->dflag == 2 ? MO_64 : MO_32);
cd7f97ca
RH
3967 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3968
3969 /* Re-use the carry-out from a previous round. */
3970 TCGV_UNUSED(carry_in);
3971 carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
3972 switch (s->cc_op) {
3973 case CC_OP_ADCX:
3974 if (b == 0x1f6) {
3975 carry_in = cpu_cc_dst;
3976 end_op = CC_OP_ADCX;
3977 } else {
3978 end_op = CC_OP_ADCOX;
3979 }
3980 break;
3981 case CC_OP_ADOX:
3982 if (b == 0x1f6) {
3983 end_op = CC_OP_ADCOX;
3984 } else {
3985 carry_in = cpu_cc_src2;
3986 end_op = CC_OP_ADOX;
3987 }
3988 break;
3989 case CC_OP_ADCOX:
3990 end_op = CC_OP_ADCOX;
3991 carry_in = carry_out;
3992 break;
3993 default:
c53de1a2 3994 end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
cd7f97ca
RH
3995 break;
3996 }
3997 /* If we can't reuse carry-out, get it out of EFLAGS. */
3998 if (TCGV_IS_UNUSED(carry_in)) {
3999 if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
4000 gen_compute_eflags(s);
4001 }
4002 carry_in = cpu_tmp0;
4003 tcg_gen_shri_tl(carry_in, cpu_cc_src,
4004 ctz32(b == 0x1f6 ? CC_C : CC_O));
4005 tcg_gen_andi_tl(carry_in, carry_in, 1);
4006 }
4007
4008 switch (ot) {
4009#ifdef TARGET_X86_64
4ba9938c 4010 case MO_32:
cd7f97ca
RH
4011 /* If we know TL is 64-bit, and we want a 32-bit
4012 result, just do everything in 64-bit arithmetic. */
4013 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
4014 tcg_gen_ext32u_i64(cpu_T[0], cpu_T[0]);
4015 tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_regs[reg]);
4016 tcg_gen_add_i64(cpu_T[0], cpu_T[0], carry_in);
4017 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T[0]);
4018 tcg_gen_shri_i64(carry_out, cpu_T[0], 32);
4019 break;
4020#endif
4021 default:
4022 /* Otherwise compute the carry-out in two steps. */
76f13133
RH
4023 zero = tcg_const_tl(0);
4024 tcg_gen_add2_tl(cpu_T[0], carry_out,
4025 cpu_T[0], zero,
4026 carry_in, zero);
4027 tcg_gen_add2_tl(cpu_regs[reg], carry_out,
4028 cpu_regs[reg], carry_out,
4029 cpu_T[0], zero);
4030 tcg_temp_free(zero);
cd7f97ca
RH
4031 break;
4032 }
cd7f97ca
RH
4033 set_cc_op(s, end_op);
4034 }
4035 break;
4036
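            /* Editorial sketch, not part of the original source.  ADCX and
               ADOX are add-with-carry variants that touch only one flag each:
                   ADCX:  Gy = Gy + Ey + CF;  CF = carry out;  other flags kept
                   ADOX:  Gy = Gy + Ey + OF;  OF = carry out;  other flags kept
               so two independent carry chains can be interleaved; the cc_op
               bookkeeping above tracks which of the two carries is live. */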
4a554890
RH
4037 case 0x1f7: /* shlx Gy, Ey, By */
4038 case 0x2f7: /* sarx Gy, Ey, By */
4039 case 0x3f7: /* shrx Gy, Ey, By */
4040 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4041 || !(s->prefix & PREFIX_VEX)
4042 || s->vex_l != 0) {
4043 goto illegal_op;
4044 }
4ba9938c 4045 ot = (s->dflag == 2 ? MO_64 : MO_32);
4a554890 4046 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4ba9938c 4047 if (ot == MO_64) {
4a554890
RH
4048 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 63);
4049 } else {
4050 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 31);
4051 }
4052 if (b == 0x1f7) {
4053 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4054 } else if (b == 0x2f7) {
4ba9938c 4055 if (ot != MO_64) {
4a554890
RH
4056 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4057 }
4058 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4059 } else {
4ba9938c 4060 if (ot != MO_64) {
4a554890
RH
4061 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4062 }
4063 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4064 }
4065 gen_op_mov_reg_T0(ot, reg);
4066 break;
4067
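            /* Editorial note, not part of the original source.  The VEX
               shifts take their count from the vvvv register, mask it to the
               operand size (count & 63 or count & 31, as the andi above does),
               and leave EFLAGS unmodified, unlike the legacy shl/sar/shr. */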
bc4b43dc
RH
4068 case 0x0f3:
4069 case 0x1f3:
4070 case 0x2f3:
4071 case 0x3f3: /* Group 17 */
4072 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4073 || !(s->prefix & PREFIX_VEX)
4074 || s->vex_l != 0) {
4075 goto illegal_op;
4076 }
4ba9938c 4077 ot = s->dflag == 2 ? MO_64 : MO_32;
bc4b43dc
RH
4078 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4079
4080 switch (reg & 7) {
4081 case 1: /* blsr By,Ey */
4082 tcg_gen_neg_tl(cpu_T[1], cpu_T[0]);
4083 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4084 gen_op_mov_reg_T0(ot, s->vex_v);
4085 gen_op_update2_cc();
4086 set_cc_op(s, CC_OP_BMILGB + ot);
4087 break;
4088
4089 case 2: /* blsmsk By,Ey */
4090 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4091 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4092 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4093 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4094 set_cc_op(s, CC_OP_BMILGB + ot);
4095 break;
4096
4097 case 3: /* blsi By, Ey */
4098 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4099 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4100 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4101 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4102 set_cc_op(s, CC_OP_BMILGB + ot);
4103 break;
4104
4105 default:
4106 goto illegal_op;
4107 }
4108 break;
4109
111994ee
RH
4110 default:
4111 goto illegal_op;
4112 }
222a3336 4113 break;
111994ee 4114
222a3336
AZ
4115 case 0x03a:
4116 case 0x13a:
4242b1bd 4117 b = modrm;
0af10c86 4118 modrm = cpu_ldub_code(env, s->pc++);
4242b1bd
AZ
4119 rm = modrm & 7;
4120 reg = ((modrm >> 3) & 7) | rex_r;
4121 mod = (modrm >> 6) & 3;
c045af25
AK
4122 if (b1 >= 2) {
4123 goto illegal_op;
4124 }
4242b1bd 4125
d3eb5eae
BS
4126 sse_fn_eppi = sse_op_table7[b].op[b1];
4127 if (!sse_fn_eppi) {
4242b1bd 4128 goto illegal_op;
c4baa050 4129 }
222a3336
AZ
4130 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
4131 goto illegal_op;
4132
d3eb5eae 4133 if (sse_fn_eppi == SSE_SPECIAL) {
4ba9938c 4134 ot = (s->dflag == 2) ? MO_64 : MO_32;
222a3336
AZ
4135 rm = (modrm & 7) | REX_B(s);
4136 if (mod != 3)
4eeb3939 4137 gen_lea_modrm(env, s, modrm);
222a3336 4138 reg = ((modrm >> 3) & 7) | rex_r;
0af10c86 4139 val = cpu_ldub_code(env, s->pc++);
222a3336
AZ
4140 switch (b) {
4141 case 0x14: /* pextrb */
4142 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4143 xmm_regs[reg].XMM_B(val & 15)));
3523e4bd 4144 if (mod == 3) {
222a3336 4145 gen_op_mov_reg_T0(ot, rm);
3523e4bd
RH
4146 } else {
4147 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4148 s->mem_index, MO_UB);
4149 }
222a3336
AZ
4150 break;
4151 case 0x15: /* pextrw */
4152 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4153 xmm_regs[reg].XMM_W(val & 7)));
3523e4bd 4154 if (mod == 3) {
222a3336 4155 gen_op_mov_reg_T0(ot, rm);
3523e4bd
RH
4156 } else {
4157 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4158 s->mem_index, MO_LEUW);
4159 }
222a3336
AZ
4160 break;
4161 case 0x16:
4ba9938c 4162 if (ot == MO_32) { /* pextrd */
222a3336
AZ
4163 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4164 offsetof(CPUX86State,
4165 xmm_regs[reg].XMM_L(val & 3)));
3523e4bd 4166 if (mod == 3) {
a7fbcbe5 4167 tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
3523e4bd 4168 } else {
d5601ad0
RH
4169 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
4170 s->mem_index, MO_LEUL);
3523e4bd 4171 }
222a3336 4172 } else { /* pextrq */
a7812ae4 4173#ifdef TARGET_X86_64
222a3336
AZ
4174 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4175 offsetof(CPUX86State,
4176 xmm_regs[reg].XMM_Q(val & 1)));
3523e4bd 4177 if (mod == 3) {
a7fbcbe5 4178 tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
3523e4bd
RH
4179 } else {
4180 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
4181 s->mem_index, MO_LEQ);
4182 }
a7812ae4
PB
4183#else
4184 goto illegal_op;
4185#endif
222a3336
AZ
4186 }
4187 break;
4188 case 0x17: /* extractps */
4189 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4190 xmm_regs[reg].XMM_L(val & 3)));
3523e4bd 4191 if (mod == 3) {
222a3336 4192 gen_op_mov_reg_T0(ot, rm);
3523e4bd
RH
4193 } else {
4194 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4195 s->mem_index, MO_LEUL);
4196 }
222a3336
AZ
4197 break;
4198 case 0x20: /* pinsrb */
3c5f4116 4199 if (mod == 3) {
4ba9938c 4200 gen_op_mov_TN_reg(MO_32, 0, rm);
3c5f4116
RH
4201 } else {
4202 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
4203 s->mem_index, MO_UB);
4204 }
34c6addd 4205 tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
222a3336
AZ
4206 xmm_regs[reg].XMM_B(val & 15)));
4207 break;
4208 case 0x21: /* insertps */
a7812ae4 4209 if (mod == 3) {
222a3336
AZ
4210 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4211 offsetof(CPUX86State,xmm_regs[rm]
4212 .XMM_L((val >> 6) & 3)));
a7812ae4 4213 } else {
3c5f4116
RH
4214 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4215 s->mem_index, MO_LEUL);
a7812ae4 4216 }
222a3336
AZ
4217 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4218 offsetof(CPUX86State,xmm_regs[reg]
4219 .XMM_L((val >> 4) & 3)));
4220 if ((val >> 0) & 1)
4221 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4222 cpu_env, offsetof(CPUX86State,
4223 xmm_regs[reg].XMM_L(0)));
4224 if ((val >> 1) & 1)
4225 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4226 cpu_env, offsetof(CPUX86State,
4227 xmm_regs[reg].XMM_L(1)));
4228 if ((val >> 2) & 1)
4229 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4230 cpu_env, offsetof(CPUX86State,
4231 xmm_regs[reg].XMM_L(2)));
4232 if ((val >> 3) & 1)
4233 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4234 cpu_env, offsetof(CPUX86State,
4235 xmm_regs[reg].XMM_L(3)));
4236 break;
4237 case 0x22:
4ba9938c 4238 if (ot == MO_32) { /* pinsrd */
3c5f4116 4239 if (mod == 3) {
80b02013 4240 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
3c5f4116 4241 } else {
80b02013
RH
4242 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4243 s->mem_index, MO_LEUL);
3c5f4116 4244 }
222a3336
AZ
4245 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4246 offsetof(CPUX86State,
4247 xmm_regs[reg].XMM_L(val & 3)));
4248 } else { /* pinsrq */
a7812ae4 4249#ifdef TARGET_X86_64
3c5f4116 4250 if (mod == 3) {
222a3336 4251 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
3c5f4116
RH
4252 } else {
4253 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
4254 s->mem_index, MO_LEQ);
4255 }
222a3336
AZ
4256 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4257 offsetof(CPUX86State,
4258 xmm_regs[reg].XMM_Q(val & 1)));
a7812ae4
PB
4259#else
4260 goto illegal_op;
4261#endif
222a3336
AZ
4262 }
4263 break;
4264 }
4265 return;
4266 }
4242b1bd
AZ
4267
4268 if (b1) {
4269 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4270 if (mod == 3) {
4271 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4272 } else {
4273 op2_offset = offsetof(CPUX86State,xmm_t0);
4eeb3939 4274 gen_lea_modrm(env, s, modrm);
323d1876 4275 gen_ldo_env_A0(s, op2_offset);
4242b1bd
AZ
4276 }
4277 } else {
4278 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4279 if (mod == 3) {
4280 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4281 } else {
4282 op2_offset = offsetof(CPUX86State,mmx_t0);
4eeb3939 4283 gen_lea_modrm(env, s, modrm);
323d1876 4284 gen_ldq_env_A0(s, op2_offset);
4242b1bd
AZ
4285 }
4286 }
0af10c86 4287 val = cpu_ldub_code(env, s->pc++);
4242b1bd 4288
222a3336 4289 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
3ca51d07 4290 set_cc_op(s, CC_OP_EFLAGS);
222a3336
AZ
4291
4292 if (s->dflag == 2)
4293 /* The helper must use the entire 64-bit gp registers */
4294 val |= 1 << 8;
4295 }
4296
4242b1bd
AZ
4297 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4298 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4299 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4242b1bd 4300 break;
e2c3c2c5
RH
4301
4302 case 0x33a:
4303 /* Various integer extensions at 0f 3a f[0-f]. */
4304 b = modrm | (b1 << 8);
4305 modrm = cpu_ldub_code(env, s->pc++);
4306 reg = ((modrm >> 3) & 7) | rex_r;
4307
4308 switch (b) {
4309 case 0x3f0: /* rorx Gy,Ey, Ib */
4310 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4311 || !(s->prefix & PREFIX_VEX)
4312 || s->vex_l != 0) {
4313 goto illegal_op;
4314 }
4ba9938c 4315 ot = s->dflag == 2 ? MO_64 : MO_32;
e2c3c2c5
RH
4316 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4317 b = cpu_ldub_code(env, s->pc++);
4ba9938c 4318 if (ot == MO_64) {
e2c3c2c5
RH
4319 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], b & 63);
4320 } else {
4321 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4322 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
4323 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4324 }
4325 gen_op_mov_reg_T0(ot, reg);
4326 break;
4327
4328 default:
4329 goto illegal_op;
4330 }
4331 break;
4332
664e0f19
FB
4333 default:
4334 goto illegal_op;
4335 }
4336 } else {
4337 /* generic MMX or SSE operation */
d1e42c5c 4338 switch(b) {
d1e42c5c
FB
4339 case 0x70: /* pshufx insn */
4340 case 0xc6: /* pshufx insn */
4341 case 0xc2: /* compare insns */
4342 s->rip_offset = 1;
4343 break;
4344 default:
4345 break;
664e0f19
FB
4346 }
4347 if (is_xmm) {
4348 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4349 if (mod != 3) {
4eeb3939 4350 gen_lea_modrm(env, s, modrm);
664e0f19 4351 op2_offset = offsetof(CPUX86State,xmm_t0);
480c1cdb 4352 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
664e0f19
FB
4353 b == 0xc2)) {
4354 /* specific case for SSE single instructions */
4355 if (b1 == 2) {
4356 /* 32 bit access */
909be183 4357 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
651ba608 4358 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
664e0f19
FB
4359 } else {
4360 /* 64 bit access */
323d1876
RH
4361 gen_ldq_env_A0(s, offsetof(CPUX86State,
4362 xmm_t0.XMM_D(0)));
664e0f19
FB
4363 }
4364 } else {
323d1876 4365 gen_ldo_env_A0(s, op2_offset);
664e0f19
FB
4366 }
4367 } else {
4368 rm = (modrm & 7) | REX_B(s);
4369 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4370 }
4371 } else {
4372 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4373 if (mod != 3) {
4eeb3939 4374 gen_lea_modrm(env, s, modrm);
664e0f19 4375 op2_offset = offsetof(CPUX86State,mmx_t0);
323d1876 4376 gen_ldq_env_A0(s, op2_offset);
664e0f19
FB
4377 } else {
4378 rm = (modrm & 7);
4379 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4380 }
4381 }
4382 switch(b) {
a35f3ec7 4383 case 0x0f: /* 3DNow! data insns */
e771edab
AJ
4384 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4385 goto illegal_op;
0af10c86 4386 val = cpu_ldub_code(env, s->pc++);
d3eb5eae
BS
4387 sse_fn_epp = sse_op_table5[val];
4388 if (!sse_fn_epp) {
a35f3ec7 4389 goto illegal_op;
c4baa050 4390 }
5af45186
FB
4391 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4392 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4393 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
a35f3ec7 4394 break;
664e0f19
FB
4395 case 0x70: /* pshufx insn */
4396 case 0xc6: /* pshufx insn */
0af10c86 4397 val = cpu_ldub_code(env, s->pc++);
5af45186
FB
4398 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4399 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
c4baa050 4400 /* XXX: introduce a new table? */
d3eb5eae 4401 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
c4baa050 4402 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
664e0f19
FB
4403 break;
4404 case 0xc2:
4405 /* compare insns */
0af10c86 4406 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
4407 if (val >= 8)
4408 goto illegal_op;
d3eb5eae 4409 sse_fn_epp = sse_op_table4[val][b1];
c4baa050 4410
5af45186
FB
4411 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4412 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4413 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19 4414 break;
b8b6a50b
FB
4415 case 0xf7:
4416 /* maskmov: we must prepare A0 */
4417 if (mod != 3)
4418 goto illegal_op;
4419#ifdef TARGET_X86_64
4420 if (s->aflag == 2) {
4421 gen_op_movq_A0_reg(R_EDI);
4422 } else
4423#endif
4424 {
4425 gen_op_movl_A0_reg(R_EDI);
4426 if (s->aflag == 0)
4427 gen_op_andl_A0_ffff();
4428 }
4429 gen_add_A0_ds_seg(s);
4430
4431 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4432 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
c4baa050 4433 /* XXX: introduce a new table? */
d3eb5eae
BS
4434 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4435 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
b8b6a50b 4436 break;
664e0f19 4437 default:
5af45186
FB
4438 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4439 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4440 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
4441 break;
4442 }
4443 if (b == 0x2e || b == 0x2f) {
3ca51d07 4444 set_cc_op(s, CC_OP_EFLAGS);
664e0f19
FB
4445 }
4446 }
4447}
4448
2c0262af
FB
4449 /* Convert one instruction. s->is_jmp is set if the translation must
4450 be stopped. Returns the next pc value. */
0af10c86
BS
4451static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4452 target_ulong pc_start)
2c0262af
FB
4453{
4454 int b, prefixes, aflag, dflag;
4455 int shift, ot;
4eeb3939 4456 int modrm, reg, rm, mod, op, opreg, val;
14ce26e7
FB
4457 target_ulong next_eip, tval;
4458 int rex_w, rex_r;
2c0262af 4459
fdefe51c 4460 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
70cff25e 4461 tcg_gen_debug_insn_start(pc_start);
fdefe51c 4462 }
2c0262af
FB
4463 s->pc = pc_start;
4464 prefixes = 0;
2c0262af 4465 s->override = -1;
14ce26e7
FB
4466 rex_w = -1;
4467 rex_r = 0;
4468#ifdef TARGET_X86_64
4469 s->rex_x = 0;
4470 s->rex_b = 0;
5fafdf24 4471 x86_64_hregs = 0;
14ce26e7
FB
4472#endif
4473 s->rip_offset = 0; /* for relative ip address */
701ed211
RH
4474 s->vex_l = 0;
4475 s->vex_v = 0;
2c0262af 4476 next_byte:
0af10c86 4477 b = cpu_ldub_code(env, s->pc);
2c0262af 4478 s->pc++;
4a6fd938
RH
4479 /* Collect prefixes. */
4480 switch (b) {
4481 case 0xf3:
4482 prefixes |= PREFIX_REPZ;
4483 goto next_byte;
4484 case 0xf2:
4485 prefixes |= PREFIX_REPNZ;
4486 goto next_byte;
4487 case 0xf0:
4488 prefixes |= PREFIX_LOCK;
4489 goto next_byte;
4490 case 0x2e:
4491 s->override = R_CS;
4492 goto next_byte;
4493 case 0x36:
4494 s->override = R_SS;
4495 goto next_byte;
4496 case 0x3e:
4497 s->override = R_DS;
4498 goto next_byte;
4499 case 0x26:
4500 s->override = R_ES;
4501 goto next_byte;
4502 case 0x64:
4503 s->override = R_FS;
4504 goto next_byte;
4505 case 0x65:
4506 s->override = R_GS;
4507 goto next_byte;
4508 case 0x66:
4509 prefixes |= PREFIX_DATA;
4510 goto next_byte;
4511 case 0x67:
4512 prefixes |= PREFIX_ADR;
4513 goto next_byte;
14ce26e7 4514#ifdef TARGET_X86_64
4a6fd938
RH
4515 case 0x40 ... 0x4f:
4516 if (CODE64(s)) {
14ce26e7
FB
4517 /* REX prefix */
4518 rex_w = (b >> 3) & 1;
4519 rex_r = (b & 0x4) << 1;
4520 s->rex_x = (b & 0x2) << 2;
4521 REX_B(s) = (b & 0x1) << 3;
4522 x86_64_hregs = 1; /* select uniform byte register addressing */
4523 goto next_byte;
4524 }
4a6fd938
RH
4525 break;
4526#endif
701ed211
RH
4527 case 0xc5: /* 2-byte VEX */
4528 case 0xc4: /* 3-byte VEX */
4529 /* VEX prefixes cannot be used except in 32-bit mode.
4530 Otherwise the instruction is LES or LDS. */
4531 if (s->code32 && !s->vm86) {
4532 static const int pp_prefix[4] = {
4533 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4534 };
4535 int vex3, vex2 = cpu_ldub_code(env, s->pc);
4536
4537 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4538 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4539 otherwise the instruction is LES or LDS. */
4540 break;
4541 }
4542 s->pc++;
4543
085d8134 4544 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
701ed211
RH
4545 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4546 | PREFIX_LOCK | PREFIX_DATA)) {
4547 goto illegal_op;
4548 }
4549#ifdef TARGET_X86_64
4550 if (x86_64_hregs) {
4551 goto illegal_op;
4552 }
4553#endif
4554 rex_r = (~vex2 >> 4) & 8;
4555 if (b == 0xc5) {
4556 vex3 = vex2;
4557 b = cpu_ldub_code(env, s->pc++);
4558 } else {
4559#ifdef TARGET_X86_64
4560 s->rex_x = (~vex2 >> 3) & 8;
4561 s->rex_b = (~vex2 >> 2) & 8;
4562#endif
4563 vex3 = cpu_ldub_code(env, s->pc++);
4564 rex_w = (vex3 >> 7) & 1;
4565 switch (vex2 & 0x1f) {
4566 case 0x01: /* Implied 0f leading opcode bytes. */
4567 b = cpu_ldub_code(env, s->pc++) | 0x100;
4568 break;
4569 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4570 b = 0x138;
4571 break;
4572 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4573 b = 0x13a;
4574 break;
4575 default: /* Reserved for future use. */
4576 goto illegal_op;
4577 }
4578 }
4579 s->vex_v = (~vex3 >> 3) & 0xf;
4580 s->vex_l = (vex3 >> 2) & 1;
4581 prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4582 }
4583 break;
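        /* Editorial reference, not part of the original source: the VEX
           prefix layout decoded above is
               c5  RvvvvLpp                      (2-byte form)
               c4  RXBmmmmm  WvvvvLpp            (3-byte form)
           where R/X/B and vvvv are stored inverted, mmmmm selects the
           0f / 0f38 / 0f3a opcode map, L is the vector length and pp
           encodes an implied 66/f3/f2 prefix. */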
4a6fd938
RH
4584 }
4585
4586 /* Post-process prefixes. */
4a6fd938 4587 if (CODE64(s)) {
dec3fc96
RH
4588 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4589 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4590 over 0x66 if both are present. */
4591 dflag = (rex_w > 0 ? 2 : prefixes & PREFIX_DATA ? 0 : 1);
4592 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4593 aflag = (prefixes & PREFIX_ADR ? 1 : 2);
4594 } else {
4595 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4596 dflag = s->code32;
4597 if (prefixes & PREFIX_DATA) {
4598 dflag ^= 1;
14ce26e7 4599 }
dec3fc96
RH
4600 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4601 aflag = s->code32;
4602 if (prefixes & PREFIX_ADR) {
4603 aflag ^= 1;
14ce26e7 4604 }
2c0262af
FB
4605 }
4606
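    /* Editorial note, not part of the original source: dflag/aflag values of
       0, 1 and 2 select 16-, 32- and 64-bit operands/addresses respectively.
       For instance, in 64-bit mode the bytes 66 48 89 d8 still move 64 bits
       (mov %rbx,%rax), because rex_w wins over the 0x66 prefix as computed
       above. */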
2c0262af
FB
4607 s->prefix = prefixes;
4608 s->aflag = aflag;
4609 s->dflag = dflag;
4610
4611 /* lock generation */
4612 if (prefixes & PREFIX_LOCK)
a7812ae4 4613 gen_helper_lock();
2c0262af
FB
4614
4615 /* now check op code */
4616 reswitch:
4617 switch(b) {
4618 case 0x0f:
4619 /**************************/
4620 /* extended op code */
0af10c86 4621 b = cpu_ldub_code(env, s->pc++) | 0x100;
2c0262af 4622 goto reswitch;
3b46e624 4623
2c0262af
FB
4624 /**************************/
4625 /* arith & logic */
4626 case 0x00 ... 0x05:
4627 case 0x08 ... 0x0d:
4628 case 0x10 ... 0x15:
4629 case 0x18 ... 0x1d:
4630 case 0x20 ... 0x25:
4631 case 0x28 ... 0x2d:
4632 case 0x30 ... 0x35:
4633 case 0x38 ... 0x3d:
4634 {
4635 int op, f, val;
4636 op = (b >> 3) & 7;
4637 f = (b >> 1) & 3;
4638
4639 if ((b & 1) == 0)
4ba9938c 4640 ot = MO_8;
2c0262af 4641 else
4ba9938c 4642 ot = dflag + MO_16;
3b46e624 4643
2c0262af
FB
4644 switch(f) {
4645 case 0: /* OP Ev, Gv */
0af10c86 4646 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 4647 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 4648 mod = (modrm >> 6) & 3;
14ce26e7 4649 rm = (modrm & 7) | REX_B(s);
2c0262af 4650 if (mod != 3) {
4eeb3939 4651 gen_lea_modrm(env, s, modrm);
2c0262af
FB
4652 opreg = OR_TMP0;
4653 } else if (op == OP_XORL && rm == reg) {
4654 xor_zero:
4655 /* xor reg, reg optimisation */
436ff2d2 4656 set_cc_op(s, CC_OP_CLR);
97212c88 4657 tcg_gen_movi_tl(cpu_T[0], 0);
57fec1fe 4658 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
4659 break;
4660 } else {
4661 opreg = rm;
4662 }
57fec1fe 4663 gen_op_mov_TN_reg(ot, 1, reg);
2c0262af
FB
4664 gen_op(s, op, ot, opreg);
4665 break;
4666 case 1: /* OP Gv, Ev */
0af10c86 4667 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4668 mod = (modrm >> 6) & 3;
14ce26e7
FB
4669 reg = ((modrm >> 3) & 7) | rex_r;
4670 rm = (modrm & 7) | REX_B(s);
2c0262af 4671 if (mod != 3) {
4eeb3939 4672 gen_lea_modrm(env, s, modrm);
0f712e10 4673 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
2c0262af
FB
4674 } else if (op == OP_XORL && rm == reg) {
4675 goto xor_zero;
4676 } else {
57fec1fe 4677 gen_op_mov_TN_reg(ot, 1, rm);
2c0262af
FB
4678 }
4679 gen_op(s, op, ot, reg);
4680 break;
4681 case 2: /* OP A, Iv */
0af10c86 4682 val = insn_get(env, s, ot);
0ae657b1 4683 tcg_gen_movi_tl(cpu_T[1], val);
2c0262af
FB
4684 gen_op(s, op, ot, OR_EAX);
4685 break;
4686 }
4687 }
4688 break;
4689
ec9d6075
FB
4690 case 0x82:
4691 if (CODE64(s))
4692 goto illegal_op;
2c0262af
FB
4693 case 0x80: /* GRP1 */
4694 case 0x81:
4695 case 0x83:
4696 {
4697 int val;
4698
4699 if ((b & 1) == 0)
4ba9938c 4700 ot = MO_8;
2c0262af 4701 else
4ba9938c 4702 ot = dflag + MO_16;
3b46e624 4703
0af10c86 4704 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4705 mod = (modrm >> 6) & 3;
14ce26e7 4706 rm = (modrm & 7) | REX_B(s);
2c0262af 4707 op = (modrm >> 3) & 7;
3b46e624 4708
2c0262af 4709 if (mod != 3) {
14ce26e7
FB
4710 if (b == 0x83)
4711 s->rip_offset = 1;
4712 else
4713 s->rip_offset = insn_const_size(ot);
4eeb3939 4714 gen_lea_modrm(env, s, modrm);
2c0262af
FB
4715 opreg = OR_TMP0;
4716 } else {
14ce26e7 4717 opreg = rm;
2c0262af
FB
4718 }
4719
4720 switch(b) {
4721 default:
4722 case 0x80:
4723 case 0x81:
d64477af 4724 case 0x82:
0af10c86 4725 val = insn_get(env, s, ot);
2c0262af
FB
4726 break;
4727 case 0x83:
4ba9938c 4728 val = (int8_t)insn_get(env, s, MO_8);
2c0262af
FB
4729 break;
4730 }
0ae657b1 4731 tcg_gen_movi_tl(cpu_T[1], val);
2c0262af
FB
4732 gen_op(s, op, ot, opreg);
4733 }
4734 break;
4735
4736 /**************************/
4737 /* inc, dec, and other misc arith */
4738 case 0x40 ... 0x47: /* inc Gv */
4ba9938c 4739 ot = dflag ? MO_32 : MO_16;
2c0262af
FB
4740 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4741 break;
4742 case 0x48 ... 0x4f: /* dec Gv */
4ba9938c 4743 ot = dflag ? MO_32 : MO_16;
2c0262af
FB
4744 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4745 break;
4746 case 0xf6: /* GRP3 */
4747 case 0xf7:
4748 if ((b & 1) == 0)
4ba9938c 4749 ot = MO_8;
2c0262af 4750 else
4ba9938c 4751 ot = dflag + MO_16;
2c0262af 4752
0af10c86 4753 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4754 mod = (modrm >> 6) & 3;
14ce26e7 4755 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
4756 op = (modrm >> 3) & 7;
4757 if (mod != 3) {
14ce26e7
FB
4758 if (op == 0)
4759 s->rip_offset = insn_const_size(ot);
4eeb3939 4760 gen_lea_modrm(env, s, modrm);
909be183 4761 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 4762 } else {
57fec1fe 4763 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af
FB
4764 }
4765
4766 switch(op) {
4767 case 0: /* test */
0af10c86 4768 val = insn_get(env, s, ot);
0ae657b1 4769 tcg_gen_movi_tl(cpu_T[1], val);
2c0262af 4770 gen_op_testl_T0_T1_cc();
3ca51d07 4771 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af
FB
4772 break;
4773 case 2: /* not */
b6abf97d 4774 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
2c0262af 4775 if (mod != 3) {
fd8ca9f6 4776 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 4777 } else {
57fec1fe 4778 gen_op_mov_reg_T0(ot, rm);
2c0262af
FB
4779 }
4780 break;
4781 case 3: /* neg */
b6abf97d 4782 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
2c0262af 4783 if (mod != 3) {
fd8ca9f6 4784 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 4785 } else {
57fec1fe 4786 gen_op_mov_reg_T0(ot, rm);
2c0262af
FB
4787 }
4788 gen_op_update_neg_cc();
3ca51d07 4789 set_cc_op(s, CC_OP_SUBB + ot);
2c0262af
FB
4790 break;
4791 case 4: /* mul */
4792 switch(ot) {
4ba9938c
RH
4793 case MO_8:
4794 gen_op_mov_TN_reg(MO_8, 1, R_EAX);
0211e5af
FB
4795 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4796 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4797 /* XXX: use 32 bit mul which could be faster */
4798 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4ba9938c 4799 gen_op_mov_reg_T0(MO_16, R_EAX);
0211e5af
FB
4800 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4801 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
3ca51d07 4802 set_cc_op(s, CC_OP_MULB);
2c0262af 4803 break;
4ba9938c
RH
4804 case MO_16:
4805 gen_op_mov_TN_reg(MO_16, 1, R_EAX);
0211e5af
FB
4806 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4807 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4808 /* XXX: use 32 bit mul which could be faster */
4809 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4ba9938c 4810 gen_op_mov_reg_T0(MO_16, R_EAX);
0211e5af
FB
4811 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4812 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4ba9938c 4813 gen_op_mov_reg_T0(MO_16, R_EDX);
0211e5af 4814 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
3ca51d07 4815 set_cc_op(s, CC_OP_MULW);
2c0262af
FB
4816 break;
4817 default:
4ba9938c 4818 case MO_32:
a4bcea3d
RH
4819 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4820 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4821 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4822 cpu_tmp2_i32, cpu_tmp3_i32);
4823 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4824 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4825 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4826 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 4827 set_cc_op(s, CC_OP_MULL);
2c0262af 4828 break;
14ce26e7 4829#ifdef TARGET_X86_64
4ba9938c 4830 case MO_64:
a4bcea3d
RH
4831 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4832 cpu_T[0], cpu_regs[R_EAX]);
4833 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4834 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 4835 set_cc_op(s, CC_OP_MULQ);
14ce26e7
FB
4836 break;
4837#endif
2c0262af 4838 }
2c0262af
FB
4839 break;
4840 case 5: /* imul */
4841 switch(ot) {
4ba9938c
RH
4842 case MO_8:
4843 gen_op_mov_TN_reg(MO_8, 1, R_EAX);
0211e5af
FB
4844 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4845 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4846 /* XXX: use 32 bit mul which could be faster */
4847 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4ba9938c 4848 gen_op_mov_reg_T0(MO_16, R_EAX);
0211e5af
FB
4849 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4850 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4851 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
3ca51d07 4852 set_cc_op(s, CC_OP_MULB);
2c0262af 4853 break;
4ba9938c
RH
4854 case MO_16:
4855 gen_op_mov_TN_reg(MO_16, 1, R_EAX);
0211e5af
FB
4856 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4857 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4858 /* XXX: use 32 bit mul which could be faster */
4859 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4ba9938c 4860 gen_op_mov_reg_T0(MO_16, R_EAX);
0211e5af
FB
4861 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4862 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4863 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4864 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4ba9938c 4865 gen_op_mov_reg_T0(MO_16, R_EDX);
3ca51d07 4866 set_cc_op(s, CC_OP_MULW);
2c0262af
FB
4867 break;
4868 default:
4ba9938c 4869 case MO_32:
a4bcea3d
RH
4870 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4871 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4872 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4873 cpu_tmp2_i32, cpu_tmp3_i32);
4874 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4875 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4876 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
4877 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4878 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
4879 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
3ca51d07 4880 set_cc_op(s, CC_OP_MULL);
2c0262af 4881 break;
14ce26e7 4882#ifdef TARGET_X86_64
4ba9938c 4883 case MO_64:
a4bcea3d
RH
4884 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4885 cpu_T[0], cpu_regs[R_EAX]);
4886 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4887 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
4888 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 4889 set_cc_op(s, CC_OP_MULQ);
14ce26e7
FB
4890 break;
4891#endif
2c0262af 4892 }
2c0262af
FB
4893 break;
4894 case 6: /* div */
4895 switch(ot) {
4ba9938c 4896 case MO_8:
14ce26e7 4897 gen_jmp_im(pc_start - s->cs_base);
7923057b 4898 gen_helper_divb_AL(cpu_env, cpu_T[0]);
2c0262af 4899 break;
4ba9938c 4900 case MO_16:
14ce26e7 4901 gen_jmp_im(pc_start - s->cs_base);
7923057b 4902 gen_helper_divw_AX(cpu_env, cpu_T[0]);
2c0262af
FB
4903 break;
4904 default:
4ba9938c 4905 case MO_32:
14ce26e7 4906 gen_jmp_im(pc_start - s->cs_base);
7923057b 4907 gen_helper_divl_EAX(cpu_env, cpu_T[0]);
14ce26e7
FB
4908 break;
4909#ifdef TARGET_X86_64
4ba9938c 4910 case MO_64:
14ce26e7 4911 gen_jmp_im(pc_start - s->cs_base);
7923057b 4912 gen_helper_divq_EAX(cpu_env, cpu_T[0]);
2c0262af 4913 break;
14ce26e7 4914#endif
2c0262af
FB
4915 }
4916 break;
4917 case 7: /* idiv */
4918 switch(ot) {
4ba9938c 4919 case MO_8:
14ce26e7 4920 gen_jmp_im(pc_start - s->cs_base);
7923057b 4921 gen_helper_idivb_AL(cpu_env, cpu_T[0]);
2c0262af 4922 break;
4ba9938c 4923 case MO_16:
14ce26e7 4924 gen_jmp_im(pc_start - s->cs_base);
7923057b 4925 gen_helper_idivw_AX(cpu_env, cpu_T[0]);
2c0262af
FB
4926 break;
4927 default:
4ba9938c 4928 case MO_32:
14ce26e7 4929 gen_jmp_im(pc_start - s->cs_base);
7923057b 4930 gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
14ce26e7
FB
4931 break;
4932#ifdef TARGET_X86_64
4ba9938c 4933 case MO_64:
14ce26e7 4934 gen_jmp_im(pc_start - s->cs_base);
7923057b 4935 gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
2c0262af 4936 break;
14ce26e7 4937#endif
2c0262af
FB
4938 }
4939 break;
4940 default:
4941 goto illegal_op;
4942 }
4943 break;
4944
4945 case 0xfe: /* GRP4 */
4946 case 0xff: /* GRP5 */
4947 if ((b & 1) == 0)
4ba9938c 4948 ot = MO_8;
2c0262af 4949 else
4ba9938c 4950 ot = dflag + MO_16;
2c0262af 4951
0af10c86 4952 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4953 mod = (modrm >> 6) & 3;
14ce26e7 4954 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
4955 op = (modrm >> 3) & 7;
4956 if (op >= 2 && b == 0xfe) {
4957 goto illegal_op;
4958 }
14ce26e7 4959 if (CODE64(s)) {
aba9d61e 4960 if (op == 2 || op == 4) {
14ce26e7 4961 /* operand size for jumps is 64 bit */
4ba9938c 4962 ot = MO_64;
aba9d61e 4963 } else if (op == 3 || op == 5) {
4ba9938c 4964 ot = dflag ? MO_32 + (rex_w == 1) : MO_16;
14ce26e7
FB
4965 } else if (op == 6) {
4966 /* default push size is 64 bit */
4ba9938c 4967 ot = dflag ? MO_64 : MO_16;
14ce26e7
FB
4968 }
4969 }
2c0262af 4970 if (mod != 3) {
4eeb3939 4971 gen_lea_modrm(env, s, modrm);
2c0262af 4972 if (op >= 2 && op != 3 && op != 5)
909be183 4973 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 4974 } else {
57fec1fe 4975 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af
FB
4976 }
4977
4978 switch(op) {
4979 case 0: /* inc Ev */
4980 if (mod != 3)
4981 opreg = OR_TMP0;
4982 else
4983 opreg = rm;
4984 gen_inc(s, ot, opreg, 1);
4985 break;
4986 case 1: /* dec Ev */
4987 if (mod != 3)
4988 opreg = OR_TMP0;
4989 else
4990 opreg = rm;
4991 gen_inc(s, ot, opreg, -1);
4992 break;
4993 case 2: /* call Ev */
4f31916f 4994 /* XXX: optimize if memory (no 'and' is necessary) */
40b90233
RH
4995 if (s->dflag == 0) {
4996 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4997 }
2c0262af 4998 next_eip = s->pc - s->cs_base;
cc0bce88 4999 tcg_gen_movi_tl(cpu_T[1], next_eip);
4f31916f
FB
5000 gen_push_T1(s);
5001 gen_op_jmp_T0();
2c0262af
FB
5002 gen_eob(s);
5003 break;
61382a50 5004 case 3: /* lcall Ev */
0f712e10 5005 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4ba9938c 5006 gen_add_A0_im(s, 1 << (ot - MO_16 + 1));
cc1a80df 5007 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
2c0262af
FB
5008 do_lcall:
5009 if (s->pe && !s->vm86) {
773cdfcc 5010 gen_update_cc_op(s);
14ce26e7 5011 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 5012 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2
BS
5013 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
5014 tcg_const_i32(dflag),
a7812ae4 5015 tcg_const_i32(s->pc - pc_start));
2c0262af 5016 } else {
b6abf97d 5017 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2
BS
5018 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
5019 tcg_const_i32(dflag),
a7812ae4 5020 tcg_const_i32(s->pc - s->cs_base));
2c0262af
FB
5021 }
5022 gen_eob(s);
5023 break;
5024 case 4: /* jmp Ev */
40b90233
RH
5025 if (s->dflag == 0) {
5026 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5027 }
2c0262af
FB
5028 gen_op_jmp_T0();
5029 gen_eob(s);
5030 break;
5031 case 5: /* ljmp Ev */
0f712e10 5032 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4ba9938c 5033 gen_add_A0_im(s, 1 << (ot - MO_16 + 1));
cc1a80df 5034 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
2c0262af
FB
5035 do_ljmp:
5036 if (s->pe && !s->vm86) {
773cdfcc 5037 gen_update_cc_op(s);
14ce26e7 5038 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 5039 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 5040 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
a7812ae4 5041 tcg_const_i32(s->pc - pc_start));
2c0262af 5042 } else {
3bd7da9e 5043 gen_op_movl_seg_T0_vm(R_CS);
2b98a7d7 5044 tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
2c0262af
FB
5045 gen_op_jmp_T0();
5046 }
5047 gen_eob(s);
5048 break;
5049 case 6: /* push Ev */
5050 gen_push_T0(s);
5051 break;
5052 default:
5053 goto illegal_op;
5054 }
5055 break;
5056
5057 case 0x84: /* test Ev, Gv */
5fafdf24 5058 case 0x85:
2c0262af 5059 if ((b & 1) == 0)
4ba9938c 5060 ot = MO_8;
2c0262af 5061 else
4ba9938c 5062 ot = dflag + MO_16;
2c0262af 5063
0af10c86 5064 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5065 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5066
0af10c86 5067 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
57fec1fe 5068 gen_op_mov_TN_reg(ot, 1, reg);
2c0262af 5069 gen_op_testl_T0_T1_cc();
3ca51d07 5070 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af 5071 break;
3b46e624 5072
2c0262af
FB
5073 case 0xa8: /* test eAX, Iv */
5074 case 0xa9:
5075 if ((b & 1) == 0)
4ba9938c 5076 ot = MO_8;
2c0262af 5077 else
4ba9938c 5078 ot = dflag + MO_16;
0af10c86 5079 val = insn_get(env, s, ot);
2c0262af 5080
57fec1fe 5081 gen_op_mov_TN_reg(ot, 0, OR_EAX);
0ae657b1 5082 tcg_gen_movi_tl(cpu_T[1], val);
2c0262af 5083 gen_op_testl_T0_T1_cc();
3ca51d07 5084 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af 5085 break;
3b46e624 5086
2c0262af 5087 case 0x98: /* CWDE/CBW */
14ce26e7
FB
5088#ifdef TARGET_X86_64
5089 if (dflag == 2) {
4ba9938c 5090 gen_op_mov_TN_reg(MO_32, 0, R_EAX);
e108dd01 5091 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4ba9938c 5092 gen_op_mov_reg_T0(MO_64, R_EAX);
14ce26e7
FB
5093 } else
5094#endif
e108dd01 5095 if (dflag == 1) {
4ba9938c 5096 gen_op_mov_TN_reg(MO_16, 0, R_EAX);
e108dd01 5097 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4ba9938c 5098 gen_op_mov_reg_T0(MO_32, R_EAX);
e108dd01 5099 } else {
4ba9938c 5100 gen_op_mov_TN_reg(MO_8, 0, R_EAX);
e108dd01 5101 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4ba9938c 5102 gen_op_mov_reg_T0(MO_16, R_EAX);
e108dd01 5103 }
2c0262af
FB
5104 break;
5105 case 0x99: /* CDQ/CWD */
14ce26e7
FB
5106#ifdef TARGET_X86_64
5107 if (dflag == 2) {
4ba9938c 5108 gen_op_mov_TN_reg(MO_64, 0, R_EAX);
e108dd01 5109 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
4ba9938c 5110 gen_op_mov_reg_T0(MO_64, R_EDX);
14ce26e7
FB
5111 } else
5112#endif
e108dd01 5113 if (dflag == 1) {
4ba9938c 5114 gen_op_mov_TN_reg(MO_32, 0, R_EAX);
e108dd01
FB
5115 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5116 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
4ba9938c 5117 gen_op_mov_reg_T0(MO_32, R_EDX);
e108dd01 5118 } else {
4ba9938c 5119 gen_op_mov_TN_reg(MO_16, 0, R_EAX);
e108dd01
FB
5120 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5121 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
4ba9938c 5122 gen_op_mov_reg_T0(MO_16, R_EDX);
e108dd01 5123 }
2c0262af
FB
5124 break;
5125 case 0x1af: /* imul Gv, Ev */
5126 case 0x69: /* imul Gv, Ev, I */
5127 case 0x6b:
4ba9938c 5128 ot = dflag + MO_16;
0af10c86 5129 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
5130 reg = ((modrm >> 3) & 7) | rex_r;
5131 if (b == 0x69)
5132 s->rip_offset = insn_const_size(ot);
5133 else if (b == 0x6b)
5134 s->rip_offset = 1;
0af10c86 5135 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2c0262af 5136 if (b == 0x69) {
0af10c86 5137 val = insn_get(env, s, ot);
0ae657b1 5138 tcg_gen_movi_tl(cpu_T[1], val);
2c0262af 5139 } else if (b == 0x6b) {
4ba9938c 5140 val = (int8_t)insn_get(env, s, MO_8);
0ae657b1 5141 tcg_gen_movi_tl(cpu_T[1], val);
2c0262af 5142 } else {
57fec1fe 5143 gen_op_mov_TN_reg(ot, 1, reg);
2c0262af 5144 }
a4bcea3d 5145 switch (ot) {
0211e5af 5146#ifdef TARGET_X86_64
4ba9938c 5147 case MO_64:
a4bcea3d
RH
5148 tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]);
5149 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5150 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
5151 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
5152 break;
0211e5af 5153#endif
4ba9938c 5154 case MO_32:
a4bcea3d
RH
5155 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5156 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5157 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5158 cpu_tmp2_i32, cpu_tmp3_i32);
5159 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
5160 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5161 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5162 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5163 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
5164 break;
5165 default:
0211e5af
FB
5166 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5167 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5168 /* XXX: use 32 bit mul which could be faster */
5169 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5170 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5171 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5172 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
a4bcea3d
RH
5173 gen_op_mov_reg_T0(ot, reg);
5174 break;
2c0262af 5175 }
3ca51d07 5176 set_cc_op(s, CC_OP_MULB + ot);
2c0262af
FB
5177 break;
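/*
 * Editorial sketch, not part of translate.c: how the CC_OP_MUL* state set
 * above lets the flag helpers detect IMUL overflow.  cc_src is computed as
 * (sign extension of the low half) minus (the high half of the full
 * product); OF/CF are set exactly when that difference is nonzero.
 * Function name is hypothetical; assumes <stdint.h>.
 */
#include <stdint.h>

static int imul32_overflows_sketch(int32_t a, int32_t b)
{
    int64_t full = (int64_t)a * (int64_t)b;
    int32_t lo = (int32_t)full;
    int32_t hi = (int32_t)((uint64_t)full >> 32);
    int32_t sign = lo < 0 ? -1 : 0;     /* what the sari-by-31 above produces */

    return (sign - hi) != 0;            /* nonzero cc_src => OF = CF = 1 */
}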
5178 case 0x1c0:
5179 case 0x1c1: /* xadd Ev, Gv */
5180 if ((b & 1) == 0)
4ba9938c 5181 ot = MO_8;
2c0262af 5182 else
4ba9938c 5183 ot = dflag + MO_16;
0af10c86 5184 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5185 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5186 mod = (modrm >> 6) & 3;
5187 if (mod == 3) {
14ce26e7 5188 rm = (modrm & 7) | REX_B(s);
57fec1fe
FB
5189 gen_op_mov_TN_reg(ot, 0, reg);
5190 gen_op_mov_TN_reg(ot, 1, rm);
2c0262af 5191 gen_op_addl_T0_T1();
57fec1fe
FB
5192 gen_op_mov_reg_T1(ot, reg);
5193 gen_op_mov_reg_T0(ot, rm);
2c0262af 5194 } else {
4eeb3939 5195 gen_lea_modrm(env, s, modrm);
57fec1fe 5196 gen_op_mov_TN_reg(ot, 0, reg);
0f712e10 5197 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
2c0262af 5198 gen_op_addl_T0_T1();
fd8ca9f6 5199 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
57fec1fe 5200 gen_op_mov_reg_T1(ot, reg);
2c0262af
FB
5201 }
5202 gen_op_update2_cc();
3ca51d07 5203 set_cc_op(s, CC_OP_ADDB + ot);
2c0262af
FB
5204 break;
5205 case 0x1b0:
5206 case 0x1b1: /* cmpxchg Ev, Gv */
cad3a37d 5207 {
1130328e 5208 int label1, label2;
1e4840bf 5209 TCGv t0, t1, t2, a0;
cad3a37d
FB
5210
5211 if ((b & 1) == 0)
4ba9938c 5212 ot = MO_8;
cad3a37d 5213 else
4ba9938c 5214 ot = dflag + MO_16;
0af10c86 5215 modrm = cpu_ldub_code(env, s->pc++);
cad3a37d
FB
5216 reg = ((modrm >> 3) & 7) | rex_r;
5217 mod = (modrm >> 6) & 3;
a7812ae4
PB
5218 t0 = tcg_temp_local_new();
5219 t1 = tcg_temp_local_new();
5220 t2 = tcg_temp_local_new();
5221 a0 = tcg_temp_local_new();
1e4840bf 5222 gen_op_mov_v_reg(ot, t1, reg);
cad3a37d
FB
5223 if (mod == 3) {
5224 rm = (modrm & 7) | REX_B(s);
1e4840bf 5225 gen_op_mov_v_reg(ot, t0, rm);
cad3a37d 5226 } else {
4eeb3939 5227 gen_lea_modrm(env, s, modrm);
1e4840bf 5228 tcg_gen_mov_tl(a0, cpu_A0);
323d1876 5229 gen_op_ld_v(s, ot, t0, a0);
cad3a37d
FB
5230 rm = 0; /* avoid warning */
5231 }
5232 label1 = gen_new_label();
a3251186
RH
5233 tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5234 gen_extu(ot, t0);
1e4840bf 5235 gen_extu(ot, t2);
a3251186 5236 tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
f7e80adf 5237 label2 = gen_new_label();
cad3a37d 5238 if (mod == 3) {
1e4840bf 5239 gen_op_mov_reg_v(ot, R_EAX, t0);
1130328e
FB
5240 tcg_gen_br(label2);
5241 gen_set_label(label1);
1e4840bf 5242 gen_op_mov_reg_v(ot, rm, t1);
cad3a37d 5243 } else {
f7e80adf
AG
5244 /* perform no-op store cycle like physical cpu; must be
5245 before changing accumulator to ensure idempotency if
5246 the store faults and the instruction is restarted */
323d1876 5247 gen_op_st_v(s, ot, t0, a0);
1e4840bf 5248 gen_op_mov_reg_v(ot, R_EAX, t0);
f7e80adf 5249 tcg_gen_br(label2);
1130328e 5250 gen_set_label(label1);
323d1876 5251 gen_op_st_v(s, ot, t1, a0);
cad3a37d 5252 }
f7e80adf 5253 gen_set_label(label2);
1e4840bf 5254 tcg_gen_mov_tl(cpu_cc_src, t0);
a3251186
RH
5255 tcg_gen_mov_tl(cpu_cc_srcT, t2);
5256 tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
3ca51d07 5257 set_cc_op(s, CC_OP_SUBB + ot);
1e4840bf
FB
5258 tcg_temp_free(t0);
5259 tcg_temp_free(t1);
5260 tcg_temp_free(t2);
5261 tcg_temp_free(a0);
2c0262af 5262 }
2c0262af
FB
5263 break;
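/*
 * Editorial sketch, not part of translate.c: the guest-level semantics the
 * branches above implement for CMPXCHG r/m32, r32, including the no-op
 * store on the not-equal path that mirrors the hardware store cycle.
 * Function name is hypothetical; assumes <stdint.h>.
 */
#include <stdint.h>

static int cmpxchg32_sketch(uint32_t *dest, uint32_t *eax, uint32_t src)
{
    uint32_t old = *dest;

    if (*eax == old) {
        *dest = src;            /* equal: store the new value, ZF = 1 */
        return 1;
    }
    *dest = old;                /* not equal: no-op store, as in the code above */
    *eax = old;                 /* then load the accumulator, ZF = 0 */
    return 0;
}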
5264 case 0x1c7: /* cmpxchg8b */
0af10c86 5265 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5266 mod = (modrm >> 6) & 3;
71c3558e 5267 if ((mod == 3) || ((modrm & 0x38) != 0x8))
2c0262af 5268 goto illegal_op;
1b9d9ebb
FB
5269#ifdef TARGET_X86_64
5270 if (dflag == 2) {
5271 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5272 goto illegal_op;
5273 gen_jmp_im(pc_start - s->cs_base);
773cdfcc 5274 gen_update_cc_op(s);
4eeb3939 5275 gen_lea_modrm(env, s, modrm);
92fc4b58 5276 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
1b9d9ebb
FB
5277 } else
5278#endif
5279 {
5280 if (!(s->cpuid_features & CPUID_CX8))
5281 goto illegal_op;
5282 gen_jmp_im(pc_start - s->cs_base);
773cdfcc 5283 gen_update_cc_op(s);
4eeb3939 5284 gen_lea_modrm(env, s, modrm);
92fc4b58 5285 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
1b9d9ebb 5286 }
3ca51d07 5287 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 5288 break;
3b46e624 5289
2c0262af
FB
5290 /**************************/
5291 /* push/pop */
5292 case 0x50 ... 0x57: /* push */
4ba9938c 5293 gen_op_mov_TN_reg(MO_32, 0, (b & 7) | REX_B(s));
2c0262af
FB
5294 gen_push_T0(s);
5295 break;
5296 case 0x58 ... 0x5f: /* pop */
14ce26e7 5297 if (CODE64(s)) {
4ba9938c 5298 ot = dflag ? MO_64 : MO_16;
14ce26e7 5299 } else {
4ba9938c 5300 ot = dflag + MO_16;
14ce26e7 5301 }
2c0262af 5302 gen_pop_T0(s);
77729c24 5303 /* NOTE: order is important for pop %sp */
2c0262af 5304 gen_pop_update(s);
57fec1fe 5305 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
2c0262af
FB
5306 break;
5307 case 0x60: /* pusha */
14ce26e7
FB
5308 if (CODE64(s))
5309 goto illegal_op;
2c0262af
FB
5310 gen_pusha(s);
5311 break;
5312 case 0x61: /* popa */
14ce26e7
FB
5313 if (CODE64(s))
5314 goto illegal_op;
2c0262af
FB
5315 gen_popa(s);
5316 break;
5317 case 0x68: /* push Iv */
5318 case 0x6a:
14ce26e7 5319 if (CODE64(s)) {
4ba9938c 5320 ot = dflag ? MO_64 : MO_16;
14ce26e7 5321 } else {
4ba9938c 5322 ot = dflag + MO_16;
14ce26e7 5323 }
2c0262af 5324 if (b == 0x68)
0af10c86 5325 val = insn_get(env, s, ot);
2c0262af 5326 else
4ba9938c 5327 val = (int8_t)insn_get(env, s, MO_8);
1b90d56e 5328 tcg_gen_movi_tl(cpu_T[0], val);
2c0262af
FB
5329 gen_push_T0(s);
5330 break;
5331 case 0x8f: /* pop Ev */
14ce26e7 5332 if (CODE64(s)) {
4ba9938c 5333 ot = dflag ? MO_64 : MO_16;
14ce26e7 5334 } else {
4ba9938c 5335 ot = dflag + MO_16;
14ce26e7 5336 }
0af10c86 5337 modrm = cpu_ldub_code(env, s->pc++);
77729c24 5338 mod = (modrm >> 6) & 3;
2c0262af 5339 gen_pop_T0(s);
77729c24
FB
5340 if (mod == 3) {
5341 /* NOTE: order is important for pop %sp */
5342 gen_pop_update(s);
14ce26e7 5343 rm = (modrm & 7) | REX_B(s);
57fec1fe 5344 gen_op_mov_reg_T0(ot, rm);
77729c24
FB
5345 } else {
5346 /* NOTE: order is important too for MMU exceptions */
14ce26e7 5347 s->popl_esp_hack = 1 << ot;
0af10c86 5348 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
77729c24
FB
5349 s->popl_esp_hack = 0;
5350 gen_pop_update(s);
5351 }
2c0262af
FB
5352 break;
5353 case 0xc8: /* enter */
5354 {
5355 int level;
0af10c86 5356 val = cpu_lduw_code(env, s->pc);
2c0262af 5357 s->pc += 2;
0af10c86 5358 level = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5359 gen_enter(s, val, level);
5360 }
5361 break;
5362 case 0xc9: /* leave */
5363 /* XXX: exception not precise (ESP is updated before potential exception) */
14ce26e7 5364 if (CODE64(s)) {
4ba9938c
RH
5365 gen_op_mov_TN_reg(MO_64, 0, R_EBP);
5366 gen_op_mov_reg_T0(MO_64, R_ESP);
14ce26e7 5367 } else if (s->ss32) {
4ba9938c
RH
5368 gen_op_mov_TN_reg(MO_32, 0, R_EBP);
5369 gen_op_mov_reg_T0(MO_32, R_ESP);
2c0262af 5370 } else {
4ba9938c
RH
5371 gen_op_mov_TN_reg(MO_16, 0, R_EBP);
5372 gen_op_mov_reg_T0(MO_16, R_ESP);
2c0262af
FB
5373 }
5374 gen_pop_T0(s);
14ce26e7 5375 if (CODE64(s)) {
4ba9938c 5376 ot = dflag ? MO_64 : MO_16;
14ce26e7 5377 } else {
4ba9938c 5378 ot = dflag + MO_16;
14ce26e7 5379 }
57fec1fe 5380 gen_op_mov_reg_T0(ot, R_EBP);
2c0262af
FB
5381 gen_pop_update(s);
5382 break;
5383 case 0x06: /* push es */
5384 case 0x0e: /* push cs */
5385 case 0x16: /* push ss */
5386 case 0x1e: /* push ds */
14ce26e7
FB
5387 if (CODE64(s))
5388 goto illegal_op;
2c0262af
FB
5389 gen_op_movl_T0_seg(b >> 3);
5390 gen_push_T0(s);
5391 break;
5392 case 0x1a0: /* push fs */
5393 case 0x1a8: /* push gs */
5394 gen_op_movl_T0_seg((b >> 3) & 7);
5395 gen_push_T0(s);
5396 break;
5397 case 0x07: /* pop es */
5398 case 0x17: /* pop ss */
5399 case 0x1f: /* pop ds */
14ce26e7
FB
5400 if (CODE64(s))
5401 goto illegal_op;
2c0262af
FB
5402 reg = b >> 3;
5403 gen_pop_T0(s);
5404 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5405 gen_pop_update(s);
5406 if (reg == R_SS) {
a2cc3b24
FB
5407 /* if reg == SS, inhibit interrupts/trace. */
5408 /* If several instructions disable interrupts, only the
5409 _first_ does it */
5410 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
f0967a1a 5411 gen_helper_set_inhibit_irq(cpu_env);
2c0262af
FB
5412 s->tf = 0;
5413 }
5414 if (s->is_jmp) {
14ce26e7 5415 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5416 gen_eob(s);
5417 }
5418 break;
5419 case 0x1a1: /* pop fs */
5420 case 0x1a9: /* pop gs */
5421 gen_pop_T0(s);
5422 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5423 gen_pop_update(s);
5424 if (s->is_jmp) {
14ce26e7 5425 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5426 gen_eob(s);
5427 }
5428 break;
5429
5430 /**************************/
5431 /* mov */
5432 case 0x88:
5433 case 0x89: /* mov Gv, Ev */
5434 if ((b & 1) == 0)
4ba9938c 5435 ot = MO_8;
2c0262af 5436 else
4ba9938c 5437 ot = dflag + MO_16;
0af10c86 5438 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5439 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5440
2c0262af 5441 /* generate a generic store */
0af10c86 5442 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
2c0262af
FB
5443 break;
5444 case 0xc6:
5445 case 0xc7: /* mov Ev, Iv */
5446 if ((b & 1) == 0)
4ba9938c 5447 ot = MO_8;
2c0262af 5448 else
4ba9938c 5449 ot = dflag + MO_16;
0af10c86 5450 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5451 mod = (modrm >> 6) & 3;
14ce26e7
FB
5452 if (mod != 3) {
5453 s->rip_offset = insn_const_size(ot);
4eeb3939 5454 gen_lea_modrm(env, s, modrm);
14ce26e7 5455 }
0af10c86 5456 val = insn_get(env, s, ot);
1b90d56e 5457 tcg_gen_movi_tl(cpu_T[0], val);
fd8ca9f6
RH
5458 if (mod != 3) {
5459 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5460 } else {
57fec1fe 5461 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
fd8ca9f6 5462 }
2c0262af
FB
5463 break;
5464 case 0x8a:
5465 case 0x8b: /* mov Ev, Gv */
5466 if ((b & 1) == 0)
4ba9938c 5467 ot = MO_8;
2c0262af 5468 else
4ba9938c 5469 ot = MO_16 + dflag;
0af10c86 5470 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5471 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5472
0af10c86 5473 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
57fec1fe 5474 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
5475 break;
5476 case 0x8e: /* mov seg, Gv */
0af10c86 5477 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5478 reg = (modrm >> 3) & 7;
5479 if (reg >= 6 || reg == R_CS)
5480 goto illegal_op;
4ba9938c 5481 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
2c0262af
FB
5482 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5483 if (reg == R_SS) {
5484 /* if reg == SS, inhibit interrupts/trace */
a2cc3b24
FB
5485 /* If several instructions disable interrupts, only the
5486 _first_ does it */
5487 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
f0967a1a 5488 gen_helper_set_inhibit_irq(cpu_env);
2c0262af
FB
5489 s->tf = 0;
5490 }
5491 if (s->is_jmp) {
14ce26e7 5492 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5493 gen_eob(s);
5494 }
5495 break;
5496 case 0x8c: /* mov Gv, seg */
0af10c86 5497 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5498 reg = (modrm >> 3) & 7;
5499 mod = (modrm >> 6) & 3;
5500 if (reg >= 6)
5501 goto illegal_op;
5502 gen_op_movl_T0_seg(reg);
14ce26e7 5503 if (mod == 3)
4ba9938c 5504 ot = MO_16 + dflag;
14ce26e7 5505 else
4ba9938c 5506 ot = MO_16;
0af10c86 5507 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
5508 break;
5509
5510 case 0x1b6: /* movzbS Gv, Eb */
5511 case 0x1b7: /* movzwS Gv, Eb */
5512 case 0x1be: /* movsbS Gv, Eb */
5513 case 0x1bf: /* movswS Gv, Eb */
5514 {
c8fbc479
RH
5515 TCGMemOp d_ot;
5516 TCGMemOp s_ot;
5517
2c0262af 5518 /* d_ot is the size of destination */
4ba9938c 5519 d_ot = dflag + MO_16;
2c0262af 5520 /* ot is the size of source */
4ba9938c 5521 ot = (b & 1) + MO_8;
c8fbc479
RH
5522 /* s_ot is the sign+size of source */
5523 s_ot = b & 8 ? MO_SIGN | ot : ot;
5524
0af10c86 5525 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5526 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 5527 mod = (modrm >> 6) & 3;
14ce26e7 5528 rm = (modrm & 7) | REX_B(s);
3b46e624 5529
2c0262af 5530 if (mod == 3) {
57fec1fe 5531 gen_op_mov_TN_reg(ot, 0, rm);
c8fbc479
RH
5532 switch (s_ot) {
5533 case MO_UB:
e108dd01 5534 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
2c0262af 5535 break;
c8fbc479 5536 case MO_SB:
e108dd01 5537 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
2c0262af 5538 break;
c8fbc479 5539 case MO_UW:
e108dd01 5540 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
2c0262af
FB
5541 break;
5542 default:
c8fbc479 5543 case MO_SW:
e108dd01 5544 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
2c0262af
FB
5545 break;
5546 }
57fec1fe 5547 gen_op_mov_reg_T0(d_ot, reg);
2c0262af 5548 } else {
4eeb3939 5549 gen_lea_modrm(env, s, modrm);
c8fbc479 5550 gen_op_ld_v(s, s_ot, cpu_T[0], cpu_A0);
57fec1fe 5551 gen_op_mov_reg_T0(d_ot, reg);
2c0262af
FB
5552 }
5553 }
5554 break;
5555
5556 case 0x8d: /* lea */
4ba9938c 5557 ot = dflag + MO_16;
0af10c86 5558 modrm = cpu_ldub_code(env, s->pc++);
3a1d9b8b
FB
5559 mod = (modrm >> 6) & 3;
5560 if (mod == 3)
5561 goto illegal_op;
14ce26e7 5562 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5563 /* we must ensure that no segment is added */
5564 s->override = -1;
5565 val = s->addseg;
5566 s->addseg = 0;
4eeb3939 5567 gen_lea_modrm(env, s, modrm);
2c0262af 5568 s->addseg = val;
4ba9938c 5569 gen_op_mov_reg_A0(ot - MO_16, reg);
2c0262af 5570 break;
3b46e624 5571
2c0262af
FB
5572 case 0xa0: /* mov EAX, Ov */
5573 case 0xa1:
5574 case 0xa2: /* mov Ov, EAX */
5575 case 0xa3:
2c0262af 5576 {
14ce26e7
FB
5577 target_ulong offset_addr;
5578
5579 if ((b & 1) == 0)
4ba9938c 5580 ot = MO_8;
14ce26e7 5581 else
4ba9938c 5582 ot = dflag + MO_16;
14ce26e7 5583#ifdef TARGET_X86_64
8f091a59 5584 if (s->aflag == 2) {
0af10c86 5585 offset_addr = cpu_ldq_code(env, s->pc);
14ce26e7 5586 s->pc += 8;
5fafdf24 5587 } else
14ce26e7
FB
5588#endif
5589 {
5590 if (s->aflag) {
4ba9938c 5591 offset_addr = insn_get(env, s, MO_32);
14ce26e7 5592 } else {
4ba9938c 5593 offset_addr = insn_get(env, s, MO_16);
14ce26e7 5594 }
14ce26e7 5595 }
3250cff8 5596 tcg_gen_movi_tl(cpu_A0, offset_addr);
664e0f19 5597 gen_add_A0_ds_seg(s);
14ce26e7 5598 if ((b & 2) == 0) {
909be183 5599 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
57fec1fe 5600 gen_op_mov_reg_T0(ot, R_EAX);
14ce26e7 5601 } else {
57fec1fe 5602 gen_op_mov_TN_reg(ot, 0, R_EAX);
fd8ca9f6 5603 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2c0262af
FB
5604 }
5605 }
2c0262af
FB
5606 break;
5607 case 0xd7: /* xlat */
14ce26e7 5608#ifdef TARGET_X86_64
8f091a59 5609 if (s->aflag == 2) {
57fec1fe 5610 gen_op_movq_A0_reg(R_EBX);
4ba9938c 5611 gen_op_mov_TN_reg(MO_64, 0, R_EAX);
bbf662ee
FB
5612 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5613 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5fafdf24 5614 } else
14ce26e7
FB
5615#endif
5616 {
57fec1fe 5617 gen_op_movl_A0_reg(R_EBX);
4ba9938c 5618 gen_op_mov_TN_reg(MO_32, 0, R_EAX);
bbf662ee
FB
5619 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5620 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
14ce26e7
FB
5621 if (s->aflag == 0)
5622 gen_op_andl_A0_ffff();
bbf662ee
FB
5623 else
5624 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
14ce26e7 5625 }
664e0f19 5626 gen_add_A0_ds_seg(s);
cc1a80df 5627 gen_op_ld_v(s, MO_8, cpu_T[0], cpu_A0);
4ba9938c 5628 gen_op_mov_reg_T0(MO_8, R_EAX);
2c0262af
FB
5629 break;
5630 case 0xb0 ... 0xb7: /* mov R, Ib */
4ba9938c 5631 val = insn_get(env, s, MO_8);
1b90d56e 5632 tcg_gen_movi_tl(cpu_T[0], val);
4ba9938c 5633 gen_op_mov_reg_T0(MO_8, (b & 7) | REX_B(s));
2c0262af
FB
5634 break;
5635 case 0xb8 ... 0xbf: /* mov R, Iv */
14ce26e7
FB
5636#ifdef TARGET_X86_64
5637 if (dflag == 2) {
5638 uint64_t tmp;
5639 /* 64 bit case */
0af10c86 5640 tmp = cpu_ldq_code(env, s->pc);
14ce26e7
FB
5641 s->pc += 8;
5642 reg = (b & 7) | REX_B(s);
cc0bce88 5643 tcg_gen_movi_tl(cpu_T[0], tmp);
4ba9938c 5644 gen_op_mov_reg_T0(MO_64, reg);
5fafdf24 5645 } else
14ce26e7
FB
5646#endif
5647 {
4ba9938c 5648 ot = dflag ? MO_32 : MO_16;
0af10c86 5649 val = insn_get(env, s, ot);
14ce26e7 5650 reg = (b & 7) | REX_B(s);
1b90d56e 5651 tcg_gen_movi_tl(cpu_T[0], val);
57fec1fe 5652 gen_op_mov_reg_T0(ot, reg);
14ce26e7 5653 }
2c0262af
FB
5654 break;
5655
5656 case 0x91 ... 0x97: /* xchg R, EAX */
7418027e 5657 do_xchg_reg_eax:
4ba9938c 5658 ot = dflag + MO_16;
14ce26e7 5659 reg = (b & 7) | REX_B(s);
2c0262af
FB
5660 rm = R_EAX;
5661 goto do_xchg_reg;
5662 case 0x86:
5663 case 0x87: /* xchg Ev, Gv */
5664 if ((b & 1) == 0)
4ba9938c 5665 ot = MO_8;
2c0262af 5666 else
4ba9938c 5667 ot = dflag + MO_16;
0af10c86 5668 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5669 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5670 mod = (modrm >> 6) & 3;
5671 if (mod == 3) {
14ce26e7 5672 rm = (modrm & 7) | REX_B(s);
2c0262af 5673 do_xchg_reg:
57fec1fe
FB
5674 gen_op_mov_TN_reg(ot, 0, reg);
5675 gen_op_mov_TN_reg(ot, 1, rm);
5676 gen_op_mov_reg_T0(ot, rm);
5677 gen_op_mov_reg_T1(ot, reg);
2c0262af 5678 } else {
4eeb3939 5679 gen_lea_modrm(env, s, modrm);
57fec1fe 5680 gen_op_mov_TN_reg(ot, 0, reg);
2c0262af
FB
5681 /* for xchg, lock is implicit */
5682 if (!(prefixes & PREFIX_LOCK))
a7812ae4 5683 gen_helper_lock();
0f712e10 5684 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
fd8ca9f6 5685 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 5686 if (!(prefixes & PREFIX_LOCK))
a7812ae4 5687 gen_helper_unlock();
57fec1fe 5688 gen_op_mov_reg_T1(ot, reg);
2c0262af
FB
5689 }
5690 break;
5691 case 0xc4: /* les Gv */
701ed211 5692 /* In CODE64 this is VEX3; see above. */
2c0262af
FB
5693 op = R_ES;
5694 goto do_lxx;
5695 case 0xc5: /* lds Gv */
701ed211 5696 /* In CODE64 this is VEX2; see above. */
2c0262af
FB
5697 op = R_DS;
5698 goto do_lxx;
5699 case 0x1b2: /* lss Gv */
5700 op = R_SS;
5701 goto do_lxx;
5702 case 0x1b4: /* lfs Gv */
5703 op = R_FS;
5704 goto do_lxx;
5705 case 0x1b5: /* lgs Gv */
5706 op = R_GS;
5707 do_lxx:
4ba9938c 5708 ot = dflag ? MO_32 : MO_16;
0af10c86 5709 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5710 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5711 mod = (modrm >> 6) & 3;
5712 if (mod == 3)
5713 goto illegal_op;
4eeb3939 5714 gen_lea_modrm(env, s, modrm);
0f712e10 5715 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4ba9938c 5716 gen_add_A0_im(s, 1 << (ot - MO_16 + 1));
2c0262af 5717 /* load the segment first to handle exceptions properly */
cc1a80df 5718 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
2c0262af
FB
5719 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5720 /* then put the data */
57fec1fe 5721 gen_op_mov_reg_T1(ot, reg);
2c0262af 5722 if (s->is_jmp) {
14ce26e7 5723 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5724 gen_eob(s);
5725 }
5726 break;
3b46e624 5727
2c0262af
FB
5728 /************************/
5729 /* shifts */
5730 case 0xc0:
5731 case 0xc1:
5732 /* shift Ev,Ib */
5733 shift = 2;
5734 grp2:
5735 {
5736 if ((b & 1) == 0)
4ba9938c 5737 ot = MO_8;
2c0262af 5738 else
4ba9938c 5739 ot = dflag + MO_16;
3b46e624 5740
0af10c86 5741 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5742 mod = (modrm >> 6) & 3;
2c0262af 5743 op = (modrm >> 3) & 7;
3b46e624 5744
2c0262af 5745 if (mod != 3) {
14ce26e7
FB
5746 if (shift == 2) {
5747 s->rip_offset = 1;
5748 }
4eeb3939 5749 gen_lea_modrm(env, s, modrm);
2c0262af
FB
5750 opreg = OR_TMP0;
5751 } else {
14ce26e7 5752 opreg = (modrm & 7) | REX_B(s);
2c0262af
FB
5753 }
5754
5755 /* simpler op */
5756 if (shift == 0) {
5757 gen_shift(s, op, ot, opreg, OR_ECX);
5758 } else {
5759 if (shift == 2) {
0af10c86 5760 shift = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5761 }
5762 gen_shifti(s, op, ot, opreg, shift);
5763 }
5764 }
5765 break;
5766 case 0xd0:
5767 case 0xd1:
5768 /* shift Ev,1 */
5769 shift = 1;
5770 goto grp2;
5771 case 0xd2:
5772 case 0xd3:
5773 /* shift Ev,cl */
5774 shift = 0;
5775 goto grp2;
5776
5777 case 0x1a4: /* shld imm */
5778 op = 0;
5779 shift = 1;
5780 goto do_shiftd;
5781 case 0x1a5: /* shld cl */
5782 op = 0;
5783 shift = 0;
5784 goto do_shiftd;
5785 case 0x1ac: /* shrd imm */
5786 op = 1;
5787 shift = 1;
5788 goto do_shiftd;
5789 case 0x1ad: /* shrd cl */
5790 op = 1;
5791 shift = 0;
5792 do_shiftd:
4ba9938c 5793 ot = dflag + MO_16;
0af10c86 5794 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5795 mod = (modrm >> 6) & 3;
14ce26e7
FB
5796 rm = (modrm & 7) | REX_B(s);
5797 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 5798 if (mod != 3) {
4eeb3939 5799 gen_lea_modrm(env, s, modrm);
b6abf97d 5800 opreg = OR_TMP0;
2c0262af 5801 } else {
b6abf97d 5802 opreg = rm;
2c0262af 5803 }
57fec1fe 5804 gen_op_mov_TN_reg(ot, 1, reg);
3b46e624 5805
2c0262af 5806 if (shift) {
3b9d3cf1
PB
5807 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5808 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5809 tcg_temp_free(imm);
2c0262af 5810 } else {
3b9d3cf1 5811 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
2c0262af
FB
5812 }
5813 break;
5814
5815 /************************/
5816 /* floats */
5fafdf24 5817 case 0xd8 ... 0xdf:
7eee2a50
FB
5818 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5819 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5820 /* XXX: what to do if illegal op ? */
5821 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5822 break;
5823 }
0af10c86 5824 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5825 mod = (modrm >> 6) & 3;
5826 rm = modrm & 7;
5827 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
2c0262af
FB
5828 if (mod != 3) {
5829 /* memory op */
4eeb3939 5830 gen_lea_modrm(env, s, modrm);
2c0262af
FB
5831 switch(op) {
5832 case 0x00 ... 0x07: /* fxxxs */
5833 case 0x10 ... 0x17: /* fixxxl */
5834 case 0x20 ... 0x27: /* fxxxl */
5835 case 0x30 ... 0x37: /* fixxx */
5836 {
5837 int op1;
5838 op1 = op & 7;
5839
5840 switch(op >> 4) {
5841 case 0:
80b02013
RH
5842 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5843 s->mem_index, MO_LEUL);
d3eb5eae 5844 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5845 break;
5846 case 1:
80b02013
RH
5847 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5848 s->mem_index, MO_LEUL);
d3eb5eae 5849 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5850 break;
5851 case 2:
3c5f4116
RH
5852 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5853 s->mem_index, MO_LEQ);
d3eb5eae 5854 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
5855 break;
5856 case 3:
5857 default:
80b02013
RH
5858 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5859 s->mem_index, MO_LESW);
d3eb5eae 5860 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5861 break;
5862 }
3b46e624 5863
a7812ae4 5864 gen_helper_fp_arith_ST0_FT0(op1);
2c0262af
FB
5865 if (op1 == 3) {
5866 /* fcomp needs pop */
d3eb5eae 5867 gen_helper_fpop(cpu_env);
2c0262af
FB
5868 }
5869 }
5870 break;
5871 case 0x08: /* flds */
5872 case 0x0a: /* fsts */
5873 case 0x0b: /* fstps */
465e9838
FB
5874 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5875 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5876 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
2c0262af
FB
5877 switch(op & 7) {
5878 case 0:
5879 switch(op >> 4) {
5880 case 0:
80b02013
RH
5881 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5882 s->mem_index, MO_LEUL);
d3eb5eae 5883 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5884 break;
5885 case 1:
80b02013
RH
5886 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5887 s->mem_index, MO_LEUL);
d3eb5eae 5888 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5889 break;
5890 case 2:
3c5f4116
RH
5891 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5892 s->mem_index, MO_LEQ);
d3eb5eae 5893 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
5894 break;
5895 case 3:
5896 default:
80b02013
RH
5897 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5898 s->mem_index, MO_LESW);
d3eb5eae 5899 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5900 break;
5901 }
5902 break;
465e9838 5903 case 1:
19e6c4b8 5904 /* XXX: the corresponding CPUID bit must be tested! */
465e9838
FB
5905 switch(op >> 4) {
5906 case 1:
d3eb5eae 5907 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5908 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5909 s->mem_index, MO_LEUL);
465e9838
FB
5910 break;
5911 case 2:
d3eb5eae 5912 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
3523e4bd
RH
5913 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5914 s->mem_index, MO_LEQ);
465e9838
FB
5915 break;
5916 case 3:
5917 default:
d3eb5eae 5918 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5919 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5920 s->mem_index, MO_LEUW);
19e6c4b8 5921 break;
465e9838 5922 }
d3eb5eae 5923 gen_helper_fpop(cpu_env);
465e9838 5924 break;
2c0262af
FB
5925 default:
5926 switch(op >> 4) {
5927 case 0:
d3eb5eae 5928 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5929 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5930 s->mem_index, MO_LEUL);
2c0262af
FB
5931 break;
5932 case 1:
d3eb5eae 5933 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5934 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5935 s->mem_index, MO_LEUL);
2c0262af
FB
5936 break;
5937 case 2:
d3eb5eae 5938 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
3523e4bd
RH
5939 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5940 s->mem_index, MO_LEQ);
2c0262af
FB
5941 break;
5942 case 3:
5943 default:
d3eb5eae 5944 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5945 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5946 s->mem_index, MO_LEUW);
2c0262af
FB
5947 break;
5948 }
5949 if ((op & 7) == 3)
d3eb5eae 5950 gen_helper_fpop(cpu_env);
2c0262af
FB
5951 break;
5952 }
5953 break;
5954 case 0x0c: /* fldenv mem */
773cdfcc 5955 gen_update_cc_op(s);
19e6c4b8 5956 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 5957 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
2c0262af
FB
5958 break;
5959 case 0x0d: /* fldcw mem */
80b02013
RH
5960 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5961 s->mem_index, MO_LEUW);
d3eb5eae 5962 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5963 break;
5964 case 0x0e: /* fnstenv mem */
773cdfcc 5965 gen_update_cc_op(s);
19e6c4b8 5966 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 5967 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
2c0262af
FB
5968 break;
5969 case 0x0f: /* fnstcw mem */
d3eb5eae 5970 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5971 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5972 s->mem_index, MO_LEUW);
2c0262af
FB
5973 break;
5974 case 0x1d: /* fldt mem */
773cdfcc 5975 gen_update_cc_op(s);
19e6c4b8 5976 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 5977 gen_helper_fldt_ST0(cpu_env, cpu_A0);
2c0262af
FB
5978 break;
5979 case 0x1f: /* fstpt mem */
773cdfcc 5980 gen_update_cc_op(s);
19e6c4b8 5981 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae
BS
5982 gen_helper_fstt_ST0(cpu_env, cpu_A0);
5983 gen_helper_fpop(cpu_env);
2c0262af
FB
5984 break;
5985 case 0x2c: /* frstor mem */
773cdfcc 5986 gen_update_cc_op(s);
19e6c4b8 5987 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 5988 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
2c0262af
FB
5989 break;
5990 case 0x2e: /* fnsave mem */
773cdfcc 5991 gen_update_cc_op(s);
19e6c4b8 5992 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 5993 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
2c0262af
FB
5994 break;
5995 case 0x2f: /* fnstsw mem */
d3eb5eae 5996 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5997 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5998 s->mem_index, MO_LEUW);
2c0262af
FB
5999 break;
6000 case 0x3c: /* fbld */
773cdfcc 6001 gen_update_cc_op(s);
19e6c4b8 6002 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6003 gen_helper_fbld_ST0(cpu_env, cpu_A0);
2c0262af
FB
6004 break;
6005 case 0x3e: /* fbstp */
773cdfcc 6006 gen_update_cc_op(s);
19e6c4b8 6007 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae
BS
6008 gen_helper_fbst_ST0(cpu_env, cpu_A0);
6009 gen_helper_fpop(cpu_env);
2c0262af
FB
6010 break;
6011 case 0x3d: /* fildll */
3c5f4116 6012 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
d3eb5eae 6013 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
6014 break;
6015 case 0x3f: /* fistpll */
d3eb5eae 6016 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
3523e4bd 6017 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
d3eb5eae 6018 gen_helper_fpop(cpu_env);
2c0262af
FB
6019 break;
6020 default:
6021 goto illegal_op;
6022 }
6023 } else {
6024 /* register float ops */
6025 opreg = rm;
6026
6027 switch(op) {
6028 case 0x08: /* fld sti */
d3eb5eae
BS
6029 gen_helper_fpush(cpu_env);
6030 gen_helper_fmov_ST0_STN(cpu_env,
6031 tcg_const_i32((opreg + 1) & 7));
2c0262af
FB
6032 break;
6033 case 0x09: /* fxchg sti */
c169c906
FB
6034 case 0x29: /* fxchg4 sti, undocumented op */
6035 case 0x39: /* fxchg7 sti, undocumented op */
d3eb5eae 6036 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
2c0262af
FB
6037 break;
6038 case 0x0a: /* grp d9/2 */
6039 switch(rm) {
6040 case 0: /* fnop */
023fe10d 6041 /* check exceptions (FreeBSD FPU probe) */
773cdfcc 6042 gen_update_cc_op(s);
14ce26e7 6043 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6044 gen_helper_fwait(cpu_env);
2c0262af
FB
6045 break;
6046 default:
6047 goto illegal_op;
6048 }
6049 break;
6050 case 0x0c: /* grp d9/4 */
6051 switch(rm) {
6052 case 0: /* fchs */
d3eb5eae 6053 gen_helper_fchs_ST0(cpu_env);
2c0262af
FB
6054 break;
6055 case 1: /* fabs */
d3eb5eae 6056 gen_helper_fabs_ST0(cpu_env);
2c0262af
FB
6057 break;
6058 case 4: /* ftst */
d3eb5eae
BS
6059 gen_helper_fldz_FT0(cpu_env);
6060 gen_helper_fcom_ST0_FT0(cpu_env);
2c0262af
FB
6061 break;
6062 case 5: /* fxam */
d3eb5eae 6063 gen_helper_fxam_ST0(cpu_env);
2c0262af
FB
6064 break;
6065 default:
6066 goto illegal_op;
6067 }
6068 break;
6069 case 0x0d: /* grp d9/5 */
6070 {
6071 switch(rm) {
6072 case 0:
d3eb5eae
BS
6073 gen_helper_fpush(cpu_env);
6074 gen_helper_fld1_ST0(cpu_env);
2c0262af
FB
6075 break;
6076 case 1:
d3eb5eae
BS
6077 gen_helper_fpush(cpu_env);
6078 gen_helper_fldl2t_ST0(cpu_env);
2c0262af
FB
6079 break;
6080 case 2:
d3eb5eae
BS
6081 gen_helper_fpush(cpu_env);
6082 gen_helper_fldl2e_ST0(cpu_env);
2c0262af
FB
6083 break;
6084 case 3:
d3eb5eae
BS
6085 gen_helper_fpush(cpu_env);
6086 gen_helper_fldpi_ST0(cpu_env);
2c0262af
FB
6087 break;
6088 case 4:
d3eb5eae
BS
6089 gen_helper_fpush(cpu_env);
6090 gen_helper_fldlg2_ST0(cpu_env);
2c0262af
FB
6091 break;
6092 case 5:
d3eb5eae
BS
6093 gen_helper_fpush(cpu_env);
6094 gen_helper_fldln2_ST0(cpu_env);
2c0262af
FB
6095 break;
6096 case 6:
d3eb5eae
BS
6097 gen_helper_fpush(cpu_env);
6098 gen_helper_fldz_ST0(cpu_env);
2c0262af
FB
6099 break;
6100 default:
6101 goto illegal_op;
6102 }
6103 }
6104 break;
6105 case 0x0e: /* grp d9/6 */
6106 switch(rm) {
6107 case 0: /* f2xm1 */
d3eb5eae 6108 gen_helper_f2xm1(cpu_env);
2c0262af
FB
6109 break;
6110 case 1: /* fyl2x */
d3eb5eae 6111 gen_helper_fyl2x(cpu_env);
2c0262af
FB
6112 break;
6113 case 2: /* fptan */
d3eb5eae 6114 gen_helper_fptan(cpu_env);
2c0262af
FB
6115 break;
6116 case 3: /* fpatan */
d3eb5eae 6117 gen_helper_fpatan(cpu_env);
2c0262af
FB
6118 break;
6119 case 4: /* fxtract */
d3eb5eae 6120 gen_helper_fxtract(cpu_env);
2c0262af
FB
6121 break;
6122 case 5: /* fprem1 */
d3eb5eae 6123 gen_helper_fprem1(cpu_env);
2c0262af
FB
6124 break;
6125 case 6: /* fdecstp */
d3eb5eae 6126 gen_helper_fdecstp(cpu_env);
2c0262af
FB
6127 break;
6128 default:
6129 case 7: /* fincstp */
d3eb5eae 6130 gen_helper_fincstp(cpu_env);
2c0262af
FB
6131 break;
6132 }
6133 break;
6134 case 0x0f: /* grp d9/7 */
6135 switch(rm) {
6136 case 0: /* fprem */
d3eb5eae 6137 gen_helper_fprem(cpu_env);
2c0262af
FB
6138 break;
6139 case 1: /* fyl2xp1 */
d3eb5eae 6140 gen_helper_fyl2xp1(cpu_env);
2c0262af
FB
6141 break;
6142 case 2: /* fsqrt */
d3eb5eae 6143 gen_helper_fsqrt(cpu_env);
2c0262af
FB
6144 break;
6145 case 3: /* fsincos */
d3eb5eae 6146 gen_helper_fsincos(cpu_env);
2c0262af
FB
6147 break;
6148 case 5: /* fscale */
d3eb5eae 6149 gen_helper_fscale(cpu_env);
2c0262af
FB
6150 break;
6151 case 4: /* frndint */
d3eb5eae 6152 gen_helper_frndint(cpu_env);
2c0262af
FB
6153 break;
6154 case 6: /* fsin */
d3eb5eae 6155 gen_helper_fsin(cpu_env);
2c0262af
FB
6156 break;
6157 default:
6158 case 7: /* fcos */
d3eb5eae 6159 gen_helper_fcos(cpu_env);
2c0262af
FB
6160 break;
6161 }
6162 break;
6163 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6164 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6165 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6166 {
6167 int op1;
3b46e624 6168
2c0262af
FB
6169 op1 = op & 7;
6170 if (op >= 0x20) {
a7812ae4 6171 gen_helper_fp_arith_STN_ST0(op1, opreg);
2c0262af 6172 if (op >= 0x30)
d3eb5eae 6173 gen_helper_fpop(cpu_env);
2c0262af 6174 } else {
d3eb5eae 6175 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
a7812ae4 6176 gen_helper_fp_arith_ST0_FT0(op1);
2c0262af
FB
6177 }
6178 }
6179 break;
6180 case 0x02: /* fcom */
c169c906 6181 case 0x22: /* fcom2, undocumented op */
d3eb5eae
BS
6182 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6183 gen_helper_fcom_ST0_FT0(cpu_env);
2c0262af
FB
6184 break;
6185 case 0x03: /* fcomp */
c169c906
FB
6186 case 0x23: /* fcomp3, undocumented op */
6187 case 0x32: /* fcomp5, undocumented op */
d3eb5eae
BS
6188 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6189 gen_helper_fcom_ST0_FT0(cpu_env);
6190 gen_helper_fpop(cpu_env);
2c0262af
FB
6191 break;
6192 case 0x15: /* da/5 */
6193 switch(rm) {
6194 case 1: /* fucompp */
d3eb5eae
BS
6195 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6196 gen_helper_fucom_ST0_FT0(cpu_env);
6197 gen_helper_fpop(cpu_env);
6198 gen_helper_fpop(cpu_env);
2c0262af
FB
6199 break;
6200 default:
6201 goto illegal_op;
6202 }
6203 break;
6204 case 0x1c:
6205 switch(rm) {
6206 case 0: /* feni (287 only, just do nop here) */
6207 break;
6208 case 1: /* fdisi (287 only, just do nop here) */
6209 break;
6210 case 2: /* fclex */
d3eb5eae 6211 gen_helper_fclex(cpu_env);
2c0262af
FB
6212 break;
6213 case 3: /* fninit */
d3eb5eae 6214 gen_helper_fninit(cpu_env);
2c0262af
FB
6215 break;
6216 case 4: /* fsetpm (287 only, just do nop here) */
6217 break;
6218 default:
6219 goto illegal_op;
6220 }
6221 break;
6222 case 0x1d: /* fucomi */
bff93281
PM
6223 if (!(s->cpuid_features & CPUID_CMOV)) {
6224 goto illegal_op;
6225 }
773cdfcc 6226 gen_update_cc_op(s);
d3eb5eae
BS
6227 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6228 gen_helper_fucomi_ST0_FT0(cpu_env);
3ca51d07 6229 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6230 break;
6231 case 0x1e: /* fcomi */
bff93281
PM
6232 if (!(s->cpuid_features & CPUID_CMOV)) {
6233 goto illegal_op;
6234 }
773cdfcc 6235 gen_update_cc_op(s);
d3eb5eae
BS
6236 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6237 gen_helper_fcomi_ST0_FT0(cpu_env);
3ca51d07 6238 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 6239 break;
658c8bda 6240 case 0x28: /* ffree sti */
d3eb5eae 6241 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
5fafdf24 6242 break;
2c0262af 6243 case 0x2a: /* fst sti */
d3eb5eae 6244 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
2c0262af
FB
6245 break;
6246 case 0x2b: /* fstp sti */
c169c906
FB
6247 case 0x0b: /* fstp1 sti, undocumented op */
6248 case 0x3a: /* fstp8 sti, undocumented op */
6249 case 0x3b: /* fstp9 sti, undocumented op */
d3eb5eae
BS
6250 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6251 gen_helper_fpop(cpu_env);
2c0262af
FB
6252 break;
6253 case 0x2c: /* fucom st(i) */
d3eb5eae
BS
6254 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6255 gen_helper_fucom_ST0_FT0(cpu_env);
2c0262af
FB
6256 break;
6257 case 0x2d: /* fucomp st(i) */
d3eb5eae
BS
6258 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6259 gen_helper_fucom_ST0_FT0(cpu_env);
6260 gen_helper_fpop(cpu_env);
2c0262af
FB
6261 break;
6262 case 0x33: /* de/3 */
6263 switch(rm) {
6264 case 1: /* fcompp */
d3eb5eae
BS
6265 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6266 gen_helper_fcom_ST0_FT0(cpu_env);
6267 gen_helper_fpop(cpu_env);
6268 gen_helper_fpop(cpu_env);
2c0262af
FB
6269 break;
6270 default:
6271 goto illegal_op;
6272 }
6273 break;
c169c906 6274 case 0x38: /* ffreep sti, undocumented op */
d3eb5eae
BS
6275 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6276 gen_helper_fpop(cpu_env);
c169c906 6277 break;
2c0262af
FB
6278 case 0x3c: /* df/4 */
6279 switch(rm) {
6280 case 0:
d3eb5eae 6281 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
b6abf97d 6282 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4ba9938c 6283 gen_op_mov_reg_T0(MO_16, R_EAX);
2c0262af
FB
6284 break;
6285 default:
6286 goto illegal_op;
6287 }
6288 break;
6289 case 0x3d: /* fucomip */
bff93281
PM
6290 if (!(s->cpuid_features & CPUID_CMOV)) {
6291 goto illegal_op;
6292 }
773cdfcc 6293 gen_update_cc_op(s);
d3eb5eae
BS
6294 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6295 gen_helper_fucomi_ST0_FT0(cpu_env);
6296 gen_helper_fpop(cpu_env);
3ca51d07 6297 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6298 break;
6299 case 0x3e: /* fcomip */
bff93281
PM
6300 if (!(s->cpuid_features & CPUID_CMOV)) {
6301 goto illegal_op;
6302 }
773cdfcc 6303 gen_update_cc_op(s);
d3eb5eae
BS
6304 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6305 gen_helper_fcomi_ST0_FT0(cpu_env);
6306 gen_helper_fpop(cpu_env);
3ca51d07 6307 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 6308 break;
a2cc3b24
FB
6309 case 0x10 ... 0x13: /* fcmovxx */
6310 case 0x18 ... 0x1b:
6311 {
19e6c4b8 6312 int op1, l1;
d70040bc 6313 static const uint8_t fcmov_cc[8] = {
a2cc3b24
FB
6314 (JCC_B << 1),
6315 (JCC_Z << 1),
6316 (JCC_BE << 1),
6317 (JCC_P << 1),
6318 };
bff93281
PM
6319
6320 if (!(s->cpuid_features & CPUID_CMOV)) {
6321 goto illegal_op;
6322 }
1e4840bf 6323 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
19e6c4b8 6324 l1 = gen_new_label();
dc259201 6325 gen_jcc1_noeob(s, op1, l1);
d3eb5eae 6326 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
19e6c4b8 6327 gen_set_label(l1);
a2cc3b24
FB
6328 }
6329 break;
2c0262af
FB
6330 default:
6331 goto illegal_op;
6332 }
6333 }
6334 break;
6335 /************************/
6336 /* string ops */
6337
6338 case 0xa4: /* movsS */
6339 case 0xa5:
6340 if ((b & 1) == 0)
4ba9938c 6341 ot = MO_8;
2c0262af 6342 else
4ba9938c 6343 ot = dflag + MO_16;
2c0262af
FB
6344
6345 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6346 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6347 } else {
6348 gen_movs(s, ot);
6349 }
6350 break;
3b46e624 6351
2c0262af
FB
6352 case 0xaa: /* stosS */
6353 case 0xab:
6354 if ((b & 1) == 0)
4ba9938c 6355 ot = MO_8;
2c0262af 6356 else
4ba9938c 6357 ot = dflag + MO_16;
2c0262af
FB
6358
6359 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6360 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6361 } else {
6362 gen_stos(s, ot);
6363 }
6364 break;
6365 case 0xac: /* lodsS */
6366 case 0xad:
6367 if ((b & 1) == 0)
4ba9938c 6368 ot = MO_8;
2c0262af 6369 else
4ba9938c 6370 ot = dflag + MO_16;
2c0262af
FB
6371 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6372 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6373 } else {
6374 gen_lods(s, ot);
6375 }
6376 break;
6377 case 0xae: /* scasS */
6378 case 0xaf:
6379 if ((b & 1) == 0)
4ba9938c 6380 ot = MO_8;
2c0262af 6381 else
4ba9938c 6382 ot = dflag + MO_16;
2c0262af
FB
6383 if (prefixes & PREFIX_REPNZ) {
6384 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6385 } else if (prefixes & PREFIX_REPZ) {
6386 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6387 } else {
6388 gen_scas(s, ot);
2c0262af
FB
6389 }
6390 break;
6391
6392 case 0xa6: /* cmpsS */
6393 case 0xa7:
6394 if ((b & 1) == 0)
4ba9938c 6395 ot = MO_8;
2c0262af 6396 else
4ba9938c 6397 ot = dflag + MO_16;
2c0262af
FB
6398 if (prefixes & PREFIX_REPNZ) {
6399 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6400 } else if (prefixes & PREFIX_REPZ) {
6401 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6402 } else {
6403 gen_cmps(s, ot);
2c0262af
FB
6404 }
6405 break;
6406 case 0x6c: /* insS */
6407 case 0x6d:
f115e911 6408 if ((b & 1) == 0)
4ba9938c 6409 ot = MO_8;
f115e911 6410 else
4ba9938c 6411 ot = dflag ? MO_32 : MO_16;
40b90233 6412 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
b8b6a50b
FB
6413 gen_check_io(s, ot, pc_start - s->cs_base,
6414 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
f115e911
FB
6415 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6416 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
2c0262af 6417 } else {
f115e911 6418 gen_ins(s, ot);
2e70f6ef
PB
6419 if (use_icount) {
6420 gen_jmp(s, s->pc - s->cs_base);
6421 }
2c0262af
FB
6422 }
6423 break;
6424 case 0x6e: /* outsS */
6425 case 0x6f:
f115e911 6426 if ((b & 1) == 0)
4ba9938c 6427 ot = MO_8;
f115e911 6428 else
4ba9938c 6429 ot = dflag ? MO_32 : MO_16;
40b90233 6430 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
b8b6a50b
FB
6431 gen_check_io(s, ot, pc_start - s->cs_base,
6432 svm_is_rep(prefixes) | 4);
f115e911
FB
6433 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6434 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
2c0262af 6435 } else {
f115e911 6436 gen_outs(s, ot);
2e70f6ef
PB
6437 if (use_icount) {
6438 gen_jmp(s, s->pc - s->cs_base);
6439 }
2c0262af
FB
6440 }
6441 break;
6442
6443 /************************/
6444 /* port I/O */
0573fbfc 6445
2c0262af
FB
6446 case 0xe4:
6447 case 0xe5:
f115e911 6448 if ((b & 1) == 0)
4ba9938c 6449 ot = MO_8;
f115e911 6450 else
4ba9938c 6451 ot = dflag ? MO_32 : MO_16;
0af10c86 6452 val = cpu_ldub_code(env, s->pc++);
b8b6a50b
FB
6453 gen_check_io(s, ot, pc_start - s->cs_base,
6454 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
2e70f6ef
PB
6455 if (use_icount)
6456 gen_io_start();
1b90d56e 6457 tcg_gen_movi_i32(cpu_tmp2_i32, val);
a7812ae4 6458 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
57fec1fe 6459 gen_op_mov_reg_T1(ot, R_EAX);
2e70f6ef
PB
6460 if (use_icount) {
6461 gen_io_end();
6462 gen_jmp(s, s->pc - s->cs_base);
6463 }
2c0262af
FB
6464 break;
6465 case 0xe6:
6466 case 0xe7:
f115e911 6467 if ((b & 1) == 0)
4ba9938c 6468 ot = MO_8;
f115e911 6469 else
4ba9938c 6470 ot = dflag ? MO_32 : MO_16;
0af10c86 6471 val = cpu_ldub_code(env, s->pc++);
b8b6a50b
FB
6472 gen_check_io(s, ot, pc_start - s->cs_base,
6473 svm_is_rep(prefixes));
57fec1fe 6474 gen_op_mov_TN_reg(ot, 1, R_EAX);
b8b6a50b 6475
2e70f6ef
PB
6476 if (use_icount)
6477 gen_io_start();
1b90d56e 6478 tcg_gen_movi_i32(cpu_tmp2_i32, val);
b6abf97d 6479 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
a7812ae4 6480 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
2e70f6ef
PB
6481 if (use_icount) {
6482 gen_io_end();
6483 gen_jmp(s, s->pc - s->cs_base);
6484 }
2c0262af
FB
6485 break;
6486 case 0xec:
6487 case 0xed:
f115e911 6488 if ((b & 1) == 0)
4ba9938c 6489 ot = MO_8;
f115e911 6490 else
4ba9938c 6491 ot = dflag ? MO_32 : MO_16;
40b90233 6492 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
b8b6a50b
FB
6493 gen_check_io(s, ot, pc_start - s->cs_base,
6494 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
2e70f6ef
PB
6495 if (use_icount)
6496 gen_io_start();
b6abf97d 6497 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
a7812ae4 6498 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
57fec1fe 6499 gen_op_mov_reg_T1(ot, R_EAX);
2e70f6ef
PB
6500 if (use_icount) {
6501 gen_io_end();
6502 gen_jmp(s, s->pc - s->cs_base);
6503 }
2c0262af
FB
6504 break;
6505 case 0xee:
6506 case 0xef:
f115e911 6507 if ((b & 1) == 0)
4ba9938c 6508 ot = MO_8;
f115e911 6509 else
4ba9938c 6510 ot = dflag ? MO_32 : MO_16;
40b90233 6511 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
b8b6a50b
FB
6512 gen_check_io(s, ot, pc_start - s->cs_base,
6513 svm_is_rep(prefixes));
57fec1fe 6514 gen_op_mov_TN_reg(ot, 1, R_EAX);
b8b6a50b 6515
2e70f6ef
PB
6516 if (use_icount)
6517 gen_io_start();
b6abf97d 6518 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
b6abf97d 6519 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
a7812ae4 6520 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
2e70f6ef
PB
6521 if (use_icount) {
6522 gen_io_end();
6523 gen_jmp(s, s->pc - s->cs_base);
6524 }
2c0262af
FB
6525 break;
6526
6527 /************************/
6528 /* control */
6529 case 0xc2: /* ret im */
0af10c86 6530 val = cpu_ldsw_code(env, s->pc);
2c0262af
FB
6531 s->pc += 2;
6532 gen_pop_T0(s);
8f091a59
FB
6533 if (CODE64(s) && s->dflag)
6534 s->dflag = 2;
2c0262af 6535 gen_stack_update(s, val + (2 << s->dflag));
40b90233
RH
6536 if (s->dflag == 0) {
6537 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
6538 }
2c0262af
FB
6539 gen_op_jmp_T0();
6540 gen_eob(s);
6541 break;
6542 case 0xc3: /* ret */
6543 gen_pop_T0(s);
6544 gen_pop_update(s);
40b90233
RH
6545 if (s->dflag == 0) {
6546 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
6547 }
2c0262af
FB
6548 gen_op_jmp_T0();
6549 gen_eob(s);
6550 break;
6551 case 0xca: /* lret im */
0af10c86 6552 val = cpu_ldsw_code(env, s->pc);
2c0262af
FB
6553 s->pc += 2;
6554 do_lret:
6555 if (s->pe && !s->vm86) {
773cdfcc 6556 gen_update_cc_op(s);
14ce26e7 6557 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 6558 gen_helper_lret_protected(cpu_env, tcg_const_i32(s->dflag),
a7812ae4 6559 tcg_const_i32(val));
2c0262af
FB
6560 } else {
6561 gen_stack_A0(s);
6562 /* pop offset */
40b90233 6563 gen_op_ld_v(s, MO_16 + s->dflag, cpu_T[0], cpu_A0);
2c0262af
FB
6564 /* NOTE: keeping EIP updated is not a problem in case of
6565 exception */
6566 gen_op_jmp_T0();
6567 /* pop selector */
6568 gen_op_addl_A0_im(2 << s->dflag);
40b90233 6569 gen_op_ld_v(s, MO_16 + s->dflag, cpu_T[0], cpu_A0);
3bd7da9e 6570 gen_op_movl_seg_T0_vm(R_CS);
2c0262af
FB
6571 /* add stack offset */
6572 gen_stack_update(s, val + (4 << s->dflag));
6573 }
6574 gen_eob(s);
6575 break;
6576 case 0xcb: /* lret */
6577 val = 0;
6578 goto do_lret;
6579 case 0xcf: /* iret */
872929aa 6580 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
2c0262af
FB
6581 if (!s->pe) {
6582 /* real mode */
2999a0b2 6583 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
3ca51d07 6584 set_cc_op(s, CC_OP_EFLAGS);
f115e911
FB
6585 } else if (s->vm86) {
6586 if (s->iopl != 3) {
6587 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6588 } else {
2999a0b2 6589 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
3ca51d07 6590 set_cc_op(s, CC_OP_EFLAGS);
f115e911 6591 }
2c0262af 6592 } else {
773cdfcc 6593 gen_update_cc_op(s);
14ce26e7 6594 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 6595 gen_helper_iret_protected(cpu_env, tcg_const_i32(s->dflag),
a7812ae4 6596 tcg_const_i32(s->pc - s->cs_base));
3ca51d07 6597 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6598 }
6599 gen_eob(s);
6600 break;
6601 case 0xe8: /* call im */
6602 {
14ce26e7 6603 if (dflag)
4ba9938c 6604 tval = (int32_t)insn_get(env, s, MO_32);
14ce26e7 6605 else
4ba9938c 6606 tval = (int16_t)insn_get(env, s, MO_16);
2c0262af 6607 next_eip = s->pc - s->cs_base;
14ce26e7 6608 tval += next_eip;
2c0262af 6609 if (s->dflag == 0)
14ce26e7 6610 tval &= 0xffff;
99596385
AJ
6611 else if(!CODE64(s))
6612 tval &= 0xffffffff;
cc0bce88 6613 tcg_gen_movi_tl(cpu_T[0], next_eip);
2c0262af 6614 gen_push_T0(s);
14ce26e7 6615 gen_jmp(s, tval);
2c0262af
FB
6616 }
6617 break;
6618 case 0x9a: /* lcall im */
6619 {
6620 unsigned int selector, offset;
3b46e624 6621
14ce26e7
FB
6622 if (CODE64(s))
6623 goto illegal_op;
4ba9938c 6624 ot = dflag ? MO_32 : MO_16;
0af10c86 6625 offset = insn_get(env, s, ot);
4ba9938c 6626 selector = insn_get(env, s, MO_16);
3b46e624 6627
1b90d56e 6628 tcg_gen_movi_tl(cpu_T[0], selector);
0ae657b1 6629 tcg_gen_movi_tl(cpu_T[1], offset);
2c0262af
FB
6630 }
6631 goto do_lcall;
ecada8a2 6632 case 0xe9: /* jmp im */
14ce26e7 6633 if (dflag)
4ba9938c 6634 tval = (int32_t)insn_get(env, s, MO_32);
14ce26e7 6635 else
4ba9938c 6636 tval = (int16_t)insn_get(env, s, MO_16);
14ce26e7 6637 tval += s->pc - s->cs_base;
2c0262af 6638 if (s->dflag == 0)
14ce26e7 6639 tval &= 0xffff;
32938e12
AJ
6640 else if(!CODE64(s))
6641 tval &= 0xffffffff;
14ce26e7 6642 gen_jmp(s, tval);
2c0262af
FB
6643 break;
6644 case 0xea: /* ljmp im */
6645 {
6646 unsigned int selector, offset;
6647
14ce26e7
FB
6648 if (CODE64(s))
6649 goto illegal_op;
4ba9938c 6650 ot = dflag ? MO_32 : MO_16;
0af10c86 6651 offset = insn_get(env, s, ot);
4ba9938c 6652 selector = insn_get(env, s, MO_16);
3b46e624 6653
1b90d56e 6654 tcg_gen_movi_tl(cpu_T[0], selector);
0ae657b1 6655 tcg_gen_movi_tl(cpu_T[1], offset);
2c0262af
FB
6656 }
6657 goto do_ljmp;
6658 case 0xeb: /* jmp Jb */
4ba9938c 6659 tval = (int8_t)insn_get(env, s, MO_8);
14ce26e7 6660 tval += s->pc - s->cs_base;
2c0262af 6661 if (s->dflag == 0)
14ce26e7
FB
6662 tval &= 0xffff;
6663 gen_jmp(s, tval);
2c0262af
FB
6664 break;
6665 case 0x70 ... 0x7f: /* jcc Jb */
4ba9938c 6666 tval = (int8_t)insn_get(env, s, MO_8);
2c0262af
FB
6667 goto do_jcc;
6668 case 0x180 ... 0x18f: /* jcc Jv */
6669 if (dflag) {
4ba9938c 6670 tval = (int32_t)insn_get(env, s, MO_32);
2c0262af 6671 } else {
4ba9938c 6672 tval = (int16_t)insn_get(env, s, MO_16);
2c0262af
FB
6673 }
6674 do_jcc:
6675 next_eip = s->pc - s->cs_base;
14ce26e7 6676 tval += next_eip;
2c0262af 6677 if (s->dflag == 0)
14ce26e7
FB
6678 tval &= 0xffff;
6679 gen_jcc(s, b, tval, next_eip);
2c0262af
FB
6680 break;
6681
6682 case 0x190 ... 0x19f: /* setcc Gv */
0af10c86 6683 modrm = cpu_ldub_code(env, s->pc++);
cc8b6f5b 6684 gen_setcc1(s, b, cpu_T[0]);
4ba9938c 6685 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
2c0262af
FB
6686 break;
6687 case 0x140 ... 0x14f: /* cmov Gv, Ev */
bff93281
PM
6688 if (!(s->cpuid_features & CPUID_CMOV)) {
6689 goto illegal_op;
6690 }
4ba9938c 6691 ot = dflag + MO_16;
f32d3781
PB
6692 modrm = cpu_ldub_code(env, s->pc++);
6693 reg = ((modrm >> 3) & 7) | rex_r;
6694 gen_cmovcc1(env, s, ot, b, modrm, reg);
2c0262af 6695 break;
3b46e624 6696
2c0262af
FB
6697 /************************/
6698 /* flags */
6699 case 0x9c: /* pushf */
872929aa 6700 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
2c0262af
FB
6701 if (s->vm86 && s->iopl != 3) {
6702 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6703 } else {
773cdfcc 6704 gen_update_cc_op(s);
f0967a1a 6705 gen_helper_read_eflags(cpu_T[0], cpu_env);
2c0262af
FB
6706 gen_push_T0(s);
6707 }
6708 break;
6709 case 0x9d: /* popf */
872929aa 6710 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
2c0262af
FB
6711 if (s->vm86 && s->iopl != 3) {
6712 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6713 } else {
6714 gen_pop_T0(s);
6715 if (s->cpl == 0) {
6716 if (s->dflag) {
f0967a1a
BS
6717 gen_helper_write_eflags(cpu_env, cpu_T[0],
6718 tcg_const_i32((TF_MASK | AC_MASK |
6719 ID_MASK | NT_MASK |
6720 IF_MASK |
6721 IOPL_MASK)));
2c0262af 6722 } else {
f0967a1a
BS
6723 gen_helper_write_eflags(cpu_env, cpu_T[0],
6724 tcg_const_i32((TF_MASK | AC_MASK |
6725 ID_MASK | NT_MASK |
6726 IF_MASK | IOPL_MASK)
6727 & 0xffff));
2c0262af
FB
6728 }
6729 } else {
4136f33c
FB
6730 if (s->cpl <= s->iopl) {
6731 if (s->dflag) {
f0967a1a
BS
6732 gen_helper_write_eflags(cpu_env, cpu_T[0],
6733 tcg_const_i32((TF_MASK |
6734 AC_MASK |
6735 ID_MASK |
6736 NT_MASK |
6737 IF_MASK)));
4136f33c 6738 } else {
f0967a1a
BS
6739 gen_helper_write_eflags(cpu_env, cpu_T[0],
6740 tcg_const_i32((TF_MASK |
6741 AC_MASK |
6742 ID_MASK |
6743 NT_MASK |
6744 IF_MASK)
6745 & 0xffff));
4136f33c 6746 }
2c0262af 6747 } else {
4136f33c 6748 if (s->dflag) {
f0967a1a
BS
6749 gen_helper_write_eflags(cpu_env, cpu_T[0],
6750 tcg_const_i32((TF_MASK | AC_MASK |
6751 ID_MASK | NT_MASK)));
4136f33c 6752 } else {
f0967a1a
BS
6753 gen_helper_write_eflags(cpu_env, cpu_T[0],
6754 tcg_const_i32((TF_MASK | AC_MASK |
6755 ID_MASK | NT_MASK)
6756 & 0xffff));
4136f33c 6757 }
2c0262af
FB
6758 }
6759 }
6760 gen_pop_update(s);
3ca51d07 6761 set_cc_op(s, CC_OP_EFLAGS);
a9321a4d 6762 /* abort translation because TF/AC flag may change */
14ce26e7 6763 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
6764 gen_eob(s);
6765 }
6766 break;
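/*
 * Editorial sketch, not part of translate.c: which EFLAGS bits the POPF
 * paths above allow to be written, as a function of CPL, IOPL and the
 * operand size.  The bit values match the architectural EFLAGS layout;
 * the function name is hypothetical.
 */
static unsigned popf_write_mask_sketch(int cpl, int iopl, int dflag)
{
    const unsigned TF = 0x100, IF = 0x200, IOPL = 0x3000, NT = 0x4000,
                   AC = 0x40000, ID = 0x200000;
    unsigned mask = TF | AC | ID | NT;          /* always writable */

    if (cpl == 0) {
        mask |= IF | IOPL;                      /* ring 0 may also change IF and IOPL */
    } else if (cpl <= iopl) {
        mask |= IF;                             /* enough privilege for IF only */
    }
    return dflag ? mask : (mask & 0xffff);      /* 16-bit POPF touches only the low word */
}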
6767 case 0x9e: /* sahf */
12e26b75 6768 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
14ce26e7 6769 goto illegal_op;
4ba9938c 6770 gen_op_mov_TN_reg(MO_8, 0, R_AH);
d229edce 6771 gen_compute_eflags(s);
bd7a7b33
FB
6772 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6773 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6774 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
2c0262af
FB
6775 break;
6776 case 0x9f: /* lahf */
12e26b75 6777 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
14ce26e7 6778 goto illegal_op;
d229edce 6779 gen_compute_eflags(s);
bd7a7b33 6780 /* Note: gen_compute_eflags() only gives the condition codes */
d229edce 6781 tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
4ba9938c 6782 gen_op_mov_reg_T0(MO_8, R_AH);
2c0262af
FB
6783 break;
6784 case 0xf5: /* cmc */
d229edce 6785 gen_compute_eflags(s);
bd7a7b33 6786 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
2c0262af
FB
6787 break;
6788 case 0xf8: /* clc */
d229edce 6789 gen_compute_eflags(s);
bd7a7b33 6790 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
2c0262af
FB
6791 break;
6792 case 0xf9: /* stc */
d229edce 6793 gen_compute_eflags(s);
bd7a7b33 6794 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
2c0262af
FB
6795 break;
6796 case 0xfc: /* cld */
b6abf97d 6797 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
317ac620 6798 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
2c0262af
FB
6799 break;
6800 case 0xfd: /* std */
b6abf97d 6801 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
317ac620 6802 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
2c0262af
FB
6803 break;
6804
6805 /************************/
6806 /* bit operations */
6807 case 0x1ba: /* bt/bts/btr/btc Gv, im */
4ba9938c 6808 ot = dflag + MO_16;
0af10c86 6809 modrm = cpu_ldub_code(env, s->pc++);
33698e5f 6810 op = (modrm >> 3) & 7;
2c0262af 6811 mod = (modrm >> 6) & 3;
14ce26e7 6812 rm = (modrm & 7) | REX_B(s);
2c0262af 6813 if (mod != 3) {
14ce26e7 6814 s->rip_offset = 1;
4eeb3939 6815 gen_lea_modrm(env, s, modrm);
909be183 6816 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 6817 } else {
57fec1fe 6818 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af
FB
6819 }
6820 /* load shift */
0af10c86 6821 val = cpu_ldub_code(env, s->pc++);
0ae657b1 6822 tcg_gen_movi_tl(cpu_T[1], val);
2c0262af
FB
6823 if (op < 4)
6824 goto illegal_op;
6825 op -= 4;
f484d386 6826 goto bt_op;
2c0262af
FB
6827 case 0x1a3: /* bt Gv, Ev */
6828 op = 0;
6829 goto do_btx;
6830 case 0x1ab: /* bts */
6831 op = 1;
6832 goto do_btx;
6833 case 0x1b3: /* btr */
6834 op = 2;
6835 goto do_btx;
6836 case 0x1bb: /* btc */
6837 op = 3;
6838 do_btx:
4ba9938c 6839 ot = dflag + MO_16;
0af10c86 6840 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 6841 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 6842 mod = (modrm >> 6) & 3;
14ce26e7 6843 rm = (modrm & 7) | REX_B(s);
4ba9938c 6844 gen_op_mov_TN_reg(MO_32, 1, reg);
2c0262af 6845 if (mod != 3) {
4eeb3939 6846 gen_lea_modrm(env, s, modrm);
2c0262af 6847 /* specific case: we need to add a displacement */
f484d386
FB
6848 gen_exts(ot, cpu_T[1]);
6849 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6850 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6851 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
909be183 6852 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 6853 } else {
57fec1fe 6854 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af 6855 }
f484d386
FB
6856 bt_op:
6857 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6858 switch(op) {
6859 case 0:
6860 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
6861 tcg_gen_movi_tl(cpu_cc_dst, 0);
6862 break;
6863 case 1:
6864 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6865 tcg_gen_movi_tl(cpu_tmp0, 1);
6866 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6867 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6868 break;
6869 case 2:
6870 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6871 tcg_gen_movi_tl(cpu_tmp0, 1);
6872 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6873 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6874 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6875 break;
6876 default:
6877 case 3:
6878 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6879 tcg_gen_movi_tl(cpu_tmp0, 1);
6880 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6881 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6882 break;
6883 }
3ca51d07 6884 set_cc_op(s, CC_OP_SARB + ot);
2c0262af 6885 if (op != 0) {
fd8ca9f6
RH
6886 if (mod != 3) {
6887 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
6888 } else {
57fec1fe 6889 gen_op_mov_reg_T0(ot, rm);
fd8ca9f6 6890 }
f484d386
FB
6891 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6892 tcg_gen_movi_tl(cpu_cc_dst, 0);
2c0262af
FB
6893 }
6894 break;
321c5351
RH
6895 case 0x1bc: /* bsf / tzcnt */
6896 case 0x1bd: /* bsr / lzcnt */
4ba9938c 6897 ot = dflag + MO_16;
321c5351
RH
6898 modrm = cpu_ldub_code(env, s->pc++);
6899 reg = ((modrm >> 3) & 7) | rex_r;
6900 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6901 gen_extu(ot, cpu_T[0]);
6902
6903 /* Note that lzcnt and tzcnt are in different extensions. */
6904 if ((prefixes & PREFIX_REPZ)
6905 && (b & 1
6906 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
6907 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
6908 int size = 8 << ot;
6909 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
6910 if (b & 1) {
6911 /* For lzcnt, reduce the target_ulong result by the
6912 number of zeros that we expect to find at the top. */
6913 gen_helper_clz(cpu_T[0], cpu_T[0]);
6914 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
6191b059 6915 } else {
321c5351
RH
6916 /* For tzcnt, a zero input must return the operand size:
6917 force all bits outside the operand size to 1. */
6918 target_ulong mask = (target_ulong)-2 << (size - 1);
6919 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
6920 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6191b059 6921 }
321c5351
RH
6922 /* For lzcnt/tzcnt, C and Z bits are defined and are
6923 related to the result. */
6924 gen_op_update1_cc();
6925 set_cc_op(s, CC_OP_BMILGB + ot);
6926 } else {
6927 /* For bsr/bsf, only the Z bit is defined and it is related
6928 to the input and not the result. */
6929 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
6930 set_cc_op(s, CC_OP_LOGICB + ot);
6931 if (b & 1) {
6932 /* For bsr, return the bit index of the first 1 bit,
6933 not the count of leading zeros. */
6934 gen_helper_clz(cpu_T[0], cpu_T[0]);
6935 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
6936 } else {
6937 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6938 }
6939 /* ??? The manual says that the output is undefined when the
6940 input is zero, but real hardware leaves it unchanged, and
6941 real programs appear to depend on that. */
6942 tcg_gen_movi_tl(cpu_tmp0, 0);
6943 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
6944 cpu_regs[reg], cpu_T[0]);
6191b059 6945 }
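            /* Worked examples (added note, not from the original source):
               for a 16-bit lzcnt of 0x0008, the clz helper returns 60 and
               60 - (64 - 16) == 12, the expected count; for a 16-bit tzcnt
               of 0, OR-ing with (target_ulong)-2 << 15 sets bits 16 and up,
               so the ctz helper returns 16, the operand size, as required.
               (Figures assume a 64-bit target_ulong.) */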
321c5351 6946 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
6947 break;
6948 /************************/
6949 /* bcd */
6950 case 0x27: /* daa */
14ce26e7
FB
6951 if (CODE64(s))
6952 goto illegal_op;
773cdfcc 6953 gen_update_cc_op(s);
7923057b 6954 gen_helper_daa(cpu_env);
3ca51d07 6955 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6956 break;
6957 case 0x2f: /* das */
14ce26e7
FB
6958 if (CODE64(s))
6959 goto illegal_op;
773cdfcc 6960 gen_update_cc_op(s);
7923057b 6961 gen_helper_das(cpu_env);
3ca51d07 6962 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6963 break;
6964 case 0x37: /* aaa */
14ce26e7
FB
6965 if (CODE64(s))
6966 goto illegal_op;
773cdfcc 6967 gen_update_cc_op(s);
7923057b 6968 gen_helper_aaa(cpu_env);
3ca51d07 6969 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6970 break;
6971 case 0x3f: /* aas */
14ce26e7
FB
6972 if (CODE64(s))
6973 goto illegal_op;
773cdfcc 6974 gen_update_cc_op(s);
7923057b 6975 gen_helper_aas(cpu_env);
3ca51d07 6976 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6977 break;
6978 case 0xd4: /* aam */
14ce26e7
FB
6979 if (CODE64(s))
6980 goto illegal_op;
0af10c86 6981 val = cpu_ldub_code(env, s->pc++);
b6d7c3db
TS
6982 if (val == 0) {
6983 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6984 } else {
7923057b 6985 gen_helper_aam(cpu_env, tcg_const_i32(val));
3ca51d07 6986 set_cc_op(s, CC_OP_LOGICB);
b6d7c3db 6987 }
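            /* Added note (not from the original source): AAM divides AL by
               the immediate (10 for the plain "aam" encoding), putting the
               quotient in AH and the remainder in AL; e.g. AL=0x3F with the
               default base gives AH=6, AL=3.  A zero immediate raises #DE,
               handled above. */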
2c0262af
FB
6988 break;
6989 case 0xd5: /* aad */
14ce26e7
FB
6990 if (CODE64(s))
6991 goto illegal_op;
0af10c86 6992 val = cpu_ldub_code(env, s->pc++);
7923057b 6993 gen_helper_aad(cpu_env, tcg_const_i32(val));
3ca51d07 6994 set_cc_op(s, CC_OP_LOGICB);
2c0262af
FB
6995 break;
6996 /************************/
6997 /* misc */
6998 case 0x90: /* nop */
ab1f142b 6999 /* XXX: check the lock prefix correctly for all insns */
7418027e 7000 if (prefixes & PREFIX_LOCK) {
ab1f142b 7001 goto illegal_op;
7418027e
RH
7002 }
7003 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
7004 if (REX_B(s)) {
7005 goto do_xchg_reg_eax;
7006 }
0573fbfc 7007 if (prefixes & PREFIX_REPZ) {
81f3053b
PB
7008 gen_update_cc_op(s);
7009 gen_jmp_im(pc_start - s->cs_base);
7010 gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
7011 s->is_jmp = DISAS_TB_JUMP;
0573fbfc 7012 }
2c0262af
FB
7013 break;
7014 case 0x9b: /* fwait */
5fafdf24 7015 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
7eee2a50
FB
7016 (HF_MP_MASK | HF_TS_MASK)) {
7017 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
2ee73ac3 7018 } else {
773cdfcc 7019 gen_update_cc_op(s);
14ce26e7 7020 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 7021 gen_helper_fwait(cpu_env);
7eee2a50 7022 }
2c0262af
FB
7023 break;
7024 case 0xcc: /* int3 */
7025 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
7026 break;
7027 case 0xcd: /* int N */
0af10c86 7028 val = cpu_ldub_code(env, s->pc++);
f115e911 7029 if (s->vm86 && s->iopl != 3) {
5fafdf24 7030 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
f115e911
FB
7031 } else {
7032 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
7033 }
2c0262af
FB
7034 break;
7035 case 0xce: /* into */
14ce26e7
FB
7036 if (CODE64(s))
7037 goto illegal_op;
773cdfcc 7038 gen_update_cc_op(s);
a8ede8ba 7039 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7040 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
2c0262af 7041 break;
0b97134b 7042#ifdef WANT_ICEBP
2c0262af 7043 case 0xf1: /* icebp (undocumented, exits to external debugger) */
872929aa 7044 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
aba9d61e 7045#if 1
2c0262af 7046 gen_debug(s, pc_start - s->cs_base);
aba9d61e
FB
7047#else
7048 /* start debug */
0af10c86 7049 tb_flush(env);
24537a01 7050 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
aba9d61e 7051#endif
2c0262af 7052 break;
0b97134b 7053#endif
2c0262af
FB
7054 case 0xfa: /* cli */
7055 if (!s->vm86) {
7056 if (s->cpl <= s->iopl) {
f0967a1a 7057 gen_helper_cli(cpu_env);
2c0262af
FB
7058 } else {
7059 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7060 }
7061 } else {
7062 if (s->iopl == 3) {
f0967a1a 7063 gen_helper_cli(cpu_env);
2c0262af
FB
7064 } else {
7065 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7066 }
7067 }
7068 break;
7069 case 0xfb: /* sti */
7070 if (!s->vm86) {
7071 if (s->cpl <= s->iopl) {
7072 gen_sti:
f0967a1a 7073 gen_helper_sti(cpu_env);
2c0262af 7074 /* interrupts are enabled only for the first insn after sti */
a2cc3b24
FB
7075 /* if several consecutive instructions inhibit interrupts, only the
7076 _first_ one actually sets the inhibit flag */
7077 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
f0967a1a 7078 gen_helper_set_inhibit_irq(cpu_env);
2c0262af 7079 /* give a chance to handle pending irqs */
14ce26e7 7080 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
7081 gen_eob(s);
7082 } else {
7083 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7084 }
7085 } else {
7086 if (s->iopl == 3) {
7087 goto gen_sti;
7088 } else {
7089 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7090 }
7091 }
7092 break;
7093 case 0x62: /* bound */
14ce26e7
FB
7094 if (CODE64(s))
7095 goto illegal_op;
4ba9938c 7096 ot = dflag ? MO_32 : MO_16;
0af10c86 7097 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7098 reg = (modrm >> 3) & 7;
7099 mod = (modrm >> 6) & 3;
7100 if (mod == 3)
7101 goto illegal_op;
57fec1fe 7102 gen_op_mov_TN_reg(ot, 0, reg);
4eeb3939 7103 gen_lea_modrm(env, s, modrm);
14ce26e7 7104 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 7105 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4ba9938c 7106 if (ot == MO_16) {
92fc4b58
BS
7107 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
7108 } else {
7109 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
7110 }
2c0262af
FB
7111 break;
7112 case 0x1c8 ... 0x1cf: /* bswap reg */
14ce26e7
FB
7113 reg = (b & 7) | REX_B(s);
7114#ifdef TARGET_X86_64
7115 if (dflag == 2) {
4ba9938c 7116 gen_op_mov_TN_reg(MO_64, 0, reg);
66896cb8 7117 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
4ba9938c 7118 gen_op_mov_reg_T0(MO_64, reg);
5fafdf24 7119 } else
8777643e 7120#endif
57fec1fe 7121 {
4ba9938c 7122 gen_op_mov_TN_reg(MO_32, 0, reg);
8777643e
AJ
7123 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
7124 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
4ba9938c 7125 gen_op_mov_reg_T0(MO_32, reg);
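            /* Added example (not from the original source): bswap on
               EAX=0x12345678 stores 0x78563412; the ext32u above clears the
               high half of the temporary on 64-bit targets before the 32-bit
               byte swap. */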
14ce26e7 7126 }
2c0262af
FB
7127 break;
7128 case 0xd6: /* salc */
14ce26e7
FB
7129 if (CODE64(s))
7130 goto illegal_op;
cc8b6f5b 7131 gen_compute_eflags_c(s, cpu_T[0]);
bd7a7b33 7132 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4ba9938c 7133 gen_op_mov_reg_T0(MO_8, R_EAX);
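            /* Added note (not from the original source): gen_compute_eflags_c
               leaves 0 or 1 in T0, so the negation yields 0 or -1 and AL ends
               up as 0x00 when CF is clear and 0xFF when CF is set, matching
               the undocumented SALC behaviour. */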
2c0262af
FB
7134 break;
7135 case 0xe0: /* loopnz */
7136 case 0xe1: /* loopz */
2c0262af
FB
7137 case 0xe2: /* loop */
7138 case 0xe3: /* jecxz */
14ce26e7 7139 {
6e0d8677 7140 int l1, l2, l3;
14ce26e7 7141
4ba9938c 7142 tval = (int8_t)insn_get(env, s, MO_8);
14ce26e7
FB
7143 next_eip = s->pc - s->cs_base;
7144 tval += next_eip;
7145 if (s->dflag == 0)
7146 tval &= 0xffff;
3b46e624 7147
14ce26e7
FB
7148 l1 = gen_new_label();
7149 l2 = gen_new_label();
6e0d8677 7150 l3 = gen_new_label();
14ce26e7 7151 b &= 3;
6e0d8677
FB
7152 switch(b) {
7153 case 0: /* loopnz */
7154 case 1: /* loopz */
6e0d8677
FB
7155 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7156 gen_op_jz_ecx(s->aflag, l3);
5bdb91b0 7157 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
6e0d8677
FB
7158 break;
7159 case 2: /* loop */
7160 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7161 gen_op_jnz_ecx(s->aflag, l1);
7162 break;
7163 default:
7164 case 3: /* jcxz */
7165 gen_op_jz_ecx(s->aflag, l1);
7166 break;
14ce26e7
FB
7167 }
7168
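                /* Added summary (not in the original source): l1 is the
                   "branch taken" path that jumps to the target, the l3 /
                   fall-through path continues at the next instruction, and
                   l2 is the common exit where the TB ends. */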
6e0d8677 7169 gen_set_label(l3);
14ce26e7 7170 gen_jmp_im(next_eip);
8e1c85e3 7171 tcg_gen_br(l2);
6e0d8677 7172
14ce26e7
FB
7173 gen_set_label(l1);
7174 gen_jmp_im(tval);
7175 gen_set_label(l2);
7176 gen_eob(s);
7177 }
2c0262af
FB
7178 break;
7179 case 0x130: /* wrmsr */
7180 case 0x132: /* rdmsr */
7181 if (s->cpl != 0) {
7182 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7183 } else {
773cdfcc 7184 gen_update_cc_op(s);
872929aa 7185 gen_jmp_im(pc_start - s->cs_base);
0573fbfc 7186 if (b & 2) {
4a7443be 7187 gen_helper_rdmsr(cpu_env);
0573fbfc 7188 } else {
4a7443be 7189 gen_helper_wrmsr(cpu_env);
0573fbfc 7190 }
2c0262af
FB
7191 }
7192 break;
7193 case 0x131: /* rdtsc */
773cdfcc 7194 gen_update_cc_op(s);
ecada8a2 7195 gen_jmp_im(pc_start - s->cs_base);
efade670
PB
7196 if (use_icount)
7197 gen_io_start();
4a7443be 7198 gen_helper_rdtsc(cpu_env);
efade670
PB
7199 if (use_icount) {
7200 gen_io_end();
7201 gen_jmp(s, s->pc - s->cs_base);
7202 }
2c0262af 7203 break;
df01e0fc 7204 case 0x133: /* rdpmc */
773cdfcc 7205 gen_update_cc_op(s);
df01e0fc 7206 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7207 gen_helper_rdpmc(cpu_env);
df01e0fc 7208 break;
023fe10d 7209 case 0x134: /* sysenter */
2436b61a 7210 /* For Intel CPUs, SYSENTER is valid in 64-bit mode */
0af10c86 7211 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
14ce26e7 7212 goto illegal_op;
023fe10d
FB
7213 if (!s->pe) {
7214 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7215 } else {
728d803b 7216 gen_update_cc_op(s);
14ce26e7 7217 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7218 gen_helper_sysenter(cpu_env);
023fe10d
FB
7219 gen_eob(s);
7220 }
7221 break;
7222 case 0x135: /* sysexit */
2436b61a 7223 /* For Intel CPUs, SYSEXIT is valid in 64-bit mode */
0af10c86 7224 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
14ce26e7 7225 goto illegal_op;
023fe10d
FB
7226 if (!s->pe) {
7227 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7228 } else {
728d803b 7229 gen_update_cc_op(s);
14ce26e7 7230 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7231 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag));
023fe10d
FB
7232 gen_eob(s);
7233 }
7234 break;
14ce26e7
FB
7235#ifdef TARGET_X86_64
7236 case 0x105: /* syscall */
7237 /* XXX: is it usable in real mode? */
728d803b 7238 gen_update_cc_op(s);
14ce26e7 7239 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7240 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
14ce26e7
FB
7241 gen_eob(s);
7242 break;
7243 case 0x107: /* sysret */
7244 if (!s->pe) {
7245 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7246 } else {
728d803b 7247 gen_update_cc_op(s);
14ce26e7 7248 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7249 gen_helper_sysret(cpu_env, tcg_const_i32(s->dflag));
aba9d61e 7250 /* condition codes are modified only in long mode */
3ca51d07
RH
7251 if (s->lma) {
7252 set_cc_op(s, CC_OP_EFLAGS);
7253 }
14ce26e7
FB
7254 gen_eob(s);
7255 }
7256 break;
7257#endif
2c0262af 7258 case 0x1a2: /* cpuid */
773cdfcc 7259 gen_update_cc_op(s);
9575cb94 7260 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7261 gen_helper_cpuid(cpu_env);
2c0262af
FB
7262 break;
7263 case 0xf4: /* hlt */
7264 if (s->cpl != 0) {
7265 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7266 } else {
773cdfcc 7267 gen_update_cc_op(s);
94451178 7268 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7269 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
5779406a 7270 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
7271 }
7272 break;
7273 case 0x100:
0af10c86 7274 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7275 mod = (modrm >> 6) & 3;
7276 op = (modrm >> 3) & 7;
7277 switch(op) {
7278 case 0: /* sldt */
f115e911
FB
7279 if (!s->pe || s->vm86)
7280 goto illegal_op;
872929aa 7281 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
651ba608 7282 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
4ba9938c 7283 ot = MO_16;
2c0262af
FB
7284 if (mod == 3)
7285 ot += s->dflag;
0af10c86 7286 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
7287 break;
7288 case 2: /* lldt */
f115e911
FB
7289 if (!s->pe || s->vm86)
7290 goto illegal_op;
2c0262af
FB
7291 if (s->cpl != 0) {
7292 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7293 } else {
872929aa 7294 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
4ba9938c 7295 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
14ce26e7 7296 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 7297 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 7298 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
2c0262af
FB
7299 }
7300 break;
7301 case 1: /* str */
f115e911
FB
7302 if (!s->pe || s->vm86)
7303 goto illegal_op;
872929aa 7304 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
651ba608 7305 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
4ba9938c 7306 ot = MO_16;
2c0262af
FB
7307 if (mod == 3)
7308 ot += s->dflag;
0af10c86 7309 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
7310 break;
7311 case 3: /* ltr */
f115e911
FB
7312 if (!s->pe || s->vm86)
7313 goto illegal_op;
2c0262af
FB
7314 if (s->cpl != 0) {
7315 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7316 } else {
872929aa 7317 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
4ba9938c 7318 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
14ce26e7 7319 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 7320 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 7321 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
2c0262af
FB
7322 }
7323 break;
7324 case 4: /* verr */
7325 case 5: /* verw */
f115e911
FB
7326 if (!s->pe || s->vm86)
7327 goto illegal_op;
4ba9938c 7328 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
773cdfcc 7329 gen_update_cc_op(s);
2999a0b2
BS
7330 if (op == 4) {
7331 gen_helper_verr(cpu_env, cpu_T[0]);
7332 } else {
7333 gen_helper_verw(cpu_env, cpu_T[0]);
7334 }
3ca51d07 7335 set_cc_op(s, CC_OP_EFLAGS);
f115e911 7336 break;
2c0262af
FB
7337 default:
7338 goto illegal_op;
7339 }
7340 break;
7341 case 0x101:
0af10c86 7342 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7343 mod = (modrm >> 6) & 3;
7344 op = (modrm >> 3) & 7;
3d7374c5 7345 rm = modrm & 7;
2c0262af
FB
7346 switch(op) {
7347 case 0: /* sgdt */
2c0262af
FB
7348 if (mod == 3)
7349 goto illegal_op;
872929aa 7350 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
4eeb3939 7351 gen_lea_modrm(env, s, modrm);
651ba608 7352 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
fd8ca9f6 7353 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
aba9d61e 7354 gen_add_A0_im(s, 2);
651ba608 7355 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
f0706f0c
RH
7356 if (s->dflag == 0) {
7357 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7358 }
fd8ca9f6 7359 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
2c0262af 7360 break;
3d7374c5
FB
7361 case 1:
7362 if (mod == 3) {
7363 switch (rm) {
7364 case 0: /* monitor */
7365 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7366 s->cpl != 0)
7367 goto illegal_op;
773cdfcc 7368 gen_update_cc_op(s);
3d7374c5
FB
7369 gen_jmp_im(pc_start - s->cs_base);
7370#ifdef TARGET_X86_64
7371 if (s->aflag == 2) {
bbf662ee 7372 gen_op_movq_A0_reg(R_EAX);
5fafdf24 7373 } else
3d7374c5
FB
7374#endif
7375 {
bbf662ee 7376 gen_op_movl_A0_reg(R_EAX);
3d7374c5
FB
7377 if (s->aflag == 0)
7378 gen_op_andl_A0_ffff();
7379 }
7380 gen_add_A0_ds_seg(s);
4a7443be 7381 gen_helper_monitor(cpu_env, cpu_A0);
3d7374c5
FB
7382 break;
7383 case 1: /* mwait */
7384 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7385 s->cpl != 0)
7386 goto illegal_op;
728d803b 7387 gen_update_cc_op(s);
94451178 7388 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7389 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
3d7374c5
FB
7390 gen_eob(s);
7391 break;
a9321a4d
PA
7392 case 2: /* clac */
7393 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7394 s->cpl != 0) {
7395 goto illegal_op;
7396 }
7397 gen_helper_clac(cpu_env);
7398 gen_jmp_im(s->pc - s->cs_base);
7399 gen_eob(s);
7400 break;
7401 case 3: /* stac */
7402 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7403 s->cpl != 0) {
7404 goto illegal_op;
7405 }
7406 gen_helper_stac(cpu_env);
7407 gen_jmp_im(s->pc - s->cs_base);
7408 gen_eob(s);
7409 break;
3d7374c5
FB
7410 default:
7411 goto illegal_op;
7412 }
7413 } else { /* sidt */
872929aa 7414 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
4eeb3939 7415 gen_lea_modrm(env, s, modrm);
651ba608 7416 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
fd8ca9f6 7417 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
3d7374c5 7418 gen_add_A0_im(s, 2);
651ba608 7419 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
f0706f0c
RH
7420 if (s->dflag == 0) {
7421 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7422 }
fd8ca9f6 7423 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
3d7374c5
FB
7424 }
7425 break;
2c0262af
FB
7426 case 2: /* lgdt */
7427 case 3: /* lidt */
0573fbfc 7428 if (mod == 3) {
773cdfcc 7429 gen_update_cc_op(s);
872929aa 7430 gen_jmp_im(pc_start - s->cs_base);
0573fbfc
TS
7431 switch(rm) {
7432 case 0: /* VMRUN */
872929aa
FB
7433 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7434 goto illegal_op;
7435 if (s->cpl != 0) {
7436 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
0573fbfc 7437 break;
872929aa 7438 } else {
052e80d5 7439 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag),
a7812ae4 7440 tcg_const_i32(s->pc - pc_start));
db620f46 7441 tcg_gen_exit_tb(0);
5779406a 7442 s->is_jmp = DISAS_TB_JUMP;
872929aa 7443 }
0573fbfc
TS
7444 break;
7445 case 1: /* VMMCALL */
872929aa
FB
7446 if (!(s->flags & HF_SVME_MASK))
7447 goto illegal_op;
052e80d5 7448 gen_helper_vmmcall(cpu_env);
0573fbfc
TS
7449 break;
7450 case 2: /* VMLOAD */
872929aa
FB
7451 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7452 goto illegal_op;
7453 if (s->cpl != 0) {
7454 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7455 break;
7456 } else {
052e80d5 7457 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag));
872929aa 7458 }
0573fbfc
TS
7459 break;
7460 case 3: /* VMSAVE */
872929aa
FB
7461 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7462 goto illegal_op;
7463 if (s->cpl != 0) {
7464 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7465 break;
7466 } else {
052e80d5 7467 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag));
872929aa 7468 }
0573fbfc
TS
7469 break;
7470 case 4: /* STGI */
872929aa
FB
7471 if ((!(s->flags & HF_SVME_MASK) &&
7472 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7473 !s->pe)
7474 goto illegal_op;
7475 if (s->cpl != 0) {
7476 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7477 break;
7478 } else {
052e80d5 7479 gen_helper_stgi(cpu_env);
872929aa 7480 }
0573fbfc
TS
7481 break;
7482 case 5: /* CLGI */
872929aa
FB
7483 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7484 goto illegal_op;
7485 if (s->cpl != 0) {
7486 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7487 break;
7488 } else {
052e80d5 7489 gen_helper_clgi(cpu_env);
872929aa 7490 }
0573fbfc
TS
7491 break;
7492 case 6: /* SKINIT */
872929aa
FB
7493 if ((!(s->flags & HF_SVME_MASK) &&
7494 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7495 !s->pe)
7496 goto illegal_op;
052e80d5 7497 gen_helper_skinit(cpu_env);
0573fbfc
TS
7498 break;
7499 case 7: /* INVLPGA */
872929aa
FB
7500 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7501 goto illegal_op;
7502 if (s->cpl != 0) {
7503 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7504 break;
7505 } else {
052e80d5 7506 gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag));
872929aa 7507 }
0573fbfc
TS
7508 break;
7509 default:
7510 goto illegal_op;
7511 }
7512 } else if (s->cpl != 0) {
2c0262af
FB
7513 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7514 } else {
872929aa
FB
7515 gen_svm_check_intercept(s, pc_start,
7516 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
4eeb3939 7517 gen_lea_modrm(env, s, modrm);
0f712e10 7518 gen_op_ld_v(s, MO_16, cpu_T[1], cpu_A0);
aba9d61e 7519 gen_add_A0_im(s, 2);
909be183 7520 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
f0706f0c
RH
7521 if (s->dflag == 0) {
7522 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7523 }
2c0262af 7524 if (op == 2) {
651ba608
FB
7525 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7526 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
2c0262af 7527 } else {
651ba608
FB
7528 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7529 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
2c0262af
FB
7530 }
7531 }
7532 break;
7533 case 4: /* smsw */
872929aa 7534 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
e2542fe2 7535#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
f60d2728 7536 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7537#else
651ba608 7538 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
f60d2728 7539#endif
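            /* Added note (not from the original source): with a 64-bit
               target_ulong, cr[0] occupies 8 bytes, so on a big-endian host
               the 32-bit load above must use offset + 4 to reach the low
               half of the register. */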
4ba9938c 7540 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 1);
2c0262af
FB
7541 break;
7542 case 6: /* lmsw */
7543 if (s->cpl != 0) {
7544 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7545 } else {
872929aa 7546 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
4ba9938c 7547 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
4a7443be 7548 gen_helper_lmsw(cpu_env, cpu_T[0]);
14ce26e7 7549 gen_jmp_im(s->pc - s->cs_base);
d71b9a8b 7550 gen_eob(s);
2c0262af
FB
7551 }
7552 break;
1b050077
AP
7553 case 7:
7554 if (mod != 3) { /* invlpg */
7555 if (s->cpl != 0) {
7556 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7557 } else {
773cdfcc 7558 gen_update_cc_op(s);
1b050077 7559 gen_jmp_im(pc_start - s->cs_base);
4eeb3939 7560 gen_lea_modrm(env, s, modrm);
4a7443be 7561 gen_helper_invlpg(cpu_env, cpu_A0);
1b050077
AP
7562 gen_jmp_im(s->pc - s->cs_base);
7563 gen_eob(s);
7564 }
2c0262af 7565 } else {
1b050077
AP
7566 switch (rm) {
7567 case 0: /* swapgs */
14ce26e7 7568#ifdef TARGET_X86_64
1b050077
AP
7569 if (CODE64(s)) {
7570 if (s->cpl != 0) {
7571 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7572 } else {
7573 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7574 offsetof(CPUX86State,segs[R_GS].base));
7575 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7576 offsetof(CPUX86State,kernelgsbase));
7577 tcg_gen_st_tl(cpu_T[1], cpu_env,
7578 offsetof(CPUX86State,segs[R_GS].base));
7579 tcg_gen_st_tl(cpu_T[0], cpu_env,
7580 offsetof(CPUX86State,kernelgsbase));
7581 }
5fafdf24 7582 } else
14ce26e7
FB
7583#endif
7584 {
7585 goto illegal_op;
7586 }
1b050077
AP
7587 break;
7588 case 1: /* rdtscp */
7589 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7590 goto illegal_op;
773cdfcc 7591 gen_update_cc_op(s);
9575cb94 7592 gen_jmp_im(pc_start - s->cs_base);
1b050077
AP
7593 if (use_icount)
7594 gen_io_start();
4a7443be 7595 gen_helper_rdtscp(cpu_env);
1b050077
AP
7596 if (use_icount) {
7597 gen_io_end();
7598 gen_jmp(s, s->pc - s->cs_base);
7599 }
7600 break;
7601 default:
7602 goto illegal_op;
14ce26e7 7603 }
2c0262af
FB
7604 }
7605 break;
7606 default:
7607 goto illegal_op;
7608 }
7609 break;
3415a4dd
FB
7610 case 0x108: /* invd */
7611 case 0x109: /* wbinvd */
7612 if (s->cpl != 0) {
7613 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7614 } else {
872929aa 7615 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
3415a4dd
FB
7616 /* nothing to do */
7617 }
7618 break;
14ce26e7
FB
7619 case 0x63: /* arpl or movslS (x86_64) */
7620#ifdef TARGET_X86_64
7621 if (CODE64(s)) {
7622 int d_ot;
7623 /* d_ot is the size of destination */
4ba9938c 7624 d_ot = dflag + MO_16;
14ce26e7 7625
0af10c86 7626 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
7627 reg = ((modrm >> 3) & 7) | rex_r;
7628 mod = (modrm >> 6) & 3;
7629 rm = (modrm & 7) | REX_B(s);
3b46e624 7630
14ce26e7 7631 if (mod == 3) {
4ba9938c 7632 gen_op_mov_TN_reg(MO_32, 0, rm);
14ce26e7 7633 /* sign extend */
4ba9938c 7634 if (d_ot == MO_64) {
e108dd01 7635 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4ba9938c 7636 }
57fec1fe 7637 gen_op_mov_reg_T0(d_ot, reg);
14ce26e7 7638 } else {
4eeb3939 7639 gen_lea_modrm(env, s, modrm);
4b1fe067 7640 gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T[0], cpu_A0);
57fec1fe 7641 gen_op_mov_reg_T0(d_ot, reg);
14ce26e7 7642 }
5fafdf24 7643 } else
14ce26e7
FB
7644#endif
7645 {
3bd7da9e 7646 int label1;
49d9fdcc 7647 TCGv t0, t1, t2, a0;
1e4840bf 7648
14ce26e7
FB
7649 if (!s->pe || s->vm86)
7650 goto illegal_op;
a7812ae4
PB
7651 t0 = tcg_temp_local_new();
7652 t1 = tcg_temp_local_new();
7653 t2 = tcg_temp_local_new();
4ba9938c 7654 ot = MO_16;
0af10c86 7655 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
7656 reg = (modrm >> 3) & 7;
7657 mod = (modrm >> 6) & 3;
7658 rm = modrm & 7;
7659 if (mod != 3) {
4eeb3939 7660 gen_lea_modrm(env, s, modrm);
323d1876 7661 gen_op_ld_v(s, ot, t0, cpu_A0);
49d9fdcc
LD
7662 a0 = tcg_temp_local_new();
7663 tcg_gen_mov_tl(a0, cpu_A0);
14ce26e7 7664 } else {
1e4840bf 7665 gen_op_mov_v_reg(ot, t0, rm);
49d9fdcc 7666 TCGV_UNUSED(a0);
14ce26e7 7667 }
1e4840bf
FB
7668 gen_op_mov_v_reg(ot, t1, reg);
7669 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7670 tcg_gen_andi_tl(t1, t1, 3);
7671 tcg_gen_movi_tl(t2, 0);
3bd7da9e 7672 label1 = gen_new_label();
1e4840bf
FB
7673 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7674 tcg_gen_andi_tl(t0, t0, ~3);
7675 tcg_gen_or_tl(t0, t0, t1);
7676 tcg_gen_movi_tl(t2, CC_Z);
3bd7da9e 7677 gen_set_label(label1);
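                /* Added example (not from the original source): ARPL sets ZF
                   and raises the destination selector's RPL to the source's
                   when it is lower; e.g. dest RPL 1 vs. source RPL 3 stores
                   RPL 3 and sets ZF, while dest RPL 3 vs. source RPL 1
                   leaves the selector unchanged and clears ZF. */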
14ce26e7 7678 if (mod != 3) {
323d1876 7679 gen_op_st_v(s, ot, t0, a0);
49d9fdcc
LD
7680 tcg_temp_free(a0);
7681 } else {
1e4840bf 7682 gen_op_mov_reg_v(ot, rm, t0);
14ce26e7 7683 }
d229edce 7684 gen_compute_eflags(s);
3bd7da9e 7685 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
1e4840bf 7686 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
1e4840bf
FB
7687 tcg_temp_free(t0);
7688 tcg_temp_free(t1);
7689 tcg_temp_free(t2);
f115e911 7690 }
f115e911 7691 break;
2c0262af
FB
7692 case 0x102: /* lar */
7693 case 0x103: /* lsl */
cec6843e
FB
7694 {
7695 int label1;
1e4840bf 7696 TCGv t0;
cec6843e
FB
7697 if (!s->pe || s->vm86)
7698 goto illegal_op;
4ba9938c 7699 ot = dflag ? MO_32 : MO_16;
0af10c86 7700 modrm = cpu_ldub_code(env, s->pc++);
cec6843e 7701 reg = ((modrm >> 3) & 7) | rex_r;
4ba9938c 7702 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
a7812ae4 7703 t0 = tcg_temp_local_new();
773cdfcc 7704 gen_update_cc_op(s);
2999a0b2
BS
7705 if (b == 0x102) {
7706 gen_helper_lar(t0, cpu_env, cpu_T[0]);
7707 } else {
7708 gen_helper_lsl(t0, cpu_env, cpu_T[0]);
7709 }
cec6843e
FB
7710 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7711 label1 = gen_new_label();
cb63669a 7712 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
1e4840bf 7713 gen_op_mov_reg_v(ot, reg, t0);
cec6843e 7714 gen_set_label(label1);
3ca51d07 7715 set_cc_op(s, CC_OP_EFLAGS);
1e4840bf 7716 tcg_temp_free(t0);
cec6843e 7717 }
2c0262af
FB
7718 break;
7719 case 0x118:
0af10c86 7720 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7721 mod = (modrm >> 6) & 3;
7722 op = (modrm >> 3) & 7;
7723 switch(op) {
7724 case 0: /* prefetchnta */
7725 case 1: /* prefetcht0 */
7726 case 2: /* prefetcht1 */
7727 case 3: /* prefetcht2 */
7728 if (mod == 3)
7729 goto illegal_op;
4eeb3939 7730 gen_lea_modrm(env, s, modrm);
2c0262af
FB
7731 /* nothing more to do */
7732 break;
e17a36ce 7733 default: /* nop (multi byte) */
0af10c86 7734 gen_nop_modrm(env, s, modrm);
e17a36ce 7735 break;
2c0262af
FB
7736 }
7737 break;
e17a36ce 7738 case 0x119 ... 0x11f: /* nop (multi byte) */
0af10c86
BS
7739 modrm = cpu_ldub_code(env, s->pc++);
7740 gen_nop_modrm(env, s, modrm);
e17a36ce 7741 break;
2c0262af
FB
7742 case 0x120: /* mov reg, crN */
7743 case 0x122: /* mov crN, reg */
7744 if (s->cpl != 0) {
7745 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7746 } else {
0af10c86 7747 modrm = cpu_ldub_code(env, s->pc++);
5c73b757
MO
7748 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7749 * AMD documentation (24594.pdf) and testing of
7750 * intel 386 and 486 processors all show that the mod bits
7751 * are assumed to be 1's, regardless of actual values.
7752 */
14ce26e7
FB
7753 rm = (modrm & 7) | REX_B(s);
7754 reg = ((modrm >> 3) & 7) | rex_r;
7755 if (CODE64(s))
4ba9938c 7756 ot = MO_64;
14ce26e7 7757 else
4ba9938c 7758 ot = MO_32;
ccd59d09
AP
7759 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7760 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7761 reg = 8;
7762 }
2c0262af
FB
7763 switch(reg) {
7764 case 0:
7765 case 2:
7766 case 3:
7767 case 4:
9230e66e 7768 case 8:
773cdfcc 7769 gen_update_cc_op(s);
872929aa 7770 gen_jmp_im(pc_start - s->cs_base);
2c0262af 7771 if (b & 2) {
57fec1fe 7772 gen_op_mov_TN_reg(ot, 0, rm);
4a7443be
BS
7773 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7774 cpu_T[0]);
14ce26e7 7775 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
7776 gen_eob(s);
7777 } else {
4a7443be 7778 gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
57fec1fe 7779 gen_op_mov_reg_T0(ot, rm);
2c0262af
FB
7780 }
7781 break;
7782 default:
7783 goto illegal_op;
7784 }
7785 }
7786 break;
7787 case 0x121: /* mov reg, drN */
7788 case 0x123: /* mov drN, reg */
7789 if (s->cpl != 0) {
7790 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7791 } else {
0af10c86 7792 modrm = cpu_ldub_code(env, s->pc++);
5c73b757
MO
7793 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7794 * AMD documentation (24594.pdf) and testing of
7795 * intel 386 and 486 processors all show that the mod bits
7796 * are assumed to be 1's, regardless of actual values.
7797 */
14ce26e7
FB
7798 rm = (modrm & 7) | REX_B(s);
7799 reg = ((modrm >> 3) & 7) | rex_r;
7800 if (CODE64(s))
4ba9938c 7801 ot = MO_64;
14ce26e7 7802 else
4ba9938c 7803 ot = MO_32;
2c0262af 7804 /* XXX: do it dynamically with CR4.DE bit */
14ce26e7 7805 if (reg == 4 || reg == 5 || reg >= 8)
2c0262af
FB
7806 goto illegal_op;
7807 if (b & 2) {
0573fbfc 7808 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
57fec1fe 7809 gen_op_mov_TN_reg(ot, 0, rm);
4a7443be 7810 gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
14ce26e7 7811 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
7812 gen_eob(s);
7813 } else {
0573fbfc 7814 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
651ba608 7815 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
57fec1fe 7816 gen_op_mov_reg_T0(ot, rm);
2c0262af
FB
7817 }
7818 }
7819 break;
7820 case 0x106: /* clts */
7821 if (s->cpl != 0) {
7822 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7823 } else {
0573fbfc 7824 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
f0967a1a 7825 gen_helper_clts(cpu_env);
7eee2a50 7826 /* abort block because static cpu state changed */
14ce26e7 7827 gen_jmp_im(s->pc - s->cs_base);
7eee2a50 7828 gen_eob(s);
2c0262af
FB
7829 }
7830 break;
222a3336 7831 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
664e0f19
FB
7832 case 0x1c3: /* MOVNTI reg, mem */
7833 if (!(s->cpuid_features & CPUID_SSE2))
14ce26e7 7834 goto illegal_op;
4ba9938c 7835 ot = s->dflag == 2 ? MO_64 : MO_32;
0af10c86 7836 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
7837 mod = (modrm >> 6) & 3;
7838 if (mod == 3)
7839 goto illegal_op;
7840 reg = ((modrm >> 3) & 7) | rex_r;
7841 /* generate a generic store */
0af10c86 7842 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
14ce26e7 7843 break;
664e0f19 7844 case 0x1ae:
0af10c86 7845 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
7846 mod = (modrm >> 6) & 3;
7847 op = (modrm >> 3) & 7;
7848 switch(op) {
7849 case 0: /* fxsave */
5fafdf24 7850 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
09d85fb8 7851 (s->prefix & PREFIX_LOCK))
14ce26e7 7852 goto illegal_op;
09d85fb8 7853 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
0fd14b72
FB
7854 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7855 break;
7856 }
4eeb3939 7857 gen_lea_modrm(env, s, modrm);
773cdfcc 7858 gen_update_cc_op(s);
19e6c4b8 7859 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 7860 gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32((s->dflag == 2)));
664e0f19
FB
7861 break;
7862 case 1: /* fxrstor */
5fafdf24 7863 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
09d85fb8 7864 (s->prefix & PREFIX_LOCK))
14ce26e7 7865 goto illegal_op;
09d85fb8 7866 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
0fd14b72
FB
7867 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7868 break;
7869 }
4eeb3939 7870 gen_lea_modrm(env, s, modrm);
773cdfcc 7871 gen_update_cc_op(s);
19e6c4b8 7872 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae
BS
7873 gen_helper_fxrstor(cpu_env, cpu_A0,
7874 tcg_const_i32((s->dflag == 2)));
664e0f19
FB
7875 break;
7876 case 2: /* ldmxcsr */
7877 case 3: /* stmxcsr */
7878 if (s->flags & HF_TS_MASK) {
7879 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7880 break;
14ce26e7 7881 }
664e0f19
FB
7882 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7883 mod == 3)
14ce26e7 7884 goto illegal_op;
4eeb3939 7885 gen_lea_modrm(env, s, modrm);
664e0f19 7886 if (op == 2) {
80b02013
RH
7887 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
7888 s->mem_index, MO_LEUL);
d3eb5eae 7889 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
14ce26e7 7890 } else {
651ba608 7891 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
fd8ca9f6 7892 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
14ce26e7 7893 }
664e0f19
FB
7894 break;
7895 case 5: /* lfence */
7896 case 6: /* mfence */
8001c294 7897 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
664e0f19
FB
7898 goto illegal_op;
7899 break;
8f091a59
FB
7900 case 7: /* sfence / clflush */
7901 if ((modrm & 0xc7) == 0xc0) {
7902 /* sfence */
a35f3ec7 7903 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
8f091a59
FB
7904 if (!(s->cpuid_features & CPUID_SSE))
7905 goto illegal_op;
7906 } else {
7907 /* clflush */
7908 if (!(s->cpuid_features & CPUID_CLFLUSH))
7909 goto illegal_op;
4eeb3939 7910 gen_lea_modrm(env, s, modrm);
8f091a59
FB
7911 }
7912 break;
664e0f19 7913 default:
14ce26e7
FB
7914 goto illegal_op;
7915 }
7916 break;
a35f3ec7 7917 case 0x10d: /* 3DNow! prefetch(w) */
0af10c86 7918 modrm = cpu_ldub_code(env, s->pc++);
a35f3ec7
AJ
7919 mod = (modrm >> 6) & 3;
7920 if (mod == 3)
7921 goto illegal_op;
4eeb3939 7922 gen_lea_modrm(env, s, modrm);
8f091a59
FB
7923 /* ignore for now */
7924 break;
3b21e03e 7925 case 0x1aa: /* rsm */
872929aa 7926 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
3b21e03e
FB
7927 if (!(s->flags & HF_SMM_MASK))
7928 goto illegal_op;
728d803b 7929 gen_update_cc_op(s);
3b21e03e 7930 gen_jmp_im(s->pc - s->cs_base);
608badfc 7931 gen_helper_rsm(cpu_env);
3b21e03e
FB
7932 gen_eob(s);
7933 break;
222a3336
AZ
7934 case 0x1b8: /* SSE4.2 popcnt */
7935 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7936 PREFIX_REPZ)
7937 goto illegal_op;
7938 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7939 goto illegal_op;
7940
0af10c86 7941 modrm = cpu_ldub_code(env, s->pc++);
8b4a3df8 7942 reg = ((modrm >> 3) & 7) | rex_r;
222a3336
AZ
7943
7944 if (s->prefix & PREFIX_DATA)
4ba9938c 7945 ot = MO_16;
222a3336 7946 else if (s->dflag != 2)
4ba9938c 7947 ot = MO_32;
222a3336 7948 else
4ba9938c 7949 ot = MO_64;
222a3336 7950
0af10c86 7951 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
d3eb5eae 7952 gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
222a3336 7953 gen_op_mov_reg_T0(ot, reg);
fdb0d09d 7954
3ca51d07 7955 set_cc_op(s, CC_OP_EFLAGS);
222a3336 7956 break;
a35f3ec7
AJ
7957 case 0x10e ... 0x10f:
7958 /* 3DNow! instructions, ignore prefixes */
7959 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
664e0f19
FB
7960 case 0x110 ... 0x117:
7961 case 0x128 ... 0x12f:
4242b1bd 7962 case 0x138 ... 0x13a:
d9f4bb27 7963 case 0x150 ... 0x179:
664e0f19
FB
7964 case 0x17c ... 0x17f:
7965 case 0x1c2:
7966 case 0x1c4 ... 0x1c6:
7967 case 0x1d0 ... 0x1fe:
0af10c86 7968 gen_sse(env, s, b, pc_start, rex_r);
664e0f19 7969 break;
2c0262af
FB
7970 default:
7971 goto illegal_op;
7972 }
7973 /* lock generation */
7974 if (s->prefix & PREFIX_LOCK)
a7812ae4 7975 gen_helper_unlock();
2c0262af
FB
7976 return s->pc;
7977 illegal_op:
ab1f142b 7978 if (s->prefix & PREFIX_LOCK)
a7812ae4 7979 gen_helper_unlock();
2c0262af
FB
7980 /* XXX: ensure that no lock was generated */
7981 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7982 return s->pc;
7983}
7984
2c0262af
FB
7985void optimize_flags_init(void)
7986{
a7812ae4
PB
7987 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7988 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 7989 offsetof(CPUX86State, cc_op), "cc_op");
317ac620 7990 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
a7812ae4 7991 "cc_dst");
a3251186
RH
7992 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
7993 "cc_src");
988c3eb0
RH
7994 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src2),
7995 "cc_src2");
437a88a5 7996
cc739bb0
LD
7997#ifdef TARGET_X86_64
7998 cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 7999 offsetof(CPUX86State, regs[R_EAX]), "rax");
cc739bb0 8000 cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8001 offsetof(CPUX86State, regs[R_ECX]), "rcx");
cc739bb0 8002 cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8003 offsetof(CPUX86State, regs[R_EDX]), "rdx");
cc739bb0 8004 cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8005 offsetof(CPUX86State, regs[R_EBX]), "rbx");
cc739bb0 8006 cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8007 offsetof(CPUX86State, regs[R_ESP]), "rsp");
cc739bb0 8008 cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8009 offsetof(CPUX86State, regs[R_EBP]), "rbp");
cc739bb0 8010 cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8011 offsetof(CPUX86State, regs[R_ESI]), "rsi");
cc739bb0 8012 cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8013 offsetof(CPUX86State, regs[R_EDI]), "rdi");
cc739bb0 8014 cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8015 offsetof(CPUX86State, regs[8]), "r8");
cc739bb0 8016 cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8017 offsetof(CPUX86State, regs[9]), "r9");
cc739bb0 8018 cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8019 offsetof(CPUX86State, regs[10]), "r10");
cc739bb0 8020 cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8021 offsetof(CPUX86State, regs[11]), "r11");
cc739bb0 8022 cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8023 offsetof(CPUX86State, regs[12]), "r12");
cc739bb0 8024 cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8025 offsetof(CPUX86State, regs[13]), "r13");
cc739bb0 8026 cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8027 offsetof(CPUX86State, regs[14]), "r14");
cc739bb0 8028 cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8029 offsetof(CPUX86State, regs[15]), "r15");
cc739bb0
LD
8030#else
8031 cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8032 offsetof(CPUX86State, regs[R_EAX]), "eax");
cc739bb0 8033 cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8034 offsetof(CPUX86State, regs[R_ECX]), "ecx");
cc739bb0 8035 cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8036 offsetof(CPUX86State, regs[R_EDX]), "edx");
cc739bb0 8037 cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8038 offsetof(CPUX86State, regs[R_EBX]), "ebx");
cc739bb0 8039 cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8040 offsetof(CPUX86State, regs[R_ESP]), "esp");
cc739bb0 8041 cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8042 offsetof(CPUX86State, regs[R_EBP]), "ebp");
cc739bb0 8043 cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8044 offsetof(CPUX86State, regs[R_ESI]), "esi");
cc739bb0 8045 cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8046 offsetof(CPUX86State, regs[R_EDI]), "edi");
cc739bb0 8047#endif
2c0262af
FB
8048}
8049
8050/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8051 basic block 'tb'. If search_pc is TRUE, also generate PC
8052 information for each intermediate instruction. */
467215c2 8053static inline void gen_intermediate_code_internal(X86CPU *cpu,
2cfc5f17 8054 TranslationBlock *tb,
467215c2 8055 bool search_pc)
2c0262af 8056{
ed2803da 8057 CPUState *cs = CPU(cpu);
467215c2 8058 CPUX86State *env = &cpu->env;
2c0262af 8059 DisasContext dc1, *dc = &dc1;
14ce26e7 8060 target_ulong pc_ptr;
2c0262af 8061 uint16_t *gen_opc_end;
a1d1bb31 8062 CPUBreakpoint *bp;
7f5b7d3e 8063 int j, lj;
c068688b 8064 uint64_t flags;
14ce26e7
FB
8065 target_ulong pc_start;
8066 target_ulong cs_base;
2e70f6ef
PB
8067 int num_insns;
8068 int max_insns;
3b46e624 8069
2c0262af 8070 /* generate intermediate code */
14ce26e7
FB
8071 pc_start = tb->pc;
8072 cs_base = tb->cs_base;
2c0262af 8073 flags = tb->flags;
3a1d9b8b 8074
4f31916f 8075 dc->pe = (flags >> HF_PE_SHIFT) & 1;
2c0262af
FB
8076 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
8077 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
8078 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
8079 dc->f_st = 0;
8080 dc->vm86 = (flags >> VM_SHIFT) & 1;
8081 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
8082 dc->iopl = (flags >> IOPL_SHIFT) & 3;
8083 dc->tf = (flags >> TF_SHIFT) & 1;
ed2803da 8084 dc->singlestep_enabled = cs->singlestep_enabled;
2c0262af 8085 dc->cc_op = CC_OP_DYNAMIC;
e207582f 8086 dc->cc_op_dirty = false;
2c0262af
FB
8087 dc->cs_base = cs_base;
8088 dc->tb = tb;
8089 dc->popl_esp_hack = 0;
8090 /* select memory access functions */
8091 dc->mem_index = 0;
8092 if (flags & HF_SOFTMMU_MASK) {
5c42a7cd 8093 dc->mem_index = cpu_mmu_index(env);
2c0262af 8094 }
0514ef2f
EH
8095 dc->cpuid_features = env->features[FEAT_1_EDX];
8096 dc->cpuid_ext_features = env->features[FEAT_1_ECX];
8097 dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
8098 dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
8099 dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
14ce26e7
FB
8100#ifdef TARGET_X86_64
8101 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
8102 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
8103#endif
7eee2a50 8104 dc->flags = flags;
ed2803da 8105 dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
a2cc3b24 8106 (flags & HF_INHIBIT_IRQ_MASK)
415fa2ea 8107#ifndef CONFIG_SOFTMMU
2c0262af
FB
8108 || (flags & HF_SOFTMMU_MASK)
8109#endif
8110 );
4f31916f
FB
8111#if 0
8112 /* check addseg logic */
dc196a57 8113 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
4f31916f
FB
8114 printf("ERROR addseg\n");
8115#endif
8116
a7812ae4
PB
8117 cpu_T[0] = tcg_temp_new();
8118 cpu_T[1] = tcg_temp_new();
8119 cpu_A0 = tcg_temp_new();
a7812ae4
PB
8120
8121 cpu_tmp0 = tcg_temp_new();
8122 cpu_tmp1_i64 = tcg_temp_new_i64();
8123 cpu_tmp2_i32 = tcg_temp_new_i32();
8124 cpu_tmp3_i32 = tcg_temp_new_i32();
8125 cpu_tmp4 = tcg_temp_new();
a7812ae4
PB
8126 cpu_ptr0 = tcg_temp_new_ptr();
8127 cpu_ptr1 = tcg_temp_new_ptr();
a3251186 8128 cpu_cc_srcT = tcg_temp_local_new();
57fec1fe 8129
92414b31 8130 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8131
8132 dc->is_jmp = DISAS_NEXT;
8133 pc_ptr = pc_start;
8134 lj = -1;
2e70f6ef
PB
8135 num_insns = 0;
8136 max_insns = tb->cflags & CF_COUNT_MASK;
8137 if (max_insns == 0)
8138 max_insns = CF_COUNT_MASK;
2c0262af 8139
806f352d 8140 gen_tb_start();
2c0262af 8141 for(;;) {
72cf2d4f
BS
8142 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
8143 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a2397807
JK
8144 if (bp->pc == pc_ptr &&
8145 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
2c0262af
FB
8146 gen_debug(dc, pc_ptr - dc->cs_base);
8147 break;
8148 }
8149 }
8150 }
8151 if (search_pc) {
92414b31 8152 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
8153 if (lj < j) {
8154 lj++;
8155 while (lj < j)
ab1103de 8156 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 8157 }
25983cad 8158 tcg_ctx.gen_opc_pc[lj] = pc_ptr;
2c0262af 8159 gen_opc_cc_op[lj] = dc->cc_op;
ab1103de 8160 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 8161 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 8162 }
2e70f6ef
PB
8163 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8164 gen_io_start();
8165
0af10c86 8166 pc_ptr = disas_insn(env, dc, pc_ptr);
2e70f6ef 8167 num_insns++;
2c0262af
FB
8168 /* stop translation if indicated */
8169 if (dc->is_jmp)
8170 break;
8171 /* in single-step mode, we generate only one instruction and then
8172 generate an exception */
a2cc3b24
FB
8173 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8174 the flag and abort the translation to give the irqs a
8175 chance to happen */
5fafdf24 8176 if (dc->tf || dc->singlestep_enabled ||
2e70f6ef 8177 (flags & HF_INHIBIT_IRQ_MASK)) {
14ce26e7 8178 gen_jmp_im(pc_ptr - dc->cs_base);
2c0262af
FB
8179 gen_eob(dc);
8180 break;
8181 }
8182 /* if too long translation, stop generation too */
efd7f486 8183 if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
2e70f6ef
PB
8184 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
8185 num_insns >= max_insns) {
14ce26e7 8186 gen_jmp_im(pc_ptr - dc->cs_base);
2c0262af
FB
8187 gen_eob(dc);
8188 break;
8189 }
1b530a6d
AJ
8190 if (singlestep) {
8191 gen_jmp_im(pc_ptr - dc->cs_base);
8192 gen_eob(dc);
8193 break;
8194 }
2c0262af 8195 }
2e70f6ef
PB
8196 if (tb->cflags & CF_LAST_IO)
8197 gen_io_end();
806f352d 8198 gen_tb_end(tb, num_insns);
efd7f486 8199 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
8200 /* don't forget to fill in the last values */
8201 if (search_pc) {
92414b31 8202 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
8203 lj++;
8204 while (lj <= j)
ab1103de 8205 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 8206 }
3b46e624 8207
2c0262af 8208#ifdef DEBUG_DISAS
8fec2b8c 8209 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
14ce26e7 8210 int disas_flags;
93fcfe39
AL
8211 qemu_log("----------------\n");
8212 qemu_log("IN: %s\n", lookup_symbol(pc_start));
14ce26e7
FB
8213#ifdef TARGET_X86_64
8214 if (dc->code64)
8215 disas_flags = 2;
8216 else
8217#endif
8218 disas_flags = !dc->code32;
f4359b9f 8219 log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
93fcfe39 8220 qemu_log("\n");
2c0262af
FB
8221 }
8222#endif
8223
2e70f6ef 8224 if (!search_pc) {
2c0262af 8225 tb->size = pc_ptr - pc_start;
2e70f6ef
PB
8226 tb->icount = num_insns;
8227 }
2c0262af
FB
8228}
8229
317ac620 8230void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
2c0262af 8231{
467215c2 8232 gen_intermediate_code_internal(x86_env_get_cpu(env), tb, false);
2c0262af
FB
8233}
8234
317ac620 8235void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
2c0262af 8236{
467215c2 8237 gen_intermediate_code_internal(x86_env_get_cpu(env), tb, true);
2c0262af
FB
8238}
8239
317ac620 8240void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
d2856f1a
AJ
8241{
8242 int cc_op;
8243#ifdef DEBUG_DISAS
8fec2b8c 8244 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
d2856f1a 8245 int i;
93fcfe39 8246 qemu_log("RESTORE:\n");
d2856f1a 8247 for(i = 0;i <= pc_pos; i++) {
ab1103de 8248 if (tcg_ctx.gen_opc_instr_start[i]) {
25983cad
EV
8249 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
8250 tcg_ctx.gen_opc_pc[i]);
d2856f1a
AJ
8251 }
8252 }
e87b7cb0 8253 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
25983cad 8254 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
d2856f1a
AJ
8255 (uint32_t)tb->cs_base);
8256 }
8257#endif
25983cad 8258 env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
d2856f1a
AJ
8259 cc_op = gen_opc_cc_op[pc_pos];
8260 if (cc_op != CC_OP_DYNAMIC)
8261 env->cc_op = cc_op;
8262}