]> git.proxmox.com Git - mirror_qemu.git/blame - target-i386/translate.c
cpu: Move singlestep_enabled field from CPU_COMMON to CPUState
[mirror_qemu.git] / target-i386 / translate.c
CommitLineData
2c0262af
FB
1/*
2 * i386 translation
5fafdf24 3 *
2c0262af
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
18 */
19#include <stdarg.h>
20#include <stdlib.h>
21#include <stdio.h>
22#include <string.h>
23#include <inttypes.h>
24#include <signal.h>
2c0262af 25
bec93d72 26#include "qemu/host-utils.h"
2c0262af 27#include "cpu.h"
76cad711 28#include "disas/disas.h"
57fec1fe 29#include "tcg-op.h"
2c0262af 30
a7812ae4
PB
31#include "helper.h"
32#define GEN_HELPER 1
33#include "helper.h"
34
2c0262af
FB
35#define PREFIX_REPZ 0x01
36#define PREFIX_REPNZ 0x02
37#define PREFIX_LOCK 0x04
38#define PREFIX_DATA 0x08
39#define PREFIX_ADR 0x10
701ed211 40#define PREFIX_VEX 0x20
2c0262af 41
14ce26e7 42#ifdef TARGET_X86_64
14ce26e7
FB
43#define CODE64(s) ((s)->code64)
44#define REX_X(s) ((s)->rex_x)
45#define REX_B(s) ((s)->rex_b)
14ce26e7 46#else
14ce26e7
FB
47#define CODE64(s) 0
48#define REX_X(s) 0
49#define REX_B(s) 0
50#endif
51
bec93d72
RH
52#ifdef TARGET_X86_64
53# define ctztl ctz64
54# define clztl clz64
55#else
56# define ctztl ctz32
57# define clztl clz32
58#endif
59
57fec1fe
FB
60//#define MACRO_TEST 1
61
57fec1fe 62/* global register indexes */
a7812ae4 63static TCGv_ptr cpu_env;
a3251186 64static TCGv cpu_A0;
988c3eb0 65static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
a7812ae4 66static TCGv_i32 cpu_cc_op;
cc739bb0 67static TCGv cpu_regs[CPU_NB_REGS];
1e4840bf 68/* local temps */
3b9d3cf1 69static TCGv cpu_T[2];
57fec1fe 70/* local register indexes (only used inside old micro ops) */
a7812ae4
PB
71static TCGv cpu_tmp0, cpu_tmp4;
72static TCGv_ptr cpu_ptr0, cpu_ptr1;
73static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
74static TCGv_i64 cpu_tmp1_i64;
57fec1fe 75
1a7ff922
PB
76static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
77
022c62cb 78#include "exec/gen-icount.h"
2e70f6ef 79
57fec1fe
FB
80#ifdef TARGET_X86_64
81static int x86_64_hregs;
ae063a68
FB
82#endif
83
2c0262af
FB
84typedef struct DisasContext {
85 /* current insn context */
86 int override; /* -1 if no override */
87 int prefix;
88 int aflag, dflag;
14ce26e7 89 target_ulong pc; /* pc = eip + cs_base */
2c0262af
FB
90 int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
91 static state change (stop translation) */
92 /* current block context */
14ce26e7 93 target_ulong cs_base; /* base of CS segment */
2c0262af
FB
94 int pe; /* protected mode */
95 int code32; /* 32 bit code segment */
14ce26e7
FB
96#ifdef TARGET_X86_64
97 int lma; /* long mode active */
98 int code64; /* 64 bit code segment */
99 int rex_x, rex_b;
100#endif
701ed211
RH
101 int vex_l; /* vex vector length */
102 int vex_v; /* vex vvvv register, without 1's compliment. */
2c0262af 103 int ss32; /* 32 bit stack segment */
fee71888 104 CCOp cc_op; /* current CC operation */
e207582f 105 bool cc_op_dirty;
2c0262af
FB
106 int addseg; /* non zero if either DS/ES/SS have a non zero base */
107 int f_st; /* currently unused */
108 int vm86; /* vm86 mode */
109 int cpl;
110 int iopl;
111 int tf; /* TF cpu flag */
34865134 112 int singlestep_enabled; /* "hardware" single step enabled */
2c0262af
FB
113 int jmp_opt; /* use direct block chaining for direct jumps */
114 int mem_index; /* select memory access functions */
c068688b 115 uint64_t flags; /* all execution flags */
2c0262af
FB
116 struct TranslationBlock *tb;
117 int popl_esp_hack; /* for correct popl with esp base handling */
14ce26e7
FB
118 int rip_offset; /* only used in x86_64, but left for simplicity */
119 int cpuid_features;
3d7374c5 120 int cpuid_ext_features;
e771edab 121 int cpuid_ext2_features;
12e26b75 122 int cpuid_ext3_features;
a9321a4d 123 int cpuid_7_0_ebx_features;
2c0262af
FB
124} DisasContext;
125
126static void gen_eob(DisasContext *s);
14ce26e7
FB
127static void gen_jmp(DisasContext *s, target_ulong eip);
128static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
63633fe6 129static void gen_op(DisasContext *s1, int op, int ot, int d);
2c0262af
FB
130
131/* i386 arith/logic operations */
132enum {
5fafdf24
TS
133 OP_ADDL,
134 OP_ORL,
135 OP_ADCL,
2c0262af 136 OP_SBBL,
5fafdf24
TS
137 OP_ANDL,
138 OP_SUBL,
139 OP_XORL,
2c0262af
FB
140 OP_CMPL,
141};
142
143/* i386 shift ops */
144enum {
5fafdf24
TS
145 OP_ROL,
146 OP_ROR,
147 OP_RCL,
148 OP_RCR,
149 OP_SHL,
150 OP_SHR,
2c0262af
FB
151 OP_SHL1, /* undocumented */
152 OP_SAR = 7,
153};
154
8e1c85e3
FB
155enum {
156 JCC_O,
157 JCC_B,
158 JCC_Z,
159 JCC_BE,
160 JCC_S,
161 JCC_P,
162 JCC_L,
163 JCC_LE,
164};
165
2c0262af
FB
166/* operand size */
167enum {
168 OT_BYTE = 0,
169 OT_WORD,
5fafdf24 170 OT_LONG,
2c0262af
FB
171 OT_QUAD,
172};
173
174enum {
175 /* I386 int registers */
176 OR_EAX, /* MUST be even numbered */
177 OR_ECX,
178 OR_EDX,
179 OR_EBX,
180 OR_ESP,
181 OR_EBP,
182 OR_ESI,
183 OR_EDI,
14ce26e7
FB
184
185 OR_TMP0 = 16, /* temporary operand register */
2c0262af
FB
186 OR_TMP1,
187 OR_A0, /* temporary register used when doing address evaluation */
2c0262af
FB
188};
189
b666265b 190enum {
a3251186
RH
191 USES_CC_DST = 1,
192 USES_CC_SRC = 2,
988c3eb0
RH
193 USES_CC_SRC2 = 4,
194 USES_CC_SRCT = 8,
b666265b
RH
195};
196
197/* Bit set if the global variable is live after setting CC_OP to X. */
198static const uint8_t cc_op_live[CC_OP_NB] = {
988c3eb0 199 [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
b666265b
RH
200 [CC_OP_EFLAGS] = USES_CC_SRC,
201 [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
202 [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
988c3eb0 203 [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
a3251186 204 [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
988c3eb0 205 [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
b666265b
RH
206 [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
207 [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
208 [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
209 [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
210 [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
bc4b43dc 211 [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
cd7f97ca
RH
212 [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
213 [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
214 [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
436ff2d2 215 [CC_OP_CLR] = 0,
b666265b
RH
216};
217
e207582f 218static void set_cc_op(DisasContext *s, CCOp op)
3ca51d07 219{
b666265b
RH
220 int dead;
221
222 if (s->cc_op == op) {
223 return;
224 }
225
226 /* Discard CC computation that will no longer be used. */
227 dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
228 if (dead & USES_CC_DST) {
229 tcg_gen_discard_tl(cpu_cc_dst);
e207582f 230 }
b666265b
RH
231 if (dead & USES_CC_SRC) {
232 tcg_gen_discard_tl(cpu_cc_src);
233 }
988c3eb0
RH
234 if (dead & USES_CC_SRC2) {
235 tcg_gen_discard_tl(cpu_cc_src2);
236 }
a3251186
RH
237 if (dead & USES_CC_SRCT) {
238 tcg_gen_discard_tl(cpu_cc_srcT);
239 }
b666265b 240
e2f515cf
RH
241 if (op == CC_OP_DYNAMIC) {
242 /* The DYNAMIC setting is translator only, and should never be
243 stored. Thus we always consider it clean. */
244 s->cc_op_dirty = false;
245 } else {
246 /* Discard any computed CC_OP value (see shifts). */
247 if (s->cc_op == CC_OP_DYNAMIC) {
248 tcg_gen_discard_i32(cpu_cc_op);
249 }
250 s->cc_op_dirty = true;
251 }
b666265b 252 s->cc_op = op;
e207582f
RH
253}
254
e207582f
RH
255static void gen_update_cc_op(DisasContext *s)
256{
257 if (s->cc_op_dirty) {
773cdfcc 258 tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
e207582f
RH
259 s->cc_op_dirty = false;
260 }
3ca51d07
RH
261}
262
57fec1fe
FB
263static inline void gen_op_movl_T0_0(void)
264{
265 tcg_gen_movi_tl(cpu_T[0], 0);
266}
267
268static inline void gen_op_movl_T0_im(int32_t val)
269{
270 tcg_gen_movi_tl(cpu_T[0], val);
271}
272
273static inline void gen_op_movl_T0_imu(uint32_t val)
274{
275 tcg_gen_movi_tl(cpu_T[0], val);
276}
277
278static inline void gen_op_movl_T1_im(int32_t val)
279{
280 tcg_gen_movi_tl(cpu_T[1], val);
281}
282
283static inline void gen_op_movl_T1_imu(uint32_t val)
284{
285 tcg_gen_movi_tl(cpu_T[1], val);
286}
287
288static inline void gen_op_movl_A0_im(uint32_t val)
289{
290 tcg_gen_movi_tl(cpu_A0, val);
291}
292
293#ifdef TARGET_X86_64
294static inline void gen_op_movq_A0_im(int64_t val)
295{
296 tcg_gen_movi_tl(cpu_A0, val);
297}
298#endif
299
300static inline void gen_movtl_T0_im(target_ulong val)
301{
302 tcg_gen_movi_tl(cpu_T[0], val);
303}
304
305static inline void gen_movtl_T1_im(target_ulong val)
306{
307 tcg_gen_movi_tl(cpu_T[1], val);
308}
309
310static inline void gen_op_andl_T0_ffff(void)
311{
312 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
313}
314
315static inline void gen_op_andl_T0_im(uint32_t val)
316{
317 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
318}
319
320static inline void gen_op_movl_T0_T1(void)
321{
322 tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
323}
324
325static inline void gen_op_andl_A0_ffff(void)
326{
327 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
328}
329
14ce26e7
FB
330#ifdef TARGET_X86_64
331
332#define NB_OP_SIZES 4
333
14ce26e7
FB
334#else /* !TARGET_X86_64 */
335
336#define NB_OP_SIZES 3
337
14ce26e7
FB
338#endif /* !TARGET_X86_64 */
339
e2542fe2 340#if defined(HOST_WORDS_BIGENDIAN)
57fec1fe
FB
341#define REG_B_OFFSET (sizeof(target_ulong) - 1)
342#define REG_H_OFFSET (sizeof(target_ulong) - 2)
343#define REG_W_OFFSET (sizeof(target_ulong) - 2)
344#define REG_L_OFFSET (sizeof(target_ulong) - 4)
345#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
14ce26e7 346#else
57fec1fe
FB
347#define REG_B_OFFSET 0
348#define REG_H_OFFSET 1
349#define REG_W_OFFSET 0
350#define REG_L_OFFSET 0
351#define REG_LH_OFFSET 4
14ce26e7 352#endif
57fec1fe 353
96d7073f
PM
354/* In instruction encodings for byte register accesses the
355 * register number usually indicates "low 8 bits of register N";
356 * however there are some special cases where N 4..7 indicates
357 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
358 * true for this special case, false otherwise.
359 */
360static inline bool byte_reg_is_xH(int reg)
361{
362 if (reg < 4) {
363 return false;
364 }
365#ifdef TARGET_X86_64
366 if (reg >= 8 || x86_64_hregs) {
367 return false;
368 }
369#endif
370 return true;
371}
372
1e4840bf 373static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
57fec1fe
FB
374{
375 switch(ot) {
376 case OT_BYTE:
96d7073f 377 if (!byte_reg_is_xH(reg)) {
c832e3de 378 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
57fec1fe 379 } else {
c832e3de 380 tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
57fec1fe
FB
381 }
382 break;
383 case OT_WORD:
c832e3de 384 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
57fec1fe 385 break;
cc739bb0 386 default: /* XXX this shouldn't be reached; abort? */
57fec1fe 387 case OT_LONG:
cc739bb0
LD
388 /* For x86_64, this sets the higher half of register to zero.
389 For i386, this is equivalent to a mov. */
390 tcg_gen_ext32u_tl(cpu_regs[reg], t0);
57fec1fe 391 break;
cc739bb0 392#ifdef TARGET_X86_64
57fec1fe 393 case OT_QUAD:
cc739bb0 394 tcg_gen_mov_tl(cpu_regs[reg], t0);
57fec1fe 395 break;
14ce26e7 396#endif
57fec1fe
FB
397 }
398}
2c0262af 399
57fec1fe
FB
400static inline void gen_op_mov_reg_T0(int ot, int reg)
401{
1e4840bf 402 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
57fec1fe
FB
403}
404
405static inline void gen_op_mov_reg_T1(int ot, int reg)
406{
1e4840bf 407 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
57fec1fe
FB
408}
409
410static inline void gen_op_mov_reg_A0(int size, int reg)
411{
412 switch(size) {
93ab25d7 413 case OT_BYTE:
c832e3de 414 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
57fec1fe 415 break;
cc739bb0 416 default: /* XXX this shouldn't be reached; abort? */
93ab25d7 417 case OT_WORD:
cc739bb0
LD
418 /* For x86_64, this sets the higher half of register to zero.
419 For i386, this is equivalent to a mov. */
420 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
57fec1fe 421 break;
cc739bb0 422#ifdef TARGET_X86_64
93ab25d7 423 case OT_LONG:
cc739bb0 424 tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
57fec1fe 425 break;
14ce26e7 426#endif
57fec1fe
FB
427 }
428}
429
1e4840bf 430static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
57fec1fe 431{
96d7073f
PM
432 if (ot == OT_BYTE && byte_reg_is_xH(reg)) {
433 tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
434 tcg_gen_ext8u_tl(t0, t0);
435 } else {
cc739bb0 436 tcg_gen_mov_tl(t0, cpu_regs[reg]);
57fec1fe
FB
437 }
438}
439
1e4840bf
FB
440static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
441{
442 gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
443}
444
57fec1fe
FB
445static inline void gen_op_movl_A0_reg(int reg)
446{
cc739bb0 447 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
57fec1fe
FB
448}
449
450static inline void gen_op_addl_A0_im(int32_t val)
451{
452 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
14ce26e7 453#ifdef TARGET_X86_64
57fec1fe 454 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
14ce26e7 455#endif
57fec1fe 456}
2c0262af 457
14ce26e7 458#ifdef TARGET_X86_64
57fec1fe
FB
459static inline void gen_op_addq_A0_im(int64_t val)
460{
461 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
462}
14ce26e7 463#endif
57fec1fe
FB
464
465static void gen_add_A0_im(DisasContext *s, int val)
466{
467#ifdef TARGET_X86_64
468 if (CODE64(s))
469 gen_op_addq_A0_im(val);
470 else
471#endif
472 gen_op_addl_A0_im(val);
473}
2c0262af 474
57fec1fe 475static inline void gen_op_addl_T0_T1(void)
2c0262af 476{
57fec1fe
FB
477 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
478}
479
480static inline void gen_op_jmp_T0(void)
481{
317ac620 482 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
57fec1fe
FB
483}
484
6e0d8677 485static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
57fec1fe 486{
6e0d8677 487 switch(size) {
93ab25d7 488 case OT_BYTE:
cc739bb0 489 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
c832e3de 490 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
6e0d8677 491 break;
93ab25d7 492 case OT_WORD:
cc739bb0
LD
493 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
494 /* For x86_64, this sets the higher half of register to zero.
495 For i386, this is equivalent to a nop. */
496 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
497 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
6e0d8677
FB
498 break;
499#ifdef TARGET_X86_64
93ab25d7 500 case OT_LONG:
cc739bb0 501 tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
6e0d8677
FB
502 break;
503#endif
504 }
57fec1fe
FB
505}
506
6e0d8677 507static inline void gen_op_add_reg_T0(int size, int reg)
57fec1fe 508{
6e0d8677 509 switch(size) {
93ab25d7 510 case OT_BYTE:
cc739bb0 511 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
c832e3de 512 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
6e0d8677 513 break;
93ab25d7 514 case OT_WORD:
cc739bb0
LD
515 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
516 /* For x86_64, this sets the higher half of register to zero.
517 For i386, this is equivalent to a nop. */
518 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
519 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
6e0d8677 520 break;
14ce26e7 521#ifdef TARGET_X86_64
93ab25d7 522 case OT_LONG:
cc739bb0 523 tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
6e0d8677 524 break;
14ce26e7 525#endif
6e0d8677
FB
526 }
527}
57fec1fe 528
57fec1fe
FB
529static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
530{
cc739bb0
LD
531 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
532 if (shift != 0)
57fec1fe
FB
533 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
534 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
cc739bb0
LD
535 /* For x86_64, this sets the higher half of register to zero.
536 For i386, this is equivalent to a nop. */
537 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
57fec1fe 538}
2c0262af 539
57fec1fe
FB
540static inline void gen_op_movl_A0_seg(int reg)
541{
317ac620 542 tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
57fec1fe 543}
2c0262af 544
7162ab21 545static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
57fec1fe 546{
317ac620 547 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
57fec1fe 548#ifdef TARGET_X86_64
7162ab21
VC
549 if (CODE64(s)) {
550 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
551 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
552 } else {
553 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
554 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
555 }
556#else
557 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
57fec1fe
FB
558#endif
559}
2c0262af 560
14ce26e7 561#ifdef TARGET_X86_64
57fec1fe
FB
562static inline void gen_op_movq_A0_seg(int reg)
563{
317ac620 564 tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
57fec1fe 565}
14ce26e7 566
57fec1fe
FB
567static inline void gen_op_addq_A0_seg(int reg)
568{
317ac620 569 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
57fec1fe
FB
570 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
571}
572
573static inline void gen_op_movq_A0_reg(int reg)
574{
cc739bb0 575 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
57fec1fe
FB
576}
577
578static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
579{
cc739bb0
LD
580 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
581 if (shift != 0)
57fec1fe
FB
582 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
583 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
584}
14ce26e7
FB
585#endif
586
57fec1fe
FB
587static inline void gen_op_lds_T0_A0(int idx)
588{
589 int mem_index = (idx >> 2) - 1;
590 switch(idx & 3) {
93ab25d7 591 case OT_BYTE:
57fec1fe
FB
592 tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
593 break;
93ab25d7 594 case OT_WORD:
57fec1fe
FB
595 tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
596 break;
597 default:
93ab25d7 598 case OT_LONG:
57fec1fe
FB
599 tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
600 break;
601 }
602}
2c0262af 603
1e4840bf 604static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
57fec1fe
FB
605{
606 int mem_index = (idx >> 2) - 1;
607 switch(idx & 3) {
93ab25d7 608 case OT_BYTE:
1e4840bf 609 tcg_gen_qemu_ld8u(t0, a0, mem_index);
57fec1fe 610 break;
93ab25d7 611 case OT_WORD:
1e4840bf 612 tcg_gen_qemu_ld16u(t0, a0, mem_index);
57fec1fe 613 break;
93ab25d7 614 case OT_LONG:
1e4840bf 615 tcg_gen_qemu_ld32u(t0, a0, mem_index);
57fec1fe
FB
616 break;
617 default:
93ab25d7 618 case OT_QUAD:
a7812ae4
PB
619 /* Should never happen on 32-bit targets. */
620#ifdef TARGET_X86_64
1e4840bf 621 tcg_gen_qemu_ld64(t0, a0, mem_index);
a7812ae4 622#endif
57fec1fe
FB
623 break;
624 }
625}
2c0262af 626
1e4840bf
FB
627/* XXX: always use ldu or lds */
628static inline void gen_op_ld_T0_A0(int idx)
629{
630 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
631}
632
57fec1fe
FB
633static inline void gen_op_ldu_T0_A0(int idx)
634{
1e4840bf 635 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
57fec1fe 636}
2c0262af 637
57fec1fe 638static inline void gen_op_ld_T1_A0(int idx)
1e4840bf
FB
639{
640 gen_op_ld_v(idx, cpu_T[1], cpu_A0);
641}
642
643static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
57fec1fe
FB
644{
645 int mem_index = (idx >> 2) - 1;
646 switch(idx & 3) {
93ab25d7 647 case OT_BYTE:
1e4840bf 648 tcg_gen_qemu_st8(t0, a0, mem_index);
57fec1fe 649 break;
93ab25d7 650 case OT_WORD:
1e4840bf 651 tcg_gen_qemu_st16(t0, a0, mem_index);
57fec1fe 652 break;
93ab25d7 653 case OT_LONG:
1e4840bf 654 tcg_gen_qemu_st32(t0, a0, mem_index);
57fec1fe
FB
655 break;
656 default:
93ab25d7 657 case OT_QUAD:
a7812ae4
PB
658 /* Should never happen on 32-bit targets. */
659#ifdef TARGET_X86_64
1e4840bf 660 tcg_gen_qemu_st64(t0, a0, mem_index);
a7812ae4 661#endif
57fec1fe
FB
662 break;
663 }
664}
4f31916f 665
57fec1fe
FB
666static inline void gen_op_st_T0_A0(int idx)
667{
1e4840bf 668 gen_op_st_v(idx, cpu_T[0], cpu_A0);
57fec1fe 669}
4f31916f 670
57fec1fe
FB
671static inline void gen_op_st_T1_A0(int idx)
672{
1e4840bf 673 gen_op_st_v(idx, cpu_T[1], cpu_A0);
57fec1fe 674}
4f31916f 675
14ce26e7
FB
676static inline void gen_jmp_im(target_ulong pc)
677{
57fec1fe 678 tcg_gen_movi_tl(cpu_tmp0, pc);
317ac620 679 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
14ce26e7
FB
680}
681
2c0262af
FB
682static inline void gen_string_movl_A0_ESI(DisasContext *s)
683{
684 int override;
685
686 override = s->override;
14ce26e7
FB
687#ifdef TARGET_X86_64
688 if (s->aflag == 2) {
689 if (override >= 0) {
57fec1fe
FB
690 gen_op_movq_A0_seg(override);
691 gen_op_addq_A0_reg_sN(0, R_ESI);
14ce26e7 692 } else {
57fec1fe 693 gen_op_movq_A0_reg(R_ESI);
14ce26e7
FB
694 }
695 } else
696#endif
2c0262af
FB
697 if (s->aflag) {
698 /* 32 bit address */
699 if (s->addseg && override < 0)
700 override = R_DS;
701 if (override >= 0) {
57fec1fe
FB
702 gen_op_movl_A0_seg(override);
703 gen_op_addl_A0_reg_sN(0, R_ESI);
2c0262af 704 } else {
57fec1fe 705 gen_op_movl_A0_reg(R_ESI);
2c0262af
FB
706 }
707 } else {
708 /* 16 address, always override */
709 if (override < 0)
710 override = R_DS;
57fec1fe 711 gen_op_movl_A0_reg(R_ESI);
2c0262af 712 gen_op_andl_A0_ffff();
7162ab21 713 gen_op_addl_A0_seg(s, override);
2c0262af
FB
714 }
715}
716
717static inline void gen_string_movl_A0_EDI(DisasContext *s)
718{
14ce26e7
FB
719#ifdef TARGET_X86_64
720 if (s->aflag == 2) {
57fec1fe 721 gen_op_movq_A0_reg(R_EDI);
14ce26e7
FB
722 } else
723#endif
2c0262af
FB
724 if (s->aflag) {
725 if (s->addseg) {
57fec1fe
FB
726 gen_op_movl_A0_seg(R_ES);
727 gen_op_addl_A0_reg_sN(0, R_EDI);
2c0262af 728 } else {
57fec1fe 729 gen_op_movl_A0_reg(R_EDI);
2c0262af
FB
730 }
731 } else {
57fec1fe 732 gen_op_movl_A0_reg(R_EDI);
2c0262af 733 gen_op_andl_A0_ffff();
7162ab21 734 gen_op_addl_A0_seg(s, R_ES);
2c0262af
FB
735 }
736}
737
6e0d8677
FB
738static inline void gen_op_movl_T0_Dshift(int ot)
739{
317ac620 740 tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
6e0d8677 741 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
2c0262af
FB
742};
743
d824df34 744static TCGv gen_ext_tl(TCGv dst, TCGv src, int size, bool sign)
6e0d8677 745{
d824df34 746 switch (size) {
6e0d8677 747 case OT_BYTE:
d824df34
PB
748 if (sign) {
749 tcg_gen_ext8s_tl(dst, src);
750 } else {
751 tcg_gen_ext8u_tl(dst, src);
752 }
753 return dst;
6e0d8677 754 case OT_WORD:
d824df34
PB
755 if (sign) {
756 tcg_gen_ext16s_tl(dst, src);
757 } else {
758 tcg_gen_ext16u_tl(dst, src);
759 }
760 return dst;
761#ifdef TARGET_X86_64
6e0d8677 762 case OT_LONG:
d824df34
PB
763 if (sign) {
764 tcg_gen_ext32s_tl(dst, src);
765 } else {
766 tcg_gen_ext32u_tl(dst, src);
767 }
768 return dst;
769#endif
6e0d8677 770 default:
d824df34 771 return src;
6e0d8677
FB
772 }
773}
3b46e624 774
d824df34
PB
775static void gen_extu(int ot, TCGv reg)
776{
777 gen_ext_tl(reg, reg, ot, false);
778}
779
6e0d8677
FB
780static void gen_exts(int ot, TCGv reg)
781{
d824df34 782 gen_ext_tl(reg, reg, ot, true);
6e0d8677 783}
2c0262af 784
6e0d8677
FB
785static inline void gen_op_jnz_ecx(int size, int label1)
786{
cc739bb0 787 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
6e0d8677 788 gen_extu(size + 1, cpu_tmp0);
cb63669a 789 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
6e0d8677
FB
790}
791
792static inline void gen_op_jz_ecx(int size, int label1)
793{
cc739bb0 794 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
6e0d8677 795 gen_extu(size + 1, cpu_tmp0);
cb63669a 796 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
6e0d8677 797}
2c0262af 798
a7812ae4
PB
799static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
800{
801 switch (ot) {
93ab25d7
PB
802 case OT_BYTE:
803 gen_helper_inb(v, n);
804 break;
805 case OT_WORD:
806 gen_helper_inw(v, n);
807 break;
808 case OT_LONG:
809 gen_helper_inl(v, n);
810 break;
a7812ae4 811 }
a7812ae4 812}
2c0262af 813
a7812ae4
PB
814static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
815{
816 switch (ot) {
93ab25d7
PB
817 case OT_BYTE:
818 gen_helper_outb(v, n);
819 break;
820 case OT_WORD:
821 gen_helper_outw(v, n);
822 break;
823 case OT_LONG:
824 gen_helper_outl(v, n);
825 break;
a7812ae4 826 }
a7812ae4 827}
f115e911 828
b8b6a50b
FB
829static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
830 uint32_t svm_flags)
f115e911 831{
b8b6a50b
FB
832 int state_saved;
833 target_ulong next_eip;
834
835 state_saved = 0;
f115e911 836 if (s->pe && (s->cpl > s->iopl || s->vm86)) {
773cdfcc 837 gen_update_cc_op(s);
14ce26e7 838 gen_jmp_im(cur_eip);
b8b6a50b 839 state_saved = 1;
b6abf97d 840 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
a7812ae4 841 switch (ot) {
93ab25d7 842 case OT_BYTE:
4a7443be
BS
843 gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
844 break;
93ab25d7 845 case OT_WORD:
4a7443be
BS
846 gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
847 break;
93ab25d7 848 case OT_LONG:
4a7443be
BS
849 gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
850 break;
a7812ae4 851 }
b8b6a50b 852 }
872929aa 853 if(s->flags & HF_SVMI_MASK) {
b8b6a50b 854 if (!state_saved) {
773cdfcc 855 gen_update_cc_op(s);
b8b6a50b 856 gen_jmp_im(cur_eip);
b8b6a50b
FB
857 }
858 svm_flags |= (1 << (4 + ot));
859 next_eip = s->pc - s->cs_base;
b6abf97d 860 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
052e80d5
BS
861 gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
862 tcg_const_i32(svm_flags),
a7812ae4 863 tcg_const_i32(next_eip - cur_eip));
f115e911
FB
864 }
865}
866
2c0262af
FB
867static inline void gen_movs(DisasContext *s, int ot)
868{
869 gen_string_movl_A0_ESI(s);
57fec1fe 870 gen_op_ld_T0_A0(ot + s->mem_index);
2c0262af 871 gen_string_movl_A0_EDI(s);
57fec1fe 872 gen_op_st_T0_A0(ot + s->mem_index);
6e0d8677
FB
873 gen_op_movl_T0_Dshift(ot);
874 gen_op_add_reg_T0(s->aflag, R_ESI);
875 gen_op_add_reg_T0(s->aflag, R_EDI);
2c0262af
FB
876}
877
b6abf97d
FB
878static void gen_op_update1_cc(void)
879{
b6abf97d
FB
880 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
881}
882
883static void gen_op_update2_cc(void)
884{
885 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
886 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
887}
888
988c3eb0
RH
889static void gen_op_update3_cc(TCGv reg)
890{
891 tcg_gen_mov_tl(cpu_cc_src2, reg);
892 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
893 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
894}
895
b6abf97d
FB
896static inline void gen_op_testl_T0_T1_cc(void)
897{
b6abf97d
FB
898 tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
899}
900
901static void gen_op_update_neg_cc(void)
902{
b6abf97d 903 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
a3251186
RH
904 tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
905 tcg_gen_movi_tl(cpu_cc_srcT, 0);
b6abf97d
FB
906}
907
d229edce
RH
908/* compute all eflags to cc_src */
909static void gen_compute_eflags(DisasContext *s)
8e1c85e3 910{
988c3eb0 911 TCGv zero, dst, src1, src2;
db9f2597
RH
912 int live, dead;
913
d229edce
RH
914 if (s->cc_op == CC_OP_EFLAGS) {
915 return;
916 }
436ff2d2
RH
917 if (s->cc_op == CC_OP_CLR) {
918 tcg_gen_movi_tl(cpu_cc_src, CC_Z);
919 set_cc_op(s, CC_OP_EFLAGS);
920 return;
921 }
db9f2597
RH
922
923 TCGV_UNUSED(zero);
924 dst = cpu_cc_dst;
925 src1 = cpu_cc_src;
988c3eb0 926 src2 = cpu_cc_src2;
db9f2597
RH
927
928 /* Take care to not read values that are not live. */
929 live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
988c3eb0 930 dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
db9f2597
RH
931 if (dead) {
932 zero = tcg_const_tl(0);
933 if (dead & USES_CC_DST) {
934 dst = zero;
935 }
936 if (dead & USES_CC_SRC) {
937 src1 = zero;
938 }
988c3eb0
RH
939 if (dead & USES_CC_SRC2) {
940 src2 = zero;
941 }
db9f2597
RH
942 }
943
773cdfcc 944 gen_update_cc_op(s);
988c3eb0 945 gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
d229edce 946 set_cc_op(s, CC_OP_EFLAGS);
db9f2597
RH
947
948 if (dead) {
949 tcg_temp_free(zero);
950 }
8e1c85e3
FB
951}
952
bec93d72
RH
953typedef struct CCPrepare {
954 TCGCond cond;
955 TCGv reg;
956 TCGv reg2;
957 target_ulong imm;
958 target_ulong mask;
959 bool use_reg2;
960 bool no_setcond;
961} CCPrepare;
962
06847f1f 963/* compute eflags.C to reg */
bec93d72 964static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
06847f1f
RH
965{
966 TCGv t0, t1;
bec93d72 967 int size, shift;
06847f1f
RH
968
969 switch (s->cc_op) {
970 case CC_OP_SUBB ... CC_OP_SUBQ:
a3251186 971 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
06847f1f
RH
972 size = s->cc_op - CC_OP_SUBB;
973 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
974 /* If no temporary was used, be careful not to alias t1 and t0. */
975 t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
a3251186 976 tcg_gen_mov_tl(t0, cpu_cc_srcT);
06847f1f
RH
977 gen_extu(size, t0);
978 goto add_sub;
979
980 case CC_OP_ADDB ... CC_OP_ADDQ:
981 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
982 size = s->cc_op - CC_OP_ADDB;
983 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
984 t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
985 add_sub:
bec93d72
RH
986 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
987 .reg2 = t1, .mask = -1, .use_reg2 = true };
06847f1f 988
06847f1f 989 case CC_OP_LOGICB ... CC_OP_LOGICQ:
436ff2d2 990 case CC_OP_CLR:
bec93d72 991 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
06847f1f
RH
992
993 case CC_OP_INCB ... CC_OP_INCQ:
994 case CC_OP_DECB ... CC_OP_DECQ:
bec93d72
RH
995 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
996 .mask = -1, .no_setcond = true };
06847f1f
RH
997
998 case CC_OP_SHLB ... CC_OP_SHLQ:
999 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
1000 size = s->cc_op - CC_OP_SHLB;
bec93d72
RH
1001 shift = (8 << size) - 1;
1002 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1003 .mask = (target_ulong)1 << shift };
06847f1f
RH
1004
1005 case CC_OP_MULB ... CC_OP_MULQ:
bec93d72
RH
1006 return (CCPrepare) { .cond = TCG_COND_NE,
1007 .reg = cpu_cc_src, .mask = -1 };
06847f1f 1008
bc4b43dc
RH
1009 case CC_OP_BMILGB ... CC_OP_BMILGQ:
1010 size = s->cc_op - CC_OP_BMILGB;
1011 t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
1012 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
1013
cd7f97ca
RH
1014 case CC_OP_ADCX:
1015 case CC_OP_ADCOX:
1016 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
1017 .mask = -1, .no_setcond = true };
1018
06847f1f
RH
1019 case CC_OP_EFLAGS:
1020 case CC_OP_SARB ... CC_OP_SARQ:
1021 /* CC_SRC & 1 */
bec93d72
RH
1022 return (CCPrepare) { .cond = TCG_COND_NE,
1023 .reg = cpu_cc_src, .mask = CC_C };
06847f1f
RH
1024
1025 default:
1026 /* The need to compute only C from CC_OP_DYNAMIC is important
1027 in efficiently implementing e.g. INC at the start of a TB. */
1028 gen_update_cc_op(s);
988c3eb0
RH
1029 gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
1030 cpu_cc_src2, cpu_cc_op);
bec93d72
RH
1031 return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1032 .mask = -1, .no_setcond = true };
06847f1f
RH
1033 }
1034}
1035
1608ecca 1036/* compute eflags.P to reg */
bec93d72 1037static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
1608ecca 1038{
d229edce 1039 gen_compute_eflags(s);
bec93d72
RH
1040 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1041 .mask = CC_P };
1608ecca
PB
1042}
1043
1044/* compute eflags.S to reg */
bec93d72 1045static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
1608ecca 1046{
086c4077
RH
1047 switch (s->cc_op) {
1048 case CC_OP_DYNAMIC:
1049 gen_compute_eflags(s);
1050 /* FALLTHRU */
1051 case CC_OP_EFLAGS:
cd7f97ca
RH
1052 case CC_OP_ADCX:
1053 case CC_OP_ADOX:
1054 case CC_OP_ADCOX:
bec93d72
RH
1055 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1056 .mask = CC_S };
436ff2d2
RH
1057 case CC_OP_CLR:
1058 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
086c4077
RH
1059 default:
1060 {
1061 int size = (s->cc_op - CC_OP_ADDB) & 3;
1062 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
bec93d72 1063 return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
086c4077 1064 }
086c4077 1065 }
1608ecca
PB
1066}
1067
1068/* compute eflags.O to reg */
bec93d72 1069static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
1608ecca 1070{
cd7f97ca
RH
1071 switch (s->cc_op) {
1072 case CC_OP_ADOX:
1073 case CC_OP_ADCOX:
1074 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
1075 .mask = -1, .no_setcond = true };
436ff2d2
RH
1076 case CC_OP_CLR:
1077 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
cd7f97ca
RH
1078 default:
1079 gen_compute_eflags(s);
1080 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1081 .mask = CC_O };
1082 }
1608ecca
PB
1083}
1084
1085/* compute eflags.Z to reg */
bec93d72 1086static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
1608ecca 1087{
086c4077
RH
1088 switch (s->cc_op) {
1089 case CC_OP_DYNAMIC:
1090 gen_compute_eflags(s);
1091 /* FALLTHRU */
1092 case CC_OP_EFLAGS:
cd7f97ca
RH
1093 case CC_OP_ADCX:
1094 case CC_OP_ADOX:
1095 case CC_OP_ADCOX:
bec93d72
RH
1096 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1097 .mask = CC_Z };
436ff2d2
RH
1098 case CC_OP_CLR:
1099 return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
086c4077
RH
1100 default:
1101 {
1102 int size = (s->cc_op - CC_OP_ADDB) & 3;
1103 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
bec93d72 1104 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
086c4077 1105 }
bec93d72
RH
1106 }
1107}
1108
c365395e
PB
1109/* perform a conditional store into register 'reg' according to jump opcode
1110 value 'b'. In the fast case, T0 is guaranted not to be used. */
276e6b5f 1111static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
8e1c85e3 1112{
c365395e 1113 int inv, jcc_op, size, cond;
276e6b5f 1114 CCPrepare cc;
c365395e
PB
1115 TCGv t0;
1116
1117 inv = b & 1;
8e1c85e3 1118 jcc_op = (b >> 1) & 7;
c365395e
PB
1119
1120 switch (s->cc_op) {
69d1aa31
RH
1121 case CC_OP_SUBB ... CC_OP_SUBQ:
1122 /* We optimize relational operators for the cmp/jcc case. */
c365395e
PB
1123 size = s->cc_op - CC_OP_SUBB;
1124 switch (jcc_op) {
1125 case JCC_BE:
a3251186 1126 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
c365395e
PB
1127 gen_extu(size, cpu_tmp4);
1128 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
276e6b5f
RH
1129 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
1130 .reg2 = t0, .mask = -1, .use_reg2 = true };
c365395e 1131 break;
8e1c85e3 1132
c365395e 1133 case JCC_L:
276e6b5f 1134 cond = TCG_COND_LT;
c365395e
PB
1135 goto fast_jcc_l;
1136 case JCC_LE:
276e6b5f 1137 cond = TCG_COND_LE;
c365395e 1138 fast_jcc_l:
a3251186 1139 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
c365395e
PB
1140 gen_exts(size, cpu_tmp4);
1141 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
276e6b5f
RH
1142 cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
1143 .reg2 = t0, .mask = -1, .use_reg2 = true };
c365395e 1144 break;
8e1c85e3 1145
c365395e 1146 default:
8e1c85e3 1147 goto slow_jcc;
c365395e 1148 }
8e1c85e3 1149 break;
c365395e 1150
8e1c85e3
FB
1151 default:
1152 slow_jcc:
69d1aa31
RH
1153 /* This actually generates good code for JC, JZ and JS. */
1154 switch (jcc_op) {
1155 case JCC_O:
1156 cc = gen_prepare_eflags_o(s, reg);
1157 break;
1158 case JCC_B:
1159 cc = gen_prepare_eflags_c(s, reg);
1160 break;
1161 case JCC_Z:
1162 cc = gen_prepare_eflags_z(s, reg);
1163 break;
1164 case JCC_BE:
1165 gen_compute_eflags(s);
1166 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1167 .mask = CC_Z | CC_C };
1168 break;
1169 case JCC_S:
1170 cc = gen_prepare_eflags_s(s, reg);
1171 break;
1172 case JCC_P:
1173 cc = gen_prepare_eflags_p(s, reg);
1174 break;
1175 case JCC_L:
1176 gen_compute_eflags(s);
1177 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1178 reg = cpu_tmp0;
1179 }
1180 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1181 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1182 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1183 .mask = CC_S };
1184 break;
1185 default:
1186 case JCC_LE:
1187 gen_compute_eflags(s);
1188 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1189 reg = cpu_tmp0;
1190 }
1191 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1192 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1193 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1194 .mask = CC_S | CC_Z };
1195 break;
1196 }
c365395e 1197 break;
8e1c85e3 1198 }
276e6b5f
RH
1199
1200 if (inv) {
1201 cc.cond = tcg_invert_cond(cc.cond);
1202 }
1203 return cc;
8e1c85e3
FB
1204}
1205
cc8b6f5b
PB
1206static void gen_setcc1(DisasContext *s, int b, TCGv reg)
1207{
1208 CCPrepare cc = gen_prepare_cc(s, b, reg);
1209
1210 if (cc.no_setcond) {
1211 if (cc.cond == TCG_COND_EQ) {
1212 tcg_gen_xori_tl(reg, cc.reg, 1);
1213 } else {
1214 tcg_gen_mov_tl(reg, cc.reg);
1215 }
1216 return;
1217 }
1218
1219 if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
1220 cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
1221 tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
1222 tcg_gen_andi_tl(reg, reg, 1);
1223 return;
1224 }
1225 if (cc.mask != -1) {
1226 tcg_gen_andi_tl(reg, cc.reg, cc.mask);
1227 cc.reg = reg;
1228 }
1229 if (cc.use_reg2) {
1230 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1231 } else {
1232 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1233 }
1234}
1235
1236static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1237{
1238 gen_setcc1(s, JCC_B << 1, reg);
1239}
276e6b5f 1240
8e1c85e3
FB
1241/* generate a conditional jump to label 'l1' according to jump opcode
1242 value 'b'. In the fast case, T0 is guaranted not to be used. */
dc259201
RH
1243static inline void gen_jcc1_noeob(DisasContext *s, int b, int l1)
1244{
1245 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
1246
1247 if (cc.mask != -1) {
1248 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1249 cc.reg = cpu_T[0];
1250 }
1251 if (cc.use_reg2) {
1252 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1253 } else {
1254 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1255 }
1256}
1257
1258/* Generate a conditional jump to label 'l1' according to jump opcode
1259 value 'b'. In the fast case, T0 is guaranted not to be used.
1260 A translation block must end soon. */
b27fc131 1261static inline void gen_jcc1(DisasContext *s, int b, int l1)
8e1c85e3 1262{
943131ca 1263 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
8e1c85e3 1264
dc259201 1265 gen_update_cc_op(s);
943131ca
PB
1266 if (cc.mask != -1) {
1267 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1268 cc.reg = cpu_T[0];
1269 }
dc259201 1270 set_cc_op(s, CC_OP_DYNAMIC);
943131ca
PB
1271 if (cc.use_reg2) {
1272 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1273 } else {
1274 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
8e1c85e3
FB
1275 }
1276}
1277
14ce26e7
FB
1278/* XXX: does not work with gdbstub "ice" single step - not a
1279 serious problem */
1280static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
2c0262af 1281{
14ce26e7
FB
1282 int l1, l2;
1283
1284 l1 = gen_new_label();
1285 l2 = gen_new_label();
6e0d8677 1286 gen_op_jnz_ecx(s->aflag, l1);
14ce26e7
FB
1287 gen_set_label(l2);
1288 gen_jmp_tb(s, next_eip, 1);
1289 gen_set_label(l1);
1290 return l2;
2c0262af
FB
1291}
1292
1293static inline void gen_stos(DisasContext *s, int ot)
1294{
57fec1fe 1295 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
2c0262af 1296 gen_string_movl_A0_EDI(s);
57fec1fe 1297 gen_op_st_T0_A0(ot + s->mem_index);
6e0d8677
FB
1298 gen_op_movl_T0_Dshift(ot);
1299 gen_op_add_reg_T0(s->aflag, R_EDI);
2c0262af
FB
1300}
1301
1302static inline void gen_lods(DisasContext *s, int ot)
1303{
1304 gen_string_movl_A0_ESI(s);
57fec1fe
FB
1305 gen_op_ld_T0_A0(ot + s->mem_index);
1306 gen_op_mov_reg_T0(ot, R_EAX);
6e0d8677
FB
1307 gen_op_movl_T0_Dshift(ot);
1308 gen_op_add_reg_T0(s->aflag, R_ESI);
2c0262af
FB
1309}
1310
1311static inline void gen_scas(DisasContext *s, int ot)
1312{
2c0262af 1313 gen_string_movl_A0_EDI(s);
57fec1fe 1314 gen_op_ld_T1_A0(ot + s->mem_index);
63633fe6 1315 gen_op(s, OP_CMPL, ot, R_EAX);
6e0d8677
FB
1316 gen_op_movl_T0_Dshift(ot);
1317 gen_op_add_reg_T0(s->aflag, R_EDI);
2c0262af
FB
1318}
1319
1320static inline void gen_cmps(DisasContext *s, int ot)
1321{
2c0262af 1322 gen_string_movl_A0_EDI(s);
57fec1fe 1323 gen_op_ld_T1_A0(ot + s->mem_index);
63633fe6
RH
1324 gen_string_movl_A0_ESI(s);
1325 gen_op(s, OP_CMPL, ot, OR_TMP0);
6e0d8677
FB
1326 gen_op_movl_T0_Dshift(ot);
1327 gen_op_add_reg_T0(s->aflag, R_ESI);
1328 gen_op_add_reg_T0(s->aflag, R_EDI);
2c0262af
FB
1329}
1330
1331static inline void gen_ins(DisasContext *s, int ot)
1332{
2e70f6ef
PB
1333 if (use_icount)
1334 gen_io_start();
2c0262af 1335 gen_string_movl_A0_EDI(s);
6e0d8677
FB
1336 /* Note: we must do this dummy write first to be restartable in
1337 case of page fault. */
9772c73b 1338 gen_op_movl_T0_0();
57fec1fe 1339 gen_op_st_T0_A0(ot + s->mem_index);
b8b6a50b 1340 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
b6abf97d
FB
1341 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1342 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
a7812ae4 1343 gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
57fec1fe 1344 gen_op_st_T0_A0(ot + s->mem_index);
6e0d8677
FB
1345 gen_op_movl_T0_Dshift(ot);
1346 gen_op_add_reg_T0(s->aflag, R_EDI);
2e70f6ef
PB
1347 if (use_icount)
1348 gen_io_end();
2c0262af
FB
1349}
1350
1351static inline void gen_outs(DisasContext *s, int ot)
1352{
2e70f6ef
PB
1353 if (use_icount)
1354 gen_io_start();
2c0262af 1355 gen_string_movl_A0_ESI(s);
57fec1fe 1356 gen_op_ld_T0_A0(ot + s->mem_index);
b8b6a50b
FB
1357
1358 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
b6abf97d
FB
1359 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1360 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1361 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
a7812ae4 1362 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
b8b6a50b 1363
6e0d8677
FB
1364 gen_op_movl_T0_Dshift(ot);
1365 gen_op_add_reg_T0(s->aflag, R_ESI);
2e70f6ef
PB
1366 if (use_icount)
1367 gen_io_end();
2c0262af
FB
1368}
1369
1370/* same method as Valgrind : we generate jumps to current or next
1371 instruction */
1372#define GEN_REPZ(op) \
1373static inline void gen_repz_ ## op(DisasContext *s, int ot, \
14ce26e7 1374 target_ulong cur_eip, target_ulong next_eip) \
2c0262af 1375{ \
14ce26e7 1376 int l2;\
2c0262af 1377 gen_update_cc_op(s); \
14ce26e7 1378 l2 = gen_jz_ecx_string(s, next_eip); \
2c0262af 1379 gen_ ## op(s, ot); \
6e0d8677 1380 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
2c0262af
FB
1381 /* a loop would cause two single step exceptions if ECX = 1 \
1382 before rep string_insn */ \
1383 if (!s->jmp_opt) \
6e0d8677 1384 gen_op_jz_ecx(s->aflag, l2); \
2c0262af
FB
1385 gen_jmp(s, cur_eip); \
1386}
1387
1388#define GEN_REPZ2(op) \
1389static inline void gen_repz_ ## op(DisasContext *s, int ot, \
14ce26e7
FB
1390 target_ulong cur_eip, \
1391 target_ulong next_eip, \
2c0262af
FB
1392 int nz) \
1393{ \
14ce26e7 1394 int l2;\
2c0262af 1395 gen_update_cc_op(s); \
14ce26e7 1396 l2 = gen_jz_ecx_string(s, next_eip); \
2c0262af 1397 gen_ ## op(s, ot); \
6e0d8677 1398 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
773cdfcc 1399 gen_update_cc_op(s); \
b27fc131 1400 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
2c0262af 1401 if (!s->jmp_opt) \
6e0d8677 1402 gen_op_jz_ecx(s->aflag, l2); \
2c0262af
FB
1403 gen_jmp(s, cur_eip); \
1404}
1405
1406GEN_REPZ(movs)
1407GEN_REPZ(stos)
1408GEN_REPZ(lods)
1409GEN_REPZ(ins)
1410GEN_REPZ(outs)
1411GEN_REPZ2(scas)
1412GEN_REPZ2(cmps)
1413
a7812ae4
PB
1414static void gen_helper_fp_arith_ST0_FT0(int op)
1415{
1416 switch (op) {
d3eb5eae
BS
1417 case 0:
1418 gen_helper_fadd_ST0_FT0(cpu_env);
1419 break;
1420 case 1:
1421 gen_helper_fmul_ST0_FT0(cpu_env);
1422 break;
1423 case 2:
1424 gen_helper_fcom_ST0_FT0(cpu_env);
1425 break;
1426 case 3:
1427 gen_helper_fcom_ST0_FT0(cpu_env);
1428 break;
1429 case 4:
1430 gen_helper_fsub_ST0_FT0(cpu_env);
1431 break;
1432 case 5:
1433 gen_helper_fsubr_ST0_FT0(cpu_env);
1434 break;
1435 case 6:
1436 gen_helper_fdiv_ST0_FT0(cpu_env);
1437 break;
1438 case 7:
1439 gen_helper_fdivr_ST0_FT0(cpu_env);
1440 break;
a7812ae4
PB
1441 }
1442}
2c0262af
FB
1443
1444/* NOTE the exception in "r" op ordering */
a7812ae4
PB
1445static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1446{
1447 TCGv_i32 tmp = tcg_const_i32(opreg);
1448 switch (op) {
d3eb5eae
BS
1449 case 0:
1450 gen_helper_fadd_STN_ST0(cpu_env, tmp);
1451 break;
1452 case 1:
1453 gen_helper_fmul_STN_ST0(cpu_env, tmp);
1454 break;
1455 case 4:
1456 gen_helper_fsubr_STN_ST0(cpu_env, tmp);
1457 break;
1458 case 5:
1459 gen_helper_fsub_STN_ST0(cpu_env, tmp);
1460 break;
1461 case 6:
1462 gen_helper_fdivr_STN_ST0(cpu_env, tmp);
1463 break;
1464 case 7:
1465 gen_helper_fdiv_STN_ST0(cpu_env, tmp);
1466 break;
a7812ae4
PB
1467 }
1468}
2c0262af
FB
1469
1470/* if d == OR_TMP0, it means memory operand (address in A0) */
1471static void gen_op(DisasContext *s1, int op, int ot, int d)
1472{
2c0262af 1473 if (d != OR_TMP0) {
57fec1fe 1474 gen_op_mov_TN_reg(ot, 0, d);
2c0262af 1475 } else {
57fec1fe 1476 gen_op_ld_T0_A0(ot + s1->mem_index);
2c0262af
FB
1477 }
1478 switch(op) {
1479 case OP_ADCL:
cc8b6f5b 1480 gen_compute_eflags_c(s1, cpu_tmp4);
cad3a37d
FB
1481 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1482 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1483 if (d != OR_TMP0)
1484 gen_op_mov_reg_T0(ot, d);
1485 else
1486 gen_op_st_T0_A0(ot + s1->mem_index);
988c3eb0
RH
1487 gen_op_update3_cc(cpu_tmp4);
1488 set_cc_op(s1, CC_OP_ADCB + ot);
cad3a37d 1489 break;
2c0262af 1490 case OP_SBBL:
cc8b6f5b 1491 gen_compute_eflags_c(s1, cpu_tmp4);
cad3a37d
FB
1492 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1493 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1494 if (d != OR_TMP0)
57fec1fe 1495 gen_op_mov_reg_T0(ot, d);
cad3a37d
FB
1496 else
1497 gen_op_st_T0_A0(ot + s1->mem_index);
988c3eb0
RH
1498 gen_op_update3_cc(cpu_tmp4);
1499 set_cc_op(s1, CC_OP_SBBB + ot);
cad3a37d 1500 break;
2c0262af
FB
1501 case OP_ADDL:
1502 gen_op_addl_T0_T1();
cad3a37d
FB
1503 if (d != OR_TMP0)
1504 gen_op_mov_reg_T0(ot, d);
1505 else
1506 gen_op_st_T0_A0(ot + s1->mem_index);
1507 gen_op_update2_cc();
3ca51d07 1508 set_cc_op(s1, CC_OP_ADDB + ot);
2c0262af
FB
1509 break;
1510 case OP_SUBL:
a3251186 1511 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
57fec1fe 1512 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
cad3a37d
FB
1513 if (d != OR_TMP0)
1514 gen_op_mov_reg_T0(ot, d);
1515 else
1516 gen_op_st_T0_A0(ot + s1->mem_index);
1517 gen_op_update2_cc();
3ca51d07 1518 set_cc_op(s1, CC_OP_SUBB + ot);
2c0262af
FB
1519 break;
1520 default:
1521 case OP_ANDL:
57fec1fe 1522 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
cad3a37d
FB
1523 if (d != OR_TMP0)
1524 gen_op_mov_reg_T0(ot, d);
1525 else
1526 gen_op_st_T0_A0(ot + s1->mem_index);
1527 gen_op_update1_cc();
3ca51d07 1528 set_cc_op(s1, CC_OP_LOGICB + ot);
57fec1fe 1529 break;
2c0262af 1530 case OP_ORL:
57fec1fe 1531 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
cad3a37d
FB
1532 if (d != OR_TMP0)
1533 gen_op_mov_reg_T0(ot, d);
1534 else
1535 gen_op_st_T0_A0(ot + s1->mem_index);
1536 gen_op_update1_cc();
3ca51d07 1537 set_cc_op(s1, CC_OP_LOGICB + ot);
57fec1fe 1538 break;
2c0262af 1539 case OP_XORL:
57fec1fe 1540 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
cad3a37d
FB
1541 if (d != OR_TMP0)
1542 gen_op_mov_reg_T0(ot, d);
1543 else
1544 gen_op_st_T0_A0(ot + s1->mem_index);
1545 gen_op_update1_cc();
3ca51d07 1546 set_cc_op(s1, CC_OP_LOGICB + ot);
2c0262af
FB
1547 break;
1548 case OP_CMPL:
63633fe6 1549 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
a3251186 1550 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
63633fe6 1551 tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
3ca51d07 1552 set_cc_op(s1, CC_OP_SUBB + ot);
2c0262af
FB
1553 break;
1554 }
b6abf97d
FB
1555}
1556
2c0262af
FB
1557/* if d == OR_TMP0, it means memory operand (address in A0) */
1558static void gen_inc(DisasContext *s1, int ot, int d, int c)
1559{
1560 if (d != OR_TMP0)
57fec1fe 1561 gen_op_mov_TN_reg(ot, 0, d);
2c0262af 1562 else
57fec1fe 1563 gen_op_ld_T0_A0(ot + s1->mem_index);
cc8b6f5b 1564 gen_compute_eflags_c(s1, cpu_cc_src);
2c0262af 1565 if (c > 0) {
b6abf97d 1566 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
3ca51d07 1567 set_cc_op(s1, CC_OP_INCB + ot);
2c0262af 1568 } else {
b6abf97d 1569 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
3ca51d07 1570 set_cc_op(s1, CC_OP_DECB + ot);
2c0262af
FB
1571 }
1572 if (d != OR_TMP0)
57fec1fe 1573 gen_op_mov_reg_T0(ot, d);
2c0262af 1574 else
57fec1fe 1575 gen_op_st_T0_A0(ot + s1->mem_index);
cd31fefa 1576 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
2c0262af
FB
1577}
1578
f437d0a3
RH
1579static void gen_shift_flags(DisasContext *s, int ot, TCGv result, TCGv shm1,
1580 TCGv count, bool is_right)
1581{
1582 TCGv_i32 z32, s32, oldop;
1583 TCGv z_tl;
1584
1585 /* Store the results into the CC variables. If we know that the
1586 variable must be dead, store unconditionally. Otherwise we'll
1587 need to not disrupt the current contents. */
1588 z_tl = tcg_const_tl(0);
1589 if (cc_op_live[s->cc_op] & USES_CC_DST) {
1590 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
1591 result, cpu_cc_dst);
1592 } else {
1593 tcg_gen_mov_tl(cpu_cc_dst, result);
1594 }
1595 if (cc_op_live[s->cc_op] & USES_CC_SRC) {
1596 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
1597 shm1, cpu_cc_src);
1598 } else {
1599 tcg_gen_mov_tl(cpu_cc_src, shm1);
1600 }
1601 tcg_temp_free(z_tl);
1602
1603 /* Get the two potential CC_OP values into temporaries. */
1604 tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1605 if (s->cc_op == CC_OP_DYNAMIC) {
1606 oldop = cpu_cc_op;
1607 } else {
1608 tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
1609 oldop = cpu_tmp3_i32;
1610 }
1611
1612 /* Conditionally store the CC_OP value. */
1613 z32 = tcg_const_i32(0);
1614 s32 = tcg_temp_new_i32();
1615 tcg_gen_trunc_tl_i32(s32, count);
1616 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
1617 tcg_temp_free_i32(z32);
1618 tcg_temp_free_i32(s32);
1619
1620 /* The CC_OP value is no longer predictable. */
1621 set_cc_op(s, CC_OP_DYNAMIC);
1622}
1623
b6abf97d
FB
1624static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
1625 int is_right, int is_arith)
2c0262af 1626{
a41f62f5 1627 target_ulong mask = (ot == OT_QUAD ? 0x3f : 0x1f);
3b46e624 1628
b6abf97d 1629 /* load */
82786041 1630 if (op1 == OR_TMP0) {
b6abf97d 1631 gen_op_ld_T0_A0(ot + s->mem_index);
82786041 1632 } else {
b6abf97d 1633 gen_op_mov_TN_reg(ot, 0, op1);
82786041 1634 }
b6abf97d 1635
a41f62f5
RH
1636 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
1637 tcg_gen_subi_tl(cpu_tmp0, cpu_T[1], 1);
b6abf97d
FB
1638
1639 if (is_right) {
1640 if (is_arith) {
f484d386 1641 gen_exts(ot, cpu_T[0]);
a41f62f5
RH
1642 tcg_gen_sar_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1643 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
b6abf97d 1644 } else {
cad3a37d 1645 gen_extu(ot, cpu_T[0]);
a41f62f5
RH
1646 tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1647 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
b6abf97d
FB
1648 }
1649 } else {
a41f62f5
RH
1650 tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1651 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
b6abf97d
FB
1652 }
1653
1654 /* store */
82786041 1655 if (op1 == OR_TMP0) {
b6abf97d 1656 gen_op_st_T0_A0(ot + s->mem_index);
82786041 1657 } else {
b6abf97d 1658 gen_op_mov_reg_T0(ot, op1);
82786041
RH
1659 }
1660
f437d0a3 1661 gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, cpu_T[1], is_right);
b6abf97d
FB
1662}
1663
c1c37968
FB
1664static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
1665 int is_right, int is_arith)
1666{
a41f62f5 1667 int mask = (ot == OT_QUAD ? 0x3f : 0x1f);
c1c37968
FB
1668
1669 /* load */
1670 if (op1 == OR_TMP0)
1671 gen_op_ld_T0_A0(ot + s->mem_index);
1672 else
1673 gen_op_mov_TN_reg(ot, 0, op1);
1674
1675 op2 &= mask;
1676 if (op2 != 0) {
1677 if (is_right) {
1678 if (is_arith) {
1679 gen_exts(ot, cpu_T[0]);
2a449d14 1680 tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
c1c37968
FB
1681 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
1682 } else {
1683 gen_extu(ot, cpu_T[0]);
2a449d14 1684 tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
c1c37968
FB
1685 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
1686 }
1687 } else {
2a449d14 1688 tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
c1c37968
FB
1689 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
1690 }
1691 }
1692
1693 /* store */
1694 if (op1 == OR_TMP0)
1695 gen_op_st_T0_A0(ot + s->mem_index);
1696 else
1697 gen_op_mov_reg_T0(ot, op1);
1698
1699 /* update eflags if non zero shift */
1700 if (op2 != 0) {
2a449d14 1701 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
c1c37968 1702 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
3ca51d07 1703 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
c1c37968
FB
1704 }
1705}
1706
b6abf97d
FB
1707static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
1708{
1709 if (arg2 >= 0)
1710 tcg_gen_shli_tl(ret, arg1, arg2);
1711 else
1712 tcg_gen_shri_tl(ret, arg1, -arg2);
1713}
1714
34d80a55 1715static void gen_rot_rm_T1(DisasContext *s, int ot, int op1, int is_right)
b6abf97d 1716{
34d80a55
RH
1717 target_ulong mask = (ot == OT_QUAD ? 0x3f : 0x1f);
1718 TCGv_i32 t0, t1;
b6abf97d
FB
1719
1720 /* load */
1e4840bf 1721 if (op1 == OR_TMP0) {
34d80a55 1722 gen_op_ld_T0_A0(ot + s->mem_index);
1e4840bf 1723 } else {
34d80a55 1724 gen_op_mov_TN_reg(ot, 0, op1);
1e4840bf 1725 }
b6abf97d 1726
34d80a55 1727 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
b6abf97d 1728
34d80a55
RH
1729 switch (ot) {
1730 case OT_BYTE:
1731 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1732 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
1733 tcg_gen_muli_tl(cpu_T[0], cpu_T[0], 0x01010101);
1734 goto do_long;
1735 case OT_WORD:
1736 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1737 tcg_gen_deposit_tl(cpu_T[0], cpu_T[0], cpu_T[0], 16, 16);
1738 goto do_long;
1739 do_long:
1740#ifdef TARGET_X86_64
1741 case OT_LONG:
1742 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
1743 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
1744 if (is_right) {
1745 tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1746 } else {
1747 tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1748 }
1749 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
1750 break;
1751#endif
1752 default:
1753 if (is_right) {
1754 tcg_gen_rotr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1755 } else {
1756 tcg_gen_rotl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1757 }
1758 break;
b6abf97d 1759 }
b6abf97d 1760
b6abf97d 1761 /* store */
1e4840bf 1762 if (op1 == OR_TMP0) {
34d80a55 1763 gen_op_st_T0_A0(ot + s->mem_index);
1e4840bf 1764 } else {
34d80a55 1765 gen_op_mov_reg_T0(ot, op1);
1e4840bf 1766 }
b6abf97d 1767
34d80a55
RH
1768 /* We'll need the flags computed into CC_SRC. */
1769 gen_compute_eflags(s);
b6abf97d 1770
34d80a55
RH
1771 /* The value that was "rotated out" is now present at the other end
1772 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1773 since we've computed the flags into CC_SRC, these variables are
1774 currently dead. */
b6abf97d 1775 if (is_right) {
34d80a55
RH
1776 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
1777 tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
089305ac 1778 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
34d80a55
RH
1779 } else {
1780 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
1781 tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
b6abf97d 1782 }
34d80a55
RH
1783 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1784 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1785
1786 /* Now conditionally store the new CC_OP value. If the shift count
1787 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1788 Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
1789 exactly as we computed above. */
1790 t0 = tcg_const_i32(0);
1791 t1 = tcg_temp_new_i32();
1792 tcg_gen_trunc_tl_i32(t1, cpu_T[1]);
1793 tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
1794 tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
1795 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
1796 cpu_tmp2_i32, cpu_tmp3_i32);
1797 tcg_temp_free_i32(t0);
1798 tcg_temp_free_i32(t1);
1799
1800 /* The CC_OP value is no longer predictable. */
1801 set_cc_op(s, CC_OP_DYNAMIC);
b6abf97d
FB
1802}
1803
8cd6345d 1804static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
1805 int is_right)
1806{
34d80a55
RH
1807 int mask = (ot == OT_QUAD ? 0x3f : 0x1f);
1808 int shift;
8cd6345d 1809
1810 /* load */
1811 if (op1 == OR_TMP0) {
34d80a55 1812 gen_op_ld_T0_A0(ot + s->mem_index);
8cd6345d 1813 } else {
34d80a55 1814 gen_op_mov_TN_reg(ot, 0, op1);
8cd6345d 1815 }
1816
8cd6345d 1817 op2 &= mask;
8cd6345d 1818 if (op2 != 0) {
34d80a55
RH
1819 switch (ot) {
1820#ifdef TARGET_X86_64
1821 case OT_LONG:
1822 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
1823 if (is_right) {
1824 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1825 } else {
1826 tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1827 }
1828 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
1829 break;
1830#endif
1831 default:
1832 if (is_right) {
1833 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], op2);
1834 } else {
1835 tcg_gen_rotli_tl(cpu_T[0], cpu_T[0], op2);
1836 }
1837 break;
1838 case OT_BYTE:
1839 mask = 7;
1840 goto do_shifts;
1841 case OT_WORD:
1842 mask = 15;
1843 do_shifts:
1844 shift = op2 & mask;
1845 if (is_right) {
1846 shift = mask + 1 - shift;
1847 }
1848 gen_extu(ot, cpu_T[0]);
1849 tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], shift);
1850 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], mask + 1 - shift);
1851 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
1852 break;
8cd6345d 1853 }
8cd6345d 1854 }
1855
1856 /* store */
1857 if (op1 == OR_TMP0) {
34d80a55 1858 gen_op_st_T0_A0(ot + s->mem_index);
8cd6345d 1859 } else {
34d80a55 1860 gen_op_mov_reg_T0(ot, op1);
8cd6345d 1861 }
1862
1863 if (op2 != 0) {
34d80a55 1864 /* Compute the flags into CC_SRC. */
d229edce 1865 gen_compute_eflags(s);
0ff6addd 1866
34d80a55
RH
1867 /* The value that was "rotated out" is now present at the other end
1868 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1869 since we've computed the flags into CC_SRC, these variables are
1870 currently dead. */
8cd6345d 1871 if (is_right) {
34d80a55
RH
1872 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
1873 tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
38ebb396 1874 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
34d80a55
RH
1875 } else {
1876 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
1877 tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
8cd6345d 1878 }
34d80a55
RH
1879 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1880 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1881 set_cc_op(s, CC_OP_ADCOX);
8cd6345d 1882 }
8cd6345d 1883}
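/* Hedged sketch (invented helper, illustration only): the 8-bit immediate
   path above zero-extends the value and builds the rotate from two plain
   shifts; a right rotate by c is first turned into a left rotate by 8 - c. */
static unsigned char rol8_imm_sketch(unsigned char val, int count)
{
    unsigned x = val;            /* zero-extend, as gen_extu() does */
    int s = count & 7;           /* only the low three bits matter */
    return (unsigned char)((x << s) | (x >> (8 - s)));
}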
1884
b6abf97d
FB
1885/* XXX: add faster immediate = 1 case */
1886static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
1887 int is_right)
1888{
d229edce 1889 gen_compute_eflags(s);
c7b3c873 1890 assert(s->cc_op == CC_OP_EFLAGS);
b6abf97d
FB
1891
1892 /* load */
1893 if (op1 == OR_TMP0)
1894 gen_op_ld_T0_A0(ot + s->mem_index);
1895 else
1896 gen_op_mov_TN_reg(ot, 0, op1);
1897
a7812ae4
PB
1898 if (is_right) {
1899 switch (ot) {
93ab25d7 1900 case OT_BYTE:
7923057b
BS
1901 gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1902 break;
93ab25d7 1903 case OT_WORD:
7923057b
BS
1904 gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1905 break;
93ab25d7 1906 case OT_LONG:
7923057b
BS
1907 gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1908 break;
a7812ae4 1909#ifdef TARGET_X86_64
93ab25d7 1910 case OT_QUAD:
7923057b
BS
1911 gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1912 break;
a7812ae4
PB
1913#endif
1914 }
1915 } else {
1916 switch (ot) {
93ab25d7 1917 case OT_BYTE:
7923057b
BS
1918 gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1919 break;
93ab25d7 1920 case OT_WORD:
7923057b
BS
1921 gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1922 break;
93ab25d7 1923 case OT_LONG:
7923057b
BS
1924 gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1925 break;
a7812ae4 1926#ifdef TARGET_X86_64
93ab25d7 1927 case OT_QUAD:
7923057b
BS
1928 gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1929 break;
a7812ae4
PB
1930#endif
1931 }
1932 }
b6abf97d
FB
1933 /* store */
1934 if (op1 == OR_TMP0)
1935 gen_op_st_T0_A0(ot + s->mem_index);
1936 else
1937 gen_op_mov_reg_T0(ot, op1);
b6abf97d
FB
1938}
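/* Hedged sketch of what the rcl/rcr helpers implement architecturally,
   shown for the 8-bit case only (function name invented for this example):
   RCL treats {CF, operand} as a 9-bit quantity and rotates it left by the
   count masked to 5 bits and then reduced mod 9. */
static unsigned char rcl8_sketch(unsigned char val, int count, int *cf)
{
    unsigned x = ((unsigned)(*cf & 1) << 8) | val;  /* 9-bit value, CF on top */
    int c = (count & 0x1f) % 9;
    x = ((x << c) | (x >> (9 - c))) & 0x1ff;
    *cf = (x >> 8) & 1;
    return (unsigned char)x;
}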
1939
1940/* XXX: add faster immediate case */
3b9d3cf1 1941static void gen_shiftd_rm_T1(DisasContext *s, int ot, int op1,
f437d0a3 1942 bool is_right, TCGv count_in)
b6abf97d 1943{
f437d0a3
RH
1944 target_ulong mask = (ot == OT_QUAD ? 63 : 31);
1945 TCGv count;
b6abf97d
FB
1946
1947 /* load */
1e4840bf 1948 if (op1 == OR_TMP0) {
f437d0a3 1949 gen_op_ld_T0_A0(ot + s->mem_index);
1e4840bf 1950 } else {
f437d0a3 1951 gen_op_mov_TN_reg(ot, 0, op1);
1e4840bf 1952 }
b6abf97d 1953
f437d0a3
RH
1954 count = tcg_temp_new();
1955 tcg_gen_andi_tl(count, count_in, mask);
1e4840bf 1956
f437d0a3
RH
1957 switch (ot) {
1958 case OT_WORD:
1959 /* Note: we implement the Intel behaviour for shift count > 16.
1960 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1961 portion by constructing it as a 32-bit value. */
b6abf97d 1962 if (is_right) {
f437d0a3
RH
1963 tcg_gen_deposit_tl(cpu_tmp0, cpu_T[0], cpu_T[1], 16, 16);
1964 tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
1965 tcg_gen_mov_tl(cpu_T[0], cpu_tmp0);
b6abf97d 1966 } else {
f437d0a3 1967 tcg_gen_deposit_tl(cpu_T[1], cpu_T[0], cpu_T[1], 16, 16);
b6abf97d 1968 }
f437d0a3
RH
1969 /* FALLTHRU */
1970#ifdef TARGET_X86_64
1971 case OT_LONG:
1972 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1973 tcg_gen_subi_tl(cpu_tmp0, count, 1);
b6abf97d 1974 if (is_right) {
f437d0a3
RH
1975 tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
1976 tcg_gen_shr_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
1977 tcg_gen_shr_i64(cpu_T[0], cpu_T[0], count);
1978 } else {
1979 tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[1], cpu_T[0]);
1980 tcg_gen_shl_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
1981 tcg_gen_shl_i64(cpu_T[0], cpu_T[0], count);
1982 tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
1983 tcg_gen_shri_i64(cpu_T[0], cpu_T[0], 32);
1984 }
1985 break;
1986#endif
1987 default:
1988 tcg_gen_subi_tl(cpu_tmp0, count, 1);
1989 if (is_right) {
1990 tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
b6abf97d 1991
f437d0a3
RH
1992 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1993 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], count);
1994 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
b6abf97d 1995 } else {
f437d0a3
RH
1996 tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1997 if (ot == OT_WORD) {
1998 /* Only needed if count > 16, for Intel behaviour. */
1999 tcg_gen_subfi_tl(cpu_tmp4, 33, count);
2000 tcg_gen_shr_tl(cpu_tmp4, cpu_T[1], cpu_tmp4);
2001 tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
2002 }
2003
2004 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
2005 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], count);
2006 tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
b6abf97d 2007 }
f437d0a3
RH
2008 tcg_gen_movi_tl(cpu_tmp4, 0);
2009 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[1], count, cpu_tmp4,
2010 cpu_tmp4, cpu_T[1]);
2011 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
2012 break;
b6abf97d 2013 }
b6abf97d 2014
b6abf97d 2015 /* store */
1e4840bf 2016 if (op1 == OR_TMP0) {
f437d0a3 2017 gen_op_st_T0_A0(ot + s->mem_index);
b6abf97d 2018 } else {
f437d0a3 2019 gen_op_mov_reg_T0(ot, op1);
b6abf97d 2020 }
1e4840bf 2021
f437d0a3
RH
2022 gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, count, is_right);
2023 tcg_temp_free(count);
b6abf97d
FB
2024}
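/* Hedged sketch (invented names): the 32-bit SHRD/SHLD results produced by
   the concatenation trick above, restated on host integers.  The count is
   assumed to be pre-masked to 0..31, as done with 'mask' above; the flag
   computation is left to gen_shift_flags() and is not shown. */
static unsigned shrd32_sketch(unsigned dst, unsigned src, int count)
{
    unsigned long long x = ((unsigned long long)src << 32) | dst;
    return (unsigned)(x >> count);          /* low half of the shifted pair */
}

static unsigned shld32_sketch(unsigned dst, unsigned src, int count)
{
    unsigned long long x = ((unsigned long long)dst << 32) | src;
    return (unsigned)((x << count) >> 32);  /* high half of the shifted pair */
}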
2025
2026static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
2027{
2028 if (s != OR_TMP1)
2029 gen_op_mov_TN_reg(ot, 1, s);
2030 switch(op) {
2031 case OP_ROL:
2032 gen_rot_rm_T1(s1, ot, d, 0);
2033 break;
2034 case OP_ROR:
2035 gen_rot_rm_T1(s1, ot, d, 1);
2036 break;
2037 case OP_SHL:
2038 case OP_SHL1:
2039 gen_shift_rm_T1(s1, ot, d, 0, 0);
2040 break;
2041 case OP_SHR:
2042 gen_shift_rm_T1(s1, ot, d, 1, 0);
2043 break;
2044 case OP_SAR:
2045 gen_shift_rm_T1(s1, ot, d, 1, 1);
2046 break;
2047 case OP_RCL:
2048 gen_rotc_rm_T1(s1, ot, d, 0);
2049 break;
2050 case OP_RCR:
2051 gen_rotc_rm_T1(s1, ot, d, 1);
2052 break;
2053 }
2c0262af
FB
2054}
2055
2056static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
2057{
c1c37968 2058 switch(op) {
8cd6345d 2059 case OP_ROL:
2060 gen_rot_rm_im(s1, ot, d, c, 0);
2061 break;
2062 case OP_ROR:
2063 gen_rot_rm_im(s1, ot, d, c, 1);
2064 break;
c1c37968
FB
2065 case OP_SHL:
2066 case OP_SHL1:
2067 gen_shift_rm_im(s1, ot, d, c, 0, 0);
2068 break;
2069 case OP_SHR:
2070 gen_shift_rm_im(s1, ot, d, c, 1, 0);
2071 break;
2072 case OP_SAR:
2073 gen_shift_rm_im(s1, ot, d, c, 1, 1);
2074 break;
2075 default:
2076 /* currently not optimized */
2077 gen_op_movl_T1_im(c);
2078 gen_shift(s1, op, ot, d, OR_TMP1);
2079 break;
2080 }
2c0262af
FB
2081}
2082
0af10c86
BS
2083static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm,
2084 int *reg_ptr, int *offset_ptr)
2c0262af 2085{
14ce26e7 2086 target_long disp;
2c0262af 2087 int havesib;
14ce26e7 2088 int base;
2c0262af
FB
2089 int index;
2090 int scale;
2091 int opreg;
2092 int mod, rm, code, override, must_add_seg;
2093
2094 override = s->override;
2095 must_add_seg = s->addseg;
2096 if (override >= 0)
2097 must_add_seg = 1;
2098 mod = (modrm >> 6) & 3;
2099 rm = modrm & 7;
2100
2101 if (s->aflag) {
2102
2103 havesib = 0;
2104 base = rm;
2105 index = 0;
2106 scale = 0;
3b46e624 2107
2c0262af
FB
2108 if (base == 4) {
2109 havesib = 1;
0af10c86 2110 code = cpu_ldub_code(env, s->pc++);
2c0262af 2111 scale = (code >> 6) & 3;
14ce26e7
FB
2112 index = ((code >> 3) & 7) | REX_X(s);
2113 base = (code & 7);
2c0262af 2114 }
14ce26e7 2115 base |= REX_B(s);
2c0262af
FB
2116
2117 switch (mod) {
2118 case 0:
14ce26e7 2119 if ((base & 7) == 5) {
2c0262af 2120 base = -1;
0af10c86 2121 disp = (int32_t)cpu_ldl_code(env, s->pc);
2c0262af 2122 s->pc += 4;
14ce26e7
FB
2123 if (CODE64(s) && !havesib) {
2124 disp += s->pc + s->rip_offset;
2125 }
2c0262af
FB
2126 } else {
2127 disp = 0;
2128 }
2129 break;
2130 case 1:
0af10c86 2131 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2c0262af
FB
2132 break;
2133 default:
2134 case 2:
0af10c86 2135 disp = (int32_t)cpu_ldl_code(env, s->pc);
2c0262af
FB
2136 s->pc += 4;
2137 break;
2138 }
3b46e624 2139
2c0262af
FB
2140 if (base >= 0) {
2141 /* for correct popl handling with esp */
2142 if (base == 4 && s->popl_esp_hack)
2143 disp += s->popl_esp_hack;
14ce26e7
FB
2144#ifdef TARGET_X86_64
2145 if (s->aflag == 2) {
57fec1fe 2146 gen_op_movq_A0_reg(base);
14ce26e7 2147 if (disp != 0) {
57fec1fe 2148 gen_op_addq_A0_im(disp);
14ce26e7 2149 }
5fafdf24 2150 } else
14ce26e7
FB
2151#endif
2152 {
57fec1fe 2153 gen_op_movl_A0_reg(base);
14ce26e7
FB
2154 if (disp != 0)
2155 gen_op_addl_A0_im(disp);
2156 }
2c0262af 2157 } else {
14ce26e7
FB
2158#ifdef TARGET_X86_64
2159 if (s->aflag == 2) {
57fec1fe 2160 gen_op_movq_A0_im(disp);
5fafdf24 2161 } else
14ce26e7
FB
2162#endif
2163 {
2164 gen_op_movl_A0_im(disp);
2165 }
2c0262af 2166 }
b16f827b
AJ
2167 /* index == 4 means no index */
2168 if (havesib && (index != 4)) {
14ce26e7
FB
2169#ifdef TARGET_X86_64
2170 if (s->aflag == 2) {
57fec1fe 2171 gen_op_addq_A0_reg_sN(scale, index);
5fafdf24 2172 } else
14ce26e7
FB
2173#endif
2174 {
57fec1fe 2175 gen_op_addl_A0_reg_sN(scale, index);
14ce26e7 2176 }
2c0262af
FB
2177 }
2178 if (must_add_seg) {
2179 if (override < 0) {
2180 if (base == R_EBP || base == R_ESP)
2181 override = R_SS;
2182 else
2183 override = R_DS;
2184 }
14ce26e7
FB
2185#ifdef TARGET_X86_64
2186 if (s->aflag == 2) {
57fec1fe 2187 gen_op_addq_A0_seg(override);
5fafdf24 2188 } else
14ce26e7
FB
2189#endif
2190 {
7162ab21 2191 gen_op_addl_A0_seg(s, override);
14ce26e7 2192 }
2c0262af
FB
2193 }
2194 } else {
2195 switch (mod) {
2196 case 0:
2197 if (rm == 6) {
0af10c86 2198 disp = cpu_lduw_code(env, s->pc);
2c0262af
FB
2199 s->pc += 2;
2200 gen_op_movl_A0_im(disp);
2201 rm = 0; /* avoid SS override */
2202 goto no_rm;
2203 } else {
2204 disp = 0;
2205 }
2206 break;
2207 case 1:
0af10c86 2208 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2c0262af
FB
2209 break;
2210 default:
2211 case 2:
0af10c86 2212 disp = cpu_lduw_code(env, s->pc);
2c0262af
FB
2213 s->pc += 2;
2214 break;
2215 }
2216 switch(rm) {
2217 case 0:
57fec1fe
FB
2218 gen_op_movl_A0_reg(R_EBX);
2219 gen_op_addl_A0_reg_sN(0, R_ESI);
2c0262af
FB
2220 break;
2221 case 1:
57fec1fe
FB
2222 gen_op_movl_A0_reg(R_EBX);
2223 gen_op_addl_A0_reg_sN(0, R_EDI);
2c0262af
FB
2224 break;
2225 case 2:
57fec1fe
FB
2226 gen_op_movl_A0_reg(R_EBP);
2227 gen_op_addl_A0_reg_sN(0, R_ESI);
2c0262af
FB
2228 break;
2229 case 3:
57fec1fe
FB
2230 gen_op_movl_A0_reg(R_EBP);
2231 gen_op_addl_A0_reg_sN(0, R_EDI);
2c0262af
FB
2232 break;
2233 case 4:
57fec1fe 2234 gen_op_movl_A0_reg(R_ESI);
2c0262af
FB
2235 break;
2236 case 5:
57fec1fe 2237 gen_op_movl_A0_reg(R_EDI);
2c0262af
FB
2238 break;
2239 case 6:
57fec1fe 2240 gen_op_movl_A0_reg(R_EBP);
2c0262af
FB
2241 break;
2242 default:
2243 case 7:
57fec1fe 2244 gen_op_movl_A0_reg(R_EBX);
2c0262af
FB
2245 break;
2246 }
2247 if (disp != 0)
2248 gen_op_addl_A0_im(disp);
2249 gen_op_andl_A0_ffff();
2250 no_rm:
2251 if (must_add_seg) {
2252 if (override < 0) {
2253 if (rm == 2 || rm == 3 || rm == 6)
2254 override = R_SS;
2255 else
2256 override = R_DS;
2257 }
7162ab21 2258 gen_op_addl_A0_seg(s, override);
2c0262af
FB
2259 }
2260 }
2261
2262 opreg = OR_A0;
2263 disp = 0;
2264 *reg_ptr = opreg;
2265 *offset_ptr = disp;
2266}
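/* Hedged sketch (struct and helper names invented for this example): the
   ModRM and SIB bit fields that the decoder above extracts with its
   shift-and-mask expressions.  REX.X/REX.B extension of index/base is
   applied separately via REX_X()/REX_B() in the real code. */
struct modrm_fields_sketch { int mod, reg, rm; };
struct sib_fields_sketch { int scale, index, base; };

static struct modrm_fields_sketch decode_modrm_sketch(unsigned char b)
{
    struct modrm_fields_sketch f;
    f.mod = (b >> 6) & 3;   /* 0/1/2 = memory forms, 3 = register operand */
    f.reg = (b >> 3) & 7;   /* register operand or opcode extension */
    f.rm  = b & 7;          /* 4 = SIB byte follows, 5 with mod 0 = disp32 */
    return f;
}

static struct sib_fields_sketch decode_sib_sketch(unsigned char b)
{
    struct sib_fields_sketch f;
    f.scale = (b >> 6) & 3; /* index register is scaled by 1 << scale */
    f.index = (b >> 3) & 7; /* 4 means "no index", as checked above */
    f.base  = b & 7;        /* 5 with mod 0 means a disp32-only base */
    return f;
}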
2267
0af10c86 2268static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
e17a36ce
FB
2269{
2270 int mod, rm, base, code;
2271
2272 mod = (modrm >> 6) & 3;
2273 if (mod == 3)
2274 return;
2275 rm = modrm & 7;
2276
2277 if (s->aflag) {
2278
2279 base = rm;
3b46e624 2280
e17a36ce 2281 if (base == 4) {
0af10c86 2282 code = cpu_ldub_code(env, s->pc++);
e17a36ce
FB
2283 base = (code & 7);
2284 }
3b46e624 2285
e17a36ce
FB
2286 switch (mod) {
2287 case 0:
2288 if (base == 5) {
2289 s->pc += 4;
2290 }
2291 break;
2292 case 1:
2293 s->pc++;
2294 break;
2295 default:
2296 case 2:
2297 s->pc += 4;
2298 break;
2299 }
2300 } else {
2301 switch (mod) {
2302 case 0:
2303 if (rm == 6) {
2304 s->pc += 2;
2305 }
2306 break;
2307 case 1:
2308 s->pc++;
2309 break;
2310 default:
2311 case 2:
2312 s->pc += 2;
2313 break;
2314 }
2315 }
2316}
2317
664e0f19
FB
2318/* used for LEA and MOV AX, mem */
2319static void gen_add_A0_ds_seg(DisasContext *s)
2320{
2321 int override, must_add_seg;
2322 must_add_seg = s->addseg;
2323 override = R_DS;
2324 if (s->override >= 0) {
2325 override = s->override;
2326 must_add_seg = 1;
664e0f19
FB
2327 }
2328 if (must_add_seg) {
8f091a59
FB
2329#ifdef TARGET_X86_64
2330 if (CODE64(s)) {
57fec1fe 2331 gen_op_addq_A0_seg(override);
5fafdf24 2332 } else
8f091a59
FB
2333#endif
2334 {
7162ab21 2335 gen_op_addl_A0_seg(s, override);
8f091a59 2336 }
664e0f19
FB
2337 }
2338}
2339
222a3336 2340/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2c0262af 2341 OR_TMP0 */
0af10c86
BS
2342static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2343 int ot, int reg, int is_store)
2c0262af
FB
2344{
2345 int mod, rm, opreg, disp;
2346
2347 mod = (modrm >> 6) & 3;
14ce26e7 2348 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
2349 if (mod == 3) {
2350 if (is_store) {
2351 if (reg != OR_TMP0)
57fec1fe
FB
2352 gen_op_mov_TN_reg(ot, 0, reg);
2353 gen_op_mov_reg_T0(ot, rm);
2c0262af 2354 } else {
57fec1fe 2355 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af 2356 if (reg != OR_TMP0)
57fec1fe 2357 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
2358 }
2359 } else {
0af10c86 2360 gen_lea_modrm(env, s, modrm, &opreg, &disp);
2c0262af
FB
2361 if (is_store) {
2362 if (reg != OR_TMP0)
57fec1fe
FB
2363 gen_op_mov_TN_reg(ot, 0, reg);
2364 gen_op_st_T0_A0(ot + s->mem_index);
2c0262af 2365 } else {
57fec1fe 2366 gen_op_ld_T0_A0(ot + s->mem_index);
2c0262af 2367 if (reg != OR_TMP0)
57fec1fe 2368 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
2369 }
2370 }
2371}
2372
0af10c86 2373static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, int ot)
2c0262af
FB
2374{
2375 uint32_t ret;
2376
2377 switch(ot) {
2378 case OT_BYTE:
0af10c86 2379 ret = cpu_ldub_code(env, s->pc);
2c0262af
FB
2380 s->pc++;
2381 break;
2382 case OT_WORD:
0af10c86 2383 ret = cpu_lduw_code(env, s->pc);
2c0262af
FB
2384 s->pc += 2;
2385 break;
2386 default:
2387 case OT_LONG:
0af10c86 2388 ret = cpu_ldl_code(env, s->pc);
2c0262af
FB
2389 s->pc += 4;
2390 break;
2391 }
2392 return ret;
2393}
2394
14ce26e7
FB
2395static inline int insn_const_size(unsigned int ot)
2396{
2397 if (ot <= OT_LONG)
2398 return 1 << ot;
2399 else
2400 return 4;
2401}
2402
6e256c93
FB
2403static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2404{
2405 TranslationBlock *tb;
2406 target_ulong pc;
2407
2408 pc = s->cs_base + eip;
2409 tb = s->tb;
2410 /* NOTE: we handle the case where the TB spans two pages here */
2411 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2412 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2413 /* jump to same page: we can use a direct jump */
57fec1fe 2414 tcg_gen_goto_tb(tb_num);
6e256c93 2415 gen_jmp_im(eip);
4b4a72e5 2416 tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
6e256c93
FB
2417 } else {
2418 /* jump to another page: currently not optimized */
2419 gen_jmp_im(eip);
2420 gen_eob(s);
2421 }
2422}
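/* Hedged illustration of the direct-jump eligibility test above (helper
   name invented): two addresses may be chained directly only when they
   fall on the same guest page; with 4 KiB pages the mask would be
   ~0xfffULL. */
static int same_page_sketch(unsigned long long a, unsigned long long b,
                            unsigned long long page_mask)
{
    return (a & page_mask) == (b & page_mask);
}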
2423
5fafdf24 2424static inline void gen_jcc(DisasContext *s, int b,
14ce26e7 2425 target_ulong val, target_ulong next_eip)
2c0262af 2426{
b27fc131 2427 int l1, l2;
3b46e624 2428
2c0262af 2429 if (s->jmp_opt) {
14ce26e7 2430 l1 = gen_new_label();
b27fc131 2431 gen_jcc1(s, b, l1);
dc259201 2432
6e256c93 2433 gen_goto_tb(s, 0, next_eip);
14ce26e7
FB
2434
2435 gen_set_label(l1);
6e256c93 2436 gen_goto_tb(s, 1, val);
5779406a 2437 s->is_jmp = DISAS_TB_JUMP;
2c0262af 2438 } else {
14ce26e7
FB
2439 l1 = gen_new_label();
2440 l2 = gen_new_label();
b27fc131 2441 gen_jcc1(s, b, l1);
8e1c85e3 2442
14ce26e7 2443 gen_jmp_im(next_eip);
8e1c85e3
FB
2444 tcg_gen_br(l2);
2445
14ce26e7
FB
2446 gen_set_label(l1);
2447 gen_jmp_im(val);
2448 gen_set_label(l2);
2c0262af
FB
2449 gen_eob(s);
2450 }
2451}
2452
f32d3781
PB
2453static void gen_cmovcc1(CPUX86State *env, DisasContext *s, int ot, int b,
2454 int modrm, int reg)
2455{
57eb0cc8 2456 CCPrepare cc;
f32d3781 2457
57eb0cc8 2458 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
f32d3781 2459
57eb0cc8
RH
2460 cc = gen_prepare_cc(s, b, cpu_T[1]);
2461 if (cc.mask != -1) {
2462 TCGv t0 = tcg_temp_new();
2463 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2464 cc.reg = t0;
2465 }
2466 if (!cc.use_reg2) {
2467 cc.reg2 = tcg_const_tl(cc.imm);
f32d3781
PB
2468 }
2469
57eb0cc8
RH
2470 tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
2471 cpu_T[0], cpu_regs[reg]);
2472 gen_op_mov_reg_T0(ot, reg);
2473
2474 if (cc.mask != -1) {
2475 tcg_temp_free(cc.reg);
2476 }
2477 if (!cc.use_reg2) {
2478 tcg_temp_free(cc.reg2);
2479 }
f32d3781
PB
2480}
2481
3bd7da9e
FB
2482static inline void gen_op_movl_T0_seg(int seg_reg)
2483{
2484 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2485 offsetof(CPUX86State,segs[seg_reg].selector));
2486}
2487
2488static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2489{
2490 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2491 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2492 offsetof(CPUX86State,segs[seg_reg].selector));
2493 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2494 tcg_gen_st_tl(cpu_T[0], cpu_env,
2495 offsetof(CPUX86State,segs[seg_reg].base));
2496}
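/* Hedged sketch (invented helper): in real/VM86 mode the store above
   amounts to "base = selector * 16"; a linear address is then simply
   base + 16-bit offset. */
static unsigned vm86_linear_sketch(unsigned short selector,
                                   unsigned short offset)
{
    unsigned base = (unsigned)selector << 4;
    return base + offset;
}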
2497
2c0262af
FB
2498/* move T0 to seg_reg and compute if the CPU state may change. Never
2499 call this function with seg_reg == R_CS */
14ce26e7 2500static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2c0262af 2501{
3415a4dd
FB
2502 if (s->pe && !s->vm86) {
2503 /* XXX: optimize by finding processor state dynamically */
773cdfcc 2504 gen_update_cc_op(s);
14ce26e7 2505 gen_jmp_im(cur_eip);
b6abf97d 2506 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 2507 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
dc196a57
FB
2508 /* abort translation because the addseg value may change or
2509 because ss32 may change. For R_SS, translation must always
2510 stop, as special handling is needed to disable hardware
2511 interrupts for the next instruction */
2512 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
5779406a 2513 s->is_jmp = DISAS_TB_JUMP;
3415a4dd 2514 } else {
3bd7da9e 2515 gen_op_movl_seg_T0_vm(seg_reg);
dc196a57 2516 if (seg_reg == R_SS)
5779406a 2517 s->is_jmp = DISAS_TB_JUMP;
3415a4dd 2518 }
2c0262af
FB
2519}
2520
0573fbfc
TS
2521static inline int svm_is_rep(int prefixes)
2522{
2523 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2524}
2525
872929aa 2526static inline void
0573fbfc 2527gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
b8b6a50b 2528 uint32_t type, uint64_t param)
0573fbfc 2529{
872929aa
FB
2530 /* no SVM activated; fast case */
2531 if (likely(!(s->flags & HF_SVMI_MASK)))
2532 return;
773cdfcc 2533 gen_update_cc_op(s);
872929aa 2534 gen_jmp_im(pc_start - s->cs_base);
052e80d5 2535 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
a7812ae4 2536 tcg_const_i64(param));
0573fbfc
TS
2537}
2538
872929aa 2539static inline void
0573fbfc
TS
2540gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2541{
872929aa 2542 gen_svm_check_intercept_param(s, pc_start, type, 0);
0573fbfc
TS
2543}
2544
4f31916f
FB
2545static inline void gen_stack_update(DisasContext *s, int addend)
2546{
14ce26e7
FB
2547#ifdef TARGET_X86_64
2548 if (CODE64(s)) {
6e0d8677 2549 gen_op_add_reg_im(2, R_ESP, addend);
14ce26e7
FB
2550 } else
2551#endif
4f31916f 2552 if (s->ss32) {
6e0d8677 2553 gen_op_add_reg_im(1, R_ESP, addend);
4f31916f 2554 } else {
6e0d8677 2555 gen_op_add_reg_im(0, R_ESP, addend);
4f31916f
FB
2556 }
2557}
2558
2c0262af
FB
2559/* generate a push. It depends on ss32, addseg and dflag */
2560static void gen_push_T0(DisasContext *s)
2561{
14ce26e7
FB
2562#ifdef TARGET_X86_64
2563 if (CODE64(s)) {
57fec1fe 2564 gen_op_movq_A0_reg(R_ESP);
8f091a59 2565 if (s->dflag) {
57fec1fe
FB
2566 gen_op_addq_A0_im(-8);
2567 gen_op_st_T0_A0(OT_QUAD + s->mem_index);
8f091a59 2568 } else {
57fec1fe
FB
2569 gen_op_addq_A0_im(-2);
2570 gen_op_st_T0_A0(OT_WORD + s->mem_index);
8f091a59 2571 }
57fec1fe 2572 gen_op_mov_reg_A0(2, R_ESP);
5fafdf24 2573 } else
14ce26e7
FB
2574#endif
2575 {
57fec1fe 2576 gen_op_movl_A0_reg(R_ESP);
14ce26e7 2577 if (!s->dflag)
57fec1fe 2578 gen_op_addl_A0_im(-2);
14ce26e7 2579 else
57fec1fe 2580 gen_op_addl_A0_im(-4);
14ce26e7
FB
2581 if (s->ss32) {
2582 if (s->addseg) {
bbf662ee 2583 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
7162ab21 2584 gen_op_addl_A0_seg(s, R_SS);
14ce26e7
FB
2585 }
2586 } else {
2587 gen_op_andl_A0_ffff();
bbf662ee 2588 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
7162ab21 2589 gen_op_addl_A0_seg(s, R_SS);
2c0262af 2590 }
57fec1fe 2591 gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
14ce26e7 2592 if (s->ss32 && !s->addseg)
57fec1fe 2593 gen_op_mov_reg_A0(1, R_ESP);
14ce26e7 2594 else
57fec1fe 2595 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
2c0262af
FB
2596 }
2597}
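/* Hedged sketch of the flat 32-bit case generated above (ss32 set, no
   addseg; names invented): compute the new top of stack, do the store,
   and only then write ESP back, so that in the real CPU a faulting store
   leaves the register state unchanged. */
static void push32_sketch(unsigned char *stack, unsigned *esp, unsigned val)
{
    unsigned a0 = *esp - 4;                   /* new top of stack */
    stack[a0]     = val & 0xff;               /* store happens first... */
    stack[a0 + 1] = (val >> 8) & 0xff;
    stack[a0 + 2] = (val >> 16) & 0xff;
    stack[a0 + 3] = (val >> 24) & 0xff;
    *esp = a0;                                /* ...ESP is updated afterwards */
}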
2598
4f31916f
FB
2599/* generate a push. It depends on ss32, addseg and dflag */
2600/* slower version for T1, only used for call Ev */
2601static void gen_push_T1(DisasContext *s)
2c0262af 2602{
14ce26e7
FB
2603#ifdef TARGET_X86_64
2604 if (CODE64(s)) {
57fec1fe 2605 gen_op_movq_A0_reg(R_ESP);
8f091a59 2606 if (s->dflag) {
57fec1fe
FB
2607 gen_op_addq_A0_im(-8);
2608 gen_op_st_T1_A0(OT_QUAD + s->mem_index);
8f091a59 2609 } else {
57fec1fe
FB
2610 gen_op_addq_A0_im(-2);
2611 gen_op_st_T0_A0(OT_WORD + s->mem_index);
8f091a59 2612 }
57fec1fe 2613 gen_op_mov_reg_A0(2, R_ESP);
5fafdf24 2614 } else
14ce26e7
FB
2615#endif
2616 {
57fec1fe 2617 gen_op_movl_A0_reg(R_ESP);
14ce26e7 2618 if (!s->dflag)
57fec1fe 2619 gen_op_addl_A0_im(-2);
14ce26e7 2620 else
57fec1fe 2621 gen_op_addl_A0_im(-4);
14ce26e7
FB
2622 if (s->ss32) {
2623 if (s->addseg) {
7162ab21 2624 gen_op_addl_A0_seg(s, R_SS);
14ce26e7
FB
2625 }
2626 } else {
2627 gen_op_andl_A0_ffff();
7162ab21 2628 gen_op_addl_A0_seg(s, R_SS);
2c0262af 2629 }
57fec1fe 2630 gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
3b46e624 2631
14ce26e7 2632 if (s->ss32 && !s->addseg)
57fec1fe 2633 gen_op_mov_reg_A0(1, R_ESP);
14ce26e7
FB
2634 else
2635 gen_stack_update(s, (-2) << s->dflag);
2c0262af
FB
2636 }
2637}
2638
4f31916f
FB
2639/* two step pop is necessary for precise exceptions */
2640static void gen_pop_T0(DisasContext *s)
2c0262af 2641{
14ce26e7
FB
2642#ifdef TARGET_X86_64
2643 if (CODE64(s)) {
57fec1fe
FB
2644 gen_op_movq_A0_reg(R_ESP);
2645 gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
5fafdf24 2646 } else
14ce26e7
FB
2647#endif
2648 {
57fec1fe 2649 gen_op_movl_A0_reg(R_ESP);
14ce26e7
FB
2650 if (s->ss32) {
2651 if (s->addseg)
7162ab21 2652 gen_op_addl_A0_seg(s, R_SS);
14ce26e7
FB
2653 } else {
2654 gen_op_andl_A0_ffff();
7162ab21 2655 gen_op_addl_A0_seg(s, R_SS);
14ce26e7 2656 }
57fec1fe 2657 gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
2c0262af
FB
2658 }
2659}
2660
2661static void gen_pop_update(DisasContext *s)
2662{
14ce26e7 2663#ifdef TARGET_X86_64
8f091a59 2664 if (CODE64(s) && s->dflag) {
14ce26e7
FB
2665 gen_stack_update(s, 8);
2666 } else
2667#endif
2668 {
2669 gen_stack_update(s, 2 << s->dflag);
2670 }
2c0262af
FB
2671}
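/* Hedged sketch of the two-step pop above for the flat 32-bit case (names
   invented): the load happens in gen_pop_T0() and ESP is only adjusted
   afterwards in gen_pop_update(), so in the real CPU a faulting load
   leaves ESP intact for precise exceptions. */
static unsigned pop32_sketch(const unsigned char *stack, unsigned *esp)
{
    const unsigned char *p = stack + *esp;
    unsigned val = (unsigned)p[0] | ((unsigned)p[1] << 8) |
                   ((unsigned)p[2] << 16) | ((unsigned)p[3] << 24);
    *esp += 4;                                /* committed after the load */
    return val;
}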
2672
2673static void gen_stack_A0(DisasContext *s)
2674{
57fec1fe 2675 gen_op_movl_A0_reg(R_ESP);
2c0262af
FB
2676 if (!s->ss32)
2677 gen_op_andl_A0_ffff();
bbf662ee 2678 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2c0262af 2679 if (s->addseg)
7162ab21 2680 gen_op_addl_A0_seg(s, R_SS);
2c0262af
FB
2681}
2682
2683/* NOTE: wrap-around in 16-bit mode is not fully handled */
2684static void gen_pusha(DisasContext *s)
2685{
2686 int i;
57fec1fe 2687 gen_op_movl_A0_reg(R_ESP);
2c0262af
FB
2688 gen_op_addl_A0_im(-16 << s->dflag);
2689 if (!s->ss32)
2690 gen_op_andl_A0_ffff();
bbf662ee 2691 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2c0262af 2692 if (s->addseg)
7162ab21 2693 gen_op_addl_A0_seg(s, R_SS);
2c0262af 2694 for(i = 0;i < 8; i++) {
57fec1fe
FB
2695 gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
2696 gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
2c0262af
FB
2697 gen_op_addl_A0_im(2 << s->dflag);
2698 }
57fec1fe 2699 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2c0262af
FB
2700}
2701
2702/* NOTE: wrap-around in 16-bit mode is not fully handled */
2703static void gen_popa(DisasContext *s)
2704{
2705 int i;
57fec1fe 2706 gen_op_movl_A0_reg(R_ESP);
2c0262af
FB
2707 if (!s->ss32)
2708 gen_op_andl_A0_ffff();
bbf662ee
FB
2709 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2710 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2c0262af 2711 if (s->addseg)
7162ab21 2712 gen_op_addl_A0_seg(s, R_SS);
2c0262af
FB
2713 for(i = 0;i < 8; i++) {
2714 /* ESP is not reloaded */
2715 if (i != 3) {
57fec1fe
FB
2716 gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
2717 gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
2c0262af
FB
2718 }
2719 gen_op_addl_A0_im(2 << s->dflag);
2720 }
57fec1fe 2721 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2c0262af
FB
2722}
2723
2c0262af
FB
2724static void gen_enter(DisasContext *s, int esp_addend, int level)
2725{
61a8c4ec 2726 int ot, opsize;
2c0262af 2727
2c0262af 2728 level &= 0x1f;
8f091a59
FB
2729#ifdef TARGET_X86_64
2730 if (CODE64(s)) {
2731 ot = s->dflag ? OT_QUAD : OT_WORD;
2732 opsize = 1 << ot;
3b46e624 2733
57fec1fe 2734 gen_op_movl_A0_reg(R_ESP);
8f091a59 2735 gen_op_addq_A0_im(-opsize);
bbf662ee 2736 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
8f091a59
FB
2737
2738 /* push bp */
57fec1fe
FB
2739 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2740 gen_op_st_T0_A0(ot + s->mem_index);
8f091a59 2741 if (level) {
b5b38f61 2742 /* XXX: must save state */
2999a0b2 2743 gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
a7812ae4
PB
2744 tcg_const_i32((ot == OT_QUAD)),
2745 cpu_T[1]);
8f091a59 2746 }
57fec1fe 2747 gen_op_mov_reg_T1(ot, R_EBP);
bbf662ee 2748 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
57fec1fe 2749 gen_op_mov_reg_T1(OT_QUAD, R_ESP);
5fafdf24 2750 } else
8f091a59
FB
2751#endif
2752 {
2753 ot = s->dflag + OT_WORD;
2754 opsize = 2 << s->dflag;
3b46e624 2755
57fec1fe 2756 gen_op_movl_A0_reg(R_ESP);
8f091a59
FB
2757 gen_op_addl_A0_im(-opsize);
2758 if (!s->ss32)
2759 gen_op_andl_A0_ffff();
bbf662ee 2760 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
8f091a59 2761 if (s->addseg)
7162ab21 2762 gen_op_addl_A0_seg(s, R_SS);
8f091a59 2763 /* push bp */
57fec1fe
FB
2764 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2765 gen_op_st_T0_A0(ot + s->mem_index);
8f091a59 2766 if (level) {
b5b38f61 2767 /* XXX: must save state */
2999a0b2 2768 gen_helper_enter_level(cpu_env, tcg_const_i32(level),
a7812ae4
PB
2769 tcg_const_i32(s->dflag),
2770 cpu_T[1]);
8f091a59 2771 }
57fec1fe 2772 gen_op_mov_reg_T1(ot, R_EBP);
bbf662ee 2773 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
57fec1fe 2774 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2c0262af 2775 }
2c0262af
FB
2776}
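/* Hedged sketch (invented helper) of what the generated code does for
   "ENTER size, 0" in flat 32-bit code: push the old frame pointer, make
   EBP point at it, then reserve the local area.  Nesting levels > 0 go
   through the enter_level helpers and are not shown. */
static void enter32_level0_sketch(unsigned char *stack, unsigned *esp,
                                  unsigned *ebp, unsigned size)
{
    unsigned a0 = *esp - 4;
    stack[a0]     = *ebp & 0xff;              /* push old EBP */
    stack[a0 + 1] = (*ebp >> 8) & 0xff;
    stack[a0 + 2] = (*ebp >> 16) & 0xff;
    stack[a0 + 3] = (*ebp >> 24) & 0xff;
    *ebp = a0;                                /* new frame pointer */
    *esp = a0 - size;                         /* allocate locals */
}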
2777
14ce26e7 2778static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2c0262af 2779{
773cdfcc 2780 gen_update_cc_op(s);
14ce26e7 2781 gen_jmp_im(cur_eip);
77b2bc2c 2782 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
5779406a 2783 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2784}
2785
2786/* an interrupt is different from an exception because of the
7f75ffd3 2787 privilege checks */
5fafdf24 2788static void gen_interrupt(DisasContext *s, int intno,
14ce26e7 2789 target_ulong cur_eip, target_ulong next_eip)
2c0262af 2790{
773cdfcc 2791 gen_update_cc_op(s);
14ce26e7 2792 gen_jmp_im(cur_eip);
77b2bc2c 2793 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
a7812ae4 2794 tcg_const_i32(next_eip - cur_eip));
5779406a 2795 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2796}
2797
14ce26e7 2798static void gen_debug(DisasContext *s, target_ulong cur_eip)
2c0262af 2799{
773cdfcc 2800 gen_update_cc_op(s);
14ce26e7 2801 gen_jmp_im(cur_eip);
4a7443be 2802 gen_helper_debug(cpu_env);
5779406a 2803 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2804}
2805
2806/* generate a generic end of block. Trace exception is also generated
2807 if needed */
2808static void gen_eob(DisasContext *s)
2809{
773cdfcc 2810 gen_update_cc_op(s);
a2cc3b24 2811 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
f0967a1a 2812 gen_helper_reset_inhibit_irq(cpu_env);
a2cc3b24 2813 }
a2397807 2814 if (s->tb->flags & HF_RF_MASK) {
f0967a1a 2815 gen_helper_reset_rf(cpu_env);
a2397807 2816 }
34865134 2817 if (s->singlestep_enabled) {
4a7443be 2818 gen_helper_debug(cpu_env);
34865134 2819 } else if (s->tf) {
4a7443be 2820 gen_helper_single_step(cpu_env);
2c0262af 2821 } else {
57fec1fe 2822 tcg_gen_exit_tb(0);
2c0262af 2823 }
5779406a 2824 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2825}
2826
2827/* generate a jump to eip. No segment change must happen before this,
 2828 as a direct jump to the next block may occur */
14ce26e7 2829static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2c0262af 2830{
a3251186
RH
2831 gen_update_cc_op(s);
2832 set_cc_op(s, CC_OP_DYNAMIC);
2c0262af 2833 if (s->jmp_opt) {
6e256c93 2834 gen_goto_tb(s, tb_num, eip);
5779406a 2835 s->is_jmp = DISAS_TB_JUMP;
2c0262af 2836 } else {
14ce26e7 2837 gen_jmp_im(eip);
2c0262af
FB
2838 gen_eob(s);
2839 }
2840}
2841
14ce26e7
FB
2842static void gen_jmp(DisasContext *s, target_ulong eip)
2843{
2844 gen_jmp_tb(s, eip, 0);
2845}
2846
8686c490
FB
2847static inline void gen_ldq_env_A0(int idx, int offset)
2848{
2849 int mem_index = (idx >> 2) - 1;
b6abf97d
FB
2850 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2851 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
8686c490 2852}
664e0f19 2853
8686c490
FB
2854static inline void gen_stq_env_A0(int idx, int offset)
2855{
2856 int mem_index = (idx >> 2) - 1;
b6abf97d
FB
2857 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2858 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
8686c490 2859}
664e0f19 2860
8686c490
FB
2861static inline void gen_ldo_env_A0(int idx, int offset)
2862{
2863 int mem_index = (idx >> 2) - 1;
b6abf97d
FB
2864 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2865 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
8686c490 2866 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
b6abf97d
FB
2867 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2868 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
8686c490 2869}
14ce26e7 2870
8686c490
FB
2871static inline void gen_sto_env_A0(int idx, int offset)
2872{
2873 int mem_index = (idx >> 2) - 1;
b6abf97d
FB
2874 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2875 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
8686c490 2876 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
b6abf97d
FB
2877 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2878 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
8686c490 2879}
14ce26e7 2880
5af45186
FB
2881static inline void gen_op_movo(int d_offset, int s_offset)
2882{
b6abf97d
FB
2883 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2884 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2885 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2886 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
5af45186
FB
2887}
2888
2889static inline void gen_op_movq(int d_offset, int s_offset)
2890{
b6abf97d
FB
2891 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2892 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
5af45186
FB
2893}
2894
2895static inline void gen_op_movl(int d_offset, int s_offset)
2896{
b6abf97d
FB
2897 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2898 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
5af45186
FB
2899}
2900
2901static inline void gen_op_movq_env_0(int d_offset)
2902{
b6abf97d
FB
2903 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2904 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
5af45186 2905}
664e0f19 2906
d3eb5eae
BS
2907typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2908typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2909typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2910typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2911typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2912typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2913 TCGv_i32 val);
c4baa050 2914typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
d3eb5eae
BS
2915typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2916 TCGv val);
c4baa050 2917
5af45186
FB
2918#define SSE_SPECIAL ((void *)1)
2919#define SSE_DUMMY ((void *)2)
664e0f19 2920
a7812ae4
PB
2921#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2922#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2923 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
5af45186 2924
d3eb5eae 2925static const SSEFunc_0_epp sse_op_table1[256][4] = {
a35f3ec7
AJ
2926 /* 3DNow! extensions */
2927 [0x0e] = { SSE_DUMMY }, /* femms */
2928 [0x0f] = { SSE_DUMMY }, /* pf... */
664e0f19
FB
2929 /* pure SSE operations */
2930 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2931 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
465e9838 2932 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
664e0f19 2933 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
a7812ae4
PB
2934 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2935 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
664e0f19
FB
2936 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2937 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2938
2939 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2940 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2941 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
d9f4bb27 2942 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
664e0f19
FB
2943 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2944 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
a7812ae4
PB
2945 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2946 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
664e0f19
FB
2947 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2948 [0x51] = SSE_FOP(sqrt),
a7812ae4
PB
2949 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2950 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2951 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2952 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2953 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2954 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
664e0f19
FB
2955 [0x58] = SSE_FOP(add),
2956 [0x59] = SSE_FOP(mul),
a7812ae4
PB
2957 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2958 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2959 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
664e0f19
FB
2960 [0x5c] = SSE_FOP(sub),
2961 [0x5d] = SSE_FOP(min),
2962 [0x5e] = SSE_FOP(div),
2963 [0x5f] = SSE_FOP(max),
2964
2965 [0xc2] = SSE_FOP(cmpeq),
d3eb5eae
BS
2966 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2967 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
664e0f19 2968
7073fbad
RH
2969 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2970 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2971 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
4242b1bd 2972
664e0f19
FB
2973 /* MMX ops and their SSE extensions */
2974 [0x60] = MMX_OP2(punpcklbw),
2975 [0x61] = MMX_OP2(punpcklwd),
2976 [0x62] = MMX_OP2(punpckldq),
2977 [0x63] = MMX_OP2(packsswb),
2978 [0x64] = MMX_OP2(pcmpgtb),
2979 [0x65] = MMX_OP2(pcmpgtw),
2980 [0x66] = MMX_OP2(pcmpgtl),
2981 [0x67] = MMX_OP2(packuswb),
2982 [0x68] = MMX_OP2(punpckhbw),
2983 [0x69] = MMX_OP2(punpckhwd),
2984 [0x6a] = MMX_OP2(punpckhdq),
2985 [0x6b] = MMX_OP2(packssdw),
a7812ae4
PB
2986 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2987 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
664e0f19
FB
2988 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2989 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
d3eb5eae
BS
2990 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2991 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2992 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2993 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
664e0f19
FB
2994 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2995 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2996 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2997 [0x74] = MMX_OP2(pcmpeqb),
2998 [0x75] = MMX_OP2(pcmpeqw),
2999 [0x76] = MMX_OP2(pcmpeql),
a35f3ec7 3000 [0x77] = { SSE_DUMMY }, /* emms */
d9f4bb27
AP
3001 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
3002 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
a7812ae4
PB
3003 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
3004 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
664e0f19
FB
3005 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
3006 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
3007 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
3008 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
a7812ae4 3009 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
664e0f19
FB
3010 [0xd1] = MMX_OP2(psrlw),
3011 [0xd2] = MMX_OP2(psrld),
3012 [0xd3] = MMX_OP2(psrlq),
3013 [0xd4] = MMX_OP2(paddq),
3014 [0xd5] = MMX_OP2(pmullw),
3015 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
3016 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
3017 [0xd8] = MMX_OP2(psubusb),
3018 [0xd9] = MMX_OP2(psubusw),
3019 [0xda] = MMX_OP2(pminub),
3020 [0xdb] = MMX_OP2(pand),
3021 [0xdc] = MMX_OP2(paddusb),
3022 [0xdd] = MMX_OP2(paddusw),
3023 [0xde] = MMX_OP2(pmaxub),
3024 [0xdf] = MMX_OP2(pandn),
3025 [0xe0] = MMX_OP2(pavgb),
3026 [0xe1] = MMX_OP2(psraw),
3027 [0xe2] = MMX_OP2(psrad),
3028 [0xe3] = MMX_OP2(pavgw),
3029 [0xe4] = MMX_OP2(pmulhuw),
3030 [0xe5] = MMX_OP2(pmulhw),
a7812ae4 3031 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
664e0f19
FB
3032 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
3033 [0xe8] = MMX_OP2(psubsb),
3034 [0xe9] = MMX_OP2(psubsw),
3035 [0xea] = MMX_OP2(pminsw),
3036 [0xeb] = MMX_OP2(por),
3037 [0xec] = MMX_OP2(paddsb),
3038 [0xed] = MMX_OP2(paddsw),
3039 [0xee] = MMX_OP2(pmaxsw),
3040 [0xef] = MMX_OP2(pxor),
465e9838 3041 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
664e0f19
FB
3042 [0xf1] = MMX_OP2(psllw),
3043 [0xf2] = MMX_OP2(pslld),
3044 [0xf3] = MMX_OP2(psllq),
3045 [0xf4] = MMX_OP2(pmuludq),
3046 [0xf5] = MMX_OP2(pmaddwd),
3047 [0xf6] = MMX_OP2(psadbw),
d3eb5eae
BS
3048 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
3049 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
664e0f19
FB
3050 [0xf8] = MMX_OP2(psubb),
3051 [0xf9] = MMX_OP2(psubw),
3052 [0xfa] = MMX_OP2(psubl),
3053 [0xfb] = MMX_OP2(psubq),
3054 [0xfc] = MMX_OP2(paddb),
3055 [0xfd] = MMX_OP2(paddw),
3056 [0xfe] = MMX_OP2(paddl),
3057};
3058
d3eb5eae 3059static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
664e0f19
FB
3060 [0 + 2] = MMX_OP2(psrlw),
3061 [0 + 4] = MMX_OP2(psraw),
3062 [0 + 6] = MMX_OP2(psllw),
3063 [8 + 2] = MMX_OP2(psrld),
3064 [8 + 4] = MMX_OP2(psrad),
3065 [8 + 6] = MMX_OP2(pslld),
3066 [16 + 2] = MMX_OP2(psrlq),
a7812ae4 3067 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
664e0f19 3068 [16 + 6] = MMX_OP2(psllq),
a7812ae4 3069 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
664e0f19
FB
3070};
3071
d3eb5eae 3072static const SSEFunc_0_epi sse_op_table3ai[] = {
a7812ae4 3073 gen_helper_cvtsi2ss,
11f8cdbc 3074 gen_helper_cvtsi2sd
c4baa050 3075};
a7812ae4 3076
11f8cdbc 3077#ifdef TARGET_X86_64
d3eb5eae 3078static const SSEFunc_0_epl sse_op_table3aq[] = {
11f8cdbc
SW
3079 gen_helper_cvtsq2ss,
3080 gen_helper_cvtsq2sd
3081};
3082#endif
3083
d3eb5eae 3084static const SSEFunc_i_ep sse_op_table3bi[] = {
a7812ae4 3085 gen_helper_cvttss2si,
a7812ae4 3086 gen_helper_cvtss2si,
bedc2ac1 3087 gen_helper_cvttsd2si,
11f8cdbc 3088 gen_helper_cvtsd2si
664e0f19 3089};
3b46e624 3090
11f8cdbc 3091#ifdef TARGET_X86_64
d3eb5eae 3092static const SSEFunc_l_ep sse_op_table3bq[] = {
11f8cdbc 3093 gen_helper_cvttss2sq,
11f8cdbc 3094 gen_helper_cvtss2sq,
bedc2ac1 3095 gen_helper_cvttsd2sq,
11f8cdbc
SW
3096 gen_helper_cvtsd2sq
3097};
3098#endif
3099
d3eb5eae 3100static const SSEFunc_0_epp sse_op_table4[8][4] = {
664e0f19
FB
3101 SSE_FOP(cmpeq),
3102 SSE_FOP(cmplt),
3103 SSE_FOP(cmple),
3104 SSE_FOP(cmpunord),
3105 SSE_FOP(cmpneq),
3106 SSE_FOP(cmpnlt),
3107 SSE_FOP(cmpnle),
3108 SSE_FOP(cmpord),
3109};
3b46e624 3110
d3eb5eae 3111static const SSEFunc_0_epp sse_op_table5[256] = {
a7812ae4
PB
3112 [0x0c] = gen_helper_pi2fw,
3113 [0x0d] = gen_helper_pi2fd,
3114 [0x1c] = gen_helper_pf2iw,
3115 [0x1d] = gen_helper_pf2id,
3116 [0x8a] = gen_helper_pfnacc,
3117 [0x8e] = gen_helper_pfpnacc,
3118 [0x90] = gen_helper_pfcmpge,
3119 [0x94] = gen_helper_pfmin,
3120 [0x96] = gen_helper_pfrcp,
3121 [0x97] = gen_helper_pfrsqrt,
3122 [0x9a] = gen_helper_pfsub,
3123 [0x9e] = gen_helper_pfadd,
3124 [0xa0] = gen_helper_pfcmpgt,
3125 [0xa4] = gen_helper_pfmax,
3126 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
3127 [0xa7] = gen_helper_movq, /* pfrsqit1 */
3128 [0xaa] = gen_helper_pfsubr,
3129 [0xae] = gen_helper_pfacc,
3130 [0xb0] = gen_helper_pfcmpeq,
3131 [0xb4] = gen_helper_pfmul,
3132 [0xb6] = gen_helper_movq, /* pfrcpit2 */
3133 [0xb7] = gen_helper_pmulhrw_mmx,
3134 [0xbb] = gen_helper_pswapd,
3135 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
a35f3ec7
AJ
3136};
3137
d3eb5eae
BS
3138struct SSEOpHelper_epp {
3139 SSEFunc_0_epp op[2];
c4baa050
BS
3140 uint32_t ext_mask;
3141};
3142
d3eb5eae
BS
3143struct SSEOpHelper_eppi {
3144 SSEFunc_0_eppi op[2];
c4baa050 3145 uint32_t ext_mask;
222a3336 3146};
c4baa050 3147
222a3336 3148#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
a7812ae4
PB
3149#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3150#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
222a3336 3151#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
e71827bc
AJ
3152#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
3153 CPUID_EXT_PCLMULQDQ }
d640045a 3154#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
c4baa050 3155
d3eb5eae 3156static const struct SSEOpHelper_epp sse_op_table6[256] = {
222a3336
AZ
3157 [0x00] = SSSE3_OP(pshufb),
3158 [0x01] = SSSE3_OP(phaddw),
3159 [0x02] = SSSE3_OP(phaddd),
3160 [0x03] = SSSE3_OP(phaddsw),
3161 [0x04] = SSSE3_OP(pmaddubsw),
3162 [0x05] = SSSE3_OP(phsubw),
3163 [0x06] = SSSE3_OP(phsubd),
3164 [0x07] = SSSE3_OP(phsubsw),
3165 [0x08] = SSSE3_OP(psignb),
3166 [0x09] = SSSE3_OP(psignw),
3167 [0x0a] = SSSE3_OP(psignd),
3168 [0x0b] = SSSE3_OP(pmulhrsw),
3169 [0x10] = SSE41_OP(pblendvb),
3170 [0x14] = SSE41_OP(blendvps),
3171 [0x15] = SSE41_OP(blendvpd),
3172 [0x17] = SSE41_OP(ptest),
3173 [0x1c] = SSSE3_OP(pabsb),
3174 [0x1d] = SSSE3_OP(pabsw),
3175 [0x1e] = SSSE3_OP(pabsd),
3176 [0x20] = SSE41_OP(pmovsxbw),
3177 [0x21] = SSE41_OP(pmovsxbd),
3178 [0x22] = SSE41_OP(pmovsxbq),
3179 [0x23] = SSE41_OP(pmovsxwd),
3180 [0x24] = SSE41_OP(pmovsxwq),
3181 [0x25] = SSE41_OP(pmovsxdq),
3182 [0x28] = SSE41_OP(pmuldq),
3183 [0x29] = SSE41_OP(pcmpeqq),
3184 [0x2a] = SSE41_SPECIAL, /* movntqda */
3185 [0x2b] = SSE41_OP(packusdw),
3186 [0x30] = SSE41_OP(pmovzxbw),
3187 [0x31] = SSE41_OP(pmovzxbd),
3188 [0x32] = SSE41_OP(pmovzxbq),
3189 [0x33] = SSE41_OP(pmovzxwd),
3190 [0x34] = SSE41_OP(pmovzxwq),
3191 [0x35] = SSE41_OP(pmovzxdq),
3192 [0x37] = SSE42_OP(pcmpgtq),
3193 [0x38] = SSE41_OP(pminsb),
3194 [0x39] = SSE41_OP(pminsd),
3195 [0x3a] = SSE41_OP(pminuw),
3196 [0x3b] = SSE41_OP(pminud),
3197 [0x3c] = SSE41_OP(pmaxsb),
3198 [0x3d] = SSE41_OP(pmaxsd),
3199 [0x3e] = SSE41_OP(pmaxuw),
3200 [0x3f] = SSE41_OP(pmaxud),
3201 [0x40] = SSE41_OP(pmulld),
3202 [0x41] = SSE41_OP(phminposuw),
d640045a
AJ
3203 [0xdb] = AESNI_OP(aesimc),
3204 [0xdc] = AESNI_OP(aesenc),
3205 [0xdd] = AESNI_OP(aesenclast),
3206 [0xde] = AESNI_OP(aesdec),
3207 [0xdf] = AESNI_OP(aesdeclast),
4242b1bd
AZ
3208};
3209
d3eb5eae 3210static const struct SSEOpHelper_eppi sse_op_table7[256] = {
222a3336
AZ
3211 [0x08] = SSE41_OP(roundps),
3212 [0x09] = SSE41_OP(roundpd),
3213 [0x0a] = SSE41_OP(roundss),
3214 [0x0b] = SSE41_OP(roundsd),
3215 [0x0c] = SSE41_OP(blendps),
3216 [0x0d] = SSE41_OP(blendpd),
3217 [0x0e] = SSE41_OP(pblendw),
3218 [0x0f] = SSSE3_OP(palignr),
3219 [0x14] = SSE41_SPECIAL, /* pextrb */
3220 [0x15] = SSE41_SPECIAL, /* pextrw */
3221 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
3222 [0x17] = SSE41_SPECIAL, /* extractps */
3223 [0x20] = SSE41_SPECIAL, /* pinsrb */
3224 [0x21] = SSE41_SPECIAL, /* insertps */
3225 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
3226 [0x40] = SSE41_OP(dpps),
3227 [0x41] = SSE41_OP(dppd),
3228 [0x42] = SSE41_OP(mpsadbw),
e71827bc 3229 [0x44] = PCLMULQDQ_OP(pclmulqdq),
222a3336
AZ
3230 [0x60] = SSE42_OP(pcmpestrm),
3231 [0x61] = SSE42_OP(pcmpestri),
3232 [0x62] = SSE42_OP(pcmpistrm),
3233 [0x63] = SSE42_OP(pcmpistri),
d640045a 3234 [0xdf] = AESNI_OP(aeskeygenassist),
4242b1bd
AZ
3235};
3236
0af10c86
BS
3237static void gen_sse(CPUX86State *env, DisasContext *s, int b,
3238 target_ulong pc_start, int rex_r)
664e0f19
FB
3239{
3240 int b1, op1_offset, op2_offset, is_xmm, val, ot;
3241 int modrm, mod, rm, reg, reg_addr, offset_addr;
d3eb5eae
BS
3242 SSEFunc_0_epp sse_fn_epp;
3243 SSEFunc_0_eppi sse_fn_eppi;
c4baa050 3244 SSEFunc_0_ppi sse_fn_ppi;
d3eb5eae 3245 SSEFunc_0_eppt sse_fn_eppt;
664e0f19
FB
3246
3247 b &= 0xff;
5fafdf24 3248 if (s->prefix & PREFIX_DATA)
664e0f19 3249 b1 = 1;
5fafdf24 3250 else if (s->prefix & PREFIX_REPZ)
664e0f19 3251 b1 = 2;
5fafdf24 3252 else if (s->prefix & PREFIX_REPNZ)
664e0f19
FB
3253 b1 = 3;
3254 else
3255 b1 = 0;
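 /* Note: b1 selects the mandatory-prefix column of the SSE tables:
    0 = no prefix, 1 = 0x66 (PREFIX_DATA), 2 = 0xF3 (PREFIX_REPZ),
    3 = 0xF2 (PREFIX_REPNZ). */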
d3eb5eae
BS
3256 sse_fn_epp = sse_op_table1[b][b1];
3257 if (!sse_fn_epp) {
664e0f19 3258 goto illegal_op;
c4baa050 3259 }
a35f3ec7 3260 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
664e0f19
FB
3261 is_xmm = 1;
3262 } else {
3263 if (b1 == 0) {
3264 /* MMX case */
3265 is_xmm = 0;
3266 } else {
3267 is_xmm = 1;
3268 }
3269 }
3270 /* simple MMX/SSE operation */
3271 if (s->flags & HF_TS_MASK) {
3272 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3273 return;
3274 }
3275 if (s->flags & HF_EM_MASK) {
3276 illegal_op:
3277 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3278 return;
3279 }
3280 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
4242b1bd
AZ
3281 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3282 goto illegal_op;
e771edab
AJ
3283 if (b == 0x0e) {
3284 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3285 goto illegal_op;
3286 /* femms */
d3eb5eae 3287 gen_helper_emms(cpu_env);
e771edab
AJ
3288 return;
3289 }
3290 if (b == 0x77) {
3291 /* emms */
d3eb5eae 3292 gen_helper_emms(cpu_env);
664e0f19
FB
3293 return;
3294 }
3295 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3296 the static cpu state) */
3297 if (!is_xmm) {
d3eb5eae 3298 gen_helper_enter_mmx(cpu_env);
664e0f19
FB
3299 }
3300
0af10c86 3301 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3302 reg = ((modrm >> 3) & 7);
3303 if (is_xmm)
3304 reg |= rex_r;
3305 mod = (modrm >> 6) & 3;
d3eb5eae 3306 if (sse_fn_epp == SSE_SPECIAL) {
664e0f19
FB
3307 b |= (b1 << 8);
3308 switch(b) {
3309 case 0x0e7: /* movntq */
5fafdf24 3310 if (mod == 3)
664e0f19 3311 goto illegal_op;
0af10c86 3312 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3313 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
664e0f19
FB
3314 break;
3315 case 0x1e7: /* movntdq */
3316 case 0x02b: /* movntps */
3317 case 0x12b: /* movntpd */
2e21e749
T
3318 if (mod == 3)
3319 goto illegal_op;
0af10c86 3320 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2e21e749
T
3321 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3322 break;
465e9838
FB
3323 case 0x3f0: /* lddqu */
3324 if (mod == 3)
664e0f19 3325 goto illegal_op;
0af10c86 3326 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
c2254920 3327 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
664e0f19 3328 break;
d9f4bb27
AP
3329 case 0x22b: /* movntss */
3330 case 0x32b: /* movntsd */
3331 if (mod == 3)
3332 goto illegal_op;
0af10c86 3333 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
d9f4bb27
AP
3334 if (b1 & 1) {
3335 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
3336 xmm_regs[reg]));
3337 } else {
3338 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3339 xmm_regs[reg].XMM_L(0)));
3340 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3341 }
3342 break;
664e0f19 3343 case 0x6e: /* movd mm, ea */
dabd98dd
FB
3344#ifdef TARGET_X86_64
3345 if (s->dflag == 2) {
0af10c86 3346 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
5af45186 3347 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
5fafdf24 3348 } else
dabd98dd
FB
3349#endif
3350 {
0af10c86 3351 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
5af45186
FB
3352 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3353 offsetof(CPUX86State,fpregs[reg].mmx));
a7812ae4
PB
3354 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3355 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
dabd98dd 3356 }
664e0f19
FB
3357 break;
3358 case 0x16e: /* movd xmm, ea */
dabd98dd
FB
3359#ifdef TARGET_X86_64
3360 if (s->dflag == 2) {
0af10c86 3361 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
5af45186
FB
3362 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3363 offsetof(CPUX86State,xmm_regs[reg]));
a7812ae4 3364 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
5fafdf24 3365 } else
dabd98dd
FB
3366#endif
3367 {
0af10c86 3368 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
5af45186
FB
3369 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3370 offsetof(CPUX86State,xmm_regs[reg]));
b6abf97d 3371 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
a7812ae4 3372 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
dabd98dd 3373 }
664e0f19
FB
3374 break;
3375 case 0x6f: /* movq mm, ea */
3376 if (mod != 3) {
0af10c86 3377 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3378 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
664e0f19
FB
3379 } else {
3380 rm = (modrm & 7);
b6abf97d 3381 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
5af45186 3382 offsetof(CPUX86State,fpregs[rm].mmx));
b6abf97d 3383 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
5af45186 3384 offsetof(CPUX86State,fpregs[reg].mmx));
664e0f19
FB
3385 }
3386 break;
3387 case 0x010: /* movups */
3388 case 0x110: /* movupd */
3389 case 0x028: /* movaps */
3390 case 0x128: /* movapd */
3391 case 0x16f: /* movdqa xmm, ea */
3392 case 0x26f: /* movdqu xmm, ea */
3393 if (mod != 3) {
0af10c86 3394 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3395 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
664e0f19
FB
3396 } else {
3397 rm = (modrm & 7) | REX_B(s);
3398 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3399 offsetof(CPUX86State,xmm_regs[rm]));
3400 }
3401 break;
3402 case 0x210: /* movss xmm, ea */
3403 if (mod != 3) {
0af10c86 3404 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
57fec1fe 3405 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
651ba608 3406 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
664e0f19 3407 gen_op_movl_T0_0();
651ba608
FB
3408 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3409 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3410 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
664e0f19
FB
3411 } else {
3412 rm = (modrm & 7) | REX_B(s);
3413 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3414 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3415 }
3416 break;
3417 case 0x310: /* movsd xmm, ea */
3418 if (mod != 3) {
0af10c86 3419 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3420 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
664e0f19 3421 gen_op_movl_T0_0();
651ba608
FB
3422 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3423 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
664e0f19
FB
3424 } else {
3425 rm = (modrm & 7) | REX_B(s);
3426 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3427 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3428 }
3429 break;
3430 case 0x012: /* movlps */
3431 case 0x112: /* movlpd */
3432 if (mod != 3) {
0af10c86 3433 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3434 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3435 } else {
3436 /* movhlps */
3437 rm = (modrm & 7) | REX_B(s);
3438 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3439 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3440 }
3441 break;
465e9838
FB
3442 case 0x212: /* movsldup */
3443 if (mod != 3) {
0af10c86 3444 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3445 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
465e9838
FB
3446 } else {
3447 rm = (modrm & 7) | REX_B(s);
3448 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3449 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3450 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3451 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3452 }
3453 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3454 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3455 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3456 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3457 break;
3458 case 0x312: /* movddup */
3459 if (mod != 3) {
0af10c86 3460 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3461 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
465e9838
FB
3462 } else {
3463 rm = (modrm & 7) | REX_B(s);
3464 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3465 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3466 }
3467 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
ba6526df 3468 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
465e9838 3469 break;
664e0f19
FB
3470 case 0x016: /* movhps */
3471 case 0x116: /* movhpd */
3472 if (mod != 3) {
0af10c86 3473 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3474 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
664e0f19
FB
3475 } else {
3476 /* movlhps */
3477 rm = (modrm & 7) | REX_B(s);
3478 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3479 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3480 }
3481 break;
3482 case 0x216: /* movshdup */
3483 if (mod != 3) {
0af10c86 3484 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3485 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
664e0f19
FB
3486 } else {
3487 rm = (modrm & 7) | REX_B(s);
3488 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3489 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3490 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3491 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3492 }
3493 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3494 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3495 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3496 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3497 break;
d9f4bb27
AP
3498 case 0x178:
3499 case 0x378:
3500 {
3501 int bit_index, field_length;
3502
3503 if (b1 == 1 && reg != 0)
3504 goto illegal_op;
0af10c86
BS
3505 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3506 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
d9f4bb27
AP
3507 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3508 offsetof(CPUX86State,xmm_regs[reg]));
3509 if (b1 == 1)
3510 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3511 tcg_const_i32(bit_index),
3512 tcg_const_i32(field_length));
d9f4bb27 3513 else
3514 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3515 tcg_const_i32(bit_index),
3516 tcg_const_i32(field_length));
d9f4bb27
AP
3517 }
3518 break;
664e0f19 3519 case 0x7e: /* movd ea, mm */
dabd98dd
FB
3520#ifdef TARGET_X86_64
3521 if (s->dflag == 2) {
5af45186
FB
3522 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3523 offsetof(CPUX86State,fpregs[reg].mmx));
0af10c86 3524 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
5fafdf24 3525 } else
dabd98dd
FB
3526#endif
3527 {
5af45186
FB
3528 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3529 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
0af10c86 3530 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
dabd98dd 3531 }
664e0f19
FB
3532 break;
3533 case 0x17e: /* movd ea, xmm */
dabd98dd
FB
3534#ifdef TARGET_X86_64
3535 if (s->dflag == 2) {
5af45186
FB
3536 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3537 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
0af10c86 3538 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
5fafdf24 3539 } else
dabd98dd
FB
3540#endif
3541 {
5af45186
FB
3542 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3543 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
0af10c86 3544 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
dabd98dd 3545 }
664e0f19
FB
3546 break;
3547 case 0x27e: /* movq xmm, ea */
3548 if (mod != 3) {
0af10c86 3549 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3550 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3551 } else {
3552 rm = (modrm & 7) | REX_B(s);
3553 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3554 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3555 }
3556 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3557 break;
3558 case 0x7f: /* movq ea, mm */
3559 if (mod != 3) {
0af10c86 3560 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3561 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
664e0f19
FB
3562 } else {
3563 rm = (modrm & 7);
3564 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3565 offsetof(CPUX86State,fpregs[reg].mmx));
3566 }
3567 break;
3568 case 0x011: /* movups */
3569 case 0x111: /* movupd */
3570 case 0x029: /* movaps */
3571 case 0x129: /* movapd */
3572 case 0x17f: /* movdqa ea, xmm */
3573 case 0x27f: /* movdqu ea, xmm */
3574 if (mod != 3) {
0af10c86 3575 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3576 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
664e0f19
FB
3577 } else {
3578 rm = (modrm & 7) | REX_B(s);
3579 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3580 offsetof(CPUX86State,xmm_regs[reg]));
3581 }
3582 break;
3583 case 0x211: /* movss ea, xmm */
3584 if (mod != 3) {
0af10c86 3585 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
651ba608 3586 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
57fec1fe 3587 gen_op_st_T0_A0(OT_LONG + s->mem_index);
664e0f19
FB
3588 } else {
3589 rm = (modrm & 7) | REX_B(s);
3590 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3591 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3592 }
3593 break;
3594 case 0x311: /* movsd ea, xmm */
3595 if (mod != 3) {
0af10c86 3596 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3597 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3598 } else {
3599 rm = (modrm & 7) | REX_B(s);
3600 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3601 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3602 }
3603 break;
3604 case 0x013: /* movlps */
3605 case 0x113: /* movlpd */
3606 if (mod != 3) {
0af10c86 3607 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3608 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3609 } else {
3610 goto illegal_op;
3611 }
3612 break;
3613 case 0x017: /* movhps */
3614 case 0x117: /* movhpd */
3615 if (mod != 3) {
0af10c86 3616 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3617 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
664e0f19
FB
3618 } else {
3619 goto illegal_op;
3620 }
3621 break;
3622 case 0x71: /* shift mm, im */
3623 case 0x72:
3624 case 0x73:
3625 case 0x171: /* shift xmm, im */
3626 case 0x172:
3627 case 0x173:
c045af25
AK
3628 if (b1 >= 2) {
3629 goto illegal_op;
3630 }
0af10c86 3631 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3632 if (is_xmm) {
3633 gen_op_movl_T0_im(val);
651ba608 3634 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
664e0f19 3635 gen_op_movl_T0_0();
651ba608 3636 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
664e0f19
FB
3637 op1_offset = offsetof(CPUX86State,xmm_t0);
3638 } else {
3639 gen_op_movl_T0_im(val);
651ba608 3640 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
664e0f19 3641 gen_op_movl_T0_0();
651ba608 3642 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
664e0f19
FB
3643 op1_offset = offsetof(CPUX86State,mmx_t0);
3644 }
d3eb5eae
BS
3645 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3646 (((modrm >> 3)) & 7)][b1];
3647 if (!sse_fn_epp) {
664e0f19 3648 goto illegal_op;
c4baa050 3649 }
664e0f19
FB
3650 if (is_xmm) {
3651 rm = (modrm & 7) | REX_B(s);
3652 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3653 } else {
3654 rm = (modrm & 7);
3655 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3656 }
5af45186
FB
3657 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3658 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
d3eb5eae 3659 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3660 break;
3661 case 0x050: /* movmskps */
664e0f19 3662 rm = (modrm & 7) | REX_B(s);
5af45186
FB
3663 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3664 offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3665 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
b6abf97d 3666 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
57fec1fe 3667 gen_op_mov_reg_T0(OT_LONG, reg);
664e0f19
FB
3668 break;
3669 case 0x150: /* movmskpd */
664e0f19 3670 rm = (modrm & 7) | REX_B(s);
5af45186
FB
3671 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3672 offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3673 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
b6abf97d 3674 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
57fec1fe 3675 gen_op_mov_reg_T0(OT_LONG, reg);
664e0f19
FB
3676 break;
3677 case 0x02a: /* cvtpi2ps */
3678 case 0x12a: /* cvtpi2pd */
d3eb5eae 3679 gen_helper_enter_mmx(cpu_env);
664e0f19 3680 if (mod != 3) {
0af10c86 3681 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
664e0f19 3682 op2_offset = offsetof(CPUX86State,mmx_t0);
8686c490 3683 gen_ldq_env_A0(s->mem_index, op2_offset);
664e0f19
FB
3684 } else {
3685 rm = (modrm & 7);
3686 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3687 }
3688 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
5af45186
FB
3689 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3690 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
664e0f19
FB
3691 switch(b >> 8) {
3692 case 0x0:
d3eb5eae 3693 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3694 break;
3695 default:
3696 case 0x1:
d3eb5eae 3697 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3698 break;
3699 }
3700 break;
3701 case 0x22a: /* cvtsi2ss */
3702 case 0x32a: /* cvtsi2sd */
3703 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
0af10c86 3704 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
664e0f19 3705 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
5af45186 3706 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
28e10711 3707 if (ot == OT_LONG) {
d3eb5eae 3708 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
28e10711 3709 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 3710 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
28e10711 3711 } else {
11f8cdbc 3712#ifdef TARGET_X86_64
d3eb5eae
BS
3713 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3714 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
11f8cdbc
SW
3715#else
3716 goto illegal_op;
3717#endif
28e10711 3718 }
664e0f19
FB
3719 break;
3720 case 0x02c: /* cvttps2pi */
3721 case 0x12c: /* cvttpd2pi */
3722 case 0x02d: /* cvtps2pi */
3723 case 0x12d: /* cvtpd2pi */
d3eb5eae 3724 gen_helper_enter_mmx(cpu_env);
664e0f19 3725 if (mod != 3) {
0af10c86 3726 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
664e0f19 3727 op2_offset = offsetof(CPUX86State,xmm_t0);
8686c490 3728 gen_ldo_env_A0(s->mem_index, op2_offset);
664e0f19
FB
3729 } else {
3730 rm = (modrm & 7) | REX_B(s);
3731 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3732 }
3733 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
5af45186
FB
3734 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3735 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
664e0f19
FB
3736 switch(b) {
3737 case 0x02c:
d3eb5eae 3738 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3739 break;
3740 case 0x12c:
d3eb5eae 3741 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3742 break;
3743 case 0x02d:
d3eb5eae 3744 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3745 break;
3746 case 0x12d:
d3eb5eae 3747 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3748 break;
3749 }
3750 break;
3751 case 0x22c: /* cvttss2si */
3752 case 0x32c: /* cvttsd2si */
3753 case 0x22d: /* cvtss2si */
3754 case 0x32d: /* cvtsd2si */
3755 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
31313213 3756 if (mod != 3) {
0af10c86 3757 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
31313213 3758 if ((b >> 8) & 1) {
8686c490 3759 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
31313213 3760 } else {
57fec1fe 3761 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
651ba608 3762 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
31313213
FB
3763 }
3764 op2_offset = offsetof(CPUX86State,xmm_t0);
3765 } else {
3766 rm = (modrm & 7) | REX_B(s);
3767 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3768 }
5af45186
FB
3769 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3770 if (ot == OT_LONG) {
d3eb5eae 3771 SSEFunc_i_ep sse_fn_i_ep =
bedc2ac1 3772 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
d3eb5eae 3773 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
b6abf97d 3774 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5af45186 3775 } else {
11f8cdbc 3776#ifdef TARGET_X86_64
d3eb5eae 3777 SSEFunc_l_ep sse_fn_l_ep =
bedc2ac1 3778 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
d3eb5eae 3779 sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
11f8cdbc
SW
3780#else
3781 goto illegal_op;
3782#endif
5af45186 3783 }
57fec1fe 3784 gen_op_mov_reg_T0(ot, reg);
664e0f19
FB
3785 break;
3786 case 0xc4: /* pinsrw */
5fafdf24 3787 case 0x1c4:
d1e42c5c 3788 s->rip_offset = 1;
0af10c86
BS
3789 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
3790 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3791 if (b1) {
3792 val &= 7;
5af45186
FB
3793 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3794 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
664e0f19
FB
3795 } else {
3796 val &= 3;
5af45186
FB
3797 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3798 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
664e0f19
FB
3799 }
3800 break;
3801 case 0xc5: /* pextrw */
5fafdf24 3802 case 0x1c5:
664e0f19
FB
3803 if (mod != 3)
3804 goto illegal_op;
6dc2d0da 3805 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
0af10c86 3806 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3807 if (b1) {
3808 val &= 7;
3809 rm = (modrm & 7) | REX_B(s);
5af45186
FB
3810 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3811 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
664e0f19
FB
3812 } else {
3813 val &= 3;
3814 rm = (modrm & 7);
5af45186
FB
3815 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3816 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
664e0f19
FB
3817 }
3818 reg = ((modrm >> 3) & 7) | rex_r;
6dc2d0da 3819 gen_op_mov_reg_T0(ot, reg);
664e0f19
FB
3820 break;
3821 case 0x1d6: /* movq ea, xmm */
3822 if (mod != 3) {
0af10c86 3823 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3824 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3825 } else {
3826 rm = (modrm & 7) | REX_B(s);
3827 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3828 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3829 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3830 }
3831 break;
3832 case 0x2d6: /* movq2dq */
d3eb5eae 3833 gen_helper_enter_mmx(cpu_env);
480c1cdb
FB
3834 rm = (modrm & 7);
3835 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3836 offsetof(CPUX86State,fpregs[rm].mmx));
3837 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
664e0f19
FB
3838 break;
3839 case 0x3d6: /* movdq2q */
d3eb5eae 3840 gen_helper_enter_mmx(cpu_env);
480c1cdb
FB
3841 rm = (modrm & 7) | REX_B(s);
3842 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3843 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
664e0f19
FB
3844 break;
3845 case 0xd7: /* pmovmskb */
3846 case 0x1d7:
3847 if (mod != 3)
3848 goto illegal_op;
3849 if (b1) {
3850 rm = (modrm & 7) | REX_B(s);
5af45186 3851 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3852 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
664e0f19
FB
3853 } else {
3854 rm = (modrm & 7);
5af45186 3855 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
d3eb5eae 3856 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
664e0f19 3857 }
b6abf97d 3858 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
664e0f19 3859 reg = ((modrm >> 3) & 7) | rex_r;
57fec1fe 3860 gen_op_mov_reg_T0(OT_LONG, reg);
664e0f19 3861 break;
111994ee 3862
4242b1bd 3863 case 0x138:
000cacf6 3864 case 0x038:
4242b1bd 3865 b = modrm;
111994ee
RH
3866 if ((b & 0xf0) == 0xf0) {
3867 goto do_0f_38_fx;
3868 }
0af10c86 3869 modrm = cpu_ldub_code(env, s->pc++);
4242b1bd
AZ
3870 rm = modrm & 7;
3871 reg = ((modrm >> 3) & 7) | rex_r;
3872 mod = (modrm >> 6) & 3;
c045af25
AK
3873 if (b1 >= 2) {
3874 goto illegal_op;
3875 }
4242b1bd 3876
d3eb5eae
BS
3877 sse_fn_epp = sse_op_table6[b].op[b1];
3878 if (!sse_fn_epp) {
4242b1bd 3879 goto illegal_op;
c4baa050 3880 }
222a3336
AZ
3881 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3882 goto illegal_op;
4242b1bd
AZ
3883
3884 if (b1) {
3885 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3886 if (mod == 3) {
3887 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3888 } else {
3889 op2_offset = offsetof(CPUX86State,xmm_t0);
0af10c86 3890 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
222a3336
AZ
3891 switch (b) {
3892 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3893 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3894 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3895 gen_ldq_env_A0(s->mem_index, op2_offset +
3896 offsetof(XMMReg, XMM_Q(0)));
3897 break;
3898 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3899 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
a7812ae4 3900 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
222a3336 3901 (s->mem_index >> 2) - 1);
a7812ae4 3902 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
222a3336
AZ
3903 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3904 offsetof(XMMReg, XMM_L(0)));
3905 break;
3906 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3907 tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
3908 (s->mem_index >> 2) - 1);
3909 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3910 offsetof(XMMReg, XMM_W(0)));
3911 break;
3912 case 0x2a: /* movntqda */
3913 gen_ldo_env_A0(s->mem_index, op1_offset);
3914 return;
3915 default:
3916 gen_ldo_env_A0(s->mem_index, op2_offset);
3917 }
4242b1bd
AZ
3918 }
3919 } else {
3920 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3921 if (mod == 3) {
3922 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3923 } else {
3924 op2_offset = offsetof(CPUX86State,mmx_t0);
0af10c86 3925 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4242b1bd
AZ
3926 gen_ldq_env_A0(s->mem_index, op2_offset);
3927 }
3928 }
d3eb5eae 3929 if (sse_fn_epp == SSE_SPECIAL) {
222a3336 3930 goto illegal_op;
c4baa050 3931 }
222a3336 3932
4242b1bd
AZ
3933 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3934 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 3935 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
222a3336 3936
3ca51d07
RH
3937 if (b == 0x17) {
3938 set_cc_op(s, CC_OP_EFLAGS);
3939 }
4242b1bd 3940 break;
111994ee
RH
3941
3942 case 0x238:
3943 case 0x338:
3944 do_0f_38_fx:
3945 /* Various integer extensions at 0f 38 f[0-f]. */
3946 b = modrm | (b1 << 8);
0af10c86 3947 modrm = cpu_ldub_code(env, s->pc++);
222a3336
AZ
3948 reg = ((modrm >> 3) & 7) | rex_r;
3949
111994ee
RH
3950 switch (b) {
3951 case 0x3f0: /* crc32 Gd,Eb */
3952 case 0x3f1: /* crc32 Gd,Ey */
3953 do_crc32:
3954 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3955 goto illegal_op;
3956 }
3957 if ((b & 0xff) == 0xf0) {
3958 ot = OT_BYTE;
3959 } else if (s->dflag != 2) {
3960 ot = (s->prefix & PREFIX_DATA ? OT_WORD : OT_LONG);
3961 } else {
3962 ot = OT_QUAD;
3963 }
4242b1bd 3964
111994ee
RH
3965 gen_op_mov_TN_reg(OT_LONG, 0, reg);
3966 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3967 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3968 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3969 cpu_T[0], tcg_const_i32(8 << ot));
222a3336 3970
111994ee
RH
3971 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3972 gen_op_mov_reg_T0(ot, reg);
3973 break;
222a3336 3974
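            /* The crc32 helper invoked above accumulates a CRC over 8 << ot
             * bits of the source using the Castagnoli polynomial (reflected
             * constant 0x82f63b78).  A minimal sketch of one reflected update
             * step, with an illustrative name, leaving any initial/final
             * inversion convention to the helper itself:
             *
             *   static uint32_t crc32c_step(uint32_t crc, uint64_t data, int bits)
             *   {
             *       int i;
             *       for (i = 0; i < bits; i++) {
             *           int bit = (crc ^ (uint32_t)data) & 1;
             *           crc >>= 1;
             *           data >>= 1;
             *           if (bit) {
             *               crc ^= 0x82f63b78;
             *           }
             *       }
             *       return crc;
             *   }
             */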
111994ee
RH
3975 case 0x1f0: /* crc32 or movbe */
3976 case 0x1f1:
            /* For these insns, the f3 prefix is supposed to have priority
               over the 66 prefix, but that's not what we implemented above
               when setting b1.  */
3980 if (s->prefix & PREFIX_REPNZ) {
3981 goto do_crc32;
3982 }
3983 /* FALLTHRU */
3984 case 0x0f0: /* movbe Gy,My */
3985 case 0x0f1: /* movbe My,Gy */
3986 if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3987 goto illegal_op;
3988 }
3989 if (s->dflag != 2) {
3990 ot = (s->prefix & PREFIX_DATA ? OT_WORD : OT_LONG);
3991 } else {
3992 ot = OT_QUAD;
3993 }
3994
3995 /* Load the data incoming to the bswap. Note that the TCG
3996 implementation of bswap requires the input be zero
3997 extended. In the case of the loads, we simply know that
3998 gen_op_ld_v via gen_ldst_modrm does that already. */
3999 if ((b & 1) == 0) {
4000 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4001 } else {
4002 switch (ot) {
4003 case OT_WORD:
4004 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[reg]);
4005 break;
4006 default:
4007 tcg_gen_ext32u_tl(cpu_T[0], cpu_regs[reg]);
4008 break;
4009 case OT_QUAD:
4010 tcg_gen_mov_tl(cpu_T[0], cpu_regs[reg]);
4011 break;
4012 }
4013 }
4014
4015 switch (ot) {
4016 case OT_WORD:
4017 tcg_gen_bswap16_tl(cpu_T[0], cpu_T[0]);
4018 break;
4019 default:
4020 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
4021 break;
4022#ifdef TARGET_X86_64
4023 case OT_QUAD:
4024 tcg_gen_bswap64_tl(cpu_T[0], cpu_T[0]);
4025 break;
4026#endif
4027 }
4028
4029 if ((b & 1) == 0) {
4030 gen_op_mov_reg_T0(ot, reg);
4031 } else {
4032 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
4033 }
4034 break;
4035
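            /* A minimal sketch of the movbe load form generated above: the
             * operand is fetched like an ordinary load and then byte-swapped
             * (the store form swaps before storing), which is why the comment
             * above insists on a zero-extended bswap input.  The name is
             * illustrative only.
             *
             *   static uint32_t bswap32_sketch(uint32_t v)
             *   {
             *       return (v >> 24) | ((v >> 8) & 0x0000ff00u)
             *            | ((v << 8) & 0x00ff0000u) | (v << 24);
             *   }
             */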
7073fbad
RH
4036 case 0x0f2: /* andn Gy, By, Ey */
4037 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4038 || !(s->prefix & PREFIX_VEX)
4039 || s->vex_l != 0) {
4040 goto illegal_op;
4041 }
4042 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4043 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4044 tcg_gen_andc_tl(cpu_T[0], cpu_regs[s->vex_v], cpu_T[0]);
4045 gen_op_mov_reg_T0(ot, reg);
4046 gen_op_update1_cc();
4047 set_cc_op(s, CC_OP_LOGICB + ot);
4048 break;
4049
c7ab7565
RH
4050 case 0x0f7: /* bextr Gy, Ey, By */
4051 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4052 || !(s->prefix & PREFIX_VEX)
4053 || s->vex_l != 0) {
4054 goto illegal_op;
4055 }
4056 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4057 {
4058 TCGv bound, zero;
4059
4060 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4061 /* Extract START, and shift the operand.
4062 Shifts larger than operand size get zeros. */
4063 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
4064 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_A0);
4065
4066 bound = tcg_const_tl(ot == OT_QUAD ? 63 : 31);
4067 zero = tcg_const_tl(0);
4068 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T[0], cpu_A0, bound,
4069 cpu_T[0], zero);
4070 tcg_temp_free(zero);
4071
4072 /* Extract the LEN into a mask. Lengths larger than
4073 operand size get all ones. */
4074 tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
4075 tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
4076 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
4077 cpu_A0, bound);
4078 tcg_temp_free(bound);
4079 tcg_gen_movi_tl(cpu_T[1], 1);
4080 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_A0);
4081 tcg_gen_subi_tl(cpu_T[1], cpu_T[1], 1);
4082 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4083
4084 gen_op_mov_reg_T0(ot, reg);
4085 gen_op_update1_cc();
4086 set_cc_op(s, CC_OP_LOGICB + ot);
4087 }
4088 break;
4089
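            /* A minimal C sketch of the 64-bit bextr computation generated
             * above (START is byte 0 and LEN is byte 1 of the By register,
             * each clamped against the operand-size bound); the name is
             * illustrative only.
             *
             *   static uint64_t bextr64_sketch(uint64_t src, unsigned start,
             *                                  unsigned len)
             *   {
             *       uint64_t v = (start > 63) ? 0 : src >> start;
             *       uint64_t mask = ((uint64_t)1 << (len > 63 ? 63 : len)) - 1;
             *       return v & mask;
             *   }
             */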
02ea1e6b
RH
4090 case 0x0f5: /* bzhi Gy, Ey, By */
4091 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4092 || !(s->prefix & PREFIX_VEX)
4093 || s->vex_l != 0) {
4094 goto illegal_op;
4095 }
4096 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4097 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4098 tcg_gen_ext8u_tl(cpu_T[1], cpu_regs[s->vex_v]);
4099 {
4100 TCGv bound = tcg_const_tl(ot == OT_QUAD ? 63 : 31);
4101 /* Note that since we're using BMILG (in order to get O
4102 cleared) we need to store the inverse into C. */
4103 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
4104 cpu_T[1], bound);
4105 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T[1], cpu_T[1],
4106 bound, bound, cpu_T[1]);
4107 tcg_temp_free(bound);
4108 }
4109 tcg_gen_movi_tl(cpu_A0, -1);
4110 tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T[1]);
4111 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_A0);
4112 gen_op_mov_reg_T0(ot, reg);
4113 gen_op_update1_cc();
4114 set_cc_op(s, CC_OP_BMILGB + ot);
4115 break;
4116
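            /* A minimal C sketch of the bzhi computation generated above: N
             * is the low byte of the By register, clamped to the operand-size
             * bound, and the bits at and above position N are cleared.  The
             * name is illustrative only.
             *
             *   static uint64_t bzhi64_sketch(uint64_t src, unsigned n)
             *   {
             *       uint64_t high;
             *       if (n > 63) {
             *           n = 63;
             *       }
             *       high = ~(uint64_t)0 << n;   // bits [63:n]
             *       return src & ~high;         // keep bits [n-1:0]
             *   }
             */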
5f1f4b17
RH
4117 case 0x3f6: /* mulx By, Gy, rdx, Ey */
4118 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4119 || !(s->prefix & PREFIX_VEX)
4120 || s->vex_l != 0) {
4121 goto illegal_op;
4122 }
4123 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4124 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4125 switch (ot) {
5f1f4b17 4126 default:
4127 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4128 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
4129 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4130 cpu_tmp2_i32, cpu_tmp3_i32);
4131 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
4132 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
5f1f4b17
RH
4133 break;
4134#ifdef TARGET_X86_64
4135 case OT_QUAD:
4136 tcg_gen_mulu2_i64(cpu_regs[s->vex_v], cpu_regs[reg],
4137 cpu_T[0], cpu_regs[R_EDX]);
5f1f4b17
RH
4138 break;
4139#endif
4140 }
4141 break;
4142
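            /* A minimal sketch of the 32-bit mulx path above: an unsigned
             * widening multiply of EDX by the r/m operand, low half to the
             * VEX.vvvv register, high half to the ModRM reg field, with no
             * condition codes touched (note the absence of set_cc_op here).
             * The name is illustrative only.
             *
             *   static void mulx32_sketch(uint32_t edx, uint32_t src,
             *                             uint32_t *lo, uint32_t *hi)
             *   {
             *       uint64_t p = (uint64_t)edx * src;
             *       *lo = (uint32_t)p;           // written to cpu_regs[s->vex_v]
             *       *hi = (uint32_t)(p >> 32);   // written to cpu_regs[reg]
             *   }
             */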
0592f74a
RH
4143 case 0x3f5: /* pdep Gy, By, Ey */
4144 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4145 || !(s->prefix & PREFIX_VEX)
4146 || s->vex_l != 0) {
4147 goto illegal_op;
4148 }
4149 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4150 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4151 /* Note that by zero-extending the mask operand, we
4152 automatically handle zero-extending the result. */
4153 if (s->dflag == 2) {
4154 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
4155 } else {
4156 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
4157 }
4158 gen_helper_pdep(cpu_regs[reg], cpu_T[0], cpu_T[1]);
4159 break;
4160
4161 case 0x2f5: /* pext Gy, By, Ey */
4162 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4163 || !(s->prefix & PREFIX_VEX)
4164 || s->vex_l != 0) {
4165 goto illegal_op;
4166 }
4167 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4168 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4169 /* Note that by zero-extending the mask operand, we
4170 automatically handle zero-extending the result. */
4171 if (s->dflag == 2) {
4172 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
4173 } else {
4174 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
4175 }
4176 gen_helper_pext(cpu_regs[reg], cpu_T[0], cpu_T[1]);
4177 break;
4178
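            /* A minimal sketch of the bit scatter/gather performed by the
             * pdep and pext helpers called above; "val" and "mask" are
             * abstract parameters here and are not meant to pin down which
             * instruction operand supplies which.
             *
             *   static uint64_t pdep_sketch(uint64_t val, uint64_t mask)
             *   {
             *       uint64_t dst = 0, bit = 1;
             *       for (; mask; mask &= mask - 1, bit <<= 1) {
             *           if (val & bit) {
             *               dst |= mask & -mask;   // lowest set bit of mask
             *           }
             *       }
             *       return dst;
             *   }
             *
             *   static uint64_t pext_sketch(uint64_t val, uint64_t mask)
             *   {
             *       uint64_t dst = 0, bit = 1;
             *       for (; mask; mask &= mask - 1, bit <<= 1) {
             *           if (val & mask & -mask) {
             *               dst |= bit;
             *           }
             *       }
             *       return dst;
             *   }
             */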
cd7f97ca
RH
4179 case 0x1f6: /* adcx Gy, Ey */
4180 case 0x2f6: /* adox Gy, Ey */
4181 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
4182 goto illegal_op;
4183 } else {
76f13133 4184 TCGv carry_in, carry_out, zero;
cd7f97ca
RH
4185 int end_op;
4186
4187 ot = (s->dflag == 2 ? OT_QUAD : OT_LONG);
4188 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4189
4190 /* Re-use the carry-out from a previous round. */
4191 TCGV_UNUSED(carry_in);
4192 carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
4193 switch (s->cc_op) {
4194 case CC_OP_ADCX:
4195 if (b == 0x1f6) {
4196 carry_in = cpu_cc_dst;
4197 end_op = CC_OP_ADCX;
4198 } else {
4199 end_op = CC_OP_ADCOX;
4200 }
4201 break;
4202 case CC_OP_ADOX:
4203 if (b == 0x1f6) {
4204 end_op = CC_OP_ADCOX;
4205 } else {
4206 carry_in = cpu_cc_src2;
4207 end_op = CC_OP_ADOX;
4208 }
4209 break;
4210 case CC_OP_ADCOX:
4211 end_op = CC_OP_ADCOX;
4212 carry_in = carry_out;
4213 break;
4214 default:
c53de1a2 4215 end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
cd7f97ca
RH
4216 break;
4217 }
4218 /* If we can't reuse carry-out, get it out of EFLAGS. */
4219 if (TCGV_IS_UNUSED(carry_in)) {
4220 if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
4221 gen_compute_eflags(s);
4222 }
4223 carry_in = cpu_tmp0;
4224 tcg_gen_shri_tl(carry_in, cpu_cc_src,
4225 ctz32(b == 0x1f6 ? CC_C : CC_O));
4226 tcg_gen_andi_tl(carry_in, carry_in, 1);
4227 }
4228
4229 switch (ot) {
4230#ifdef TARGET_X86_64
4231 case OT_LONG:
4232 /* If we know TL is 64-bit, and we want a 32-bit
4233 result, just do everything in 64-bit arithmetic. */
4234 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
4235 tcg_gen_ext32u_i64(cpu_T[0], cpu_T[0]);
4236 tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_regs[reg]);
4237 tcg_gen_add_i64(cpu_T[0], cpu_T[0], carry_in);
4238 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T[0]);
4239 tcg_gen_shri_i64(carry_out, cpu_T[0], 32);
4240 break;
4241#endif
4242 default:
4243 /* Otherwise compute the carry-out in two steps. */
76f13133
RH
4244 zero = tcg_const_tl(0);
4245 tcg_gen_add2_tl(cpu_T[0], carry_out,
4246 cpu_T[0], zero,
4247 carry_in, zero);
4248 tcg_gen_add2_tl(cpu_regs[reg], carry_out,
4249 cpu_regs[reg], carry_out,
4250 cpu_T[0], zero);
4251 tcg_temp_free(zero);
cd7f97ca
RH
4252 break;
4253 }
cd7f97ca
RH
4254 set_cc_op(s, end_op);
4255 }
4256 break;
4257
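            /* A minimal sketch of the two-step carry computation used in the
             * generic (non 32-bit-on-64-bit) path above; at most one of the
             * two partial additions can overflow, so the carry accumulates to
             * 0 or 1.  The name is illustrative only.
             *
             *   static void adx_sketch(uint64_t *dest, uint64_t src,
             *                          uint64_t *carry)
             *   {
             *       uint64_t t = src + *carry;   // carries only if src is all-ones
             *       uint64_t c1 = t < src;
             *       uint64_t r = *dest + t;
             *       uint64_t c2 = r < *dest;
             *       *dest = r;
             *       *carry = c1 + c2;            // goes to CF (adcx) or OF (adox)
             *   }
             */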
4a554890
RH
4258 case 0x1f7: /* shlx Gy, Ey, By */
4259 case 0x2f7: /* sarx Gy, Ey, By */
4260 case 0x3f7: /* shrx Gy, Ey, By */
4261 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4262 || !(s->prefix & PREFIX_VEX)
4263 || s->vex_l != 0) {
4264 goto illegal_op;
4265 }
4266 ot = (s->dflag == 2 ? OT_QUAD : OT_LONG);
4267 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4268 if (ot == OT_QUAD) {
4269 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 63);
4270 } else {
4271 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 31);
4272 }
4273 if (b == 0x1f7) {
4274 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4275 } else if (b == 0x2f7) {
4276 if (ot != OT_QUAD) {
4277 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4278 }
4279 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4280 } else {
4281 if (ot != OT_QUAD) {
4282 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4283 }
4284 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4285 }
4286 gen_op_mov_reg_T0(ot, reg);
4287 break;
4288
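            /* A minimal sketch of the 64-bit shrx/sarx/shlx semantics
             * generated above: the count from the By register is masked to
             * the operand size and the condition codes are left untouched.
             * Names are illustrative only.
             *
             *   static uint64_t shrx64_sketch(uint64_t src, uint64_t count)
             *   {
             *       return src >> (count & 63);
             *   }
             *
             *   static int64_t sarx64_sketch(int64_t src, uint64_t count)
             *   {
             *       return src >> (count & 63);   // arithmetic shift
             *   }
             */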
bc4b43dc
RH
4289 case 0x0f3:
4290 case 0x1f3:
4291 case 0x2f3:
4292 case 0x3f3: /* Group 17 */
4293 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4294 || !(s->prefix & PREFIX_VEX)
4295 || s->vex_l != 0) {
4296 goto illegal_op;
4297 }
4298 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4299 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4300
4301 switch (reg & 7) {
            case 1: /* blsr By,Ey */
                tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
                tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
                gen_op_mov_reg_T0(ot, s->vex_v);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                set_cc_op(s, CC_OP_BMILGB + ot);
                break;

            case 2: /* blsmsk By,Ey */
                tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
                tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
                gen_op_mov_reg_T0(ot, s->vex_v);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                set_cc_op(s, CC_OP_BMILGB + ot);
                break;

            case 3: /* blsi By, Ey */
                tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                tcg_gen_neg_tl(cpu_T[1], cpu_T[0]);
                tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(ot, s->vex_v);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                set_cc_op(s, CC_OP_BMILGB + ot);
                break;
4325
4326 default:
4327 goto illegal_op;
4328 }
4329 break;
4330
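            /* The three Group 17 operations above reduce to well-known bit
             * identities on the source operand x:
             *
             *   blsr(x)   = x & (x - 1);   // clear the lowest set bit
             *   blsmsk(x) = x ^ (x - 1);   // mask up to the lowest set bit
             *   blsi(x)   = x & -x;        // isolate the lowest set bit
             */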
111994ee
RH
4331 default:
4332 goto illegal_op;
4333 }
222a3336 4334 break;
111994ee 4335
222a3336
AZ
4336 case 0x03a:
4337 case 0x13a:
4242b1bd 4338 b = modrm;
0af10c86 4339 modrm = cpu_ldub_code(env, s->pc++);
4242b1bd
AZ
4340 rm = modrm & 7;
4341 reg = ((modrm >> 3) & 7) | rex_r;
4342 mod = (modrm >> 6) & 3;
c045af25
AK
4343 if (b1 >= 2) {
4344 goto illegal_op;
4345 }
4242b1bd 4346
d3eb5eae
BS
4347 sse_fn_eppi = sse_op_table7[b].op[b1];
4348 if (!sse_fn_eppi) {
4242b1bd 4349 goto illegal_op;
c4baa050 4350 }
222a3336
AZ
4351 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
4352 goto illegal_op;
4353
d3eb5eae 4354 if (sse_fn_eppi == SSE_SPECIAL) {
222a3336
AZ
4355 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
4356 rm = (modrm & 7) | REX_B(s);
4357 if (mod != 3)
0af10c86 4358 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
222a3336 4359 reg = ((modrm >> 3) & 7) | rex_r;
0af10c86 4360 val = cpu_ldub_code(env, s->pc++);
222a3336
AZ
4361 switch (b) {
4362 case 0x14: /* pextrb */
4363 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4364 xmm_regs[reg].XMM_B(val & 15)));
4365 if (mod == 3)
4366 gen_op_mov_reg_T0(ot, rm);
4367 else
4368 tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
4369 (s->mem_index >> 2) - 1);
4370 break;
4371 case 0x15: /* pextrw */
4372 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4373 xmm_regs[reg].XMM_W(val & 7)));
4374 if (mod == 3)
4375 gen_op_mov_reg_T0(ot, rm);
4376 else
4377 tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
4378 (s->mem_index >> 2) - 1);
4379 break;
4380 case 0x16:
4381 if (ot == OT_LONG) { /* pextrd */
4382 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4383 offsetof(CPUX86State,
4384 xmm_regs[reg].XMM_L(val & 3)));
a7812ae4 4385 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
222a3336 4386 if (mod == 3)
a7812ae4 4387 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
222a3336 4388 else
a7812ae4 4389 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
222a3336
AZ
4390 (s->mem_index >> 2) - 1);
4391 } else { /* pextrq */
a7812ae4 4392#ifdef TARGET_X86_64
222a3336
AZ
4393 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4394 offsetof(CPUX86State,
4395 xmm_regs[reg].XMM_Q(val & 1)));
4396 if (mod == 3)
4397 gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
4398 else
4399 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
4400 (s->mem_index >> 2) - 1);
a7812ae4
PB
4401#else
4402 goto illegal_op;
4403#endif
222a3336
AZ
4404 }
4405 break;
4406 case 0x17: /* extractps */
4407 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4408 xmm_regs[reg].XMM_L(val & 3)));
4409 if (mod == 3)
4410 gen_op_mov_reg_T0(ot, rm);
4411 else
4412 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
4413 (s->mem_index >> 2) - 1);
4414 break;
4415 case 0x20: /* pinsrb */
4416 if (mod == 3)
4417 gen_op_mov_TN_reg(OT_LONG, 0, rm);
4418 else
34c6addd 4419 tcg_gen_qemu_ld8u(cpu_T[0], cpu_A0,
222a3336 4420 (s->mem_index >> 2) - 1);
34c6addd 4421 tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
222a3336
AZ
4422 xmm_regs[reg].XMM_B(val & 15)));
4423 break;
4424 case 0x21: /* insertps */
a7812ae4 4425 if (mod == 3) {
222a3336
AZ
4426 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4427 offsetof(CPUX86State,xmm_regs[rm]
4428 .XMM_L((val >> 6) & 3)));
a7812ae4
PB
4429 } else {
4430 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
222a3336 4431 (s->mem_index >> 2) - 1);
a7812ae4
PB
4432 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
4433 }
222a3336
AZ
4434 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4435 offsetof(CPUX86State,xmm_regs[reg]
4436 .XMM_L((val >> 4) & 3)));
4437 if ((val >> 0) & 1)
4438 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4439 cpu_env, offsetof(CPUX86State,
4440 xmm_regs[reg].XMM_L(0)));
4441 if ((val >> 1) & 1)
4442 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4443 cpu_env, offsetof(CPUX86State,
4444 xmm_regs[reg].XMM_L(1)));
4445 if ((val >> 2) & 1)
4446 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4447 cpu_env, offsetof(CPUX86State,
4448 xmm_regs[reg].XMM_L(2)));
4449 if ((val >> 3) & 1)
4450 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4451 cpu_env, offsetof(CPUX86State,
4452 xmm_regs[reg].XMM_L(3)));
4453 break;
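                /* A minimal sketch of how the insertps immediate is consumed
                 * by the code above (matching the shift/mask expressions
                 * used):
                 *
                 *   count_s = (val >> 6) & 3;  // source dword, register source only
                 *   count_d = (val >> 4) & 3;  // destination dword being replaced
                 *   zmask   =  val & 0xf;      // dwords cleared afterwards, one bit each
                 */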
4454 case 0x22:
4455 if (ot == OT_LONG) { /* pinsrd */
4456 if (mod == 3)
a7812ae4 4457 gen_op_mov_v_reg(ot, cpu_tmp0, rm);
222a3336 4458 else
a7812ae4 4459 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
222a3336 4460 (s->mem_index >> 2) - 1);
a7812ae4 4461 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
222a3336
AZ
4462 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4463 offsetof(CPUX86State,
4464 xmm_regs[reg].XMM_L(val & 3)));
4465 } else { /* pinsrq */
a7812ae4 4466#ifdef TARGET_X86_64
222a3336
AZ
4467 if (mod == 3)
4468 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4469 else
4470 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
4471 (s->mem_index >> 2) - 1);
4472 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4473 offsetof(CPUX86State,
4474 xmm_regs[reg].XMM_Q(val & 1)));
a7812ae4
PB
4475#else
4476 goto illegal_op;
4477#endif
222a3336
AZ
4478 }
4479 break;
4480 }
4481 return;
4482 }
4242b1bd
AZ
4483
4484 if (b1) {
4485 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4486 if (mod == 3) {
4487 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4488 } else {
4489 op2_offset = offsetof(CPUX86State,xmm_t0);
0af10c86 4490 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4242b1bd
AZ
4491 gen_ldo_env_A0(s->mem_index, op2_offset);
4492 }
4493 } else {
4494 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4495 if (mod == 3) {
4496 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4497 } else {
4498 op2_offset = offsetof(CPUX86State,mmx_t0);
0af10c86 4499 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4242b1bd
AZ
4500 gen_ldq_env_A0(s->mem_index, op2_offset);
4501 }
4502 }
0af10c86 4503 val = cpu_ldub_code(env, s->pc++);
4242b1bd 4504
222a3336 4505 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
3ca51d07 4506 set_cc_op(s, CC_OP_EFLAGS);
222a3336
AZ
4507
4508 if (s->dflag == 2)
4509 /* The helper must use entire 64-bit gp registers */
4510 val |= 1 << 8;
4511 }
4512
4242b1bd
AZ
4513 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4514 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4515 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4242b1bd 4516 break;
e2c3c2c5
RH
4517
4518 case 0x33a:
4519 /* Various integer extensions at 0f 3a f[0-f]. */
4520 b = modrm | (b1 << 8);
4521 modrm = cpu_ldub_code(env, s->pc++);
4522 reg = ((modrm >> 3) & 7) | rex_r;
4523
4524 switch (b) {
4525 case 0x3f0: /* rorx Gy,Ey, Ib */
4526 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4527 || !(s->prefix & PREFIX_VEX)
4528 || s->vex_l != 0) {
4529 goto illegal_op;
4530 }
4531 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4532 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4533 b = cpu_ldub_code(env, s->pc++);
4534 if (ot == OT_QUAD) {
4535 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], b & 63);
4536 } else {
4537 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4538 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
4539 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4540 }
4541 gen_op_mov_reg_T0(ot, reg);
4542 break;
4543
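            /* A minimal sketch of the 32-bit rorx path above: a rotate right
             * by an immediate with no condition-code update; the name is
             * illustrative only.
             *
             *   static uint32_t rorx32_sketch(uint32_t src, unsigned imm)
             *   {
             *       unsigned r = imm & 31;
             *       return r ? (src >> r) | (src << (32 - r)) : src;
             *   }
             */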
4544 default:
4545 goto illegal_op;
4546 }
4547 break;
4548
664e0f19
FB
4549 default:
4550 goto illegal_op;
4551 }
4552 } else {
4553 /* generic MMX or SSE operation */
d1e42c5c 4554 switch(b) {
d1e42c5c
FB
4555 case 0x70: /* pshufx insn */
4556 case 0xc6: /* pshufx insn */
4557 case 0xc2: /* compare insns */
4558 s->rip_offset = 1;
4559 break;
4560 default:
4561 break;
664e0f19
FB
4562 }
4563 if (is_xmm) {
4564 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4565 if (mod != 3) {
0af10c86 4566 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
664e0f19 4567 op2_offset = offsetof(CPUX86State,xmm_t0);
480c1cdb 4568 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
664e0f19
FB
4569 b == 0xc2)) {
4570 /* specific case for SSE single instructions */
4571 if (b1 == 2) {
4572 /* 32 bit access */
57fec1fe 4573 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
651ba608 4574 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
664e0f19
FB
4575 } else {
4576 /* 64 bit access */
8686c490 4577 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
664e0f19
FB
4578 }
4579 } else {
8686c490 4580 gen_ldo_env_A0(s->mem_index, op2_offset);
664e0f19
FB
4581 }
4582 } else {
4583 rm = (modrm & 7) | REX_B(s);
4584 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4585 }
4586 } else {
4587 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4588 if (mod != 3) {
0af10c86 4589 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
664e0f19 4590 op2_offset = offsetof(CPUX86State,mmx_t0);
8686c490 4591 gen_ldq_env_A0(s->mem_index, op2_offset);
664e0f19
FB
4592 } else {
4593 rm = (modrm & 7);
4594 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4595 }
4596 }
4597 switch(b) {
a35f3ec7 4598 case 0x0f: /* 3DNow! data insns */
4599 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4600 goto illegal_op;
0af10c86 4601 val = cpu_ldub_code(env, s->pc++);
d3eb5eae
BS
4602 sse_fn_epp = sse_op_table5[val];
4603 if (!sse_fn_epp) {
a35f3ec7 4604 goto illegal_op;
c4baa050 4605 }
5af45186
FB
4606 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4607 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4608 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
a35f3ec7 4609 break;
664e0f19
FB
4610 case 0x70: /* pshufx insn */
4611 case 0xc6: /* pshufx insn */
0af10c86 4612 val = cpu_ldub_code(env, s->pc++);
5af45186
FB
4613 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4614 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
c4baa050 4615 /* XXX: introduce a new table? */
d3eb5eae 4616 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
c4baa050 4617 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
664e0f19
FB
4618 break;
4619 case 0xc2:
4620 /* compare insns */
0af10c86 4621 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
4622 if (val >= 8)
4623 goto illegal_op;
d3eb5eae 4624 sse_fn_epp = sse_op_table4[val][b1];
c4baa050 4625
5af45186
FB
4626 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4627 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4628 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19 4629 break;
b8b6a50b
FB
4630 case 0xf7:
4631 /* maskmov : we must prepare A0 */
4632 if (mod != 3)
4633 goto illegal_op;
4634#ifdef TARGET_X86_64
4635 if (s->aflag == 2) {
4636 gen_op_movq_A0_reg(R_EDI);
4637 } else
4638#endif
4639 {
4640 gen_op_movl_A0_reg(R_EDI);
4641 if (s->aflag == 0)
4642 gen_op_andl_A0_ffff();
4643 }
4644 gen_add_A0_ds_seg(s);
4645
4646 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4647 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
c4baa050 4648 /* XXX: introduce a new table? */
4649 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4650 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
b8b6a50b 4651 break;
664e0f19 4652 default:
5af45186
FB
4653 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4654 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4655 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
4656 break;
4657 }
4658 if (b == 0x2e || b == 0x2f) {
3ca51d07 4659 set_cc_op(s, CC_OP_EFLAGS);
664e0f19
FB
4660 }
4661 }
4662}
4663
2c0262af
FB
4664/* convert one instruction. s->is_jmp is set if the translation must
4665 be stopped. Return the next pc value */
0af10c86
BS
4666static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4667 target_ulong pc_start)
2c0262af
FB
4668{
4669 int b, prefixes, aflag, dflag;
4670 int shift, ot;
4671 int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
14ce26e7
FB
4672 target_ulong next_eip, tval;
4673 int rex_w, rex_r;
2c0262af 4674
fdefe51c 4675 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
70cff25e 4676 tcg_gen_debug_insn_start(pc_start);
fdefe51c 4677 }
2c0262af
FB
4678 s->pc = pc_start;
4679 prefixes = 0;
2c0262af 4680 s->override = -1;
14ce26e7
FB
4681 rex_w = -1;
4682 rex_r = 0;
4683#ifdef TARGET_X86_64
4684 s->rex_x = 0;
4685 s->rex_b = 0;
5fafdf24 4686 x86_64_hregs = 0;
14ce26e7
FB
4687#endif
4688 s->rip_offset = 0; /* for relative ip address */
701ed211
RH
4689 s->vex_l = 0;
4690 s->vex_v = 0;
2c0262af 4691 next_byte:
0af10c86 4692 b = cpu_ldub_code(env, s->pc);
2c0262af 4693 s->pc++;
4a6fd938
RH
4694 /* Collect prefixes. */
4695 switch (b) {
4696 case 0xf3:
4697 prefixes |= PREFIX_REPZ;
4698 goto next_byte;
4699 case 0xf2:
4700 prefixes |= PREFIX_REPNZ;
4701 goto next_byte;
4702 case 0xf0:
4703 prefixes |= PREFIX_LOCK;
4704 goto next_byte;
4705 case 0x2e:
4706 s->override = R_CS;
4707 goto next_byte;
4708 case 0x36:
4709 s->override = R_SS;
4710 goto next_byte;
4711 case 0x3e:
4712 s->override = R_DS;
4713 goto next_byte;
4714 case 0x26:
4715 s->override = R_ES;
4716 goto next_byte;
4717 case 0x64:
4718 s->override = R_FS;
4719 goto next_byte;
4720 case 0x65:
4721 s->override = R_GS;
4722 goto next_byte;
4723 case 0x66:
4724 prefixes |= PREFIX_DATA;
4725 goto next_byte;
4726 case 0x67:
4727 prefixes |= PREFIX_ADR;
4728 goto next_byte;
14ce26e7 4729#ifdef TARGET_X86_64
4a6fd938
RH
4730 case 0x40 ... 0x4f:
4731 if (CODE64(s)) {
14ce26e7
FB
4732 /* REX prefix */
4733 rex_w = (b >> 3) & 1;
4734 rex_r = (b & 0x4) << 1;
4735 s->rex_x = (b & 0x2) << 2;
4736 REX_B(s) = (b & 0x1) << 3;
4737 x86_64_hregs = 1; /* select uniform byte register addressing */
4738 goto next_byte;
4739 }
4a6fd938
RH
4740 break;
4741#endif
701ed211
RH
4742 case 0xc5: /* 2-byte VEX */
4743 case 0xc4: /* 3-byte VEX */
4744 /* VEX prefixes cannot be used except in 32-bit mode.
4745 Otherwise the instruction is LES or LDS. */
4746 if (s->code32 && !s->vm86) {
4747 static const int pp_prefix[4] = {
4748 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4749 };
4750 int vex3, vex2 = cpu_ldub_code(env, s->pc);
4751
4752 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4753 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4754 otherwise the instruction is LES or LDS. */
4755 break;
4756 }
4757 s->pc++;
4758
085d8134 4759 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
701ed211
RH
4760 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4761 | PREFIX_LOCK | PREFIX_DATA)) {
4762 goto illegal_op;
4763 }
4764#ifdef TARGET_X86_64
4765 if (x86_64_hregs) {
4766 goto illegal_op;
4767 }
4768#endif
4769 rex_r = (~vex2 >> 4) & 8;
4770 if (b == 0xc5) {
4771 vex3 = vex2;
4772 b = cpu_ldub_code(env, s->pc++);
4773 } else {
4774#ifdef TARGET_X86_64
4775 s->rex_x = (~vex2 >> 3) & 8;
4776 s->rex_b = (~vex2 >> 2) & 8;
4777#endif
4778 vex3 = cpu_ldub_code(env, s->pc++);
4779 rex_w = (vex3 >> 7) & 1;
4780 switch (vex2 & 0x1f) {
4781 case 0x01: /* Implied 0f leading opcode bytes. */
4782 b = cpu_ldub_code(env, s->pc++) | 0x100;
4783 break;
4784 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4785 b = 0x138;
4786 break;
4787 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4788 b = 0x13a;
4789 break;
4790 default: /* Reserved for future use. */
4791 goto illegal_op;
4792 }
4793 }
4794 s->vex_v = (~vex3 >> 3) & 0xf;
4795 s->vex_l = (vex3 >> 2) & 1;
4796 prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4797 }
4798 break;
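        /* A minimal summary of the VEX byte layouts decoded above; fields
         * marked with ~ are stored inverted in the instruction encoding:
         *
         *   2-byte form:  c5  [~R  ~vvvv  L  pp]
         *   3-byte form:  c4  [~R ~X ~B  m-mmmm]  [W  ~vvvv  L  pp]
         *
         * pp selects an implied 66/f3/f2 prefix, m-mmmm selects the implied
         * 0f, 0f 38 or 0f 3a leading bytes, vvvv names an extra source
         * register, and L is the vector length bit.
         */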
4a6fd938
RH
4799 }
4800
4801 /* Post-process prefixes. */
4a6fd938 4802 if (CODE64(s)) {
dec3fc96
RH
4803 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4804 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4805 over 0x66 if both are present. */
4806 dflag = (rex_w > 0 ? 2 : prefixes & PREFIX_DATA ? 0 : 1);
4807 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4808 aflag = (prefixes & PREFIX_ADR ? 1 : 2);
4809 } else {
4810 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4811 dflag = s->code32;
4812 if (prefixes & PREFIX_DATA) {
4813 dflag ^= 1;
14ce26e7 4814 }
dec3fc96
RH
4815 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4816 aflag = s->code32;
4817 if (prefixes & PREFIX_ADR) {
4818 aflag ^= 1;
14ce26e7 4819 }
2c0262af
FB
4820 }
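    /* A minimal summary of the operand/address size selection above, with
     * dflag/aflag encoded as 0 = 16-bit, 1 = 32-bit, 2 = 64-bit:
     *
     *   64-bit code:    dflag = rex.w ? 64 : (0x66 ? 16 : 32)
     *                   aflag = 0x67 ? 32 : 64
     *   16/32-bit code: each of 0x66 / 0x67 flips the CS default size.
     */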
4821
2c0262af
FB
4822 s->prefix = prefixes;
4823 s->aflag = aflag;
4824 s->dflag = dflag;
4825
4826 /* lock generation */
4827 if (prefixes & PREFIX_LOCK)
a7812ae4 4828 gen_helper_lock();
2c0262af
FB
4829
4830 /* now check op code */
4831 reswitch:
4832 switch(b) {
4833 case 0x0f:
4834 /**************************/
4835 /* extended op code */
0af10c86 4836 b = cpu_ldub_code(env, s->pc++) | 0x100;
2c0262af 4837 goto reswitch;
3b46e624 4838
2c0262af
FB
4839 /**************************/
4840 /* arith & logic */
4841 case 0x00 ... 0x05:
4842 case 0x08 ... 0x0d:
4843 case 0x10 ... 0x15:
4844 case 0x18 ... 0x1d:
4845 case 0x20 ... 0x25:
4846 case 0x28 ... 0x2d:
4847 case 0x30 ... 0x35:
4848 case 0x38 ... 0x3d:
4849 {
4850 int op, f, val;
4851 op = (b >> 3) & 7;
4852 f = (b >> 1) & 3;
4853
4854 if ((b & 1) == 0)
4855 ot = OT_BYTE;
4856 else
14ce26e7 4857 ot = dflag + OT_WORD;
3b46e624 4858
2c0262af
FB
4859 switch(f) {
4860 case 0: /* OP Ev, Gv */
0af10c86 4861 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 4862 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 4863 mod = (modrm >> 6) & 3;
14ce26e7 4864 rm = (modrm & 7) | REX_B(s);
2c0262af 4865 if (mod != 3) {
0af10c86 4866 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af
FB
4867 opreg = OR_TMP0;
4868 } else if (op == OP_XORL && rm == reg) {
4869 xor_zero:
4870 /* xor reg, reg optimisation */
436ff2d2 4871 set_cc_op(s, CC_OP_CLR);
2c0262af 4872 gen_op_movl_T0_0();
57fec1fe 4873 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
4874 break;
4875 } else {
4876 opreg = rm;
4877 }
57fec1fe 4878 gen_op_mov_TN_reg(ot, 1, reg);
2c0262af
FB
4879 gen_op(s, op, ot, opreg);
4880 break;
4881 case 1: /* OP Gv, Ev */
0af10c86 4882 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4883 mod = (modrm >> 6) & 3;
14ce26e7
FB
4884 reg = ((modrm >> 3) & 7) | rex_r;
4885 rm = (modrm & 7) | REX_B(s);
2c0262af 4886 if (mod != 3) {
0af10c86 4887 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
57fec1fe 4888 gen_op_ld_T1_A0(ot + s->mem_index);
2c0262af
FB
4889 } else if (op == OP_XORL && rm == reg) {
4890 goto xor_zero;
4891 } else {
57fec1fe 4892 gen_op_mov_TN_reg(ot, 1, rm);
2c0262af
FB
4893 }
4894 gen_op(s, op, ot, reg);
4895 break;
4896 case 2: /* OP A, Iv */
0af10c86 4897 val = insn_get(env, s, ot);
2c0262af
FB
4898 gen_op_movl_T1_im(val);
4899 gen_op(s, op, ot, OR_EAX);
4900 break;
4901 }
4902 }
4903 break;
4904
ec9d6075
FB
4905 case 0x82:
4906 if (CODE64(s))
4907 goto illegal_op;
2c0262af
FB
4908 case 0x80: /* GRP1 */
4909 case 0x81:
4910 case 0x83:
4911 {
4912 int val;
4913
4914 if ((b & 1) == 0)
4915 ot = OT_BYTE;
4916 else
14ce26e7 4917 ot = dflag + OT_WORD;
3b46e624 4918
0af10c86 4919 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4920 mod = (modrm >> 6) & 3;
14ce26e7 4921 rm = (modrm & 7) | REX_B(s);
2c0262af 4922 op = (modrm >> 3) & 7;
3b46e624 4923
2c0262af 4924 if (mod != 3) {
14ce26e7
FB
4925 if (b == 0x83)
4926 s->rip_offset = 1;
4927 else
4928 s->rip_offset = insn_const_size(ot);
0af10c86 4929 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af
FB
4930 opreg = OR_TMP0;
4931 } else {
14ce26e7 4932 opreg = rm;
2c0262af
FB
4933 }
4934
4935 switch(b) {
4936 default:
4937 case 0x80:
4938 case 0x81:
d64477af 4939 case 0x82:
0af10c86 4940 val = insn_get(env, s, ot);
2c0262af
FB
4941 break;
4942 case 0x83:
0af10c86 4943 val = (int8_t)insn_get(env, s, OT_BYTE);
2c0262af
FB
4944 break;
4945 }
4946 gen_op_movl_T1_im(val);
4947 gen_op(s, op, ot, opreg);
4948 }
4949 break;
4950
4951 /**************************/
4952 /* inc, dec, and other misc arith */
4953 case 0x40 ... 0x47: /* inc Gv */
4954 ot = dflag ? OT_LONG : OT_WORD;
4955 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4956 break;
4957 case 0x48 ... 0x4f: /* dec Gv */
4958 ot = dflag ? OT_LONG : OT_WORD;
4959 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4960 break;
4961 case 0xf6: /* GRP3 */
4962 case 0xf7:
4963 if ((b & 1) == 0)
4964 ot = OT_BYTE;
4965 else
14ce26e7 4966 ot = dflag + OT_WORD;
2c0262af 4967
0af10c86 4968 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4969 mod = (modrm >> 6) & 3;
14ce26e7 4970 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
4971 op = (modrm >> 3) & 7;
4972 if (mod != 3) {
14ce26e7
FB
4973 if (op == 0)
4974 s->rip_offset = insn_const_size(ot);
0af10c86 4975 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
57fec1fe 4976 gen_op_ld_T0_A0(ot + s->mem_index);
2c0262af 4977 } else {
57fec1fe 4978 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af
FB
4979 }
4980
4981 switch(op) {
4982 case 0: /* test */
0af10c86 4983 val = insn_get(env, s, ot);
2c0262af
FB
4984 gen_op_movl_T1_im(val);
4985 gen_op_testl_T0_T1_cc();
3ca51d07 4986 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af
FB
4987 break;
4988 case 2: /* not */
b6abf97d 4989 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
2c0262af 4990 if (mod != 3) {
57fec1fe 4991 gen_op_st_T0_A0(ot + s->mem_index);
2c0262af 4992 } else {
57fec1fe 4993 gen_op_mov_reg_T0(ot, rm);
2c0262af
FB
4994 }
4995 break;
4996 case 3: /* neg */
b6abf97d 4997 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
2c0262af 4998 if (mod != 3) {
57fec1fe 4999 gen_op_st_T0_A0(ot + s->mem_index);
2c0262af 5000 } else {
57fec1fe 5001 gen_op_mov_reg_T0(ot, rm);
2c0262af
FB
5002 }
5003 gen_op_update_neg_cc();
3ca51d07 5004 set_cc_op(s, CC_OP_SUBB + ot);
2c0262af
FB
5005 break;
5006 case 4: /* mul */
5007 switch(ot) {
5008 case OT_BYTE:
0211e5af
FB
5009 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
5010 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5011 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
5012 /* XXX: use 32 bit mul which could be faster */
5013 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5014 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5015 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5016 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
3ca51d07 5017 set_cc_op(s, CC_OP_MULB);
2c0262af
FB
5018 break;
5019 case OT_WORD:
0211e5af
FB
5020 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
5021 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5022 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
5023 /* XXX: use 32 bit mul which could be faster */
5024 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5025 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5026 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5027 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
5028 gen_op_mov_reg_T0(OT_WORD, R_EDX);
5029 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
3ca51d07 5030 set_cc_op(s, CC_OP_MULW);
2c0262af
FB
5031 break;
5032 default:
5033 case OT_LONG:
a4bcea3d
RH
5034 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5035 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
5036 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5037 cpu_tmp2_i32, cpu_tmp3_i32);
5038 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
5039 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
5040 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
5041 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 5042 set_cc_op(s, CC_OP_MULL);
2c0262af 5043 break;
14ce26e7
FB
5044#ifdef TARGET_X86_64
5045 case OT_QUAD:
a4bcea3d
RH
5046 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
5047 cpu_T[0], cpu_regs[R_EAX]);
5048 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
5049 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 5050 set_cc_op(s, CC_OP_MULQ);
14ce26e7
FB
5051 break;
5052#endif
2c0262af 5053 }
2c0262af
FB
5054 break;
5055 case 5: /* imul */
5056 switch(ot) {
5057 case OT_BYTE:
0211e5af
FB
5058 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
5059 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5060 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
5061 /* XXX: use 32 bit mul which could be faster */
5062 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5063 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5064 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5065 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
5066 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
3ca51d07 5067 set_cc_op(s, CC_OP_MULB);
2c0262af
FB
5068 break;
5069 case OT_WORD:
0211e5af
FB
5070 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
5071 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5072 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5073 /* XXX: use 32 bit mul which could be faster */
5074 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5075 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5076 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5077 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5078 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
5079 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
5080 gen_op_mov_reg_T0(OT_WORD, R_EDX);
3ca51d07 5081 set_cc_op(s, CC_OP_MULW);
2c0262af
FB
5082 break;
5083 default:
5084 case OT_LONG:
a4bcea3d
RH
5085 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5086 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
5087 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5088 cpu_tmp2_i32, cpu_tmp3_i32);
5089 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
5090 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
5091 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5092 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
5093 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5094 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
3ca51d07 5095 set_cc_op(s, CC_OP_MULL);
2c0262af 5096 break;
14ce26e7
FB
5097#ifdef TARGET_X86_64
5098 case OT_QUAD:
a4bcea3d
RH
5099 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
5100 cpu_T[0], cpu_regs[R_EAX]);
5101 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
5102 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
5103 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 5104 set_cc_op(s, CC_OP_MULQ);
14ce26e7
FB
5105 break;
5106#endif
2c0262af 5107 }
2c0262af
FB
5108 break;
5109 case 6: /* div */
5110 switch(ot) {
5111 case OT_BYTE:
14ce26e7 5112 gen_jmp_im(pc_start - s->cs_base);
7923057b 5113 gen_helper_divb_AL(cpu_env, cpu_T[0]);
2c0262af
FB
5114 break;
5115 case OT_WORD:
14ce26e7 5116 gen_jmp_im(pc_start - s->cs_base);
7923057b 5117 gen_helper_divw_AX(cpu_env, cpu_T[0]);
2c0262af
FB
5118 break;
5119 default:
5120 case OT_LONG:
14ce26e7 5121 gen_jmp_im(pc_start - s->cs_base);
7923057b 5122 gen_helper_divl_EAX(cpu_env, cpu_T[0]);
14ce26e7
FB
5123 break;
5124#ifdef TARGET_X86_64
5125 case OT_QUAD:
5126 gen_jmp_im(pc_start - s->cs_base);
7923057b 5127 gen_helper_divq_EAX(cpu_env, cpu_T[0]);
2c0262af 5128 break;
14ce26e7 5129#endif
2c0262af
FB
5130 }
5131 break;
5132 case 7: /* idiv */
5133 switch(ot) {
5134 case OT_BYTE:
14ce26e7 5135 gen_jmp_im(pc_start - s->cs_base);
7923057b 5136 gen_helper_idivb_AL(cpu_env, cpu_T[0]);
2c0262af
FB
5137 break;
5138 case OT_WORD:
14ce26e7 5139 gen_jmp_im(pc_start - s->cs_base);
7923057b 5140 gen_helper_idivw_AX(cpu_env, cpu_T[0]);
2c0262af
FB
5141 break;
5142 default:
5143 case OT_LONG:
14ce26e7 5144 gen_jmp_im(pc_start - s->cs_base);
7923057b 5145 gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
14ce26e7
FB
5146 break;
5147#ifdef TARGET_X86_64
5148 case OT_QUAD:
5149 gen_jmp_im(pc_start - s->cs_base);
7923057b 5150 gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
2c0262af 5151 break;
14ce26e7 5152#endif
2c0262af
FB
5153 }
5154 break;
5155 default:
5156 goto illegal_op;
5157 }
5158 break;
5159
5160 case 0xfe: /* GRP4 */
5161 case 0xff: /* GRP5 */
5162 if ((b & 1) == 0)
5163 ot = OT_BYTE;
5164 else
14ce26e7 5165 ot = dflag + OT_WORD;
2c0262af 5166
0af10c86 5167 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5168 mod = (modrm >> 6) & 3;
14ce26e7 5169 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
5170 op = (modrm >> 3) & 7;
5171 if (op >= 2 && b == 0xfe) {
5172 goto illegal_op;
5173 }
14ce26e7 5174 if (CODE64(s)) {
aba9d61e 5175 if (op == 2 || op == 4) {
14ce26e7
FB
5176 /* operand size for jumps is 64 bit */
5177 ot = OT_QUAD;
aba9d61e 5178 } else if (op == 3 || op == 5) {
41b1e61f 5179 ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
14ce26e7
FB
5180 } else if (op == 6) {
5181 /* default push size is 64 bit */
5182 ot = dflag ? OT_QUAD : OT_WORD;
5183 }
5184 }
2c0262af 5185 if (mod != 3) {
0af10c86 5186 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af 5187 if (op >= 2 && op != 3 && op != 5)
57fec1fe 5188 gen_op_ld_T0_A0(ot + s->mem_index);
2c0262af 5189 } else {
57fec1fe 5190 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af
FB
5191 }
5192
5193 switch(op) {
5194 case 0: /* inc Ev */
5195 if (mod != 3)
5196 opreg = OR_TMP0;
5197 else
5198 opreg = rm;
5199 gen_inc(s, ot, opreg, 1);
5200 break;
5201 case 1: /* dec Ev */
5202 if (mod != 3)
5203 opreg = OR_TMP0;
5204 else
5205 opreg = rm;
5206 gen_inc(s, ot, opreg, -1);
5207 break;
5208 case 2: /* call Ev */
4f31916f 5209 /* XXX: optimize the memory-operand case (the 'and' is not needed there) */
2c0262af
FB
5210 if (s->dflag == 0)
5211 gen_op_andl_T0_ffff();
2c0262af 5212 next_eip = s->pc - s->cs_base;
1ef38687 5213 gen_movtl_T1_im(next_eip);
4f31916f
FB
5214 gen_push_T1(s);
5215 gen_op_jmp_T0();
2c0262af
FB
5216 gen_eob(s);
5217 break;
61382a50 5218 case 3: /* lcall Ev */
57fec1fe 5219 gen_op_ld_T1_A0(ot + s->mem_index);
aba9d61e 5220 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
57fec1fe 5221 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
2c0262af
FB
5222 do_lcall:
5223 if (s->pe && !s->vm86) {
773cdfcc 5224 gen_update_cc_op(s);
14ce26e7 5225 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 5226 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2
BS
5227 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
5228 tcg_const_i32(dflag),
a7812ae4 5229 tcg_const_i32(s->pc - pc_start));
2c0262af 5230 } else {
b6abf97d 5231 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2
BS
5232 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
5233 tcg_const_i32(dflag),
a7812ae4 5234 tcg_const_i32(s->pc - s->cs_base));
2c0262af
FB
5235 }
5236 gen_eob(s);
5237 break;
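            /* In the protected-mode path above, cc_op and EIP are synchronized
               first and the helper is passed the instruction length
               (s->pc - pc_start) so it can build the return address and do the
               privilege checks itself; the real-mode helper is simply given the
               full return offset. */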
5238 case 4: /* jmp Ev */
5239 if (s->dflag == 0)
5240 gen_op_andl_T0_ffff();
5241 gen_op_jmp_T0();
5242 gen_eob(s);
5243 break;
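            /* Indirect calls and jumps above finish with gen_eob(): the target
               is only known at run time, so the block cannot be chained to a
               fixed successor. */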
5244 case 5: /* ljmp Ev */
57fec1fe 5245 gen_op_ld_T1_A0(ot + s->mem_index);
aba9d61e 5246 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
57fec1fe 5247 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
2c0262af
FB
5248 do_ljmp:
5249 if (s->pe && !s->vm86) {
773cdfcc 5250 gen_update_cc_op(s);
14ce26e7 5251 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 5252 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 5253 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
a7812ae4 5254 tcg_const_i32(s->pc - pc_start));
2c0262af 5255 } else {
3bd7da9e 5256 gen_op_movl_seg_T0_vm(R_CS);
2c0262af
FB
5257 gen_op_movl_T0_T1();
5258 gen_op_jmp_T0();
5259 }
5260 gen_eob(s);
5261 break;
5262 case 6: /* push Ev */
5263 gen_push_T0(s);
5264 break;
5265 default:
5266 goto illegal_op;
5267 }
5268 break;
5269
5270 case 0x84: /* test Ev, Gv */
5fafdf24 5271 case 0x85:
2c0262af
FB
5272 if ((b & 1) == 0)
5273 ot = OT_BYTE;
5274 else
14ce26e7 5275 ot = dflag + OT_WORD;
2c0262af 5276
0af10c86 5277 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5278 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5279
0af10c86 5280 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
57fec1fe 5281 gen_op_mov_TN_reg(ot, 1, reg);
2c0262af 5282 gen_op_testl_T0_T1_cc();
3ca51d07 5283 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af 5284 break;
3b46e624 5285
2c0262af
FB
5286 case 0xa8: /* test eAX, Iv */
5287 case 0xa9:
5288 if ((b & 1) == 0)
5289 ot = OT_BYTE;
5290 else
14ce26e7 5291 ot = dflag + OT_WORD;
0af10c86 5292 val = insn_get(env, s, ot);
2c0262af 5293
57fec1fe 5294 gen_op_mov_TN_reg(ot, 0, OR_EAX);
2c0262af
FB
5295 gen_op_movl_T1_im(val);
5296 gen_op_testl_T0_T1_cc();
3ca51d07 5297 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af 5298 break;
3b46e624 5299
2c0262af 5300 case 0x98: /* CWDE/CBW */
14ce26e7
FB
5301#ifdef TARGET_X86_64
5302 if (dflag == 2) {
e108dd01
FB
5303 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5304 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5305 gen_op_mov_reg_T0(OT_QUAD, R_EAX);
14ce26e7
FB
5306 } else
5307#endif
e108dd01
FB
5308 if (dflag == 1) {
5309 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
5310 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5311 gen_op_mov_reg_T0(OT_LONG, R_EAX);
5312 } else {
5313 gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
5314 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5315 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5316 }
2c0262af
FB
5317 break;
5318 case 0x99: /* CDQ/CWD */
14ce26e7
FB
5319#ifdef TARGET_X86_64
5320 if (dflag == 2) {
e108dd01
FB
5321 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5322 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
5323 gen_op_mov_reg_T0(OT_QUAD, R_EDX);
14ce26e7
FB
5324 } else
5325#endif
e108dd01
FB
5326 if (dflag == 1) {
5327 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5328 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5329 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
5330 gen_op_mov_reg_T0(OT_LONG, R_EDX);
5331 } else {
5332 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
5333 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5334 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
5335 gen_op_mov_reg_T0(OT_WORD, R_EDX);
5336 }
2c0262af
FB
5337 break;
5338 case 0x1af: /* imul Gv, Ev */
5339 case 0x69: /* imul Gv, Ev, I */
5340 case 0x6b:
14ce26e7 5341 ot = dflag + OT_WORD;
0af10c86 5342 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
5343 reg = ((modrm >> 3) & 7) | rex_r;
5344 if (b == 0x69)
5345 s->rip_offset = insn_const_size(ot);
5346 else if (b == 0x6b)
5347 s->rip_offset = 1;
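        /* s->rip_offset records how many immediate bytes still follow the
           modrm/displacement, so that x86-64 RIP-relative addresses in
           gen_lea_modrm() are computed from the end of the instruction. */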
0af10c86 5348 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2c0262af 5349 if (b == 0x69) {
0af10c86 5350 val = insn_get(env, s, ot);
2c0262af
FB
5351 gen_op_movl_T1_im(val);
5352 } else if (b == 0x6b) {
0af10c86 5353 val = (int8_t)insn_get(env, s, OT_BYTE);
2c0262af
FB
5354 gen_op_movl_T1_im(val);
5355 } else {
57fec1fe 5356 gen_op_mov_TN_reg(ot, 1, reg);
2c0262af 5357 }
a4bcea3d 5358 switch (ot) {
0211e5af 5359#ifdef TARGET_X86_64
a4bcea3d
RH
5360 case OT_QUAD:
5361 tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]);
5362 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5363 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
5364 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
5365 break;
0211e5af 5366#endif
a4bcea3d
RH
5367 case OT_LONG:
5368 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5369 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5370 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5371 cpu_tmp2_i32, cpu_tmp3_i32);
5372 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
5373 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5374 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5375 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5376 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
5377 break;
5378 default:
0211e5af
FB
5379 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5380 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5381 /* XXX: use 32 bit mul which could be faster */
5382 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5383 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5384 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5385 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
a4bcea3d
RH
5386 gen_op_mov_reg_T0(ot, reg);
5387 break;
2c0262af 5388 }
3ca51d07 5389 set_cc_op(s, CC_OP_MULB + ot);
2c0262af
FB
5390 break;
5391 case 0x1c0:
5392 case 0x1c1: /* xadd Ev, Gv */
5393 if ((b & 1) == 0)
5394 ot = OT_BYTE;
5395 else
14ce26e7 5396 ot = dflag + OT_WORD;
0af10c86 5397 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5398 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5399 mod = (modrm >> 6) & 3;
5400 if (mod == 3) {
14ce26e7 5401 rm = (modrm & 7) | REX_B(s);
57fec1fe
FB
5402 gen_op_mov_TN_reg(ot, 0, reg);
5403 gen_op_mov_TN_reg(ot, 1, rm);
2c0262af 5404 gen_op_addl_T0_T1();
57fec1fe
FB
5405 gen_op_mov_reg_T1(ot, reg);
5406 gen_op_mov_reg_T0(ot, rm);
2c0262af 5407 } else {
0af10c86 5408 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
57fec1fe
FB
5409 gen_op_mov_TN_reg(ot, 0, reg);
5410 gen_op_ld_T1_A0(ot + s->mem_index);
2c0262af 5411 gen_op_addl_T0_T1();
57fec1fe
FB
5412 gen_op_st_T0_A0(ot + s->mem_index);
5413 gen_op_mov_reg_T1(ot, reg);
2c0262af
FB
5414 }
5415 gen_op_update2_cc();
3ca51d07 5416 set_cc_op(s, CC_OP_ADDB + ot);
2c0262af
FB
5417 break;
5418 case 0x1b0:
5419 case 0x1b1: /* cmpxchg Ev, Gv */
cad3a37d 5420 {
1130328e 5421 int label1, label2;
1e4840bf 5422 TCGv t0, t1, t2, a0;
cad3a37d
FB
5423
5424 if ((b & 1) == 0)
5425 ot = OT_BYTE;
5426 else
5427 ot = dflag + OT_WORD;
0af10c86 5428 modrm = cpu_ldub_code(env, s->pc++);
cad3a37d
FB
5429 reg = ((modrm >> 3) & 7) | rex_r;
5430 mod = (modrm >> 6) & 3;
a7812ae4
PB
5431 t0 = tcg_temp_local_new();
5432 t1 = tcg_temp_local_new();
5433 t2 = tcg_temp_local_new();
5434 a0 = tcg_temp_local_new();
1e4840bf 5435 gen_op_mov_v_reg(ot, t1, reg);
cad3a37d
FB
5436 if (mod == 3) {
5437 rm = (modrm & 7) | REX_B(s);
1e4840bf 5438 gen_op_mov_v_reg(ot, t0, rm);
cad3a37d 5439 } else {
0af10c86 5440 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
1e4840bf
FB
5441 tcg_gen_mov_tl(a0, cpu_A0);
5442 gen_op_ld_v(ot + s->mem_index, t0, a0);
cad3a37d
FB
5443 rm = 0; /* avoid warning */
5444 }
5445 label1 = gen_new_label();
a3251186
RH
5446 tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5447 gen_extu(ot, t0);
1e4840bf 5448 gen_extu(ot, t2);
a3251186 5449 tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
f7e80adf 5450 label2 = gen_new_label();
cad3a37d 5451 if (mod == 3) {
1e4840bf 5452 gen_op_mov_reg_v(ot, R_EAX, t0);
1130328e
FB
5453 tcg_gen_br(label2);
5454 gen_set_label(label1);
1e4840bf 5455 gen_op_mov_reg_v(ot, rm, t1);
cad3a37d 5456 } else {
f7e80adf
AG
5457 /* perform a no-op store cycle like a physical CPU; it must happen
5458 before the accumulator is changed, so the operation stays idempotent
5459 if the store faults and the instruction is restarted */
5460 gen_op_st_v(ot + s->mem_index, t0, a0);
1e4840bf 5461 gen_op_mov_reg_v(ot, R_EAX, t0);
f7e80adf 5462 tcg_gen_br(label2);
1130328e 5463 gen_set_label(label1);
1e4840bf 5464 gen_op_st_v(ot + s->mem_index, t1, a0);
cad3a37d 5465 }
f7e80adf 5466 gen_set_label(label2);
1e4840bf 5467 tcg_gen_mov_tl(cpu_cc_src, t0);
a3251186
RH
5468 tcg_gen_mov_tl(cpu_cc_srcT, t2);
5469 tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
3ca51d07 5470 set_cc_op(s, CC_OP_SUBB + ot);
1e4840bf
FB
5471 tcg_temp_free(t0);
5472 tcg_temp_free(t1);
5473 tcg_temp_free(t2);
5474 tcg_temp_free(a0);
2c0262af 5475 }
2c0262af
FB
5476 break;
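        /* CMPXCHG above: the accumulator (zero-extended to the operand size) is
           compared with the destination; on equality the source register is
           stored, otherwise the old value is loaded into EAX (with a dummy store
           of the old value in the memory case).  The flags are set as for a CMP
           of the accumulator with the destination (CC_OP_SUBB + ot). */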
5477 case 0x1c7: /* cmpxchg8b */
0af10c86 5478 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5479 mod = (modrm >> 6) & 3;
71c3558e 5480 if ((mod == 3) || ((modrm & 0x38) != 0x8))
2c0262af 5481 goto illegal_op;
1b9d9ebb
FB
5482#ifdef TARGET_X86_64
5483 if (dflag == 2) {
5484 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5485 goto illegal_op;
5486 gen_jmp_im(pc_start - s->cs_base);
773cdfcc 5487 gen_update_cc_op(s);
0af10c86 5488 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
92fc4b58 5489 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
1b9d9ebb
FB
5490 } else
5491#endif
5492 {
5493 if (!(s->cpuid_features & CPUID_CX8))
5494 goto illegal_op;
5495 gen_jmp_im(pc_start - s->cs_base);
773cdfcc 5496 gen_update_cc_op(s);
0af10c86 5497 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
92fc4b58 5498 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
1b9d9ebb 5499 }
3ca51d07 5500 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 5501 break;
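        /* cmpxchg8b/cmpxchg16b are handled entirely in a helper (compare and
           exchange of 8/16 bytes plus the ZF update); availability is gated on
           the CX8 and CX16 CPUID bits, and the resulting flags come back via
           CC_OP_EFLAGS. */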
3b46e624 5502
2c0262af
FB
5503 /**************************/
5504 /* push/pop */
5505 case 0x50 ... 0x57: /* push */
57fec1fe 5506 gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
2c0262af
FB
5507 gen_push_T0(s);
5508 break;
5509 case 0x58 ... 0x5f: /* pop */
14ce26e7
FB
5510 if (CODE64(s)) {
5511 ot = dflag ? OT_QUAD : OT_WORD;
5512 } else {
5513 ot = dflag + OT_WORD;
5514 }
2c0262af 5515 gen_pop_T0(s);
77729c24 5516 /* NOTE: order is important for pop %sp */
2c0262af 5517 gen_pop_update(s);
57fec1fe 5518 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
2c0262af
FB
5519 break;
5520 case 0x60: /* pusha */
14ce26e7
FB
5521 if (CODE64(s))
5522 goto illegal_op;
2c0262af
FB
5523 gen_pusha(s);
5524 break;
5525 case 0x61: /* popa */
14ce26e7
FB
5526 if (CODE64(s))
5527 goto illegal_op;
2c0262af
FB
5528 gen_popa(s);
5529 break;
5530 case 0x68: /* push Iv */
5531 case 0x6a:
14ce26e7
FB
5532 if (CODE64(s)) {
5533 ot = dflag ? OT_QUAD : OT_WORD;
5534 } else {
5535 ot = dflag + OT_WORD;
5536 }
2c0262af 5537 if (b == 0x68)
0af10c86 5538 val = insn_get(env, s, ot);
2c0262af 5539 else
0af10c86 5540 val = (int8_t)insn_get(env, s, OT_BYTE);
2c0262af
FB
5541 gen_op_movl_T0_im(val);
5542 gen_push_T0(s);
5543 break;
5544 case 0x8f: /* pop Ev */
14ce26e7
FB
5545 if (CODE64(s)) {
5546 ot = dflag ? OT_QUAD : OT_WORD;
5547 } else {
5548 ot = dflag + OT_WORD;
5549 }
0af10c86 5550 modrm = cpu_ldub_code(env, s->pc++);
77729c24 5551 mod = (modrm >> 6) & 3;
2c0262af 5552 gen_pop_T0(s);
77729c24
FB
5553 if (mod == 3) {
5554 /* NOTE: order is important for pop %sp */
5555 gen_pop_update(s);
14ce26e7 5556 rm = (modrm & 7) | REX_B(s);
57fec1fe 5557 gen_op_mov_reg_T0(ot, rm);
77729c24
FB
5558 } else {
5559 /* NOTE: order is important too for MMU exceptions */
14ce26e7 5560 s->popl_esp_hack = 1 << ot;
0af10c86 5561 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
77729c24
FB
5562 s->popl_esp_hack = 0;
5563 gen_pop_update(s);
5564 }
2c0262af
FB
5565 break;
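        /* popl_esp_hack above makes gen_lea_modrm() add the operand size to an
           ESP-based address: for a "pop mem" the effective address must be
           computed with the stack pointer already incremented, as on real
           hardware. */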
5566 case 0xc8: /* enter */
5567 {
5568 int level;
0af10c86 5569 val = cpu_lduw_code(env, s->pc);
2c0262af 5570 s->pc += 2;
0af10c86 5571 level = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5572 gen_enter(s, val, level);
5573 }
5574 break;
5575 case 0xc9: /* leave */
5576 /* XXX: exception not precise (ESP is updated before potential exception) */
14ce26e7 5577 if (CODE64(s)) {
57fec1fe
FB
5578 gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
5579 gen_op_mov_reg_T0(OT_QUAD, R_ESP);
14ce26e7 5580 } else if (s->ss32) {
57fec1fe
FB
5581 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
5582 gen_op_mov_reg_T0(OT_LONG, R_ESP);
2c0262af 5583 } else {
57fec1fe
FB
5584 gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
5585 gen_op_mov_reg_T0(OT_WORD, R_ESP);
2c0262af
FB
5586 }
5587 gen_pop_T0(s);
14ce26e7
FB
5588 if (CODE64(s)) {
5589 ot = dflag ? OT_QUAD : OT_WORD;
5590 } else {
5591 ot = dflag + OT_WORD;
5592 }
57fec1fe 5593 gen_op_mov_reg_T0(ot, R_EBP);
2c0262af
FB
5594 gen_pop_update(s);
5595 break;
5596 case 0x06: /* push es */
5597 case 0x0e: /* push cs */
5598 case 0x16: /* push ss */
5599 case 0x1e: /* push ds */
14ce26e7
FB
5600 if (CODE64(s))
5601 goto illegal_op;
2c0262af
FB
5602 gen_op_movl_T0_seg(b >> 3);
5603 gen_push_T0(s);
5604 break;
5605 case 0x1a0: /* push fs */
5606 case 0x1a8: /* push gs */
5607 gen_op_movl_T0_seg((b >> 3) & 7);
5608 gen_push_T0(s);
5609 break;
5610 case 0x07: /* pop es */
5611 case 0x17: /* pop ss */
5612 case 0x1f: /* pop ds */
14ce26e7
FB
5613 if (CODE64(s))
5614 goto illegal_op;
2c0262af
FB
5615 reg = b >> 3;
5616 gen_pop_T0(s);
5617 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5618 gen_pop_update(s);
5619 if (reg == R_SS) {
a2cc3b24
FB
5620 /* if reg == SS, inhibit interrupts/trace. */
5621 /* If several instructions disable interrupts, only the
5622 _first_ does it */
5623 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
f0967a1a 5624 gen_helper_set_inhibit_irq(cpu_env);
2c0262af
FB
5625 s->tf = 0;
5626 }
5627 if (s->is_jmp) {
14ce26e7 5628 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5629 gen_eob(s);
5630 }
5631 break;
5632 case 0x1a1: /* pop fs */
5633 case 0x1a9: /* pop gs */
5634 gen_pop_T0(s);
5635 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5636 gen_pop_update(s);
5637 if (s->is_jmp) {
14ce26e7 5638 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5639 gen_eob(s);
5640 }
5641 break;
5642
5643 /**************************/
5644 /* mov */
5645 case 0x88:
5646 case 0x89: /* mov Gv, Ev */
5647 if ((b & 1) == 0)
5648 ot = OT_BYTE;
5649 else
14ce26e7 5650 ot = dflag + OT_WORD;
0af10c86 5651 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5652 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5653
2c0262af 5654 /* generate a generic store */
0af10c86 5655 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
2c0262af
FB
5656 break;
5657 case 0xc6:
5658 case 0xc7: /* mov Ev, Iv */
5659 if ((b & 1) == 0)
5660 ot = OT_BYTE;
5661 else
14ce26e7 5662 ot = dflag + OT_WORD;
0af10c86 5663 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5664 mod = (modrm >> 6) & 3;
14ce26e7
FB
5665 if (mod != 3) {
5666 s->rip_offset = insn_const_size(ot);
0af10c86 5667 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
14ce26e7 5668 }
0af10c86 5669 val = insn_get(env, s, ot);
2c0262af
FB
5670 gen_op_movl_T0_im(val);
5671 if (mod != 3)
57fec1fe 5672 gen_op_st_T0_A0(ot + s->mem_index);
2c0262af 5673 else
57fec1fe 5674 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
2c0262af
FB
5675 break;
5676 case 0x8a:
5677 case 0x8b: /* mov Ev, Gv */
5678 if ((b & 1) == 0)
5679 ot = OT_BYTE;
5680 else
14ce26e7 5681 ot = OT_WORD + dflag;
0af10c86 5682 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5683 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5684
0af10c86 5685 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
57fec1fe 5686 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
5687 break;
5688 case 0x8e: /* mov seg, Gv */
0af10c86 5689 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5690 reg = (modrm >> 3) & 7;
5691 if (reg >= 6 || reg == R_CS)
5692 goto illegal_op;
0af10c86 5693 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
2c0262af
FB
5694 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5695 if (reg == R_SS) {
5696 /* if reg == SS, inhibit interrupts/trace */
a2cc3b24
FB
5697 /* If several instructions disable interrupts, only the
5698 _first_ does it */
5699 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
f0967a1a 5700 gen_helper_set_inhibit_irq(cpu_env);
2c0262af
FB
5701 s->tf = 0;
5702 }
5703 if (s->is_jmp) {
14ce26e7 5704 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5705 gen_eob(s);
5706 }
5707 break;
5708 case 0x8c: /* mov Gv, seg */
0af10c86 5709 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5710 reg = (modrm >> 3) & 7;
5711 mod = (modrm >> 6) & 3;
5712 if (reg >= 6)
5713 goto illegal_op;
5714 gen_op_movl_T0_seg(reg);
14ce26e7
FB
5715 if (mod == 3)
5716 ot = OT_WORD + dflag;
5717 else
5718 ot = OT_WORD;
0af10c86 5719 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
5720 break;
5721
5722 case 0x1b6: /* movzbS Gv, Eb */
5723 case 0x1b7: /* movzwS Gv, Eb */
5724 case 0x1be: /* movsbS Gv, Eb */
5725 case 0x1bf: /* movswS Gv, Eb */
5726 {
5727 int d_ot;
5728 /* d_ot is the size of destination */
5729 d_ot = dflag + OT_WORD;
5730 /* ot is the size of source */
5731 ot = (b & 1) + OT_BYTE;
0af10c86 5732 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5733 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 5734 mod = (modrm >> 6) & 3;
14ce26e7 5735 rm = (modrm & 7) | REX_B(s);
3b46e624 5736
2c0262af 5737 if (mod == 3) {
57fec1fe 5738 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af
FB
5739 switch(ot | (b & 8)) {
5740 case OT_BYTE:
e108dd01 5741 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
2c0262af
FB
5742 break;
5743 case OT_BYTE | 8:
e108dd01 5744 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
2c0262af
FB
5745 break;
5746 case OT_WORD:
e108dd01 5747 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
2c0262af
FB
5748 break;
5749 default:
5750 case OT_WORD | 8:
e108dd01 5751 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
2c0262af
FB
5752 break;
5753 }
57fec1fe 5754 gen_op_mov_reg_T0(d_ot, reg);
2c0262af 5755 } else {
0af10c86 5756 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af 5757 if (b & 8) {
57fec1fe 5758 gen_op_lds_T0_A0(ot + s->mem_index);
2c0262af 5759 } else {
57fec1fe 5760 gen_op_ldu_T0_A0(ot + s->mem_index);
2c0262af 5761 }
57fec1fe 5762 gen_op_mov_reg_T0(d_ot, reg);
2c0262af
FB
5763 }
5764 }
5765 break;
5766
5767 case 0x8d: /* lea */
14ce26e7 5768 ot = dflag + OT_WORD;
0af10c86 5769 modrm = cpu_ldub_code(env, s->pc++);
3a1d9b8b
FB
5770 mod = (modrm >> 6) & 3;
5771 if (mod == 3)
5772 goto illegal_op;
14ce26e7 5773 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5774 /* we must ensure that no segment is added */
5775 s->override = -1;
5776 val = s->addseg;
5777 s->addseg = 0;
0af10c86 5778 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af 5779 s->addseg = val;
57fec1fe 5780 gen_op_mov_reg_A0(ot - OT_WORD, reg);
2c0262af 5781 break;
3b46e624 5782
2c0262af
FB
5783 case 0xa0: /* mov EAX, Ov */
5784 case 0xa1:
5785 case 0xa2: /* mov Ov, EAX */
5786 case 0xa3:
2c0262af 5787 {
14ce26e7
FB
5788 target_ulong offset_addr;
5789
5790 if ((b & 1) == 0)
5791 ot = OT_BYTE;
5792 else
5793 ot = dflag + OT_WORD;
5794#ifdef TARGET_X86_64
8f091a59 5795 if (s->aflag == 2) {
0af10c86 5796 offset_addr = cpu_ldq_code(env, s->pc);
14ce26e7 5797 s->pc += 8;
57fec1fe 5798 gen_op_movq_A0_im(offset_addr);
5fafdf24 5799 } else
14ce26e7
FB
5800#endif
5801 {
5802 if (s->aflag) {
0af10c86 5803 offset_addr = insn_get(env, s, OT_LONG);
14ce26e7 5804 } else {
0af10c86 5805 offset_addr = insn_get(env, s, OT_WORD);
14ce26e7
FB
5806 }
5807 gen_op_movl_A0_im(offset_addr);
5808 }
664e0f19 5809 gen_add_A0_ds_seg(s);
14ce26e7 5810 if ((b & 2) == 0) {
57fec1fe
FB
5811 gen_op_ld_T0_A0(ot + s->mem_index);
5812 gen_op_mov_reg_T0(ot, R_EAX);
14ce26e7 5813 } else {
57fec1fe
FB
5814 gen_op_mov_TN_reg(ot, 0, R_EAX);
5815 gen_op_st_T0_A0(ot + s->mem_index);
2c0262af
FB
5816 }
5817 }
2c0262af
FB
5818 break;
5819 case 0xd7: /* xlat */
14ce26e7 5820#ifdef TARGET_X86_64
8f091a59 5821 if (s->aflag == 2) {
57fec1fe 5822 gen_op_movq_A0_reg(R_EBX);
bbf662ee
FB
5823 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5824 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5825 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5fafdf24 5826 } else
14ce26e7
FB
5827#endif
5828 {
57fec1fe 5829 gen_op_movl_A0_reg(R_EBX);
bbf662ee
FB
5830 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5831 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5832 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
14ce26e7
FB
5833 if (s->aflag == 0)
5834 gen_op_andl_A0_ffff();
bbf662ee
FB
5835 else
5836 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
14ce26e7 5837 }
664e0f19 5838 gen_add_A0_ds_seg(s);
57fec1fe
FB
5839 gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
5840 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
2c0262af
FB
5841 break;
5842 case 0xb0 ... 0xb7: /* mov R, Ib */
0af10c86 5843 val = insn_get(env, s, OT_BYTE);
2c0262af 5844 gen_op_movl_T0_im(val);
57fec1fe 5845 gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
2c0262af
FB
5846 break;
5847 case 0xb8 ... 0xbf: /* mov R, Iv */
14ce26e7
FB
5848#ifdef TARGET_X86_64
5849 if (dflag == 2) {
5850 uint64_t tmp;
5851 /* 64 bit case */
0af10c86 5852 tmp = cpu_ldq_code(env, s->pc);
14ce26e7
FB
5853 s->pc += 8;
5854 reg = (b & 7) | REX_B(s);
5855 gen_movtl_T0_im(tmp);
57fec1fe 5856 gen_op_mov_reg_T0(OT_QUAD, reg);
5fafdf24 5857 } else
14ce26e7
FB
5858#endif
5859 {
5860 ot = dflag ? OT_LONG : OT_WORD;
0af10c86 5861 val = insn_get(env, s, ot);
14ce26e7
FB
5862 reg = (b & 7) | REX_B(s);
5863 gen_op_movl_T0_im(val);
57fec1fe 5864 gen_op_mov_reg_T0(ot, reg);
14ce26e7 5865 }
2c0262af
FB
5866 break;
5867
5868 case 0x91 ... 0x97: /* xchg R, EAX */
7418027e 5869 do_xchg_reg_eax:
14ce26e7
FB
5870 ot = dflag + OT_WORD;
5871 reg = (b & 7) | REX_B(s);
2c0262af
FB
5872 rm = R_EAX;
5873 goto do_xchg_reg;
5874 case 0x86:
5875 case 0x87: /* xchg Ev, Gv */
5876 if ((b & 1) == 0)
5877 ot = OT_BYTE;
5878 else
14ce26e7 5879 ot = dflag + OT_WORD;
0af10c86 5880 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5881 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5882 mod = (modrm >> 6) & 3;
5883 if (mod == 3) {
14ce26e7 5884 rm = (modrm & 7) | REX_B(s);
2c0262af 5885 do_xchg_reg:
57fec1fe
FB
5886 gen_op_mov_TN_reg(ot, 0, reg);
5887 gen_op_mov_TN_reg(ot, 1, rm);
5888 gen_op_mov_reg_T0(ot, rm);
5889 gen_op_mov_reg_T1(ot, reg);
2c0262af 5890 } else {
0af10c86 5891 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
57fec1fe 5892 gen_op_mov_TN_reg(ot, 0, reg);
2c0262af
FB
5893 /* for xchg, lock is implicit */
5894 if (!(prefixes & PREFIX_LOCK))
a7812ae4 5895 gen_helper_lock();
57fec1fe
FB
5896 gen_op_ld_T1_A0(ot + s->mem_index);
5897 gen_op_st_T0_A0(ot + s->mem_index);
2c0262af 5898 if (!(prefixes & PREFIX_LOCK))
a7812ae4 5899 gen_helper_unlock();
57fec1fe 5900 gen_op_mov_reg_T1(ot, reg);
2c0262af
FB
5901 }
5902 break;
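        /* The PREFIX_LOCK test above is intentionally inverted: when an explicit
           LOCK prefix is present the generic prefix handling elsewhere in
           disas_insn already wraps the instruction in lock/unlock, so the pair
           is only added here for the implicitly locked plain XCHG with memory. */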
5903 case 0xc4: /* les Gv */
701ed211 5904 /* In CODE64 this is VEX3; see above. */
2c0262af
FB
5905 op = R_ES;
5906 goto do_lxx;
5907 case 0xc5: /* lds Gv */
701ed211 5908 /* In CODE64 this is VEX2; see above. */
2c0262af
FB
5909 op = R_DS;
5910 goto do_lxx;
5911 case 0x1b2: /* lss Gv */
5912 op = R_SS;
5913 goto do_lxx;
5914 case 0x1b4: /* lfs Gv */
5915 op = R_FS;
5916 goto do_lxx;
5917 case 0x1b5: /* lgs Gv */
5918 op = R_GS;
5919 do_lxx:
5920 ot = dflag ? OT_LONG : OT_WORD;
0af10c86 5921 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5922 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5923 mod = (modrm >> 6) & 3;
5924 if (mod == 3)
5925 goto illegal_op;
0af10c86 5926 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
57fec1fe 5927 gen_op_ld_T1_A0(ot + s->mem_index);
aba9d61e 5928 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
2c0262af 5929 /* load the segment first to handle exceptions properly */
57fec1fe 5930 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
2c0262af
FB
5931 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5932 /* then put the data */
57fec1fe 5933 gen_op_mov_reg_T1(ot, reg);
2c0262af 5934 if (s->is_jmp) {
14ce26e7 5935 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5936 gen_eob(s);
5937 }
5938 break;
3b46e624 5939
2c0262af
FB
5940 /************************/
5941 /* shifts */
5942 case 0xc0:
5943 case 0xc1:
5944 /* shift Ev,Ib */
5945 shift = 2;
5946 grp2:
5947 {
5948 if ((b & 1) == 0)
5949 ot = OT_BYTE;
5950 else
14ce26e7 5951 ot = dflag + OT_WORD;
3b46e624 5952
0af10c86 5953 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5954 mod = (modrm >> 6) & 3;
2c0262af 5955 op = (modrm >> 3) & 7;
3b46e624 5956
2c0262af 5957 if (mod != 3) {
14ce26e7
FB
5958 if (shift == 2) {
5959 s->rip_offset = 1;
5960 }
0af10c86 5961 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af
FB
5962 opreg = OR_TMP0;
5963 } else {
14ce26e7 5964 opreg = (modrm & 7) | REX_B(s);
2c0262af
FB
5965 }
5966
5967 /* simpler op */
5968 if (shift == 0) {
5969 gen_shift(s, op, ot, opreg, OR_ECX);
5970 } else {
5971 if (shift == 2) {
0af10c86 5972 shift = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5973 }
5974 gen_shifti(s, op, ot, opreg, shift);
5975 }
5976 }
5977 break;
5978 case 0xd0:
5979 case 0xd1:
5980 /* shift Ev,1 */
5981 shift = 1;
5982 goto grp2;
5983 case 0xd2:
5984 case 0xd3:
5985 /* shift Ev,cl */
5986 shift = 0;
5987 goto grp2;
5988
5989 case 0x1a4: /* shld imm */
5990 op = 0;
5991 shift = 1;
5992 goto do_shiftd;
5993 case 0x1a5: /* shld cl */
5994 op = 0;
5995 shift = 0;
5996 goto do_shiftd;
5997 case 0x1ac: /* shrd imm */
5998 op = 1;
5999 shift = 1;
6000 goto do_shiftd;
6001 case 0x1ad: /* shrd cl */
6002 op = 1;
6003 shift = 0;
6004 do_shiftd:
14ce26e7 6005 ot = dflag + OT_WORD;
0af10c86 6006 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 6007 mod = (modrm >> 6) & 3;
14ce26e7
FB
6008 rm = (modrm & 7) | REX_B(s);
6009 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 6010 if (mod != 3) {
0af10c86 6011 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
b6abf97d 6012 opreg = OR_TMP0;
2c0262af 6013 } else {
b6abf97d 6014 opreg = rm;
2c0262af 6015 }
57fec1fe 6016 gen_op_mov_TN_reg(ot, 1, reg);
3b46e624 6017
2c0262af 6018 if (shift) {
3b9d3cf1
PB
6019 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
6020 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
6021 tcg_temp_free(imm);
2c0262af 6022 } else {
3b9d3cf1 6023 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
2c0262af
FB
6024 }
6025 break;
6026
6027 /************************/
6028 /* floats */
5fafdf24 6029 case 0xd8 ... 0xdf:
7eee2a50
FB
6030 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
6031 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
6032 /* XXX: what to do if illegal op ? */
6033 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6034 break;
6035 }
0af10c86 6036 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
6037 mod = (modrm >> 6) & 3;
6038 rm = modrm & 7;
6039 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
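        /* op packs the ESC opcode (D8..DF, the low 3 bits of b) into bits 5..3
           and the modrm reg field into bits 2..0, giving a single 6-bit index
           that the switches below use to select among the memory-operand and
           register forms. */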
2c0262af
FB
6040 if (mod != 3) {
6041 /* memory op */
0af10c86 6042 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af
FB
6043 switch(op) {
6044 case 0x00 ... 0x07: /* fxxxs */
6045 case 0x10 ... 0x17: /* fixxxl */
6046 case 0x20 ... 0x27: /* fxxxl */
6047 case 0x30 ... 0x37: /* fixxx */
6048 {
6049 int op1;
6050 op1 = op & 7;
6051
6052 switch(op >> 4) {
6053 case 0:
ba7cd150 6054 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
b6abf97d 6055 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6056 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6057 break;
6058 case 1:
ba7cd150 6059 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
b6abf97d 6060 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6061 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6062 break;
6063 case 2:
b6abf97d 6064 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
19e6c4b8 6065 (s->mem_index >> 2) - 1);
d3eb5eae 6066 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
6067 break;
6068 case 3:
6069 default:
ba7cd150 6070 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
b6abf97d 6071 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6072 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6073 break;
6074 }
3b46e624 6075
a7812ae4 6076 gen_helper_fp_arith_ST0_FT0(op1);
2c0262af
FB
6077 if (op1 == 3) {
6078 /* fcomp needs pop */
d3eb5eae 6079 gen_helper_fpop(cpu_env);
2c0262af
FB
6080 }
6081 }
6082 break;
6083 case 0x08: /* flds */
6084 case 0x0a: /* fsts */
6085 case 0x0b: /* fstps */
465e9838
FB
6086 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
6087 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
6088 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
2c0262af
FB
6089 switch(op & 7) {
6090 case 0:
6091 switch(op >> 4) {
6092 case 0:
ba7cd150 6093 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
b6abf97d 6094 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6095 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6096 break;
6097 case 1:
ba7cd150 6098 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
b6abf97d 6099 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6100 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6101 break;
6102 case 2:
b6abf97d 6103 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
19e6c4b8 6104 (s->mem_index >> 2) - 1);
d3eb5eae 6105 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
6106 break;
6107 case 3:
6108 default:
ba7cd150 6109 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
b6abf97d 6110 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6111 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6112 break;
6113 }
6114 break;
465e9838 6115 case 1:
19e6c4b8 6116 /* XXX: the corresponding CPUID bit (SSE3, for FISTTP) should be tested! */
465e9838
FB
6117 switch(op >> 4) {
6118 case 1:
d3eb5eae 6119 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
b6abf97d 6120 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
ba7cd150 6121 gen_op_st_T0_A0(OT_LONG + s->mem_index);
465e9838
FB
6122 break;
6123 case 2:
d3eb5eae 6124 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
b6abf97d 6125 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
19e6c4b8 6126 (s->mem_index >> 2) - 1);
465e9838
FB
6127 break;
6128 case 3:
6129 default:
d3eb5eae 6130 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
b6abf97d 6131 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
ba7cd150 6132 gen_op_st_T0_A0(OT_WORD + s->mem_index);
19e6c4b8 6133 break;
465e9838 6134 }
d3eb5eae 6135 gen_helper_fpop(cpu_env);
465e9838 6136 break;
2c0262af
FB
6137 default:
6138 switch(op >> 4) {
6139 case 0:
d3eb5eae 6140 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
b6abf97d 6141 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
ba7cd150 6142 gen_op_st_T0_A0(OT_LONG + s->mem_index);
2c0262af
FB
6143 break;
6144 case 1:
d3eb5eae 6145 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
b6abf97d 6146 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
ba7cd150 6147 gen_op_st_T0_A0(OT_LONG + s->mem_index);
2c0262af
FB
6148 break;
6149 case 2:
d3eb5eae 6150 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
b6abf97d 6151 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
19e6c4b8 6152 (s->mem_index >> 2) - 1);
2c0262af
FB
6153 break;
6154 case 3:
6155 default:
d3eb5eae 6156 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
b6abf97d 6157 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
ba7cd150 6158 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2c0262af
FB
6159 break;
6160 }
6161 if ((op & 7) == 3)
d3eb5eae 6162 gen_helper_fpop(cpu_env);
2c0262af
FB
6163 break;
6164 }
6165 break;
6166 case 0x0c: /* fldenv mem */
773cdfcc 6167 gen_update_cc_op(s);
19e6c4b8 6168 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6169 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
2c0262af
FB
6170 break;
6171 case 0x0d: /* fldcw mem */
19e6c4b8 6172 gen_op_ld_T0_A0(OT_WORD + s->mem_index);
b6abf97d 6173 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6174 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6175 break;
6176 case 0x0e: /* fnstenv mem */
773cdfcc 6177 gen_update_cc_op(s);
19e6c4b8 6178 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6179 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
2c0262af
FB
6180 break;
6181 case 0x0f: /* fnstcw mem */
d3eb5eae 6182 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
b6abf97d 6183 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
19e6c4b8 6184 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2c0262af
FB
6185 break;
6186 case 0x1d: /* fldt mem */
773cdfcc 6187 gen_update_cc_op(s);
19e6c4b8 6188 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6189 gen_helper_fldt_ST0(cpu_env, cpu_A0);
2c0262af
FB
6190 break;
6191 case 0x1f: /* fstpt mem */
773cdfcc 6192 gen_update_cc_op(s);
19e6c4b8 6193 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae
BS
6194 gen_helper_fstt_ST0(cpu_env, cpu_A0);
6195 gen_helper_fpop(cpu_env);
2c0262af
FB
6196 break;
6197 case 0x2c: /* frstor mem */
773cdfcc 6198 gen_update_cc_op(s);
19e6c4b8 6199 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6200 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
2c0262af
FB
6201 break;
6202 case 0x2e: /* fnsave mem */
773cdfcc 6203 gen_update_cc_op(s);
19e6c4b8 6204 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6205 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
2c0262af
FB
6206 break;
6207 case 0x2f: /* fnstsw mem */
d3eb5eae 6208 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
b6abf97d 6209 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
19e6c4b8 6210 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2c0262af
FB
6211 break;
6212 case 0x3c: /* fbld */
773cdfcc 6213 gen_update_cc_op(s);
19e6c4b8 6214 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6215 gen_helper_fbld_ST0(cpu_env, cpu_A0);
2c0262af
FB
6216 break;
6217 case 0x3e: /* fbstp */
773cdfcc 6218 gen_update_cc_op(s);
19e6c4b8 6219 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae
BS
6220 gen_helper_fbst_ST0(cpu_env, cpu_A0);
6221 gen_helper_fpop(cpu_env);
2c0262af
FB
6222 break;
6223 case 0x3d: /* fildll */
b6abf97d 6224 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
19e6c4b8 6225 (s->mem_index >> 2) - 1);
d3eb5eae 6226 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
6227 break;
6228 case 0x3f: /* fistpll */
d3eb5eae 6229 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
b6abf97d 6230 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
19e6c4b8 6231 (s->mem_index >> 2) - 1);
d3eb5eae 6232 gen_helper_fpop(cpu_env);
2c0262af
FB
6233 break;
6234 default:
6235 goto illegal_op;
6236 }
6237 } else {
6238 /* register float ops */
6239 opreg = rm;
6240
6241 switch(op) {
6242 case 0x08: /* fld sti */
d3eb5eae
BS
6243 gen_helper_fpush(cpu_env);
6244 gen_helper_fmov_ST0_STN(cpu_env,
6245 tcg_const_i32((opreg + 1) & 7));
2c0262af
FB
6246 break;
6247 case 0x09: /* fxchg sti */
c169c906
FB
6248 case 0x29: /* fxchg4 sti, undocumented op */
6249 case 0x39: /* fxchg7 sti, undocumented op */
d3eb5eae 6250 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
2c0262af
FB
6251 break;
6252 case 0x0a: /* grp d9/2 */
6253 switch(rm) {
6254 case 0: /* fnop */
023fe10d 6255 /* check exceptions (FreeBSD FPU probe) */
773cdfcc 6256 gen_update_cc_op(s);
14ce26e7 6257 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6258 gen_helper_fwait(cpu_env);
2c0262af
FB
6259 break;
6260 default:
6261 goto illegal_op;
6262 }
6263 break;
6264 case 0x0c: /* grp d9/4 */
6265 switch(rm) {
6266 case 0: /* fchs */
d3eb5eae 6267 gen_helper_fchs_ST0(cpu_env);
2c0262af
FB
6268 break;
6269 case 1: /* fabs */
d3eb5eae 6270 gen_helper_fabs_ST0(cpu_env);
2c0262af
FB
6271 break;
6272 case 4: /* ftst */
d3eb5eae
BS
6273 gen_helper_fldz_FT0(cpu_env);
6274 gen_helper_fcom_ST0_FT0(cpu_env);
2c0262af
FB
6275 break;
6276 case 5: /* fxam */
d3eb5eae 6277 gen_helper_fxam_ST0(cpu_env);
2c0262af
FB
6278 break;
6279 default:
6280 goto illegal_op;
6281 }
6282 break;
6283 case 0x0d: /* grp d9/5 */
6284 {
6285 switch(rm) {
6286 case 0:
d3eb5eae
BS
6287 gen_helper_fpush(cpu_env);
6288 gen_helper_fld1_ST0(cpu_env);
2c0262af
FB
6289 break;
6290 case 1:
d3eb5eae
BS
6291 gen_helper_fpush(cpu_env);
6292 gen_helper_fldl2t_ST0(cpu_env);
2c0262af
FB
6293 break;
6294 case 2:
d3eb5eae
BS
6295 gen_helper_fpush(cpu_env);
6296 gen_helper_fldl2e_ST0(cpu_env);
2c0262af
FB
6297 break;
6298 case 3:
d3eb5eae
BS
6299 gen_helper_fpush(cpu_env);
6300 gen_helper_fldpi_ST0(cpu_env);
2c0262af
FB
6301 break;
6302 case 4:
d3eb5eae
BS
6303 gen_helper_fpush(cpu_env);
6304 gen_helper_fldlg2_ST0(cpu_env);
2c0262af
FB
6305 break;
6306 case 5:
d3eb5eae
BS
6307 gen_helper_fpush(cpu_env);
6308 gen_helper_fldln2_ST0(cpu_env);
2c0262af
FB
6309 break;
6310 case 6:
d3eb5eae
BS
6311 gen_helper_fpush(cpu_env);
6312 gen_helper_fldz_ST0(cpu_env);
2c0262af
FB
6313 break;
6314 default:
6315 goto illegal_op;
6316 }
6317 }
6318 break;
6319 case 0x0e: /* grp d9/6 */
6320 switch(rm) {
6321 case 0: /* f2xm1 */
d3eb5eae 6322 gen_helper_f2xm1(cpu_env);
2c0262af
FB
6323 break;
6324 case 1: /* fyl2x */
d3eb5eae 6325 gen_helper_fyl2x(cpu_env);
2c0262af
FB
6326 break;
6327 case 2: /* fptan */
d3eb5eae 6328 gen_helper_fptan(cpu_env);
2c0262af
FB
6329 break;
6330 case 3: /* fpatan */
d3eb5eae 6331 gen_helper_fpatan(cpu_env);
2c0262af
FB
6332 break;
6333 case 4: /* fxtract */
d3eb5eae 6334 gen_helper_fxtract(cpu_env);
2c0262af
FB
6335 break;
6336 case 5: /* fprem1 */
d3eb5eae 6337 gen_helper_fprem1(cpu_env);
2c0262af
FB
6338 break;
6339 case 6: /* fdecstp */
d3eb5eae 6340 gen_helper_fdecstp(cpu_env);
2c0262af
FB
6341 break;
6342 default:
6343 case 7: /* fincstp */
d3eb5eae 6344 gen_helper_fincstp(cpu_env);
2c0262af
FB
6345 break;
6346 }
6347 break;
6348 case 0x0f: /* grp d9/7 */
6349 switch(rm) {
6350 case 0: /* fprem */
d3eb5eae 6351 gen_helper_fprem(cpu_env);
2c0262af
FB
6352 break;
6353 case 1: /* fyl2xp1 */
d3eb5eae 6354 gen_helper_fyl2xp1(cpu_env);
2c0262af
FB
6355 break;
6356 case 2: /* fsqrt */
d3eb5eae 6357 gen_helper_fsqrt(cpu_env);
2c0262af
FB
6358 break;
6359 case 3: /* fsincos */
d3eb5eae 6360 gen_helper_fsincos(cpu_env);
2c0262af
FB
6361 break;
6362 case 5: /* fscale */
d3eb5eae 6363 gen_helper_fscale(cpu_env);
2c0262af
FB
6364 break;
6365 case 4: /* frndint */
d3eb5eae 6366 gen_helper_frndint(cpu_env);
2c0262af
FB
6367 break;
6368 case 6: /* fsin */
d3eb5eae 6369 gen_helper_fsin(cpu_env);
2c0262af
FB
6370 break;
6371 default:
6372 case 7: /* fcos */
d3eb5eae 6373 gen_helper_fcos(cpu_env);
2c0262af
FB
6374 break;
6375 }
6376 break;
6377 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6378 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6379 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6380 {
6381 int op1;
3b46e624 6382
2c0262af
FB
6383 op1 = op & 7;
6384 if (op >= 0x20) {
a7812ae4 6385 gen_helper_fp_arith_STN_ST0(op1, opreg);
2c0262af 6386 if (op >= 0x30)
d3eb5eae 6387 gen_helper_fpop(cpu_env);
2c0262af 6388 } else {
d3eb5eae 6389 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
a7812ae4 6390 gen_helper_fp_arith_ST0_FT0(op1);
2c0262af
FB
6391 }
6392 }
6393 break;
6394 case 0x02: /* fcom */
c169c906 6395 case 0x22: /* fcom2, undocumented op */
d3eb5eae
BS
6396 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6397 gen_helper_fcom_ST0_FT0(cpu_env);
2c0262af
FB
6398 break;
6399 case 0x03: /* fcomp */
c169c906
FB
6400 case 0x23: /* fcomp3, undocumented op */
6401 case 0x32: /* fcomp5, undocumented op */
d3eb5eae
BS
6402 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6403 gen_helper_fcom_ST0_FT0(cpu_env);
6404 gen_helper_fpop(cpu_env);
2c0262af
FB
6405 break;
6406 case 0x15: /* da/5 */
6407 switch(rm) {
6408 case 1: /* fucompp */
d3eb5eae
BS
6409 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6410 gen_helper_fucom_ST0_FT0(cpu_env);
6411 gen_helper_fpop(cpu_env);
6412 gen_helper_fpop(cpu_env);
2c0262af
FB
6413 break;
6414 default:
6415 goto illegal_op;
6416 }
6417 break;
6418 case 0x1c:
6419 switch(rm) {
6420 case 0: /* feni (287 only, just do nop here) */
6421 break;
6422 case 1: /* fdisi (287 only, just do nop here) */
6423 break;
6424 case 2: /* fclex */
d3eb5eae 6425 gen_helper_fclex(cpu_env);
2c0262af
FB
6426 break;
6427 case 3: /* fninit */
d3eb5eae 6428 gen_helper_fninit(cpu_env);
2c0262af
FB
6429 break;
6430 case 4: /* fsetpm (287 only, just do nop here) */
6431 break;
6432 default:
6433 goto illegal_op;
6434 }
6435 break;
6436 case 0x1d: /* fucomi */
773cdfcc 6437 gen_update_cc_op(s);
d3eb5eae
BS
6438 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6439 gen_helper_fucomi_ST0_FT0(cpu_env);
3ca51d07 6440 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6441 break;
6442 case 0x1e: /* fcomi */
773cdfcc 6443 gen_update_cc_op(s);
d3eb5eae
BS
6444 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6445 gen_helper_fcomi_ST0_FT0(cpu_env);
3ca51d07 6446 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 6447 break;
658c8bda 6448 case 0x28: /* ffree sti */
d3eb5eae 6449 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
5fafdf24 6450 break;
2c0262af 6451 case 0x2a: /* fst sti */
d3eb5eae 6452 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
2c0262af
FB
6453 break;
6454 case 0x2b: /* fstp sti */
c169c906
FB
6455 case 0x0b: /* fstp1 sti, undocumented op */
6456 case 0x3a: /* fstp8 sti, undocumented op */
6457 case 0x3b: /* fstp9 sti, undocumented op */
d3eb5eae
BS
6458 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6459 gen_helper_fpop(cpu_env);
2c0262af
FB
6460 break;
6461 case 0x2c: /* fucom st(i) */
d3eb5eae
BS
6462 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6463 gen_helper_fucom_ST0_FT0(cpu_env);
2c0262af
FB
6464 break;
6465 case 0x2d: /* fucomp st(i) */
d3eb5eae
BS
6466 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6467 gen_helper_fucom_ST0_FT0(cpu_env);
6468 gen_helper_fpop(cpu_env);
2c0262af
FB
6469 break;
6470 case 0x33: /* de/3 */
6471 switch(rm) {
6472 case 1: /* fcompp */
d3eb5eae
BS
6473 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6474 gen_helper_fcom_ST0_FT0(cpu_env);
6475 gen_helper_fpop(cpu_env);
6476 gen_helper_fpop(cpu_env);
2c0262af
FB
6477 break;
6478 default:
6479 goto illegal_op;
6480 }
6481 break;
c169c906 6482 case 0x38: /* ffreep sti, undocumented op */
d3eb5eae
BS
6483 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6484 gen_helper_fpop(cpu_env);
c169c906 6485 break;
2c0262af
FB
6486 case 0x3c: /* df/4 */
6487 switch(rm) {
6488 case 0:
d3eb5eae 6489 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
b6abf97d 6490 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
19e6c4b8 6491 gen_op_mov_reg_T0(OT_WORD, R_EAX);
2c0262af
FB
6492 break;
6493 default:
6494 goto illegal_op;
6495 }
6496 break;
6497 case 0x3d: /* fucomip */
773cdfcc 6498 gen_update_cc_op(s);
d3eb5eae
BS
6499 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6500 gen_helper_fucomi_ST0_FT0(cpu_env);
6501 gen_helper_fpop(cpu_env);
3ca51d07 6502 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6503 break;
6504 case 0x3e: /* fcomip */
773cdfcc 6505 gen_update_cc_op(s);
d3eb5eae
BS
6506 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6507 gen_helper_fcomi_ST0_FT0(cpu_env);
6508 gen_helper_fpop(cpu_env);
3ca51d07 6509 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 6510 break;
a2cc3b24
FB
6511 case 0x10 ... 0x13: /* fcmovxx */
6512 case 0x18 ... 0x1b:
6513 {
19e6c4b8 6514 int op1, l1;
d70040bc 6515 static const uint8_t fcmov_cc[8] = {
a2cc3b24
FB
6516 (JCC_B << 1),
6517 (JCC_Z << 1),
6518 (JCC_BE << 1),
6519 (JCC_P << 1),
6520 };
1e4840bf 6521 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
19e6c4b8 6522 l1 = gen_new_label();
dc259201 6523 gen_jcc1_noeob(s, op1, l1);
d3eb5eae 6524 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
19e6c4b8 6525 gen_set_label(l1);
a2cc3b24
FB
6526 }
6527 break;
2c0262af
FB
6528 default:
6529 goto illegal_op;
6530 }
6531 }
6532 break;
6533 /************************/
6534 /* string ops */
6535
6536 case 0xa4: /* movsS */
6537 case 0xa5:
6538 if ((b & 1) == 0)
6539 ot = OT_BYTE;
6540 else
14ce26e7 6541 ot = dflag + OT_WORD;
2c0262af
FB
6542
6543 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6544 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6545 } else {
6546 gen_movs(s, ot);
6547 }
6548 break;
3b46e624 6549
2c0262af
FB
6550 case 0xaa: /* stosS */
6551 case 0xab:
6552 if ((b & 1) == 0)
6553 ot = OT_BYTE;
6554 else
14ce26e7 6555 ot = dflag + OT_WORD;
2c0262af
FB
6556
6557 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6558 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6559 } else {
6560 gen_stos(s, ot);
6561 }
6562 break;
6563 case 0xac: /* lodsS */
6564 case 0xad:
6565 if ((b & 1) == 0)
6566 ot = OT_BYTE;
6567 else
14ce26e7 6568 ot = dflag + OT_WORD;
2c0262af
FB
6569 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6570 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6571 } else {
6572 gen_lods(s, ot);
6573 }
6574 break;
6575 case 0xae: /* scasS */
6576 case 0xaf:
6577 if ((b & 1) == 0)
6578 ot = OT_BYTE;
6579 else
14ce26e7 6580 ot = dflag + OT_WORD;
2c0262af
FB
6581 if (prefixes & PREFIX_REPNZ) {
6582 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6583 } else if (prefixes & PREFIX_REPZ) {
6584 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6585 } else {
6586 gen_scas(s, ot);
2c0262af
FB
6587 }
6588 break;
6589
6590 case 0xa6: /* cmpsS */
6591 case 0xa7:
6592 if ((b & 1) == 0)
6593 ot = OT_BYTE;
6594 else
14ce26e7 6595 ot = dflag + OT_WORD;
2c0262af
FB
6596 if (prefixes & PREFIX_REPNZ) {
6597 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6598 } else if (prefixes & PREFIX_REPZ) {
6599 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6600 } else {
6601 gen_cmps(s, ot);
2c0262af
FB
6602 }
6603 break;
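    /* For SCAS and CMPS the REP prefix also tests ZF: the last argument of
       gen_repz_scas/gen_repz_cmps selects the termination sense, 1 for REPNZ
       (stop when ZF becomes set) and 0 for REPZ (stop when ZF becomes clear). */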
6604 case 0x6c: /* insS */
6605 case 0x6d:
f115e911
FB
6606 if ((b & 1) == 0)
6607 ot = OT_BYTE;
6608 else
6609 ot = dflag ? OT_LONG : OT_WORD;
57fec1fe 6610 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
0573fbfc 6611 gen_op_andl_T0_ffff();
b8b6a50b
FB
6612 gen_check_io(s, ot, pc_start - s->cs_base,
6613 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
f115e911
FB
6614 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6615 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
2c0262af 6616 } else {
f115e911 6617 gen_ins(s, ot);
2e70f6ef
PB
6618 if (use_icount) {
6619 gen_jmp(s, s->pc - s->cs_base);
6620 }
2c0262af
FB
6621 }
6622 break;
6623 case 0x6e: /* outsS */
6624 case 0x6f:
f115e911
FB
6625 if ((b & 1) == 0)
6626 ot = OT_BYTE;
6627 else
6628 ot = dflag ? OT_LONG : OT_WORD;
57fec1fe 6629 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
0573fbfc 6630 gen_op_andl_T0_ffff();
b8b6a50b
FB
6631 gen_check_io(s, ot, pc_start - s->cs_base,
6632 svm_is_rep(prefixes) | 4);
f115e911
FB
6633 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6634 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
2c0262af 6635 } else {
f115e911 6636 gen_outs(s, ot);
2e70f6ef
PB
6637 if (use_icount) {
6638 gen_jmp(s, s->pc - s->cs_base);
6639 }
2c0262af
FB
6640 }
6641 break;
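    /* When icount is enabled, I/O must happen at a deterministic point in the
       instruction stream, so the non-REP ins/outs above (and the in/out cases
       below) end the block with gen_jmp() right after the access. */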
6642
6643 /************************/
6644 /* port I/O */
0573fbfc 6645
2c0262af
FB
6646 case 0xe4:
6647 case 0xe5:
f115e911
FB
6648 if ((b & 1) == 0)
6649 ot = OT_BYTE;
6650 else
6651 ot = dflag ? OT_LONG : OT_WORD;
0af10c86 6652 val = cpu_ldub_code(env, s->pc++);
f115e911 6653 gen_op_movl_T0_im(val);
b8b6a50b
FB
6654 gen_check_io(s, ot, pc_start - s->cs_base,
6655 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
2e70f6ef
PB
6656 if (use_icount)
6657 gen_io_start();
b6abf97d 6658 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
a7812ae4 6659 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
57fec1fe 6660 gen_op_mov_reg_T1(ot, R_EAX);
2e70f6ef
PB
6661 if (use_icount) {
6662 gen_io_end();
6663 gen_jmp(s, s->pc - s->cs_base);
6664 }
2c0262af
FB
6665 break;
6666 case 0xe6:
6667 case 0xe7:
f115e911
FB
6668 if ((b & 1) == 0)
6669 ot = OT_BYTE;
6670 else
6671 ot = dflag ? OT_LONG : OT_WORD;
0af10c86 6672 val = cpu_ldub_code(env, s->pc++);
f115e911 6673 gen_op_movl_T0_im(val);
b8b6a50b
FB
6674 gen_check_io(s, ot, pc_start - s->cs_base,
6675 svm_is_rep(prefixes));
57fec1fe 6676 gen_op_mov_TN_reg(ot, 1, R_EAX);
b8b6a50b 6677
2e70f6ef
PB
6678 if (use_icount)
6679 gen_io_start();
b6abf97d 6680 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
b6abf97d 6681 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
a7812ae4 6682 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
2e70f6ef
PB
6683 if (use_icount) {
6684 gen_io_end();
6685 gen_jmp(s, s->pc - s->cs_base);
6686 }
2c0262af
FB
6687 break;
6688 case 0xec:
6689 case 0xed:
f115e911
FB
6690 if ((b & 1) == 0)
6691 ot = OT_BYTE;
6692 else
6693 ot = dflag ? OT_LONG : OT_WORD;
57fec1fe 6694 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
4f31916f 6695 gen_op_andl_T0_ffff();
b8b6a50b
FB
6696 gen_check_io(s, ot, pc_start - s->cs_base,
6697 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
2e70f6ef
PB
6698 if (use_icount)
6699 gen_io_start();
b6abf97d 6700 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
a7812ae4 6701 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
57fec1fe 6702 gen_op_mov_reg_T1(ot, R_EAX);
2e70f6ef
PB
6703 if (use_icount) {
6704 gen_io_end();
6705 gen_jmp(s, s->pc - s->cs_base);
6706 }
2c0262af
FB
6707 break;
6708 case 0xee:
6709 case 0xef:
f115e911
FB
6710 if ((b & 1) == 0)
6711 ot = OT_BYTE;
6712 else
6713 ot = dflag ? OT_LONG : OT_WORD;
57fec1fe 6714 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
4f31916f 6715 gen_op_andl_T0_ffff();
b8b6a50b
FB
6716 gen_check_io(s, ot, pc_start - s->cs_base,
6717 svm_is_rep(prefixes));
57fec1fe 6718 gen_op_mov_TN_reg(ot, 1, R_EAX);
b8b6a50b 6719
2e70f6ef
PB
6720 if (use_icount)
6721 gen_io_start();
b6abf97d 6722 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
b6abf97d 6723 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
a7812ae4 6724 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
2e70f6ef
PB
6725 if (use_icount) {
6726 gen_io_end();
6727 gen_jmp(s, s->pc - s->cs_base);
6728 }
2c0262af
FB
6729 break;
6730
6731 /************************/
6732 /* control */
6733 case 0xc2: /* ret im */
0af10c86 6734 val = cpu_ldsw_code(env, s->pc);
2c0262af
FB
6735 s->pc += 2;
6736 gen_pop_T0(s);
8f091a59
FB
6737 if (CODE64(s) && s->dflag)
6738 s->dflag = 2;
2c0262af
FB
6739 gen_stack_update(s, val + (2 << s->dflag));
6740 if (s->dflag == 0)
6741 gen_op_andl_T0_ffff();
6742 gen_op_jmp_T0();
6743 gen_eob(s);
6744 break;
6745 case 0xc3: /* ret */
6746 gen_pop_T0(s);
6747 gen_pop_update(s);
6748 if (s->dflag == 0)
6749 gen_op_andl_T0_ffff();
6750 gen_op_jmp_T0();
6751 gen_eob(s);
6752 break;
6753 case 0xca: /* lret im */
0af10c86 6754 val = cpu_ldsw_code(env, s->pc);
2c0262af
FB
6755 s->pc += 2;
6756 do_lret:
6757 if (s->pe && !s->vm86) {
773cdfcc 6758 gen_update_cc_op(s);
14ce26e7 6759 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 6760 gen_helper_lret_protected(cpu_env, tcg_const_i32(s->dflag),
a7812ae4 6761 tcg_const_i32(val));
2c0262af
FB
6762 } else {
6763 gen_stack_A0(s);
6764 /* pop offset */
57fec1fe 6765 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
2c0262af
FB
6766 if (s->dflag == 0)
6767 gen_op_andl_T0_ffff();
6768 /* NOTE: keeping EIP updated is not a problem in case of
6769 exception */
6770 gen_op_jmp_T0();
6771 /* pop selector */
6772 gen_op_addl_A0_im(2 << s->dflag);
57fec1fe 6773 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
3bd7da9e 6774 gen_op_movl_seg_T0_vm(R_CS);
2c0262af
FB
6775 /* add stack offset */
6776 gen_stack_update(s, val + (4 << s->dflag));
6777 }
6778 gen_eob(s);
6779 break;
6780 case 0xcb: /* lret */
6781 val = 0;
6782 goto do_lret;
6783 case 0xcf: /* iret */
872929aa 6784 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
2c0262af
FB
6785 if (!s->pe) {
6786 /* real mode */
2999a0b2 6787 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
3ca51d07 6788 set_cc_op(s, CC_OP_EFLAGS);
f115e911
FB
6789 } else if (s->vm86) {
6790 if (s->iopl != 3) {
6791 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6792 } else {
2999a0b2 6793 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
3ca51d07 6794 set_cc_op(s, CC_OP_EFLAGS);
f115e911 6795 }
2c0262af 6796 } else {
773cdfcc 6797 gen_update_cc_op(s);
14ce26e7 6798 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 6799 gen_helper_iret_protected(cpu_env, tcg_const_i32(s->dflag),
a7812ae4 6800 tcg_const_i32(s->pc - s->cs_base));
3ca51d07 6801 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6802 }
6803 gen_eob(s);
6804 break;
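    /* IRET reloads EFLAGS from the stack inside the helper, so the condition
       codes are switched to CC_OP_EFLAGS afterwards and the block is ended. */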
6805 case 0xe8: /* call im */
6806 {
14ce26e7 6807 if (dflag)
0af10c86 6808 tval = (int32_t)insn_get(env, s, OT_LONG);
14ce26e7 6809 else
0af10c86 6810 tval = (int16_t)insn_get(env, s, OT_WORD);
2c0262af 6811 next_eip = s->pc - s->cs_base;
14ce26e7 6812 tval += next_eip;
2c0262af 6813 if (s->dflag == 0)
14ce26e7 6814 tval &= 0xffff;
99596385
AJ
6815 else if(!CODE64(s))
6816 tval &= 0xffffffff;
14ce26e7 6817 gen_movtl_T0_im(next_eip);
2c0262af 6818 gen_push_T0(s);
14ce26e7 6819 gen_jmp(s, tval);
2c0262af
FB
6820 }
6821 break;
6822 case 0x9a: /* lcall im */
6823 {
6824 unsigned int selector, offset;
3b46e624 6825
14ce26e7
FB
6826 if (CODE64(s))
6827 goto illegal_op;
2c0262af 6828 ot = dflag ? OT_LONG : OT_WORD;
0af10c86
BS
6829 offset = insn_get(env, s, ot);
6830 selector = insn_get(env, s, OT_WORD);
3b46e624 6831
2c0262af 6832 gen_op_movl_T0_im(selector);
14ce26e7 6833 gen_op_movl_T1_imu(offset);
2c0262af
FB
6834 }
6835 goto do_lcall;
ecada8a2 6836 case 0xe9: /* jmp im */
14ce26e7 6837 if (dflag)
0af10c86 6838 tval = (int32_t)insn_get(env, s, OT_LONG);
14ce26e7 6839 else
0af10c86 6840 tval = (int16_t)insn_get(env, s, OT_WORD);
14ce26e7 6841 tval += s->pc - s->cs_base;
2c0262af 6842 if (s->dflag == 0)
14ce26e7 6843 tval &= 0xffff;
32938e12
AJ
6844 else if(!CODE64(s))
6845 tval &= 0xffffffff;
14ce26e7 6846 gen_jmp(s, tval);
2c0262af
FB
6847 break;
6848 case 0xea: /* ljmp im */
6849 {
6850 unsigned int selector, offset;
6851
14ce26e7
FB
6852 if (CODE64(s))
6853 goto illegal_op;
2c0262af 6854 ot = dflag ? OT_LONG : OT_WORD;
0af10c86
BS
6855 offset = insn_get(env, s, ot);
6856 selector = insn_get(env, s, OT_WORD);
3b46e624 6857
2c0262af 6858 gen_op_movl_T0_im(selector);
14ce26e7 6859 gen_op_movl_T1_imu(offset);
2c0262af
FB
6860 }
6861 goto do_ljmp;
6862 case 0xeb: /* jmp Jb */
0af10c86 6863 tval = (int8_t)insn_get(env, s, OT_BYTE);
14ce26e7 6864 tval += s->pc - s->cs_base;
2c0262af 6865 if (s->dflag == 0)
14ce26e7
FB
6866 tval &= 0xffff;
6867 gen_jmp(s, tval);
2c0262af
FB
6868 break;
6869 case 0x70 ... 0x7f: /* jcc Jb */
0af10c86 6870 tval = (int8_t)insn_get(env, s, OT_BYTE);
2c0262af
FB
6871 goto do_jcc;
6872 case 0x180 ... 0x18f: /* jcc Jv */
6873 if (dflag) {
0af10c86 6874 tval = (int32_t)insn_get(env, s, OT_LONG);
2c0262af 6875 } else {
0af10c86 6876 tval = (int16_t)insn_get(env, s, OT_WORD);
2c0262af
FB
6877 }
6878 do_jcc:
6879 next_eip = s->pc - s->cs_base;
14ce26e7 6880 tval += next_eip;
2c0262af 6881 if (s->dflag == 0)
14ce26e7
FB
6882 tval &= 0xffff;
6883 gen_jcc(s, b, tval, next_eip);
2c0262af
FB
6884 break;
6885
6886 case 0x190 ... 0x19f: /* setcc Gv */
0af10c86 6887 modrm = cpu_ldub_code(env, s->pc++);
cc8b6f5b 6888 gen_setcc1(s, b, cpu_T[0]);
0af10c86 6889 gen_ldst_modrm(env, s, modrm, OT_BYTE, OR_TMP0, 1);
2c0262af
FB
6890 break;
6891 case 0x140 ... 0x14f: /* cmov Gv, Ev */
f32d3781
PB
6892 ot = dflag + OT_WORD;
6893 modrm = cpu_ldub_code(env, s->pc++);
6894 reg = ((modrm >> 3) & 7) | rex_r;
6895 gen_cmovcc1(env, s, ot, b, modrm, reg);
2c0262af 6896 break;
3b46e624 6897
2c0262af
FB
6898 /************************/
6899 /* flags */
6900 case 0x9c: /* pushf */
872929aa 6901 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
2c0262af
FB
6902 if (s->vm86 && s->iopl != 3) {
6903 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6904 } else {
773cdfcc 6905 gen_update_cc_op(s);
f0967a1a 6906 gen_helper_read_eflags(cpu_T[0], cpu_env);
2c0262af
FB
6907 gen_push_T0(s);
6908 }
6909 break;
6910 case 0x9d: /* popf */
872929aa 6911 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
2c0262af
FB
6912 if (s->vm86 && s->iopl != 3) {
6913 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6914 } else {
6915 gen_pop_T0(s);
6916 if (s->cpl == 0) {
6917 if (s->dflag) {
f0967a1a
BS
6918 gen_helper_write_eflags(cpu_env, cpu_T[0],
6919 tcg_const_i32((TF_MASK | AC_MASK |
6920 ID_MASK | NT_MASK |
6921 IF_MASK |
6922 IOPL_MASK)));
2c0262af 6923 } else {
f0967a1a
BS
6924 gen_helper_write_eflags(cpu_env, cpu_T[0],
6925 tcg_const_i32((TF_MASK | AC_MASK |
6926 ID_MASK | NT_MASK |
6927 IF_MASK | IOPL_MASK)
6928 & 0xffff));
2c0262af
FB
6929 }
6930 } else {
4136f33c
FB
6931 if (s->cpl <= s->iopl) {
6932 if (s->dflag) {
f0967a1a
BS
6933 gen_helper_write_eflags(cpu_env, cpu_T[0],
6934 tcg_const_i32((TF_MASK |
6935 AC_MASK |
6936 ID_MASK |
6937 NT_MASK |
6938 IF_MASK)));
4136f33c 6939 } else {
f0967a1a
BS
6940 gen_helper_write_eflags(cpu_env, cpu_T[0],
6941 tcg_const_i32((TF_MASK |
6942 AC_MASK |
6943 ID_MASK |
6944 NT_MASK |
6945 IF_MASK)
6946 & 0xffff));
4136f33c 6947 }
2c0262af 6948 } else {
4136f33c 6949 if (s->dflag) {
f0967a1a
BS
6950 gen_helper_write_eflags(cpu_env, cpu_T[0],
6951 tcg_const_i32((TF_MASK | AC_MASK |
6952 ID_MASK | NT_MASK)));
4136f33c 6953 } else {
f0967a1a
BS
6954 gen_helper_write_eflags(cpu_env, cpu_T[0],
6955 tcg_const_i32((TF_MASK | AC_MASK |
6956 ID_MASK | NT_MASK)
6957 & 0xffff));
4136f33c 6958 }
2c0262af
FB
6959 }
6960 }
6961 gen_pop_update(s);
3ca51d07 6962 set_cc_op(s, CC_OP_EFLAGS);
a9321a4d 6963 /* abort translation because TF/AC flag may change */
14ce26e7 6964 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
6965 gen_eob(s);
6966 }
6967 break;
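        /* sahf/lahf below exist in 64-bit mode only when CPUID reports
           LAHF_LM; sahf merges S/Z/A/P/C from AH into cc_src while keeping O,
           and lahf ors in 0x02 because bit 1 of EFLAGS always reads as 1. */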
6968 case 0x9e: /* sahf */
12e26b75 6969 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
14ce26e7 6970 goto illegal_op;
57fec1fe 6971 gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
d229edce 6972 gen_compute_eflags(s);
bd7a7b33
FB
6973 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6974 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6975 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
2c0262af
FB
6976 break;
6977 case 0x9f: /* lahf */
12e26b75 6978 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
14ce26e7 6979 goto illegal_op;
d229edce 6980 gen_compute_eflags(s);
bd7a7b33 6981 /* Note: gen_compute_eflags() only gives the condition codes */
d229edce 6982 tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
57fec1fe 6983 gen_op_mov_reg_T0(OT_BYTE, R_AH);
2c0262af
FB
6984 break;
6985 case 0xf5: /* cmc */
d229edce 6986 gen_compute_eflags(s);
bd7a7b33 6987 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
2c0262af
FB
6988 break;
6989 case 0xf8: /* clc */
d229edce 6990 gen_compute_eflags(s);
bd7a7b33 6991 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
2c0262af
FB
6992 break;
6993 case 0xf9: /* stc */
d229edce 6994 gen_compute_eflags(s);
bd7a7b33 6995 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
2c0262af
FB
6996 break;
6997 case 0xfc: /* cld */
b6abf97d 6998 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
317ac620 6999 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
2c0262af
FB
7000 break;
7001 case 0xfd: /* std */
b6abf97d 7002 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
317ac620 7003 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
2c0262af
FB
7004 break;
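        /* cld/std above store the direction flag as +1/-1 in env->df, so the
           string instruction translation can scale it by the operand size to
           get the per-element step for ESI/EDI. */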
7005
7006 /************************/
7007 /* bit operations */
7008 case 0x1ba: /* bt/bts/btr/btc Gv, im */
14ce26e7 7009 ot = dflag + OT_WORD;
0af10c86 7010 modrm = cpu_ldub_code(env, s->pc++);
33698e5f 7011 op = (modrm >> 3) & 7;
2c0262af 7012 mod = (modrm >> 6) & 3;
14ce26e7 7013 rm = (modrm & 7) | REX_B(s);
2c0262af 7014 if (mod != 3) {
14ce26e7 7015 s->rip_offset = 1;
0af10c86 7016 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
57fec1fe 7017 gen_op_ld_T0_A0(ot + s->mem_index);
2c0262af 7018 } else {
57fec1fe 7019 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af
FB
7020 }
7021 /* load shift */
0af10c86 7022 val = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7023 gen_op_movl_T1_im(val);
7024 if (op < 4)
7025 goto illegal_op;
7026 op -= 4;
f484d386 7027 goto bt_op;
2c0262af
FB
7028 case 0x1a3: /* bt Gv, Ev */
7029 op = 0;
7030 goto do_btx;
7031 case 0x1ab: /* bts */
7032 op = 1;
7033 goto do_btx;
7034 case 0x1b3: /* btr */
7035 op = 2;
7036 goto do_btx;
7037 case 0x1bb: /* btc */
7038 op = 3;
7039 do_btx:
14ce26e7 7040 ot = dflag + OT_WORD;
0af10c86 7041 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 7042 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 7043 mod = (modrm >> 6) & 3;
14ce26e7 7044 rm = (modrm & 7) | REX_B(s);
57fec1fe 7045 gen_op_mov_TN_reg(OT_LONG, 1, reg);
2c0262af 7046 if (mod != 3) {
0af10c86 7047 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af 7048 /* specific case: we need to add a displacement */
f484d386
FB
7049 gen_exts(ot, cpu_T[1]);
7050 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
7051 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
7052 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
57fec1fe 7053 gen_op_ld_T0_A0(ot + s->mem_index);
2c0262af 7054 } else {
57fec1fe 7055 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af 7056 }
f484d386
FB
7057 bt_op:
7058 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
7059 switch(op) {
7060 case 0:
7061 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
7062 tcg_gen_movi_tl(cpu_cc_dst, 0);
7063 break;
7064 case 1:
7065 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
7066 tcg_gen_movi_tl(cpu_tmp0, 1);
7067 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
7068 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
7069 break;
7070 case 2:
7071 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
7072 tcg_gen_movi_tl(cpu_tmp0, 1);
7073 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
7074 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
7075 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
7076 break;
7077 default:
7078 case 3:
7079 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
7080 tcg_gen_movi_tl(cpu_tmp0, 1);
7081 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
7082 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
7083 break;
7084 }
3ca51d07 7085 set_cc_op(s, CC_OP_SARB + ot);
2c0262af
FB
7086 if (op != 0) {
7087 if (mod != 3)
57fec1fe 7088 gen_op_st_T0_A0(ot + s->mem_index);
2c0262af 7089 else
57fec1fe 7090 gen_op_mov_reg_T0(ot, rm);
f484d386
FB
7091 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
7092 tcg_gen_movi_tl(cpu_cc_dst, 0);
2c0262af
FB
7093 }
7094 break;
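        /* For the bt/bts/btr/btc group above: register operands take the bit
           index modulo the operand width, while memory operands first advance
           A0 by (index / width) elements; the tested bit is shifted into
           cc_src and cc_op becomes CC_OP_SARB + ot, so CF is recovered
           lazily, just as after a shift. */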
321c5351
RH
7095 case 0x1bc: /* bsf / tzcnt */
7096 case 0x1bd: /* bsr / lzcnt */
7097 ot = dflag + OT_WORD;
7098 modrm = cpu_ldub_code(env, s->pc++);
7099 reg = ((modrm >> 3) & 7) | rex_r;
7100 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
7101 gen_extu(ot, cpu_T[0]);
7102
7103 /* Note that lzcnt and tzcnt are in different extensions. */
7104 if ((prefixes & PREFIX_REPZ)
7105 && (b & 1
7106 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
7107 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
7108 int size = 8 << ot;
7109 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
7110 if (b & 1) {
7111 /* For lzcnt, reduce the target_ulong result by the
7112 number of zeros that we expect to find at the top. */
7113 gen_helper_clz(cpu_T[0], cpu_T[0]);
7114 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
6191b059 7115 } else {
321c5351
RH
7116 /* For tzcnt, a zero input must return the operand size:
7117 force all bits outside the operand size to 1. */
7118 target_ulong mask = (target_ulong)-2 << (size - 1);
7119 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
7120 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6191b059 7121 }
321c5351
RH
7122 /* For lzcnt/tzcnt, C and Z bits are defined and are
7123 related to the result. */
7124 gen_op_update1_cc();
7125 set_cc_op(s, CC_OP_BMILGB + ot);
7126 } else {
7127 /* For bsr/bsf, only the Z bit is defined and it is related
7128 to the input and not the result. */
7129 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
7130 set_cc_op(s, CC_OP_LOGICB + ot);
7131 if (b & 1) {
7132 /* For bsr, return the bit index of the first 1 bit,
7133 not the count of leading zeros. */
7134 gen_helper_clz(cpu_T[0], cpu_T[0]);
7135 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
7136 } else {
7137 gen_helper_ctz(cpu_T[0], cpu_T[0]);
7138 }
7139 /* ??? The manual says that the output is undefined when the
7140 input is zero, but real hardware leaves it unchanged, and
7141 real programs appear to depend on that. */
7142 tcg_gen_movi_tl(cpu_tmp0, 0);
7143 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
7144 cpu_regs[reg], cpu_T[0]);
6191b059 7145 }
321c5351 7146 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
7147 break;
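        /* In the bsf/bsr case above, an F3 prefix selects tzcnt/lzcnt when
           BMI1/ABM is advertised; the clz/ctz helpers work on a full
           target_ulong, so lzcnt subtracts the excess leading zeros and bsr
           xors with TARGET_LONG_BITS - 1 to turn a zero count into a bit
           index.  The movcond keeps the old destination when the source is
           zero, matching observed hardware behaviour for bsf/bsr. */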
7148 /************************/
7149 /* bcd */
7150 case 0x27: /* daa */
14ce26e7
FB
7151 if (CODE64(s))
7152 goto illegal_op;
773cdfcc 7153 gen_update_cc_op(s);
7923057b 7154 gen_helper_daa(cpu_env);
3ca51d07 7155 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
7156 break;
7157 case 0x2f: /* das */
14ce26e7
FB
7158 if (CODE64(s))
7159 goto illegal_op;
773cdfcc 7160 gen_update_cc_op(s);
7923057b 7161 gen_helper_das(cpu_env);
3ca51d07 7162 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
7163 break;
7164 case 0x37: /* aaa */
14ce26e7
FB
7165 if (CODE64(s))
7166 goto illegal_op;
773cdfcc 7167 gen_update_cc_op(s);
7923057b 7168 gen_helper_aaa(cpu_env);
3ca51d07 7169 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
7170 break;
7171 case 0x3f: /* aas */
14ce26e7
FB
7172 if (CODE64(s))
7173 goto illegal_op;
773cdfcc 7174 gen_update_cc_op(s);
7923057b 7175 gen_helper_aas(cpu_env);
3ca51d07 7176 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
7177 break;
7178 case 0xd4: /* aam */
14ce26e7
FB
7179 if (CODE64(s))
7180 goto illegal_op;
0af10c86 7181 val = cpu_ldub_code(env, s->pc++);
b6d7c3db
TS
7182 if (val == 0) {
7183 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
7184 } else {
7923057b 7185 gen_helper_aam(cpu_env, tcg_const_i32(val));
3ca51d07 7186 set_cc_op(s, CC_OP_LOGICB);
b6d7c3db 7187 }
2c0262af
FB
7188 break;
7189 case 0xd5: /* aad */
14ce26e7
FB
7190 if (CODE64(s))
7191 goto illegal_op;
0af10c86 7192 val = cpu_ldub_code(env, s->pc++);
7923057b 7193 gen_helper_aad(cpu_env, tcg_const_i32(val));
3ca51d07 7194 set_cc_op(s, CC_OP_LOGICB);
2c0262af
FB
7195 break;
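        /* The BCD instructions above are invalid in 64-bit mode, hence the
           CODE64 checks; they go through helpers because they rewrite AL/AH
           and several flags at once, so cc_op is flushed before the call and
           reset (CC_OP_EFLAGS or CC_OP_LOGICB) afterwards.  aam with a zero
           immediate raises #DE. */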
7196 /************************/
7197 /* misc */
7198 case 0x90: /* nop */
ab1f142b 7199 /* XXX: correct lock test for all insn */
7418027e 7200 if (prefixes & PREFIX_LOCK) {
ab1f142b 7201 goto illegal_op;
7418027e
RH
7202 }
7203 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
7204 if (REX_B(s)) {
7205 goto do_xchg_reg_eax;
7206 }
0573fbfc
TS
7207 if (prefixes & PREFIX_REPZ) {
7208 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
7209 }
2c0262af
FB
7210 break;
7211 case 0x9b: /* fwait */
5fafdf24 7212 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
7eee2a50
FB
7213 (HF_MP_MASK | HF_TS_MASK)) {
7214 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
2ee73ac3 7215 } else {
773cdfcc 7216 gen_update_cc_op(s);
14ce26e7 7217 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 7218 gen_helper_fwait(cpu_env);
7eee2a50 7219 }
2c0262af
FB
7220 break;
7221 case 0xcc: /* int3 */
7222 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
7223 break;
7224 case 0xcd: /* int N */
0af10c86 7225 val = cpu_ldub_code(env, s->pc++);
f115e911 7226 if (s->vm86 && s->iopl != 3) {
5fafdf24 7227 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
f115e911
FB
7228 } else {
7229 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
7230 }
2c0262af
FB
7231 break;
7232 case 0xce: /* into */
14ce26e7
FB
7233 if (CODE64(s))
7234 goto illegal_op;
773cdfcc 7235 gen_update_cc_op(s);
a8ede8ba 7236 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7237 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
2c0262af 7238 break;
0b97134b 7239#ifdef WANT_ICEBP
2c0262af 7240 case 0xf1: /* icebp (undocumented, exits to external debugger) */
872929aa 7241 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
aba9d61e 7242#if 1
2c0262af 7243 gen_debug(s, pc_start - s->cs_base);
aba9d61e
FB
7244#else
7245 /* start debug */
0af10c86 7246 tb_flush(env);
24537a01 7247 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
aba9d61e 7248#endif
2c0262af 7249 break;
0b97134b 7250#endif
2c0262af
FB
7251 case 0xfa: /* cli */
7252 if (!s->vm86) {
7253 if (s->cpl <= s->iopl) {
f0967a1a 7254 gen_helper_cli(cpu_env);
2c0262af
FB
7255 } else {
7256 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7257 }
7258 } else {
7259 if (s->iopl == 3) {
f0967a1a 7260 gen_helper_cli(cpu_env);
2c0262af
FB
7261 } else {
7262 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7263 }
7264 }
7265 break;
7266 case 0xfb: /* sti */
7267 if (!s->vm86) {
7268 if (s->cpl <= s->iopl) {
7269 gen_sti:
f0967a1a 7270 gen_helper_sti(cpu_env);
2c0262af 7271 /* interrupts are enabled only after the insn following sti */
a2cc3b24
FB
7272 /* If several consecutive insns keep irqs inhibited, only the
7273 _first_ one sets the inhibit flag */
7274 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
f0967a1a 7275 gen_helper_set_inhibit_irq(cpu_env);
2c0262af 7276 /* give a chance to handle pending irqs */
14ce26e7 7277 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
7278 gen_eob(s);
7279 } else {
7280 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7281 }
7282 } else {
7283 if (s->iopl == 3) {
7284 goto gen_sti;
7285 } else {
7286 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7287 }
7288 }
7289 break;
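        /* cli/sti above: outside vm86 they are allowed when CPL <= IOPL, and
           in vm86 mode only with IOPL 3; otherwise #GP.  sti additionally
           arms the one-instruction interrupt shadow (set_inhibit_irq) and
           ends the TB so a pending interrupt can be taken right after the
           following instruction. */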
7290 case 0x62: /* bound */
14ce26e7
FB
7291 if (CODE64(s))
7292 goto illegal_op;
2c0262af 7293 ot = dflag ? OT_LONG : OT_WORD;
0af10c86 7294 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7295 reg = (modrm >> 3) & 7;
7296 mod = (modrm >> 6) & 3;
7297 if (mod == 3)
7298 goto illegal_op;
57fec1fe 7299 gen_op_mov_TN_reg(ot, 0, reg);
0af10c86 7300 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
14ce26e7 7301 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 7302 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
92fc4b58
BS
7303 if (ot == OT_WORD) {
7304 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
7305 } else {
7306 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
7307 }
2c0262af
FB
7308 break;
7309 case 0x1c8 ... 0x1cf: /* bswap reg */
14ce26e7
FB
7310 reg = (b & 7) | REX_B(s);
7311#ifdef TARGET_X86_64
7312 if (dflag == 2) {
57fec1fe 7313 gen_op_mov_TN_reg(OT_QUAD, 0, reg);
66896cb8 7314 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
57fec1fe 7315 gen_op_mov_reg_T0(OT_QUAD, reg);
5fafdf24 7316 } else
8777643e 7317#endif
57fec1fe
FB
7318 {
7319 gen_op_mov_TN_reg(OT_LONG, 0, reg);
8777643e
AJ
7320 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
7321 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
57fec1fe 7322 gen_op_mov_reg_T0(OT_LONG, reg);
14ce26e7 7323 }
2c0262af
FB
7324 break;
7325 case 0xd6: /* salc */
14ce26e7
FB
7326 if (CODE64(s))
7327 goto illegal_op;
cc8b6f5b 7328 gen_compute_eflags_c(s, cpu_T[0]);
bd7a7b33
FB
7329 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
7330 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
2c0262af
FB
7331 break;
7332 case 0xe0: /* loopnz */
7333 case 0xe1: /* loopz */
2c0262af
FB
7334 case 0xe2: /* loop */
7335 case 0xe3: /* jecxz */
14ce26e7 7336 {
6e0d8677 7337 int l1, l2, l3;
14ce26e7 7338
0af10c86 7339 tval = (int8_t)insn_get(env, s, OT_BYTE);
14ce26e7
FB
7340 next_eip = s->pc - s->cs_base;
7341 tval += next_eip;
7342 if (s->dflag == 0)
7343 tval &= 0xffff;
3b46e624 7344
14ce26e7
FB
7345 l1 = gen_new_label();
7346 l2 = gen_new_label();
6e0d8677 7347 l3 = gen_new_label();
14ce26e7 7348 b &= 3;
6e0d8677
FB
7349 switch(b) {
7350 case 0: /* loopnz */
7351 case 1: /* loopz */
6e0d8677
FB
7352 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7353 gen_op_jz_ecx(s->aflag, l3);
5bdb91b0 7354 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
6e0d8677
FB
7355 break;
7356 case 2: /* loop */
7357 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7358 gen_op_jnz_ecx(s->aflag, l1);
7359 break;
7360 default:
7361 case 3: /* jcxz */
7362 gen_op_jz_ecx(s->aflag, l1);
7363 break;
14ce26e7
FB
7364 }
7365
6e0d8677 7366 gen_set_label(l3);
14ce26e7 7367 gen_jmp_im(next_eip);
8e1c85e3 7368 tcg_gen_br(l2);
6e0d8677 7369
14ce26e7
FB
7370 gen_set_label(l1);
7371 gen_jmp_im(tval);
7372 gen_set_label(l2);
7373 gen_eob(s);
7374 }
2c0262af
FB
7375 break;
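        /* The loop/jecxz block above uses three labels: l1 is the taken path
           (EIP = tval), l3 catches the ECX == 0 exit of loopz/loopnz, and the
           not-taken path falls through into l3 as well (EIP = next_eip); both
           paths meet at l2, where the TB is ended with gen_eob, so no
           direct-block chaining is attempted here. */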
7376 case 0x130: /* wrmsr */
7377 case 0x132: /* rdmsr */
7378 if (s->cpl != 0) {
7379 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7380 } else {
773cdfcc 7381 gen_update_cc_op(s);
872929aa 7382 gen_jmp_im(pc_start - s->cs_base);
0573fbfc 7383 if (b & 2) {
4a7443be 7384 gen_helper_rdmsr(cpu_env);
0573fbfc 7385 } else {
4a7443be 7386 gen_helper_wrmsr(cpu_env);
0573fbfc 7387 }
2c0262af
FB
7388 }
7389 break;
7390 case 0x131: /* rdtsc */
773cdfcc 7391 gen_update_cc_op(s);
ecada8a2 7392 gen_jmp_im(pc_start - s->cs_base);
efade670
PB
7393 if (use_icount)
7394 gen_io_start();
4a7443be 7395 gen_helper_rdtsc(cpu_env);
efade670
PB
7396 if (use_icount) {
7397 gen_io_end();
7398 gen_jmp(s, s->pc - s->cs_base);
7399 }
2c0262af 7400 break;
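        /* rdtsc above (and rdtscp further down) count as I/O when icount is
           enabled: the helper is bracketed with gen_io_start/gen_io_end and
           the TB ends right after, so the deterministic instruction counter
           stays in sync with the value the guest reads. */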
df01e0fc 7401 case 0x133: /* rdpmc */
773cdfcc 7402 gen_update_cc_op(s);
df01e0fc 7403 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7404 gen_helper_rdpmc(cpu_env);
df01e0fc 7405 break;
023fe10d 7406 case 0x134: /* sysenter */
2436b61a 7407 /* On Intel, SYSENTER is also valid in 64-bit mode */
0af10c86 7408 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
14ce26e7 7409 goto illegal_op;
023fe10d
FB
7410 if (!s->pe) {
7411 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7412 } else {
728d803b 7413 gen_update_cc_op(s);
14ce26e7 7414 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7415 gen_helper_sysenter(cpu_env);
023fe10d
FB
7416 gen_eob(s);
7417 }
7418 break;
7419 case 0x135: /* sysexit */
2436b61a 7420 /* On Intel, SYSEXIT is also valid in 64-bit mode */
0af10c86 7421 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
14ce26e7 7422 goto illegal_op;
023fe10d
FB
7423 if (!s->pe) {
7424 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7425 } else {
728d803b 7426 gen_update_cc_op(s);
14ce26e7 7427 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7428 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag));
023fe10d
FB
7429 gen_eob(s);
7430 }
7431 break;
14ce26e7
FB
7432#ifdef TARGET_X86_64
7433 case 0x105: /* syscall */
7434 /* XXX: is it usable in real mode ? */
728d803b 7435 gen_update_cc_op(s);
14ce26e7 7436 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7437 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
14ce26e7
FB
7438 gen_eob(s);
7439 break;
7440 case 0x107: /* sysret */
7441 if (!s->pe) {
7442 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7443 } else {
728d803b 7444 gen_update_cc_op(s);
14ce26e7 7445 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7446 gen_helper_sysret(cpu_env, tcg_const_i32(s->dflag));
aba9d61e 7447 /* condition codes are modified only in long mode */
3ca51d07
RH
7448 if (s->lma) {
7449 set_cc_op(s, CC_OP_EFLAGS);
7450 }
14ce26e7
FB
7451 gen_eob(s);
7452 }
7453 break;
7454#endif
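        /* sysenter/sysexit/syscall/sysret above all change privilege and
           segment state behind the translator's back, so each one flushes
           cc_op, reloads EIP and ends the TB after its helper; sysenter and
           sysexit are accepted in 64-bit mode only for an Intel CPUID vendor,
           and sysret sets CC_OP_EFLAGS only in long mode. */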
2c0262af 7455 case 0x1a2: /* cpuid */
773cdfcc 7456 gen_update_cc_op(s);
9575cb94 7457 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7458 gen_helper_cpuid(cpu_env);
2c0262af
FB
7459 break;
7460 case 0xf4: /* hlt */
7461 if (s->cpl != 0) {
7462 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7463 } else {
773cdfcc 7464 gen_update_cc_op(s);
94451178 7465 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7466 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
5779406a 7467 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
7468 }
7469 break;
7470 case 0x100:
0af10c86 7471 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7472 mod = (modrm >> 6) & 3;
7473 op = (modrm >> 3) & 7;
7474 switch(op) {
7475 case 0: /* sldt */
f115e911
FB
7476 if (!s->pe || s->vm86)
7477 goto illegal_op;
872929aa 7478 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
651ba608 7479 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
2c0262af
FB
7480 ot = OT_WORD;
7481 if (mod == 3)
7482 ot += s->dflag;
0af10c86 7483 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
7484 break;
7485 case 2: /* lldt */
f115e911
FB
7486 if (!s->pe || s->vm86)
7487 goto illegal_op;
2c0262af
FB
7488 if (s->cpl != 0) {
7489 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7490 } else {
872929aa 7491 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
0af10c86 7492 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
14ce26e7 7493 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 7494 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 7495 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
2c0262af
FB
7496 }
7497 break;
7498 case 1: /* str */
f115e911
FB
7499 if (!s->pe || s->vm86)
7500 goto illegal_op;
872929aa 7501 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
651ba608 7502 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
2c0262af
FB
7503 ot = OT_WORD;
7504 if (mod == 3)
7505 ot += s->dflag;
0af10c86 7506 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
7507 break;
7508 case 3: /* ltr */
f115e911
FB
7509 if (!s->pe || s->vm86)
7510 goto illegal_op;
2c0262af
FB
7511 if (s->cpl != 0) {
7512 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7513 } else {
872929aa 7514 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
0af10c86 7515 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
14ce26e7 7516 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 7517 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 7518 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
2c0262af
FB
7519 }
7520 break;
7521 case 4: /* verr */
7522 case 5: /* verw */
f115e911
FB
7523 if (!s->pe || s->vm86)
7524 goto illegal_op;
0af10c86 7525 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
773cdfcc 7526 gen_update_cc_op(s);
2999a0b2
BS
7527 if (op == 4) {
7528 gen_helper_verr(cpu_env, cpu_T[0]);
7529 } else {
7530 gen_helper_verw(cpu_env, cpu_T[0]);
7531 }
3ca51d07 7532 set_cc_op(s, CC_OP_EFLAGS);
f115e911 7533 break;
2c0262af
FB
7534 default:
7535 goto illegal_op;
7536 }
7537 break;
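        /* The 0f 00 group above requires protected mode outside vm86 for all
           of its members; lldt/ltr are additionally CPL-0 only, sldt/str just
           copy the selector out of env, and verr/verw only update ZF, hence
           the plain CC_OP_EFLAGS at the end. */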
7538 case 0x101:
0af10c86 7539 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7540 mod = (modrm >> 6) & 3;
7541 op = (modrm >> 3) & 7;
3d7374c5 7542 rm = modrm & 7;
2c0262af
FB
7543 switch(op) {
7544 case 0: /* sgdt */
2c0262af
FB
7545 if (mod == 3)
7546 goto illegal_op;
872929aa 7547 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
0af10c86 7548 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
651ba608 7549 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
57fec1fe 7550 gen_op_st_T0_A0(OT_WORD + s->mem_index);
aba9d61e 7551 gen_add_A0_im(s, 2);
651ba608 7552 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
2c0262af
FB
7553 if (!s->dflag)
7554 gen_op_andl_T0_im(0xffffff);
57fec1fe 7555 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
2c0262af 7556 break;
3d7374c5
FB
7557 case 1:
7558 if (mod == 3) {
7559 switch (rm) {
7560 case 0: /* monitor */
7561 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7562 s->cpl != 0)
7563 goto illegal_op;
773cdfcc 7564 gen_update_cc_op(s);
3d7374c5
FB
7565 gen_jmp_im(pc_start - s->cs_base);
7566#ifdef TARGET_X86_64
7567 if (s->aflag == 2) {
bbf662ee 7568 gen_op_movq_A0_reg(R_EAX);
5fafdf24 7569 } else
3d7374c5
FB
7570#endif
7571 {
bbf662ee 7572 gen_op_movl_A0_reg(R_EAX);
3d7374c5
FB
7573 if (s->aflag == 0)
7574 gen_op_andl_A0_ffff();
7575 }
7576 gen_add_A0_ds_seg(s);
4a7443be 7577 gen_helper_monitor(cpu_env, cpu_A0);
3d7374c5
FB
7578 break;
7579 case 1: /* mwait */
7580 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7581 s->cpl != 0)
7582 goto illegal_op;
728d803b 7583 gen_update_cc_op(s);
94451178 7584 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7585 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
3d7374c5
FB
7586 gen_eob(s);
7587 break;
a9321a4d
PA
7588 case 2: /* clac */
7589 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7590 s->cpl != 0) {
7591 goto illegal_op;
7592 }
7593 gen_helper_clac(cpu_env);
7594 gen_jmp_im(s->pc - s->cs_base);
7595 gen_eob(s);
7596 break;
7597 case 3: /* stac */
7598 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7599 s->cpl != 0) {
7600 goto illegal_op;
7601 }
7602 gen_helper_stac(cpu_env);
7603 gen_jmp_im(s->pc - s->cs_base);
7604 gen_eob(s);
7605 break;
3d7374c5
FB
7606 default:
7607 goto illegal_op;
7608 }
7609 } else { /* sidt */
872929aa 7610 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
0af10c86 7611 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
651ba608 7612 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
57fec1fe 7613 gen_op_st_T0_A0(OT_WORD + s->mem_index);
3d7374c5 7614 gen_add_A0_im(s, 2);
651ba608 7615 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
3d7374c5
FB
7616 if (!s->dflag)
7617 gen_op_andl_T0_im(0xffffff);
57fec1fe 7618 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
3d7374c5
FB
7619 }
7620 break;
2c0262af
FB
7621 case 2: /* lgdt */
7622 case 3: /* lidt */
0573fbfc 7623 if (mod == 3) {
773cdfcc 7624 gen_update_cc_op(s);
872929aa 7625 gen_jmp_im(pc_start - s->cs_base);
0573fbfc
TS
7626 switch(rm) {
7627 case 0: /* VMRUN */
872929aa
FB
7628 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7629 goto illegal_op;
7630 if (s->cpl != 0) {
7631 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
0573fbfc 7632 break;
872929aa 7633 } else {
052e80d5 7634 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag),
a7812ae4 7635 tcg_const_i32(s->pc - pc_start));
db620f46 7636 tcg_gen_exit_tb(0);
5779406a 7637 s->is_jmp = DISAS_TB_JUMP;
872929aa 7638 }
0573fbfc
TS
7639 break;
7640 case 1: /* VMMCALL */
872929aa
FB
7641 if (!(s->flags & HF_SVME_MASK))
7642 goto illegal_op;
052e80d5 7643 gen_helper_vmmcall(cpu_env);
0573fbfc
TS
7644 break;
7645 case 2: /* VMLOAD */
872929aa
FB
7646 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7647 goto illegal_op;
7648 if (s->cpl != 0) {
7649 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7650 break;
7651 } else {
052e80d5 7652 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag));
872929aa 7653 }
0573fbfc
TS
7654 break;
7655 case 3: /* VMSAVE */
872929aa
FB
7656 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7657 goto illegal_op;
7658 if (s->cpl != 0) {
7659 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7660 break;
7661 } else {
052e80d5 7662 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag));
872929aa 7663 }
0573fbfc
TS
7664 break;
7665 case 4: /* STGI */
872929aa
FB
7666 if ((!(s->flags & HF_SVME_MASK) &&
7667 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7668 !s->pe)
7669 goto illegal_op;
7670 if (s->cpl != 0) {
7671 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7672 break;
7673 } else {
052e80d5 7674 gen_helper_stgi(cpu_env);
872929aa 7675 }
0573fbfc
TS
7676 break;
7677 case 5: /* CLGI */
872929aa
FB
7678 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7679 goto illegal_op;
7680 if (s->cpl != 0) {
7681 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7682 break;
7683 } else {
052e80d5 7684 gen_helper_clgi(cpu_env);
872929aa 7685 }
0573fbfc
TS
7686 break;
7687 case 6: /* SKINIT */
872929aa
FB
7688 if ((!(s->flags & HF_SVME_MASK) &&
7689 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7690 !s->pe)
7691 goto illegal_op;
052e80d5 7692 gen_helper_skinit(cpu_env);
0573fbfc
TS
7693 break;
7694 case 7: /* INVLPGA */
872929aa
FB
7695 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7696 goto illegal_op;
7697 if (s->cpl != 0) {
7698 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7699 break;
7700 } else {
052e80d5 7701 gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag));
872929aa 7702 }
0573fbfc
TS
7703 break;
7704 default:
7705 goto illegal_op;
7706 }
7707 } else if (s->cpl != 0) {
2c0262af
FB
7708 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7709 } else {
872929aa
FB
7710 gen_svm_check_intercept(s, pc_start,
7711 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
0af10c86 7712 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
57fec1fe 7713 gen_op_ld_T1_A0(OT_WORD + s->mem_index);
aba9d61e 7714 gen_add_A0_im(s, 2);
57fec1fe 7715 gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
2c0262af
FB
7716 if (!s->dflag)
7717 gen_op_andl_T0_im(0xffffff);
7718 if (op == 2) {
651ba608
FB
7719 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7720 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
2c0262af 7721 } else {
651ba608
FB
7722 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7723 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
2c0262af
FB
7724 }
7725 }
7726 break;
7727 case 4: /* smsw */
872929aa 7728 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
e2542fe2 7729#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
f60d2728 7730 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7731#else
651ba608 7732 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
f60d2728 7733#endif
0af10c86 7734 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 1);
2c0262af
FB
7735 break;
7736 case 6: /* lmsw */
7737 if (s->cpl != 0) {
7738 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7739 } else {
872929aa 7740 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
0af10c86 7741 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
4a7443be 7742 gen_helper_lmsw(cpu_env, cpu_T[0]);
14ce26e7 7743 gen_jmp_im(s->pc - s->cs_base);
d71b9a8b 7744 gen_eob(s);
2c0262af
FB
7745 }
7746 break;
1b050077
AP
7747 case 7:
7748 if (mod != 3) { /* invlpg */
7749 if (s->cpl != 0) {
7750 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7751 } else {
773cdfcc 7752 gen_update_cc_op(s);
1b050077 7753 gen_jmp_im(pc_start - s->cs_base);
0af10c86 7754 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4a7443be 7755 gen_helper_invlpg(cpu_env, cpu_A0);
1b050077
AP
7756 gen_jmp_im(s->pc - s->cs_base);
7757 gen_eob(s);
7758 }
2c0262af 7759 } else {
1b050077
AP
7760 switch (rm) {
7761 case 0: /* swapgs */
14ce26e7 7762#ifdef TARGET_X86_64
1b050077
AP
7763 if (CODE64(s)) {
7764 if (s->cpl != 0) {
7765 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7766 } else {
7767 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7768 offsetof(CPUX86State,segs[R_GS].base));
7769 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7770 offsetof(CPUX86State,kernelgsbase));
7771 tcg_gen_st_tl(cpu_T[1], cpu_env,
7772 offsetof(CPUX86State,segs[R_GS].base));
7773 tcg_gen_st_tl(cpu_T[0], cpu_env,
7774 offsetof(CPUX86State,kernelgsbase));
7775 }
5fafdf24 7776 } else
14ce26e7
FB
7777#endif
7778 {
7779 goto illegal_op;
7780 }
1b050077
AP
7781 break;
7782 case 1: /* rdtscp */
7783 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7784 goto illegal_op;
773cdfcc 7785 gen_update_cc_op(s);
9575cb94 7786 gen_jmp_im(pc_start - s->cs_base);
1b050077
AP
7787 if (use_icount)
7788 gen_io_start();
4a7443be 7789 gen_helper_rdtscp(cpu_env);
1b050077
AP
7790 if (use_icount) {
7791 gen_io_end();
7792 gen_jmp(s, s->pc - s->cs_base);
7793 }
7794 break;
7795 default:
7796 goto illegal_op;
14ce26e7 7797 }
2c0262af
FB
7798 }
7799 break;
7800 default:
7801 goto illegal_op;
7802 }
7803 break;
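        /* In the 0f 01 group above, the mod == 3 encodings are special: /1
           covers monitor/mwait and clac/stac, /2 and /3 carry the SVM opcodes
           (vmrun ... invlpga) gated on HF_SVME_MASK, and /7 turns into
           swapgs/rdtscp.  The memory forms are sgdt/sidt/lgdt/lidt plus
           smsw/lmsw/invlpg; lmsw and invlpg end the TB because they change
           state the translated code depends on. */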
3415a4dd
FB
7804 case 0x108: /* invd */
7805 case 0x109: /* wbinvd */
7806 if (s->cpl != 0) {
7807 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7808 } else {
872929aa 7809 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
3415a4dd
FB
7810 /* nothing to do */
7811 }
7812 break;
14ce26e7
FB
7813 case 0x63: /* arpl or movslS (x86_64) */
7814#ifdef TARGET_X86_64
7815 if (CODE64(s)) {
7816 int d_ot;
7817 /* d_ot is the size of destination */
7818 d_ot = dflag + OT_WORD;
7819
0af10c86 7820 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
7821 reg = ((modrm >> 3) & 7) | rex_r;
7822 mod = (modrm >> 6) & 3;
7823 rm = (modrm & 7) | REX_B(s);
3b46e624 7824
14ce26e7 7825 if (mod == 3) {
57fec1fe 7826 gen_op_mov_TN_reg(OT_LONG, 0, rm);
14ce26e7
FB
7827 /* sign extend */
7828 if (d_ot == OT_QUAD)
e108dd01 7829 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
57fec1fe 7830 gen_op_mov_reg_T0(d_ot, reg);
14ce26e7 7831 } else {
0af10c86 7832 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
14ce26e7 7833 if (d_ot == OT_QUAD) {
57fec1fe 7834 gen_op_lds_T0_A0(OT_LONG + s->mem_index);
14ce26e7 7835 } else {
57fec1fe 7836 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
14ce26e7 7837 }
57fec1fe 7838 gen_op_mov_reg_T0(d_ot, reg);
14ce26e7 7839 }
5fafdf24 7840 } else
14ce26e7
FB
7841#endif
7842 {
3bd7da9e 7843 int label1;
49d9fdcc 7844 TCGv t0, t1, t2, a0;
1e4840bf 7845
14ce26e7
FB
7846 if (!s->pe || s->vm86)
7847 goto illegal_op;
a7812ae4
PB
7848 t0 = tcg_temp_local_new();
7849 t1 = tcg_temp_local_new();
7850 t2 = tcg_temp_local_new();
3bd7da9e 7851 ot = OT_WORD;
0af10c86 7852 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
7853 reg = (modrm >> 3) & 7;
7854 mod = (modrm >> 6) & 3;
7855 rm = modrm & 7;
7856 if (mod != 3) {
0af10c86 7857 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
1e4840bf 7858 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
49d9fdcc
LD
7859 a0 = tcg_temp_local_new();
7860 tcg_gen_mov_tl(a0, cpu_A0);
14ce26e7 7861 } else {
1e4840bf 7862 gen_op_mov_v_reg(ot, t0, rm);
49d9fdcc 7863 TCGV_UNUSED(a0);
14ce26e7 7864 }
1e4840bf
FB
7865 gen_op_mov_v_reg(ot, t1, reg);
7866 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7867 tcg_gen_andi_tl(t1, t1, 3);
7868 tcg_gen_movi_tl(t2, 0);
3bd7da9e 7869 label1 = gen_new_label();
1e4840bf
FB
7870 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7871 tcg_gen_andi_tl(t0, t0, ~3);
7872 tcg_gen_or_tl(t0, t0, t1);
7873 tcg_gen_movi_tl(t2, CC_Z);
3bd7da9e 7874 gen_set_label(label1);
14ce26e7 7875 if (mod != 3) {
49d9fdcc
LD
7876 gen_op_st_v(ot + s->mem_index, t0, a0);
7877 tcg_temp_free(a0);
7878 } else {
1e4840bf 7879 gen_op_mov_reg_v(ot, rm, t0);
14ce26e7 7880 }
d229edce 7881 gen_compute_eflags(s);
3bd7da9e 7882 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
1e4840bf 7883 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
1e4840bf
FB
7884 tcg_temp_free(t0);
7885 tcg_temp_free(t1);
7886 tcg_temp_free(t2);
f115e911 7887 }
f115e911 7888 break;
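        /* Opcode 0x63 above is movslq (sign-extend a 32-bit source to 64
           bits) in 64-bit code and arpl elsewhere.  The arpl path uses local
           temps so the values survive the brcond: when the destination RPL is
           below the source RPL, the two low bits are copied and t2 is set to
           CC_Z, which is then merged into cc_src on top of the freshly
           computed flags. */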
2c0262af
FB
7889 case 0x102: /* lar */
7890 case 0x103: /* lsl */
cec6843e
FB
7891 {
7892 int label1;
1e4840bf 7893 TCGv t0;
cec6843e
FB
7894 if (!s->pe || s->vm86)
7895 goto illegal_op;
7896 ot = dflag ? OT_LONG : OT_WORD;
0af10c86 7897 modrm = cpu_ldub_code(env, s->pc++);
cec6843e 7898 reg = ((modrm >> 3) & 7) | rex_r;
0af10c86 7899 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
a7812ae4 7900 t0 = tcg_temp_local_new();
773cdfcc 7901 gen_update_cc_op(s);
2999a0b2
BS
7902 if (b == 0x102) {
7903 gen_helper_lar(t0, cpu_env, cpu_T[0]);
7904 } else {
7905 gen_helper_lsl(t0, cpu_env, cpu_T[0]);
7906 }
cec6843e
FB
7907 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7908 label1 = gen_new_label();
cb63669a 7909 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
1e4840bf 7910 gen_op_mov_reg_v(ot, reg, t0);
cec6843e 7911 gen_set_label(label1);
3ca51d07 7912 set_cc_op(s, CC_OP_EFLAGS);
1e4840bf 7913 tcg_temp_free(t0);
cec6843e 7914 }
2c0262af
FB
7915 break;
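        /* lar/lsl above report success through ZF: the helper leaves CC_Z set
           in cc_src for a valid, accessible selector, and the brcondi skips
           the register writeback when it is clear, so the destination is only
           modified on success. */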
7916 case 0x118:
0af10c86 7917 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7918 mod = (modrm >> 6) & 3;
7919 op = (modrm >> 3) & 7;
7920 switch(op) {
7921 case 0: /* prefetchnta */
7922 case 1: /* prefetcht0 */
7923 case 2: /* prefetcht1 */
7924 case 3: /* prefetcht2 */
7925 if (mod == 3)
7926 goto illegal_op;
0af10c86 7927 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af
FB
7928 /* nothing more to do */
7929 break;
e17a36ce 7930 default: /* nop (multi byte) */
0af10c86 7931 gen_nop_modrm(env, s, modrm);
e17a36ce 7932 break;
2c0262af
FB
7933 }
7934 break;
e17a36ce 7935 case 0x119 ... 0x11f: /* nop (multi byte) */
0af10c86
BS
7936 modrm = cpu_ldub_code(env, s->pc++);
7937 gen_nop_modrm(env, s, modrm);
e17a36ce 7938 break;
2c0262af
FB
7939 case 0x120: /* mov reg, crN */
7940 case 0x122: /* mov crN, reg */
7941 if (s->cpl != 0) {
7942 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7943 } else {
0af10c86 7944 modrm = cpu_ldub_code(env, s->pc++);
5c73b757
MO
7945 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7946 * AMD documentation (24594.pdf) and testing of
7947 * intel 386 and 486 processors all show that the mod bits
7948 * are assumed to be 1's, regardless of actual values.
7949 */
14ce26e7
FB
7950 rm = (modrm & 7) | REX_B(s);
7951 reg = ((modrm >> 3) & 7) | rex_r;
7952 if (CODE64(s))
7953 ot = OT_QUAD;
7954 else
7955 ot = OT_LONG;
ccd59d09
AP
7956 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7957 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7958 reg = 8;
7959 }
2c0262af
FB
7960 switch(reg) {
7961 case 0:
7962 case 2:
7963 case 3:
7964 case 4:
9230e66e 7965 case 8:
773cdfcc 7966 gen_update_cc_op(s);
872929aa 7967 gen_jmp_im(pc_start - s->cs_base);
2c0262af 7968 if (b & 2) {
57fec1fe 7969 gen_op_mov_TN_reg(ot, 0, rm);
4a7443be
BS
7970 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7971 cpu_T[0]);
14ce26e7 7972 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
7973 gen_eob(s);
7974 } else {
4a7443be 7975 gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
57fec1fe 7976 gen_op_mov_reg_T0(ot, rm);
2c0262af
FB
7977 }
7978 break;
7979 default:
7980 goto illegal_op;
7981 }
7982 }
7983 break;
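        /* For mov to/from crN above, the mod bits are ignored (the operand is
           always a register), and a lock prefix with CR0 selects CR8 when the
           CR8LEG feature bit is set, AMD's 32-bit encoding of the task
           priority register.  Writes go through the write_crN helper and end
           the TB since paging or protection may have changed. */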
7984 case 0x121: /* mov reg, drN */
7985 case 0x123: /* mov drN, reg */
7986 if (s->cpl != 0) {
7987 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7988 } else {
0af10c86 7989 modrm = cpu_ldub_code(env, s->pc++);
5c73b757
MO
7990 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7991 * AMD documentation (24594.pdf) and testing of
7992 * intel 386 and 486 processors all show that the mod bits
7993 * are assumed to be 1's, regardless of actual values.
7994 */
14ce26e7
FB
7995 rm = (modrm & 7) | REX_B(s);
7996 reg = ((modrm >> 3) & 7) | rex_r;
7997 if (CODE64(s))
7998 ot = OT_QUAD;
7999 else
8000 ot = OT_LONG;
2c0262af 8001 /* XXX: do it dynamically with CR4.DE bit */
14ce26e7 8002 if (reg == 4 || reg == 5 || reg >= 8)
2c0262af
FB
8003 goto illegal_op;
8004 if (b & 2) {
0573fbfc 8005 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
57fec1fe 8006 gen_op_mov_TN_reg(ot, 0, rm);
4a7443be 8007 gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
14ce26e7 8008 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
8009 gen_eob(s);
8010 } else {
0573fbfc 8011 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
651ba608 8012 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
57fec1fe 8013 gen_op_mov_reg_T0(ot, rm);
2c0262af
FB
8014 }
8015 }
8016 break;
8017 case 0x106: /* clts */
8018 if (s->cpl != 0) {
8019 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
8020 } else {
0573fbfc 8021 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
f0967a1a 8022 gen_helper_clts(cpu_env);
7eee2a50 8023 /* abort block because static cpu state changed */
14ce26e7 8024 gen_jmp_im(s->pc - s->cs_base);
7eee2a50 8025 gen_eob(s);
2c0262af
FB
8026 }
8027 break;
222a3336 8028 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
664e0f19
FB
8029 case 0x1c3: /* MOVNTI reg, mem */
8030 if (!(s->cpuid_features & CPUID_SSE2))
14ce26e7 8031 goto illegal_op;
664e0f19 8032 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
0af10c86 8033 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
8034 mod = (modrm >> 6) & 3;
8035 if (mod == 3)
8036 goto illegal_op;
8037 reg = ((modrm >> 3) & 7) | rex_r;
8038 /* generate a generic store */
0af10c86 8039 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
14ce26e7 8040 break;
664e0f19 8041 case 0x1ae:
0af10c86 8042 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
8043 mod = (modrm >> 6) & 3;
8044 op = (modrm >> 3) & 7;
8045 switch(op) {
8046 case 0: /* fxsave */
5fafdf24 8047 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
09d85fb8 8048 (s->prefix & PREFIX_LOCK))
14ce26e7 8049 goto illegal_op;
09d85fb8 8050 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
0fd14b72
FB
8051 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
8052 break;
8053 }
0af10c86 8054 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
773cdfcc 8055 gen_update_cc_op(s);
19e6c4b8 8056 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 8057 gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32((s->dflag == 2)));
664e0f19
FB
8058 break;
8059 case 1: /* fxrstor */
5fafdf24 8060 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
09d85fb8 8061 (s->prefix & PREFIX_LOCK))
14ce26e7 8062 goto illegal_op;
09d85fb8 8063 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
0fd14b72
FB
8064 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
8065 break;
8066 }
0af10c86 8067 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
773cdfcc 8068 gen_update_cc_op(s);
19e6c4b8 8069 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae
BS
8070 gen_helper_fxrstor(cpu_env, cpu_A0,
8071 tcg_const_i32((s->dflag == 2)));
664e0f19
FB
8072 break;
8073 case 2: /* ldmxcsr */
8074 case 3: /* stmxcsr */
8075 if (s->flags & HF_TS_MASK) {
8076 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
8077 break;
14ce26e7 8078 }
664e0f19
FB
8079 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
8080 mod == 3)
14ce26e7 8081 goto illegal_op;
0af10c86 8082 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
664e0f19 8083 if (op == 2) {
57fec1fe 8084 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
20f8bd48 8085 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 8086 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
14ce26e7 8087 } else {
651ba608 8088 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
57fec1fe 8089 gen_op_st_T0_A0(OT_LONG + s->mem_index);
14ce26e7 8090 }
664e0f19
FB
8091 break;
8092 case 5: /* lfence */
8093 case 6: /* mfence */
8001c294 8094 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
664e0f19
FB
8095 goto illegal_op;
8096 break;
8f091a59
FB
8097 case 7: /* sfence / clflush */
8098 if ((modrm & 0xc7) == 0xc0) {
8099 /* sfence */
a35f3ec7 8100 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
8f091a59
FB
8101 if (!(s->cpuid_features & CPUID_SSE))
8102 goto illegal_op;
8103 } else {
8104 /* clflush */
8105 if (!(s->cpuid_features & CPUID_CLFLUSH))
8106 goto illegal_op;
0af10c86 8107 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8f091a59
FB
8108 }
8109 break;
664e0f19 8110 default:
14ce26e7
FB
8111 goto illegal_op;
8112 }
8113 break;
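        /* The 0f ae group above: fxsave/fxrstor need FXSR, reject a lock
           prefix and raise #NM when EM or TS is set; ldmxcsr/stmxcsr require
           OSFXSR and a memory operand; lfence/mfence/sfence are accepted only
           in their register-form encodings and currently generate no code,
           while the memory form of /7 is clflush. */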
a35f3ec7 8114 case 0x10d: /* 3DNow! prefetch(w) */
0af10c86 8115 modrm = cpu_ldub_code(env, s->pc++);
a35f3ec7
AJ
8116 mod = (modrm >> 6) & 3;
8117 if (mod == 3)
8118 goto illegal_op;
0af10c86 8119 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8f091a59
FB
8120 /* ignore for now */
8121 break;
3b21e03e 8122 case 0x1aa: /* rsm */
872929aa 8123 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
3b21e03e
FB
8124 if (!(s->flags & HF_SMM_MASK))
8125 goto illegal_op;
728d803b 8126 gen_update_cc_op(s);
3b21e03e 8127 gen_jmp_im(s->pc - s->cs_base);
608badfc 8128 gen_helper_rsm(cpu_env);
3b21e03e
FB
8129 gen_eob(s);
8130 break;
222a3336
AZ
8131 case 0x1b8: /* SSE4.2 popcnt */
8132 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
8133 PREFIX_REPZ)
8134 goto illegal_op;
8135 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
8136 goto illegal_op;
8137
0af10c86 8138 modrm = cpu_ldub_code(env, s->pc++);
8b4a3df8 8139 reg = ((modrm >> 3) & 7) | rex_r;
222a3336
AZ
8140
8141 if (s->prefix & PREFIX_DATA)
8142 ot = OT_WORD;
8143 else if (s->dflag != 2)
8144 ot = OT_LONG;
8145 else
8146 ot = OT_QUAD;
8147
0af10c86 8148 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
d3eb5eae 8149 gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
222a3336 8150 gen_op_mov_reg_T0(ot, reg);
fdb0d09d 8151
3ca51d07 8152 set_cc_op(s, CC_OP_EFLAGS);
222a3336 8153 break;
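        /* popcnt above insists on an F3 prefix with no lock/repnz and on the
           CPUID_EXT_POPCNT bit; the operand size follows the usual 66/REX.W
           rules, and the helper computes the flags itself, so cc_op is simply
           set to CC_OP_EFLAGS. */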
a35f3ec7
AJ
8154 case 0x10e ... 0x10f:
8155 /* 3DNow! instructions, ignore prefixes */
8156 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
664e0f19
FB
8157 case 0x110 ... 0x117:
8158 case 0x128 ... 0x12f:
4242b1bd 8159 case 0x138 ... 0x13a:
d9f4bb27 8160 case 0x150 ... 0x179:
664e0f19
FB
8161 case 0x17c ... 0x17f:
8162 case 0x1c2:
8163 case 0x1c4 ... 0x1c6:
8164 case 0x1d0 ... 0x1fe:
0af10c86 8165 gen_sse(env, s, b, pc_start, rex_r);
664e0f19 8166 break;
2c0262af
FB
8167 default:
8168 goto illegal_op;
8169 }
8170 /* lock generation */
8171 if (s->prefix & PREFIX_LOCK)
a7812ae4 8172 gen_helper_unlock();
2c0262af
FB
8173 return s->pc;
8174 illegal_op:
ab1f142b 8175 if (s->prefix & PREFIX_LOCK)
a7812ae4 8176 gen_helper_unlock();
2c0262af
FB
8177 /* XXX: ensure that no lock was generated */
8178 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
8179 return s->pc;
8180}
8181
2c0262af
FB
8182void optimize_flags_init(void)
8183{
a7812ae4
PB
8184 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
8185 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8186 offsetof(CPUX86State, cc_op), "cc_op");
317ac620 8187 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
a7812ae4 8188 "cc_dst");
a3251186
RH
8189 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
8190 "cc_src");
988c3eb0
RH
8191 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src2),
8192 "cc_src2");
437a88a5 8193
cc739bb0
LD
8194#ifdef TARGET_X86_64
8195 cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8196 offsetof(CPUX86State, regs[R_EAX]), "rax");
cc739bb0 8197 cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8198 offsetof(CPUX86State, regs[R_ECX]), "rcx");
cc739bb0 8199 cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8200 offsetof(CPUX86State, regs[R_EDX]), "rdx");
cc739bb0 8201 cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8202 offsetof(CPUX86State, regs[R_EBX]), "rbx");
cc739bb0 8203 cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8204 offsetof(CPUX86State, regs[R_ESP]), "rsp");
cc739bb0 8205 cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8206 offsetof(CPUX86State, regs[R_EBP]), "rbp");
cc739bb0 8207 cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8208 offsetof(CPUX86State, regs[R_ESI]), "rsi");
cc739bb0 8209 cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8210 offsetof(CPUX86State, regs[R_EDI]), "rdi");
cc739bb0 8211 cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8212 offsetof(CPUX86State, regs[8]), "r8");
cc739bb0 8213 cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8214 offsetof(CPUX86State, regs[9]), "r9");
cc739bb0 8215 cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8216 offsetof(CPUX86State, regs[10]), "r10");
cc739bb0 8217 cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8218 offsetof(CPUX86State, regs[11]), "r11");
cc739bb0 8219 cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8220 offsetof(CPUX86State, regs[12]), "r12");
cc739bb0 8221 cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8222 offsetof(CPUX86State, regs[13]), "r13");
cc739bb0 8223 cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8224 offsetof(CPUX86State, regs[14]), "r14");
cc739bb0 8225 cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8226 offsetof(CPUX86State, regs[15]), "r15");
cc739bb0
LD
8227#else
8228 cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8229 offsetof(CPUX86State, regs[R_EAX]), "eax");
cc739bb0 8230 cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8231 offsetof(CPUX86State, regs[R_ECX]), "ecx");
cc739bb0 8232 cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8233 offsetof(CPUX86State, regs[R_EDX]), "edx");
cc739bb0 8234 cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8235 offsetof(CPUX86State, regs[R_EBX]), "ebx");
cc739bb0 8236 cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8237 offsetof(CPUX86State, regs[R_ESP]), "esp");
cc739bb0 8238 cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8239 offsetof(CPUX86State, regs[R_EBP]), "ebp");
cc739bb0 8240 cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8241 offsetof(CPUX86State, regs[R_ESI]), "esi");
cc739bb0 8242 cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8243 offsetof(CPUX86State, regs[R_EDI]), "edi");
cc739bb0
LD
8244#endif
8245
437a88a5 8246 /* register helpers */
a7812ae4 8247#define GEN_HELPER 2
437a88a5 8248#include "helper.h"
2c0262af
FB
8249}
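/* optimize_flags_init() above maps env, cc_op/cc_dst/cc_src/cc_src2 and the
   guest general purpose registers onto named TCG globals, so generated code
   can reference them directly and the TCG optimizer can track their values;
   TARGET_X86_64 selects the sixteen 64-bit registers instead of the eight
   32-bit ones. */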
8250
8251/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8252 basic block 'tb'. If search_pc is TRUE, also generate PC
8253 information for each intermediate instruction. */
467215c2 8254static inline void gen_intermediate_code_internal(X86CPU *cpu,
2cfc5f17 8255 TranslationBlock *tb,
467215c2 8256 bool search_pc)
2c0262af 8257{
ed2803da 8258 CPUState *cs = CPU(cpu);
467215c2 8259 CPUX86State *env = &cpu->env;
2c0262af 8260 DisasContext dc1, *dc = &dc1;
14ce26e7 8261 target_ulong pc_ptr;
2c0262af 8262 uint16_t *gen_opc_end;
a1d1bb31 8263 CPUBreakpoint *bp;
7f5b7d3e 8264 int j, lj;
c068688b 8265 uint64_t flags;
14ce26e7
FB
8266 target_ulong pc_start;
8267 target_ulong cs_base;
2e70f6ef
PB
8268 int num_insns;
8269 int max_insns;
3b46e624 8270
2c0262af 8271 /* generate intermediate code */
14ce26e7
FB
8272 pc_start = tb->pc;
8273 cs_base = tb->cs_base;
2c0262af 8274 flags = tb->flags;
3a1d9b8b 8275
4f31916f 8276 dc->pe = (flags >> HF_PE_SHIFT) & 1;
2c0262af
FB
8277 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
8278 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
8279 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
8280 dc->f_st = 0;
8281 dc->vm86 = (flags >> VM_SHIFT) & 1;
8282 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
8283 dc->iopl = (flags >> IOPL_SHIFT) & 3;
8284 dc->tf = (flags >> TF_SHIFT) & 1;
ed2803da 8285 dc->singlestep_enabled = cs->singlestep_enabled;
2c0262af 8286 dc->cc_op = CC_OP_DYNAMIC;
e207582f 8287 dc->cc_op_dirty = false;
2c0262af
FB
8288 dc->cs_base = cs_base;
8289 dc->tb = tb;
8290 dc->popl_esp_hack = 0;
8291 /* select memory access functions */
8292 dc->mem_index = 0;
8293 if (flags & HF_SOFTMMU_MASK) {
a9321a4d 8294 dc->mem_index = (cpu_mmu_index(env) + 1) << 2;
2c0262af 8295 }
0514ef2f
EH
8296 dc->cpuid_features = env->features[FEAT_1_EDX];
8297 dc->cpuid_ext_features = env->features[FEAT_1_ECX];
8298 dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
8299 dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
8300 dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
14ce26e7
FB
8301#ifdef TARGET_X86_64
8302 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
8303 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
8304#endif
7eee2a50 8305 dc->flags = flags;
ed2803da 8306 dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
a2cc3b24 8307 (flags & HF_INHIBIT_IRQ_MASK)
415fa2ea 8308#ifndef CONFIG_SOFTMMU
2c0262af
FB
8309 || (flags & HF_SOFTMMU_MASK)
8310#endif
8311 );
4f31916f
FB
8312#if 0
8313 /* check addseg logic */
dc196a57 8314 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
4f31916f
FB
8315 printf("ERROR addseg\n");
8316#endif
8317
a7812ae4
PB
8318 cpu_T[0] = tcg_temp_new();
8319 cpu_T[1] = tcg_temp_new();
8320 cpu_A0 = tcg_temp_new();
a7812ae4
PB
8321
8322 cpu_tmp0 = tcg_temp_new();
8323 cpu_tmp1_i64 = tcg_temp_new_i64();
8324 cpu_tmp2_i32 = tcg_temp_new_i32();
8325 cpu_tmp3_i32 = tcg_temp_new_i32();
8326 cpu_tmp4 = tcg_temp_new();
a7812ae4
PB
8327 cpu_ptr0 = tcg_temp_new_ptr();
8328 cpu_ptr1 = tcg_temp_new_ptr();
a3251186 8329 cpu_cc_srcT = tcg_temp_local_new();
57fec1fe 8330
92414b31 8331 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8332
8333 dc->is_jmp = DISAS_NEXT;
8334 pc_ptr = pc_start;
8335 lj = -1;
2e70f6ef
PB
8336 num_insns = 0;
8337 max_insns = tb->cflags & CF_COUNT_MASK;
8338 if (max_insns == 0)
8339 max_insns = CF_COUNT_MASK;
2c0262af 8340
806f352d 8341 gen_tb_start();
2c0262af 8342 for(;;) {
72cf2d4f
BS
8343 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
8344 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a2397807
JK
8345 if (bp->pc == pc_ptr &&
8346 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
2c0262af
FB
8347 gen_debug(dc, pc_ptr - dc->cs_base);
8348 break;
8349 }
8350 }
8351 }
8352 if (search_pc) {
92414b31 8353 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
8354 if (lj < j) {
8355 lj++;
8356 while (lj < j)
ab1103de 8357 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 8358 }
25983cad 8359 tcg_ctx.gen_opc_pc[lj] = pc_ptr;
2c0262af 8360 gen_opc_cc_op[lj] = dc->cc_op;
ab1103de 8361 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 8362 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 8363 }
2e70f6ef
PB
8364 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8365 gen_io_start();
8366
0af10c86 8367 pc_ptr = disas_insn(env, dc, pc_ptr);
2e70f6ef 8368 num_insns++;
2c0262af
FB
8369 /* stop translation if indicated */
8370 if (dc->is_jmp)
8371 break;
8372 /* if in single-step mode, we generate only one instruction and
8373 then generate an exception */
a2cc3b24
FB
8374 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8375 the flag and abort the translation to give the irqs a
8376 chance to happen */
5fafdf24 8377 if (dc->tf || dc->singlestep_enabled ||
2e70f6ef 8378 (flags & HF_INHIBIT_IRQ_MASK)) {
14ce26e7 8379 gen_jmp_im(pc_ptr - dc->cs_base);
2c0262af
FB
8380 gen_eob(dc);
8381 break;
8382 }
8383 /* if too long translation, stop generation too */
efd7f486 8384 if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
2e70f6ef
PB
8385 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
8386 num_insns >= max_insns) {
14ce26e7 8387 gen_jmp_im(pc_ptr - dc->cs_base);
2c0262af
FB
8388 gen_eob(dc);
8389 break;
8390 }
1b530a6d
AJ
8391 if (singlestep) {
8392 gen_jmp_im(pc_ptr - dc->cs_base);
8393 gen_eob(dc);
8394 break;
8395 }
2c0262af 8396 }
2e70f6ef
PB
8397 if (tb->cflags & CF_LAST_IO)
8398 gen_io_end();
806f352d 8399 gen_tb_end(tb, num_insns);
efd7f486 8400 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
8401 /* make sure the last values are filled in */
8402 if (search_pc) {
92414b31 8403 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
8404 lj++;
8405 while (lj <= j)
ab1103de 8406 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 8407 }
3b46e624 8408
2c0262af 8409#ifdef DEBUG_DISAS
8fec2b8c 8410 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
14ce26e7 8411 int disas_flags;
93fcfe39
AL
8412 qemu_log("----------------\n");
8413 qemu_log("IN: %s\n", lookup_symbol(pc_start));
14ce26e7
FB
8414#ifdef TARGET_X86_64
8415 if (dc->code64)
8416 disas_flags = 2;
8417 else
8418#endif
8419 disas_flags = !dc->code32;
f4359b9f 8420 log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
93fcfe39 8421 qemu_log("\n");
2c0262af
FB
8422 }
8423#endif
8424
2e70f6ef 8425 if (!search_pc) {
2c0262af 8426 tb->size = pc_ptr - pc_start;
2e70f6ef
PB
8427 tb->icount = num_insns;
8428 }
2c0262af
FB
8429}
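/* The two entry points below differ only in search_pc: when it is set, the
   loop above also records the guest PC, cc_op and instruction count for every
   generated op, which is what restore_state_to_opc() uses to rebuild EIP and
   cc_op when an exception is raised in the middle of a TB. */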
8430
317ac620 8431void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
2c0262af 8432{
467215c2 8433 gen_intermediate_code_internal(x86_env_get_cpu(env), tb, false);
2c0262af
FB
8434}
8435
317ac620 8436void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
2c0262af 8437{
467215c2 8438 gen_intermediate_code_internal(x86_env_get_cpu(env), tb, true);
2c0262af
FB
8439}
8440
317ac620 8441void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
d2856f1a
AJ
8442{
8443 int cc_op;
8444#ifdef DEBUG_DISAS
8fec2b8c 8445 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
d2856f1a 8446 int i;
93fcfe39 8447 qemu_log("RESTORE:\n");
d2856f1a 8448 for(i = 0;i <= pc_pos; i++) {
ab1103de 8449 if (tcg_ctx.gen_opc_instr_start[i]) {
25983cad
EV
8450 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
8451 tcg_ctx.gen_opc_pc[i]);
d2856f1a
AJ
8452 }
8453 }
e87b7cb0 8454 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
25983cad 8455 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
d2856f1a
AJ
8456 (uint32_t)tb->cs_base);
8457 }
8458#endif
25983cad 8459 env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
d2856f1a
AJ
8460 cc_op = gen_opc_cc_op[pc_pos];
8461 if (cc_op != CC_OP_DYNAMIC)
8462 env->cc_op = cc_op;
8463}