]> git.proxmox.com Git - mirror_qemu.git/blame - target-i386/translate.c
aes: make Td[0-5] and Te[0-5] tables non static
[mirror_qemu.git] / target-i386 / translate.c
CommitLineData
2c0262af
FB
1/*
2 * i386 translation
5fafdf24 3 *
2c0262af
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
18 */
19#include <stdarg.h>
20#include <stdlib.h>
21#include <stdio.h>
22#include <string.h>
23#include <inttypes.h>
24#include <signal.h>
2c0262af 25
bec93d72 26#include "qemu/host-utils.h"
2c0262af 27#include "cpu.h"
76cad711 28#include "disas/disas.h"
57fec1fe 29#include "tcg-op.h"
2c0262af 30
a7812ae4
PB
31#include "helper.h"
32#define GEN_HELPER 1
33#include "helper.h"
34
2c0262af
FB
35#define PREFIX_REPZ 0x01
36#define PREFIX_REPNZ 0x02
37#define PREFIX_LOCK 0x04
38#define PREFIX_DATA 0x08
39#define PREFIX_ADR 0x10
701ed211 40#define PREFIX_VEX 0x20
2c0262af 41
14ce26e7 42#ifdef TARGET_X86_64
14ce26e7
FB
43#define CODE64(s) ((s)->code64)
44#define REX_X(s) ((s)->rex_x)
45#define REX_B(s) ((s)->rex_b)
14ce26e7 46#else
14ce26e7
FB
47#define CODE64(s) 0
48#define REX_X(s) 0
49#define REX_B(s) 0
50#endif
51
bec93d72
RH
52#ifdef TARGET_X86_64
53# define ctztl ctz64
54# define clztl clz64
55#else
56# define ctztl ctz32
57# define clztl clz32
58#endif
59
57fec1fe
FB
60//#define MACRO_TEST 1
61
57fec1fe 62/* global register indexes */
a7812ae4 63static TCGv_ptr cpu_env;
a3251186 64static TCGv cpu_A0;
988c3eb0 65static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
a7812ae4 66static TCGv_i32 cpu_cc_op;
cc739bb0 67static TCGv cpu_regs[CPU_NB_REGS];
1e4840bf 68/* local temps */
3b9d3cf1 69static TCGv cpu_T[2];
57fec1fe 70/* local register indexes (only used inside old micro ops) */
a7812ae4
PB
71static TCGv cpu_tmp0, cpu_tmp4;
72static TCGv_ptr cpu_ptr0, cpu_ptr1;
73static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
74static TCGv_i64 cpu_tmp1_i64;
57fec1fe 75
1a7ff922
PB
76static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
77
022c62cb 78#include "exec/gen-icount.h"
2e70f6ef 79
57fec1fe
FB
80#ifdef TARGET_X86_64
81static int x86_64_hregs;
ae063a68
FB
82#endif
83
2c0262af
FB
84typedef struct DisasContext {
85 /* current insn context */
86 int override; /* -1 if no override */
87 int prefix;
88 int aflag, dflag;
14ce26e7 89 target_ulong pc; /* pc = eip + cs_base */
2c0262af
FB
90 int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
91 static state change (stop translation) */
92 /* current block context */
14ce26e7 93 target_ulong cs_base; /* base of CS segment */
2c0262af
FB
94 int pe; /* protected mode */
95 int code32; /* 32 bit code segment */
14ce26e7
FB
96#ifdef TARGET_X86_64
97 int lma; /* long mode active */
98 int code64; /* 64 bit code segment */
99 int rex_x, rex_b;
100#endif
701ed211
RH
101 int vex_l; /* vex vector length */
102 int vex_v; /* vex vvvv register, without 1's compliment. */
2c0262af 103 int ss32; /* 32 bit stack segment */
fee71888 104 CCOp cc_op; /* current CC operation */
e207582f 105 bool cc_op_dirty;
2c0262af
FB
106 int addseg; /* non zero if either DS/ES/SS have a non zero base */
107 int f_st; /* currently unused */
108 int vm86; /* vm86 mode */
109 int cpl;
110 int iopl;
111 int tf; /* TF cpu flag */
34865134 112 int singlestep_enabled; /* "hardware" single step enabled */
2c0262af
FB
113 int jmp_opt; /* use direct block chaining for direct jumps */
114 int mem_index; /* select memory access functions */
c068688b 115 uint64_t flags; /* all execution flags */
2c0262af
FB
116 struct TranslationBlock *tb;
117 int popl_esp_hack; /* for correct popl with esp base handling */
14ce26e7
FB
118 int rip_offset; /* only used in x86_64, but left for simplicity */
119 int cpuid_features;
3d7374c5 120 int cpuid_ext_features;
e771edab 121 int cpuid_ext2_features;
12e26b75 122 int cpuid_ext3_features;
a9321a4d 123 int cpuid_7_0_ebx_features;
2c0262af
FB
124} DisasContext;
125
126static void gen_eob(DisasContext *s);
14ce26e7
FB
127static void gen_jmp(DisasContext *s, target_ulong eip);
128static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
63633fe6 129static void gen_op(DisasContext *s1, int op, int ot, int d);
2c0262af
FB
130
131/* i386 arith/logic operations */
132enum {
5fafdf24
TS
133 OP_ADDL,
134 OP_ORL,
135 OP_ADCL,
2c0262af 136 OP_SBBL,
5fafdf24
TS
137 OP_ANDL,
138 OP_SUBL,
139 OP_XORL,
2c0262af
FB
140 OP_CMPL,
141};
142
143/* i386 shift ops */
144enum {
5fafdf24
TS
145 OP_ROL,
146 OP_ROR,
147 OP_RCL,
148 OP_RCR,
149 OP_SHL,
150 OP_SHR,
2c0262af
FB
151 OP_SHL1, /* undocumented */
152 OP_SAR = 7,
153};
154
8e1c85e3
FB
155enum {
156 JCC_O,
157 JCC_B,
158 JCC_Z,
159 JCC_BE,
160 JCC_S,
161 JCC_P,
162 JCC_L,
163 JCC_LE,
164};
165
2c0262af
FB
166/* operand size */
167enum {
168 OT_BYTE = 0,
169 OT_WORD,
5fafdf24 170 OT_LONG,
2c0262af
FB
171 OT_QUAD,
172};
173
174enum {
175 /* I386 int registers */
176 OR_EAX, /* MUST be even numbered */
177 OR_ECX,
178 OR_EDX,
179 OR_EBX,
180 OR_ESP,
181 OR_EBP,
182 OR_ESI,
183 OR_EDI,
14ce26e7
FB
184
185 OR_TMP0 = 16, /* temporary operand register */
2c0262af
FB
186 OR_TMP1,
187 OR_A0, /* temporary register used when doing address evaluation */
2c0262af
FB
188};
189
b666265b 190enum {
a3251186
RH
191 USES_CC_DST = 1,
192 USES_CC_SRC = 2,
988c3eb0
RH
193 USES_CC_SRC2 = 4,
194 USES_CC_SRCT = 8,
b666265b
RH
195};
196
197/* Bit set if the global variable is live after setting CC_OP to X. */
198static const uint8_t cc_op_live[CC_OP_NB] = {
988c3eb0 199 [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
b666265b
RH
200 [CC_OP_EFLAGS] = USES_CC_SRC,
201 [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
202 [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
988c3eb0 203 [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
a3251186 204 [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
988c3eb0 205 [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
b666265b
RH
206 [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
207 [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
208 [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
209 [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
210 [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
bc4b43dc 211 [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
cd7f97ca
RH
212 [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
213 [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
214 [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
436ff2d2 215 [CC_OP_CLR] = 0,
b666265b
RH
216};
217
e207582f 218static void set_cc_op(DisasContext *s, CCOp op)
3ca51d07 219{
b666265b
RH
220 int dead;
221
222 if (s->cc_op == op) {
223 return;
224 }
225
226 /* Discard CC computation that will no longer be used. */
227 dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
228 if (dead & USES_CC_DST) {
229 tcg_gen_discard_tl(cpu_cc_dst);
e207582f 230 }
b666265b
RH
231 if (dead & USES_CC_SRC) {
232 tcg_gen_discard_tl(cpu_cc_src);
233 }
988c3eb0
RH
234 if (dead & USES_CC_SRC2) {
235 tcg_gen_discard_tl(cpu_cc_src2);
236 }
a3251186
RH
237 if (dead & USES_CC_SRCT) {
238 tcg_gen_discard_tl(cpu_cc_srcT);
239 }
b666265b 240
e2f515cf
RH
241 if (op == CC_OP_DYNAMIC) {
242 /* The DYNAMIC setting is translator only, and should never be
243 stored. Thus we always consider it clean. */
244 s->cc_op_dirty = false;
245 } else {
246 /* Discard any computed CC_OP value (see shifts). */
247 if (s->cc_op == CC_OP_DYNAMIC) {
248 tcg_gen_discard_i32(cpu_cc_op);
249 }
250 s->cc_op_dirty = true;
251 }
b666265b 252 s->cc_op = op;
e207582f
RH
253}
254
e207582f
RH
255static void gen_update_cc_op(DisasContext *s)
256{
257 if (s->cc_op_dirty) {
773cdfcc 258 tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
e207582f
RH
259 s->cc_op_dirty = false;
260 }
3ca51d07
RH
261}
262
57fec1fe
FB
263static inline void gen_op_movl_T0_0(void)
264{
265 tcg_gen_movi_tl(cpu_T[0], 0);
266}
267
268static inline void gen_op_movl_T0_im(int32_t val)
269{
270 tcg_gen_movi_tl(cpu_T[0], val);
271}
272
273static inline void gen_op_movl_T0_imu(uint32_t val)
274{
275 tcg_gen_movi_tl(cpu_T[0], val);
276}
277
278static inline void gen_op_movl_T1_im(int32_t val)
279{
280 tcg_gen_movi_tl(cpu_T[1], val);
281}
282
283static inline void gen_op_movl_T1_imu(uint32_t val)
284{
285 tcg_gen_movi_tl(cpu_T[1], val);
286}
287
288static inline void gen_op_movl_A0_im(uint32_t val)
289{
290 tcg_gen_movi_tl(cpu_A0, val);
291}
292
293#ifdef TARGET_X86_64
294static inline void gen_op_movq_A0_im(int64_t val)
295{
296 tcg_gen_movi_tl(cpu_A0, val);
297}
298#endif
299
300static inline void gen_movtl_T0_im(target_ulong val)
301{
302 tcg_gen_movi_tl(cpu_T[0], val);
303}
304
305static inline void gen_movtl_T1_im(target_ulong val)
306{
307 tcg_gen_movi_tl(cpu_T[1], val);
308}
309
310static inline void gen_op_andl_T0_ffff(void)
311{
312 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
313}
314
315static inline void gen_op_andl_T0_im(uint32_t val)
316{
317 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
318}
319
320static inline void gen_op_movl_T0_T1(void)
321{
322 tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
323}
324
325static inline void gen_op_andl_A0_ffff(void)
326{
327 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
328}
329
14ce26e7
FB
330#ifdef TARGET_X86_64
331
332#define NB_OP_SIZES 4
333
14ce26e7
FB
334#else /* !TARGET_X86_64 */
335
336#define NB_OP_SIZES 3
337
14ce26e7
FB
338#endif /* !TARGET_X86_64 */
339
e2542fe2 340#if defined(HOST_WORDS_BIGENDIAN)
57fec1fe
FB
341#define REG_B_OFFSET (sizeof(target_ulong) - 1)
342#define REG_H_OFFSET (sizeof(target_ulong) - 2)
343#define REG_W_OFFSET (sizeof(target_ulong) - 2)
344#define REG_L_OFFSET (sizeof(target_ulong) - 4)
345#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
14ce26e7 346#else
57fec1fe
FB
347#define REG_B_OFFSET 0
348#define REG_H_OFFSET 1
349#define REG_W_OFFSET 0
350#define REG_L_OFFSET 0
351#define REG_LH_OFFSET 4
14ce26e7 352#endif
57fec1fe 353
96d7073f
PM
354/* In instruction encodings for byte register accesses the
355 * register number usually indicates "low 8 bits of register N";
356 * however there are some special cases where N 4..7 indicates
357 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
358 * true for this special case, false otherwise.
359 */
360static inline bool byte_reg_is_xH(int reg)
361{
362 if (reg < 4) {
363 return false;
364 }
365#ifdef TARGET_X86_64
366 if (reg >= 8 || x86_64_hregs) {
367 return false;
368 }
369#endif
370 return true;
371}
372
1e4840bf 373static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
57fec1fe
FB
374{
375 switch(ot) {
376 case OT_BYTE:
96d7073f 377 if (!byte_reg_is_xH(reg)) {
c832e3de 378 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
57fec1fe 379 } else {
c832e3de 380 tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
57fec1fe
FB
381 }
382 break;
383 case OT_WORD:
c832e3de 384 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
57fec1fe 385 break;
cc739bb0 386 default: /* XXX this shouldn't be reached; abort? */
57fec1fe 387 case OT_LONG:
cc739bb0
LD
388 /* For x86_64, this sets the higher half of register to zero.
389 For i386, this is equivalent to a mov. */
390 tcg_gen_ext32u_tl(cpu_regs[reg], t0);
57fec1fe 391 break;
cc739bb0 392#ifdef TARGET_X86_64
57fec1fe 393 case OT_QUAD:
cc739bb0 394 tcg_gen_mov_tl(cpu_regs[reg], t0);
57fec1fe 395 break;
14ce26e7 396#endif
57fec1fe
FB
397 }
398}
2c0262af 399
57fec1fe
FB
400static inline void gen_op_mov_reg_T0(int ot, int reg)
401{
1e4840bf 402 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
57fec1fe
FB
403}
404
405static inline void gen_op_mov_reg_T1(int ot, int reg)
406{
1e4840bf 407 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
57fec1fe
FB
408}
409
410static inline void gen_op_mov_reg_A0(int size, int reg)
411{
412 switch(size) {
93ab25d7 413 case OT_BYTE:
c832e3de 414 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
57fec1fe 415 break;
cc739bb0 416 default: /* XXX this shouldn't be reached; abort? */
93ab25d7 417 case OT_WORD:
cc739bb0
LD
418 /* For x86_64, this sets the higher half of register to zero.
419 For i386, this is equivalent to a mov. */
420 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
57fec1fe 421 break;
cc739bb0 422#ifdef TARGET_X86_64
93ab25d7 423 case OT_LONG:
cc739bb0 424 tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
57fec1fe 425 break;
14ce26e7 426#endif
57fec1fe
FB
427 }
428}
429
1e4840bf 430static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
57fec1fe 431{
96d7073f
PM
432 if (ot == OT_BYTE && byte_reg_is_xH(reg)) {
433 tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
434 tcg_gen_ext8u_tl(t0, t0);
435 } else {
cc739bb0 436 tcg_gen_mov_tl(t0, cpu_regs[reg]);
57fec1fe
FB
437 }
438}
439
1e4840bf
FB
440static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
441{
442 gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
443}
444
57fec1fe
FB
445static inline void gen_op_movl_A0_reg(int reg)
446{
cc739bb0 447 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
57fec1fe
FB
448}
449
450static inline void gen_op_addl_A0_im(int32_t val)
451{
452 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
14ce26e7 453#ifdef TARGET_X86_64
57fec1fe 454 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
14ce26e7 455#endif
57fec1fe 456}
2c0262af 457
14ce26e7 458#ifdef TARGET_X86_64
57fec1fe
FB
459static inline void gen_op_addq_A0_im(int64_t val)
460{
461 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
462}
14ce26e7 463#endif
57fec1fe
FB
464
465static void gen_add_A0_im(DisasContext *s, int val)
466{
467#ifdef TARGET_X86_64
468 if (CODE64(s))
469 gen_op_addq_A0_im(val);
470 else
471#endif
472 gen_op_addl_A0_im(val);
473}
2c0262af 474
57fec1fe 475static inline void gen_op_addl_T0_T1(void)
2c0262af 476{
57fec1fe
FB
477 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
478}
479
480static inline void gen_op_jmp_T0(void)
481{
317ac620 482 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
57fec1fe
FB
483}
484
6e0d8677 485static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
57fec1fe 486{
6e0d8677 487 switch(size) {
93ab25d7 488 case OT_BYTE:
cc739bb0 489 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
c832e3de 490 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
6e0d8677 491 break;
93ab25d7 492 case OT_WORD:
cc739bb0
LD
493 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
494 /* For x86_64, this sets the higher half of register to zero.
495 For i386, this is equivalent to a nop. */
496 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
497 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
6e0d8677
FB
498 break;
499#ifdef TARGET_X86_64
93ab25d7 500 case OT_LONG:
cc739bb0 501 tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
6e0d8677
FB
502 break;
503#endif
504 }
57fec1fe
FB
505}
506
6e0d8677 507static inline void gen_op_add_reg_T0(int size, int reg)
57fec1fe 508{
6e0d8677 509 switch(size) {
93ab25d7 510 case OT_BYTE:
cc739bb0 511 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
c832e3de 512 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
6e0d8677 513 break;
93ab25d7 514 case OT_WORD:
cc739bb0
LD
515 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
516 /* For x86_64, this sets the higher half of register to zero.
517 For i386, this is equivalent to a nop. */
518 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
519 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
6e0d8677 520 break;
14ce26e7 521#ifdef TARGET_X86_64
93ab25d7 522 case OT_LONG:
cc739bb0 523 tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
6e0d8677 524 break;
14ce26e7 525#endif
6e0d8677
FB
526 }
527}
57fec1fe 528
57fec1fe
FB
529static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
530{
cc739bb0
LD
531 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
532 if (shift != 0)
57fec1fe
FB
533 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
534 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
cc739bb0
LD
535 /* For x86_64, this sets the higher half of register to zero.
536 For i386, this is equivalent to a nop. */
537 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
57fec1fe 538}
2c0262af 539
57fec1fe
FB
540static inline void gen_op_movl_A0_seg(int reg)
541{
317ac620 542 tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
57fec1fe 543}
2c0262af 544
7162ab21 545static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
57fec1fe 546{
317ac620 547 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
57fec1fe 548#ifdef TARGET_X86_64
7162ab21
VC
549 if (CODE64(s)) {
550 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
551 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
552 } else {
553 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
554 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
555 }
556#else
557 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
57fec1fe
FB
558#endif
559}
2c0262af 560
14ce26e7 561#ifdef TARGET_X86_64
57fec1fe
FB
562static inline void gen_op_movq_A0_seg(int reg)
563{
317ac620 564 tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
57fec1fe 565}
14ce26e7 566
57fec1fe
FB
567static inline void gen_op_addq_A0_seg(int reg)
568{
317ac620 569 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
57fec1fe
FB
570 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
571}
572
573static inline void gen_op_movq_A0_reg(int reg)
574{
cc739bb0 575 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
57fec1fe
FB
576}
577
578static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
579{
cc739bb0
LD
580 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
581 if (shift != 0)
57fec1fe
FB
582 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
583 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
584}
14ce26e7
FB
585#endif
586
57fec1fe
FB
587static inline void gen_op_lds_T0_A0(int idx)
588{
589 int mem_index = (idx >> 2) - 1;
590 switch(idx & 3) {
93ab25d7 591 case OT_BYTE:
57fec1fe
FB
592 tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
593 break;
93ab25d7 594 case OT_WORD:
57fec1fe
FB
595 tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
596 break;
597 default:
93ab25d7 598 case OT_LONG:
57fec1fe
FB
599 tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
600 break;
601 }
602}
2c0262af 603
1e4840bf 604static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
57fec1fe
FB
605{
606 int mem_index = (idx >> 2) - 1;
607 switch(idx & 3) {
93ab25d7 608 case OT_BYTE:
1e4840bf 609 tcg_gen_qemu_ld8u(t0, a0, mem_index);
57fec1fe 610 break;
93ab25d7 611 case OT_WORD:
1e4840bf 612 tcg_gen_qemu_ld16u(t0, a0, mem_index);
57fec1fe 613 break;
93ab25d7 614 case OT_LONG:
1e4840bf 615 tcg_gen_qemu_ld32u(t0, a0, mem_index);
57fec1fe
FB
616 break;
617 default:
93ab25d7 618 case OT_QUAD:
a7812ae4
PB
619 /* Should never happen on 32-bit targets. */
620#ifdef TARGET_X86_64
1e4840bf 621 tcg_gen_qemu_ld64(t0, a0, mem_index);
a7812ae4 622#endif
57fec1fe
FB
623 break;
624 }
625}
2c0262af 626
1e4840bf
FB
627/* XXX: always use ldu or lds */
628static inline void gen_op_ld_T0_A0(int idx)
629{
630 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
631}
632
57fec1fe
FB
633static inline void gen_op_ldu_T0_A0(int idx)
634{
1e4840bf 635 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
57fec1fe 636}
2c0262af 637
57fec1fe 638static inline void gen_op_ld_T1_A0(int idx)
1e4840bf
FB
639{
640 gen_op_ld_v(idx, cpu_T[1], cpu_A0);
641}
642
643static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
57fec1fe
FB
644{
645 int mem_index = (idx >> 2) - 1;
646 switch(idx & 3) {
93ab25d7 647 case OT_BYTE:
1e4840bf 648 tcg_gen_qemu_st8(t0, a0, mem_index);
57fec1fe 649 break;
93ab25d7 650 case OT_WORD:
1e4840bf 651 tcg_gen_qemu_st16(t0, a0, mem_index);
57fec1fe 652 break;
93ab25d7 653 case OT_LONG:
1e4840bf 654 tcg_gen_qemu_st32(t0, a0, mem_index);
57fec1fe
FB
655 break;
656 default:
93ab25d7 657 case OT_QUAD:
a7812ae4
PB
658 /* Should never happen on 32-bit targets. */
659#ifdef TARGET_X86_64
1e4840bf 660 tcg_gen_qemu_st64(t0, a0, mem_index);
a7812ae4 661#endif
57fec1fe
FB
662 break;
663 }
664}
4f31916f 665
57fec1fe
FB
666static inline void gen_op_st_T0_A0(int idx)
667{
1e4840bf 668 gen_op_st_v(idx, cpu_T[0], cpu_A0);
57fec1fe 669}
4f31916f 670
57fec1fe
FB
671static inline void gen_op_st_T1_A0(int idx)
672{
1e4840bf 673 gen_op_st_v(idx, cpu_T[1], cpu_A0);
57fec1fe 674}
4f31916f 675
14ce26e7
FB
676static inline void gen_jmp_im(target_ulong pc)
677{
57fec1fe 678 tcg_gen_movi_tl(cpu_tmp0, pc);
317ac620 679 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
14ce26e7
FB
680}
681
2c0262af
FB
682static inline void gen_string_movl_A0_ESI(DisasContext *s)
683{
684 int override;
685
686 override = s->override;
14ce26e7
FB
687#ifdef TARGET_X86_64
688 if (s->aflag == 2) {
689 if (override >= 0) {
57fec1fe
FB
690 gen_op_movq_A0_seg(override);
691 gen_op_addq_A0_reg_sN(0, R_ESI);
14ce26e7 692 } else {
57fec1fe 693 gen_op_movq_A0_reg(R_ESI);
14ce26e7
FB
694 }
695 } else
696#endif
2c0262af
FB
697 if (s->aflag) {
698 /* 32 bit address */
699 if (s->addseg && override < 0)
700 override = R_DS;
701 if (override >= 0) {
57fec1fe
FB
702 gen_op_movl_A0_seg(override);
703 gen_op_addl_A0_reg_sN(0, R_ESI);
2c0262af 704 } else {
57fec1fe 705 gen_op_movl_A0_reg(R_ESI);
2c0262af
FB
706 }
707 } else {
708 /* 16 address, always override */
709 if (override < 0)
710 override = R_DS;
57fec1fe 711 gen_op_movl_A0_reg(R_ESI);
2c0262af 712 gen_op_andl_A0_ffff();
7162ab21 713 gen_op_addl_A0_seg(s, override);
2c0262af
FB
714 }
715}
716
717static inline void gen_string_movl_A0_EDI(DisasContext *s)
718{
14ce26e7
FB
719#ifdef TARGET_X86_64
720 if (s->aflag == 2) {
57fec1fe 721 gen_op_movq_A0_reg(R_EDI);
14ce26e7
FB
722 } else
723#endif
2c0262af
FB
724 if (s->aflag) {
725 if (s->addseg) {
57fec1fe
FB
726 gen_op_movl_A0_seg(R_ES);
727 gen_op_addl_A0_reg_sN(0, R_EDI);
2c0262af 728 } else {
57fec1fe 729 gen_op_movl_A0_reg(R_EDI);
2c0262af
FB
730 }
731 } else {
57fec1fe 732 gen_op_movl_A0_reg(R_EDI);
2c0262af 733 gen_op_andl_A0_ffff();
7162ab21 734 gen_op_addl_A0_seg(s, R_ES);
2c0262af
FB
735 }
736}
737
6e0d8677
FB
738static inline void gen_op_movl_T0_Dshift(int ot)
739{
317ac620 740 tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
6e0d8677 741 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
2c0262af
FB
742};
743
d824df34 744static TCGv gen_ext_tl(TCGv dst, TCGv src, int size, bool sign)
6e0d8677 745{
d824df34 746 switch (size) {
6e0d8677 747 case OT_BYTE:
d824df34
PB
748 if (sign) {
749 tcg_gen_ext8s_tl(dst, src);
750 } else {
751 tcg_gen_ext8u_tl(dst, src);
752 }
753 return dst;
6e0d8677 754 case OT_WORD:
d824df34
PB
755 if (sign) {
756 tcg_gen_ext16s_tl(dst, src);
757 } else {
758 tcg_gen_ext16u_tl(dst, src);
759 }
760 return dst;
761#ifdef TARGET_X86_64
6e0d8677 762 case OT_LONG:
d824df34
PB
763 if (sign) {
764 tcg_gen_ext32s_tl(dst, src);
765 } else {
766 tcg_gen_ext32u_tl(dst, src);
767 }
768 return dst;
769#endif
6e0d8677 770 default:
d824df34 771 return src;
6e0d8677
FB
772 }
773}
3b46e624 774
d824df34
PB
775static void gen_extu(int ot, TCGv reg)
776{
777 gen_ext_tl(reg, reg, ot, false);
778}
779
6e0d8677
FB
780static void gen_exts(int ot, TCGv reg)
781{
d824df34 782 gen_ext_tl(reg, reg, ot, true);
6e0d8677 783}
2c0262af 784
6e0d8677
FB
785static inline void gen_op_jnz_ecx(int size, int label1)
786{
cc739bb0 787 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
6e0d8677 788 gen_extu(size + 1, cpu_tmp0);
cb63669a 789 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
6e0d8677
FB
790}
791
792static inline void gen_op_jz_ecx(int size, int label1)
793{
cc739bb0 794 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
6e0d8677 795 gen_extu(size + 1, cpu_tmp0);
cb63669a 796 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
6e0d8677 797}
2c0262af 798
a7812ae4
PB
799static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
800{
801 switch (ot) {
93ab25d7
PB
802 case OT_BYTE:
803 gen_helper_inb(v, n);
804 break;
805 case OT_WORD:
806 gen_helper_inw(v, n);
807 break;
808 case OT_LONG:
809 gen_helper_inl(v, n);
810 break;
a7812ae4 811 }
a7812ae4 812}
2c0262af 813
a7812ae4
PB
814static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
815{
816 switch (ot) {
93ab25d7
PB
817 case OT_BYTE:
818 gen_helper_outb(v, n);
819 break;
820 case OT_WORD:
821 gen_helper_outw(v, n);
822 break;
823 case OT_LONG:
824 gen_helper_outl(v, n);
825 break;
a7812ae4 826 }
a7812ae4 827}
f115e911 828
b8b6a50b
FB
829static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
830 uint32_t svm_flags)
f115e911 831{
b8b6a50b
FB
832 int state_saved;
833 target_ulong next_eip;
834
835 state_saved = 0;
f115e911 836 if (s->pe && (s->cpl > s->iopl || s->vm86)) {
773cdfcc 837 gen_update_cc_op(s);
14ce26e7 838 gen_jmp_im(cur_eip);
b8b6a50b 839 state_saved = 1;
b6abf97d 840 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
a7812ae4 841 switch (ot) {
93ab25d7 842 case OT_BYTE:
4a7443be
BS
843 gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
844 break;
93ab25d7 845 case OT_WORD:
4a7443be
BS
846 gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
847 break;
93ab25d7 848 case OT_LONG:
4a7443be
BS
849 gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
850 break;
a7812ae4 851 }
b8b6a50b 852 }
872929aa 853 if(s->flags & HF_SVMI_MASK) {
b8b6a50b 854 if (!state_saved) {
773cdfcc 855 gen_update_cc_op(s);
b8b6a50b 856 gen_jmp_im(cur_eip);
b8b6a50b
FB
857 }
858 svm_flags |= (1 << (4 + ot));
859 next_eip = s->pc - s->cs_base;
b6abf97d 860 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
052e80d5
BS
861 gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
862 tcg_const_i32(svm_flags),
a7812ae4 863 tcg_const_i32(next_eip - cur_eip));
f115e911
FB
864 }
865}
866
2c0262af
FB
867static inline void gen_movs(DisasContext *s, int ot)
868{
869 gen_string_movl_A0_ESI(s);
57fec1fe 870 gen_op_ld_T0_A0(ot + s->mem_index);
2c0262af 871 gen_string_movl_A0_EDI(s);
57fec1fe 872 gen_op_st_T0_A0(ot + s->mem_index);
6e0d8677
FB
873 gen_op_movl_T0_Dshift(ot);
874 gen_op_add_reg_T0(s->aflag, R_ESI);
875 gen_op_add_reg_T0(s->aflag, R_EDI);
2c0262af
FB
876}
877
b6abf97d
FB
878static void gen_op_update1_cc(void)
879{
b6abf97d
FB
880 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
881}
882
883static void gen_op_update2_cc(void)
884{
885 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
886 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
887}
888
988c3eb0
RH
889static void gen_op_update3_cc(TCGv reg)
890{
891 tcg_gen_mov_tl(cpu_cc_src2, reg);
892 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
893 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
894}
895
b6abf97d
FB
896static inline void gen_op_testl_T0_T1_cc(void)
897{
b6abf97d
FB
898 tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
899}
900
901static void gen_op_update_neg_cc(void)
902{
b6abf97d 903 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
a3251186
RH
904 tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
905 tcg_gen_movi_tl(cpu_cc_srcT, 0);
b6abf97d
FB
906}
907
d229edce
RH
908/* compute all eflags to cc_src */
909static void gen_compute_eflags(DisasContext *s)
8e1c85e3 910{
988c3eb0 911 TCGv zero, dst, src1, src2;
db9f2597
RH
912 int live, dead;
913
d229edce
RH
914 if (s->cc_op == CC_OP_EFLAGS) {
915 return;
916 }
436ff2d2
RH
917 if (s->cc_op == CC_OP_CLR) {
918 tcg_gen_movi_tl(cpu_cc_src, CC_Z);
919 set_cc_op(s, CC_OP_EFLAGS);
920 return;
921 }
db9f2597
RH
922
923 TCGV_UNUSED(zero);
924 dst = cpu_cc_dst;
925 src1 = cpu_cc_src;
988c3eb0 926 src2 = cpu_cc_src2;
db9f2597
RH
927
928 /* Take care to not read values that are not live. */
929 live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
988c3eb0 930 dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
db9f2597
RH
931 if (dead) {
932 zero = tcg_const_tl(0);
933 if (dead & USES_CC_DST) {
934 dst = zero;
935 }
936 if (dead & USES_CC_SRC) {
937 src1 = zero;
938 }
988c3eb0
RH
939 if (dead & USES_CC_SRC2) {
940 src2 = zero;
941 }
db9f2597
RH
942 }
943
773cdfcc 944 gen_update_cc_op(s);
988c3eb0 945 gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
d229edce 946 set_cc_op(s, CC_OP_EFLAGS);
db9f2597
RH
947
948 if (dead) {
949 tcg_temp_free(zero);
950 }
8e1c85e3
FB
951}
952
bec93d72
RH
953typedef struct CCPrepare {
954 TCGCond cond;
955 TCGv reg;
956 TCGv reg2;
957 target_ulong imm;
958 target_ulong mask;
959 bool use_reg2;
960 bool no_setcond;
961} CCPrepare;
962
06847f1f 963/* compute eflags.C to reg */
bec93d72 964static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
06847f1f
RH
965{
966 TCGv t0, t1;
bec93d72 967 int size, shift;
06847f1f
RH
968
969 switch (s->cc_op) {
970 case CC_OP_SUBB ... CC_OP_SUBQ:
a3251186 971 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
06847f1f
RH
972 size = s->cc_op - CC_OP_SUBB;
973 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
974 /* If no temporary was used, be careful not to alias t1 and t0. */
975 t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
a3251186 976 tcg_gen_mov_tl(t0, cpu_cc_srcT);
06847f1f
RH
977 gen_extu(size, t0);
978 goto add_sub;
979
980 case CC_OP_ADDB ... CC_OP_ADDQ:
981 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
982 size = s->cc_op - CC_OP_ADDB;
983 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
984 t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
985 add_sub:
bec93d72
RH
986 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
987 .reg2 = t1, .mask = -1, .use_reg2 = true };
06847f1f 988
06847f1f 989 case CC_OP_LOGICB ... CC_OP_LOGICQ:
436ff2d2 990 case CC_OP_CLR:
bec93d72 991 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
06847f1f
RH
992
993 case CC_OP_INCB ... CC_OP_INCQ:
994 case CC_OP_DECB ... CC_OP_DECQ:
bec93d72
RH
995 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
996 .mask = -1, .no_setcond = true };
06847f1f
RH
997
998 case CC_OP_SHLB ... CC_OP_SHLQ:
999 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
1000 size = s->cc_op - CC_OP_SHLB;
bec93d72
RH
1001 shift = (8 << size) - 1;
1002 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1003 .mask = (target_ulong)1 << shift };
06847f1f
RH
1004
1005 case CC_OP_MULB ... CC_OP_MULQ:
bec93d72
RH
1006 return (CCPrepare) { .cond = TCG_COND_NE,
1007 .reg = cpu_cc_src, .mask = -1 };
06847f1f 1008
bc4b43dc
RH
1009 case CC_OP_BMILGB ... CC_OP_BMILGQ:
1010 size = s->cc_op - CC_OP_BMILGB;
1011 t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
1012 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
1013
cd7f97ca
RH
1014 case CC_OP_ADCX:
1015 case CC_OP_ADCOX:
1016 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
1017 .mask = -1, .no_setcond = true };
1018
06847f1f
RH
1019 case CC_OP_EFLAGS:
1020 case CC_OP_SARB ... CC_OP_SARQ:
1021 /* CC_SRC & 1 */
bec93d72
RH
1022 return (CCPrepare) { .cond = TCG_COND_NE,
1023 .reg = cpu_cc_src, .mask = CC_C };
06847f1f
RH
1024
1025 default:
1026 /* The need to compute only C from CC_OP_DYNAMIC is important
1027 in efficiently implementing e.g. INC at the start of a TB. */
1028 gen_update_cc_op(s);
988c3eb0
RH
1029 gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
1030 cpu_cc_src2, cpu_cc_op);
bec93d72
RH
1031 return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1032 .mask = -1, .no_setcond = true };
06847f1f
RH
1033 }
1034}
1035
1608ecca 1036/* compute eflags.P to reg */
bec93d72 1037static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
1608ecca 1038{
d229edce 1039 gen_compute_eflags(s);
bec93d72
RH
1040 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1041 .mask = CC_P };
1608ecca
PB
1042}
1043
1044/* compute eflags.S to reg */
bec93d72 1045static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
1608ecca 1046{
086c4077
RH
1047 switch (s->cc_op) {
1048 case CC_OP_DYNAMIC:
1049 gen_compute_eflags(s);
1050 /* FALLTHRU */
1051 case CC_OP_EFLAGS:
cd7f97ca
RH
1052 case CC_OP_ADCX:
1053 case CC_OP_ADOX:
1054 case CC_OP_ADCOX:
bec93d72
RH
1055 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1056 .mask = CC_S };
436ff2d2
RH
1057 case CC_OP_CLR:
1058 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
086c4077
RH
1059 default:
1060 {
1061 int size = (s->cc_op - CC_OP_ADDB) & 3;
1062 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
bec93d72 1063 return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
086c4077 1064 }
086c4077 1065 }
1608ecca
PB
1066}
1067
1068/* compute eflags.O to reg */
bec93d72 1069static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
1608ecca 1070{
cd7f97ca
RH
1071 switch (s->cc_op) {
1072 case CC_OP_ADOX:
1073 case CC_OP_ADCOX:
1074 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
1075 .mask = -1, .no_setcond = true };
436ff2d2
RH
1076 case CC_OP_CLR:
1077 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
cd7f97ca
RH
1078 default:
1079 gen_compute_eflags(s);
1080 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1081 .mask = CC_O };
1082 }
1608ecca
PB
1083}
1084
1085/* compute eflags.Z to reg */
bec93d72 1086static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
1608ecca 1087{
086c4077
RH
1088 switch (s->cc_op) {
1089 case CC_OP_DYNAMIC:
1090 gen_compute_eflags(s);
1091 /* FALLTHRU */
1092 case CC_OP_EFLAGS:
cd7f97ca
RH
1093 case CC_OP_ADCX:
1094 case CC_OP_ADOX:
1095 case CC_OP_ADCOX:
bec93d72
RH
1096 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1097 .mask = CC_Z };
436ff2d2
RH
1098 case CC_OP_CLR:
1099 return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
086c4077
RH
1100 default:
1101 {
1102 int size = (s->cc_op - CC_OP_ADDB) & 3;
1103 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
bec93d72 1104 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
086c4077 1105 }
bec93d72
RH
1106 }
1107}
1108
c365395e
PB
1109/* perform a conditional store into register 'reg' according to jump opcode
1110 value 'b'. In the fast case, T0 is guaranted not to be used. */
276e6b5f 1111static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
8e1c85e3 1112{
c365395e 1113 int inv, jcc_op, size, cond;
276e6b5f 1114 CCPrepare cc;
c365395e
PB
1115 TCGv t0;
1116
1117 inv = b & 1;
8e1c85e3 1118 jcc_op = (b >> 1) & 7;
c365395e
PB
1119
1120 switch (s->cc_op) {
69d1aa31
RH
1121 case CC_OP_SUBB ... CC_OP_SUBQ:
1122 /* We optimize relational operators for the cmp/jcc case. */
c365395e
PB
1123 size = s->cc_op - CC_OP_SUBB;
1124 switch (jcc_op) {
1125 case JCC_BE:
a3251186 1126 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
c365395e
PB
1127 gen_extu(size, cpu_tmp4);
1128 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
276e6b5f
RH
1129 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
1130 .reg2 = t0, .mask = -1, .use_reg2 = true };
c365395e 1131 break;
8e1c85e3 1132
c365395e 1133 case JCC_L:
276e6b5f 1134 cond = TCG_COND_LT;
c365395e
PB
1135 goto fast_jcc_l;
1136 case JCC_LE:
276e6b5f 1137 cond = TCG_COND_LE;
c365395e 1138 fast_jcc_l:
a3251186 1139 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
c365395e
PB
1140 gen_exts(size, cpu_tmp4);
1141 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
276e6b5f
RH
1142 cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
1143 .reg2 = t0, .mask = -1, .use_reg2 = true };
c365395e 1144 break;
8e1c85e3 1145
c365395e 1146 default:
8e1c85e3 1147 goto slow_jcc;
c365395e 1148 }
8e1c85e3 1149 break;
c365395e 1150
8e1c85e3
FB
1151 default:
1152 slow_jcc:
69d1aa31
RH
1153 /* This actually generates good code for JC, JZ and JS. */
1154 switch (jcc_op) {
1155 case JCC_O:
1156 cc = gen_prepare_eflags_o(s, reg);
1157 break;
1158 case JCC_B:
1159 cc = gen_prepare_eflags_c(s, reg);
1160 break;
1161 case JCC_Z:
1162 cc = gen_prepare_eflags_z(s, reg);
1163 break;
1164 case JCC_BE:
1165 gen_compute_eflags(s);
1166 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1167 .mask = CC_Z | CC_C };
1168 break;
1169 case JCC_S:
1170 cc = gen_prepare_eflags_s(s, reg);
1171 break;
1172 case JCC_P:
1173 cc = gen_prepare_eflags_p(s, reg);
1174 break;
1175 case JCC_L:
1176 gen_compute_eflags(s);
1177 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1178 reg = cpu_tmp0;
1179 }
1180 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1181 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1182 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1183 .mask = CC_S };
1184 break;
1185 default:
1186 case JCC_LE:
1187 gen_compute_eflags(s);
1188 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1189 reg = cpu_tmp0;
1190 }
1191 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1192 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1193 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1194 .mask = CC_S | CC_Z };
1195 break;
1196 }
c365395e 1197 break;
8e1c85e3 1198 }
276e6b5f
RH
1199
1200 if (inv) {
1201 cc.cond = tcg_invert_cond(cc.cond);
1202 }
1203 return cc;
8e1c85e3
FB
1204}
1205
cc8b6f5b
PB
1206static void gen_setcc1(DisasContext *s, int b, TCGv reg)
1207{
1208 CCPrepare cc = gen_prepare_cc(s, b, reg);
1209
1210 if (cc.no_setcond) {
1211 if (cc.cond == TCG_COND_EQ) {
1212 tcg_gen_xori_tl(reg, cc.reg, 1);
1213 } else {
1214 tcg_gen_mov_tl(reg, cc.reg);
1215 }
1216 return;
1217 }
1218
1219 if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
1220 cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
1221 tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
1222 tcg_gen_andi_tl(reg, reg, 1);
1223 return;
1224 }
1225 if (cc.mask != -1) {
1226 tcg_gen_andi_tl(reg, cc.reg, cc.mask);
1227 cc.reg = reg;
1228 }
1229 if (cc.use_reg2) {
1230 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1231 } else {
1232 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1233 }
1234}
1235
1236static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1237{
1238 gen_setcc1(s, JCC_B << 1, reg);
1239}
276e6b5f 1240
8e1c85e3
FB
1241/* generate a conditional jump to label 'l1' according to jump opcode
1242 value 'b'. In the fast case, T0 is guaranted not to be used. */
dc259201
RH
1243static inline void gen_jcc1_noeob(DisasContext *s, int b, int l1)
1244{
1245 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
1246
1247 if (cc.mask != -1) {
1248 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1249 cc.reg = cpu_T[0];
1250 }
1251 if (cc.use_reg2) {
1252 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1253 } else {
1254 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1255 }
1256}
1257
1258/* Generate a conditional jump to label 'l1' according to jump opcode
1259 value 'b'. In the fast case, T0 is guaranted not to be used.
1260 A translation block must end soon. */
b27fc131 1261static inline void gen_jcc1(DisasContext *s, int b, int l1)
8e1c85e3 1262{
943131ca 1263 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
8e1c85e3 1264
dc259201 1265 gen_update_cc_op(s);
943131ca
PB
1266 if (cc.mask != -1) {
1267 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1268 cc.reg = cpu_T[0];
1269 }
dc259201 1270 set_cc_op(s, CC_OP_DYNAMIC);
943131ca
PB
1271 if (cc.use_reg2) {
1272 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1273 } else {
1274 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
8e1c85e3
FB
1275 }
1276}
1277
14ce26e7
FB
1278/* XXX: does not work with gdbstub "ice" single step - not a
1279 serious problem */
1280static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
2c0262af 1281{
14ce26e7
FB
1282 int l1, l2;
1283
1284 l1 = gen_new_label();
1285 l2 = gen_new_label();
6e0d8677 1286 gen_op_jnz_ecx(s->aflag, l1);
14ce26e7
FB
1287 gen_set_label(l2);
1288 gen_jmp_tb(s, next_eip, 1);
1289 gen_set_label(l1);
1290 return l2;
2c0262af
FB
1291}
1292
1293static inline void gen_stos(DisasContext *s, int ot)
1294{
57fec1fe 1295 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
2c0262af 1296 gen_string_movl_A0_EDI(s);
57fec1fe 1297 gen_op_st_T0_A0(ot + s->mem_index);
6e0d8677
FB
1298 gen_op_movl_T0_Dshift(ot);
1299 gen_op_add_reg_T0(s->aflag, R_EDI);
2c0262af
FB
1300}
1301
1302static inline void gen_lods(DisasContext *s, int ot)
1303{
1304 gen_string_movl_A0_ESI(s);
57fec1fe
FB
1305 gen_op_ld_T0_A0(ot + s->mem_index);
1306 gen_op_mov_reg_T0(ot, R_EAX);
6e0d8677
FB
1307 gen_op_movl_T0_Dshift(ot);
1308 gen_op_add_reg_T0(s->aflag, R_ESI);
2c0262af
FB
1309}
1310
1311static inline void gen_scas(DisasContext *s, int ot)
1312{
2c0262af 1313 gen_string_movl_A0_EDI(s);
57fec1fe 1314 gen_op_ld_T1_A0(ot + s->mem_index);
63633fe6 1315 gen_op(s, OP_CMPL, ot, R_EAX);
6e0d8677
FB
1316 gen_op_movl_T0_Dshift(ot);
1317 gen_op_add_reg_T0(s->aflag, R_EDI);
2c0262af
FB
1318}
1319
1320static inline void gen_cmps(DisasContext *s, int ot)
1321{
2c0262af 1322 gen_string_movl_A0_EDI(s);
57fec1fe 1323 gen_op_ld_T1_A0(ot + s->mem_index);
63633fe6
RH
1324 gen_string_movl_A0_ESI(s);
1325 gen_op(s, OP_CMPL, ot, OR_TMP0);
6e0d8677
FB
1326 gen_op_movl_T0_Dshift(ot);
1327 gen_op_add_reg_T0(s->aflag, R_ESI);
1328 gen_op_add_reg_T0(s->aflag, R_EDI);
2c0262af
FB
1329}
1330
1331static inline void gen_ins(DisasContext *s, int ot)
1332{
2e70f6ef
PB
1333 if (use_icount)
1334 gen_io_start();
2c0262af 1335 gen_string_movl_A0_EDI(s);
6e0d8677
FB
1336 /* Note: we must do this dummy write first to be restartable in
1337 case of page fault. */
9772c73b 1338 gen_op_movl_T0_0();
57fec1fe 1339 gen_op_st_T0_A0(ot + s->mem_index);
b8b6a50b 1340 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
b6abf97d
FB
1341 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1342 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
a7812ae4 1343 gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
57fec1fe 1344 gen_op_st_T0_A0(ot + s->mem_index);
6e0d8677
FB
1345 gen_op_movl_T0_Dshift(ot);
1346 gen_op_add_reg_T0(s->aflag, R_EDI);
2e70f6ef
PB
1347 if (use_icount)
1348 gen_io_end();
2c0262af
FB
1349}
1350
1351static inline void gen_outs(DisasContext *s, int ot)
1352{
2e70f6ef
PB
1353 if (use_icount)
1354 gen_io_start();
2c0262af 1355 gen_string_movl_A0_ESI(s);
57fec1fe 1356 gen_op_ld_T0_A0(ot + s->mem_index);
b8b6a50b
FB
1357
1358 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
b6abf97d
FB
1359 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1360 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1361 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
a7812ae4 1362 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
b8b6a50b 1363
6e0d8677
FB
1364 gen_op_movl_T0_Dshift(ot);
1365 gen_op_add_reg_T0(s->aflag, R_ESI);
2e70f6ef
PB
1366 if (use_icount)
1367 gen_io_end();
2c0262af
FB
1368}
1369
1370/* same method as Valgrind : we generate jumps to current or next
1371 instruction */
1372#define GEN_REPZ(op) \
1373static inline void gen_repz_ ## op(DisasContext *s, int ot, \
14ce26e7 1374 target_ulong cur_eip, target_ulong next_eip) \
2c0262af 1375{ \
14ce26e7 1376 int l2;\
2c0262af 1377 gen_update_cc_op(s); \
14ce26e7 1378 l2 = gen_jz_ecx_string(s, next_eip); \
2c0262af 1379 gen_ ## op(s, ot); \
6e0d8677 1380 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
2c0262af
FB
1381 /* a loop would cause two single step exceptions if ECX = 1 \
1382 before rep string_insn */ \
1383 if (!s->jmp_opt) \
6e0d8677 1384 gen_op_jz_ecx(s->aflag, l2); \
2c0262af
FB
1385 gen_jmp(s, cur_eip); \
1386}
1387
1388#define GEN_REPZ2(op) \
1389static inline void gen_repz_ ## op(DisasContext *s, int ot, \
14ce26e7
FB
1390 target_ulong cur_eip, \
1391 target_ulong next_eip, \
2c0262af
FB
1392 int nz) \
1393{ \
14ce26e7 1394 int l2;\
2c0262af 1395 gen_update_cc_op(s); \
14ce26e7 1396 l2 = gen_jz_ecx_string(s, next_eip); \
2c0262af 1397 gen_ ## op(s, ot); \
6e0d8677 1398 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
773cdfcc 1399 gen_update_cc_op(s); \
b27fc131 1400 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
2c0262af 1401 if (!s->jmp_opt) \
6e0d8677 1402 gen_op_jz_ecx(s->aflag, l2); \
2c0262af
FB
1403 gen_jmp(s, cur_eip); \
1404}
1405
1406GEN_REPZ(movs)
1407GEN_REPZ(stos)
1408GEN_REPZ(lods)
1409GEN_REPZ(ins)
1410GEN_REPZ(outs)
1411GEN_REPZ2(scas)
1412GEN_REPZ2(cmps)
1413
a7812ae4
PB
1414static void gen_helper_fp_arith_ST0_FT0(int op)
1415{
1416 switch (op) {
d3eb5eae
BS
1417 case 0:
1418 gen_helper_fadd_ST0_FT0(cpu_env);
1419 break;
1420 case 1:
1421 gen_helper_fmul_ST0_FT0(cpu_env);
1422 break;
1423 case 2:
1424 gen_helper_fcom_ST0_FT0(cpu_env);
1425 break;
1426 case 3:
1427 gen_helper_fcom_ST0_FT0(cpu_env);
1428 break;
1429 case 4:
1430 gen_helper_fsub_ST0_FT0(cpu_env);
1431 break;
1432 case 5:
1433 gen_helper_fsubr_ST0_FT0(cpu_env);
1434 break;
1435 case 6:
1436 gen_helper_fdiv_ST0_FT0(cpu_env);
1437 break;
1438 case 7:
1439 gen_helper_fdivr_ST0_FT0(cpu_env);
1440 break;
a7812ae4
PB
1441 }
1442}
2c0262af
FB
1443
1444/* NOTE the exception in "r" op ordering */
a7812ae4
PB
1445static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1446{
1447 TCGv_i32 tmp = tcg_const_i32(opreg);
1448 switch (op) {
d3eb5eae
BS
1449 case 0:
1450 gen_helper_fadd_STN_ST0(cpu_env, tmp);
1451 break;
1452 case 1:
1453 gen_helper_fmul_STN_ST0(cpu_env, tmp);
1454 break;
1455 case 4:
1456 gen_helper_fsubr_STN_ST0(cpu_env, tmp);
1457 break;
1458 case 5:
1459 gen_helper_fsub_STN_ST0(cpu_env, tmp);
1460 break;
1461 case 6:
1462 gen_helper_fdivr_STN_ST0(cpu_env, tmp);
1463 break;
1464 case 7:
1465 gen_helper_fdiv_STN_ST0(cpu_env, tmp);
1466 break;
a7812ae4
PB
1467 }
1468}
2c0262af
FB
1469
1470/* if d == OR_TMP0, it means memory operand (address in A0) */
1471static void gen_op(DisasContext *s1, int op, int ot, int d)
1472{
2c0262af 1473 if (d != OR_TMP0) {
57fec1fe 1474 gen_op_mov_TN_reg(ot, 0, d);
2c0262af 1475 } else {
57fec1fe 1476 gen_op_ld_T0_A0(ot + s1->mem_index);
2c0262af
FB
1477 }
1478 switch(op) {
1479 case OP_ADCL:
cc8b6f5b 1480 gen_compute_eflags_c(s1, cpu_tmp4);
cad3a37d
FB
1481 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1482 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1483 if (d != OR_TMP0)
1484 gen_op_mov_reg_T0(ot, d);
1485 else
1486 gen_op_st_T0_A0(ot + s1->mem_index);
988c3eb0
RH
1487 gen_op_update3_cc(cpu_tmp4);
1488 set_cc_op(s1, CC_OP_ADCB + ot);
cad3a37d 1489 break;
2c0262af 1490 case OP_SBBL:
cc8b6f5b 1491 gen_compute_eflags_c(s1, cpu_tmp4);
cad3a37d
FB
1492 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1493 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1494 if (d != OR_TMP0)
57fec1fe 1495 gen_op_mov_reg_T0(ot, d);
cad3a37d
FB
1496 else
1497 gen_op_st_T0_A0(ot + s1->mem_index);
988c3eb0
RH
1498 gen_op_update3_cc(cpu_tmp4);
1499 set_cc_op(s1, CC_OP_SBBB + ot);
cad3a37d 1500 break;
2c0262af
FB
1501 case OP_ADDL:
1502 gen_op_addl_T0_T1();
cad3a37d
FB
1503 if (d != OR_TMP0)
1504 gen_op_mov_reg_T0(ot, d);
1505 else
1506 gen_op_st_T0_A0(ot + s1->mem_index);
1507 gen_op_update2_cc();
3ca51d07 1508 set_cc_op(s1, CC_OP_ADDB + ot);
2c0262af
FB
1509 break;
1510 case OP_SUBL:
a3251186 1511 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
57fec1fe 1512 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
cad3a37d
FB
1513 if (d != OR_TMP0)
1514 gen_op_mov_reg_T0(ot, d);
1515 else
1516 gen_op_st_T0_A0(ot + s1->mem_index);
1517 gen_op_update2_cc();
3ca51d07 1518 set_cc_op(s1, CC_OP_SUBB + ot);
2c0262af
FB
1519 break;
1520 default:
1521 case OP_ANDL:
57fec1fe 1522 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
cad3a37d
FB
1523 if (d != OR_TMP0)
1524 gen_op_mov_reg_T0(ot, d);
1525 else
1526 gen_op_st_T0_A0(ot + s1->mem_index);
1527 gen_op_update1_cc();
3ca51d07 1528 set_cc_op(s1, CC_OP_LOGICB + ot);
57fec1fe 1529 break;
2c0262af 1530 case OP_ORL:
57fec1fe 1531 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
cad3a37d
FB
1532 if (d != OR_TMP0)
1533 gen_op_mov_reg_T0(ot, d);
1534 else
1535 gen_op_st_T0_A0(ot + s1->mem_index);
1536 gen_op_update1_cc();
3ca51d07 1537 set_cc_op(s1, CC_OP_LOGICB + ot);
57fec1fe 1538 break;
2c0262af 1539 case OP_XORL:
57fec1fe 1540 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
cad3a37d
FB
1541 if (d != OR_TMP0)
1542 gen_op_mov_reg_T0(ot, d);
1543 else
1544 gen_op_st_T0_A0(ot + s1->mem_index);
1545 gen_op_update1_cc();
3ca51d07 1546 set_cc_op(s1, CC_OP_LOGICB + ot);
2c0262af
FB
1547 break;
1548 case OP_CMPL:
63633fe6 1549 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
a3251186 1550 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
63633fe6 1551 tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
3ca51d07 1552 set_cc_op(s1, CC_OP_SUBB + ot);
2c0262af
FB
1553 break;
1554 }
b6abf97d
FB
1555}
1556
2c0262af
FB
1557/* if d == OR_TMP0, it means memory operand (address in A0) */
1558static void gen_inc(DisasContext *s1, int ot, int d, int c)
1559{
1560 if (d != OR_TMP0)
57fec1fe 1561 gen_op_mov_TN_reg(ot, 0, d);
2c0262af 1562 else
57fec1fe 1563 gen_op_ld_T0_A0(ot + s1->mem_index);
cc8b6f5b 1564 gen_compute_eflags_c(s1, cpu_cc_src);
2c0262af 1565 if (c > 0) {
b6abf97d 1566 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
3ca51d07 1567 set_cc_op(s1, CC_OP_INCB + ot);
2c0262af 1568 } else {
b6abf97d 1569 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
3ca51d07 1570 set_cc_op(s1, CC_OP_DECB + ot);
2c0262af
FB
1571 }
1572 if (d != OR_TMP0)
57fec1fe 1573 gen_op_mov_reg_T0(ot, d);
2c0262af 1574 else
57fec1fe 1575 gen_op_st_T0_A0(ot + s1->mem_index);
cd31fefa 1576 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
2c0262af
FB
1577}
1578
f437d0a3
RH
1579static void gen_shift_flags(DisasContext *s, int ot, TCGv result, TCGv shm1,
1580 TCGv count, bool is_right)
1581{
1582 TCGv_i32 z32, s32, oldop;
1583 TCGv z_tl;
1584
1585 /* Store the results into the CC variables. If we know that the
1586 variable must be dead, store unconditionally. Otherwise we'll
1587 need to not disrupt the current contents. */
1588 z_tl = tcg_const_tl(0);
1589 if (cc_op_live[s->cc_op] & USES_CC_DST) {
1590 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
1591 result, cpu_cc_dst);
1592 } else {
1593 tcg_gen_mov_tl(cpu_cc_dst, result);
1594 }
1595 if (cc_op_live[s->cc_op] & USES_CC_SRC) {
1596 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
1597 shm1, cpu_cc_src);
1598 } else {
1599 tcg_gen_mov_tl(cpu_cc_src, shm1);
1600 }
1601 tcg_temp_free(z_tl);
1602
1603 /* Get the two potential CC_OP values into temporaries. */
1604 tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1605 if (s->cc_op == CC_OP_DYNAMIC) {
1606 oldop = cpu_cc_op;
1607 } else {
1608 tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
1609 oldop = cpu_tmp3_i32;
1610 }
1611
1612 /* Conditionally store the CC_OP value. */
1613 z32 = tcg_const_i32(0);
1614 s32 = tcg_temp_new_i32();
1615 tcg_gen_trunc_tl_i32(s32, count);
1616 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
1617 tcg_temp_free_i32(z32);
1618 tcg_temp_free_i32(s32);
1619
1620 /* The CC_OP value is no longer predictable. */
1621 set_cc_op(s, CC_OP_DYNAMIC);
1622}
1623
b6abf97d
FB
1624static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
1625 int is_right, int is_arith)
2c0262af 1626{
a41f62f5 1627 target_ulong mask = (ot == OT_QUAD ? 0x3f : 0x1f);
3b46e624 1628
b6abf97d 1629 /* load */
82786041 1630 if (op1 == OR_TMP0) {
b6abf97d 1631 gen_op_ld_T0_A0(ot + s->mem_index);
82786041 1632 } else {
b6abf97d 1633 gen_op_mov_TN_reg(ot, 0, op1);
82786041 1634 }
b6abf97d 1635
a41f62f5
RH
1636 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
1637 tcg_gen_subi_tl(cpu_tmp0, cpu_T[1], 1);
b6abf97d
FB
1638
1639 if (is_right) {
1640 if (is_arith) {
f484d386 1641 gen_exts(ot, cpu_T[0]);
a41f62f5
RH
1642 tcg_gen_sar_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1643 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
b6abf97d 1644 } else {
cad3a37d 1645 gen_extu(ot, cpu_T[0]);
a41f62f5
RH
1646 tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1647 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
b6abf97d
FB
1648 }
1649 } else {
a41f62f5
RH
1650 tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1651 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
b6abf97d
FB
1652 }
1653
1654 /* store */
82786041 1655 if (op1 == OR_TMP0) {
b6abf97d 1656 gen_op_st_T0_A0(ot + s->mem_index);
82786041 1657 } else {
b6abf97d 1658 gen_op_mov_reg_T0(ot, op1);
82786041
RH
1659 }
1660
f437d0a3 1661 gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, cpu_T[1], is_right);
b6abf97d
FB
1662}
1663
c1c37968
FB
1664static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
1665 int is_right, int is_arith)
1666{
a41f62f5 1667 int mask = (ot == OT_QUAD ? 0x3f : 0x1f);
c1c37968
FB
1668
1669 /* load */
1670 if (op1 == OR_TMP0)
1671 gen_op_ld_T0_A0(ot + s->mem_index);
1672 else
1673 gen_op_mov_TN_reg(ot, 0, op1);
1674
1675 op2 &= mask;
1676 if (op2 != 0) {
1677 if (is_right) {
1678 if (is_arith) {
1679 gen_exts(ot, cpu_T[0]);
2a449d14 1680 tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
c1c37968
FB
1681 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
1682 } else {
1683 gen_extu(ot, cpu_T[0]);
2a449d14 1684 tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
c1c37968
FB
1685 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
1686 }
1687 } else {
2a449d14 1688 tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
c1c37968
FB
1689 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
1690 }
1691 }
1692
1693 /* store */
1694 if (op1 == OR_TMP0)
1695 gen_op_st_T0_A0(ot + s->mem_index);
1696 else
1697 gen_op_mov_reg_T0(ot, op1);
1698
1699 /* update eflags if non zero shift */
1700 if (op2 != 0) {
2a449d14 1701 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
c1c37968 1702 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
3ca51d07 1703 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
c1c37968
FB
1704 }
1705}
1706
b6abf97d
FB
1707static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
1708{
1709 if (arg2 >= 0)
1710 tcg_gen_shli_tl(ret, arg1, arg2);
1711 else
1712 tcg_gen_shri_tl(ret, arg1, -arg2);
1713}
1714
34d80a55 1715static void gen_rot_rm_T1(DisasContext *s, int ot, int op1, int is_right)
b6abf97d 1716{
34d80a55
RH
1717 target_ulong mask = (ot == OT_QUAD ? 0x3f : 0x1f);
1718 TCGv_i32 t0, t1;
b6abf97d
FB
1719
1720 /* load */
1e4840bf 1721 if (op1 == OR_TMP0) {
34d80a55 1722 gen_op_ld_T0_A0(ot + s->mem_index);
1e4840bf 1723 } else {
34d80a55 1724 gen_op_mov_TN_reg(ot, 0, op1);
1e4840bf 1725 }
b6abf97d 1726
34d80a55 1727 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
b6abf97d 1728
34d80a55
RH
1729 switch (ot) {
1730 case OT_BYTE:
1731 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1732 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
1733 tcg_gen_muli_tl(cpu_T[0], cpu_T[0], 0x01010101);
1734 goto do_long;
1735 case OT_WORD:
1736 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1737 tcg_gen_deposit_tl(cpu_T[0], cpu_T[0], cpu_T[0], 16, 16);
1738 goto do_long;
1739 do_long:
1740#ifdef TARGET_X86_64
1741 case OT_LONG:
1742 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
1743 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
1744 if (is_right) {
1745 tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1746 } else {
1747 tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1748 }
1749 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
1750 break;
1751#endif
1752 default:
1753 if (is_right) {
1754 tcg_gen_rotr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1755 } else {
1756 tcg_gen_rotl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1757 }
1758 break;
b6abf97d 1759 }
b6abf97d 1760
b6abf97d 1761 /* store */
1e4840bf 1762 if (op1 == OR_TMP0) {
34d80a55 1763 gen_op_st_T0_A0(ot + s->mem_index);
1e4840bf 1764 } else {
34d80a55 1765 gen_op_mov_reg_T0(ot, op1);
1e4840bf 1766 }
b6abf97d 1767
34d80a55
RH
1768 /* We'll need the flags computed into CC_SRC. */
1769 gen_compute_eflags(s);
b6abf97d 1770
34d80a55
RH
1771 /* The value that was "rotated out" is now present at the other end
1772 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1773 since we've computed the flags into CC_SRC, these variables are
1774 currently dead. */
b6abf97d 1775 if (is_right) {
34d80a55
RH
1776 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
1777 tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
1778 } else {
1779 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
1780 tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
b6abf97d 1781 }
34d80a55
RH
1782 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1783 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1784
1785 /* Now conditionally store the new CC_OP value. If the shift count
1786 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1787 Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
1788 exactly as we computed above. */
1789 t0 = tcg_const_i32(0);
1790 t1 = tcg_temp_new_i32();
1791 tcg_gen_trunc_tl_i32(t1, cpu_T[1]);
1792 tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
1793 tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
1794 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
1795 cpu_tmp2_i32, cpu_tmp3_i32);
1796 tcg_temp_free_i32(t0);
1797 tcg_temp_free_i32(t1);
1798
1799 /* The CC_OP value is no longer predictable. */
1800 set_cc_op(s, CC_OP_DYNAMIC);
b6abf97d
FB
1801}
1802
8cd6345d 1803static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
1804 int is_right)
1805{
34d80a55
RH
1806 int mask = (ot == OT_QUAD ? 0x3f : 0x1f);
1807 int shift;
8cd6345d 1808
1809 /* load */
1810 if (op1 == OR_TMP0) {
34d80a55 1811 gen_op_ld_T0_A0(ot + s->mem_index);
8cd6345d 1812 } else {
34d80a55 1813 gen_op_mov_TN_reg(ot, 0, op1);
8cd6345d 1814 }
1815
8cd6345d 1816 op2 &= mask;
8cd6345d 1817 if (op2 != 0) {
34d80a55
RH
1818 switch (ot) {
1819#ifdef TARGET_X86_64
1820 case OT_LONG:
1821 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
1822 if (is_right) {
1823 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1824 } else {
1825 tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1826 }
1827 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
1828 break;
1829#endif
1830 default:
1831 if (is_right) {
1832 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], op2);
1833 } else {
1834 tcg_gen_rotli_tl(cpu_T[0], cpu_T[0], op2);
1835 }
1836 break;
1837 case OT_BYTE:
1838 mask = 7;
1839 goto do_shifts;
1840 case OT_WORD:
1841 mask = 15;
1842 do_shifts:
1843 shift = op2 & mask;
1844 if (is_right) {
1845 shift = mask + 1 - shift;
1846 }
1847 gen_extu(ot, cpu_T[0]);
1848 tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], shift);
1849 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], mask + 1 - shift);
1850 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
1851 break;
8cd6345d 1852 }
8cd6345d 1853 }
1854
1855 /* store */
1856 if (op1 == OR_TMP0) {
34d80a55 1857 gen_op_st_T0_A0(ot + s->mem_index);
8cd6345d 1858 } else {
34d80a55 1859 gen_op_mov_reg_T0(ot, op1);
8cd6345d 1860 }
1861
1862 if (op2 != 0) {
34d80a55 1863 /* Compute the flags into CC_SRC. */
d229edce 1864 gen_compute_eflags(s);
0ff6addd 1865
34d80a55
RH
1866 /* The value that was "rotated out" is now present at the other end
1867 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1868 since we've computed the flags into CC_SRC, these variables are
1869 currently dead. */
8cd6345d 1870 if (is_right) {
34d80a55
RH
1871 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
1872 tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
1873 } else {
1874 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
1875 tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
8cd6345d 1876 }
34d80a55
RH
1877 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1878 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1879 set_cc_op(s, CC_OP_ADCOX);
8cd6345d 1880 }
8cd6345d 1881}
1882
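/* Editor's illustrative sketch, not part of the translator: the shift/or
 * idiom used by the immediate path above for 8- and 16-bit operands,
 * shown for a byte.  A right rotate by n is handled as a left rotate by
 * (8 - n), as in the do_shifts case.  Hypothetical helper. */
static inline uint8_t rol8_sketch(uint8_t x, unsigned n)
{
    n &= 7;
    return (uint8_t)((x << n) | (x >> (8 - n)));
}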
b6abf97d
FB
1883/* XXX: add faster immediate = 1 case */
1884static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
1885 int is_right)
1886{
d229edce 1887 gen_compute_eflags(s);
c7b3c873 1888 assert(s->cc_op == CC_OP_EFLAGS);
b6abf97d
FB
1889
1890 /* load */
1891 if (op1 == OR_TMP0)
1892 gen_op_ld_T0_A0(ot + s->mem_index);
1893 else
1894 gen_op_mov_TN_reg(ot, 0, op1);
1895
a7812ae4
PB
1896 if (is_right) {
1897 switch (ot) {
93ab25d7 1898 case OT_BYTE:
7923057b
BS
1899 gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1900 break;
93ab25d7 1901 case OT_WORD:
7923057b
BS
1902 gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1903 break;
93ab25d7 1904 case OT_LONG:
7923057b
BS
1905 gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1906 break;
a7812ae4 1907#ifdef TARGET_X86_64
93ab25d7 1908 case OT_QUAD:
7923057b
BS
1909 gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1910 break;
a7812ae4
PB
1911#endif
1912 }
1913 } else {
1914 switch (ot) {
93ab25d7 1915 case OT_BYTE:
7923057b
BS
1916 gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1917 break;
93ab25d7 1918 case OT_WORD:
7923057b
BS
1919 gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1920 break;
93ab25d7 1921 case OT_LONG:
7923057b
BS
1922 gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1923 break;
a7812ae4 1924#ifdef TARGET_X86_64
93ab25d7 1925 case OT_QUAD:
7923057b
BS
1926 gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1927 break;
a7812ae4
PB
1928#endif
1929 }
1930 }
b6abf97d
FB
1931 /* store */
1932 if (op1 == OR_TMP0)
1933 gen_op_st_T0_A0(ot + s->mem_index);
1934 else
1935 gen_op_mov_reg_T0(ot, op1);
b6abf97d
FB
1936}
1937
1938/* XXX: add faster immediate case */
3b9d3cf1 1939static void gen_shiftd_rm_T1(DisasContext *s, int ot, int op1,
f437d0a3 1940 bool is_right, TCGv count_in)
b6abf97d 1941{
f437d0a3
RH
1942 target_ulong mask = (ot == OT_QUAD ? 63 : 31);
1943 TCGv count;
b6abf97d
FB
1944
1945 /* load */
1e4840bf 1946 if (op1 == OR_TMP0) {
f437d0a3 1947 gen_op_ld_T0_A0(ot + s->mem_index);
1e4840bf 1948 } else {
f437d0a3 1949 gen_op_mov_TN_reg(ot, 0, op1);
1e4840bf 1950 }
b6abf97d 1951
f437d0a3
RH
1952 count = tcg_temp_new();
1953 tcg_gen_andi_tl(count, count_in, mask);
1e4840bf 1954
f437d0a3
RH
1955 switch (ot) {
1956 case OT_WORD:
1957 /* Note: we implement the Intel behaviour for shift count > 16.
1958 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1959 portion by constructing it as a 32-bit value. */
b6abf97d 1960 if (is_right) {
f437d0a3
RH
1961 tcg_gen_deposit_tl(cpu_tmp0, cpu_T[0], cpu_T[1], 16, 16);
1962 tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
1963 tcg_gen_mov_tl(cpu_T[0], cpu_tmp0);
b6abf97d 1964 } else {
f437d0a3 1965 tcg_gen_deposit_tl(cpu_T[1], cpu_T[0], cpu_T[1], 16, 16);
b6abf97d 1966 }
f437d0a3
RH
1967 /* FALLTHRU */
1968#ifdef TARGET_X86_64
1969 case OT_LONG:
1970 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1971 tcg_gen_subi_tl(cpu_tmp0, count, 1);
b6abf97d 1972 if (is_right) {
f437d0a3
RH
1973 tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
1974 tcg_gen_shr_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
1975 tcg_gen_shr_i64(cpu_T[0], cpu_T[0], count);
1976 } else {
1977 tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[1], cpu_T[0]);
1978 tcg_gen_shl_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
1979 tcg_gen_shl_i64(cpu_T[0], cpu_T[0], count);
1980 tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
1981 tcg_gen_shri_i64(cpu_T[0], cpu_T[0], 32);
1982 }
1983 break;
1984#endif
1985 default:
1986 tcg_gen_subi_tl(cpu_tmp0, count, 1);
1987 if (is_right) {
1988 tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
b6abf97d 1989
f437d0a3
RH
1990 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1991 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], count);
1992 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
b6abf97d 1993 } else {
f437d0a3
RH
1994 tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1995 if (ot == OT_WORD) {
1996 /* Only needed if count > 16, for Intel behaviour. */
1997 tcg_gen_subfi_tl(cpu_tmp4, 33, count);
1998 tcg_gen_shr_tl(cpu_tmp4, cpu_T[1], cpu_tmp4);
1999 tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
2000 }
2001
2002 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
2003 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], count);
2004 tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
b6abf97d 2005 }
f437d0a3
RH
2006 tcg_gen_movi_tl(cpu_tmp4, 0);
2007 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[1], count, cpu_tmp4,
2008 cpu_tmp4, cpu_T[1]);
2009 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
2010 break;
b6abf97d 2011 }
b6abf97d 2012
b6abf97d 2013 /* store */
1e4840bf 2014 if (op1 == OR_TMP0) {
f437d0a3 2015 gen_op_st_T0_A0(ot + s->mem_index);
b6abf97d 2016 } else {
f437d0a3 2017 gen_op_mov_reg_T0(ot, op1);
b6abf97d 2018 }
1e4840bf 2019
f437d0a3
RH
2020 gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, count, is_right);
2021 tcg_temp_free(count);
b6abf97d
FB
2022}
2023
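/* Editor's illustrative sketch, not part of the translator: the 16-bit
 * SHRD result under the Intel count > 16 model described above.  The
 * value conceptually shifted is a:b:a, with the destination a in bits
 * 47..32 and 15..0 and the source b in bits 31..16; the result is the
 * low 16 bits.  Hypothetical helper, count already masked to 0..31. */
static inline uint16_t shrd16_sketch(uint16_t a, uint16_t b, unsigned count)
{
    uint64_t src = ((uint64_t)a << 32) | ((uint64_t)b << 16) | a;

    return (uint16_t)(src >> (count & 31));
}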
2024static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
2025{
2026 if (s != OR_TMP1)
2027 gen_op_mov_TN_reg(ot, 1, s);
2028 switch(op) {
2029 case OP_ROL:
2030 gen_rot_rm_T1(s1, ot, d, 0);
2031 break;
2032 case OP_ROR:
2033 gen_rot_rm_T1(s1, ot, d, 1);
2034 break;
2035 case OP_SHL:
2036 case OP_SHL1:
2037 gen_shift_rm_T1(s1, ot, d, 0, 0);
2038 break;
2039 case OP_SHR:
2040 gen_shift_rm_T1(s1, ot, d, 1, 0);
2041 break;
2042 case OP_SAR:
2043 gen_shift_rm_T1(s1, ot, d, 1, 1);
2044 break;
2045 case OP_RCL:
2046 gen_rotc_rm_T1(s1, ot, d, 0);
2047 break;
2048 case OP_RCR:
2049 gen_rotc_rm_T1(s1, ot, d, 1);
2050 break;
2051 }
2c0262af
FB
2052}
2053
2054static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
2055{
c1c37968 2056 switch(op) {
8cd6345d 2057 case OP_ROL:
2058 gen_rot_rm_im(s1, ot, d, c, 0);
2059 break;
2060 case OP_ROR:
2061 gen_rot_rm_im(s1, ot, d, c, 1);
2062 break;
c1c37968
FB
2063 case OP_SHL:
2064 case OP_SHL1:
2065 gen_shift_rm_im(s1, ot, d, c, 0, 0);
2066 break;
2067 case OP_SHR:
2068 gen_shift_rm_im(s1, ot, d, c, 1, 0);
2069 break;
2070 case OP_SAR:
2071 gen_shift_rm_im(s1, ot, d, c, 1, 1);
2072 break;
2073 default:
2074 /* currently not optimized */
2075 gen_op_movl_T1_im(c);
2076 gen_shift(s1, op, ot, d, OR_TMP1);
2077 break;
2078 }
2c0262af
FB
2079}
2080
0af10c86
BS
2081static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm,
2082 int *reg_ptr, int *offset_ptr)
2c0262af 2083{
14ce26e7 2084 target_long disp;
2c0262af 2085 int havesib;
14ce26e7 2086 int base;
2c0262af
FB
2087 int index;
2088 int scale;
2089 int opreg;
2090 int mod, rm, code, override, must_add_seg;
2091
2092 override = s->override;
2093 must_add_seg = s->addseg;
2094 if (override >= 0)
2095 must_add_seg = 1;
2096 mod = (modrm >> 6) & 3;
2097 rm = modrm & 7;
2098
2099 if (s->aflag) {
2100
2101 havesib = 0;
2102 base = rm;
2103 index = 0;
2104 scale = 0;
3b46e624 2105
2c0262af
FB
2106 if (base == 4) {
2107 havesib = 1;
0af10c86 2108 code = cpu_ldub_code(env, s->pc++);
2c0262af 2109 scale = (code >> 6) & 3;
14ce26e7
FB
2110 index = ((code >> 3) & 7) | REX_X(s);
2111 base = (code & 7);
2c0262af 2112 }
14ce26e7 2113 base |= REX_B(s);
2c0262af
FB
2114
2115 switch (mod) {
2116 case 0:
14ce26e7 2117 if ((base & 7) == 5) {
2c0262af 2118 base = -1;
0af10c86 2119 disp = (int32_t)cpu_ldl_code(env, s->pc);
2c0262af 2120 s->pc += 4;
14ce26e7
FB
2121 if (CODE64(s) && !havesib) {
2122 disp += s->pc + s->rip_offset;
2123 }
2c0262af
FB
2124 } else {
2125 disp = 0;
2126 }
2127 break;
2128 case 1:
0af10c86 2129 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2c0262af
FB
2130 break;
2131 default:
2132 case 2:
0af10c86 2133 disp = (int32_t)cpu_ldl_code(env, s->pc);
2c0262af
FB
2134 s->pc += 4;
2135 break;
2136 }
3b46e624 2137
2c0262af
FB
2138 if (base >= 0) {
2139 /* for correct popl handling with esp */
2140 if (base == 4 && s->popl_esp_hack)
2141 disp += s->popl_esp_hack;
14ce26e7
FB
2142#ifdef TARGET_X86_64
2143 if (s->aflag == 2) {
57fec1fe 2144 gen_op_movq_A0_reg(base);
14ce26e7 2145 if (disp != 0) {
57fec1fe 2146 gen_op_addq_A0_im(disp);
14ce26e7 2147 }
5fafdf24 2148 } else
14ce26e7
FB
2149#endif
2150 {
57fec1fe 2151 gen_op_movl_A0_reg(base);
14ce26e7
FB
2152 if (disp != 0)
2153 gen_op_addl_A0_im(disp);
2154 }
2c0262af 2155 } else {
14ce26e7
FB
2156#ifdef TARGET_X86_64
2157 if (s->aflag == 2) {
57fec1fe 2158 gen_op_movq_A0_im(disp);
5fafdf24 2159 } else
14ce26e7
FB
2160#endif
2161 {
2162 gen_op_movl_A0_im(disp);
2163 }
2c0262af 2164 }
b16f827b
AJ
2165 /* index == 4 means no index */
2166 if (havesib && (index != 4)) {
14ce26e7
FB
2167#ifdef TARGET_X86_64
2168 if (s->aflag == 2) {
57fec1fe 2169 gen_op_addq_A0_reg_sN(scale, index);
5fafdf24 2170 } else
14ce26e7
FB
2171#endif
2172 {
57fec1fe 2173 gen_op_addl_A0_reg_sN(scale, index);
14ce26e7 2174 }
2c0262af
FB
2175 }
2176 if (must_add_seg) {
2177 if (override < 0) {
2178 if (base == R_EBP || base == R_ESP)
2179 override = R_SS;
2180 else
2181 override = R_DS;
2182 }
14ce26e7
FB
2183#ifdef TARGET_X86_64
2184 if (s->aflag == 2) {
57fec1fe 2185 gen_op_addq_A0_seg(override);
5fafdf24 2186 } else
14ce26e7
FB
2187#endif
2188 {
7162ab21 2189 gen_op_addl_A0_seg(s, override);
14ce26e7 2190 }
2c0262af
FB
2191 }
2192 } else {
2193 switch (mod) {
2194 case 0:
2195 if (rm == 6) {
0af10c86 2196 disp = cpu_lduw_code(env, s->pc);
2c0262af
FB
2197 s->pc += 2;
2198 gen_op_movl_A0_im(disp);
2199 rm = 0; /* avoid SS override */
2200 goto no_rm;
2201 } else {
2202 disp = 0;
2203 }
2204 break;
2205 case 1:
0af10c86 2206 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2c0262af
FB
2207 break;
2208 default:
2209 case 2:
0af10c86 2210 disp = cpu_lduw_code(env, s->pc);
2c0262af
FB
2211 s->pc += 2;
2212 break;
2213 }
2214 switch(rm) {
2215 case 0:
57fec1fe
FB
2216 gen_op_movl_A0_reg(R_EBX);
2217 gen_op_addl_A0_reg_sN(0, R_ESI);
2c0262af
FB
2218 break;
2219 case 1:
57fec1fe
FB
2220 gen_op_movl_A0_reg(R_EBX);
2221 gen_op_addl_A0_reg_sN(0, R_EDI);
2c0262af
FB
2222 break;
2223 case 2:
57fec1fe
FB
2224 gen_op_movl_A0_reg(R_EBP);
2225 gen_op_addl_A0_reg_sN(0, R_ESI);
2c0262af
FB
2226 break;
2227 case 3:
57fec1fe
FB
2228 gen_op_movl_A0_reg(R_EBP);
2229 gen_op_addl_A0_reg_sN(0, R_EDI);
2c0262af
FB
2230 break;
2231 case 4:
57fec1fe 2232 gen_op_movl_A0_reg(R_ESI);
2c0262af
FB
2233 break;
2234 case 5:
57fec1fe 2235 gen_op_movl_A0_reg(R_EDI);
2c0262af
FB
2236 break;
2237 case 6:
57fec1fe 2238 gen_op_movl_A0_reg(R_EBP);
2c0262af
FB
2239 break;
2240 default:
2241 case 7:
57fec1fe 2242 gen_op_movl_A0_reg(R_EBX);
2c0262af
FB
2243 break;
2244 }
2245 if (disp != 0)
2246 gen_op_addl_A0_im(disp);
2247 gen_op_andl_A0_ffff();
2248 no_rm:
2249 if (must_add_seg) {
2250 if (override < 0) {
2251 if (rm == 2 || rm == 3 || rm == 6)
2252 override = R_SS;
2253 else
2254 override = R_DS;
2255 }
7162ab21 2256 gen_op_addl_A0_seg(s, override);
2c0262af
FB
2257 }
2258 }
2259
2260 opreg = OR_A0;
2261 disp = 0;
2262 *reg_ptr = opreg;
2263 *offset_ptr = disp;
2264}
2265
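/* Editor's illustrative sketch, not part of the translator: how the
 * modrm byte and the optional sib byte decompose into the fields used
 * above.  The translator additionally ORs REX.X into index and REX.B
 * into base in 64-bit mode.  Hypothetical helper. */
static inline void decode_modrm_sib_sketch(uint8_t modrm, uint8_t sib,
                                           int *mod, int *reg, int *rm,
                                           int *scale, int *index, int *base)
{
    *mod   = (modrm >> 6) & 3;   /* addressing mode */
    *reg   = (modrm >> 3) & 7;   /* register operand / opcode extension */
    *rm    = modrm & 7;          /* rm == 4 means a sib byte follows */
    *scale = (sib >> 6) & 3;
    *index = (sib >> 3) & 7;
    *base  = sib & 7;
}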
0af10c86 2266static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
e17a36ce
FB
2267{
2268 int mod, rm, base, code;
2269
2270 mod = (modrm >> 6) & 3;
2271 if (mod == 3)
2272 return;
2273 rm = modrm & 7;
2274
2275 if (s->aflag) {
2276
2277 base = rm;
3b46e624 2278
e17a36ce 2279 if (base == 4) {
0af10c86 2280 code = cpu_ldub_code(env, s->pc++);
e17a36ce
FB
2281 base = (code & 7);
2282 }
3b46e624 2283
e17a36ce
FB
2284 switch (mod) {
2285 case 0:
2286 if (base == 5) {
2287 s->pc += 4;
2288 }
2289 break;
2290 case 1:
2291 s->pc++;
2292 break;
2293 default:
2294 case 2:
2295 s->pc += 4;
2296 break;
2297 }
2298 } else {
2299 switch (mod) {
2300 case 0:
2301 if (rm == 6) {
2302 s->pc += 2;
2303 }
2304 break;
2305 case 1:
2306 s->pc++;
2307 break;
2308 default:
2309 case 2:
2310 s->pc += 2;
2311 break;
2312 }
2313 }
2314}
2315
664e0f19
FB
2316/* used for LEA and MOV AX, mem */
2317static void gen_add_A0_ds_seg(DisasContext *s)
2318{
2319 int override, must_add_seg;
2320 must_add_seg = s->addseg;
2321 override = R_DS;
2322 if (s->override >= 0) {
2323 override = s->override;
2324 must_add_seg = 1;
664e0f19
FB
2325 }
2326 if (must_add_seg) {
8f091a59
FB
2327#ifdef TARGET_X86_64
2328 if (CODE64(s)) {
57fec1fe 2329 gen_op_addq_A0_seg(override);
5fafdf24 2330 } else
8f091a59
FB
2331#endif
2332 {
7162ab21 2333 gen_op_addl_A0_seg(s, override);
8f091a59 2334 }
664e0f19
FB
2335 }
2336}
2337
222a3336 2338/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2c0262af 2339 OR_TMP0 */
0af10c86
BS
2340static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2341 int ot, int reg, int is_store)
2c0262af
FB
2342{
2343 int mod, rm, opreg, disp;
2344
2345 mod = (modrm >> 6) & 3;
14ce26e7 2346 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
2347 if (mod == 3) {
2348 if (is_store) {
2349 if (reg != OR_TMP0)
57fec1fe
FB
2350 gen_op_mov_TN_reg(ot, 0, reg);
2351 gen_op_mov_reg_T0(ot, rm);
2c0262af 2352 } else {
57fec1fe 2353 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af 2354 if (reg != OR_TMP0)
57fec1fe 2355 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
2356 }
2357 } else {
0af10c86 2358 gen_lea_modrm(env, s, modrm, &opreg, &disp);
2c0262af
FB
2359 if (is_store) {
2360 if (reg != OR_TMP0)
57fec1fe
FB
2361 gen_op_mov_TN_reg(ot, 0, reg);
2362 gen_op_st_T0_A0(ot + s->mem_index);
2c0262af 2363 } else {
57fec1fe 2364 gen_op_ld_T0_A0(ot + s->mem_index);
2c0262af 2365 if (reg != OR_TMP0)
57fec1fe 2366 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
2367 }
2368 }
2369}
2370
0af10c86 2371static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, int ot)
2c0262af
FB
2372{
2373 uint32_t ret;
2374
2375 switch(ot) {
2376 case OT_BYTE:
0af10c86 2377 ret = cpu_ldub_code(env, s->pc);
2c0262af
FB
2378 s->pc++;
2379 break;
2380 case OT_WORD:
0af10c86 2381 ret = cpu_lduw_code(env, s->pc);
2c0262af
FB
2382 s->pc += 2;
2383 break;
2384 default:
2385 case OT_LONG:
0af10c86 2386 ret = cpu_ldl_code(env, s->pc);
2c0262af
FB
2387 s->pc += 4;
2388 break;
2389 }
2390 return ret;
2391}
2392
14ce26e7
FB
2393static inline int insn_const_size(unsigned int ot)
2394{
2395 if (ot <= OT_LONG)
2396 return 1 << ot;
2397 else
2398 return 4;
2399}
2400
6e256c93
FB
2401static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2402{
2403 TranslationBlock *tb;
2404 target_ulong pc;
2405
2406 pc = s->cs_base + eip;
2407 tb = s->tb;
2408 /* NOTE: we handle the case where the TB spans two pages here */
2409 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2410 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2411 /* jump to same page: we can use a direct jump */
57fec1fe 2412 tcg_gen_goto_tb(tb_num);
6e256c93 2413 gen_jmp_im(eip);
4b4a72e5 2414 tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
6e256c93
FB
2415 } else {
2416 /* jump to another page: currently not optimized */
2417 gen_jmp_im(eip);
2418 gen_eob(s);
2419 }
2420}
2421
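/* Editor's illustrative sketch, not part of the translator: the page
 * predicate used above.  A direct (chained) jump is only emitted when
 * the target lies on the page of the TB start or on the page of the
 * last decoded byte, i.e. one of the two pages the TB may span.
 * Hypothetical helper. */
static inline bool goto_tb_same_page_sketch(target_ulong pc,
                                            target_ulong tb_pc,
                                            target_ulong last_byte_pc)
{
    return (pc & TARGET_PAGE_MASK) == (tb_pc & TARGET_PAGE_MASK) ||
           (pc & TARGET_PAGE_MASK) == (last_byte_pc & TARGET_PAGE_MASK);
}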
5fafdf24 2422static inline void gen_jcc(DisasContext *s, int b,
14ce26e7 2423 target_ulong val, target_ulong next_eip)
2c0262af 2424{
b27fc131 2425 int l1, l2;
3b46e624 2426
2c0262af 2427 if (s->jmp_opt) {
14ce26e7 2428 l1 = gen_new_label();
b27fc131 2429 gen_jcc1(s, b, l1);
dc259201 2430
6e256c93 2431 gen_goto_tb(s, 0, next_eip);
14ce26e7
FB
2432
2433 gen_set_label(l1);
6e256c93 2434 gen_goto_tb(s, 1, val);
5779406a 2435 s->is_jmp = DISAS_TB_JUMP;
2c0262af 2436 } else {
14ce26e7
FB
2437 l1 = gen_new_label();
2438 l2 = gen_new_label();
b27fc131 2439 gen_jcc1(s, b, l1);
8e1c85e3 2440
14ce26e7 2441 gen_jmp_im(next_eip);
8e1c85e3
FB
2442 tcg_gen_br(l2);
2443
14ce26e7
FB
2444 gen_set_label(l1);
2445 gen_jmp_im(val);
2446 gen_set_label(l2);
2c0262af
FB
2447 gen_eob(s);
2448 }
2449}
2450
f32d3781
PB
2451static void gen_cmovcc1(CPUX86State *env, DisasContext *s, int ot, int b,
2452 int modrm, int reg)
2453{
57eb0cc8 2454 CCPrepare cc;
f32d3781 2455
57eb0cc8 2456 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
f32d3781 2457
57eb0cc8
RH
2458 cc = gen_prepare_cc(s, b, cpu_T[1]);
2459 if (cc.mask != -1) {
2460 TCGv t0 = tcg_temp_new();
2461 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2462 cc.reg = t0;
2463 }
2464 if (!cc.use_reg2) {
2465 cc.reg2 = tcg_const_tl(cc.imm);
f32d3781
PB
2466 }
2467
57eb0cc8
RH
2468 tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
2469 cpu_T[0], cpu_regs[reg]);
2470 gen_op_mov_reg_T0(ot, reg);
2471
2472 if (cc.mask != -1) {
2473 tcg_temp_free(cc.reg);
2474 }
2475 if (!cc.use_reg2) {
2476 tcg_temp_free(cc.reg2);
2477 }
f32d3781
PB
2478}
2479
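/* Editor's illustrative sketch, not part of the translator: the CMOVcc
 * rule encoded by the movcond above.  The source operand is always read
 * (the gen_ldst_modrm load happens unconditionally); only the register
 * write-back is conditional.  Hypothetical helper. */
static inline uint32_t cmov32_sketch(int cond_true, uint32_t src,
                                     uint32_t old_dst)
{
    return cond_true ? src : old_dst;
}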
3bd7da9e
FB
2480static inline void gen_op_movl_T0_seg(int seg_reg)
2481{
2482 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2483 offsetof(CPUX86State,segs[seg_reg].selector));
2484}
2485
2486static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2487{
2488 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2489 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2490 offsetof(CPUX86State,segs[seg_reg].selector));
2491 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2492 tcg_gen_st_tl(cpu_T[0], cpu_env,
2493 offsetof(CPUX86State,segs[seg_reg].base));
2494}
2495
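/* Editor's illustrative sketch, not part of the translator: the vm86 /
 * real-mode segment load performed above.  The base is just the 16-bit
 * selector shifted left by four, e.g. selector 0xB800 gives base
 * 0xB8000, so 0xB800:0x0010 addresses linear 0xB8010.  Hypothetical
 * helper. */
static inline uint32_t vm86_seg_base_sketch(uint16_t selector)
{
    return (uint32_t)selector << 4;
}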
2c0262af
FB
2496/* move T0 to seg_reg and compute if the CPU state may change. Never
2497 call this function with seg_reg == R_CS */
14ce26e7 2498static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2c0262af 2499{
3415a4dd
FB
2500 if (s->pe && !s->vm86) {
2501 /* XXX: optimize by finding processor state dynamically */
773cdfcc 2502 gen_update_cc_op(s);
14ce26e7 2503 gen_jmp_im(cur_eip);
b6abf97d 2504 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 2505 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
dc196a57
FB
2506 /* abort translation because the addseg value may change or
2507 because ss32 may change. For R_SS, translation must always
2508 stop as a special handling must be done to disable hardware
2509 interrupts for the next instruction */
2510 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
5779406a 2511 s->is_jmp = DISAS_TB_JUMP;
3415a4dd 2512 } else {
3bd7da9e 2513 gen_op_movl_seg_T0_vm(seg_reg);
dc196a57 2514 if (seg_reg == R_SS)
5779406a 2515 s->is_jmp = DISAS_TB_JUMP;
3415a4dd 2516 }
2c0262af
FB
2517}
2518
0573fbfc
TS
2519static inline int svm_is_rep(int prefixes)
2520{
2521 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2522}
2523
872929aa 2524static inline void
0573fbfc 2525gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
b8b6a50b 2526 uint32_t type, uint64_t param)
0573fbfc 2527{
872929aa
FB
2528 /* no SVM activated; fast case */
2529 if (likely(!(s->flags & HF_SVMI_MASK)))
2530 return;
773cdfcc 2531 gen_update_cc_op(s);
872929aa 2532 gen_jmp_im(pc_start - s->cs_base);
052e80d5 2533 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
a7812ae4 2534 tcg_const_i64(param));
0573fbfc
TS
2535}
2536
872929aa 2537static inline void
0573fbfc
TS
2538gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2539{
872929aa 2540 gen_svm_check_intercept_param(s, pc_start, type, 0);
0573fbfc
TS
2541}
2542
4f31916f
FB
2543static inline void gen_stack_update(DisasContext *s, int addend)
2544{
14ce26e7
FB
2545#ifdef TARGET_X86_64
2546 if (CODE64(s)) {
6e0d8677 2547 gen_op_add_reg_im(2, R_ESP, addend);
14ce26e7
FB
2548 } else
2549#endif
4f31916f 2550 if (s->ss32) {
6e0d8677 2551 gen_op_add_reg_im(1, R_ESP, addend);
4f31916f 2552 } else {
6e0d8677 2553 gen_op_add_reg_im(0, R_ESP, addend);
4f31916f
FB
2554 }
2555}
2556
2c0262af
FB
2557/* generate a push. It depends on ss32, addseg and dflag */
2558static void gen_push_T0(DisasContext *s)
2559{
14ce26e7
FB
2560#ifdef TARGET_X86_64
2561 if (CODE64(s)) {
57fec1fe 2562 gen_op_movq_A0_reg(R_ESP);
8f091a59 2563 if (s->dflag) {
57fec1fe
FB
2564 gen_op_addq_A0_im(-8);
2565 gen_op_st_T0_A0(OT_QUAD + s->mem_index);
8f091a59 2566 } else {
57fec1fe
FB
2567 gen_op_addq_A0_im(-2);
2568 gen_op_st_T0_A0(OT_WORD + s->mem_index);
8f091a59 2569 }
57fec1fe 2570 gen_op_mov_reg_A0(2, R_ESP);
5fafdf24 2571 } else
14ce26e7
FB
2572#endif
2573 {
57fec1fe 2574 gen_op_movl_A0_reg(R_ESP);
14ce26e7 2575 if (!s->dflag)
57fec1fe 2576 gen_op_addl_A0_im(-2);
14ce26e7 2577 else
57fec1fe 2578 gen_op_addl_A0_im(-4);
14ce26e7
FB
2579 if (s->ss32) {
2580 if (s->addseg) {
bbf662ee 2581 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
7162ab21 2582 gen_op_addl_A0_seg(s, R_SS);
14ce26e7
FB
2583 }
2584 } else {
2585 gen_op_andl_A0_ffff();
bbf662ee 2586 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
7162ab21 2587 gen_op_addl_A0_seg(s, R_SS);
2c0262af 2588 }
57fec1fe 2589 gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
14ce26e7 2590 if (s->ss32 && !s->addseg)
57fec1fe 2591 gen_op_mov_reg_A0(1, R_ESP);
14ce26e7 2592 else
57fec1fe 2593 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
2c0262af
FB
2594 }
2595}
2596
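/* Editor's illustrative sketch, not part of the translator: the
 * stack-pointer arithmetic modelled above for a push.  ESP moves down by
 * the operand size (4 bytes when dflag is set, 2 otherwise) before the
 * value is stored; the 64-bit and 16-bit wrap-around cases are omitted.
 * Hypothetical helper. */
static inline uint32_t push_new_esp_sketch(uint32_t esp, int dflag)
{
    return esp - (dflag ? 4 : 2);
}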
4f31916f
FB
2597/* generate a push. It depends on ss32, addseg and dflag */
2598/* slower version for T1, only used for call Ev */
2599static void gen_push_T1(DisasContext *s)
2c0262af 2600{
14ce26e7
FB
2601#ifdef TARGET_X86_64
2602 if (CODE64(s)) {
57fec1fe 2603 gen_op_movq_A0_reg(R_ESP);
8f091a59 2604 if (s->dflag) {
57fec1fe
FB
2605 gen_op_addq_A0_im(-8);
2606 gen_op_st_T1_A0(OT_QUAD + s->mem_index);
8f091a59 2607 } else {
57fec1fe
FB
2608 gen_op_addq_A0_im(-2);
2609 gen_op_st_T0_A0(OT_WORD + s->mem_index);
8f091a59 2610 }
57fec1fe 2611 gen_op_mov_reg_A0(2, R_ESP);
5fafdf24 2612 } else
14ce26e7
FB
2613#endif
2614 {
57fec1fe 2615 gen_op_movl_A0_reg(R_ESP);
14ce26e7 2616 if (!s->dflag)
57fec1fe 2617 gen_op_addl_A0_im(-2);
14ce26e7 2618 else
57fec1fe 2619 gen_op_addl_A0_im(-4);
14ce26e7
FB
2620 if (s->ss32) {
2621 if (s->addseg) {
7162ab21 2622 gen_op_addl_A0_seg(s, R_SS);
14ce26e7
FB
2623 }
2624 } else {
2625 gen_op_andl_A0_ffff();
7162ab21 2626 gen_op_addl_A0_seg(s, R_SS);
2c0262af 2627 }
57fec1fe 2628 gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
3b46e624 2629
14ce26e7 2630 if (s->ss32 && !s->addseg)
57fec1fe 2631 gen_op_mov_reg_A0(1, R_ESP);
14ce26e7
FB
2632 else
2633 gen_stack_update(s, (-2) << s->dflag);
2c0262af
FB
2634 }
2635}
2636
4f31916f
FB
2637/* two step pop is necessary for precise exceptions */
2638static void gen_pop_T0(DisasContext *s)
2c0262af 2639{
14ce26e7
FB
2640#ifdef TARGET_X86_64
2641 if (CODE64(s)) {
57fec1fe
FB
2642 gen_op_movq_A0_reg(R_ESP);
2643 gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
5fafdf24 2644 } else
14ce26e7
FB
2645#endif
2646 {
57fec1fe 2647 gen_op_movl_A0_reg(R_ESP);
14ce26e7
FB
2648 if (s->ss32) {
2649 if (s->addseg)
7162ab21 2650 gen_op_addl_A0_seg(s, R_SS);
14ce26e7
FB
2651 } else {
2652 gen_op_andl_A0_ffff();
7162ab21 2653 gen_op_addl_A0_seg(s, R_SS);
14ce26e7 2654 }
57fec1fe 2655 gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
2c0262af
FB
2656 }
2657}
2658
2659static void gen_pop_update(DisasContext *s)
2660{
14ce26e7 2661#ifdef TARGET_X86_64
8f091a59 2662 if (CODE64(s) && s->dflag) {
14ce26e7
FB
2663 gen_stack_update(s, 8);
2664 } else
2665#endif
2666 {
2667 gen_stack_update(s, 2 << s->dflag);
2668 }
2c0262af
FB
2669}
2670
2671static void gen_stack_A0(DisasContext *s)
2672{
57fec1fe 2673 gen_op_movl_A0_reg(R_ESP);
2c0262af
FB
2674 if (!s->ss32)
2675 gen_op_andl_A0_ffff();
bbf662ee 2676 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2c0262af 2677 if (s->addseg)
7162ab21 2678 gen_op_addl_A0_seg(s, R_SS);
2c0262af
FB
2679}
2680
2681/* NOTE: wrap-around in 16-bit mode is not fully handled */
2682static void gen_pusha(DisasContext *s)
2683{
2684 int i;
57fec1fe 2685 gen_op_movl_A0_reg(R_ESP);
2c0262af
FB
2686 gen_op_addl_A0_im(-16 << s->dflag);
2687 if (!s->ss32)
2688 gen_op_andl_A0_ffff();
bbf662ee 2689 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2c0262af 2690 if (s->addseg)
7162ab21 2691 gen_op_addl_A0_seg(s, R_SS);
2c0262af 2692 for(i = 0;i < 8; i++) {
57fec1fe
FB
2693 gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
2694 gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
2c0262af
FB
2695 gen_op_addl_A0_im(2 << s->dflag);
2696 }
57fec1fe 2697 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2c0262af
FB
2698}
2699
2700/* NOTE: wrap-around in 16-bit mode is not fully handled */
2701static void gen_popa(DisasContext *s)
2702{
2703 int i;
57fec1fe 2704 gen_op_movl_A0_reg(R_ESP);
2c0262af
FB
2705 if (!s->ss32)
2706 gen_op_andl_A0_ffff();
bbf662ee
FB
2707 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2708 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2c0262af 2709 if (s->addseg)
7162ab21 2710 gen_op_addl_A0_seg(s, R_SS);
2c0262af
FB
2711 for(i = 0;i < 8; i++) {
2712 /* ESP is not reloaded */
2713 if (i != 3) {
57fec1fe
FB
2714 gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
2715 gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
2c0262af
FB
2716 }
2717 gen_op_addl_A0_im(2 << s->dflag);
2718 }
57fec1fe 2719 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2c0262af
FB
2720}
2721
2c0262af
FB
2722static void gen_enter(DisasContext *s, int esp_addend, int level)
2723{
61a8c4ec 2724 int ot, opsize;
2c0262af 2725
2c0262af 2726 level &= 0x1f;
8f091a59
FB
2727#ifdef TARGET_X86_64
2728 if (CODE64(s)) {
2729 ot = s->dflag ? OT_QUAD : OT_WORD;
2730 opsize = 1 << ot;
3b46e624 2731
57fec1fe 2732 gen_op_movl_A0_reg(R_ESP);
8f091a59 2733 gen_op_addq_A0_im(-opsize);
bbf662ee 2734 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
8f091a59
FB
2735
2736 /* push bp */
57fec1fe
FB
2737 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2738 gen_op_st_T0_A0(ot + s->mem_index);
8f091a59 2739 if (level) {
b5b38f61 2740 /* XXX: must save state */
2999a0b2 2741 gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
a7812ae4
PB
2742 tcg_const_i32((ot == OT_QUAD)),
2743 cpu_T[1]);
8f091a59 2744 }
57fec1fe 2745 gen_op_mov_reg_T1(ot, R_EBP);
bbf662ee 2746 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
57fec1fe 2747 gen_op_mov_reg_T1(OT_QUAD, R_ESP);
5fafdf24 2748 } else
8f091a59
FB
2749#endif
2750 {
2751 ot = s->dflag + OT_WORD;
2752 opsize = 2 << s->dflag;
3b46e624 2753
57fec1fe 2754 gen_op_movl_A0_reg(R_ESP);
8f091a59
FB
2755 gen_op_addl_A0_im(-opsize);
2756 if (!s->ss32)
2757 gen_op_andl_A0_ffff();
bbf662ee 2758 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
8f091a59 2759 if (s->addseg)
7162ab21 2760 gen_op_addl_A0_seg(s, R_SS);
8f091a59 2761 /* push bp */
57fec1fe
FB
2762 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2763 gen_op_st_T0_A0(ot + s->mem_index);
8f091a59 2764 if (level) {
b5b38f61 2765 /* XXX: must save state */
2999a0b2 2766 gen_helper_enter_level(cpu_env, tcg_const_i32(level),
a7812ae4
PB
2767 tcg_const_i32(s->dflag),
2768 cpu_T[1]);
8f091a59 2769 }
57fec1fe 2770 gen_op_mov_reg_T1(ot, R_EBP);
bbf662ee 2771 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
57fec1fe 2772 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2c0262af 2773 }
2c0262af
FB
2774}
2775
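/* Editor's illustrative sketch, not part of the translator: the frame
 * that gen_enter builds for the common level == 0 case with a 32-bit
 * stack.  EBP ends up pointing at the saved-EBP slot and ESP is lowered
 * further by the allocation immediate (esp_addend above).  Hypothetical
 * helper; the actual store of the old EBP value is omitted. */
static inline void enter_frame_sketch(uint32_t *esp, uint32_t *ebp,
                                      uint32_t alloc_size)
{
    *esp -= 4;            /* slot for the saved EBP */
    *ebp  = *esp;         /* new frame pointer */
    *esp -= alloc_size;   /* local variable area */
}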
14ce26e7 2776static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2c0262af 2777{
773cdfcc 2778 gen_update_cc_op(s);
14ce26e7 2779 gen_jmp_im(cur_eip);
77b2bc2c 2780 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
5779406a 2781 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2782}
2783
2784/* an interrupt is different from an exception because of the
7f75ffd3 2785 privilege checks */
5fafdf24 2786static void gen_interrupt(DisasContext *s, int intno,
14ce26e7 2787 target_ulong cur_eip, target_ulong next_eip)
2c0262af 2788{
773cdfcc 2789 gen_update_cc_op(s);
14ce26e7 2790 gen_jmp_im(cur_eip);
77b2bc2c 2791 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
a7812ae4 2792 tcg_const_i32(next_eip - cur_eip));
5779406a 2793 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2794}
2795
14ce26e7 2796static void gen_debug(DisasContext *s, target_ulong cur_eip)
2c0262af 2797{
773cdfcc 2798 gen_update_cc_op(s);
14ce26e7 2799 gen_jmp_im(cur_eip);
4a7443be 2800 gen_helper_debug(cpu_env);
5779406a 2801 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2802}
2803
2804/* generate a generic end of block. Trace exception is also generated
2805 if needed */
2806static void gen_eob(DisasContext *s)
2807{
773cdfcc 2808 gen_update_cc_op(s);
a2cc3b24 2809 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
f0967a1a 2810 gen_helper_reset_inhibit_irq(cpu_env);
a2cc3b24 2811 }
a2397807 2812 if (s->tb->flags & HF_RF_MASK) {
f0967a1a 2813 gen_helper_reset_rf(cpu_env);
a2397807 2814 }
34865134 2815 if (s->singlestep_enabled) {
4a7443be 2816 gen_helper_debug(cpu_env);
34865134 2817 } else if (s->tf) {
4a7443be 2818 gen_helper_single_step(cpu_env);
2c0262af 2819 } else {
57fec1fe 2820 tcg_gen_exit_tb(0);
2c0262af 2821 }
5779406a 2822 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2823}
2824
2825/* generate a jump to eip. No segment change must happen before this, as a
2826   direct jump to the next (chained) block may occur */
14ce26e7 2827static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2c0262af 2828{
a3251186
RH
2829 gen_update_cc_op(s);
2830 set_cc_op(s, CC_OP_DYNAMIC);
2c0262af 2831 if (s->jmp_opt) {
6e256c93 2832 gen_goto_tb(s, tb_num, eip);
5779406a 2833 s->is_jmp = DISAS_TB_JUMP;
2c0262af 2834 } else {
14ce26e7 2835 gen_jmp_im(eip);
2c0262af
FB
2836 gen_eob(s);
2837 }
2838}
2839
14ce26e7
FB
2840static void gen_jmp(DisasContext *s, target_ulong eip)
2841{
2842 gen_jmp_tb(s, eip, 0);
2843}
2844
8686c490
FB
2845static inline void gen_ldq_env_A0(int idx, int offset)
2846{
2847 int mem_index = (idx >> 2) - 1;
b6abf97d
FB
2848 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2849 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
8686c490 2850}
664e0f19 2851
8686c490
FB
2852static inline void gen_stq_env_A0(int idx, int offset)
2853{
2854 int mem_index = (idx >> 2) - 1;
b6abf97d
FB
2855 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2856 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
8686c490 2857}
664e0f19 2858
8686c490
FB
2859static inline void gen_ldo_env_A0(int idx, int offset)
2860{
2861 int mem_index = (idx >> 2) - 1;
b6abf97d
FB
2862 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2863 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
8686c490 2864 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
b6abf97d
FB
2865 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2866 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
8686c490 2867}
14ce26e7 2868
8686c490
FB
2869static inline void gen_sto_env_A0(int idx, int offset)
2870{
2871 int mem_index = (idx >> 2) - 1;
b6abf97d
FB
2872 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2873 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
8686c490 2874 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
b6abf97d
FB
2875 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2876 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
8686c490 2877}
14ce26e7 2878
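/* Editor's illustrative sketch, not part of the translator: the access
 * pattern of gen_ldo_env_A0/gen_sto_env_A0 above.  A 128-bit XMM value
 * is moved as two 64-bit accesses, XMM_Q(0) at A0 and XMM_Q(1) at
 * A0 + 8.  Hypothetical helper, host representation only. */
static inline void load_xmm128_sketch(const uint64_t *mem, uint64_t xmm_q[2])
{
    xmm_q[0] = mem[0];   /* low quadword,  A0     */
    xmm_q[1] = mem[1];   /* high quadword, A0 + 8 */
}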
5af45186
FB
2879static inline void gen_op_movo(int d_offset, int s_offset)
2880{
b6abf97d
FB
2881 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2882 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2883 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2884 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
5af45186
FB
2885}
2886
2887static inline void gen_op_movq(int d_offset, int s_offset)
2888{
b6abf97d
FB
2889 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2890 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
5af45186
FB
2891}
2892
2893static inline void gen_op_movl(int d_offset, int s_offset)
2894{
b6abf97d
FB
2895 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2896 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
5af45186
FB
2897}
2898
2899static inline void gen_op_movq_env_0(int d_offset)
2900{
b6abf97d
FB
2901 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2902 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
5af45186 2903}
664e0f19 2904
d3eb5eae
BS
2905typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2906typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2907typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2908typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2909typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2910typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2911 TCGv_i32 val);
c4baa050 2912typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
d3eb5eae
BS
2913typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2914 TCGv val);
c4baa050 2915
5af45186
FB
2916#define SSE_SPECIAL ((void *)1)
2917#define SSE_DUMMY ((void *)2)
664e0f19 2918
a7812ae4
PB
2919#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2920#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2921 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
5af45186 2922
d3eb5eae 2923static const SSEFunc_0_epp sse_op_table1[256][4] = {
a35f3ec7
AJ
2924 /* 3DNow! extensions */
2925 [0x0e] = { SSE_DUMMY }, /* femms */
2926 [0x0f] = { SSE_DUMMY }, /* pf... */
664e0f19
FB
2927 /* pure SSE operations */
2928 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2929 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
465e9838 2930 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
664e0f19 2931 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
a7812ae4
PB
2932 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2933 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
664e0f19
FB
2934 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2935 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2936
2937 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2938 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2939 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
d9f4bb27 2940 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
664e0f19
FB
2941 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2942 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
a7812ae4
PB
2943 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2944 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
664e0f19
FB
2945 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2946 [0x51] = SSE_FOP(sqrt),
a7812ae4
PB
2947 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2948 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2949 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2950 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2951 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2952 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
664e0f19
FB
2953 [0x58] = SSE_FOP(add),
2954 [0x59] = SSE_FOP(mul),
a7812ae4
PB
2955 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2956 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2957 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
664e0f19
FB
2958 [0x5c] = SSE_FOP(sub),
2959 [0x5d] = SSE_FOP(min),
2960 [0x5e] = SSE_FOP(div),
2961 [0x5f] = SSE_FOP(max),
2962
2963 [0xc2] = SSE_FOP(cmpeq),
d3eb5eae
BS
2964 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2965 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
664e0f19 2966
7073fbad
RH
2967 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2968 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2969 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
4242b1bd 2970
664e0f19
FB
2971 /* MMX ops and their SSE extensions */
2972 [0x60] = MMX_OP2(punpcklbw),
2973 [0x61] = MMX_OP2(punpcklwd),
2974 [0x62] = MMX_OP2(punpckldq),
2975 [0x63] = MMX_OP2(packsswb),
2976 [0x64] = MMX_OP2(pcmpgtb),
2977 [0x65] = MMX_OP2(pcmpgtw),
2978 [0x66] = MMX_OP2(pcmpgtl),
2979 [0x67] = MMX_OP2(packuswb),
2980 [0x68] = MMX_OP2(punpckhbw),
2981 [0x69] = MMX_OP2(punpckhwd),
2982 [0x6a] = MMX_OP2(punpckhdq),
2983 [0x6b] = MMX_OP2(packssdw),
a7812ae4
PB
2984 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2985 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
664e0f19
FB
2986 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2987    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
d3eb5eae
BS
2988 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2989 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2990 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2991 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
664e0f19
FB
2992 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2993 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2994 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2995 [0x74] = MMX_OP2(pcmpeqb),
2996 [0x75] = MMX_OP2(pcmpeqw),
2997 [0x76] = MMX_OP2(pcmpeql),
a35f3ec7 2998 [0x77] = { SSE_DUMMY }, /* emms */
d9f4bb27
AP
2999 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
3000 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
a7812ae4
PB
3001 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
3002 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
664e0f19
FB
3003    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
3004 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
3005 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
3006 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
a7812ae4 3007 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
664e0f19
FB
3008 [0xd1] = MMX_OP2(psrlw),
3009 [0xd2] = MMX_OP2(psrld),
3010 [0xd3] = MMX_OP2(psrlq),
3011 [0xd4] = MMX_OP2(paddq),
3012 [0xd5] = MMX_OP2(pmullw),
3013 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
3014 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
3015 [0xd8] = MMX_OP2(psubusb),
3016 [0xd9] = MMX_OP2(psubusw),
3017 [0xda] = MMX_OP2(pminub),
3018 [0xdb] = MMX_OP2(pand),
3019 [0xdc] = MMX_OP2(paddusb),
3020 [0xdd] = MMX_OP2(paddusw),
3021 [0xde] = MMX_OP2(pmaxub),
3022 [0xdf] = MMX_OP2(pandn),
3023 [0xe0] = MMX_OP2(pavgb),
3024 [0xe1] = MMX_OP2(psraw),
3025 [0xe2] = MMX_OP2(psrad),
3026 [0xe3] = MMX_OP2(pavgw),
3027 [0xe4] = MMX_OP2(pmulhuw),
3028 [0xe5] = MMX_OP2(pmulhw),
a7812ae4 3029 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
664e0f19
FB
3030    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL },  /* movntq, movntdq */
3031 [0xe8] = MMX_OP2(psubsb),
3032 [0xe9] = MMX_OP2(psubsw),
3033 [0xea] = MMX_OP2(pminsw),
3034 [0xeb] = MMX_OP2(por),
3035 [0xec] = MMX_OP2(paddsb),
3036 [0xed] = MMX_OP2(paddsw),
3037 [0xee] = MMX_OP2(pmaxsw),
3038 [0xef] = MMX_OP2(pxor),
465e9838 3039 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
664e0f19
FB
3040 [0xf1] = MMX_OP2(psllw),
3041 [0xf2] = MMX_OP2(pslld),
3042 [0xf3] = MMX_OP2(psllq),
3043 [0xf4] = MMX_OP2(pmuludq),
3044 [0xf5] = MMX_OP2(pmaddwd),
3045 [0xf6] = MMX_OP2(psadbw),
d3eb5eae
BS
3046 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
3047 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
664e0f19
FB
3048 [0xf8] = MMX_OP2(psubb),
3049 [0xf9] = MMX_OP2(psubw),
3050 [0xfa] = MMX_OP2(psubl),
3051 [0xfb] = MMX_OP2(psubq),
3052 [0xfc] = MMX_OP2(paddb),
3053 [0xfd] = MMX_OP2(paddw),
3054 [0xfe] = MMX_OP2(paddl),
3055};
3056
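/* Editor's illustrative sketch, not part of the translator: how a column
 * of sse_op_table1 is selected.  The table is indexed by the second
 * opcode byte and then by the mandatory prefix; this mirrors the b1
 * computation in gen_sse() further below.  Hypothetical helper using the
 * PREFIX_* flags gathered during decode. */
static inline int sse_prefix_column_sketch(int prefixes)
{
    if (prefixes & PREFIX_DATA) {
        return 1;   /* 0x66: pd / integer-xmm forms */
    }
    if (prefixes & PREFIX_REPZ) {
        return 2;   /* 0xF3: ss forms */
    }
    if (prefixes & PREFIX_REPNZ) {
        return 3;   /* 0xF2: sd forms */
    }
    return 0;       /* no prefix: MMX / ps forms */
}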
d3eb5eae 3057static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
664e0f19
FB
3058 [0 + 2] = MMX_OP2(psrlw),
3059 [0 + 4] = MMX_OP2(psraw),
3060 [0 + 6] = MMX_OP2(psllw),
3061 [8 + 2] = MMX_OP2(psrld),
3062 [8 + 4] = MMX_OP2(psrad),
3063 [8 + 6] = MMX_OP2(pslld),
3064 [16 + 2] = MMX_OP2(psrlq),
a7812ae4 3065 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
664e0f19 3066 [16 + 6] = MMX_OP2(psllq),
a7812ae4 3067 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
664e0f19
FB
3068};
3069
d3eb5eae 3070static const SSEFunc_0_epi sse_op_table3ai[] = {
a7812ae4 3071 gen_helper_cvtsi2ss,
11f8cdbc 3072 gen_helper_cvtsi2sd
c4baa050 3073};
a7812ae4 3074
11f8cdbc 3075#ifdef TARGET_X86_64
d3eb5eae 3076static const SSEFunc_0_epl sse_op_table3aq[] = {
11f8cdbc
SW
3077 gen_helper_cvtsq2ss,
3078 gen_helper_cvtsq2sd
3079};
3080#endif
3081
d3eb5eae 3082static const SSEFunc_i_ep sse_op_table3bi[] = {
a7812ae4 3083 gen_helper_cvttss2si,
a7812ae4 3084 gen_helper_cvtss2si,
bedc2ac1 3085 gen_helper_cvttsd2si,
11f8cdbc 3086 gen_helper_cvtsd2si
664e0f19 3087};
3b46e624 3088
11f8cdbc 3089#ifdef TARGET_X86_64
d3eb5eae 3090static const SSEFunc_l_ep sse_op_table3bq[] = {
11f8cdbc 3091 gen_helper_cvttss2sq,
11f8cdbc 3092 gen_helper_cvtss2sq,
bedc2ac1 3093 gen_helper_cvttsd2sq,
11f8cdbc
SW
3094 gen_helper_cvtsd2sq
3095};
3096#endif
3097
d3eb5eae 3098static const SSEFunc_0_epp sse_op_table4[8][4] = {
664e0f19
FB
3099 SSE_FOP(cmpeq),
3100 SSE_FOP(cmplt),
3101 SSE_FOP(cmple),
3102 SSE_FOP(cmpunord),
3103 SSE_FOP(cmpneq),
3104 SSE_FOP(cmpnlt),
3105 SSE_FOP(cmpnle),
3106 SSE_FOP(cmpord),
3107};
3b46e624 3108
d3eb5eae 3109static const SSEFunc_0_epp sse_op_table5[256] = {
a7812ae4
PB
3110 [0x0c] = gen_helper_pi2fw,
3111 [0x0d] = gen_helper_pi2fd,
3112 [0x1c] = gen_helper_pf2iw,
3113 [0x1d] = gen_helper_pf2id,
3114 [0x8a] = gen_helper_pfnacc,
3115 [0x8e] = gen_helper_pfpnacc,
3116 [0x90] = gen_helper_pfcmpge,
3117 [0x94] = gen_helper_pfmin,
3118 [0x96] = gen_helper_pfrcp,
3119 [0x97] = gen_helper_pfrsqrt,
3120 [0x9a] = gen_helper_pfsub,
3121 [0x9e] = gen_helper_pfadd,
3122 [0xa0] = gen_helper_pfcmpgt,
3123 [0xa4] = gen_helper_pfmax,
3124 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
3125 [0xa7] = gen_helper_movq, /* pfrsqit1 */
3126 [0xaa] = gen_helper_pfsubr,
3127 [0xae] = gen_helper_pfacc,
3128 [0xb0] = gen_helper_pfcmpeq,
3129 [0xb4] = gen_helper_pfmul,
3130 [0xb6] = gen_helper_movq, /* pfrcpit2 */
3131 [0xb7] = gen_helper_pmulhrw_mmx,
3132 [0xbb] = gen_helper_pswapd,
3133 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
a35f3ec7
AJ
3134};
3135
d3eb5eae
BS
3136struct SSEOpHelper_epp {
3137 SSEFunc_0_epp op[2];
c4baa050
BS
3138 uint32_t ext_mask;
3139};
3140
d3eb5eae
BS
3141struct SSEOpHelper_eppi {
3142 SSEFunc_0_eppi op[2];
c4baa050 3143 uint32_t ext_mask;
222a3336 3144};
c4baa050 3145
222a3336 3146#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
a7812ae4
PB
3147#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3148#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
222a3336 3149#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
e71827bc
AJ
3150#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
3151 CPUID_EXT_PCLMULQDQ }
c4baa050 3152
d3eb5eae 3153static const struct SSEOpHelper_epp sse_op_table6[256] = {
222a3336
AZ
3154 [0x00] = SSSE3_OP(pshufb),
3155 [0x01] = SSSE3_OP(phaddw),
3156 [0x02] = SSSE3_OP(phaddd),
3157 [0x03] = SSSE3_OP(phaddsw),
3158 [0x04] = SSSE3_OP(pmaddubsw),
3159 [0x05] = SSSE3_OP(phsubw),
3160 [0x06] = SSSE3_OP(phsubd),
3161 [0x07] = SSSE3_OP(phsubsw),
3162 [0x08] = SSSE3_OP(psignb),
3163 [0x09] = SSSE3_OP(psignw),
3164 [0x0a] = SSSE3_OP(psignd),
3165 [0x0b] = SSSE3_OP(pmulhrsw),
3166 [0x10] = SSE41_OP(pblendvb),
3167 [0x14] = SSE41_OP(blendvps),
3168 [0x15] = SSE41_OP(blendvpd),
3169 [0x17] = SSE41_OP(ptest),
3170 [0x1c] = SSSE3_OP(pabsb),
3171 [0x1d] = SSSE3_OP(pabsw),
3172 [0x1e] = SSSE3_OP(pabsd),
3173 [0x20] = SSE41_OP(pmovsxbw),
3174 [0x21] = SSE41_OP(pmovsxbd),
3175 [0x22] = SSE41_OP(pmovsxbq),
3176 [0x23] = SSE41_OP(pmovsxwd),
3177 [0x24] = SSE41_OP(pmovsxwq),
3178 [0x25] = SSE41_OP(pmovsxdq),
3179 [0x28] = SSE41_OP(pmuldq),
3180 [0x29] = SSE41_OP(pcmpeqq),
3181    [0x2a] = SSE41_SPECIAL, /* movntdqa */
3182 [0x2b] = SSE41_OP(packusdw),
3183 [0x30] = SSE41_OP(pmovzxbw),
3184 [0x31] = SSE41_OP(pmovzxbd),
3185 [0x32] = SSE41_OP(pmovzxbq),
3186 [0x33] = SSE41_OP(pmovzxwd),
3187 [0x34] = SSE41_OP(pmovzxwq),
3188 [0x35] = SSE41_OP(pmovzxdq),
3189 [0x37] = SSE42_OP(pcmpgtq),
3190 [0x38] = SSE41_OP(pminsb),
3191 [0x39] = SSE41_OP(pminsd),
3192 [0x3a] = SSE41_OP(pminuw),
3193 [0x3b] = SSE41_OP(pminud),
3194 [0x3c] = SSE41_OP(pmaxsb),
3195 [0x3d] = SSE41_OP(pmaxsd),
3196 [0x3e] = SSE41_OP(pmaxuw),
3197 [0x3f] = SSE41_OP(pmaxud),
3198 [0x40] = SSE41_OP(pmulld),
3199 [0x41] = SSE41_OP(phminposuw),
4242b1bd
AZ
3200};
3201
d3eb5eae 3202static const struct SSEOpHelper_eppi sse_op_table7[256] = {
222a3336
AZ
3203 [0x08] = SSE41_OP(roundps),
3204 [0x09] = SSE41_OP(roundpd),
3205 [0x0a] = SSE41_OP(roundss),
3206 [0x0b] = SSE41_OP(roundsd),
3207 [0x0c] = SSE41_OP(blendps),
3208 [0x0d] = SSE41_OP(blendpd),
3209 [0x0e] = SSE41_OP(pblendw),
3210 [0x0f] = SSSE3_OP(palignr),
3211 [0x14] = SSE41_SPECIAL, /* pextrb */
3212 [0x15] = SSE41_SPECIAL, /* pextrw */
3213 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
3214 [0x17] = SSE41_SPECIAL, /* extractps */
3215 [0x20] = SSE41_SPECIAL, /* pinsrb */
3216 [0x21] = SSE41_SPECIAL, /* insertps */
3217 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
3218 [0x40] = SSE41_OP(dpps),
3219 [0x41] = SSE41_OP(dppd),
3220 [0x42] = SSE41_OP(mpsadbw),
e71827bc 3221 [0x44] = PCLMULQDQ_OP(pclmulqdq),
222a3336
AZ
3222 [0x60] = SSE42_OP(pcmpestrm),
3223 [0x61] = SSE42_OP(pcmpestri),
3224 [0x62] = SSE42_OP(pcmpistrm),
3225 [0x63] = SSE42_OP(pcmpistri),
4242b1bd
AZ
3226};
3227
0af10c86
BS
3228static void gen_sse(CPUX86State *env, DisasContext *s, int b,
3229 target_ulong pc_start, int rex_r)
664e0f19
FB
3230{
3231 int b1, op1_offset, op2_offset, is_xmm, val, ot;
3232 int modrm, mod, rm, reg, reg_addr, offset_addr;
d3eb5eae
BS
3233 SSEFunc_0_epp sse_fn_epp;
3234 SSEFunc_0_eppi sse_fn_eppi;
c4baa050 3235 SSEFunc_0_ppi sse_fn_ppi;
d3eb5eae 3236 SSEFunc_0_eppt sse_fn_eppt;
664e0f19
FB
3237
3238 b &= 0xff;
5fafdf24 3239 if (s->prefix & PREFIX_DATA)
664e0f19 3240 b1 = 1;
5fafdf24 3241 else if (s->prefix & PREFIX_REPZ)
664e0f19 3242 b1 = 2;
5fafdf24 3243 else if (s->prefix & PREFIX_REPNZ)
664e0f19
FB
3244 b1 = 3;
3245 else
3246 b1 = 0;
d3eb5eae
BS
3247 sse_fn_epp = sse_op_table1[b][b1];
3248 if (!sse_fn_epp) {
664e0f19 3249 goto illegal_op;
c4baa050 3250 }
a35f3ec7 3251 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
664e0f19
FB
3252 is_xmm = 1;
3253 } else {
3254 if (b1 == 0) {
3255 /* MMX case */
3256 is_xmm = 0;
3257 } else {
3258 is_xmm = 1;
3259 }
3260 }
3261 /* simple MMX/SSE operation */
3262 if (s->flags & HF_TS_MASK) {
3263 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3264 return;
3265 }
3266 if (s->flags & HF_EM_MASK) {
3267 illegal_op:
3268 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3269 return;
3270 }
3271 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
4242b1bd
AZ
3272 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3273 goto illegal_op;
e771edab
AJ
3274 if (b == 0x0e) {
3275 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3276 goto illegal_op;
3277 /* femms */
d3eb5eae 3278 gen_helper_emms(cpu_env);
e771edab
AJ
3279 return;
3280 }
3281 if (b == 0x77) {
3282 /* emms */
d3eb5eae 3283 gen_helper_emms(cpu_env);
664e0f19
FB
3284 return;
3285 }
3286 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3287 the static cpu state) */
3288 if (!is_xmm) {
d3eb5eae 3289 gen_helper_enter_mmx(cpu_env);
664e0f19
FB
3290 }
3291
0af10c86 3292 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3293 reg = ((modrm >> 3) & 7);
3294 if (is_xmm)
3295 reg |= rex_r;
3296 mod = (modrm >> 6) & 3;
d3eb5eae 3297 if (sse_fn_epp == SSE_SPECIAL) {
664e0f19
FB
3298 b |= (b1 << 8);
3299 switch(b) {
3300 case 0x0e7: /* movntq */
5fafdf24 3301 if (mod == 3)
664e0f19 3302 goto illegal_op;
0af10c86 3303 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3304 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
664e0f19
FB
3305 break;
3306 case 0x1e7: /* movntdq */
3307 case 0x02b: /* movntps */
3308        case 0x12b: /* movntpd */
2e21e749
T
3309 if (mod == 3)
3310 goto illegal_op;
0af10c86 3311 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2e21e749
T
3312 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3313 break;
465e9838
FB
3314 case 0x3f0: /* lddqu */
3315 if (mod == 3)
664e0f19 3316 goto illegal_op;
0af10c86 3317 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
c2254920 3318 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
664e0f19 3319 break;
d9f4bb27
AP
3320 case 0x22b: /* movntss */
3321 case 0x32b: /* movntsd */
3322 if (mod == 3)
3323 goto illegal_op;
0af10c86 3324 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
d9f4bb27
AP
3325 if (b1 & 1) {
3326 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
3327 xmm_regs[reg]));
3328 } else {
3329 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3330 xmm_regs[reg].XMM_L(0)));
3331 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3332 }
3333 break;
664e0f19 3334 case 0x6e: /* movd mm, ea */
dabd98dd
FB
3335#ifdef TARGET_X86_64
3336 if (s->dflag == 2) {
0af10c86 3337 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
5af45186 3338 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
5fafdf24 3339 } else
dabd98dd
FB
3340#endif
3341 {
0af10c86 3342 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
5af45186
FB
3343 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3344 offsetof(CPUX86State,fpregs[reg].mmx));
a7812ae4
PB
3345 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3346 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
dabd98dd 3347 }
664e0f19
FB
3348 break;
3349 case 0x16e: /* movd xmm, ea */
dabd98dd
FB
3350#ifdef TARGET_X86_64
3351 if (s->dflag == 2) {
0af10c86 3352 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
5af45186
FB
3353 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3354 offsetof(CPUX86State,xmm_regs[reg]));
a7812ae4 3355 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
5fafdf24 3356 } else
dabd98dd
FB
3357#endif
3358 {
0af10c86 3359 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
5af45186
FB
3360 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3361 offsetof(CPUX86State,xmm_regs[reg]));
b6abf97d 3362 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
a7812ae4 3363 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
dabd98dd 3364 }
664e0f19
FB
3365 break;
3366 case 0x6f: /* movq mm, ea */
3367 if (mod != 3) {
0af10c86 3368 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3369 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
664e0f19
FB
3370 } else {
3371 rm = (modrm & 7);
b6abf97d 3372 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
5af45186 3373 offsetof(CPUX86State,fpregs[rm].mmx));
b6abf97d 3374 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
5af45186 3375 offsetof(CPUX86State,fpregs[reg].mmx));
664e0f19
FB
3376 }
3377 break;
3378 case 0x010: /* movups */
3379 case 0x110: /* movupd */
3380 case 0x028: /* movaps */
3381 case 0x128: /* movapd */
3382 case 0x16f: /* movdqa xmm, ea */
3383 case 0x26f: /* movdqu xmm, ea */
3384 if (mod != 3) {
0af10c86 3385 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3386 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
664e0f19
FB
3387 } else {
3388 rm = (modrm & 7) | REX_B(s);
3389 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3390 offsetof(CPUX86State,xmm_regs[rm]));
3391 }
3392 break;
3393 case 0x210: /* movss xmm, ea */
3394 if (mod != 3) {
0af10c86 3395 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
57fec1fe 3396 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
651ba608 3397 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
664e0f19 3398 gen_op_movl_T0_0();
651ba608
FB
3399 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3400 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3401 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
664e0f19
FB
3402 } else {
3403 rm = (modrm & 7) | REX_B(s);
3404 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3405 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3406 }
3407 break;
3408 case 0x310: /* movsd xmm, ea */
3409 if (mod != 3) {
0af10c86 3410 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3411 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
664e0f19 3412 gen_op_movl_T0_0();
651ba608
FB
3413 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3414 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
664e0f19
FB
3415 } else {
3416 rm = (modrm & 7) | REX_B(s);
3417 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3418 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3419 }
3420 break;
3421 case 0x012: /* movlps */
3422 case 0x112: /* movlpd */
3423 if (mod != 3) {
0af10c86 3424 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3425 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3426 } else {
3427 /* movhlps */
3428 rm = (modrm & 7) | REX_B(s);
3429 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3430 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3431 }
3432 break;
465e9838
FB
3433 case 0x212: /* movsldup */
3434 if (mod != 3) {
0af10c86 3435 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3436 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
465e9838
FB
3437 } else {
3438 rm = (modrm & 7) | REX_B(s);
3439 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3440 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3441 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3442 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3443 }
3444 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3445 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3446 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3447 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3448 break;
3449 case 0x312: /* movddup */
3450 if (mod != 3) {
0af10c86 3451 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3452 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
465e9838
FB
3453 } else {
3454 rm = (modrm & 7) | REX_B(s);
3455 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3456 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3457 }
3458 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
ba6526df 3459 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
465e9838 3460 break;
664e0f19
FB
3461 case 0x016: /* movhps */
3462 case 0x116: /* movhpd */
3463 if (mod != 3) {
0af10c86 3464 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3465 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
664e0f19
FB
3466 } else {
3467 /* movlhps */
3468 rm = (modrm & 7) | REX_B(s);
3469 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3470 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3471 }
3472 break;
3473 case 0x216: /* movshdup */
3474 if (mod != 3) {
0af10c86 3475 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3476 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
664e0f19
FB
3477 } else {
3478 rm = (modrm & 7) | REX_B(s);
3479 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3480 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3481 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3482 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3483 }
3484 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3485 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3486 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3487 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3488 break;
d9f4bb27
AP
3489 case 0x178:
3490 case 0x378:
3491 {
3492 int bit_index, field_length;
3493
3494 if (b1 == 1 && reg != 0)
3495 goto illegal_op;
0af10c86
BS
3496 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3497 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
d9f4bb27
AP
3498 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3499 offsetof(CPUX86State,xmm_regs[reg]));
3500 if (b1 == 1)
d3eb5eae
BS
3501 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3502 tcg_const_i32(bit_index),
3503 tcg_const_i32(field_length));
d9f4bb27 3504 else
d3eb5eae
BS
3505 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3506 tcg_const_i32(bit_index),
3507 tcg_const_i32(field_length));
d9f4bb27
AP
3508 }
3509 break;
664e0f19 3510 case 0x7e: /* movd ea, mm */
dabd98dd
FB
3511#ifdef TARGET_X86_64
3512 if (s->dflag == 2) {
5af45186
FB
3513 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3514 offsetof(CPUX86State,fpregs[reg].mmx));
0af10c86 3515 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
5fafdf24 3516 } else
dabd98dd
FB
3517#endif
3518 {
5af45186
FB
3519 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3520 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
0af10c86 3521 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
dabd98dd 3522 }
664e0f19
FB
3523 break;
3524 case 0x17e: /* movd ea, xmm */
dabd98dd
FB
3525#ifdef TARGET_X86_64
3526 if (s->dflag == 2) {
5af45186
FB
3527 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3528 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
0af10c86 3529 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
5fafdf24 3530 } else
dabd98dd
FB
3531#endif
3532 {
5af45186
FB
3533 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3534 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
0af10c86 3535 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
dabd98dd 3536 }
664e0f19
FB
3537 break;
3538 case 0x27e: /* movq xmm, ea */
3539 if (mod != 3) {
0af10c86 3540 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3541 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3542 } else {
3543 rm = (modrm & 7) | REX_B(s);
3544 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3545 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3546 }
3547 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3548 break;
3549 case 0x7f: /* movq ea, mm */
3550 if (mod != 3) {
0af10c86 3551 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3552 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
664e0f19
FB
3553 } else {
3554 rm = (modrm & 7);
3555 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3556 offsetof(CPUX86State,fpregs[reg].mmx));
3557 }
3558 break;
3559 case 0x011: /* movups */
3560 case 0x111: /* movupd */
3561 case 0x029: /* movaps */
3562 case 0x129: /* movapd */
3563 case 0x17f: /* movdqa ea, xmm */
3564 case 0x27f: /* movdqu ea, xmm */
3565 if (mod != 3) {
0af10c86 3566 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3567 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
664e0f19
FB
3568 } else {
3569 rm = (modrm & 7) | REX_B(s);
3570 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3571 offsetof(CPUX86State,xmm_regs[reg]));
3572 }
3573 break;
3574 case 0x211: /* movss ea, xmm */
3575 if (mod != 3) {
0af10c86 3576 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
651ba608 3577 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
57fec1fe 3578 gen_op_st_T0_A0(OT_LONG + s->mem_index);
664e0f19
FB
3579 } else {
3580 rm = (modrm & 7) | REX_B(s);
3581 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3582 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3583 }
3584 break;
3585 case 0x311: /* movsd ea, xmm */
3586 if (mod != 3) {
0af10c86 3587 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3588 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3589 } else {
3590 rm = (modrm & 7) | REX_B(s);
3591 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3592 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3593 }
3594 break;
3595 case 0x013: /* movlps */
3596 case 0x113: /* movlpd */
3597 if (mod != 3) {
0af10c86 3598 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3599 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3600 } else {
3601 goto illegal_op;
3602 }
3603 break;
3604 case 0x017: /* movhps */
3605 case 0x117: /* movhpd */
3606 if (mod != 3) {
0af10c86 3607 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3608 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
664e0f19
FB
3609 } else {
3610 goto illegal_op;
3611 }
3612 break;
3613 case 0x71: /* shift mm, im */
3614 case 0x72:
3615 case 0x73:
3616 case 0x171: /* shift xmm, im */
3617 case 0x172:
3618 case 0x173:
c045af25
AK
3619 if (b1 >= 2) {
3620 goto illegal_op;
3621 }
0af10c86 3622 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3623 if (is_xmm) {
3624 gen_op_movl_T0_im(val);
651ba608 3625 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
664e0f19 3626 gen_op_movl_T0_0();
651ba608 3627 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
664e0f19
FB
3628 op1_offset = offsetof(CPUX86State,xmm_t0);
3629 } else {
3630 gen_op_movl_T0_im(val);
651ba608 3631 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
664e0f19 3632 gen_op_movl_T0_0();
651ba608 3633 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
664e0f19
FB
3634 op1_offset = offsetof(CPUX86State,mmx_t0);
3635 }
d3eb5eae
BS
3636 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3637 (((modrm >> 3)) & 7)][b1];
3638 if (!sse_fn_epp) {
664e0f19 3639 goto illegal_op;
c4baa050 3640 }
664e0f19
FB
3641 if (is_xmm) {
3642 rm = (modrm & 7) | REX_B(s);
3643 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3644 } else {
3645 rm = (modrm & 7);
3646 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3647 }
5af45186
FB
3648 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3649 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
d3eb5eae 3650 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3651 break;
3652 case 0x050: /* movmskps */
664e0f19 3653 rm = (modrm & 7) | REX_B(s);
5af45186
FB
3654 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3655 offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3656 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
b6abf97d 3657 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
57fec1fe 3658 gen_op_mov_reg_T0(OT_LONG, reg);
664e0f19
FB
3659 break;
3660 case 0x150: /* movmskpd */
664e0f19 3661 rm = (modrm & 7) | REX_B(s);
5af45186
FB
3662 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3663 offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3664 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
b6abf97d 3665 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
57fec1fe 3666 gen_op_mov_reg_T0(OT_LONG, reg);
664e0f19
FB
3667 break;
3668 case 0x02a: /* cvtpi2ps */
3669 case 0x12a: /* cvtpi2pd */
d3eb5eae 3670 gen_helper_enter_mmx(cpu_env);
664e0f19 3671 if (mod != 3) {
0af10c86 3672 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
664e0f19 3673 op2_offset = offsetof(CPUX86State,mmx_t0);
8686c490 3674 gen_ldq_env_A0(s->mem_index, op2_offset);
664e0f19
FB
3675 } else {
3676 rm = (modrm & 7);
3677 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3678 }
3679 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
5af45186
FB
3680 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3681 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
664e0f19
FB
3682 switch(b >> 8) {
3683 case 0x0:
d3eb5eae 3684 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3685 break;
3686 default:
3687 case 0x1:
d3eb5eae 3688 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3689 break;
3690 }
3691 break;
3692 case 0x22a: /* cvtsi2ss */
3693 case 0x32a: /* cvtsi2sd */
3694 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
0af10c86 3695 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
664e0f19 3696 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
5af45186 3697 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
28e10711 3698 if (ot == OT_LONG) {
d3eb5eae 3699 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
28e10711 3700 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 3701 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
28e10711 3702 } else {
11f8cdbc 3703#ifdef TARGET_X86_64
d3eb5eae
BS
3704 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3705 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
11f8cdbc
SW
3706#else
3707 goto illegal_op;
3708#endif
28e10711 3709 }
664e0f19
FB
3710 break;
3711 case 0x02c: /* cvttps2pi */
3712 case 0x12c: /* cvttpd2pi */
3713 case 0x02d: /* cvtps2pi */
3714 case 0x12d: /* cvtpd2pi */
d3eb5eae 3715 gen_helper_enter_mmx(cpu_env);
664e0f19 3716 if (mod != 3) {
0af10c86 3717 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
664e0f19 3718 op2_offset = offsetof(CPUX86State,xmm_t0);
8686c490 3719 gen_ldo_env_A0(s->mem_index, op2_offset);
664e0f19
FB
3720 } else {
3721 rm = (modrm & 7) | REX_B(s);
3722 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3723 }
3724 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
5af45186
FB
3725 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3726 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
664e0f19
FB
3727 switch(b) {
3728 case 0x02c:
d3eb5eae 3729 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3730 break;
3731 case 0x12c:
d3eb5eae 3732 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3733 break;
3734 case 0x02d:
d3eb5eae 3735 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3736 break;
3737 case 0x12d:
d3eb5eae 3738 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3739 break;
3740 }
3741 break;
3742 case 0x22c: /* cvttss2si */
3743 case 0x32c: /* cvttsd2si */
3744 case 0x22d: /* cvtss2si */
3745 case 0x32d: /* cvtsd2si */
3746 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
31313213 3747 if (mod != 3) {
0af10c86 3748 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
31313213 3749 if ((b >> 8) & 1) {
8686c490 3750 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
31313213 3751 } else {
57fec1fe 3752 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
651ba608 3753 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
31313213
FB
3754 }
3755 op2_offset = offsetof(CPUX86State,xmm_t0);
3756 } else {
3757 rm = (modrm & 7) | REX_B(s);
3758 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3759 }
5af45186
FB
3760 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3761 if (ot == OT_LONG) {
d3eb5eae 3762 SSEFunc_i_ep sse_fn_i_ep =
bedc2ac1 3763 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
d3eb5eae 3764 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
b6abf97d 3765 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5af45186 3766 } else {
11f8cdbc 3767#ifdef TARGET_X86_64
d3eb5eae 3768 SSEFunc_l_ep sse_fn_l_ep =
bedc2ac1 3769 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
d3eb5eae 3770 sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
11f8cdbc
SW
3771#else
3772 goto illegal_op;
3773#endif
5af45186 3774 }
57fec1fe 3775 gen_op_mov_reg_T0(ot, reg);
664e0f19
FB
3776 break;
3777 case 0xc4: /* pinsrw */
5fafdf24 3778 case 0x1c4:
d1e42c5c 3779 s->rip_offset = 1;
0af10c86
BS
3780 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
3781 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3782 if (b1) {
3783 val &= 7;
5af45186
FB
3784 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3785 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
664e0f19
FB
3786 } else {
3787 val &= 3;
5af45186
FB
3788 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3789 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
664e0f19
FB
3790 }
3791 break;
3792 case 0xc5: /* pextrw */
5fafdf24 3793 case 0x1c5:
664e0f19
FB
3794 if (mod != 3)
3795 goto illegal_op;
6dc2d0da 3796 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
0af10c86 3797 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3798 if (b1) {
3799 val &= 7;
3800 rm = (modrm & 7) | REX_B(s);
5af45186
FB
3801 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3802 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
664e0f19
FB
3803 } else {
3804 val &= 3;
3805 rm = (modrm & 7);
5af45186
FB
3806 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3807 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
664e0f19
FB
3808 }
3809 reg = ((modrm >> 3) & 7) | rex_r;
6dc2d0da 3810 gen_op_mov_reg_T0(ot, reg);
664e0f19
FB
3811 break;
3812 case 0x1d6: /* movq ea, xmm */
3813 if (mod != 3) {
0af10c86 3814 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8686c490 3815 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3816 } else {
3817 rm = (modrm & 7) | REX_B(s);
3818 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3819 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3820 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3821 }
3822 break;
3823 case 0x2d6: /* movq2dq */
d3eb5eae 3824 gen_helper_enter_mmx(cpu_env);
480c1cdb
FB
3825 rm = (modrm & 7);
3826 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3827 offsetof(CPUX86State,fpregs[rm].mmx));
3828 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
664e0f19
FB
3829 break;
3830 case 0x3d6: /* movdq2q */
d3eb5eae 3831 gen_helper_enter_mmx(cpu_env);
480c1cdb
FB
3832 rm = (modrm & 7) | REX_B(s);
3833 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3834 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
664e0f19
FB
3835 break;
3836 case 0xd7: /* pmovmskb */
3837 case 0x1d7:
3838 if (mod != 3)
3839 goto illegal_op;
3840 if (b1) {
3841 rm = (modrm & 7) | REX_B(s);
5af45186 3842 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3843 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
664e0f19
FB
3844 } else {
3845 rm = (modrm & 7);
5af45186 3846 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
d3eb5eae 3847 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
664e0f19 3848 }
b6abf97d 3849 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
664e0f19 3850 reg = ((modrm >> 3) & 7) | rex_r;
57fec1fe 3851 gen_op_mov_reg_T0(OT_LONG, reg);
664e0f19 3852 break;
111994ee 3853
4242b1bd 3854 case 0x138:
000cacf6 3855 case 0x038:
4242b1bd 3856 b = modrm;
111994ee
RH
3857 if ((b & 0xf0) == 0xf0) {
3858 goto do_0f_38_fx;
3859 }
0af10c86 3860 modrm = cpu_ldub_code(env, s->pc++);
4242b1bd
AZ
3861 rm = modrm & 7;
3862 reg = ((modrm >> 3) & 7) | rex_r;
3863 mod = (modrm >> 6) & 3;
c045af25
AK
3864 if (b1 >= 2) {
3865 goto illegal_op;
3866 }
4242b1bd 3867
d3eb5eae
BS
3868 sse_fn_epp = sse_op_table6[b].op[b1];
3869 if (!sse_fn_epp) {
4242b1bd 3870 goto illegal_op;
c4baa050 3871 }
222a3336
AZ
3872 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3873 goto illegal_op;
4242b1bd
AZ
3874
3875 if (b1) {
3876 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3877 if (mod == 3) {
3878 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3879 } else {
3880 op2_offset = offsetof(CPUX86State,xmm_t0);
0af10c86 3881 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
222a3336
AZ
3882 switch (b) {
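/* For the pmovsx/pmovzx forms, load only the 8, 4 or 2 bytes that the
   instruction actually reads rather than a full 16-byte vector. */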
3883 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3884 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3885 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3886 gen_ldq_env_A0(s->mem_index, op2_offset +
3887 offsetof(XMMReg, XMM_Q(0)));
3888 break;
3889 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3890 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
a7812ae4 3891 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
222a3336 3892 (s->mem_index >> 2) - 1);
a7812ae4 3893 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
222a3336
AZ
3894 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3895 offsetof(XMMReg, XMM_L(0)));
3896 break;
3897 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3898 tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
3899 (s->mem_index >> 2) - 1);
3900 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3901 offsetof(XMMReg, XMM_W(0)));
3902 break;
3903 case 0x2a: /* movntdqa */
3904 gen_ldo_env_A0(s->mem_index, op1_offset);
3905 return;
3906 default:
3907 gen_ldo_env_A0(s->mem_index, op2_offset);
3908 }
4242b1bd
AZ
3909 }
3910 } else {
3911 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3912 if (mod == 3) {
3913 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3914 } else {
3915 op2_offset = offsetof(CPUX86State,mmx_t0);
0af10c86 3916 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4242b1bd
AZ
3917 gen_ldq_env_A0(s->mem_index, op2_offset);
3918 }
3919 }
d3eb5eae 3920 if (sse_fn_epp == SSE_SPECIAL) {
222a3336 3921 goto illegal_op;
c4baa050 3922 }
222a3336 3923
4242b1bd
AZ
3924 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3925 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 3926 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
222a3336 3927
3ca51d07
RH
3928 if (b == 0x17) {
3929 set_cc_op(s, CC_OP_EFLAGS);
3930 }
4242b1bd 3931 break;
111994ee
RH
3932
3933 case 0x238:
3934 case 0x338:
3935 do_0f_38_fx:
3936 /* Various integer extensions at 0f 38 f[0-f]. */
3937 b = modrm | (b1 << 8);
0af10c86 3938 modrm = cpu_ldub_code(env, s->pc++);
222a3336
AZ
3939 reg = ((modrm >> 3) & 7) | rex_r;
3940
111994ee
RH
3941 switch (b) {
3942 case 0x3f0: /* crc32 Gd,Eb */
3943 case 0x3f1: /* crc32 Gd,Ey */
3944 do_crc32:
3945 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3946 goto illegal_op;
3947 }
3948 if ((b & 0xff) == 0xf0) {
3949 ot = OT_BYTE;
3950 } else if (s->dflag != 2) {
3951 ot = (s->prefix & PREFIX_DATA ? OT_WORD : OT_LONG);
3952 } else {
3953 ot = OT_QUAD;
3954 }
4242b1bd 3955
111994ee
RH
3956 gen_op_mov_TN_reg(OT_LONG, 0, reg);
3957 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3958 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3959 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3960 cpu_T[0], tcg_const_i32(8 << ot));
222a3336 3961
111994ee
RH
3962 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3963 gen_op_mov_reg_T0(ot, reg);
3964 break;
222a3336 3965
111994ee
RH
3966 case 0x1f0: /* crc32 or movbe */
3967 case 0x1f1:
3968 /* For these insns, the f3 prefix is supposed to have priority
3969 over the 66 prefix, but that is not how b1 was computed
3970 above. */
3971 if (s->prefix & PREFIX_REPNZ) {
3972 goto do_crc32;
3973 }
3974 /* FALLTHRU */
3975 case 0x0f0: /* movbe Gy,My */
3976 case 0x0f1: /* movbe My,Gy */
3977 if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3978 goto illegal_op;
3979 }
3980 if (s->dflag != 2) {
3981 ot = (s->prefix & PREFIX_DATA ? OT_WORD : OT_LONG);
3982 } else {
3983 ot = OT_QUAD;
3984 }
3985
3986 /* Load the data incoming to the bswap. Note that the TCG
3987 implementation of bswap requires the input be zero
3988 extended. In the case of the loads, we simply know that
3989 gen_op_ld_v via gen_ldst_modrm does that already. */
3990 if ((b & 1) == 0) {
3991 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3992 } else {
3993 switch (ot) {
3994 case OT_WORD:
3995 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[reg]);
3996 break;
3997 default:
3998 tcg_gen_ext32u_tl(cpu_T[0], cpu_regs[reg]);
3999 break;
4000 case OT_QUAD:
4001 tcg_gen_mov_tl(cpu_T[0], cpu_regs[reg]);
4002 break;
4003 }
4004 }
4005
4006 switch (ot) {
4007 case OT_WORD:
4008 tcg_gen_bswap16_tl(cpu_T[0], cpu_T[0]);
4009 break;
4010 default:
4011 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
4012 break;
4013#ifdef TARGET_X86_64
4014 case OT_QUAD:
4015 tcg_gen_bswap64_tl(cpu_T[0], cpu_T[0]);
4016 break;
4017#endif
4018 }
4019
4020 if ((b & 1) == 0) {
4021 gen_op_mov_reg_T0(ot, reg);
4022 } else {
4023 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
4024 }
4025 break;
4026
7073fbad
RH
4027 case 0x0f2: /* andn Gy, By, Ey */
4028 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4029 || !(s->prefix & PREFIX_VEX)
4030 || s->vex_l != 0) {
4031 goto illegal_op;
4032 }
4033 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4034 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4035 tcg_gen_andc_tl(cpu_T[0], cpu_regs[s->vex_v], cpu_T[0]);
4036 gen_op_mov_reg_T0(ot, reg);
4037 gen_op_update1_cc();
4038 set_cc_op(s, CC_OP_LOGICB + ot);
4039 break;
4040
c7ab7565
RH
4041 case 0x0f7: /* bextr Gy, Ey, By */
4042 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4043 || !(s->prefix & PREFIX_VEX)
4044 || s->vex_l != 0) {
4045 goto illegal_op;
4046 }
4047 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4048 {
4049 TCGv bound, zero;
4050
4051 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
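/* The control word in the vex.vvvv register packs START in bits [7:0]
   and LEN in bits [15:8]; e.g. start 8, len 4 yields (src >> 8) & 0xf. */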
4052 /* Extract START, and shift the operand.
4053 Shifts larger than operand size get zeros. */
4054 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
4055 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_A0);
4056
4057 bound = tcg_const_tl(ot == OT_QUAD ? 63 : 31);
4058 zero = tcg_const_tl(0);
4059 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T[0], cpu_A0, bound,
4060 cpu_T[0], zero);
4061 tcg_temp_free(zero);
4062
4063 /* Extract the LEN into a mask. Lengths larger than
4064 operand size get all ones. */
4065 tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
4066 tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
4067 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
4068 cpu_A0, bound);
4069 tcg_temp_free(bound);
4070 tcg_gen_movi_tl(cpu_T[1], 1);
4071 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_A0);
4072 tcg_gen_subi_tl(cpu_T[1], cpu_T[1], 1);
4073 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4074
4075 gen_op_mov_reg_T0(ot, reg);
4076 gen_op_update1_cc();
4077 set_cc_op(s, CC_OP_LOGICB + ot);
4078 }
4079 break;
4080
02ea1e6b
RH
4081 case 0x0f5: /* bzhi Gy, Ey, By */
4082 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4083 || !(s->prefix & PREFIX_VEX)
4084 || s->vex_l != 0) {
4085 goto illegal_op;
4086 }
4087 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4088 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4089 tcg_gen_ext8u_tl(cpu_T[1], cpu_regs[s->vex_v]);
4090 {
4091 TCGv bound = tcg_const_tl(ot == OT_QUAD ? 63 : 31);
4092 /* Note that since we're using BMILG (in order to get O
4093 cleared) we need to store the inverse into C. */
4094 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
4095 cpu_T[1], bound);
4096 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T[1], cpu_T[1],
4097 bound, bound, cpu_T[1]);
4098 tcg_temp_free(bound);
4099 }
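/* Clear the bits at and above the clamped index: dst = src & ~(-1 << n). */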
4100 tcg_gen_movi_tl(cpu_A0, -1);
4101 tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T[1]);
4102 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_A0);
4103 gen_op_mov_reg_T0(ot, reg);
4104 gen_op_update1_cc();
4105 set_cc_op(s, CC_OP_BMILGB + ot);
4106 break;
4107
5f1f4b17
RH
4108 case 0x3f6: /* mulx By, Gy, rdx, Ey */
4109 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4110 || !(s->prefix & PREFIX_VEX)
4111 || s->vex_l != 0) {
4112 goto illegal_op;
4113 }
4114 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4115 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
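/* mulx: unsigned rdx * Ey; the modrm reg gets the high half of the
   product, the vex.vvvv register the low half, and no flags are touched. */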
4116 switch (ot) {
5f1f4b17 4117 default:
a4bcea3d
RH
4118 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4119 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
4120 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4121 cpu_tmp2_i32, cpu_tmp3_i32);
4122 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
4123 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
5f1f4b17
RH
4124 break;
4125#ifdef TARGET_X86_64
4126 case OT_QUAD:
a4bcea3d
RH
4127 tcg_gen_mulu2_i64(cpu_regs[s->vex_v], cpu_regs[reg],
4128 cpu_T[0], cpu_regs[R_EDX]);
5f1f4b17
RH
4129 break;
4130#endif
4131 }
4132 break;
4133
0592f74a
RH
4134 case 0x3f5: /* pdep Gy, By, Ey */
4135 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4136 || !(s->prefix & PREFIX_VEX)
4137 || s->vex_l != 0) {
4138 goto illegal_op;
4139 }
4140 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4141 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4142 /* Note that by zero-extending the mask operand, we
4143 automatically handle zero-extending the result. */
4144 if (s->dflag == 2) {
4145 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
4146 } else {
4147 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
4148 }
4149 gen_helper_pdep(cpu_regs[reg], cpu_T[0], cpu_T[1]);
4150 break;
4151
4152 case 0x2f5: /* pext Gy, By, Ey */
4153 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4154 || !(s->prefix & PREFIX_VEX)
4155 || s->vex_l != 0) {
4156 goto illegal_op;
4157 }
4158 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4159 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4160 /* Note that by zero-extending the mask operand, we
4161 automatically handle zero-extending the result. */
4162 if (s->dflag == 2) {
4163 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
4164 } else {
4165 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
4166 }
4167 gen_helper_pext(cpu_regs[reg], cpu_T[0], cpu_T[1]);
4168 break;
4169
cd7f97ca
RH
4170 case 0x1f6: /* adcx Gy, Ey */
4171 case 0x2f6: /* adox Gy, Ey */
4172 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
4173 goto illegal_op;
4174 } else {
76f13133 4175 TCGv carry_in, carry_out, zero;
cd7f97ca
RH
4176 int end_op;
4177
4178 ot = (s->dflag == 2 ? OT_QUAD : OT_LONG);
4179 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4180
4181 /* Re-use the carry-out from a previous round. */
4182 TCGV_UNUSED(carry_in);
4183 carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
4184 switch (s->cc_op) {
4185 case CC_OP_ADCX:
4186 if (b == 0x1f6) {
4187 carry_in = cpu_cc_dst;
4188 end_op = CC_OP_ADCX;
4189 } else {
4190 end_op = CC_OP_ADCOX;
4191 }
4192 break;
4193 case CC_OP_ADOX:
4194 if (b == 0x1f6) {
4195 end_op = CC_OP_ADCOX;
4196 } else {
4197 carry_in = cpu_cc_src2;
4198 end_op = CC_OP_ADOX;
4199 }
4200 break;
4201 case CC_OP_ADCOX:
4202 end_op = CC_OP_ADCOX;
4203 carry_in = carry_out;
4204 break;
4205 default:
c53de1a2 4206 end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
cd7f97ca
RH
4207 break;
4208 }
4209 /* If we can't reuse carry-out, get it out of EFLAGS. */
4210 if (TCGV_IS_UNUSED(carry_in)) {
4211 if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
4212 gen_compute_eflags(s);
4213 }
4214 carry_in = cpu_tmp0;
4215 tcg_gen_shri_tl(carry_in, cpu_cc_src,
4216 ctz32(b == 0x1f6 ? CC_C : CC_O));
4217 tcg_gen_andi_tl(carry_in, carry_in, 1);
4218 }
4219
4220 switch (ot) {
4221#ifdef TARGET_X86_64
4222 case OT_LONG:
4223 /* If we know TL is 64-bit, and we want a 32-bit
4224 result, just do everything in 64-bit arithmetic. */
4225 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
4226 tcg_gen_ext32u_i64(cpu_T[0], cpu_T[0]);
4227 tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_regs[reg]);
4228 tcg_gen_add_i64(cpu_T[0], cpu_T[0], carry_in);
4229 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T[0]);
4230 tcg_gen_shri_i64(carry_out, cpu_T[0], 32);
4231 break;
4232#endif
4233 default:
4234 /* Otherwise compute the carry-out in two steps. */
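/* If src + carry_in carries, its low word is zero, so the second add2
   cannot carry as well; carry_out therefore stays 0 or 1. */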
76f13133
RH
4235 zero = tcg_const_tl(0);
4236 tcg_gen_add2_tl(cpu_T[0], carry_out,
4237 cpu_T[0], zero,
4238 carry_in, zero);
4239 tcg_gen_add2_tl(cpu_regs[reg], carry_out,
4240 cpu_regs[reg], carry_out,
4241 cpu_T[0], zero);
4242 tcg_temp_free(zero);
cd7f97ca
RH
4243 break;
4244 }
cd7f97ca
RH
4245 set_cc_op(s, end_op);
4246 }
4247 break;
4248
4a554890
RH
4249 case 0x1f7: /* shlx Gy, Ey, By */
4250 case 0x2f7: /* sarx Gy, Ey, By */
4251 case 0x3f7: /* shrx Gy, Ey, By */
4252 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4253 || !(s->prefix & PREFIX_VEX)
4254 || s->vex_l != 0) {
4255 goto illegal_op;
4256 }
4257 ot = (s->dflag == 2 ? OT_QUAD : OT_LONG);
4258 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4259 if (ot == OT_QUAD) {
4260 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 63);
4261 } else {
4262 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 31);
4263 }
4264 if (b == 0x1f7) {
4265 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4266 } else if (b == 0x2f7) {
4267 if (ot != OT_QUAD) {
4268 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4269 }
4270 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4271 } else {
4272 if (ot != OT_QUAD) {
4273 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4274 }
4275 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4276 }
4277 gen_op_mov_reg_T0(ot, reg);
4278 break;
4279
bc4b43dc
RH
4280 case 0x0f3:
4281 case 0x1f3:
4282 case 0x2f3:
4283 case 0x3f3: /* Group 17 */
4284 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4285 || !(s->prefix & PREFIX_VEX)
4286 || s->vex_l != 0) {
4287 goto illegal_op;
4288 }
4289 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4290 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4291
4292 switch (reg & 7) {
4293 case 1: /* blsr By,Ey */
4294 tcg_gen_neg_tl(cpu_T[1], cpu_T[0]);
4295 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4296 gen_op_mov_reg_T0(ot, s->vex_v);
4297 gen_op_update2_cc();
4298 set_cc_op(s, CC_OP_BMILGB + ot);
4299 break;
4300
4301 case 2: /* blsmsk By,Ey */
4302 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4303 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4304 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4305 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4306 set_cc_op(s, CC_OP_BMILGB + ot);
4307 break;
4308
4309 case 3: /* blsi By, Ey */
4310 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4311 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4312 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4313 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4314 set_cc_op(s, CC_OP_BMILGB + ot);
4315 break;
4316
4317 default:
4318 goto illegal_op;
4319 }
4320 break;
4321
111994ee
RH
4322 default:
4323 goto illegal_op;
4324 }
222a3336 4325 break;
111994ee 4326
222a3336
AZ
4327 case 0x03a:
4328 case 0x13a:
4242b1bd 4329 b = modrm;
0af10c86 4330 modrm = cpu_ldub_code(env, s->pc++);
4242b1bd
AZ
4331 rm = modrm & 7;
4332 reg = ((modrm >> 3) & 7) | rex_r;
4333 mod = (modrm >> 6) & 3;
c045af25
AK
4334 if (b1 >= 2) {
4335 goto illegal_op;
4336 }
4242b1bd 4337
d3eb5eae
BS
4338 sse_fn_eppi = sse_op_table7[b].op[b1];
4339 if (!sse_fn_eppi) {
4242b1bd 4340 goto illegal_op;
c4baa050 4341 }
222a3336
AZ
4342 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
4343 goto illegal_op;
4344
d3eb5eae 4345 if (sse_fn_eppi == SSE_SPECIAL) {
222a3336
AZ
4346 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
4347 rm = (modrm & 7) | REX_B(s);
4348 if (mod != 3)
0af10c86 4349 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
222a3336 4350 reg = ((modrm >> 3) & 7) | rex_r;
0af10c86 4351 val = cpu_ldub_code(env, s->pc++);
222a3336
AZ
4352 switch (b) {
4353 case 0x14: /* pextrb */
4354 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4355 xmm_regs[reg].XMM_B(val & 15)));
4356 if (mod == 3)
4357 gen_op_mov_reg_T0(ot, rm);
4358 else
4359 tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
4360 (s->mem_index >> 2) - 1);
4361 break;
4362 case 0x15: /* pextrw */
4363 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4364 xmm_regs[reg].XMM_W(val & 7)));
4365 if (mod == 3)
4366 gen_op_mov_reg_T0(ot, rm);
4367 else
4368 tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
4369 (s->mem_index >> 2) - 1);
4370 break;
4371 case 0x16:
4372 if (ot == OT_LONG) { /* pextrd */
4373 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4374 offsetof(CPUX86State,
4375 xmm_regs[reg].XMM_L(val & 3)));
a7812ae4 4376 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
222a3336 4377 if (mod == 3)
a7812ae4 4378 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
222a3336 4379 else
a7812ae4 4380 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
222a3336
AZ
4381 (s->mem_index >> 2) - 1);
4382 } else { /* pextrq */
a7812ae4 4383#ifdef TARGET_X86_64
222a3336
AZ
4384 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4385 offsetof(CPUX86State,
4386 xmm_regs[reg].XMM_Q(val & 1)));
4387 if (mod == 3)
4388 gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
4389 else
4390 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
4391 (s->mem_index >> 2) - 1);
a7812ae4
PB
4392#else
4393 goto illegal_op;
4394#endif
222a3336
AZ
4395 }
4396 break;
4397 case 0x17: /* extractps */
4398 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4399 xmm_regs[reg].XMM_L(val & 3)));
4400 if (mod == 3)
4401 gen_op_mov_reg_T0(ot, rm);
4402 else
4403 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
4404 (s->mem_index >> 2) - 1);
4405 break;
4406 case 0x20: /* pinsrb */
4407 if (mod == 3)
4408 gen_op_mov_TN_reg(OT_LONG, 0, rm);
4409 else
34c6addd 4410 tcg_gen_qemu_ld8u(cpu_T[0], cpu_A0,
222a3336 4411 (s->mem_index >> 2) - 1);
34c6addd 4412 tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
222a3336
AZ
4413 xmm_regs[reg].XMM_B(val & 15)));
4414 break;
4415 case 0x21: /* insertps */
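/* imm8: bits [7:6] pick the source element (register source only),
   bits [5:4] pick the destination slot, bits [3:0] are the zero mask. */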
a7812ae4 4416 if (mod == 3) {
222a3336
AZ
4417 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4418 offsetof(CPUX86State,xmm_regs[rm]
4419 .XMM_L((val >> 6) & 3)));
a7812ae4
PB
4420 } else {
4421 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
222a3336 4422 (s->mem_index >> 2) - 1);
a7812ae4
PB
4423 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
4424 }
222a3336
AZ
4425 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4426 offsetof(CPUX86State,xmm_regs[reg]
4427 .XMM_L((val >> 4) & 3)));
4428 if ((val >> 0) & 1)
4429 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4430 cpu_env, offsetof(CPUX86State,
4431 xmm_regs[reg].XMM_L(0)));
4432 if ((val >> 1) & 1)
4433 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4434 cpu_env, offsetof(CPUX86State,
4435 xmm_regs[reg].XMM_L(1)));
4436 if ((val >> 2) & 1)
4437 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4438 cpu_env, offsetof(CPUX86State,
4439 xmm_regs[reg].XMM_L(2)));
4440 if ((val >> 3) & 1)
4441 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4442 cpu_env, offsetof(CPUX86State,
4443 xmm_regs[reg].XMM_L(3)));
4444 break;
4445 case 0x22:
4446 if (ot == OT_LONG) { /* pinsrd */
4447 if (mod == 3)
a7812ae4 4448 gen_op_mov_v_reg(ot, cpu_tmp0, rm);
222a3336 4449 else
a7812ae4 4450 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
222a3336 4451 (s->mem_index >> 2) - 1);
a7812ae4 4452 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
222a3336
AZ
4453 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4454 offsetof(CPUX86State,
4455 xmm_regs[reg].XMM_L(val & 3)));
4456 } else { /* pinsrq */
a7812ae4 4457#ifdef TARGET_X86_64
222a3336
AZ
4458 if (mod == 3)
4459 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4460 else
4461 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
4462 (s->mem_index >> 2) - 1);
4463 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4464 offsetof(CPUX86State,
4465 xmm_regs[reg].XMM_Q(val & 1)));
a7812ae4
PB
4466#else
4467 goto illegal_op;
4468#endif
222a3336
AZ
4469 }
4470 break;
4471 }
4472 return;
4473 }
4242b1bd
AZ
4474
4475 if (b1) {
4476 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4477 if (mod == 3) {
4478 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4479 } else {
4480 op2_offset = offsetof(CPUX86State,xmm_t0);
0af10c86 4481 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4242b1bd
AZ
4482 gen_ldo_env_A0(s->mem_index, op2_offset);
4483 }
4484 } else {
4485 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4486 if (mod == 3) {
4487 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4488 } else {
4489 op2_offset = offsetof(CPUX86State,mmx_t0);
0af10c86 4490 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4242b1bd
AZ
4491 gen_ldq_env_A0(s->mem_index, op2_offset);
4492 }
4493 }
0af10c86 4494 val = cpu_ldub_code(env, s->pc++);
4242b1bd 4495
222a3336 4496 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
3ca51d07 4497 set_cc_op(s, CC_OP_EFLAGS);
222a3336
AZ
4498
4499 if (s->dflag == 2)
4500 /* The helper must use entire 64-bit gp registers */
4501 val |= 1 << 8;
4502 }
4503
4242b1bd
AZ
4504 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4505 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4506 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4242b1bd 4507 break;
e2c3c2c5
RH
4508
4509 case 0x33a:
4510 /* Various integer extensions at 0f 3a f[0-f]. */
4511 b = modrm | (b1 << 8);
4512 modrm = cpu_ldub_code(env, s->pc++);
4513 reg = ((modrm >> 3) & 7) | rex_r;
4514
4515 switch (b) {
4516 case 0x3f0: /* rorx Gy,Ey, Ib */
4517 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4518 || !(s->prefix & PREFIX_VEX)
4519 || s->vex_l != 0) {
4520 goto illegal_op;
4521 }
4522 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4523 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4524 b = cpu_ldub_code(env, s->pc++);
4525 if (ot == OT_QUAD) {
4526 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], b & 63);
4527 } else {
4528 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4529 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
4530 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4531 }
4532 gen_op_mov_reg_T0(ot, reg);
4533 break;
4534
4535 default:
4536 goto illegal_op;
4537 }
4538 break;
4539
664e0f19
FB
4540 default:
4541 goto illegal_op;
4542 }
4543 } else {
4544 /* generic MMX or SSE operation */
d1e42c5c 4545 switch(b) {
d1e42c5c
FB
4546 case 0x70: /* pshufx insn */
4547 case 0xc6: /* pshufx insn */
4548 case 0xc2: /* compare insns */
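/* These opcodes carry an immediate byte after the modrm byte; rip_offset
   lets rip-relative addressing account for it when forming the address. */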
4549 s->rip_offset = 1;
4550 break;
4551 default:
4552 break;
664e0f19
FB
4553 }
4554 if (is_xmm) {
4555 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4556 if (mod != 3) {
0af10c86 4557 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
664e0f19 4558 op2_offset = offsetof(CPUX86State,xmm_t0);
480c1cdb 4559 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
664e0f19
FB
4560 b == 0xc2)) {
4561 /* specific case for SSE single instructions */
4562 if (b1 == 2) {
4563 /* 32 bit access */
57fec1fe 4564 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
651ba608 4565 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
664e0f19
FB
4566 } else {
4567 /* 64 bit access */
8686c490 4568 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
664e0f19
FB
4569 }
4570 } else {
8686c490 4571 gen_ldo_env_A0(s->mem_index, op2_offset);
664e0f19
FB
4572 }
4573 } else {
4574 rm = (modrm & 7) | REX_B(s);
4575 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4576 }
4577 } else {
4578 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4579 if (mod != 3) {
0af10c86 4580 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
664e0f19 4581 op2_offset = offsetof(CPUX86State,mmx_t0);
8686c490 4582 gen_ldq_env_A0(s->mem_index, op2_offset);
664e0f19
FB
4583 } else {
4584 rm = (modrm & 7);
4585 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4586 }
4587 }
4588 switch(b) {
a35f3ec7 4589 case 0x0f: /* 3DNow! data insns */
e771edab
AJ
4590 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4591 goto illegal_op;
0af10c86 4592 val = cpu_ldub_code(env, s->pc++);
d3eb5eae
BS
4593 sse_fn_epp = sse_op_table5[val];
4594 if (!sse_fn_epp) {
a35f3ec7 4595 goto illegal_op;
c4baa050 4596 }
5af45186
FB
4597 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4598 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4599 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
a35f3ec7 4600 break;
664e0f19
FB
4601 case 0x70: /* pshufx insn */
4602 case 0xc6: /* pshufx insn */
0af10c86 4603 val = cpu_ldub_code(env, s->pc++);
5af45186
FB
4604 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4605 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
c4baa050 4606 /* XXX: introduce a new table? */
d3eb5eae 4607 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
c4baa050 4608 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
664e0f19
FB
4609 break;
4610 case 0xc2:
4611 /* compare insns */
0af10c86 4612 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
4613 if (val >= 8)
4614 goto illegal_op;
d3eb5eae 4615 sse_fn_epp = sse_op_table4[val][b1];
c4baa050 4616
5af45186
FB
4617 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4618 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4619 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19 4620 break;
b8b6a50b
FB
4621 case 0xf7:
4622 /* maskmovq/maskmovdqu: the store address is implicitly DS:rDI, so build A0 by hand */
4623 if (mod != 3)
4624 goto illegal_op;
4625#ifdef TARGET_X86_64
4626 if (s->aflag == 2) {
4627 gen_op_movq_A0_reg(R_EDI);
4628 } else
4629#endif
4630 {
4631 gen_op_movl_A0_reg(R_EDI);
4632 if (s->aflag == 0)
4633 gen_op_andl_A0_ffff();
4634 }
4635 gen_add_A0_ds_seg(s);
4636
4637 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4638 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
c4baa050 4639 /* XXX: introduce a new table? */
d3eb5eae
BS
4640 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4641 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
b8b6a50b 4642 break;
664e0f19 4643 default:
5af45186
FB
4644 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4645 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4646 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
4647 break;
4648 }
4649 if (b == 0x2e || b == 0x2f) {
3ca51d07 4650 set_cc_op(s, CC_OP_EFLAGS);
664e0f19
FB
4651 }
4652 }
4653}
4654
2c0262af
FB
4655/* convert one instruction. s->is_jmp is set if the translation must
4656 be stopped. Return the next pc value */
0af10c86
BS
4657static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4658 target_ulong pc_start)
2c0262af
FB
4659{
4660 int b, prefixes, aflag, dflag;
4661 int shift, ot;
4662 int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
14ce26e7
FB
4663 target_ulong next_eip, tval;
4664 int rex_w, rex_r;
2c0262af 4665
fdefe51c 4666 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
70cff25e 4667 tcg_gen_debug_insn_start(pc_start);
fdefe51c 4668 }
2c0262af
FB
4669 s->pc = pc_start;
4670 prefixes = 0;
4671 aflag = s->code32;
4672 dflag = s->code32;
4673 s->override = -1;
14ce26e7
FB
4674 rex_w = -1;
4675 rex_r = 0;
4676#ifdef TARGET_X86_64
4677 s->rex_x = 0;
4678 s->rex_b = 0;
5fafdf24 4679 x86_64_hregs = 0;
14ce26e7
FB
4680#endif
4681 s->rip_offset = 0; /* for relative ip address */
701ed211
RH
4682 s->vex_l = 0;
4683 s->vex_v = 0;
2c0262af 4684 next_byte:
0af10c86 4685 b = cpu_ldub_code(env, s->pc);
2c0262af 4686 s->pc++;
4a6fd938
RH
4687 /* Collect prefixes. */
4688 switch (b) {
4689 case 0xf3:
4690 prefixes |= PREFIX_REPZ;
4691 goto next_byte;
4692 case 0xf2:
4693 prefixes |= PREFIX_REPNZ;
4694 goto next_byte;
4695 case 0xf0:
4696 prefixes |= PREFIX_LOCK;
4697 goto next_byte;
4698 case 0x2e:
4699 s->override = R_CS;
4700 goto next_byte;
4701 case 0x36:
4702 s->override = R_SS;
4703 goto next_byte;
4704 case 0x3e:
4705 s->override = R_DS;
4706 goto next_byte;
4707 case 0x26:
4708 s->override = R_ES;
4709 goto next_byte;
4710 case 0x64:
4711 s->override = R_FS;
4712 goto next_byte;
4713 case 0x65:
4714 s->override = R_GS;
4715 goto next_byte;
4716 case 0x66:
4717 prefixes |= PREFIX_DATA;
4718 goto next_byte;
4719 case 0x67:
4720 prefixes |= PREFIX_ADR;
4721 goto next_byte;
14ce26e7 4722#ifdef TARGET_X86_64
4a6fd938
RH
4723 case 0x40 ... 0x4f:
4724 if (CODE64(s)) {
14ce26e7
FB
4725 /* REX prefix */
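/* The low nibble of 0x4x is WRXB: W selects 64-bit operand size, and
   R/X/B are each shifted up to bit 3 so they can be or'ed directly into
   the modrm reg/index/base fields. */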
4726 rex_w = (b >> 3) & 1;
4727 rex_r = (b & 0x4) << 1;
4728 s->rex_x = (b & 0x2) << 2;
4729 REX_B(s) = (b & 0x1) << 3;
4730 x86_64_hregs = 1; /* select uniform byte register addressing */
4731 goto next_byte;
4732 }
4a6fd938
RH
4733 break;
4734#endif
701ed211
RH
4735 case 0xc5: /* 2-byte VEX */
4736 case 0xc4: /* 3-byte VEX */
4737 /* VEX prefixes cannot be used except in 32-bit mode.
4738 Otherwise the instruction is LES or LDS. */
4739 if (s->code32 && !s->vm86) {
4740 static const int pp_prefix[4] = {
4741 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4742 };
4743 int vex3, vex2 = cpu_ldub_code(env, s->pc);
4744
4745 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4746 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4747 otherwise the instruction is LES or LDS. */
4748 break;
4749 }
4750 s->pc++;
4751
085d8134 4752 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
701ed211
RH
4753 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4754 | PREFIX_LOCK | PREFIX_DATA)) {
4755 goto illegal_op;
4756 }
4757#ifdef TARGET_X86_64
4758 if (x86_64_hregs) {
4759 goto illegal_op;
4760 }
4761#endif
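/* The VEX payload stores R/X/B/vvvv inverted.  2-byte form (c5):
   payload = R' vvvv' L pp.  3-byte form (c4): the first payload byte is
   R' X' B' mmmmm, the second is W vvvv' L pp. */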
4762 rex_r = (~vex2 >> 4) & 8;
4763 if (b == 0xc5) {
4764 vex3 = vex2;
4765 b = cpu_ldub_code(env, s->pc++);
4766 } else {
4767#ifdef TARGET_X86_64
4768 s->rex_x = (~vex2 >> 3) & 8;
4769 s->rex_b = (~vex2 >> 2) & 8;
4770#endif
4771 vex3 = cpu_ldub_code(env, s->pc++);
4772 rex_w = (vex3 >> 7) & 1;
4773 switch (vex2 & 0x1f) {
4774 case 0x01: /* Implied 0f leading opcode bytes. */
4775 b = cpu_ldub_code(env, s->pc++) | 0x100;
4776 break;
4777 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4778 b = 0x138;
4779 break;
4780 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4781 b = 0x13a;
4782 break;
4783 default: /* Reserved for future use. */
4784 goto illegal_op;
4785 }
4786 }
4787 s->vex_v = (~vex3 >> 3) & 0xf;
4788 s->vex_l = (vex3 >> 2) & 1;
4789 prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4790 }
4791 break;
4a6fd938
RH
4792 }
4793
4794 /* Post-process prefixes. */
4795 if (prefixes & PREFIX_DATA) {
4796 dflag ^= 1;
4797 }
4798 if (prefixes & PREFIX_ADR) {
4799 aflag ^= 1;
4800 }
4801#ifdef TARGET_X86_64
4802 if (CODE64(s)) {
14ce26e7
FB
4803 if (rex_w == 1) {
4804 /* 0x66 is ignored if rex.w is set */
4805 dflag = 2;
14ce26e7 4806 }
4a6fd938 4807 if (!(prefixes & PREFIX_ADR)) {
14ce26e7 4808 aflag = 2;
14ce26e7 4809 }
2c0262af 4810 }
4a6fd938 4811#endif
2c0262af 4812
2c0262af
FB
4813 s->prefix = prefixes;
4814 s->aflag = aflag;
4815 s->dflag = dflag;
4816
4817 /* lock generation */
4818 if (prefixes & PREFIX_LOCK)
a7812ae4 4819 gen_helper_lock();
2c0262af
FB
4820
4821 /* now check op code */
4822 reswitch:
4823 switch(b) {
4824 case 0x0f:
4825 /**************************/
4826 /* extended op code */
0af10c86 4827 b = cpu_ldub_code(env, s->pc++) | 0x100;
2c0262af 4828 goto reswitch;
3b46e624 4829
2c0262af
FB
4830 /**************************/
4831 /* arith & logic */
4832 case 0x00 ... 0x05:
4833 case 0x08 ... 0x0d:
4834 case 0x10 ... 0x15:
4835 case 0x18 ... 0x1d:
4836 case 0x20 ... 0x25:
4837 case 0x28 ... 0x2d:
4838 case 0x30 ... 0x35:
4839 case 0x38 ... 0x3d:
4840 {
4841 int op, f, val;
4842 op = (b >> 3) & 7;
4843 f = (b >> 1) & 3;
4844
4845 if ((b & 1) == 0)
4846 ot = OT_BYTE;
4847 else
14ce26e7 4848 ot = dflag + OT_WORD;
3b46e624 4849
2c0262af
FB
4850 switch(f) {
4851 case 0: /* OP Ev, Gv */
0af10c86 4852 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 4853 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 4854 mod = (modrm >> 6) & 3;
14ce26e7 4855 rm = (modrm & 7) | REX_B(s);
2c0262af 4856 if (mod != 3) {
0af10c86 4857 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af
FB
4858 opreg = OR_TMP0;
4859 } else if (op == OP_XORL && rm == reg) {
4860 xor_zero:
4861 /* xor reg, reg optimisation */
436ff2d2 4862 set_cc_op(s, CC_OP_CLR);
2c0262af 4863 gen_op_movl_T0_0();
57fec1fe 4864 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
4865 break;
4866 } else {
4867 opreg = rm;
4868 }
57fec1fe 4869 gen_op_mov_TN_reg(ot, 1, reg);
2c0262af
FB
4870 gen_op(s, op, ot, opreg);
4871 break;
4872 case 1: /* OP Gv, Ev */
0af10c86 4873 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4874 mod = (modrm >> 6) & 3;
14ce26e7
FB
4875 reg = ((modrm >> 3) & 7) | rex_r;
4876 rm = (modrm & 7) | REX_B(s);
2c0262af 4877 if (mod != 3) {
0af10c86 4878 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
57fec1fe 4879 gen_op_ld_T1_A0(ot + s->mem_index);
2c0262af
FB
4880 } else if (op == OP_XORL && rm == reg) {
4881 goto xor_zero;
4882 } else {
57fec1fe 4883 gen_op_mov_TN_reg(ot, 1, rm);
2c0262af
FB
4884 }
4885 gen_op(s, op, ot, reg);
4886 break;
4887 case 2: /* OP A, Iv */
0af10c86 4888 val = insn_get(env, s, ot);
2c0262af
FB
4889 gen_op_movl_T1_im(val);
4890 gen_op(s, op, ot, OR_EAX);
4891 break;
4892 }
4893 }
4894 break;
4895
ec9d6075
FB
4896 case 0x82:
4897 if (CODE64(s))
4898 goto illegal_op;
2c0262af
FB
4899 case 0x80: /* GRP1 */
4900 case 0x81:
4901 case 0x83:
4902 {
4903 int val;
4904
4905 if ((b & 1) == 0)
4906 ot = OT_BYTE;
4907 else
14ce26e7 4908 ot = dflag + OT_WORD;
3b46e624 4909
0af10c86 4910 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4911 mod = (modrm >> 6) & 3;
14ce26e7 4912 rm = (modrm & 7) | REX_B(s);
2c0262af 4913 op = (modrm >> 3) & 7;
3b46e624 4914
2c0262af 4915 if (mod != 3) {
14ce26e7
FB
4916 if (b == 0x83)
4917 s->rip_offset = 1;
4918 else
4919 s->rip_offset = insn_const_size(ot);
0af10c86 4920 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af
FB
4921 opreg = OR_TMP0;
4922 } else {
14ce26e7 4923 opreg = rm;
2c0262af
FB
4924 }
4925
4926 switch(b) {
4927 default:
4928 case 0x80:
4929 case 0x81:
d64477af 4930 case 0x82:
0af10c86 4931 val = insn_get(env, s, ot);
2c0262af
FB
4932 break;
4933 case 0x83:
0af10c86 4934 val = (int8_t)insn_get(env, s, OT_BYTE);
2c0262af
FB
4935 break;
4936 }
4937 gen_op_movl_T1_im(val);
4938 gen_op(s, op, ot, opreg);
4939 }
4940 break;
4941
4942 /**************************/
4943 /* inc, dec, and other misc arith */
4944 case 0x40 ... 0x47: /* inc Gv */
4945 ot = dflag ? OT_LONG : OT_WORD;
4946 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4947 break;
4948 case 0x48 ... 0x4f: /* dec Gv */
4949 ot = dflag ? OT_LONG : OT_WORD;
4950 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4951 break;
4952 case 0xf6: /* GRP3 */
4953 case 0xf7:
4954 if ((b & 1) == 0)
4955 ot = OT_BYTE;
4956 else
14ce26e7 4957 ot = dflag + OT_WORD;
2c0262af 4958
0af10c86 4959 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4960 mod = (modrm >> 6) & 3;
14ce26e7 4961 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
4962 op = (modrm >> 3) & 7;
4963 if (mod != 3) {
14ce26e7
FB
4964 if (op == 0)
4965 s->rip_offset = insn_const_size(ot);
0af10c86 4966 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
57fec1fe 4967 gen_op_ld_T0_A0(ot + s->mem_index);
2c0262af 4968 } else {
57fec1fe 4969 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af
FB
4970 }
4971
4972 switch(op) {
4973 case 0: /* test */
0af10c86 4974 val = insn_get(env, s, ot);
2c0262af
FB
4975 gen_op_movl_T1_im(val);
4976 gen_op_testl_T0_T1_cc();
3ca51d07 4977 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af
FB
4978 break;
4979 case 2: /* not */
b6abf97d 4980 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
2c0262af 4981 if (mod != 3) {
57fec1fe 4982 gen_op_st_T0_A0(ot + s->mem_index);
2c0262af 4983 } else {
57fec1fe 4984 gen_op_mov_reg_T0(ot, rm);
2c0262af
FB
4985 }
4986 break;
4987 case 3: /* neg */
b6abf97d 4988 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
2c0262af 4989 if (mod != 3) {
57fec1fe 4990 gen_op_st_T0_A0(ot + s->mem_index);
2c0262af 4991 } else {
57fec1fe 4992 gen_op_mov_reg_T0(ot, rm);
2c0262af
FB
4993 }
4994 gen_op_update_neg_cc();
3ca51d07 4995 set_cc_op(s, CC_OP_SUBB + ot);
2c0262af
FB
4996 break;
4997 case 4: /* mul */
4998 switch(ot) {
4999 case OT_BYTE:
0211e5af
FB
5000 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
5001 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5002 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
5003 /* XXX: use 32 bit mul which could be faster */
5004 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5005 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5006 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5007 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
3ca51d07 5008 set_cc_op(s, CC_OP_MULB);
2c0262af
FB
5009 break;
5010 case OT_WORD:
0211e5af
FB
5011 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
5012 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5013 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
5014 /* XXX: use 32 bit mul which could be faster */
5015 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5016 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5017 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5018 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
5019 gen_op_mov_reg_T0(OT_WORD, R_EDX);
5020 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
3ca51d07 5021 set_cc_op(s, CC_OP_MULW);
2c0262af
FB
5022 break;
5023 default:
5024 case OT_LONG:
a4bcea3d
RH
5025 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5026 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
5027 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5028 cpu_tmp2_i32, cpu_tmp3_i32);
5029 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
5030 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
5031 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
5032 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 5033 set_cc_op(s, CC_OP_MULL);
2c0262af 5034 break;
14ce26e7
FB
5035#ifdef TARGET_X86_64
5036 case OT_QUAD:
a4bcea3d
RH
5037 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
5038 cpu_T[0], cpu_regs[R_EAX]);
5039 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
5040 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 5041 set_cc_op(s, CC_OP_MULQ);
14ce26e7
FB
5042 break;
5043#endif
2c0262af 5044 }
2c0262af
FB
5045 break;
5046 case 5: /* imul */
5047 switch(ot) {
5048 case OT_BYTE:
0211e5af
FB
5049 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
5050 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5051 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
5052 /* XXX: use 32 bit mul which could be faster */
5053 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5054 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5055 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5056 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
5057 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
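/* cc_src = product minus the sign-extended low byte: non-zero exactly
   when the signed result does not fit in AL, which the MUL flag
   computation turns into CF/OF. */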
3ca51d07 5058 set_cc_op(s, CC_OP_MULB);
2c0262af
FB
5059 break;
5060 case OT_WORD:
0211e5af
FB
5061 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
5062 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5063 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5064 /* XXX: use 32 bit mul which could be faster */
5065 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5066 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5067 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5068 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5069 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
5070 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
5071 gen_op_mov_reg_T0(OT_WORD, R_EDX);
3ca51d07 5072 set_cc_op(s, CC_OP_MULW);
2c0262af
FB
5073 break;
5074 default:
5075 case OT_LONG:
a4bcea3d
RH
5076 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5077 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
5078 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5079 cpu_tmp2_i32, cpu_tmp3_i32);
5080 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
5081 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
5082 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5083 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
5084 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5085 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
3ca51d07 5086 set_cc_op(s, CC_OP_MULL);
2c0262af 5087 break;
14ce26e7
FB
5088#ifdef TARGET_X86_64
5089 case OT_QUAD:
a4bcea3d
RH
5090 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
5091 cpu_T[0], cpu_regs[R_EAX]);
5092 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
5093 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
5094 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 5095 set_cc_op(s, CC_OP_MULQ);
14ce26e7
FB
5096 break;
5097#endif
2c0262af 5098 }
2c0262af
FB
5099 break;
5100 case 6: /* div */
5101 switch(ot) {
5102 case OT_BYTE:
14ce26e7 5103 gen_jmp_im(pc_start - s->cs_base);
7923057b 5104 gen_helper_divb_AL(cpu_env, cpu_T[0]);
2c0262af
FB
5105 break;
5106 case OT_WORD:
14ce26e7 5107 gen_jmp_im(pc_start - s->cs_base);
7923057b 5108 gen_helper_divw_AX(cpu_env, cpu_T[0]);
2c0262af
FB
5109 break;
5110 default:
5111 case OT_LONG:
14ce26e7 5112 gen_jmp_im(pc_start - s->cs_base);
7923057b 5113 gen_helper_divl_EAX(cpu_env, cpu_T[0]);
14ce26e7
FB
5114 break;
5115#ifdef TARGET_X86_64
5116 case OT_QUAD:
5117 gen_jmp_im(pc_start - s->cs_base);
7923057b 5118 gen_helper_divq_EAX(cpu_env, cpu_T[0]);
2c0262af 5119 break;
14ce26e7 5120#endif
2c0262af
FB
5121 }
5122 break;
5123 case 7: /* idiv */
5124 switch(ot) {
5125 case OT_BYTE:
14ce26e7 5126 gen_jmp_im(pc_start - s->cs_base);
7923057b 5127 gen_helper_idivb_AL(cpu_env, cpu_T[0]);
2c0262af
FB
5128 break;
5129 case OT_WORD:
14ce26e7 5130 gen_jmp_im(pc_start - s->cs_base);
7923057b 5131 gen_helper_idivw_AX(cpu_env, cpu_T[0]);
2c0262af
FB
5132 break;
5133 default:
5134 case OT_LONG:
14ce26e7 5135 gen_jmp_im(pc_start - s->cs_base);
7923057b 5136 gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
14ce26e7
FB
5137 break;
5138#ifdef TARGET_X86_64
5139 case OT_QUAD:
5140 gen_jmp_im(pc_start - s->cs_base);
7923057b 5141 gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
2c0262af 5142 break;
14ce26e7 5143#endif
2c0262af
FB
5144 }
5145 break;
5146 default:
5147 goto illegal_op;
5148 }
5149 break;
5150
5151 case 0xfe: /* GRP4 */
5152 case 0xff: /* GRP5 */
5153 if ((b & 1) == 0)
5154 ot = OT_BYTE;
5155 else
14ce26e7 5156 ot = dflag + OT_WORD;
2c0262af 5157
0af10c86 5158 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5159 mod = (modrm >> 6) & 3;
14ce26e7 5160 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
5161 op = (modrm >> 3) & 7;
5162 if (op >= 2 && b == 0xfe) {
5163 goto illegal_op;
5164 }
14ce26e7 5165 if (CODE64(s)) {
aba9d61e 5166 if (op == 2 || op == 4) {
14ce26e7
FB
5167 /* operand size for jumps is 64 bit */
5168 ot = OT_QUAD;
aba9d61e 5169 } else if (op == 3 || op == 5) {
41b1e61f 5170 ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
14ce26e7
FB
5171 } else if (op == 6) {
5172 /* default push size is 64 bit */
5173 ot = dflag ? OT_QUAD : OT_WORD;
5174 }
5175 }
2c0262af 5176 if (mod != 3) {
0af10c86 5177 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af 5178 if (op >= 2 && op != 3 && op != 5)
57fec1fe 5179 gen_op_ld_T0_A0(ot + s->mem_index);
2c0262af 5180 } else {
57fec1fe 5181 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af
FB
5182 }
5183
5184 switch(op) {
5185 case 0: /* inc Ev */
5186 if (mod != 3)
5187 opreg = OR_TMP0;
5188 else
5189 opreg = rm;
5190 gen_inc(s, ot, opreg, 1);
5191 break;
5192 case 1: /* dec Ev */
5193 if (mod != 3)
5194 opreg = OR_TMP0;
5195 else
5196 opreg = rm;
5197 gen_inc(s, ot, opreg, -1);
5198 break;
5199 case 2: /* call Ev */
4f31916f 5200 /* XXX: optimize if memory (no 'and' is necessary) */
2c0262af
FB
5201 if (s->dflag == 0)
5202 gen_op_andl_T0_ffff();
2c0262af 5203 next_eip = s->pc - s->cs_base;
1ef38687 5204 gen_movtl_T1_im(next_eip);
4f31916f
FB
5205 gen_push_T1(s);
5206 gen_op_jmp_T0();
2c0262af
FB
5207 gen_eob(s);
5208 break;
61382a50 5209 case 3: /* lcall Ev */
57fec1fe 5210 gen_op_ld_T1_A0(ot + s->mem_index);
aba9d61e 5211 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
57fec1fe 5212 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
2c0262af
FB
5213 do_lcall:
5214 if (s->pe && !s->vm86) {
773cdfcc 5215 gen_update_cc_op(s);
14ce26e7 5216 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 5217 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2
BS
5218 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
5219 tcg_const_i32(dflag),
a7812ae4 5220 tcg_const_i32(s->pc - pc_start));
2c0262af 5221 } else {
b6abf97d 5222 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2
BS
5223 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
5224 tcg_const_i32(dflag),
a7812ae4 5225 tcg_const_i32(s->pc - s->cs_base));
2c0262af
FB
5226 }
5227 gen_eob(s);
5228 break;
5229 case 4: /* jmp Ev */
5230 if (s->dflag == 0)
5231 gen_op_andl_T0_ffff();
5232 gen_op_jmp_T0();
5233 gen_eob(s);
5234 break;
5235 case 5: /* ljmp Ev */
57fec1fe 5236 gen_op_ld_T1_A0(ot + s->mem_index);
aba9d61e 5237 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
57fec1fe 5238 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
2c0262af
FB
5239 do_ljmp:
5240 if (s->pe && !s->vm86) {
773cdfcc 5241 gen_update_cc_op(s);
14ce26e7 5242 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 5243 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 5244 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
a7812ae4 5245 tcg_const_i32(s->pc - pc_start));
2c0262af 5246 } else {
3bd7da9e 5247 gen_op_movl_seg_T0_vm(R_CS);
2c0262af
FB
5248 gen_op_movl_T0_T1();
5249 gen_op_jmp_T0();
5250 }
5251 gen_eob(s);
5252 break;
5253 case 6: /* push Ev */
5254 gen_push_T0(s);
5255 break;
5256 default:
5257 goto illegal_op;
5258 }
5259 break;
5260
5261 case 0x84: /* test Ev, Gv */
5fafdf24 5262 case 0x85:
2c0262af
FB
5263 if ((b & 1) == 0)
5264 ot = OT_BYTE;
5265 else
14ce26e7 5266 ot = dflag + OT_WORD;
2c0262af 5267
0af10c86 5268 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5269 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5270
0af10c86 5271 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
57fec1fe 5272 gen_op_mov_TN_reg(ot, 1, reg);
2c0262af 5273 gen_op_testl_T0_T1_cc();
3ca51d07 5274 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af 5275 break;
3b46e624 5276
2c0262af
FB
5277 case 0xa8: /* test eAX, Iv */
5278 case 0xa9:
5279 if ((b & 1) == 0)
5280 ot = OT_BYTE;
5281 else
14ce26e7 5282 ot = dflag + OT_WORD;
0af10c86 5283 val = insn_get(env, s, ot);
2c0262af 5284
57fec1fe 5285 gen_op_mov_TN_reg(ot, 0, OR_EAX);
2c0262af
FB
5286 gen_op_movl_T1_im(val);
5287 gen_op_testl_T0_T1_cc();
3ca51d07 5288 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af 5289 break;
3b46e624 5290
2c0262af 5291 case 0x98: /* CWDE/CBW */
14ce26e7
FB
5292#ifdef TARGET_X86_64
5293 if (dflag == 2) {
e108dd01
FB
5294 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5295 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5296 gen_op_mov_reg_T0(OT_QUAD, R_EAX);
14ce26e7
FB
5297 } else
5298#endif
e108dd01
FB
5299 if (dflag == 1) {
5300 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
5301 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5302 gen_op_mov_reg_T0(OT_LONG, R_EAX);
5303 } else {
5304 gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
5305 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5306 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5307 }
2c0262af
FB
5308 break;
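        /* A rough sketch of what the three branches above implement for 0x98
           (CBW/CWDE/CDQE); the names are illustrative, not identifiers from
           this file:
               16-bit opsize:  AX  = (int16_t)(int8_t)AL;
               32-bit opsize:  EAX = (int32_t)(int16_t)AX;
               64-bit opsize:  RAX = (int64_t)(int32_t)EAX;   (REX.W only)
        */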
5309 case 0x99: /* CDQ/CWD */
14ce26e7
FB
5310#ifdef TARGET_X86_64
5311 if (dflag == 2) {
e108dd01
FB
5312 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5313 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
5314 gen_op_mov_reg_T0(OT_QUAD, R_EDX);
14ce26e7
FB
5315 } else
5316#endif
e108dd01
FB
5317 if (dflag == 1) {
5318 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5319 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5320 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
5321 gen_op_mov_reg_T0(OT_LONG, R_EDX);
5322 } else {
5323 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
5324 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5325 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
5326 gen_op_mov_reg_T0(OT_WORD, R_EDX);
5327 }
2c0262af
FB
5328 break;
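        /* Likewise for 0x99 (CWD/CDQ/CQO), sketched roughly: the sign of the
           accumulator is replicated into the data register,
               16-bit opsize:  DX  = ((int16_t)AX  < 0) ? 0xFFFF     : 0;
               32-bit opsize:  EDX = ((int32_t)EAX < 0) ? 0xFFFFFFFF : 0;
               64-bit opsize:  RDX = ((int64_t)RAX < 0) ? ~0ULL      : 0;
           which is what the arithmetic right shifts above compute. */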
5329 case 0x1af: /* imul Gv, Ev */
5330 case 0x69: /* imul Gv, Ev, I */
5331 case 0x6b:
14ce26e7 5332 ot = dflag + OT_WORD;
0af10c86 5333 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
5334 reg = ((modrm >> 3) & 7) | rex_r;
5335 if (b == 0x69)
5336 s->rip_offset = insn_const_size(ot);
5337 else if (b == 0x6b)
5338 s->rip_offset = 1;
0af10c86 5339 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2c0262af 5340 if (b == 0x69) {
0af10c86 5341 val = insn_get(env, s, ot);
2c0262af
FB
5342 gen_op_movl_T1_im(val);
5343 } else if (b == 0x6b) {
0af10c86 5344 val = (int8_t)insn_get(env, s, OT_BYTE);
2c0262af
FB
5345 gen_op_movl_T1_im(val);
5346 } else {
57fec1fe 5347 gen_op_mov_TN_reg(ot, 1, reg);
2c0262af 5348 }
a4bcea3d 5349 switch (ot) {
0211e5af 5350#ifdef TARGET_X86_64
a4bcea3d
RH
5351 case OT_QUAD:
5352 tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]);
5353 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5354 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
5355 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
5356 break;
0211e5af 5357#endif
a4bcea3d
RH
5358 case OT_LONG:
5359 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5360 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5361 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5362 cpu_tmp2_i32, cpu_tmp3_i32);
5363 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
5364 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5365 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5366 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5367 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
5368 break;
5369 default:
0211e5af
FB
5370 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5371 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5372 /* XXX: use 32 bit mul which could be faster */
5373 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5374 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5375 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5376 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
a4bcea3d
RH
5377 gen_op_mov_reg_T0(ot, reg);
5378 break;
2c0262af 5379 }
3ca51d07 5380 set_cc_op(s, CC_OP_MULB + ot);
2c0262af
FB
5381 break;
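        /* Sketch of the flag bookkeeping above, with illustrative names: for
           IMUL the translator keeps cc_dst = truncated result and cc_src =
           the difference between the real high part of the full product and
           the sign extension of the truncated result.  cc_src is therefore
           zero exactly when the signed product fits in the destination width,
           and CC_OP_MULB + ot later derives CF = OF = (cc_src != 0). */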
5382 case 0x1c0:
5383 case 0x1c1: /* xadd Ev, Gv */
5384 if ((b & 1) == 0)
5385 ot = OT_BYTE;
5386 else
14ce26e7 5387 ot = dflag + OT_WORD;
0af10c86 5388 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5389 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5390 mod = (modrm >> 6) & 3;
5391 if (mod == 3) {
14ce26e7 5392 rm = (modrm & 7) | REX_B(s);
57fec1fe
FB
5393 gen_op_mov_TN_reg(ot, 0, reg);
5394 gen_op_mov_TN_reg(ot, 1, rm);
2c0262af 5395 gen_op_addl_T0_T1();
57fec1fe
FB
5396 gen_op_mov_reg_T1(ot, reg);
5397 gen_op_mov_reg_T0(ot, rm);
2c0262af 5398 } else {
0af10c86 5399 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
57fec1fe
FB
5400 gen_op_mov_TN_reg(ot, 0, reg);
5401 gen_op_ld_T1_A0(ot + s->mem_index);
2c0262af 5402 gen_op_addl_T0_T1();
57fec1fe
FB
5403 gen_op_st_T0_A0(ot + s->mem_index);
5404 gen_op_mov_reg_T1(ot, reg);
2c0262af
FB
5405 }
5406 gen_op_update2_cc();
3ca51d07 5407 set_cc_op(s, CC_OP_ADDB + ot);
2c0262af
FB
5408 break;
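        /* XADD Ev, Gv, sketched in C (illustrative names):
               tmp  = dest + src;
               src  = dest;      the register operand receives the old Ev value
               dest = tmp;
           with the arithmetic flags taken from the addition (CC_OP_ADDB + ot). */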
5409 case 0x1b0:
5410 case 0x1b1: /* cmpxchg Ev, Gv */
cad3a37d 5411 {
1130328e 5412 int label1, label2;
1e4840bf 5413 TCGv t0, t1, t2, a0;
cad3a37d
FB
5414
5415 if ((b & 1) == 0)
5416 ot = OT_BYTE;
5417 else
5418 ot = dflag + OT_WORD;
0af10c86 5419 modrm = cpu_ldub_code(env, s->pc++);
cad3a37d
FB
5420 reg = ((modrm >> 3) & 7) | rex_r;
5421 mod = (modrm >> 6) & 3;
a7812ae4
PB
5422 t0 = tcg_temp_local_new();
5423 t1 = tcg_temp_local_new();
5424 t2 = tcg_temp_local_new();
5425 a0 = tcg_temp_local_new();
1e4840bf 5426 gen_op_mov_v_reg(ot, t1, reg);
cad3a37d
FB
5427 if (mod == 3) {
5428 rm = (modrm & 7) | REX_B(s);
1e4840bf 5429 gen_op_mov_v_reg(ot, t0, rm);
cad3a37d 5430 } else {
0af10c86 5431 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
1e4840bf
FB
5432 tcg_gen_mov_tl(a0, cpu_A0);
5433 gen_op_ld_v(ot + s->mem_index, t0, a0);
cad3a37d
FB
5434 rm = 0; /* avoid warning */
5435 }
5436 label1 = gen_new_label();
a3251186
RH
5437 tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5438 gen_extu(ot, t0);
1e4840bf 5439 gen_extu(ot, t2);
a3251186 5440 tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
f7e80adf 5441 label2 = gen_new_label();
cad3a37d 5442 if (mod == 3) {
1e4840bf 5443 gen_op_mov_reg_v(ot, R_EAX, t0);
1130328e
FB
5444 tcg_gen_br(label2);
5445 gen_set_label(label1);
1e4840bf 5446 gen_op_mov_reg_v(ot, rm, t1);
cad3a37d 5447 } else {
f7e80adf
AG
5448 /* perform a no-op store cycle like a physical CPU; this must
5449 happen before the accumulator is changed, to ensure idempotency
5450 if the store faults and the instruction is restarted */
5451 gen_op_st_v(ot + s->mem_index, t0, a0);
1e4840bf 5452 gen_op_mov_reg_v(ot, R_EAX, t0);
f7e80adf 5453 tcg_gen_br(label2);
1130328e 5454 gen_set_label(label1);
1e4840bf 5455 gen_op_st_v(ot + s->mem_index, t1, a0);
cad3a37d 5456 }
f7e80adf 5457 gen_set_label(label2);
1e4840bf 5458 tcg_gen_mov_tl(cpu_cc_src, t0);
a3251186
RH
5459 tcg_gen_mov_tl(cpu_cc_srcT, t2);
5460 tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
3ca51d07 5461 set_cc_op(s, CC_OP_SUBB + ot);
1e4840bf
FB
5462 tcg_temp_free(t0);
5463 tcg_temp_free(t1);
5464 tcg_temp_free(t2);
5465 tcg_temp_free(a0);
2c0262af 5466 }
2c0262af
FB
5467 break;
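        /* A rough C model of the CMPXCHG semantics implemented above
           (accum, dest and src are illustrative names, accum being
           AL/AX/EAX/RAX):
               if (accum == dest) { dest = src; }
               else               { accum = dest; }
           with the flags set as by CMP accum, dest (hence CC_OP_SUBB + ot).
           The memory form always performs a store, matching the hardware
           behaviour described in the comment above. */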
5468 case 0x1c7: /* cmpxchg8b */
0af10c86 5469 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5470 mod = (modrm >> 6) & 3;
71c3558e 5471 if ((mod == 3) || ((modrm & 0x38) != 0x8))
2c0262af 5472 goto illegal_op;
1b9d9ebb
FB
5473#ifdef TARGET_X86_64
5474 if (dflag == 2) {
5475 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5476 goto illegal_op;
5477 gen_jmp_im(pc_start - s->cs_base);
773cdfcc 5478 gen_update_cc_op(s);
0af10c86 5479 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
92fc4b58 5480 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
1b9d9ebb
FB
5481 } else
5482#endif
5483 {
5484 if (!(s->cpuid_features & CPUID_CX8))
5485 goto illegal_op;
5486 gen_jmp_im(pc_start - s->cs_base);
773cdfcc 5487 gen_update_cc_op(s);
0af10c86 5488 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
92fc4b58 5489 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
1b9d9ebb 5490 }
3ca51d07 5491 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 5492 break;
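        /* CMPXCHG8B (and CMPXCHG16B with REX.W), sketched roughly:
               if (EDX:EAX == m64) { ZF = 1; m64 = ECX:EBX; }
               else                { ZF = 0; EDX:EAX = m64; }
           The 16-byte form uses RDX:RAX and RCX:RBX instead.  The CPUID
           checks above (CX8 / CX16) gate the availability of each form. */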
3b46e624 5493
2c0262af
FB
5494 /**************************/
5495 /* push/pop */
5496 case 0x50 ... 0x57: /* push */
57fec1fe 5497 gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
2c0262af
FB
5498 gen_push_T0(s);
5499 break;
5500 case 0x58 ... 0x5f: /* pop */
14ce26e7
FB
5501 if (CODE64(s)) {
5502 ot = dflag ? OT_QUAD : OT_WORD;
5503 } else {
5504 ot = dflag + OT_WORD;
5505 }
2c0262af 5506 gen_pop_T0(s);
77729c24 5507 /* NOTE: order is important for pop %sp */
2c0262af 5508 gen_pop_update(s);
57fec1fe 5509 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
2c0262af
FB
5510 break;
5511 case 0x60: /* pusha */
14ce26e7
FB
5512 if (CODE64(s))
5513 goto illegal_op;
2c0262af
FB
5514 gen_pusha(s);
5515 break;
5516 case 0x61: /* popa */
14ce26e7
FB
5517 if (CODE64(s))
5518 goto illegal_op;
2c0262af
FB
5519 gen_popa(s);
5520 break;
5521 case 0x68: /* push Iv */
5522 case 0x6a:
14ce26e7
FB
5523 if (CODE64(s)) {
5524 ot = dflag ? OT_QUAD : OT_WORD;
5525 } else {
5526 ot = dflag + OT_WORD;
5527 }
2c0262af 5528 if (b == 0x68)
0af10c86 5529 val = insn_get(env, s, ot);
2c0262af 5530 else
0af10c86 5531 val = (int8_t)insn_get(env, s, OT_BYTE);
2c0262af
FB
5532 gen_op_movl_T0_im(val);
5533 gen_push_T0(s);
5534 break;
5535 case 0x8f: /* pop Ev */
14ce26e7
FB
5536 if (CODE64(s)) {
5537 ot = dflag ? OT_QUAD : OT_WORD;
5538 } else {
5539 ot = dflag + OT_WORD;
5540 }
0af10c86 5541 modrm = cpu_ldub_code(env, s->pc++);
77729c24 5542 mod = (modrm >> 6) & 3;
2c0262af 5543 gen_pop_T0(s);
77729c24
FB
5544 if (mod == 3) {
5545 /* NOTE: order is important for pop %sp */
5546 gen_pop_update(s);
14ce26e7 5547 rm = (modrm & 7) | REX_B(s);
57fec1fe 5548 gen_op_mov_reg_T0(ot, rm);
77729c24
FB
5549 } else {
5550 /* NOTE: order is important too for MMU exceptions */
14ce26e7 5551 s->popl_esp_hack = 1 << ot;
0af10c86 5552 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
77729c24
FB
5553 s->popl_esp_hack = 0;
5554 gen_pop_update(s);
5555 }
2c0262af
FB
5556 break;
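        /* Why the ordering matters here, sketched with illustrative names:
           for a register destination, "pop %esp" must behave as
               tmp = load(SS:ESP);  ESP += opsize;  ESP = tmp;
           i.e. the popped value overrides the increment, which is why the
           stack pointer is updated before the register write above.  For a
           memory destination the update is done only after the store, so a
           faulting store leaves ESP unchanged and the insn can restart. */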
5557 case 0xc8: /* enter */
5558 {
5559 int level;
0af10c86 5560 val = cpu_lduw_code(env, s->pc);
2c0262af 5561 s->pc += 2;
0af10c86 5562 level = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5563 gen_enter(s, val, level);
5564 }
5565 break;
5566 case 0xc9: /* leave */
5567 /* XXX: the exception is not precise (ESP is updated before a potential exception) */
14ce26e7 5568 if (CODE64(s)) {
57fec1fe
FB
5569 gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
5570 gen_op_mov_reg_T0(OT_QUAD, R_ESP);
14ce26e7 5571 } else if (s->ss32) {
57fec1fe
FB
5572 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
5573 gen_op_mov_reg_T0(OT_LONG, R_ESP);
2c0262af 5574 } else {
57fec1fe
FB
5575 gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
5576 gen_op_mov_reg_T0(OT_WORD, R_ESP);
2c0262af
FB
5577 }
5578 gen_pop_T0(s);
14ce26e7
FB
5579 if (CODE64(s)) {
5580 ot = dflag ? OT_QUAD : OT_WORD;
5581 } else {
5582 ot = dflag + OT_WORD;
5583 }
57fec1fe 5584 gen_op_mov_reg_T0(ot, R_EBP);
2c0262af
FB
5585 gen_pop_update(s);
5586 break;
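        /* LEAVE, sketched: the frame pointer becomes the new stack pointer
           and the saved frame pointer is popped back,
               ESP = EBP;
               EBP = pop();
           using SP/BP or RSP/RBP as the stack and operand sizes dictate,
           which is what the branches above select. */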
5587 case 0x06: /* push es */
5588 case 0x0e: /* push cs */
5589 case 0x16: /* push ss */
5590 case 0x1e: /* push ds */
14ce26e7
FB
5591 if (CODE64(s))
5592 goto illegal_op;
2c0262af
FB
5593 gen_op_movl_T0_seg(b >> 3);
5594 gen_push_T0(s);
5595 break;
5596 case 0x1a0: /* push fs */
5597 case 0x1a8: /* push gs */
5598 gen_op_movl_T0_seg((b >> 3) & 7);
5599 gen_push_T0(s);
5600 break;
5601 case 0x07: /* pop es */
5602 case 0x17: /* pop ss */
5603 case 0x1f: /* pop ds */
14ce26e7
FB
5604 if (CODE64(s))
5605 goto illegal_op;
2c0262af
FB
5606 reg = b >> 3;
5607 gen_pop_T0(s);
5608 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5609 gen_pop_update(s);
5610 if (reg == R_SS) {
a2cc3b24
FB
5611 /* if reg == SS, inhibit interrupts/trace. */
5612 /* If several instructions disable interrupts, only the
5613 _first_ does it */
5614 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
f0967a1a 5615 gen_helper_set_inhibit_irq(cpu_env);
2c0262af
FB
5616 s->tf = 0;
5617 }
5618 if (s->is_jmp) {
14ce26e7 5619 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5620 gen_eob(s);
5621 }
5622 break;
5623 case 0x1a1: /* pop fs */
5624 case 0x1a9: /* pop gs */
5625 gen_pop_T0(s);
5626 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5627 gen_pop_update(s);
5628 if (s->is_jmp) {
14ce26e7 5629 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5630 gen_eob(s);
5631 }
5632 break;
5633
5634 /**************************/
5635 /* mov */
5636 case 0x88:
5637 case 0x89: /* mov Gv, Ev */
5638 if ((b & 1) == 0)
5639 ot = OT_BYTE;
5640 else
14ce26e7 5641 ot = dflag + OT_WORD;
0af10c86 5642 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5643 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5644
2c0262af 5645 /* generate a generic store */
0af10c86 5646 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
2c0262af
FB
5647 break;
5648 case 0xc6:
5649 case 0xc7: /* mov Ev, Iv */
5650 if ((b & 1) == 0)
5651 ot = OT_BYTE;
5652 else
14ce26e7 5653 ot = dflag + OT_WORD;
0af10c86 5654 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5655 mod = (modrm >> 6) & 3;
14ce26e7
FB
5656 if (mod != 3) {
5657 s->rip_offset = insn_const_size(ot);
0af10c86 5658 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
14ce26e7 5659 }
0af10c86 5660 val = insn_get(env, s, ot);
2c0262af
FB
5661 gen_op_movl_T0_im(val);
5662 if (mod != 3)
57fec1fe 5663 gen_op_st_T0_A0(ot + s->mem_index);
2c0262af 5664 else
57fec1fe 5665 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
2c0262af
FB
5666 break;
5667 case 0x8a:
5668 case 0x8b: /* mov Ev, Gv */
5669 if ((b & 1) == 0)
5670 ot = OT_BYTE;
5671 else
14ce26e7 5672 ot = OT_WORD + dflag;
0af10c86 5673 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5674 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5675
0af10c86 5676 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
57fec1fe 5677 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
5678 break;
5679 case 0x8e: /* mov seg, Gv */
0af10c86 5680 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5681 reg = (modrm >> 3) & 7;
5682 if (reg >= 6 || reg == R_CS)
5683 goto illegal_op;
0af10c86 5684 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
2c0262af
FB
5685 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5686 if (reg == R_SS) {
5687 /* if reg == SS, inhibit interrupts/trace */
a2cc3b24
FB
5688 /* If several instructions disable interrupts, only the
5689 _first_ does it */
5690 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
f0967a1a 5691 gen_helper_set_inhibit_irq(cpu_env);
2c0262af
FB
5692 s->tf = 0;
5693 }
5694 if (s->is_jmp) {
14ce26e7 5695 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5696 gen_eob(s);
5697 }
5698 break;
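        /* The SS case models the one-instruction interrupt shadow: loading
           SS inhibits interrupts and single-step traps until after the next
           instruction, so a classic stack switch such as
               mov ss, ax
               mov sp, bx
           cannot be interrupted between the two moves.  QEMU records this
           with the inhibit-IRQ flag and, where needed, ends the translation
           block so the shadow covers exactly one instruction. */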
5699 case 0x8c: /* mov Gv, seg */
0af10c86 5700 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5701 reg = (modrm >> 3) & 7;
5702 mod = (modrm >> 6) & 3;
5703 if (reg >= 6)
5704 goto illegal_op;
5705 gen_op_movl_T0_seg(reg);
14ce26e7
FB
5706 if (mod == 3)
5707 ot = OT_WORD + dflag;
5708 else
5709 ot = OT_WORD;
0af10c86 5710 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
5711 break;
5712
5713 case 0x1b6: /* movzbS Gv, Eb */
5714 case 0x1b7: /* movzwS Gv, Eb */
5715 case 0x1be: /* movsbS Gv, Eb */
5716 case 0x1bf: /* movswS Gv, Eb */
5717 {
5718 int d_ot;
5719 /* d_ot is the size of the destination */
5720 d_ot = dflag + OT_WORD;
5721 /* ot is the size of the source */
5722 ot = (b & 1) + OT_BYTE;
0af10c86 5723 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5724 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 5725 mod = (modrm >> 6) & 3;
14ce26e7 5726 rm = (modrm & 7) | REX_B(s);
3b46e624 5727
2c0262af 5728 if (mod == 3) {
57fec1fe 5729 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af
FB
5730 switch(ot | (b & 8)) {
5731 case OT_BYTE:
e108dd01 5732 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
2c0262af
FB
5733 break;
5734 case OT_BYTE | 8:
e108dd01 5735 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
2c0262af
FB
5736 break;
5737 case OT_WORD:
e108dd01 5738 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
2c0262af
FB
5739 break;
5740 default:
5741 case OT_WORD | 8:
e108dd01 5742 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
2c0262af
FB
5743 break;
5744 }
57fec1fe 5745 gen_op_mov_reg_T0(d_ot, reg);
2c0262af 5746 } else {
0af10c86 5747 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af 5748 if (b & 8) {
57fec1fe 5749 gen_op_lds_T0_A0(ot + s->mem_index);
2c0262af 5750 } else {
57fec1fe 5751 gen_op_ldu_T0_A0(ot + s->mem_index);
2c0262af 5752 }
57fec1fe 5753 gen_op_mov_reg_T0(d_ot, reg);
2c0262af
FB
5754 }
5755 }
5756 break;
5757
5758 case 0x8d: /* lea */
14ce26e7 5759 ot = dflag + OT_WORD;
0af10c86 5760 modrm = cpu_ldub_code(env, s->pc++);
3a1d9b8b
FB
5761 mod = (modrm >> 6) & 3;
5762 if (mod == 3)
5763 goto illegal_op;
14ce26e7 5764 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5765 /* we must ensure that no segment is added */
5766 s->override = -1;
5767 val = s->addseg;
5768 s->addseg = 0;
0af10c86 5769 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af 5770 s->addseg = val;
57fec1fe 5771 gen_op_mov_reg_A0(ot - OT_WORD, reg);
2c0262af 5772 break;
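        /* LEA only evaluates the addressing expression; no segment base is
           added (hence the override/addseg juggling above) and no memory is
           accessed.  Roughly, for example:
               lea 0x10(%ebx,%esi,4), %eax   =>   eax = ebx + esi*4 + 0x10;
        */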
3b46e624 5773
2c0262af
FB
5774 case 0xa0: /* mov EAX, Ov */
5775 case 0xa1:
5776 case 0xa2: /* mov Ov, EAX */
5777 case 0xa3:
2c0262af 5778 {
14ce26e7
FB
5779 target_ulong offset_addr;
5780
5781 if ((b & 1) == 0)
5782 ot = OT_BYTE;
5783 else
5784 ot = dflag + OT_WORD;
5785#ifdef TARGET_X86_64
8f091a59 5786 if (s->aflag == 2) {
0af10c86 5787 offset_addr = cpu_ldq_code(env, s->pc);
14ce26e7 5788 s->pc += 8;
57fec1fe 5789 gen_op_movq_A0_im(offset_addr);
5fafdf24 5790 } else
14ce26e7
FB
5791#endif
5792 {
5793 if (s->aflag) {
0af10c86 5794 offset_addr = insn_get(env, s, OT_LONG);
14ce26e7 5795 } else {
0af10c86 5796 offset_addr = insn_get(env, s, OT_WORD);
14ce26e7
FB
5797 }
5798 gen_op_movl_A0_im(offset_addr);
5799 }
664e0f19 5800 gen_add_A0_ds_seg(s);
14ce26e7 5801 if ((b & 2) == 0) {
57fec1fe
FB
5802 gen_op_ld_T0_A0(ot + s->mem_index);
5803 gen_op_mov_reg_T0(ot, R_EAX);
14ce26e7 5804 } else {
57fec1fe
FB
5805 gen_op_mov_TN_reg(ot, 0, R_EAX);
5806 gen_op_st_T0_A0(ot + s->mem_index);
2c0262af
FB
5807 }
5808 }
2c0262af
FB
5809 break;
5810 case 0xd7: /* xlat */
14ce26e7 5811#ifdef TARGET_X86_64
8f091a59 5812 if (s->aflag == 2) {
57fec1fe 5813 gen_op_movq_A0_reg(R_EBX);
bbf662ee
FB
5814 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5815 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5816 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5fafdf24 5817 } else
14ce26e7
FB
5818#endif
5819 {
57fec1fe 5820 gen_op_movl_A0_reg(R_EBX);
bbf662ee
FB
5821 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5822 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5823 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
14ce26e7
FB
5824 if (s->aflag == 0)
5825 gen_op_andl_A0_ffff();
bbf662ee
FB
5826 else
5827 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
14ce26e7 5828 }
664e0f19 5829 gen_add_A0_ds_seg(s);
57fec1fe
FB
5830 gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
5831 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
2c0262af
FB
5832 break;
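        /* XLAT, sketched with illustrative names:
               AL = *(uint8_t *)(ds_base + ((RBX/EBX/BX + AL) & addr_mask));
           i.e. AL is zero-extended, added to the base register, the sum is
           truncated to the current address size and a byte is loaded from
           the DS-relative address, exactly as the ops above build cpu_A0. */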
5833 case 0xb0 ... 0xb7: /* mov R, Ib */
0af10c86 5834 val = insn_get(env, s, OT_BYTE);
2c0262af 5835 gen_op_movl_T0_im(val);
57fec1fe 5836 gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
2c0262af
FB
5837 break;
5838 case 0xb8 ... 0xbf: /* mov R, Iv */
14ce26e7
FB
5839#ifdef TARGET_X86_64
5840 if (dflag == 2) {
5841 uint64_t tmp;
5842 /* 64 bit case */
0af10c86 5843 tmp = cpu_ldq_code(env, s->pc);
14ce26e7
FB
5844 s->pc += 8;
5845 reg = (b & 7) | REX_B(s);
5846 gen_movtl_T0_im(tmp);
57fec1fe 5847 gen_op_mov_reg_T0(OT_QUAD, reg);
5fafdf24 5848 } else
14ce26e7
FB
5849#endif
5850 {
5851 ot = dflag ? OT_LONG : OT_WORD;
0af10c86 5852 val = insn_get(env, s, ot);
14ce26e7
FB
5853 reg = (b & 7) | REX_B(s);
5854 gen_op_movl_T0_im(val);
57fec1fe 5855 gen_op_mov_reg_T0(ot, reg);
14ce26e7 5856 }
2c0262af
FB
5857 break;
5858
5859 case 0x91 ... 0x97: /* xchg R, EAX */
7418027e 5860 do_xchg_reg_eax:
14ce26e7
FB
5861 ot = dflag + OT_WORD;
5862 reg = (b & 7) | REX_B(s);
2c0262af
FB
5863 rm = R_EAX;
5864 goto do_xchg_reg;
5865 case 0x86:
5866 case 0x87: /* xchg Ev, Gv */
5867 if ((b & 1) == 0)
5868 ot = OT_BYTE;
5869 else
14ce26e7 5870 ot = dflag + OT_WORD;
0af10c86 5871 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5872 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5873 mod = (modrm >> 6) & 3;
5874 if (mod == 3) {
14ce26e7 5875 rm = (modrm & 7) | REX_B(s);
2c0262af 5876 do_xchg_reg:
57fec1fe
FB
5877 gen_op_mov_TN_reg(ot, 0, reg);
5878 gen_op_mov_TN_reg(ot, 1, rm);
5879 gen_op_mov_reg_T0(ot, rm);
5880 gen_op_mov_reg_T1(ot, reg);
2c0262af 5881 } else {
0af10c86 5882 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
57fec1fe 5883 gen_op_mov_TN_reg(ot, 0, reg);
2c0262af
FB
5884 /* for xchg, lock is implicit */
5885 if (!(prefixes & PREFIX_LOCK))
a7812ae4 5886 gen_helper_lock();
57fec1fe
FB
5887 gen_op_ld_T1_A0(ot + s->mem_index);
5888 gen_op_st_T0_A0(ot + s->mem_index);
2c0262af 5889 if (!(prefixes & PREFIX_LOCK))
a7812ae4 5890 gen_helper_unlock();
57fec1fe 5891 gen_op_mov_reg_T1(ot, reg);
2c0262af
FB
5892 }
5893 break;
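        /* XCHG with a memory operand is always locked on real hardware, even
           without a LOCK prefix, which is why gen_helper_lock()/unlock()
           bracket the load and store above when no explicit prefix already
           took the lock.  Sketched:
               atomically { tmp = *mem; *mem = reg; reg = tmp; }
        */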
5894 case 0xc4: /* les Gv */
701ed211 5895 /* In CODE64 this is VEX3; see above. */
2c0262af
FB
5896 op = R_ES;
5897 goto do_lxx;
5898 case 0xc5: /* lds Gv */
701ed211 5899 /* In CODE64 this is VEX2; see above. */
2c0262af
FB
5900 op = R_DS;
5901 goto do_lxx;
5902 case 0x1b2: /* lss Gv */
5903 op = R_SS;
5904 goto do_lxx;
5905 case 0x1b4: /* lfs Gv */
5906 op = R_FS;
5907 goto do_lxx;
5908 case 0x1b5: /* lgs Gv */
5909 op = R_GS;
5910 do_lxx:
5911 ot = dflag ? OT_LONG : OT_WORD;
0af10c86 5912 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5913 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5914 mod = (modrm >> 6) & 3;
5915 if (mod == 3)
5916 goto illegal_op;
0af10c86 5917 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
57fec1fe 5918 gen_op_ld_T1_A0(ot + s->mem_index);
aba9d61e 5919 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
2c0262af 5920 /* load the segment first to handle exceptions properly */
57fec1fe 5921 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
2c0262af
FB
5922 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5923 /* then put the data */
57fec1fe 5924 gen_op_mov_reg_T1(ot, reg);
2c0262af 5925 if (s->is_jmp) {
14ce26e7 5926 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5927 gen_eob(s);
5928 }
5929 break;
3b46e624 5930
2c0262af
FB
5931 /************************/
5932 /* shifts */
5933 case 0xc0:
5934 case 0xc1:
5935 /* shift Ev,Ib */
5936 shift = 2;
5937 grp2:
5938 {
5939 if ((b & 1) == 0)
5940 ot = OT_BYTE;
5941 else
14ce26e7 5942 ot = dflag + OT_WORD;
3b46e624 5943
0af10c86 5944 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5945 mod = (modrm >> 6) & 3;
2c0262af 5946 op = (modrm >> 3) & 7;
3b46e624 5947
2c0262af 5948 if (mod != 3) {
14ce26e7
FB
5949 if (shift == 2) {
5950 s->rip_offset = 1;
5951 }
0af10c86 5952 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af
FB
5953 opreg = OR_TMP0;
5954 } else {
14ce26e7 5955 opreg = (modrm & 7) | REX_B(s);
2c0262af
FB
5956 }
5957
5958 /* simpler op */
5959 if (shift == 0) {
5960 gen_shift(s, op, ot, opreg, OR_ECX);
5961 } else {
5962 if (shift == 2) {
0af10c86 5963 shift = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5964 }
5965 gen_shifti(s, op, ot, opreg, shift);
5966 }
5967 }
5968 break;
5969 case 0xd0:
5970 case 0xd1:
5971 /* shift Ev,1 */
5972 shift = 1;
5973 goto grp2;
5974 case 0xd2:
5975 case 0xd3:
5976 /* shift Ev,cl */
5977 shift = 0;
5978 goto grp2;
5979
5980 case 0x1a4: /* shld imm */
5981 op = 0;
5982 shift = 1;
5983 goto do_shiftd;
5984 case 0x1a5: /* shld cl */
5985 op = 0;
5986 shift = 0;
5987 goto do_shiftd;
5988 case 0x1ac: /* shrd imm */
5989 op = 1;
5990 shift = 1;
5991 goto do_shiftd;
5992 case 0x1ad: /* shrd cl */
5993 op = 1;
5994 shift = 0;
5995 do_shiftd:
14ce26e7 5996 ot = dflag + OT_WORD;
0af10c86 5997 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5998 mod = (modrm >> 6) & 3;
14ce26e7
FB
5999 rm = (modrm & 7) | REX_B(s);
6000 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 6001 if (mod != 3) {
0af10c86 6002 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
b6abf97d 6003 opreg = OR_TMP0;
2c0262af 6004 } else {
b6abf97d 6005 opreg = rm;
2c0262af 6006 }
57fec1fe 6007 gen_op_mov_TN_reg(ot, 1, reg);
3b46e624 6008
2c0262af 6009 if (shift) {
3b9d3cf1
PB
6010 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
6011 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
6012 tcg_temp_free(imm);
2c0262af 6013 } else {
3b9d3cf1 6014 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
2c0262af
FB
6015 }
6016 break;
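        /* SHLD/SHRD shift the destination while feeding in bits from the
           second (register) operand rather than zeros.  Roughly, for a shift
           count in 1..width-1 (illustrative names):
               SHLD: dest = (dest << count) | (src >> (width - count));
               SHRD: dest = (dest >> count) | (src << (width - count));
           gen_shiftd_rm_T1() above handles the count masking and the flag
           updates. */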
6017
6018 /************************/
6019 /* floats */
5fafdf24 6020 case 0xd8 ... 0xdf:
7eee2a50
FB
6021 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
6022 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
6023 /* XXX: what to do if illegal op ? */
6024 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6025 break;
6026 }
0af10c86 6027 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
6028 mod = (modrm >> 6) & 3;
6029 rm = modrm & 7;
6030 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
2c0262af
FB
6031 if (mod != 3) {
6032 /* memory op */
0af10c86 6033 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af
FB
6034 switch(op) {
6035 case 0x00 ... 0x07: /* fxxxs */
6036 case 0x10 ... 0x17: /* fixxxl */
6037 case 0x20 ... 0x27: /* fxxxl */
6038 case 0x30 ... 0x37: /* fixxx */
6039 {
6040 int op1;
6041 op1 = op & 7;
6042
6043 switch(op >> 4) {
6044 case 0:
ba7cd150 6045 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
b6abf97d 6046 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6047 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6048 break;
6049 case 1:
ba7cd150 6050 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
b6abf97d 6051 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6052 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6053 break;
6054 case 2:
b6abf97d 6055 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
19e6c4b8 6056 (s->mem_index >> 2) - 1);
d3eb5eae 6057 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
6058 break;
6059 case 3:
6060 default:
ba7cd150 6061 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
b6abf97d 6062 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6063 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6064 break;
6065 }
3b46e624 6066
a7812ae4 6067 gen_helper_fp_arith_ST0_FT0(op1);
2c0262af
FB
6068 if (op1 == 3) {
6069 /* fcomp needs pop */
d3eb5eae 6070 gen_helper_fpop(cpu_env);
2c0262af
FB
6071 }
6072 }
6073 break;
6074 case 0x08: /* flds */
6075 case 0x0a: /* fsts */
6076 case 0x0b: /* fstps */
465e9838
FB
6077 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
6078 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
6079 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
2c0262af
FB
6080 switch(op & 7) {
6081 case 0:
6082 switch(op >> 4) {
6083 case 0:
ba7cd150 6084 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
b6abf97d 6085 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6086 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6087 break;
6088 case 1:
ba7cd150 6089 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
b6abf97d 6090 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6091 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6092 break;
6093 case 2:
b6abf97d 6094 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
19e6c4b8 6095 (s->mem_index >> 2) - 1);
d3eb5eae 6096 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
6097 break;
6098 case 3:
6099 default:
ba7cd150 6100 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
b6abf97d 6101 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6102 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6103 break;
6104 }
6105 break;
465e9838 6106 case 1:
19e6c4b8 6107 /* XXX: the corresponding CPUID bit must be tested! */
465e9838
FB
6108 switch(op >> 4) {
6109 case 1:
d3eb5eae 6110 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
b6abf97d 6111 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
ba7cd150 6112 gen_op_st_T0_A0(OT_LONG + s->mem_index);
465e9838
FB
6113 break;
6114 case 2:
d3eb5eae 6115 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
b6abf97d 6116 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
19e6c4b8 6117 (s->mem_index >> 2) - 1);
465e9838
FB
6118 break;
6119 case 3:
6120 default:
d3eb5eae 6121 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
b6abf97d 6122 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
ba7cd150 6123 gen_op_st_T0_A0(OT_WORD + s->mem_index);
19e6c4b8 6124 break;
465e9838 6125 }
d3eb5eae 6126 gen_helper_fpop(cpu_env);
465e9838 6127 break;
2c0262af
FB
6128 default:
6129 switch(op >> 4) {
6130 case 0:
d3eb5eae 6131 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
b6abf97d 6132 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
ba7cd150 6133 gen_op_st_T0_A0(OT_LONG + s->mem_index);
2c0262af
FB
6134 break;
6135 case 1:
d3eb5eae 6136 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
b6abf97d 6137 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
ba7cd150 6138 gen_op_st_T0_A0(OT_LONG + s->mem_index);
2c0262af
FB
6139 break;
6140 case 2:
d3eb5eae 6141 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
b6abf97d 6142 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
19e6c4b8 6143 (s->mem_index >> 2) - 1);
2c0262af
FB
6144 break;
6145 case 3:
6146 default:
d3eb5eae 6147 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
b6abf97d 6148 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
ba7cd150 6149 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2c0262af
FB
6150 break;
6151 }
6152 if ((op & 7) == 3)
d3eb5eae 6153 gen_helper_fpop(cpu_env);
2c0262af
FB
6154 break;
6155 }
6156 break;
6157 case 0x0c: /* fldenv mem */
773cdfcc 6158 gen_update_cc_op(s);
19e6c4b8 6159 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6160 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
2c0262af
FB
6161 break;
6162 case 0x0d: /* fldcw mem */
19e6c4b8 6163 gen_op_ld_T0_A0(OT_WORD + s->mem_index);
b6abf97d 6164 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6165 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6166 break;
6167 case 0x0e: /* fnstenv mem */
773cdfcc 6168 gen_update_cc_op(s);
19e6c4b8 6169 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6170 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
2c0262af
FB
6171 break;
6172 case 0x0f: /* fnstcw mem */
d3eb5eae 6173 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
b6abf97d 6174 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
19e6c4b8 6175 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2c0262af
FB
6176 break;
6177 case 0x1d: /* fldt mem */
773cdfcc 6178 gen_update_cc_op(s);
19e6c4b8 6179 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6180 gen_helper_fldt_ST0(cpu_env, cpu_A0);
2c0262af
FB
6181 break;
6182 case 0x1f: /* fstpt mem */
773cdfcc 6183 gen_update_cc_op(s);
19e6c4b8 6184 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae
BS
6185 gen_helper_fstt_ST0(cpu_env, cpu_A0);
6186 gen_helper_fpop(cpu_env);
2c0262af
FB
6187 break;
6188 case 0x2c: /* frstor mem */
773cdfcc 6189 gen_update_cc_op(s);
19e6c4b8 6190 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6191 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
2c0262af
FB
6192 break;
6193 case 0x2e: /* fnsave mem */
773cdfcc 6194 gen_update_cc_op(s);
19e6c4b8 6195 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6196 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
2c0262af
FB
6197 break;
6198 case 0x2f: /* fnstsw mem */
d3eb5eae 6199 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
b6abf97d 6200 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
19e6c4b8 6201 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2c0262af
FB
6202 break;
6203 case 0x3c: /* fbld */
773cdfcc 6204 gen_update_cc_op(s);
19e6c4b8 6205 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6206 gen_helper_fbld_ST0(cpu_env, cpu_A0);
2c0262af
FB
6207 break;
6208 case 0x3e: /* fbstp */
773cdfcc 6209 gen_update_cc_op(s);
19e6c4b8 6210 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae
BS
6211 gen_helper_fbst_ST0(cpu_env, cpu_A0);
6212 gen_helper_fpop(cpu_env);
2c0262af
FB
6213 break;
6214 case 0x3d: /* fildll */
b6abf97d 6215 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
19e6c4b8 6216 (s->mem_index >> 2) - 1);
d3eb5eae 6217 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
6218 break;
6219 case 0x3f: /* fistpll */
d3eb5eae 6220 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
b6abf97d 6221 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
19e6c4b8 6222 (s->mem_index >> 2) - 1);
d3eb5eae 6223 gen_helper_fpop(cpu_env);
2c0262af
FB
6224 break;
6225 default:
6226 goto illegal_op;
6227 }
6228 } else {
6229 /* register float ops */
6230 opreg = rm;
6231
6232 switch(op) {
6233 case 0x08: /* fld sti */
d3eb5eae
BS
6234 gen_helper_fpush(cpu_env);
6235 gen_helper_fmov_ST0_STN(cpu_env,
6236 tcg_const_i32((opreg + 1) & 7));
2c0262af
FB
6237 break;
6238 case 0x09: /* fxchg sti */
c169c906
FB
6239 case 0x29: /* fxchg4 sti, undocumented op */
6240 case 0x39: /* fxchg7 sti, undocumented op */
d3eb5eae 6241 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
2c0262af
FB
6242 break;
6243 case 0x0a: /* grp d9/2 */
6244 switch(rm) {
6245 case 0: /* fnop */
023fe10d 6246 /* check exceptions (FreeBSD FPU probe) */
773cdfcc 6247 gen_update_cc_op(s);
14ce26e7 6248 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6249 gen_helper_fwait(cpu_env);
2c0262af
FB
6250 break;
6251 default:
6252 goto illegal_op;
6253 }
6254 break;
6255 case 0x0c: /* grp d9/4 */
6256 switch(rm) {
6257 case 0: /* fchs */
d3eb5eae 6258 gen_helper_fchs_ST0(cpu_env);
2c0262af
FB
6259 break;
6260 case 1: /* fabs */
d3eb5eae 6261 gen_helper_fabs_ST0(cpu_env);
2c0262af
FB
6262 break;
6263 case 4: /* ftst */
d3eb5eae
BS
6264 gen_helper_fldz_FT0(cpu_env);
6265 gen_helper_fcom_ST0_FT0(cpu_env);
2c0262af
FB
6266 break;
6267 case 5: /* fxam */
d3eb5eae 6268 gen_helper_fxam_ST0(cpu_env);
2c0262af
FB
6269 break;
6270 default:
6271 goto illegal_op;
6272 }
6273 break;
6274 case 0x0d: /* grp d9/5 */
6275 {
6276 switch(rm) {
6277 case 0:
d3eb5eae
BS
6278 gen_helper_fpush(cpu_env);
6279 gen_helper_fld1_ST0(cpu_env);
2c0262af
FB
6280 break;
6281 case 1:
d3eb5eae
BS
6282 gen_helper_fpush(cpu_env);
6283 gen_helper_fldl2t_ST0(cpu_env);
2c0262af
FB
6284 break;
6285 case 2:
d3eb5eae
BS
6286 gen_helper_fpush(cpu_env);
6287 gen_helper_fldl2e_ST0(cpu_env);
2c0262af
FB
6288 break;
6289 case 3:
d3eb5eae
BS
6290 gen_helper_fpush(cpu_env);
6291 gen_helper_fldpi_ST0(cpu_env);
2c0262af
FB
6292 break;
6293 case 4:
d3eb5eae
BS
6294 gen_helper_fpush(cpu_env);
6295 gen_helper_fldlg2_ST0(cpu_env);
2c0262af
FB
6296 break;
6297 case 5:
d3eb5eae
BS
6298 gen_helper_fpush(cpu_env);
6299 gen_helper_fldln2_ST0(cpu_env);
2c0262af
FB
6300 break;
6301 case 6:
d3eb5eae
BS
6302 gen_helper_fpush(cpu_env);
6303 gen_helper_fldz_ST0(cpu_env);
2c0262af
FB
6304 break;
6305 default:
6306 goto illegal_op;
6307 }
6308 }
6309 break;
6310 case 0x0e: /* grp d9/6 */
6311 switch(rm) {
6312 case 0: /* f2xm1 */
d3eb5eae 6313 gen_helper_f2xm1(cpu_env);
2c0262af
FB
6314 break;
6315 case 1: /* fyl2x */
d3eb5eae 6316 gen_helper_fyl2x(cpu_env);
2c0262af
FB
6317 break;
6318 case 2: /* fptan */
d3eb5eae 6319 gen_helper_fptan(cpu_env);
2c0262af
FB
6320 break;
6321 case 3: /* fpatan */
d3eb5eae 6322 gen_helper_fpatan(cpu_env);
2c0262af
FB
6323 break;
6324 case 4: /* fxtract */
d3eb5eae 6325 gen_helper_fxtract(cpu_env);
2c0262af
FB
6326 break;
6327 case 5: /* fprem1 */
d3eb5eae 6328 gen_helper_fprem1(cpu_env);
2c0262af
FB
6329 break;
6330 case 6: /* fdecstp */
d3eb5eae 6331 gen_helper_fdecstp(cpu_env);
2c0262af
FB
6332 break;
6333 default:
6334 case 7: /* fincstp */
d3eb5eae 6335 gen_helper_fincstp(cpu_env);
2c0262af
FB
6336 break;
6337 }
6338 break;
6339 case 0x0f: /* grp d9/7 */
6340 switch(rm) {
6341 case 0: /* fprem */
d3eb5eae 6342 gen_helper_fprem(cpu_env);
2c0262af
FB
6343 break;
6344 case 1: /* fyl2xp1 */
d3eb5eae 6345 gen_helper_fyl2xp1(cpu_env);
2c0262af
FB
6346 break;
6347 case 2: /* fsqrt */
d3eb5eae 6348 gen_helper_fsqrt(cpu_env);
2c0262af
FB
6349 break;
6350 case 3: /* fsincos */
d3eb5eae 6351 gen_helper_fsincos(cpu_env);
2c0262af
FB
6352 break;
6353 case 5: /* fscale */
d3eb5eae 6354 gen_helper_fscale(cpu_env);
2c0262af
FB
6355 break;
6356 case 4: /* frndint */
d3eb5eae 6357 gen_helper_frndint(cpu_env);
2c0262af
FB
6358 break;
6359 case 6: /* fsin */
d3eb5eae 6360 gen_helper_fsin(cpu_env);
2c0262af
FB
6361 break;
6362 default:
6363 case 7: /* fcos */
d3eb5eae 6364 gen_helper_fcos(cpu_env);
2c0262af
FB
6365 break;
6366 }
6367 break;
6368 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6369 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6370 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6371 {
6372 int op1;
3b46e624 6373
2c0262af
FB
6374 op1 = op & 7;
6375 if (op >= 0x20) {
a7812ae4 6376 gen_helper_fp_arith_STN_ST0(op1, opreg);
2c0262af 6377 if (op >= 0x30)
d3eb5eae 6378 gen_helper_fpop(cpu_env);
2c0262af 6379 } else {
d3eb5eae 6380 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
a7812ae4 6381 gen_helper_fp_arith_ST0_FT0(op1);
2c0262af
FB
6382 }
6383 }
6384 break;
6385 case 0x02: /* fcom */
c169c906 6386 case 0x22: /* fcom2, undocumented op */
d3eb5eae
BS
6387 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6388 gen_helper_fcom_ST0_FT0(cpu_env);
2c0262af
FB
6389 break;
6390 case 0x03: /* fcomp */
c169c906
FB
6391 case 0x23: /* fcomp3, undocumented op */
6392 case 0x32: /* fcomp5, undocumented op */
d3eb5eae
BS
6393 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6394 gen_helper_fcom_ST0_FT0(cpu_env);
6395 gen_helper_fpop(cpu_env);
2c0262af
FB
6396 break;
6397 case 0x15: /* da/5 */
6398 switch(rm) {
6399 case 1: /* fucompp */
d3eb5eae
BS
6400 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6401 gen_helper_fucom_ST0_FT0(cpu_env);
6402 gen_helper_fpop(cpu_env);
6403 gen_helper_fpop(cpu_env);
2c0262af
FB
6404 break;
6405 default:
6406 goto illegal_op;
6407 }
6408 break;
6409 case 0x1c:
6410 switch(rm) {
6411 case 0: /* feni (287 only, just do nop here) */
6412 break;
6413 case 1: /* fdisi (287 only, just do nop here) */
6414 break;
6415 case 2: /* fclex */
d3eb5eae 6416 gen_helper_fclex(cpu_env);
2c0262af
FB
6417 break;
6418 case 3: /* fninit */
d3eb5eae 6419 gen_helper_fninit(cpu_env);
2c0262af
FB
6420 break;
6421 case 4: /* fsetpm (287 only, just do nop here) */
6422 break;
6423 default:
6424 goto illegal_op;
6425 }
6426 break;
6427 case 0x1d: /* fucomi */
773cdfcc 6428 gen_update_cc_op(s);
d3eb5eae
BS
6429 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6430 gen_helper_fucomi_ST0_FT0(cpu_env);
3ca51d07 6431 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6432 break;
6433 case 0x1e: /* fcomi */
773cdfcc 6434 gen_update_cc_op(s);
d3eb5eae
BS
6435 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6436 gen_helper_fcomi_ST0_FT0(cpu_env);
3ca51d07 6437 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 6438 break;
658c8bda 6439 case 0x28: /* ffree sti */
d3eb5eae 6440 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
5fafdf24 6441 break;
2c0262af 6442 case 0x2a: /* fst sti */
d3eb5eae 6443 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
2c0262af
FB
6444 break;
6445 case 0x2b: /* fstp sti */
c169c906
FB
6446 case 0x0b: /* fstp1 sti, undocumented op */
6447 case 0x3a: /* fstp8 sti, undocumented op */
6448 case 0x3b: /* fstp9 sti, undocumented op */
d3eb5eae
BS
6449 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6450 gen_helper_fpop(cpu_env);
2c0262af
FB
6451 break;
6452 case 0x2c: /* fucom st(i) */
d3eb5eae
BS
6453 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6454 gen_helper_fucom_ST0_FT0(cpu_env);
2c0262af
FB
6455 break;
6456 case 0x2d: /* fucomp st(i) */
d3eb5eae
BS
6457 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6458 gen_helper_fucom_ST0_FT0(cpu_env);
6459 gen_helper_fpop(cpu_env);
2c0262af
FB
6460 break;
6461 case 0x33: /* de/3 */
6462 switch(rm) {
6463 case 1: /* fcompp */
d3eb5eae
BS
6464 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6465 gen_helper_fcom_ST0_FT0(cpu_env);
6466 gen_helper_fpop(cpu_env);
6467 gen_helper_fpop(cpu_env);
2c0262af
FB
6468 break;
6469 default:
6470 goto illegal_op;
6471 }
6472 break;
c169c906 6473 case 0x38: /* ffreep sti, undocumented op */
d3eb5eae
BS
6474 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6475 gen_helper_fpop(cpu_env);
c169c906 6476 break;
2c0262af
FB
6477 case 0x3c: /* df/4 */
6478 switch(rm) {
6479 case 0:
d3eb5eae 6480 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
b6abf97d 6481 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
19e6c4b8 6482 gen_op_mov_reg_T0(OT_WORD, R_EAX);
2c0262af
FB
6483 break;
6484 default:
6485 goto illegal_op;
6486 }
6487 break;
6488 case 0x3d: /* fucomip */
773cdfcc 6489 gen_update_cc_op(s);
d3eb5eae
BS
6490 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6491 gen_helper_fucomi_ST0_FT0(cpu_env);
6492 gen_helper_fpop(cpu_env);
3ca51d07 6493 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6494 break;
6495 case 0x3e: /* fcomip */
773cdfcc 6496 gen_update_cc_op(s);
d3eb5eae
BS
6497 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6498 gen_helper_fcomi_ST0_FT0(cpu_env);
6499 gen_helper_fpop(cpu_env);
3ca51d07 6500 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 6501 break;
a2cc3b24
FB
6502 case 0x10 ... 0x13: /* fcmovxx */
6503 case 0x18 ... 0x1b:
6504 {
19e6c4b8 6505 int op1, l1;
d70040bc 6506 static const uint8_t fcmov_cc[8] = {
a2cc3b24
FB
6507 (JCC_B << 1),
6508 (JCC_Z << 1),
6509 (JCC_BE << 1),
6510 (JCC_P << 1),
6511 };
1e4840bf 6512 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
19e6c4b8 6513 l1 = gen_new_label();
dc259201 6514 gen_jcc1_noeob(s, op1, l1);
d3eb5eae 6515 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
19e6c4b8 6516 gen_set_label(l1);
a2cc3b24
FB
6517 }
6518 break;
2c0262af
FB
6519 default:
6520 goto illegal_op;
6521 }
6522 }
6523 break;
6524 /************************/
6525 /* string ops */
6526
6527 case 0xa4: /* movsS */
6528 case 0xa5:
6529 if ((b & 1) == 0)
6530 ot = OT_BYTE;
6531 else
14ce26e7 6532 ot = dflag + OT_WORD;
2c0262af
FB
6533
6534 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6535 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6536 } else {
6537 gen_movs(s, ot);
6538 }
6539 break;
3b46e624 6540
2c0262af
FB
6541 case 0xaa: /* stosS */
6542 case 0xab:
6543 if ((b & 1) == 0)
6544 ot = OT_BYTE;
6545 else
14ce26e7 6546 ot = dflag + OT_WORD;
2c0262af
FB
6547
6548 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6549 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6550 } else {
6551 gen_stos(s, ot);
6552 }
6553 break;
6554 case 0xac: /* lodsS */
6555 case 0xad:
6556 if ((b & 1) == 0)
6557 ot = OT_BYTE;
6558 else
14ce26e7 6559 ot = dflag + OT_WORD;
2c0262af
FB
6560 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6561 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6562 } else {
6563 gen_lods(s, ot);
6564 }
6565 break;
6566 case 0xae: /* scasS */
6567 case 0xaf:
6568 if ((b & 1) == 0)
6569 ot = OT_BYTE;
6570 else
14ce26e7 6571 ot = dflag + OT_WORD;
2c0262af
FB
6572 if (prefixes & PREFIX_REPNZ) {
6573 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6574 } else if (prefixes & PREFIX_REPZ) {
6575 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6576 } else {
6577 gen_scas(s, ot);
2c0262af
FB
6578 }
6579 break;
6580
6581 case 0xa6: /* cmpsS */
6582 case 0xa7:
6583 if ((b & 1) == 0)
6584 ot = OT_BYTE;
6585 else
14ce26e7 6586 ot = dflag + OT_WORD;
2c0262af
FB
6587 if (prefixes & PREFIX_REPNZ) {
6588 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6589 } else if (prefixes & PREFIX_REPZ) {
6590 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6591 } else {
6592 gen_cmps(s, ot);
2c0262af
FB
6593 }
6594 break;
6595 case 0x6c: /* insS */
6596 case 0x6d:
f115e911
FB
6597 if ((b & 1) == 0)
6598 ot = OT_BYTE;
6599 else
6600 ot = dflag ? OT_LONG : OT_WORD;
57fec1fe 6601 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
0573fbfc 6602 gen_op_andl_T0_ffff();
b8b6a50b
FB
6603 gen_check_io(s, ot, pc_start - s->cs_base,
6604 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
f115e911
FB
6605 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6606 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
2c0262af 6607 } else {
f115e911 6608 gen_ins(s, ot);
2e70f6ef
PB
6609 if (use_icount) {
6610 gen_jmp(s, s->pc - s->cs_base);
6611 }
2c0262af
FB
6612 }
6613 break;
6614 case 0x6e: /* outsS */
6615 case 0x6f:
f115e911
FB
6616 if ((b & 1) == 0)
6617 ot = OT_BYTE;
6618 else
6619 ot = dflag ? OT_LONG : OT_WORD;
57fec1fe 6620 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
0573fbfc 6621 gen_op_andl_T0_ffff();
b8b6a50b
FB
6622 gen_check_io(s, ot, pc_start - s->cs_base,
6623 svm_is_rep(prefixes) | 4);
f115e911
FB
6624 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6625 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
2c0262af 6626 } else {
f115e911 6627 gen_outs(s, ot);
2e70f6ef
PB
6628 if (use_icount) {
6629 gen_jmp(s, s->pc - s->cs_base);
6630 }
2c0262af
FB
6631 }
6632 break;
6633
6634 /************************/
6635 /* port I/O */
0573fbfc 6636
2c0262af
FB
6637 case 0xe4:
6638 case 0xe5:
f115e911
FB
6639 if ((b & 1) == 0)
6640 ot = OT_BYTE;
6641 else
6642 ot = dflag ? OT_LONG : OT_WORD;
0af10c86 6643 val = cpu_ldub_code(env, s->pc++);
f115e911 6644 gen_op_movl_T0_im(val);
b8b6a50b
FB
6645 gen_check_io(s, ot, pc_start - s->cs_base,
6646 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
2e70f6ef
PB
6647 if (use_icount)
6648 gen_io_start();
b6abf97d 6649 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
a7812ae4 6650 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
57fec1fe 6651 gen_op_mov_reg_T1(ot, R_EAX);
2e70f6ef
PB
6652 if (use_icount) {
6653 gen_io_end();
6654 gen_jmp(s, s->pc - s->cs_base);
6655 }
2c0262af
FB
6656 break;
6657 case 0xe6:
6658 case 0xe7:
f115e911
FB
6659 if ((b & 1) == 0)
6660 ot = OT_BYTE;
6661 else
6662 ot = dflag ? OT_LONG : OT_WORD;
0af10c86 6663 val = cpu_ldub_code(env, s->pc++);
f115e911 6664 gen_op_movl_T0_im(val);
b8b6a50b
FB
6665 gen_check_io(s, ot, pc_start - s->cs_base,
6666 svm_is_rep(prefixes));
57fec1fe 6667 gen_op_mov_TN_reg(ot, 1, R_EAX);
b8b6a50b 6668
2e70f6ef
PB
6669 if (use_icount)
6670 gen_io_start();
b6abf97d 6671 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
b6abf97d 6672 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
a7812ae4 6673 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
2e70f6ef
PB
6674 if (use_icount) {
6675 gen_io_end();
6676 gen_jmp(s, s->pc - s->cs_base);
6677 }
2c0262af
FB
6678 break;
6679 case 0xec:
6680 case 0xed:
f115e911
FB
6681 if ((b & 1) == 0)
6682 ot = OT_BYTE;
6683 else
6684 ot = dflag ? OT_LONG : OT_WORD;
57fec1fe 6685 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
4f31916f 6686 gen_op_andl_T0_ffff();
b8b6a50b
FB
6687 gen_check_io(s, ot, pc_start - s->cs_base,
6688 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
2e70f6ef
PB
6689 if (use_icount)
6690 gen_io_start();
b6abf97d 6691 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
a7812ae4 6692 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
57fec1fe 6693 gen_op_mov_reg_T1(ot, R_EAX);
2e70f6ef
PB
6694 if (use_icount) {
6695 gen_io_end();
6696 gen_jmp(s, s->pc - s->cs_base);
6697 }
2c0262af
FB
6698 break;
6699 case 0xee:
6700 case 0xef:
f115e911
FB
6701 if ((b & 1) == 0)
6702 ot = OT_BYTE;
6703 else
6704 ot = dflag ? OT_LONG : OT_WORD;
57fec1fe 6705 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
4f31916f 6706 gen_op_andl_T0_ffff();
b8b6a50b
FB
6707 gen_check_io(s, ot, pc_start - s->cs_base,
6708 svm_is_rep(prefixes));
57fec1fe 6709 gen_op_mov_TN_reg(ot, 1, R_EAX);
b8b6a50b 6710
2e70f6ef
PB
6711 if (use_icount)
6712 gen_io_start();
b6abf97d 6713 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
b6abf97d 6714 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
a7812ae4 6715 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
2e70f6ef
PB
6716 if (use_icount) {
6717 gen_io_end();
6718 gen_jmp(s, s->pc - s->cs_base);
6719 }
2c0262af
FB
6720 break;
6721
6722 /************************/
6723 /* control */
6724 case 0xc2: /* ret im */
0af10c86 6725 val = cpu_ldsw_code(env, s->pc);
2c0262af
FB
6726 s->pc += 2;
6727 gen_pop_T0(s);
8f091a59
FB
6728 if (CODE64(s) && s->dflag)
6729 s->dflag = 2;
2c0262af
FB
6730 gen_stack_update(s, val + (2 << s->dflag));
6731 if (s->dflag == 0)
6732 gen_op_andl_T0_ffff();
6733 gen_op_jmp_T0();
6734 gen_eob(s);
6735 break;
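        /* RET imm16, sketched: the return address is popped, then the stack
           pointer is additionally bumped by the immediate to discard the
           callee's stack arguments:
               EIP  = pop();
               ESP += imm16;
           gen_stack_update() above folds both adjustments into a single add. */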
6736 case 0xc3: /* ret */
6737 gen_pop_T0(s);
6738 gen_pop_update(s);
6739 if (s->dflag == 0)
6740 gen_op_andl_T0_ffff();
6741 gen_op_jmp_T0();
6742 gen_eob(s);
6743 break;
6744 case 0xca: /* lret im */
0af10c86 6745 val = cpu_ldsw_code(env, s->pc);
2c0262af
FB
6746 s->pc += 2;
6747 do_lret:
6748 if (s->pe && !s->vm86) {
773cdfcc 6749 gen_update_cc_op(s);
14ce26e7 6750 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 6751 gen_helper_lret_protected(cpu_env, tcg_const_i32(s->dflag),
a7812ae4 6752 tcg_const_i32(val));
2c0262af
FB
6753 } else {
6754 gen_stack_A0(s);
6755 /* pop offset */
57fec1fe 6756 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
2c0262af
FB
6757 if (s->dflag == 0)
6758 gen_op_andl_T0_ffff();
6759 /* NOTE: keeping EIP updated is not a problem in case of
6760 exception */
6761 gen_op_jmp_T0();
6762 /* pop selector */
6763 gen_op_addl_A0_im(2 << s->dflag);
57fec1fe 6764 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
3bd7da9e 6765 gen_op_movl_seg_T0_vm(R_CS);
2c0262af
FB
6766 /* add stack offset */
6767 gen_stack_update(s, val + (4 << s->dflag));
6768 }
6769 gen_eob(s);
6770 break;
6771 case 0xcb: /* lret */
6772 val = 0;
6773 goto do_lret;
6774 case 0xcf: /* iret */
872929aa 6775 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
2c0262af
FB
6776 if (!s->pe) {
6777 /* real mode */
2999a0b2 6778 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
3ca51d07 6779 set_cc_op(s, CC_OP_EFLAGS);
f115e911
FB
6780 } else if (s->vm86) {
6781 if (s->iopl != 3) {
6782 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6783 } else {
2999a0b2 6784 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
3ca51d07 6785 set_cc_op(s, CC_OP_EFLAGS);
f115e911 6786 }
2c0262af 6787 } else {
773cdfcc 6788 gen_update_cc_op(s);
14ce26e7 6789 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 6790 gen_helper_iret_protected(cpu_env, tcg_const_i32(s->dflag),
a7812ae4 6791 tcg_const_i32(s->pc - s->cs_base));
3ca51d07 6792 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6793 }
6794 gen_eob(s);
6795 break;
6796 case 0xe8: /* call im */
6797 {
14ce26e7 6798 if (dflag)
0af10c86 6799 tval = (int32_t)insn_get(env, s, OT_LONG);
14ce26e7 6800 else
0af10c86 6801 tval = (int16_t)insn_get(env, s, OT_WORD);
2c0262af 6802 next_eip = s->pc - s->cs_base;
14ce26e7 6803 tval += next_eip;
2c0262af 6804 if (s->dflag == 0)
14ce26e7 6805 tval &= 0xffff;
99596385
AJ
6806 else if(!CODE64(s))
6807 tval &= 0xffffffff;
14ce26e7 6808 gen_movtl_T0_im(next_eip);
2c0262af 6809 gen_push_T0(s);
14ce26e7 6810 gen_jmp(s, tval);
2c0262af
FB
6811 }
6812 break;
6813 case 0x9a: /* lcall im */
6814 {
6815 unsigned int selector, offset;
3b46e624 6816
14ce26e7
FB
6817 if (CODE64(s))
6818 goto illegal_op;
2c0262af 6819 ot = dflag ? OT_LONG : OT_WORD;
0af10c86
BS
6820 offset = insn_get(env, s, ot);
6821 selector = insn_get(env, s, OT_WORD);
3b46e624 6822
2c0262af 6823 gen_op_movl_T0_im(selector);
14ce26e7 6824 gen_op_movl_T1_imu(offset);
2c0262af
FB
6825 }
6826 goto do_lcall;
ecada8a2 6827 case 0xe9: /* jmp im */
14ce26e7 6828 if (dflag)
0af10c86 6829 tval = (int32_t)insn_get(env, s, OT_LONG);
14ce26e7 6830 else
0af10c86 6831 tval = (int16_t)insn_get(env, s, OT_WORD);
14ce26e7 6832 tval += s->pc - s->cs_base;
2c0262af 6833 if (s->dflag == 0)
14ce26e7 6834 tval &= 0xffff;
32938e12
AJ
6835 else if(!CODE64(s))
6836 tval &= 0xffffffff;
14ce26e7 6837 gen_jmp(s, tval);
2c0262af
FB
6838 break;
6839 case 0xea: /* ljmp im */
6840 {
6841 unsigned int selector, offset;
6842
14ce26e7
FB
6843 if (CODE64(s))
6844 goto illegal_op;
2c0262af 6845 ot = dflag ? OT_LONG : OT_WORD;
0af10c86
BS
6846 offset = insn_get(env, s, ot);
6847 selector = insn_get(env, s, OT_WORD);
3b46e624 6848
2c0262af 6849 gen_op_movl_T0_im(selector);
14ce26e7 6850 gen_op_movl_T1_imu(offset);
2c0262af
FB
6851 }
6852 goto do_ljmp;
6853 case 0xeb: /* jmp Jb */
0af10c86 6854 tval = (int8_t)insn_get(env, s, OT_BYTE);
14ce26e7 6855 tval += s->pc - s->cs_base;
2c0262af 6856 if (s->dflag == 0)
14ce26e7
FB
6857 tval &= 0xffff;
6858 gen_jmp(s, tval);
2c0262af
FB
6859 break;
6860 case 0x70 ... 0x7f: /* jcc Jb */
0af10c86 6861 tval = (int8_t)insn_get(env, s, OT_BYTE);
2c0262af
FB
6862 goto do_jcc;
6863 case 0x180 ... 0x18f: /* jcc Jv */
6864 if (dflag) {
0af10c86 6865 tval = (int32_t)insn_get(env, s, OT_LONG);
2c0262af 6866 } else {
0af10c86 6867 tval = (int16_t)insn_get(env, s, OT_WORD);
2c0262af
FB
6868 }
6869 do_jcc:
6870 next_eip = s->pc - s->cs_base;
14ce26e7 6871 tval += next_eip;
2c0262af 6872 if (s->dflag == 0)
14ce26e7
FB
6873 tval &= 0xffff;
6874 gen_jcc(s, b, tval, next_eip);
2c0262af
FB
6875 break;
6876
6877 case 0x190 ... 0x19f: /* setcc Gv */
0af10c86 6878 modrm = cpu_ldub_code(env, s->pc++);
cc8b6f5b 6879 gen_setcc1(s, b, cpu_T[0]);
0af10c86 6880 gen_ldst_modrm(env, s, modrm, OT_BYTE, OR_TMP0, 1);
2c0262af
FB
6881 break;
6882 case 0x140 ... 0x14f: /* cmov Gv, Ev */
f32d3781
PB
6883 ot = dflag + OT_WORD;
6884 modrm = cpu_ldub_code(env, s->pc++);
6885 reg = ((modrm >> 3) & 7) | rex_r;
6886 gen_cmovcc1(env, s, ot, b, modrm, reg);
2c0262af 6887 break;
3b46e624 6888
2c0262af
FB
6889 /************************/
6890 /* flags */
6891 case 0x9c: /* pushf */
872929aa 6892 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
2c0262af
FB
6893 if (s->vm86 && s->iopl != 3) {
6894 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6895 } else {
773cdfcc 6896 gen_update_cc_op(s);
f0967a1a 6897 gen_helper_read_eflags(cpu_T[0], cpu_env);
2c0262af
FB
6898 gen_push_T0(s);
6899 }
6900 break;
6901 case 0x9d: /* popf */
872929aa 6902 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
2c0262af
FB
6903 if (s->vm86 && s->iopl != 3) {
6904 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6905 } else {
6906 gen_pop_T0(s);
6907 if (s->cpl == 0) {
6908 if (s->dflag) {
f0967a1a
BS
6909 gen_helper_write_eflags(cpu_env, cpu_T[0],
6910 tcg_const_i32((TF_MASK | AC_MASK |
6911 ID_MASK | NT_MASK |
6912 IF_MASK |
6913 IOPL_MASK)));
2c0262af 6914 } else {
f0967a1a
BS
6915 gen_helper_write_eflags(cpu_env, cpu_T[0],
6916 tcg_const_i32((TF_MASK | AC_MASK |
6917 ID_MASK | NT_MASK |
6918 IF_MASK | IOPL_MASK)
6919 & 0xffff));
2c0262af
FB
6920 }
6921 } else {
4136f33c
FB
6922 if (s->cpl <= s->iopl) {
6923 if (s->dflag) {
f0967a1a
BS
6924 gen_helper_write_eflags(cpu_env, cpu_T[0],
6925 tcg_const_i32((TF_MASK |
6926 AC_MASK |
6927 ID_MASK |
6928 NT_MASK |
6929 IF_MASK)));
4136f33c 6930 } else {
f0967a1a
BS
6931 gen_helper_write_eflags(cpu_env, cpu_T[0],
6932 tcg_const_i32((TF_MASK |
6933 AC_MASK |
6934 ID_MASK |
6935 NT_MASK |
6936 IF_MASK)
6937 & 0xffff));
4136f33c 6938 }
2c0262af 6939 } else {
4136f33c 6940 if (s->dflag) {
f0967a1a
BS
6941 gen_helper_write_eflags(cpu_env, cpu_T[0],
6942 tcg_const_i32((TF_MASK | AC_MASK |
6943 ID_MASK | NT_MASK)));
4136f33c 6944 } else {
f0967a1a
BS
6945 gen_helper_write_eflags(cpu_env, cpu_T[0],
6946 tcg_const_i32((TF_MASK | AC_MASK |
6947 ID_MASK | NT_MASK)
6948 & 0xffff));
4136f33c 6949 }
2c0262af
FB
6950 }
6951 }
6952 gen_pop_update(s);
3ca51d07 6953 set_cc_op(s, CC_OP_EFLAGS);
a9321a4d 6954 /* abort translation because TF/AC flag may change */
14ce26e7 6955 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
6956 gen_eob(s);
6957 }
6958 break;
6959 case 0x9e: /* sahf */
12e26b75 6960 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
14ce26e7 6961 goto illegal_op;
57fec1fe 6962 gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
d229edce 6963 gen_compute_eflags(s);
bd7a7b33
FB
6964 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6965 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6966 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
2c0262af
FB
6967 break;
6968 case 0x9f: /* lahf */
12e26b75 6969 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
14ce26e7 6970 goto illegal_op;
d229edce 6971 gen_compute_eflags(s);
bd7a7b33 6972 /* Note: gen_compute_eflags() only gives the condition codes */
d229edce 6973 tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
57fec1fe 6974 gen_op_mov_reg_T0(OT_BYTE, R_AH);
2c0262af
FB
6975 break;
6976 case 0xf5: /* cmc */
d229edce 6977 gen_compute_eflags(s);
bd7a7b33 6978 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
2c0262af
FB
6979 break;
6980 case 0xf8: /* clc */
d229edce 6981 gen_compute_eflags(s);
bd7a7b33 6982 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
2c0262af
FB
6983 break;
6984 case 0xf9: /* stc */
d229edce 6985 gen_compute_eflags(s);
bd7a7b33 6986 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
2c0262af
FB
6987 break;
6988 case 0xfc: /* cld */
b6abf97d 6989 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
317ac620 6990 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
2c0262af
FB
6991 break;
6992 case 0xfd: /* std */
b6abf97d 6993 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
317ac620 6994 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
2c0262af
FB
6995 break;
6996
6997 /************************/
6998 /* bit operations */
6999 case 0x1ba: /* bt/bts/btr/btc Gv, im */
14ce26e7 7000 ot = dflag + OT_WORD;
0af10c86 7001 modrm = cpu_ldub_code(env, s->pc++);
33698e5f 7002 op = (modrm >> 3) & 7;
2c0262af 7003 mod = (modrm >> 6) & 3;
14ce26e7 7004 rm = (modrm & 7) | REX_B(s);
2c0262af 7005 if (mod != 3) {
14ce26e7 7006 s->rip_offset = 1;
0af10c86 7007 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
57fec1fe 7008 gen_op_ld_T0_A0(ot + s->mem_index);
2c0262af 7009 } else {
57fec1fe 7010 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af
FB
7011 }
7012 /* load shift */
0af10c86 7013 val = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7014 gen_op_movl_T1_im(val);
7015 if (op < 4)
7016 goto illegal_op;
7017 op -= 4;
f484d386 7018 goto bt_op;
2c0262af
FB
7019 case 0x1a3: /* bt Gv, Ev */
7020 op = 0;
7021 goto do_btx;
7022 case 0x1ab: /* bts */
7023 op = 1;
7024 goto do_btx;
7025 case 0x1b3: /* btr */
7026 op = 2;
7027 goto do_btx;
7028 case 0x1bb: /* btc */
7029 op = 3;
7030 do_btx:
14ce26e7 7031 ot = dflag + OT_WORD;
0af10c86 7032 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 7033 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 7034 mod = (modrm >> 6) & 3;
14ce26e7 7035 rm = (modrm & 7) | REX_B(s);
57fec1fe 7036 gen_op_mov_TN_reg(OT_LONG, 1, reg);
2c0262af 7037 if (mod != 3) {
0af10c86 7038 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af 7039 /* specific case: we need to add a displacement */
f484d386
FB
7040 gen_exts(ot, cpu_T[1]);
7041 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
7042 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
7043 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
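            /* The bit offset in cpu_T[1] was sign-extended, divided by the
               operand width (arithmetic shift by 3 + ot) and scaled back to a
               byte offset, so cpu_A0 now addresses the word holding the bit. */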
57fec1fe 7044 gen_op_ld_T0_A0(ot + s->mem_index);
2c0262af 7045 } else {
57fec1fe 7046 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af 7047 }
f484d386
FB
7048 bt_op:
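        /* Common tail for the bit ops: mask the bit offset to the operand
           width, then sample the selected bit.  bt only reads it, while
           bts/btr/btc also set, clear or toggle it in cpu_T[0].  The sampled
           bit is kept (in cpu_cc_src or cpu_tmp4) so that CF can later be
           recovered via CC_OP_SARB + ot. */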
7049 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
7050 switch(op) {
7051 case 0:
7052 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
7053 tcg_gen_movi_tl(cpu_cc_dst, 0);
7054 break;
7055 case 1:
7056 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
7057 tcg_gen_movi_tl(cpu_tmp0, 1);
7058 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
7059 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
7060 break;
7061 case 2:
7062 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
7063 tcg_gen_movi_tl(cpu_tmp0, 1);
7064 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
7065 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
7066 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
7067 break;
7068 default:
7069 case 3:
7070 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
7071 tcg_gen_movi_tl(cpu_tmp0, 1);
7072 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
7073 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
7074 break;
7075 }
3ca51d07 7076 set_cc_op(s, CC_OP_SARB + ot);
2c0262af
FB
7077 if (op != 0) {
7078 if (mod != 3)
57fec1fe 7079 gen_op_st_T0_A0(ot + s->mem_index);
2c0262af 7080 else
57fec1fe 7081 gen_op_mov_reg_T0(ot, rm);
f484d386
FB
7082 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
7083 tcg_gen_movi_tl(cpu_cc_dst, 0);
2c0262af
FB
7084 }
7085 break;
321c5351
RH
7086 case 0x1bc: /* bsf / tzcnt */
7087 case 0x1bd: /* bsr / lzcnt */
7088 ot = dflag + OT_WORD;
7089 modrm = cpu_ldub_code(env, s->pc++);
7090 reg = ((modrm >> 3) & 7) | rex_r;
7091 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
7092 gen_extu(ot, cpu_T[0]);
7093
7094 /* Note that lzcnt and tzcnt are in different extensions. */
7095 if ((prefixes & PREFIX_REPZ)
7096 && (b & 1
7097 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
7098 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
7099 int size = 8 << ot;
7100 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
7101 if (b & 1) {
7102 /* For lzcnt, reduce the target_ulong result by the
7103 number of zeros that we expect to find at the top. */
7104 gen_helper_clz(cpu_T[0], cpu_T[0]);
7105 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
6191b059 7106 } else {
321c5351
RH
7107 /* For tzcnt, a zero input must return the operand size:
7108 force all bits outside the operand size to 1. */
7109 target_ulong mask = (target_ulong)-2 << (size - 1);
7110 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
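                /* (target_ulong)-2 << (size - 1) sets every bit at position
                   'size' and above, so a zero input now yields ctz == size,
                   exactly the result tzcnt architecturally requires. */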
7111 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6191b059 7112 }
321c5351
RH
7113 /* For lzcnt/tzcnt, C and Z bits are defined and are
7114 related to the result. */
7115 gen_op_update1_cc();
7116 set_cc_op(s, CC_OP_BMILGB + ot);
7117 } else {
7118 /* For bsr/bsf, only the Z bit is defined and it is related
7119 to the input and not the result. */
7120 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
7121 set_cc_op(s, CC_OP_LOGICB + ot);
7122 if (b & 1) {
7123 /* For bsr, return the bit index of the first 1 bit,
7124 not the count of leading zeros. */
7125 gen_helper_clz(cpu_T[0], cpu_T[0]);
7126 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
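                /* clz returns TARGET_LONG_BITS - 1 - index for the zero-extended
                   input; xor with TARGET_LONG_BITS - 1 converts that back into
                   the index of the highest set bit, as bsr requires. */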
7127 } else {
7128 gen_helper_ctz(cpu_T[0], cpu_T[0]);
7129 }
7130 /* ??? The manual says that the output is undefined when the
7131 input is zero, but real hardware leaves it unchanged, and
7132 real programs appear to depend on that. */
7133 tcg_gen_movi_tl(cpu_tmp0, 0);
7134 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
7135 cpu_regs[reg], cpu_T[0]);
6191b059 7136 }
321c5351 7137 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
7138 break;
7139 /************************/
7140 /* bcd */
7141 case 0x27: /* daa */
14ce26e7
FB
7142 if (CODE64(s))
7143 goto illegal_op;
773cdfcc 7144 gen_update_cc_op(s);
7923057b 7145 gen_helper_daa(cpu_env);
3ca51d07 7146 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
7147 break;
7148 case 0x2f: /* das */
14ce26e7
FB
7149 if (CODE64(s))
7150 goto illegal_op;
773cdfcc 7151 gen_update_cc_op(s);
7923057b 7152 gen_helper_das(cpu_env);
3ca51d07 7153 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
7154 break;
7155 case 0x37: /* aaa */
14ce26e7
FB
7156 if (CODE64(s))
7157 goto illegal_op;
773cdfcc 7158 gen_update_cc_op(s);
7923057b 7159 gen_helper_aaa(cpu_env);
3ca51d07 7160 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
7161 break;
7162 case 0x3f: /* aas */
14ce26e7
FB
7163 if (CODE64(s))
7164 goto illegal_op;
773cdfcc 7165 gen_update_cc_op(s);
7923057b 7166 gen_helper_aas(cpu_env);
3ca51d07 7167 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
7168 break;
7169 case 0xd4: /* aam */
14ce26e7
FB
7170 if (CODE64(s))
7171 goto illegal_op;
0af10c86 7172 val = cpu_ldub_code(env, s->pc++);
b6d7c3db
TS
7173 if (val == 0) {
7174 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
7175 } else {
7923057b 7176 gen_helper_aam(cpu_env, tcg_const_i32(val));
3ca51d07 7177 set_cc_op(s, CC_OP_LOGICB);
b6d7c3db 7178 }
2c0262af
FB
7179 break;
7180 case 0xd5: /* aad */
14ce26e7
FB
7181 if (CODE64(s))
7182 goto illegal_op;
0af10c86 7183 val = cpu_ldub_code(env, s->pc++);
7923057b 7184 gen_helper_aad(cpu_env, tcg_const_i32(val));
3ca51d07 7185 set_cc_op(s, CC_OP_LOGICB);
2c0262af
FB
7186 break;
7187 /************************/
7188 /* misc */
7189 case 0x90: /* nop */
ab1f142b 7190 /* XXX: correct lock test for all insn */
7418027e 7191 if (prefixes & PREFIX_LOCK) {
ab1f142b 7192 goto illegal_op;
7418027e
RH
7193 }
7194 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
7195 if (REX_B(s)) {
7196 goto do_xchg_reg_eax;
7197 }
0573fbfc
TS
7198 if (prefixes & PREFIX_REPZ) {
7199 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
7200 }
2c0262af
FB
7201 break;
7202 case 0x9b: /* fwait */
5fafdf24 7203 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
7eee2a50
FB
7204 (HF_MP_MASK | HF_TS_MASK)) {
7205 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
2ee73ac3 7206 } else {
773cdfcc 7207 gen_update_cc_op(s);
14ce26e7 7208 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 7209 gen_helper_fwait(cpu_env);
7eee2a50 7210 }
2c0262af
FB
7211 break;
7212 case 0xcc: /* int3 */
7213 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
7214 break;
7215 case 0xcd: /* int N */
0af10c86 7216 val = cpu_ldub_code(env, s->pc++);
f115e911 7217 if (s->vm86 && s->iopl != 3) {
5fafdf24 7218 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
f115e911
FB
7219 } else {
7220 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
7221 }
2c0262af
FB
7222 break;
7223 case 0xce: /* into */
14ce26e7
FB
7224 if (CODE64(s))
7225 goto illegal_op;
773cdfcc 7226 gen_update_cc_op(s);
a8ede8ba 7227 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7228 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
2c0262af 7229 break;
0b97134b 7230#ifdef WANT_ICEBP
2c0262af 7231 case 0xf1: /* icebp (undocumented, exits to external debugger) */
872929aa 7232 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
aba9d61e 7233#if 1
2c0262af 7234 gen_debug(s, pc_start - s->cs_base);
aba9d61e
FB
7235#else
7236 /* start debug */
0af10c86 7237 tb_flush(env);
24537a01 7238 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
aba9d61e 7239#endif
2c0262af 7240 break;
0b97134b 7241#endif
2c0262af
FB
7242 case 0xfa: /* cli */
7243 if (!s->vm86) {
7244 if (s->cpl <= s->iopl) {
f0967a1a 7245 gen_helper_cli(cpu_env);
2c0262af
FB
7246 } else {
7247 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7248 }
7249 } else {
7250 if (s->iopl == 3) {
f0967a1a 7251 gen_helper_cli(cpu_env);
2c0262af
FB
7252 } else {
7253 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7254 }
7255 }
7256 break;
7257 case 0xfb: /* sti */
7258 if (!s->vm86) {
7259 if (s->cpl <= s->iopl) {
7260 gen_sti:
f0967a1a 7261 gen_helper_sti(cpu_env);
2c0262af 7262 /* interrupts are enabled only after the first insn following sti */
a2cc3b24
FB
7263 /* If several instructions disable interrupts, only the
7264 _first_ does it */
7265 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
f0967a1a 7266 gen_helper_set_inhibit_irq(cpu_env);
2c0262af 7267 /* give a chance to handle pending irqs */
14ce26e7 7268 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
7269 gen_eob(s);
7270 } else {
7271 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7272 }
7273 } else {
7274 if (s->iopl == 3) {
7275 goto gen_sti;
7276 } else {
7277 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7278 }
7279 }
7280 break;
7281 case 0x62: /* bound */
14ce26e7
FB
7282 if (CODE64(s))
7283 goto illegal_op;
2c0262af 7284 ot = dflag ? OT_LONG : OT_WORD;
0af10c86 7285 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7286 reg = (modrm >> 3) & 7;
7287 mod = (modrm >> 6) & 3;
7288 if (mod == 3)
7289 goto illegal_op;
57fec1fe 7290 gen_op_mov_TN_reg(ot, 0, reg);
0af10c86 7291 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
14ce26e7 7292 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 7293 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
92fc4b58
BS
7294 if (ot == OT_WORD) {
7295 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
7296 } else {
7297 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
7298 }
2c0262af
FB
7299 break;
7300 case 0x1c8 ... 0x1cf: /* bswap reg */
14ce26e7
FB
7301 reg = (b & 7) | REX_B(s);
7302#ifdef TARGET_X86_64
7303 if (dflag == 2) {
57fec1fe 7304 gen_op_mov_TN_reg(OT_QUAD, 0, reg);
66896cb8 7305 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
57fec1fe 7306 gen_op_mov_reg_T0(OT_QUAD, reg);
5fafdf24 7307 } else
8777643e 7308#endif
57fec1fe
FB
7309 {
7310 gen_op_mov_TN_reg(OT_LONG, 0, reg);
8777643e
AJ
7311 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
7312 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
57fec1fe 7313 gen_op_mov_reg_T0(OT_LONG, reg);
14ce26e7 7314 }
2c0262af
FB
7315 break;
7316 case 0xd6: /* salc */
14ce26e7
FB
7317 if (CODE64(s))
7318 goto illegal_op;
cc8b6f5b 7319 gen_compute_eflags_c(s, cpu_T[0]);
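        /* Undocumented SALC: the carry is computed as 0 or 1 in cpu_T[0];
           negating it gives 0x00 or 0xFF, which is written back to AL. */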
bd7a7b33
FB
7320 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
7321 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
2c0262af
FB
7322 break;
7323 case 0xe0: /* loopnz */
7324 case 0xe1: /* loopz */
2c0262af
FB
7325 case 0xe2: /* loop */
7326 case 0xe3: /* jecxz */
14ce26e7 7327 {
6e0d8677 7328 int l1, l2, l3;
14ce26e7 7329
0af10c86 7330 tval = (int8_t)insn_get(env, s, OT_BYTE);
14ce26e7
FB
7331 next_eip = s->pc - s->cs_base;
7332 tval += next_eip;
7333 if (s->dflag == 0)
7334 tval &= 0xffff;
3b46e624 7335
14ce26e7
FB
7336 l1 = gen_new_label();
7337 l2 = gen_new_label();
6e0d8677 7338 l3 = gen_new_label();
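            /* l1: branch taken (jump to tval); l3: loop condition failed, fall
               through to the next instruction; l2: common exit ending the TB. */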
14ce26e7 7339 b &= 3;
6e0d8677
FB
7340 switch(b) {
7341 case 0: /* loopnz */
7342 case 1: /* loopz */
6e0d8677
FB
7343 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7344 gen_op_jz_ecx(s->aflag, l3);
5bdb91b0 7345 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
6e0d8677
FB
7346 break;
7347 case 2: /* loop */
7348 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7349 gen_op_jnz_ecx(s->aflag, l1);
7350 break;
7351 default:
7352 case 3: /* jcxz */
7353 gen_op_jz_ecx(s->aflag, l1);
7354 break;
14ce26e7
FB
7355 }
7356
6e0d8677 7357 gen_set_label(l3);
14ce26e7 7358 gen_jmp_im(next_eip);
8e1c85e3 7359 tcg_gen_br(l2);
6e0d8677 7360
14ce26e7
FB
7361 gen_set_label(l1);
7362 gen_jmp_im(tval);
7363 gen_set_label(l2);
7364 gen_eob(s);
7365 }
2c0262af
FB
7366 break;
7367 case 0x130: /* wrmsr */
7368 case 0x132: /* rdmsr */
7369 if (s->cpl != 0) {
7370 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7371 } else {
773cdfcc 7372 gen_update_cc_op(s);
872929aa 7373 gen_jmp_im(pc_start - s->cs_base);
0573fbfc 7374 if (b & 2) {
4a7443be 7375 gen_helper_rdmsr(cpu_env);
0573fbfc 7376 } else {
4a7443be 7377 gen_helper_wrmsr(cpu_env);
0573fbfc 7378 }
2c0262af
FB
7379 }
7380 break;
7381 case 0x131: /* rdtsc */
773cdfcc 7382 gen_update_cc_op(s);
ecada8a2 7383 gen_jmp_im(pc_start - s->cs_base);
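        /* With icount enabled, rdtsc is treated like an I/O operation: the
           helper is bracketed by gen_io_start()/gen_io_end() and the TB is
           ended so the instruction counter stays exact. */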
efade670
PB
7384 if (use_icount)
7385 gen_io_start();
4a7443be 7386 gen_helper_rdtsc(cpu_env);
efade670
PB
7387 if (use_icount) {
7388 gen_io_end();
7389 gen_jmp(s, s->pc - s->cs_base);
7390 }
2c0262af 7391 break;
df01e0fc 7392 case 0x133: /* rdpmc */
773cdfcc 7393 gen_update_cc_op(s);
df01e0fc 7394 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7395 gen_helper_rdpmc(cpu_env);
df01e0fc 7396 break;
023fe10d 7397 case 0x134: /* sysenter */
2436b61a 7398 /* For Intel SYSENTER is valid on 64-bit */
0af10c86 7399 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
14ce26e7 7400 goto illegal_op;
023fe10d
FB
7401 if (!s->pe) {
7402 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7403 } else {
728d803b 7404 gen_update_cc_op(s);
14ce26e7 7405 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7406 gen_helper_sysenter(cpu_env);
023fe10d
FB
7407 gen_eob(s);
7408 }
7409 break;
7410 case 0x135: /* sysexit */
2436b61a 7411 /* For Intel SYSEXIT is valid on 64-bit */
0af10c86 7412 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
14ce26e7 7413 goto illegal_op;
023fe10d
FB
7414 if (!s->pe) {
7415 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7416 } else {
728d803b 7417 gen_update_cc_op(s);
14ce26e7 7418 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7419 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag));
023fe10d
FB
7420 gen_eob(s);
7421 }
7422 break;
14ce26e7
FB
7423#ifdef TARGET_X86_64
7424 case 0x105: /* syscall */
7425 /* XXX: is it usable in real mode? */
728d803b 7426 gen_update_cc_op(s);
14ce26e7 7427 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7428 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
14ce26e7
FB
7429 gen_eob(s);
7430 break;
7431 case 0x107: /* sysret */
7432 if (!s->pe) {
7433 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7434 } else {
728d803b 7435 gen_update_cc_op(s);
14ce26e7 7436 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7437 gen_helper_sysret(cpu_env, tcg_const_i32(s->dflag));
aba9d61e 7438 /* condition codes are modified only in long mode */
3ca51d07
RH
7439 if (s->lma) {
7440 set_cc_op(s, CC_OP_EFLAGS);
7441 }
14ce26e7
FB
7442 gen_eob(s);
7443 }
7444 break;
7445#endif
2c0262af 7446 case 0x1a2: /* cpuid */
773cdfcc 7447 gen_update_cc_op(s);
9575cb94 7448 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7449 gen_helper_cpuid(cpu_env);
2c0262af
FB
7450 break;
7451 case 0xf4: /* hlt */
7452 if (s->cpl != 0) {
7453 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7454 } else {
773cdfcc 7455 gen_update_cc_op(s);
94451178 7456 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7457 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
5779406a 7458 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
7459 }
7460 break;
7461 case 0x100:
0af10c86 7462 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7463 mod = (modrm >> 6) & 3;
7464 op = (modrm >> 3) & 7;
7465 switch(op) {
7466 case 0: /* sldt */
f115e911
FB
7467 if (!s->pe || s->vm86)
7468 goto illegal_op;
872929aa 7469 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
651ba608 7470 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
2c0262af
FB
7471 ot = OT_WORD;
7472 if (mod == 3)
7473 ot += s->dflag;
0af10c86 7474 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
7475 break;
7476 case 2: /* lldt */
f115e911
FB
7477 if (!s->pe || s->vm86)
7478 goto illegal_op;
2c0262af
FB
7479 if (s->cpl != 0) {
7480 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7481 } else {
872929aa 7482 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
0af10c86 7483 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
14ce26e7 7484 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 7485 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 7486 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
2c0262af
FB
7487 }
7488 break;
7489 case 1: /* str */
f115e911
FB
7490 if (!s->pe || s->vm86)
7491 goto illegal_op;
872929aa 7492 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
651ba608 7493 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
2c0262af
FB
7494 ot = OT_WORD;
7495 if (mod == 3)
7496 ot += s->dflag;
0af10c86 7497 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
7498 break;
7499 case 3: /* ltr */
f115e911
FB
7500 if (!s->pe || s->vm86)
7501 goto illegal_op;
2c0262af
FB
7502 if (s->cpl != 0) {
7503 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7504 } else {
872929aa 7505 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
0af10c86 7506 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
14ce26e7 7507 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 7508 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 7509 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
2c0262af
FB
7510 }
7511 break;
7512 case 4: /* verr */
7513 case 5: /* verw */
f115e911
FB
7514 if (!s->pe || s->vm86)
7515 goto illegal_op;
0af10c86 7516 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
773cdfcc 7517 gen_update_cc_op(s);
2999a0b2
BS
7518 if (op == 4) {
7519 gen_helper_verr(cpu_env, cpu_T[0]);
7520 } else {
7521 gen_helper_verw(cpu_env, cpu_T[0]);
7522 }
3ca51d07 7523 set_cc_op(s, CC_OP_EFLAGS);
f115e911 7524 break;
2c0262af
FB
7525 default:
7526 goto illegal_op;
7527 }
7528 break;
7529 case 0x101:
0af10c86 7530 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7531 mod = (modrm >> 6) & 3;
7532 op = (modrm >> 3) & 7;
3d7374c5 7533 rm = modrm & 7;
2c0262af
FB
7534 switch(op) {
7535 case 0: /* sgdt */
2c0262af
FB
7536 if (mod == 3)
7537 goto illegal_op;
872929aa 7538 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
0af10c86 7539 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
651ba608 7540 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
57fec1fe 7541 gen_op_st_T0_A0(OT_WORD + s->mem_index);
aba9d61e 7542 gen_add_A0_im(s, 2);
651ba608 7543 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
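            /* With a 16-bit operand size only the low 24 bits of the table
               base are kept; the 32-bit store below then writes a zero top
               byte. */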
2c0262af
FB
7544 if (!s->dflag)
7545 gen_op_andl_T0_im(0xffffff);
57fec1fe 7546 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
2c0262af 7547 break;
3d7374c5
FB
7548 case 1:
7549 if (mod == 3) {
7550 switch (rm) {
7551 case 0: /* monitor */
7552 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7553 s->cpl != 0)
7554 goto illegal_op;
773cdfcc 7555 gen_update_cc_op(s);
3d7374c5
FB
7556 gen_jmp_im(pc_start - s->cs_base);
7557#ifdef TARGET_X86_64
7558 if (s->aflag == 2) {
bbf662ee 7559 gen_op_movq_A0_reg(R_EAX);
5fafdf24 7560 } else
3d7374c5
FB
7561#endif
7562 {
bbf662ee 7563 gen_op_movl_A0_reg(R_EAX);
3d7374c5
FB
7564 if (s->aflag == 0)
7565 gen_op_andl_A0_ffff();
7566 }
7567 gen_add_A0_ds_seg(s);
4a7443be 7568 gen_helper_monitor(cpu_env, cpu_A0);
3d7374c5
FB
7569 break;
7570 case 1: /* mwait */
7571 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7572 s->cpl != 0)
7573 goto illegal_op;
728d803b 7574 gen_update_cc_op(s);
94451178 7575 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7576 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
3d7374c5
FB
7577 gen_eob(s);
7578 break;
a9321a4d
PA
7579 case 2: /* clac */
7580 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7581 s->cpl != 0) {
7582 goto illegal_op;
7583 }
7584 gen_helper_clac(cpu_env);
7585 gen_jmp_im(s->pc - s->cs_base);
7586 gen_eob(s);
7587 break;
7588 case 3: /* stac */
7589 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7590 s->cpl != 0) {
7591 goto illegal_op;
7592 }
7593 gen_helper_stac(cpu_env);
7594 gen_jmp_im(s->pc - s->cs_base);
7595 gen_eob(s);
7596 break;
3d7374c5
FB
7597 default:
7598 goto illegal_op;
7599 }
7600 } else { /* sidt */
872929aa 7601 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
0af10c86 7602 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
651ba608 7603 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
57fec1fe 7604 gen_op_st_T0_A0(OT_WORD + s->mem_index);
3d7374c5 7605 gen_add_A0_im(s, 2);
651ba608 7606 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
3d7374c5
FB
7607 if (!s->dflag)
7608 gen_op_andl_T0_im(0xffffff);
57fec1fe 7609 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
3d7374c5
FB
7610 }
7611 break;
2c0262af
FB
7612 case 2: /* lgdt */
7613 case 3: /* lidt */
0573fbfc 7614 if (mod == 3) {
773cdfcc 7615 gen_update_cc_op(s);
872929aa 7616 gen_jmp_im(pc_start - s->cs_base);
0573fbfc
TS
7617 switch(rm) {
7618 case 0: /* VMRUN */
872929aa
FB
7619 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7620 goto illegal_op;
7621 if (s->cpl != 0) {
7622 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
0573fbfc 7623 break;
872929aa 7624 } else {
052e80d5 7625 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag),
a7812ae4 7626 tcg_const_i32(s->pc - pc_start));
db620f46 7627 tcg_gen_exit_tb(0);
5779406a 7628 s->is_jmp = DISAS_TB_JUMP;
872929aa 7629 }
0573fbfc
TS
7630 break;
7631 case 1: /* VMMCALL */
872929aa
FB
7632 if (!(s->flags & HF_SVME_MASK))
7633 goto illegal_op;
052e80d5 7634 gen_helper_vmmcall(cpu_env);
0573fbfc
TS
7635 break;
7636 case 2: /* VMLOAD */
872929aa
FB
7637 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7638 goto illegal_op;
7639 if (s->cpl != 0) {
7640 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7641 break;
7642 } else {
052e80d5 7643 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag));
872929aa 7644 }
0573fbfc
TS
7645 break;
7646 case 3: /* VMSAVE */
872929aa
FB
7647 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7648 goto illegal_op;
7649 if (s->cpl != 0) {
7650 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7651 break;
7652 } else {
052e80d5 7653 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag));
872929aa 7654 }
0573fbfc
TS
7655 break;
7656 case 4: /* STGI */
872929aa
FB
7657 if ((!(s->flags & HF_SVME_MASK) &&
7658 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7659 !s->pe)
7660 goto illegal_op;
7661 if (s->cpl != 0) {
7662 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7663 break;
7664 } else {
052e80d5 7665 gen_helper_stgi(cpu_env);
872929aa 7666 }
0573fbfc
TS
7667 break;
7668 case 5: /* CLGI */
872929aa
FB
7669 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7670 goto illegal_op;
7671 if (s->cpl != 0) {
7672 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7673 break;
7674 } else {
052e80d5 7675 gen_helper_clgi(cpu_env);
872929aa 7676 }
0573fbfc
TS
7677 break;
7678 case 6: /* SKINIT */
872929aa
FB
7679 if ((!(s->flags & HF_SVME_MASK) &&
7680 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7681 !s->pe)
7682 goto illegal_op;
052e80d5 7683 gen_helper_skinit(cpu_env);
0573fbfc
TS
7684 break;
7685 case 7: /* INVLPGA */
872929aa
FB
7686 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7687 goto illegal_op;
7688 if (s->cpl != 0) {
7689 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7690 break;
7691 } else {
052e80d5 7692 gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag));
872929aa 7693 }
0573fbfc
TS
7694 break;
7695 default:
7696 goto illegal_op;
7697 }
7698 } else if (s->cpl != 0) {
2c0262af
FB
7699 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7700 } else {
872929aa
FB
7701 gen_svm_check_intercept(s, pc_start,
7702 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
0af10c86 7703 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
57fec1fe 7704 gen_op_ld_T1_A0(OT_WORD + s->mem_index);
aba9d61e 7705 gen_add_A0_im(s, 2);
57fec1fe 7706 gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
2c0262af
FB
7707 if (!s->dflag)
7708 gen_op_andl_T0_im(0xffffff);
7709 if (op == 2) {
651ba608
FB
7710 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7711 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
2c0262af 7712 } else {
651ba608
FB
7713 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7714 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
2c0262af
FB
7715 }
7716 }
7717 break;
7718 case 4: /* smsw */
872929aa 7719 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
e2542fe2 7720#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
f60d2728 7721 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7722#else
651ba608 7723 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
f60d2728 7724#endif
0af10c86 7725 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 1);
2c0262af
FB
7726 break;
7727 case 6: /* lmsw */
7728 if (s->cpl != 0) {
7729 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7730 } else {
872929aa 7731 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
0af10c86 7732 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
4a7443be 7733 gen_helper_lmsw(cpu_env, cpu_T[0]);
14ce26e7 7734 gen_jmp_im(s->pc - s->cs_base);
d71b9a8b 7735 gen_eob(s);
2c0262af
FB
7736 }
7737 break;
1b050077
AP
7738 case 7:
7739 if (mod != 3) { /* invlpg */
7740 if (s->cpl != 0) {
7741 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7742 } else {
773cdfcc 7743 gen_update_cc_op(s);
1b050077 7744 gen_jmp_im(pc_start - s->cs_base);
0af10c86 7745 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4a7443be 7746 gen_helper_invlpg(cpu_env, cpu_A0);
1b050077
AP
7747 gen_jmp_im(s->pc - s->cs_base);
7748 gen_eob(s);
7749 }
2c0262af 7750 } else {
1b050077
AP
7751 switch (rm) {
7752 case 0: /* swapgs */
14ce26e7 7753#ifdef TARGET_X86_64
1b050077
AP
7754 if (CODE64(s)) {
7755 if (s->cpl != 0) {
7756 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7757 } else {
7758 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7759 offsetof(CPUX86State,segs[R_GS].base));
7760 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7761 offsetof(CPUX86State,kernelgsbase));
7762 tcg_gen_st_tl(cpu_T[1], cpu_env,
7763 offsetof(CPUX86State,segs[R_GS].base));
7764 tcg_gen_st_tl(cpu_T[0], cpu_env,
7765 offsetof(CPUX86State,kernelgsbase));
7766 }
5fafdf24 7767 } else
14ce26e7
FB
7768#endif
7769 {
7770 goto illegal_op;
7771 }
1b050077
AP
7772 break;
7773 case 1: /* rdtscp */
7774 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7775 goto illegal_op;
773cdfcc 7776 gen_update_cc_op(s);
9575cb94 7777 gen_jmp_im(pc_start - s->cs_base);
1b050077
AP
7778 if (use_icount)
7779 gen_io_start();
4a7443be 7780 gen_helper_rdtscp(cpu_env);
1b050077
AP
7781 if (use_icount) {
7782 gen_io_end();
7783 gen_jmp(s, s->pc - s->cs_base);
7784 }
7785 break;
7786 default:
7787 goto illegal_op;
14ce26e7 7788 }
2c0262af
FB
7789 }
7790 break;
7791 default:
7792 goto illegal_op;
7793 }
7794 break;
3415a4dd
FB
7795 case 0x108: /* invd */
7796 case 0x109: /* wbinvd */
7797 if (s->cpl != 0) {
7798 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7799 } else {
872929aa 7800 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
3415a4dd
FB
7801 /* nothing to do */
7802 }
7803 break;
14ce26e7
FB
7804 case 0x63: /* arpl or movslS (x86_64) */
7805#ifdef TARGET_X86_64
7806 if (CODE64(s)) {
7807 int d_ot;
7808 /* d_ot is the size of destination */
7809 d_ot = dflag + OT_WORD;
7810
0af10c86 7811 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
7812 reg = ((modrm >> 3) & 7) | rex_r;
7813 mod = (modrm >> 6) & 3;
7814 rm = (modrm & 7) | REX_B(s);
3b46e624 7815
14ce26e7 7816 if (mod == 3) {
57fec1fe 7817 gen_op_mov_TN_reg(OT_LONG, 0, rm);
14ce26e7
FB
7818 /* sign extend */
7819 if (d_ot == OT_QUAD)
e108dd01 7820 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
57fec1fe 7821 gen_op_mov_reg_T0(d_ot, reg);
14ce26e7 7822 } else {
0af10c86 7823 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
14ce26e7 7824 if (d_ot == OT_QUAD) {
57fec1fe 7825 gen_op_lds_T0_A0(OT_LONG + s->mem_index);
14ce26e7 7826 } else {
57fec1fe 7827 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
14ce26e7 7828 }
57fec1fe 7829 gen_op_mov_reg_T0(d_ot, reg);
14ce26e7 7830 }
5fafdf24 7831 } else
14ce26e7
FB
7832#endif
7833 {
3bd7da9e 7834 int label1;
49d9fdcc 7835 TCGv t0, t1, t2, a0;
1e4840bf 7836
14ce26e7
FB
7837 if (!s->pe || s->vm86)
7838 goto illegal_op;
a7812ae4
PB
7839 t0 = tcg_temp_local_new();
7840 t1 = tcg_temp_local_new();
7841 t2 = tcg_temp_local_new();
3bd7da9e 7842 ot = OT_WORD;
0af10c86 7843 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
7844 reg = (modrm >> 3) & 7;
7845 mod = (modrm >> 6) & 3;
7846 rm = modrm & 7;
7847 if (mod != 3) {
0af10c86 7848 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
1e4840bf 7849 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
49d9fdcc
LD
7850 a0 = tcg_temp_local_new();
7851 tcg_gen_mov_tl(a0, cpu_A0);
14ce26e7 7852 } else {
1e4840bf 7853 gen_op_mov_v_reg(ot, t0, rm);
49d9fdcc 7854 TCGV_UNUSED(a0);
14ce26e7 7855 }
1e4840bf
FB
7856 gen_op_mov_v_reg(ot, t1, reg);
7857 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7858 tcg_gen_andi_tl(t1, t1, 3);
7859 tcg_gen_movi_tl(t2, 0);
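            /* ARPL: if the destination selector's RPL (low two bits of t0) is
               below the source's RPL (t1), raise it to t1's value and record
               ZF = 1 through t2; otherwise the selector and ZF = 0 are kept. */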
3bd7da9e 7860 label1 = gen_new_label();
1e4840bf
FB
7861 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7862 tcg_gen_andi_tl(t0, t0, ~3);
7863 tcg_gen_or_tl(t0, t0, t1);
7864 tcg_gen_movi_tl(t2, CC_Z);
3bd7da9e 7865 gen_set_label(label1);
14ce26e7 7866 if (mod != 3) {
49d9fdcc
LD
7867 gen_op_st_v(ot + s->mem_index, t0, a0);
7868 tcg_temp_free(a0);
7869 } else {
1e4840bf 7870 gen_op_mov_reg_v(ot, rm, t0);
14ce26e7 7871 }
d229edce 7872 gen_compute_eflags(s);
3bd7da9e 7873 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
1e4840bf 7874 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
1e4840bf
FB
7875 tcg_temp_free(t0);
7876 tcg_temp_free(t1);
7877 tcg_temp_free(t2);
f115e911 7878 }
f115e911 7879 break;
2c0262af
FB
7880 case 0x102: /* lar */
7881 case 0x103: /* lsl */
cec6843e
FB
7882 {
7883 int label1;
1e4840bf 7884 TCGv t0;
cec6843e
FB
7885 if (!s->pe || s->vm86)
7886 goto illegal_op;
7887 ot = dflag ? OT_LONG : OT_WORD;
0af10c86 7888 modrm = cpu_ldub_code(env, s->pc++);
cec6843e 7889 reg = ((modrm >> 3) & 7) | rex_r;
0af10c86 7890 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
a7812ae4 7891 t0 = tcg_temp_local_new();
773cdfcc 7892 gen_update_cc_op(s);
2999a0b2
BS
7893 if (b == 0x102) {
7894 gen_helper_lar(t0, cpu_env, cpu_T[0]);
7895 } else {
7896 gen_helper_lsl(t0, cpu_env, cpu_T[0]);
7897 }
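            /* The lar/lsl helpers report success by setting ZF in cpu_cc_src;
               the destination register is written only when that bit is set. */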
cec6843e
FB
7898 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7899 label1 = gen_new_label();
cb63669a 7900 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
1e4840bf 7901 gen_op_mov_reg_v(ot, reg, t0);
cec6843e 7902 gen_set_label(label1);
3ca51d07 7903 set_cc_op(s, CC_OP_EFLAGS);
1e4840bf 7904 tcg_temp_free(t0);
cec6843e 7905 }
2c0262af
FB
7906 break;
7907 case 0x118:
0af10c86 7908 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7909 mod = (modrm >> 6) & 3;
7910 op = (modrm >> 3) & 7;
7911 switch(op) {
7912 case 0: /* prefetchnta */
7913 case 1: /* prefetcht0 */
7914 case 2: /* prefetcht1 */
7915 case 3: /* prefetcht2 */
7916 if (mod == 3)
7917 goto illegal_op;
0af10c86 7918 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af
FB
7919 /* nothing more to do */
7920 break;
e17a36ce 7921 default: /* nop (multi byte) */
0af10c86 7922 gen_nop_modrm(env, s, modrm);
e17a36ce 7923 break;
2c0262af
FB
7924 }
7925 break;
e17a36ce 7926 case 0x119 ... 0x11f: /* nop (multi byte) */
0af10c86
BS
7927 modrm = cpu_ldub_code(env, s->pc++);
7928 gen_nop_modrm(env, s, modrm);
e17a36ce 7929 break;
2c0262af
FB
7930 case 0x120: /* mov reg, crN */
7931 case 0x122: /* mov crN, reg */
7932 if (s->cpl != 0) {
7933 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7934 } else {
0af10c86 7935 modrm = cpu_ldub_code(env, s->pc++);
5c73b757
MO
7936 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7937 * AMD documentation (24594.pdf) and testing of
7938 * intel 386 and 486 processors all show that the mod bits
7939 * are assumed to be 1's, regardless of actual values.
7940 */
14ce26e7
FB
7941 rm = (modrm & 7) | REX_B(s);
7942 reg = ((modrm >> 3) & 7) | rex_r;
7943 if (CODE64(s))
7944 ot = OT_QUAD;
7945 else
7946 ot = OT_LONG;
ccd59d09
AP
7947 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7948 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7949 reg = 8;
7950 }
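            /* A LOCK prefix on mov to/from CR0 is redirected to CR8 (the task
               priority register) on CPUs advertising AMD's CR8 legacy alias. */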
2c0262af
FB
7951 switch(reg) {
7952 case 0:
7953 case 2:
7954 case 3:
7955 case 4:
9230e66e 7956 case 8:
773cdfcc 7957 gen_update_cc_op(s);
872929aa 7958 gen_jmp_im(pc_start - s->cs_base);
2c0262af 7959 if (b & 2) {
57fec1fe 7960 gen_op_mov_TN_reg(ot, 0, rm);
4a7443be
BS
7961 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7962 cpu_T[0]);
14ce26e7 7963 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
7964 gen_eob(s);
7965 } else {
4a7443be 7966 gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
57fec1fe 7967 gen_op_mov_reg_T0(ot, rm);
2c0262af
FB
7968 }
7969 break;
7970 default:
7971 goto illegal_op;
7972 }
7973 }
7974 break;
7975 case 0x121: /* mov reg, drN */
7976 case 0x123: /* mov drN, reg */
7977 if (s->cpl != 0) {
7978 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7979 } else {
0af10c86 7980 modrm = cpu_ldub_code(env, s->pc++);
5c73b757
MO
7981 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7982 * AMD documentation (24594.pdf) and testing of
7983 * intel 386 and 486 processors all show that the mod bits
7984 * are assumed to be 1's, regardless of actual values.
7985 */
14ce26e7
FB
7986 rm = (modrm & 7) | REX_B(s);
7987 reg = ((modrm >> 3) & 7) | rex_r;
7988 if (CODE64(s))
7989 ot = OT_QUAD;
7990 else
7991 ot = OT_LONG;
2c0262af 7992 /* XXX: do it dynamically with CR4.DE bit */
14ce26e7 7993 if (reg == 4 || reg == 5 || reg >= 8)
2c0262af
FB
7994 goto illegal_op;
7995 if (b & 2) {
0573fbfc 7996 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
57fec1fe 7997 gen_op_mov_TN_reg(ot, 0, rm);
4a7443be 7998 gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
14ce26e7 7999 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
8000 gen_eob(s);
8001 } else {
0573fbfc 8002 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
651ba608 8003 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
57fec1fe 8004 gen_op_mov_reg_T0(ot, rm);
2c0262af
FB
8005 }
8006 }
8007 break;
8008 case 0x106: /* clts */
8009 if (s->cpl != 0) {
8010 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
8011 } else {
0573fbfc 8012 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
f0967a1a 8013 gen_helper_clts(cpu_env);
7eee2a50 8014 /* abort block because static cpu state changed */
14ce26e7 8015 gen_jmp_im(s->pc - s->cs_base);
7eee2a50 8016 gen_eob(s);
2c0262af
FB
8017 }
8018 break;
222a3336 8019 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
664e0f19
FB
8020 case 0x1c3: /* MOVNTI reg, mem */
8021 if (!(s->cpuid_features & CPUID_SSE2))
14ce26e7 8022 goto illegal_op;
664e0f19 8023 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
0af10c86 8024 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
8025 mod = (modrm >> 6) & 3;
8026 if (mod == 3)
8027 goto illegal_op;
8028 reg = ((modrm >> 3) & 7) | rex_r;
8029 /* generate a generic store */
0af10c86 8030 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
14ce26e7 8031 break;
664e0f19 8032 case 0x1ae:
0af10c86 8033 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
8034 mod = (modrm >> 6) & 3;
8035 op = (modrm >> 3) & 7;
8036 switch(op) {
8037 case 0: /* fxsave */
5fafdf24 8038 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
09d85fb8 8039 (s->prefix & PREFIX_LOCK))
14ce26e7 8040 goto illegal_op;
09d85fb8 8041 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
0fd14b72
FB
8042 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
8043 break;
8044 }
0af10c86 8045 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
773cdfcc 8046 gen_update_cc_op(s);
19e6c4b8 8047 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 8048 gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32((s->dflag == 2)));
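            /* The last argument tells the helper whether REX.W was present
               (dflag == 2), i.e. whether to use the 64-bit FXSAVE layout. */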
664e0f19
FB
8049 break;
8050 case 1: /* fxrstor */
5fafdf24 8051 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
09d85fb8 8052 (s->prefix & PREFIX_LOCK))
14ce26e7 8053 goto illegal_op;
09d85fb8 8054 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
0fd14b72
FB
8055 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
8056 break;
8057 }
0af10c86 8058 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
773cdfcc 8059 gen_update_cc_op(s);
19e6c4b8 8060 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae
BS
8061 gen_helper_fxrstor(cpu_env, cpu_A0,
8062 tcg_const_i32((s->dflag == 2)));
664e0f19
FB
8063 break;
8064 case 2: /* ldmxcsr */
8065 case 3: /* stmxcsr */
8066 if (s->flags & HF_TS_MASK) {
8067 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
8068 break;
14ce26e7 8069 }
664e0f19
FB
8070 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
8071 mod == 3)
14ce26e7 8072 goto illegal_op;
0af10c86 8073 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
664e0f19 8074 if (op == 2) {
57fec1fe 8075 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
20f8bd48 8076 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 8077 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
14ce26e7 8078 } else {
651ba608 8079 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
57fec1fe 8080 gen_op_st_T0_A0(OT_LONG + s->mem_index);
14ce26e7 8081 }
664e0f19
FB
8082 break;
8083 case 5: /* lfence */
8084 case 6: /* mfence */
8001c294 8085 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
664e0f19
FB
8086 goto illegal_op;
8087 break;
8f091a59
FB
8088 case 7: /* sfence / clflush */
8089 if ((modrm & 0xc7) == 0xc0) {
8090 /* sfence */
a35f3ec7 8091 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
8f091a59
FB
8092 if (!(s->cpuid_features & CPUID_SSE))
8093 goto illegal_op;
8094 } else {
8095 /* clflush */
8096 if (!(s->cpuid_features & CPUID_CLFLUSH))
8097 goto illegal_op;
0af10c86 8098 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8f091a59
FB
8099 }
8100 break;
664e0f19 8101 default:
14ce26e7
FB
8102 goto illegal_op;
8103 }
8104 break;
a35f3ec7 8105 case 0x10d: /* 3DNow! prefetch(w) */
0af10c86 8106 modrm = cpu_ldub_code(env, s->pc++);
a35f3ec7
AJ
8107 mod = (modrm >> 6) & 3;
8108 if (mod == 3)
8109 goto illegal_op;
0af10c86 8110 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8f091a59
FB
8111 /* ignore for now */
8112 break;
3b21e03e 8113 case 0x1aa: /* rsm */
872929aa 8114 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
3b21e03e
FB
8115 if (!(s->flags & HF_SMM_MASK))
8116 goto illegal_op;
728d803b 8117 gen_update_cc_op(s);
3b21e03e 8118 gen_jmp_im(s->pc - s->cs_base);
608badfc 8119 gen_helper_rsm(cpu_env);
3b21e03e
FB
8120 gen_eob(s);
8121 break;
222a3336
AZ
8122 case 0x1b8: /* SSE4.2 popcnt */
8123 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
8124 PREFIX_REPZ)
8125 goto illegal_op;
8126 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
8127 goto illegal_op;
8128
0af10c86 8129 modrm = cpu_ldub_code(env, s->pc++);
8b4a3df8 8130 reg = ((modrm >> 3) & 7) | rex_r;
222a3336
AZ
8131
8132 if (s->prefix & PREFIX_DATA)
8133 ot = OT_WORD;
8134 else if (s->dflag != 2)
8135 ot = OT_LONG;
8136 else
8137 ot = OT_QUAD;
8138
0af10c86 8139 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
d3eb5eae 8140 gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
222a3336 8141 gen_op_mov_reg_T0(ot, reg);
fdb0d09d 8142
3ca51d07 8143 set_cc_op(s, CC_OP_EFLAGS);
222a3336 8144 break;
a35f3ec7
AJ
8145 case 0x10e ... 0x10f:
8146 /* 3DNow! instructions, ignore prefixes */
8147 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
664e0f19
FB
8148 case 0x110 ... 0x117:
8149 case 0x128 ... 0x12f:
4242b1bd 8150 case 0x138 ... 0x13a:
d9f4bb27 8151 case 0x150 ... 0x179:
664e0f19
FB
8152 case 0x17c ... 0x17f:
8153 case 0x1c2:
8154 case 0x1c4 ... 0x1c6:
8155 case 0x1d0 ... 0x1fe:
0af10c86 8156 gen_sse(env, s, b, pc_start, rex_r);
664e0f19 8157 break;
2c0262af
FB
8158 default:
8159 goto illegal_op;
8160 }
8161 /* lock generation */
8162 if (s->prefix & PREFIX_LOCK)
a7812ae4 8163 gen_helper_unlock();
2c0262af
FB
8164 return s->pc;
8165 illegal_op:
ab1f142b 8166 if (s->prefix & PREFIX_LOCK)
a7812ae4 8167 gen_helper_unlock();
2c0262af
FB
8168 /* XXX: ensure that no lock was generated */
8169 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
8170 return s->pc;
8171}
8172
2c0262af
FB
8173void optimize_flags_init(void)
8174{
a7812ae4
PB
8175 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
8176 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8177 offsetof(CPUX86State, cc_op), "cc_op");
317ac620 8178 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
a7812ae4 8179 "cc_dst");
a3251186
RH
8180 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
8181 "cc_src");
988c3eb0
RH
8182 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src2),
8183 "cc_src2");
437a88a5 8184
cc739bb0
LD
8185#ifdef TARGET_X86_64
8186 cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8187 offsetof(CPUX86State, regs[R_EAX]), "rax");
cc739bb0 8188 cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8189 offsetof(CPUX86State, regs[R_ECX]), "rcx");
cc739bb0 8190 cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8191 offsetof(CPUX86State, regs[R_EDX]), "rdx");
cc739bb0 8192 cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8193 offsetof(CPUX86State, regs[R_EBX]), "rbx");
cc739bb0 8194 cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8195 offsetof(CPUX86State, regs[R_ESP]), "rsp");
cc739bb0 8196 cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8197 offsetof(CPUX86State, regs[R_EBP]), "rbp");
cc739bb0 8198 cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8199 offsetof(CPUX86State, regs[R_ESI]), "rsi");
cc739bb0 8200 cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8201 offsetof(CPUX86State, regs[R_EDI]), "rdi");
cc739bb0 8202 cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8203 offsetof(CPUX86State, regs[8]), "r8");
cc739bb0 8204 cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8205 offsetof(CPUX86State, regs[9]), "r9");
cc739bb0 8206 cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8207 offsetof(CPUX86State, regs[10]), "r10");
cc739bb0 8208 cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8209 offsetof(CPUX86State, regs[11]), "r11");
cc739bb0 8210 cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8211 offsetof(CPUX86State, regs[12]), "r12");
cc739bb0 8212 cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8213 offsetof(CPUX86State, regs[13]), "r13");
cc739bb0 8214 cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8215 offsetof(CPUX86State, regs[14]), "r14");
cc739bb0 8216 cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8217 offsetof(CPUX86State, regs[15]), "r15");
cc739bb0
LD
8218#else
8219 cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8220 offsetof(CPUX86State, regs[R_EAX]), "eax");
cc739bb0 8221 cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8222 offsetof(CPUX86State, regs[R_ECX]), "ecx");
cc739bb0 8223 cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8224 offsetof(CPUX86State, regs[R_EDX]), "edx");
cc739bb0 8225 cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8226 offsetof(CPUX86State, regs[R_EBX]), "ebx");
cc739bb0 8227 cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8228 offsetof(CPUX86State, regs[R_ESP]), "esp");
cc739bb0 8229 cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8230 offsetof(CPUX86State, regs[R_EBP]), "ebp");
cc739bb0 8231 cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8232 offsetof(CPUX86State, regs[R_ESI]), "esi");
cc739bb0 8233 cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8234 offsetof(CPUX86State, regs[R_EDI]), "edi");
cc739bb0
LD
8235#endif
8236
437a88a5 8237 /* register helpers */
a7812ae4 8238#define GEN_HELPER 2
437a88a5 8239#include "helper.h"
2c0262af
FB
8240}
8241
8242/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8243 basic block 'tb'. If search_pc is TRUE, also generate PC
8244 information for each intermediate instruction. */
317ac620 8245static inline void gen_intermediate_code_internal(CPUX86State *env,
2cfc5f17
TS
8246 TranslationBlock *tb,
8247 int search_pc)
2c0262af
FB
8248{
8249 DisasContext dc1, *dc = &dc1;
14ce26e7 8250 target_ulong pc_ptr;
2c0262af 8251 uint16_t *gen_opc_end;
a1d1bb31 8252 CPUBreakpoint *bp;
7f5b7d3e 8253 int j, lj;
c068688b 8254 uint64_t flags;
14ce26e7
FB
8255 target_ulong pc_start;
8256 target_ulong cs_base;
2e70f6ef
PB
8257 int num_insns;
8258 int max_insns;
3b46e624 8259
2c0262af 8260 /* generate intermediate code */
14ce26e7
FB
8261 pc_start = tb->pc;
8262 cs_base = tb->cs_base;
2c0262af 8263 flags = tb->flags;
3a1d9b8b 8264
4f31916f 8265 dc->pe = (flags >> HF_PE_SHIFT) & 1;
2c0262af
FB
8266 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
8267 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
8268 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
8269 dc->f_st = 0;
8270 dc->vm86 = (flags >> VM_SHIFT) & 1;
8271 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
8272 dc->iopl = (flags >> IOPL_SHIFT) & 3;
8273 dc->tf = (flags >> TF_SHIFT) & 1;
34865134 8274 dc->singlestep_enabled = env->singlestep_enabled;
2c0262af 8275 dc->cc_op = CC_OP_DYNAMIC;
e207582f 8276 dc->cc_op_dirty = false;
2c0262af
FB
8277 dc->cs_base = cs_base;
8278 dc->tb = tb;
8279 dc->popl_esp_hack = 0;
8280 /* select memory access functions */
8281 dc->mem_index = 0;
8282 if (flags & HF_SOFTMMU_MASK) {
a9321a4d 8283 dc->mem_index = (cpu_mmu_index(env) + 1) << 2;
2c0262af 8284 }
14ce26e7 8285 dc->cpuid_features = env->cpuid_features;
3d7374c5 8286 dc->cpuid_ext_features = env->cpuid_ext_features;
e771edab 8287 dc->cpuid_ext2_features = env->cpuid_ext2_features;
12e26b75 8288 dc->cpuid_ext3_features = env->cpuid_ext3_features;
a9321a4d 8289 dc->cpuid_7_0_ebx_features = env->cpuid_7_0_ebx_features;
14ce26e7
FB
8290#ifdef TARGET_X86_64
8291 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
8292 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
8293#endif
7eee2a50 8294 dc->flags = flags;
a2cc3b24
FB
8295 dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
8296 (flags & HF_INHIBIT_IRQ_MASK)
415fa2ea 8297#ifndef CONFIG_SOFTMMU
2c0262af
FB
8298 || (flags & HF_SOFTMMU_MASK)
8299#endif
8300 );
4f31916f
FB
8301#if 0
8302 /* check addseg logic */
dc196a57 8303 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
4f31916f
FB
8304 printf("ERROR addseg\n");
8305#endif
8306
a7812ae4
PB
8307 cpu_T[0] = tcg_temp_new();
8308 cpu_T[1] = tcg_temp_new();
8309 cpu_A0 = tcg_temp_new();
a7812ae4
PB
8310
8311 cpu_tmp0 = tcg_temp_new();
8312 cpu_tmp1_i64 = tcg_temp_new_i64();
8313 cpu_tmp2_i32 = tcg_temp_new_i32();
8314 cpu_tmp3_i32 = tcg_temp_new_i32();
8315 cpu_tmp4 = tcg_temp_new();
a7812ae4
PB
8316 cpu_ptr0 = tcg_temp_new_ptr();
8317 cpu_ptr1 = tcg_temp_new_ptr();
a3251186 8318 cpu_cc_srcT = tcg_temp_local_new();
57fec1fe 8319
92414b31 8320 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8321
8322 dc->is_jmp = DISAS_NEXT;
8323 pc_ptr = pc_start;
8324 lj = -1;
2e70f6ef
PB
8325 num_insns = 0;
8326 max_insns = tb->cflags & CF_COUNT_MASK;
8327 if (max_insns == 0)
8328 max_insns = CF_COUNT_MASK;
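    /* tb->cflags carries a per-TB instruction budget when icount is active;
       otherwise fall back to the maximum. */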
2c0262af 8329
806f352d 8330 gen_tb_start();
2c0262af 8331 for(;;) {
72cf2d4f
BS
8332 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
8333 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a2397807
JK
8334 if (bp->pc == pc_ptr &&
8335 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
2c0262af
FB
8336 gen_debug(dc, pc_ptr - dc->cs_base);
8337 break;
8338 }
8339 }
8340 }
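        /* In search_pc mode, record the guest PC, cc_op and instruction count
           for every generated op so that restore_state_to_opc() can map a
           host fault back to a precise guest state. */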
8341 if (search_pc) {
92414b31 8342 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
8343 if (lj < j) {
8344 lj++;
8345 while (lj < j)
ab1103de 8346 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 8347 }
25983cad 8348 tcg_ctx.gen_opc_pc[lj] = pc_ptr;
2c0262af 8349 gen_opc_cc_op[lj] = dc->cc_op;
ab1103de 8350 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 8351 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 8352 }
2e70f6ef
PB
8353 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8354 gen_io_start();
8355
0af10c86 8356 pc_ptr = disas_insn(env, dc, pc_ptr);
2e70f6ef 8357 num_insns++;
2c0262af
FB
8358 /* stop translation if indicated */
8359 if (dc->is_jmp)
8360 break;
8361 /* in single-step mode, we generate only one instruction and
8362 generate an exception */
a2cc3b24
FB
8363 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8364 the flag and abort the translation to give the irqs a
8365 chance to happen */
5fafdf24 8366 if (dc->tf || dc->singlestep_enabled ||
2e70f6ef 8367 (flags & HF_INHIBIT_IRQ_MASK)) {
14ce26e7 8368 gen_jmp_im(pc_ptr - dc->cs_base);
2c0262af
FB
8369 gen_eob(dc);
8370 break;
8371 }
8372 /* if the translation becomes too long, stop generating as well */
efd7f486 8373 if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
2e70f6ef
PB
8374 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
8375 num_insns >= max_insns) {
14ce26e7 8376 gen_jmp_im(pc_ptr - dc->cs_base);
2c0262af
FB
8377 gen_eob(dc);
8378 break;
8379 }
1b530a6d
AJ
8380 if (singlestep) {
8381 gen_jmp_im(pc_ptr - dc->cs_base);
8382 gen_eob(dc);
8383 break;
8384 }
2c0262af 8385 }
2e70f6ef
PB
8386 if (tb->cflags & CF_LAST_IO)
8387 gen_io_end();
806f352d 8388 gen_tb_end(tb, num_insns);
efd7f486 8389 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
8390 /* don't forget to fill in the last values */
8391 if (search_pc) {
92414b31 8392 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
8393 lj++;
8394 while (lj <= j)
ab1103de 8395 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 8396 }
3b46e624 8397
2c0262af 8398#ifdef DEBUG_DISAS
8fec2b8c 8399 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
14ce26e7 8400 int disas_flags;
93fcfe39
AL
8401 qemu_log("----------------\n");
8402 qemu_log("IN: %s\n", lookup_symbol(pc_start));
14ce26e7
FB
8403#ifdef TARGET_X86_64
8404 if (dc->code64)
8405 disas_flags = 2;
8406 else
8407#endif
8408 disas_flags = !dc->code32;
f4359b9f 8409 log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
93fcfe39 8410 qemu_log("\n");
2c0262af
FB
8411 }
8412#endif
8413
2e70f6ef 8414 if (!search_pc) {
2c0262af 8415 tb->size = pc_ptr - pc_start;
2e70f6ef
PB
8416 tb->icount = num_insns;
8417 }
2c0262af
FB
8418}
8419
317ac620 8420void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
2c0262af 8421{
2cfc5f17 8422 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
8423}
8424
317ac620 8425void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
2c0262af 8426{
2cfc5f17 8427 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
8428}
8429
317ac620 8430void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
d2856f1a
AJ
8431{
8432 int cc_op;
8433#ifdef DEBUG_DISAS
8fec2b8c 8434 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
d2856f1a 8435 int i;
93fcfe39 8436 qemu_log("RESTORE:\n");
d2856f1a 8437 for(i = 0;i <= pc_pos; i++) {
ab1103de 8438 if (tcg_ctx.gen_opc_instr_start[i]) {
25983cad
EV
8439 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
8440 tcg_ctx.gen_opc_pc[i]);
d2856f1a
AJ
8441 }
8442 }
e87b7cb0 8443 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
25983cad 8444 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
d2856f1a
AJ
8445 (uint32_t)tb->cs_base);
8446 }
8447#endif
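    /* Restore the guest EIP of the faulting instruction and, when it was
       statically known at translation time, the lazy cc_op as well. */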
25983cad 8448 env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
d2856f1a
AJ
8449 cc_op = gen_opc_cc_op[pc_pos];
8450 if (cc_op != CC_OP_DYNAMIC)
8451 env->cc_op = cc_op;
8452}