/*
 * i386 translation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#define PREFIX_REPZ 0x01
#define PREFIX_REPNZ 0x02
#define PREFIX_LOCK 0x04
#define PREFIX_DATA 0x08
#define PREFIX_ADR 0x10
#define PREFIX_VEX 0x20

#ifdef TARGET_X86_64
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
#else
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif

#ifdef TARGET_X86_64
# define ctztl ctz64
# define clztl clz64
#else
# define ctztl ctz32
# define clztl clz32
#endif

//#define MACRO_TEST 1

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
/* local temps */
static TCGv cpu_T[2];
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

#include "exec/gen-icount.h"

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif

typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    int aflag, dflag;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
#endif
    int vex_l;  /* vex vector length */
    int vex_v;  /* vex vvvv register, without 1's complement. */
    int ss32;   /* 32 bit stack segment */
    CCOp cc_op;  /* current CC operation */
    bool cc_op_dirty;
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
} DisasContext;

static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, int ot, int d);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X. */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
};

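/* Record that the condition codes are now described by CC_OP 'op',
   discarding any of cpu_cc_dst/src/src2/srcT that the new op does not
   use (per cc_op_live above) so TCG can treat them as dead. */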
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used. */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(cpu_cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored. Thus we always consider it clean. */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts). */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

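/* Flush the translator's cc_op into the cpu_cc_op global, so that the
   runtime helpers see which lazy-flags operation is pending. */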
static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

static inline void gen_op_movl_T0_0(void)
{
    tcg_gen_movi_tl(cpu_T[0], 0);
}

static inline void gen_op_movl_T0_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T0_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T1_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_T1_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_A0_im(uint32_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_im(int64_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}
#endif

static inline void gen_movtl_T0_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_movtl_T1_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_andl_T0_ffff(void)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
}

static inline void gen_op_andl_T0_im(uint32_t val)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
}

static inline void gen_op_movl_T0_T1(void)
{
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
}

static inline void gen_op_andl_A0_ffff(void)
{
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
{
    if (reg < 4) {
        return false;
    }
#ifdef TARGET_X86_64
    if (reg >= 8 || x86_64_hregs) {
        return false;
    }
#endif
    return true;
}

static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
{
    switch(ot) {
    case MO_8:
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
        } else {
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        }
        break;
    case MO_16:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        break;
    default: /* XXX this shouldn't be reached; abort? */
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
#endif
    }
}

static inline void gen_op_mov_reg_T0(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);
}

static inline void gen_op_mov_reg_T1(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[1]);
}

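/* Note: for gen_op_mov_reg_A0 the "size" argument is the 0/1/2
   address-size code (0 = 16 bit, 1 = 32 bit, 2 = 64 bit), so the
   MO_8/MO_16/MO_32 case labels below stand only for their numeric
   values, not for their usual data-size meaning. */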
static inline void gen_op_mov_reg_A0(int size, int reg)
{
    switch(size) {
    case MO_8:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
        break;
    default: /* XXX this shouldn't be reached; abort? */
    case MO_16:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
        break;
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
        break;
#endif
    }
}

static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
        tcg_gen_ext8u_tl(t0, t0);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
{
    gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
}

static inline void gen_op_movl_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addl_A0_im(int32_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_addq_A0_im(int64_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif

static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
#endif
        gen_op_addl_A0_im(val);
}

static inline void gen_op_addl_T0_T1(void)
{
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
}

static inline void gen_op_jmp_T0(void)
{
    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
}

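/* As for gen_op_mov_reg_A0 above, "size" here is the 0/1/2
   address-size code (16/32/64 bit), not an MO_* data size. */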
static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
{
    switch(size) {
    case MO_8:
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
        break;
    case MO_16:
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        break;
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
        break;
#endif
    }
}

static inline void gen_op_add_reg_T0(int size, int reg)
{
    switch(size) {
    case MO_8:
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
        break;
    case MO_16:
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        break;
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
        break;
#endif
    }
}

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}

static inline void gen_op_movl_A0_seg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    } else {
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
    }
#else
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
}

static inline void gen_op_addq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}

static inline void gen_op_movq_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
#endif

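/* Guest memory accesses: 'idx' is the MO_* size of the access; x86 data
   accesses are little-endian, hence the MO_LE in the memop, and
   s->mem_index selects the MMU index to use. */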
static inline void gen_op_lds_T0_A0(DisasContext *s, int idx)
{
    tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0, s->mem_index, idx | MO_LE | MO_SIGN);
}

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

/* XXX: always use ldu or lds */
static inline void gen_op_ld_T0_A0(DisasContext *s, int idx)
{
    gen_op_ld_v(s, idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ldu_T0_A0(DisasContext *s, int idx)
{
    gen_op_ld_v(s, idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ld_T1_A0(DisasContext *s, int idx)
{
    gen_op_ld_v(s, idx, cpu_T[1], cpu_A0);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_T0_A0(DisasContext *s, int idx)
{
    gen_op_st_v(s, idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_st_T1_A0(DisasContext *s, int idx)
{
    gen_op_st_v(s, idx, cpu_T[1], cpu_A0);
}

static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
}

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    int override;

    override = s->override;
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        if (override >= 0) {
            gen_op_movq_A0_seg(override);
            gen_op_addq_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movq_A0_reg(R_ESI);
        }
    } else
#endif
    if (s->aflag) {
        /* 32 bit address */
        if (s->addseg && override < 0)
            override = R_DS;
        if (override >= 0) {
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movl_A0_reg(R_ESI);
        }
    } else {
        /* 16 address, always override */
        if (override < 0)
            override = R_DS;
        gen_op_movl_A0_reg(R_ESI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(s, override);
    }
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        gen_op_movq_A0_reg(R_EDI);
    } else
#endif
    if (s->aflag) {
        if (s->addseg) {
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
        } else {
            gen_op_movl_A0_reg(R_EDI);
        }
    } else {
        gen_op_movl_A0_reg(R_EDI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(s, R_ES);
    }
}

static inline void gen_op_movl_T0_Dshift(int ot)
{
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
};

static TCGv gen_ext_tl(TCGv dst, TCGv src, int size, bool sign)
{
    switch (size) {
    case MO_8:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case MO_16:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case MO_32:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
        return src;
    }
}

static void gen_extu(int ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(int ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static inline void gen_op_jnz_ecx(int size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(int size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}

static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, n);
        break;
    case MO_16:
        gen_helper_inw(v, n);
        break;
    case MO_32:
        gen_helper_inl(v, n);
        break;
    }
}

static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(v, n);
        break;
    case MO_16:
        gen_helper_outw(v, n);
        break;
    case MO_32:
        gen_helper_outl(v, n);
        break;
    }
}

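/* Generate the protected-mode I/O permission check (and, when the guest
   runs under SVM, the IOIO intercept check) before an in/out or
   ins/outs instruction. */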
static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    int state_saved;
    target_ulong next_eip;

    state_saved = 0;
    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        gen_update_cc_op(s);
        gen_jmp_im(cur_eip);
        state_saved = 1;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        switch (ot) {
        case MO_8:
            gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
            break;
        case MO_16:
            gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
            break;
        case MO_32:
            gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
            break;
        }
    }
    if(s->flags & HF_SVMI_MASK) {
        if (!state_saved) {
            gen_update_cc_op(s);
            gen_jmp_im(cur_eip);
        }
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
                                tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}

static inline void gen_movs(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(s, ot);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(s, ot);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

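/* Copy the relevant T0/T1 values (plus, for ADC/SBB, the carry-in) into
   the lazy condition-code globals cpu_cc_dst/cpu_cc_src/cpu_cc_src2. */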
static void gen_op_update1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update3_cc(TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_movi_tl(cpu_cc_srcT, 0);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    TCGV_UNUSED(zero);
    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live. */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        zero = tcg_const_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_update_cc_op(s);
    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);

    if (dead) {
        tcg_temp_free(zero);
    }
}

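/* Describes how to evaluate a flag or condition without materializing
   the full EFLAGS word: roughly, test "(reg & mask) cond reg2/imm",
   with no_setcond set when reg already holds the final value. */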
typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;

/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0. */
        t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
        tcg_gen_mov_tl(t0, cpu_cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB. */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}

/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            int size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    default:
        {
            int size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}

c365395e
PB
1053/* perform a conditional store into register 'reg' according to jump opcode
1054 value 'b'. In the fast case, T0 is guaranted not to be used. */
276e6b5f 1055static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
8e1c85e3 1056{
c365395e 1057 int inv, jcc_op, size, cond;
276e6b5f 1058 CCPrepare cc;
c365395e
PB
1059 TCGv t0;
1060
1061 inv = b & 1;
8e1c85e3 1062 jcc_op = (b >> 1) & 7;
c365395e
PB
1063
1064 switch (s->cc_op) {
69d1aa31
RH
1065 case CC_OP_SUBB ... CC_OP_SUBQ:
1066 /* We optimize relational operators for the cmp/jcc case. */
c365395e
PB
1067 size = s->cc_op - CC_OP_SUBB;
1068 switch (jcc_op) {
1069 case JCC_BE:
a3251186 1070 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
c365395e
PB
1071 gen_extu(size, cpu_tmp4);
1072 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
276e6b5f
RH
1073 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
1074 .reg2 = t0, .mask = -1, .use_reg2 = true };
c365395e 1075 break;
8e1c85e3 1076
c365395e 1077 case JCC_L:
276e6b5f 1078 cond = TCG_COND_LT;
c365395e
PB
1079 goto fast_jcc_l;
1080 case JCC_LE:
276e6b5f 1081 cond = TCG_COND_LE;
c365395e 1082 fast_jcc_l:
a3251186 1083 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
c365395e
PB
1084 gen_exts(size, cpu_tmp4);
1085 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
276e6b5f
RH
1086 cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
1087 .reg2 = t0, .mask = -1, .use_reg2 = true };
c365395e 1088 break;
8e1c85e3 1089
c365395e 1090 default:
8e1c85e3 1091 goto slow_jcc;
c365395e 1092 }
8e1c85e3 1093 break;
c365395e 1094
8e1c85e3
FB
1095 default:
1096 slow_jcc:
69d1aa31
RH
1097 /* This actually generates good code for JC, JZ and JS. */
1098 switch (jcc_op) {
1099 case JCC_O:
1100 cc = gen_prepare_eflags_o(s, reg);
1101 break;
1102 case JCC_B:
1103 cc = gen_prepare_eflags_c(s, reg);
1104 break;
1105 case JCC_Z:
1106 cc = gen_prepare_eflags_z(s, reg);
1107 break;
1108 case JCC_BE:
1109 gen_compute_eflags(s);
1110 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1111 .mask = CC_Z | CC_C };
1112 break;
1113 case JCC_S:
1114 cc = gen_prepare_eflags_s(s, reg);
1115 break;
1116 case JCC_P:
1117 cc = gen_prepare_eflags_p(s, reg);
1118 break;
1119 case JCC_L:
1120 gen_compute_eflags(s);
1121 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1122 reg = cpu_tmp0;
1123 }
1124 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1125 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1126 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1127 .mask = CC_S };
1128 break;
1129 default:
1130 case JCC_LE:
1131 gen_compute_eflags(s);
1132 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1133 reg = cpu_tmp0;
1134 }
1135 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1136 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1137 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1138 .mask = CC_S | CC_Z };
1139 break;
1140 }
c365395e 1141 break;
8e1c85e3 1142 }
276e6b5f
RH
1143
1144 if (inv) {
1145 cc.cond = tcg_invert_cond(cc.cond);
1146 }
1147 return cc;
8e1c85e3
FB
1148}
1149
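/* Materialize condition 'b' (a jcc encoding) as a value in 'reg', using
   the CCPrepare descriptor so that the full EFLAGS word is only computed
   when no cheaper test is available. */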
cc8b6f5b
PB
1150static void gen_setcc1(DisasContext *s, int b, TCGv reg)
1151{
1152 CCPrepare cc = gen_prepare_cc(s, b, reg);
1153
1154 if (cc.no_setcond) {
1155 if (cc.cond == TCG_COND_EQ) {
1156 tcg_gen_xori_tl(reg, cc.reg, 1);
1157 } else {
1158 tcg_gen_mov_tl(reg, cc.reg);
1159 }
1160 return;
1161 }
1162
1163 if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
1164 cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
1165 tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
1166 tcg_gen_andi_tl(reg, reg, 1);
1167 return;
1168 }
1169 if (cc.mask != -1) {
1170 tcg_gen_andi_tl(reg, cc.reg, cc.mask);
1171 cc.reg = reg;
1172 }
1173 if (cc.use_reg2) {
1174 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1175 } else {
1176 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1177 }
1178}
1179
1180static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1181{
1182 gen_setcc1(s, JCC_B << 1, reg);
1183}
276e6b5f 1184
8e1c85e3
FB
1185/* generate a conditional jump to label 'l1' according to jump opcode
1186 value 'b'. In the fast case, T0 is guaranted not to be used. */
dc259201
RH
1187static inline void gen_jcc1_noeob(DisasContext *s, int b, int l1)
1188{
1189 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
1190
1191 if (cc.mask != -1) {
1192 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1193 cc.reg = cpu_T[0];
1194 }
1195 if (cc.use_reg2) {
1196 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1197 } else {
1198 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1199 }
1200}
1201
1202/* Generate a conditional jump to label 'l1' according to jump opcode
1203 value 'b'. In the fast case, T0 is guaranted not to be used.
1204 A translation block must end soon. */
b27fc131 1205static inline void gen_jcc1(DisasContext *s, int b, int l1)
8e1c85e3 1206{
943131ca 1207 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
8e1c85e3 1208
dc259201 1209 gen_update_cc_op(s);
943131ca
PB
1210 if (cc.mask != -1) {
1211 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1212 cc.reg = cpu_T[0];
1213 }
dc259201 1214 set_cc_op(s, CC_OP_DYNAMIC);
943131ca
PB
1215 if (cc.use_reg2) {
1216 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1217 } else {
1218 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
8e1c85e3
FB
1219 }
1220}
1221
14ce26e7
FB
1222/* XXX: does not work with gdbstub "ice" single step - not a
1223 serious problem */
1224static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
2c0262af 1225{
14ce26e7
FB
1226 int l1, l2;
1227
1228 l1 = gen_new_label();
1229 l2 = gen_new_label();
6e0d8677 1230 gen_op_jnz_ecx(s->aflag, l1);
14ce26e7
FB
1231 gen_set_label(l2);
1232 gen_jmp_tb(s, next_eip, 1);
1233 gen_set_label(l1);
1234 return l2;
2c0262af
FB
1235}
1236
1237static inline void gen_stos(DisasContext *s, int ot)
1238{
4ba9938c 1239 gen_op_mov_TN_reg(MO_32, 0, R_EAX);
2c0262af 1240 gen_string_movl_A0_EDI(s);
323d1876 1241 gen_op_st_T0_A0(s, ot);
6e0d8677
FB
1242 gen_op_movl_T0_Dshift(ot);
1243 gen_op_add_reg_T0(s->aflag, R_EDI);
2c0262af
FB
1244}
1245
1246static inline void gen_lods(DisasContext *s, int ot)
1247{
1248 gen_string_movl_A0_ESI(s);
323d1876 1249 gen_op_ld_T0_A0(s, ot);
57fec1fe 1250 gen_op_mov_reg_T0(ot, R_EAX);
6e0d8677
FB
1251 gen_op_movl_T0_Dshift(ot);
1252 gen_op_add_reg_T0(s->aflag, R_ESI);
2c0262af
FB
1253}
1254
1255static inline void gen_scas(DisasContext *s, int ot)
1256{
2c0262af 1257 gen_string_movl_A0_EDI(s);
323d1876 1258 gen_op_ld_T1_A0(s, ot);
63633fe6 1259 gen_op(s, OP_CMPL, ot, R_EAX);
6e0d8677
FB
1260 gen_op_movl_T0_Dshift(ot);
1261 gen_op_add_reg_T0(s->aflag, R_EDI);
2c0262af
FB
1262}
1263
1264static inline void gen_cmps(DisasContext *s, int ot)
1265{
2c0262af 1266 gen_string_movl_A0_EDI(s);
323d1876 1267 gen_op_ld_T1_A0(s, ot);
63633fe6
RH
1268 gen_string_movl_A0_ESI(s);
1269 gen_op(s, OP_CMPL, ot, OR_TMP0);
6e0d8677
FB
1270 gen_op_movl_T0_Dshift(ot);
1271 gen_op_add_reg_T0(s->aflag, R_ESI);
1272 gen_op_add_reg_T0(s->aflag, R_EDI);
2c0262af
FB
1273}
1274
1275static inline void gen_ins(DisasContext *s, int ot)
1276{
2e70f6ef
PB
1277 if (use_icount)
1278 gen_io_start();
2c0262af 1279 gen_string_movl_A0_EDI(s);
6e0d8677
FB
1280 /* Note: we must do this dummy write first to be restartable in
1281 case of page fault. */
9772c73b 1282 gen_op_movl_T0_0();
323d1876 1283 gen_op_st_T0_A0(s, ot);
4ba9938c 1284 gen_op_mov_TN_reg(MO_16, 1, R_EDX);
b6abf97d
FB
1285 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1286 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
a7812ae4 1287 gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
323d1876 1288 gen_op_st_T0_A0(s, ot);
6e0d8677
FB
1289 gen_op_movl_T0_Dshift(ot);
1290 gen_op_add_reg_T0(s->aflag, R_EDI);
2e70f6ef
PB
1291 if (use_icount)
1292 gen_io_end();
2c0262af
FB
1293}
1294
1295static inline void gen_outs(DisasContext *s, int ot)
1296{
2e70f6ef
PB
1297 if (use_icount)
1298 gen_io_start();
2c0262af 1299 gen_string_movl_A0_ESI(s);
323d1876 1300 gen_op_ld_T0_A0(s, ot);
b8b6a50b 1301
4ba9938c 1302 gen_op_mov_TN_reg(MO_16, 1, R_EDX);
b6abf97d
FB
1303 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1304 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1305 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
a7812ae4 1306 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
b8b6a50b 1307
6e0d8677
FB
1308 gen_op_movl_T0_Dshift(ot);
1309 gen_op_add_reg_T0(s->aflag, R_ESI);
2e70f6ef
PB
1310 if (use_icount)
1311 gen_io_end();
2c0262af
FB
1312}
1313
1314/* same method as Valgrind : we generate jumps to current or next
1315 instruction */
1316#define GEN_REPZ(op) \
1317static inline void gen_repz_ ## op(DisasContext *s, int ot, \
14ce26e7 1318 target_ulong cur_eip, target_ulong next_eip) \
2c0262af 1319{ \
14ce26e7 1320 int l2;\
2c0262af 1321 gen_update_cc_op(s); \
14ce26e7 1322 l2 = gen_jz_ecx_string(s, next_eip); \
2c0262af 1323 gen_ ## op(s, ot); \
6e0d8677 1324 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
2c0262af
FB
1325 /* a loop would cause two single step exceptions if ECX = 1 \
1326 before rep string_insn */ \
1327 if (!s->jmp_opt) \
6e0d8677 1328 gen_op_jz_ecx(s->aflag, l2); \
2c0262af
FB
1329 gen_jmp(s, cur_eip); \
1330}
1331
1332#define GEN_REPZ2(op) \
1333static inline void gen_repz_ ## op(DisasContext *s, int ot, \
14ce26e7
FB
1334 target_ulong cur_eip, \
1335 target_ulong next_eip, \
2c0262af
FB
1336 int nz) \
1337{ \
14ce26e7 1338 int l2;\
2c0262af 1339 gen_update_cc_op(s); \
14ce26e7 1340 l2 = gen_jz_ecx_string(s, next_eip); \
2c0262af 1341 gen_ ## op(s, ot); \
6e0d8677 1342 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
773cdfcc 1343 gen_update_cc_op(s); \
b27fc131 1344 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
2c0262af 1345 if (!s->jmp_opt) \
6e0d8677 1346 gen_op_jz_ecx(s->aflag, l2); \
2c0262af
FB
1347 gen_jmp(s, cur_eip); \
1348}
1349
1350GEN_REPZ(movs)
1351GEN_REPZ(stos)
1352GEN_REPZ(lods)
1353GEN_REPZ(ins)
1354GEN_REPZ(outs)
1355GEN_REPZ2(scas)
1356GEN_REPZ2(cmps)
1357
a7812ae4
PB
1358static void gen_helper_fp_arith_ST0_FT0(int op)
1359{
1360 switch (op) {
d3eb5eae
BS
1361 case 0:
1362 gen_helper_fadd_ST0_FT0(cpu_env);
1363 break;
1364 case 1:
1365 gen_helper_fmul_ST0_FT0(cpu_env);
1366 break;
1367 case 2:
1368 gen_helper_fcom_ST0_FT0(cpu_env);
1369 break;
1370 case 3:
1371 gen_helper_fcom_ST0_FT0(cpu_env);
1372 break;
1373 case 4:
1374 gen_helper_fsub_ST0_FT0(cpu_env);
1375 break;
1376 case 5:
1377 gen_helper_fsubr_ST0_FT0(cpu_env);
1378 break;
1379 case 6:
1380 gen_helper_fdiv_ST0_FT0(cpu_env);
1381 break;
1382 case 7:
1383 gen_helper_fdivr_ST0_FT0(cpu_env);
1384 break;
a7812ae4
PB
1385 }
1386}
2c0262af
FB
1387
1388/* NOTE the exception in "r" op ordering */
a7812ae4
PB
1389static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1390{
1391 TCGv_i32 tmp = tcg_const_i32(opreg);
1392 switch (op) {
d3eb5eae
BS
1393 case 0:
1394 gen_helper_fadd_STN_ST0(cpu_env, tmp);
1395 break;
1396 case 1:
1397 gen_helper_fmul_STN_ST0(cpu_env, tmp);
1398 break;
1399 case 4:
1400 gen_helper_fsubr_STN_ST0(cpu_env, tmp);
1401 break;
1402 case 5:
1403 gen_helper_fsub_STN_ST0(cpu_env, tmp);
1404 break;
1405 case 6:
1406 gen_helper_fdivr_STN_ST0(cpu_env, tmp);
1407 break;
1408 case 7:
1409 gen_helper_fdiv_STN_ST0(cpu_env, tmp);
1410 break;
a7812ae4
PB
1411 }
1412}
2c0262af
FB
1413
1414/* if d == OR_TMP0, it means memory operand (address in A0) */
1415static void gen_op(DisasContext *s1, int op, int ot, int d)
1416{
2c0262af 1417 if (d != OR_TMP0) {
57fec1fe 1418 gen_op_mov_TN_reg(ot, 0, d);
2c0262af 1419 } else {
323d1876 1420 gen_op_ld_T0_A0(s1, ot);
2c0262af
FB
1421 }
1422 switch(op) {
1423 case OP_ADCL:
cc8b6f5b 1424 gen_compute_eflags_c(s1, cpu_tmp4);
cad3a37d
FB
1425 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1426 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1427 if (d != OR_TMP0)
1428 gen_op_mov_reg_T0(ot, d);
1429 else
323d1876 1430 gen_op_st_T0_A0(s1, ot);
988c3eb0
RH
1431 gen_op_update3_cc(cpu_tmp4);
1432 set_cc_op(s1, CC_OP_ADCB + ot);
cad3a37d 1433 break;
2c0262af 1434 case OP_SBBL:
cc8b6f5b 1435 gen_compute_eflags_c(s1, cpu_tmp4);
cad3a37d
FB
1436 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1437 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1438 if (d != OR_TMP0)
57fec1fe 1439 gen_op_mov_reg_T0(ot, d);
cad3a37d 1440 else
323d1876 1441 gen_op_st_T0_A0(s1, ot);
988c3eb0
RH
1442 gen_op_update3_cc(cpu_tmp4);
1443 set_cc_op(s1, CC_OP_SBBB + ot);
cad3a37d 1444 break;
2c0262af
FB
1445 case OP_ADDL:
1446 gen_op_addl_T0_T1();
cad3a37d
FB
1447 if (d != OR_TMP0)
1448 gen_op_mov_reg_T0(ot, d);
1449 else
323d1876 1450 gen_op_st_T0_A0(s1, ot);
cad3a37d 1451 gen_op_update2_cc();
3ca51d07 1452 set_cc_op(s1, CC_OP_ADDB + ot);
2c0262af
FB
1453 break;
1454 case OP_SUBL:
a3251186 1455 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
57fec1fe 1456 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
cad3a37d
FB
1457 if (d != OR_TMP0)
1458 gen_op_mov_reg_T0(ot, d);
1459 else
323d1876 1460 gen_op_st_T0_A0(s1, ot);
cad3a37d 1461 gen_op_update2_cc();
3ca51d07 1462 set_cc_op(s1, CC_OP_SUBB + ot);
2c0262af
FB
1463 break;
1464 default:
1465 case OP_ANDL:
57fec1fe 1466 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
cad3a37d
FB
1467 if (d != OR_TMP0)
1468 gen_op_mov_reg_T0(ot, d);
1469 else
323d1876 1470 gen_op_st_T0_A0(s1, ot);
cad3a37d 1471 gen_op_update1_cc();
3ca51d07 1472 set_cc_op(s1, CC_OP_LOGICB + ot);
57fec1fe 1473 break;
2c0262af 1474 case OP_ORL:
57fec1fe 1475 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
cad3a37d
FB
1476 if (d != OR_TMP0)
1477 gen_op_mov_reg_T0(ot, d);
1478 else
323d1876 1479 gen_op_st_T0_A0(s1, ot);
cad3a37d 1480 gen_op_update1_cc();
3ca51d07 1481 set_cc_op(s1, CC_OP_LOGICB + ot);
57fec1fe 1482 break;
2c0262af 1483 case OP_XORL:
57fec1fe 1484 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
cad3a37d
FB
1485 if (d != OR_TMP0)
1486 gen_op_mov_reg_T0(ot, d);
1487 else
323d1876 1488 gen_op_st_T0_A0(s1, ot);
cad3a37d 1489 gen_op_update1_cc();
3ca51d07 1490 set_cc_op(s1, CC_OP_LOGICB + ot);
2c0262af
FB
1491 break;
1492 case OP_CMPL:
63633fe6 1493 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
a3251186 1494 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
63633fe6 1495 tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
3ca51d07 1496 set_cc_op(s1, CC_OP_SUBB + ot);
2c0262af
FB
1497 break;
1498 }
b6abf97d
FB
1499}
1500
2c0262af
FB
1501/* if d == OR_TMP0, it means memory operand (address in A0) */
1502static void gen_inc(DisasContext *s1, int ot, int d, int c)
1503{
1504 if (d != OR_TMP0)
57fec1fe 1505 gen_op_mov_TN_reg(ot, 0, d);
2c0262af 1506 else
323d1876 1507 gen_op_ld_T0_A0(s1, ot);
cc8b6f5b 1508 gen_compute_eflags_c(s1, cpu_cc_src);
2c0262af 1509 if (c > 0) {
b6abf97d 1510 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
3ca51d07 1511 set_cc_op(s1, CC_OP_INCB + ot);
2c0262af 1512 } else {
b6abf97d 1513 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
3ca51d07 1514 set_cc_op(s1, CC_OP_DECB + ot);
2c0262af
FB
1515 }
1516 if (d != OR_TMP0)
57fec1fe 1517 gen_op_mov_reg_T0(ot, d);
2c0262af 1518 else
323d1876 1519 gen_op_st_T0_A0(s1, ot);
cd31fefa 1520 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
2c0262af
FB
1521}
1522
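/* Update the lazy flags after a variable-count shift: a shift by zero
   must leave EFLAGS untouched, so the new CC values and the new CC_OP
   are stored with movcond, selected on count != 0. */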
f437d0a3
RH
1523static void gen_shift_flags(DisasContext *s, int ot, TCGv result, TCGv shm1,
1524 TCGv count, bool is_right)
1525{
1526 TCGv_i32 z32, s32, oldop;
1527 TCGv z_tl;
1528
1529 /* Store the results into the CC variables. If we know that the
1530 variable must be dead, store unconditionally. Otherwise we'll
1531 need to not disrupt the current contents. */
1532 z_tl = tcg_const_tl(0);
1533 if (cc_op_live[s->cc_op] & USES_CC_DST) {
1534 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
1535 result, cpu_cc_dst);
1536 } else {
1537 tcg_gen_mov_tl(cpu_cc_dst, result);
1538 }
1539 if (cc_op_live[s->cc_op] & USES_CC_SRC) {
1540 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
1541 shm1, cpu_cc_src);
1542 } else {
1543 tcg_gen_mov_tl(cpu_cc_src, shm1);
1544 }
1545 tcg_temp_free(z_tl);
1546
1547 /* Get the two potential CC_OP values into temporaries. */
1548 tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1549 if (s->cc_op == CC_OP_DYNAMIC) {
1550 oldop = cpu_cc_op;
1551 } else {
1552 tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
1553 oldop = cpu_tmp3_i32;
1554 }
1555
1556 /* Conditionally store the CC_OP value. */
1557 z32 = tcg_const_i32(0);
1558 s32 = tcg_temp_new_i32();
1559 tcg_gen_trunc_tl_i32(s32, count);
1560 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
1561 tcg_temp_free_i32(z32);
1562 tcg_temp_free_i32(s32);
1563
1564 /* The CC_OP value is no longer predictable. */
1565 set_cc_op(s, CC_OP_DYNAMIC);
1566}
1567
b6abf97d
FB
1568static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
1569 int is_right, int is_arith)
2c0262af 1570{
4ba9938c 1571 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
3b46e624 1572
b6abf97d 1573 /* load */
82786041 1574 if (op1 == OR_TMP0) {
323d1876 1575 gen_op_ld_T0_A0(s, ot);
82786041 1576 } else {
b6abf97d 1577 gen_op_mov_TN_reg(ot, 0, op1);
82786041 1578 }
b6abf97d 1579
a41f62f5
RH
1580 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
1581 tcg_gen_subi_tl(cpu_tmp0, cpu_T[1], 1);
b6abf97d
FB
1582
1583 if (is_right) {
1584 if (is_arith) {
f484d386 1585 gen_exts(ot, cpu_T[0]);
a41f62f5
RH
1586 tcg_gen_sar_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1587 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
b6abf97d 1588 } else {
cad3a37d 1589 gen_extu(ot, cpu_T[0]);
a41f62f5
RH
1590 tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1591 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
b6abf97d
FB
1592 }
1593 } else {
a41f62f5
RH
1594 tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1595 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
b6abf97d
FB
1596 }
1597
1598 /* store */
82786041 1599 if (op1 == OR_TMP0) {
323d1876 1600 gen_op_st_T0_A0(s, ot);
82786041 1601 } else {
b6abf97d 1602 gen_op_mov_reg_T0(ot, op1);
82786041
RH
1603 }
1604
f437d0a3 1605 gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, cpu_T[1], is_right);
b6abf97d
FB
1606}
1607
c1c37968
FB
1608static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
1609 int is_right, int is_arith)
1610{
4ba9938c 1611 int mask = (ot == MO_64 ? 0x3f : 0x1f);
c1c37968
FB
1612
1613 /* load */
1614 if (op1 == OR_TMP0)
323d1876 1615 gen_op_ld_T0_A0(s, ot);
c1c37968
FB
1616 else
1617 gen_op_mov_TN_reg(ot, 0, op1);
1618
1619 op2 &= mask;
1620 if (op2 != 0) {
1621 if (is_right) {
1622 if (is_arith) {
1623 gen_exts(ot, cpu_T[0]);
2a449d14 1624 tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
c1c37968
FB
1625 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
1626 } else {
1627 gen_extu(ot, cpu_T[0]);
2a449d14 1628 tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
c1c37968
FB
1629 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
1630 }
1631 } else {
2a449d14 1632 tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
c1c37968
FB
1633 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
1634 }
1635 }
1636
1637 /* store */
1638 if (op1 == OR_TMP0)
323d1876 1639 gen_op_st_T0_A0(s, ot);
c1c37968
FB
1640 else
1641 gen_op_mov_reg_T0(ot, op1);
1642
1643 /* update eflags if non zero shift */
1644 if (op2 != 0) {
2a449d14 1645 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
c1c37968 1646 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
3ca51d07 1647 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
c1c37968
FB
1648 }
1649}
1650
b6abf97d
FB
1651static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
1652{
1653 if (arg2 >= 0)
1654 tcg_gen_shli_tl(ret, arg1, arg2);
1655 else
1656 tcg_gen_shri_tl(ret, arg1, -arg2);
1657}
1658
34d80a55 1659static void gen_rot_rm_T1(DisasContext *s, int ot, int op1, int is_right)
b6abf97d 1660{
4ba9938c 1661 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
34d80a55 1662 TCGv_i32 t0, t1;
b6abf97d
FB
1663
1664 /* load */
1e4840bf 1665 if (op1 == OR_TMP0) {
323d1876 1666 gen_op_ld_T0_A0(s, ot);
1e4840bf 1667 } else {
34d80a55 1668 gen_op_mov_TN_reg(ot, 0, op1);
1e4840bf 1669 }
b6abf97d 1670
34d80a55 1671 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
b6abf97d 1672
34d80a55 1673 switch (ot) {
4ba9938c 1674 case MO_8:
34d80a55
RH
1675 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1676 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
1677 tcg_gen_muli_tl(cpu_T[0], cpu_T[0], 0x01010101);
1678 goto do_long;
4ba9938c 1679 case MO_16:
34d80a55
RH
1680 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1681 tcg_gen_deposit_tl(cpu_T[0], cpu_T[0], cpu_T[0], 16, 16);
1682 goto do_long;
1683 do_long:
1684#ifdef TARGET_X86_64
4ba9938c 1685 case MO_32:
34d80a55
RH
1686 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
1687 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
1688 if (is_right) {
1689 tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1690 } else {
1691 tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1692 }
1693 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
1694 break;
1695#endif
1696 default:
1697 if (is_right) {
1698 tcg_gen_rotr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1699 } else {
1700 tcg_gen_rotl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1701 }
1702 break;
b6abf97d 1703 }
b6abf97d 1704
b6abf97d 1705 /* store */
1e4840bf 1706 if (op1 == OR_TMP0) {
323d1876 1707 gen_op_st_T0_A0(s, ot);
1e4840bf 1708 } else {
34d80a55 1709 gen_op_mov_reg_T0(ot, op1);
1e4840bf 1710 }
b6abf97d 1711
34d80a55
RH
1712 /* We'll need the flags computed into CC_SRC. */
1713 gen_compute_eflags(s);
b6abf97d 1714
34d80a55
RH
1715 /* The value that was "rotated out" is now present at the other end
1716 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1717 since we've computed the flags into CC_SRC, these variables are
1718 currently dead. */
b6abf97d 1719 if (is_right) {
34d80a55
RH
1720 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
1721 tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
089305ac 1722 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
34d80a55
RH
1723 } else {
1724 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
1725 tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
b6abf97d 1726 }
34d80a55
RH
1727 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1728 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1729
1730 /* Now conditionally store the new CC_OP value. If the shift count
1731 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1732 Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
1733 exactly as we computed above. */
1734 t0 = tcg_const_i32(0);
1735 t1 = tcg_temp_new_i32();
1736 tcg_gen_trunc_tl_i32(t1, cpu_T[1]);
1737 tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
1738 tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
1739 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
1740 cpu_tmp2_i32, cpu_tmp3_i32);
1741 tcg_temp_free_i32(t0);
1742 tcg_temp_free_i32(t1);
1743
1744 /* The CC_OP value is no longer predictable. */
1745 set_cc_op(s, CC_OP_DYNAMIC);
b6abf97d
FB
1746}
1747
8cd6345d 1748static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
1749 int is_right)
1750{
4ba9938c 1751 int mask = (ot == MO_64 ? 0x3f : 0x1f);
34d80a55 1752 int shift;
8cd6345d 1753
1754 /* load */
1755 if (op1 == OR_TMP0) {
323d1876 1756 gen_op_ld_T0_A0(s, ot);
8cd6345d 1757 } else {
34d80a55 1758 gen_op_mov_TN_reg(ot, 0, op1);
8cd6345d 1759 }
1760
8cd6345d 1761 op2 &= mask;
8cd6345d 1762 if (op2 != 0) {
34d80a55
RH
1763 switch (ot) {
1764#ifdef TARGET_X86_64
4ba9938c 1765 case MO_32:
34d80a55
RH
1766 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
1767 if (is_right) {
1768 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1769 } else {
1770 tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1771 }
1772 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
1773 break;
1774#endif
1775 default:
1776 if (is_right) {
1777 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], op2);
1778 } else {
1779 tcg_gen_rotli_tl(cpu_T[0], cpu_T[0], op2);
1780 }
1781 break;
4ba9938c 1782 case MO_8:
34d80a55
RH
1783 mask = 7;
1784 goto do_shifts;
4ba9938c 1785 case MO_16:
34d80a55
RH
1786 mask = 15;
1787 do_shifts:
1788 shift = op2 & mask;
1789 if (is_right) {
1790 shift = mask + 1 - shift;
1791 }
1792 gen_extu(ot, cpu_T[0]);
1793 tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], shift);
1794 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], mask + 1 - shift);
1795 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
1796 break;
8cd6345d 1797 }
8cd6345d 1798 }
1799
1800 /* store */
1801 if (op1 == OR_TMP0) {
323d1876 1802 gen_op_st_T0_A0(s, ot);
8cd6345d 1803 } else {
34d80a55 1804 gen_op_mov_reg_T0(ot, op1);
8cd6345d 1805 }
1806
1807 if (op2 != 0) {
34d80a55 1808 /* Compute the flags into CC_SRC. */
d229edce 1809 gen_compute_eflags(s);
0ff6addd 1810
34d80a55
RH
1811 /* The value that was "rotated out" is now present at the other end
1812 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1813 since we've computed the flags into CC_SRC, these variables are
1814 currently dead. */
8cd6345d 1815 if (is_right) {
34d80a55
RH
1816 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
1817 tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
38ebb396 1818 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
34d80a55
RH
1819 } else {
1820 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
1821 tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
8cd6345d 1822 }
34d80a55
RH
1823 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1824 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1825 set_cc_op(s, CC_OP_ADCOX);
8cd6345d 1826 }
8cd6345d 1827}
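/* The immediate-count variant above resolves the same question at
   translation time: when op2 masks to zero, the rotate and the flag update
   are both skipped, so CC_OP can be set to CC_OP_ADCOX unconditionally in
   the non-zero case and never needs to become dynamic. */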
1828
b6abf97d
FB
1829/* XXX: add faster immediate = 1 case */
1830static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
1831 int is_right)
1832{
d229edce 1833 gen_compute_eflags(s);
c7b3c873 1834 assert(s->cc_op == CC_OP_EFLAGS);
b6abf97d
FB
1835
1836 /* load */
1837 if (op1 == OR_TMP0)
323d1876 1838 gen_op_ld_T0_A0(s, ot);
b6abf97d
FB
1839 else
1840 gen_op_mov_TN_reg(ot, 0, op1);
1841
a7812ae4
PB
1842 if (is_right) {
1843 switch (ot) {
4ba9938c 1844 case MO_8:
7923057b
BS
1845 gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1846 break;
4ba9938c 1847 case MO_16:
7923057b
BS
1848 gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1849 break;
4ba9938c 1850 case MO_32:
7923057b
BS
1851 gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1852 break;
a7812ae4 1853#ifdef TARGET_X86_64
4ba9938c 1854 case MO_64:
7923057b
BS
1855 gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1856 break;
a7812ae4
PB
1857#endif
1858 }
1859 } else {
1860 switch (ot) {
4ba9938c 1861 case MO_8:
7923057b
BS
1862 gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1863 break;
4ba9938c 1864 case MO_16:
7923057b
BS
1865 gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1866 break;
4ba9938c 1867 case MO_32:
7923057b
BS
1868 gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1869 break;
a7812ae4 1870#ifdef TARGET_X86_64
4ba9938c 1871 case MO_64:
7923057b
BS
1872 gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1873 break;
a7812ae4
PB
1874#endif
1875 }
1876 }
b6abf97d
FB
1877 /* store */
1878 if (op1 == OR_TMP0)
323d1876 1879 gen_op_st_T0_A0(s, ot);
b6abf97d
FB
1880 else
1881 gen_op_mov_reg_T0(ot, op1);
b6abf97d
FB
1882}
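/* RCL/RCR rotate through CF, i.e. effectively a 9/17/33/65-bit rotate,
   which is awkward to open-code in TCG, so helpers are used instead.  The
   flags are materialized into CC_SRC first (hence the CC_OP_EFLAGS
   assertion) so that the helpers have a well-defined carry to rotate
   through and to update. */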
1883
1884/* XXX: add faster immediate case */
3b9d3cf1 1885static void gen_shiftd_rm_T1(DisasContext *s, int ot, int op1,
f437d0a3 1886 bool is_right, TCGv count_in)
b6abf97d 1887{
4ba9938c 1888 target_ulong mask = (ot == MO_64 ? 63 : 31);
f437d0a3 1889 TCGv count;
b6abf97d
FB
1890
1891 /* load */
1e4840bf 1892 if (op1 == OR_TMP0) {
323d1876 1893 gen_op_ld_T0_A0(s, ot);
1e4840bf 1894 } else {
f437d0a3 1895 gen_op_mov_TN_reg(ot, 0, op1);
1e4840bf 1896 }
b6abf97d 1897
f437d0a3
RH
1898 count = tcg_temp_new();
1899 tcg_gen_andi_tl(count, count_in, mask);
1e4840bf 1900
f437d0a3 1901 switch (ot) {
4ba9938c 1902 case MO_16:
f437d0a3
RH
1903 /* Note: we implement the Intel behaviour for shift count > 16.
1904 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1905 portion by constructing it as a 32-bit value. */
b6abf97d 1906 if (is_right) {
f437d0a3
RH
1907 tcg_gen_deposit_tl(cpu_tmp0, cpu_T[0], cpu_T[1], 16, 16);
1908 tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
1909 tcg_gen_mov_tl(cpu_T[0], cpu_tmp0);
b6abf97d 1910 } else {
f437d0a3 1911 tcg_gen_deposit_tl(cpu_T[1], cpu_T[0], cpu_T[1], 16, 16);
b6abf97d 1912 }
f437d0a3
RH
1913 /* FALLTHRU */
1914#ifdef TARGET_X86_64
4ba9938c 1915 case MO_32:
f437d0a3
RH
1916 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1917 tcg_gen_subi_tl(cpu_tmp0, count, 1);
b6abf97d 1918 if (is_right) {
f437d0a3
RH
1919 tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
1920 tcg_gen_shr_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
1921 tcg_gen_shr_i64(cpu_T[0], cpu_T[0], count);
1922 } else {
1923 tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[1], cpu_T[0]);
1924 tcg_gen_shl_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
1925 tcg_gen_shl_i64(cpu_T[0], cpu_T[0], count);
1926 tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
1927 tcg_gen_shri_i64(cpu_T[0], cpu_T[0], 32);
1928 }
1929 break;
1930#endif
1931 default:
1932 tcg_gen_subi_tl(cpu_tmp0, count, 1);
1933 if (is_right) {
1934 tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
b6abf97d 1935
f437d0a3
RH
1936 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1937 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], count);
1938 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
b6abf97d 1939 } else {
f437d0a3 1940 tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
4ba9938c 1941 if (ot == MO_16) {
f437d0a3
RH
1942 /* Only needed if count > 16, for Intel behaviour. */
1943 tcg_gen_subfi_tl(cpu_tmp4, 33, count);
1944 tcg_gen_shr_tl(cpu_tmp4, cpu_T[1], cpu_tmp4);
1945 tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
1946 }
1947
1948 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1949 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], count);
1950 tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
b6abf97d 1951 }
f437d0a3
RH
1952 tcg_gen_movi_tl(cpu_tmp4, 0);
1953 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[1], count, cpu_tmp4,
1954 cpu_tmp4, cpu_T[1]);
1955 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1956 break;
b6abf97d 1957 }
b6abf97d 1958
b6abf97d 1959 /* store */
1e4840bf 1960 if (op1 == OR_TMP0) {
323d1876 1961 gen_op_st_T0_A0(s, ot);
b6abf97d 1962 } else {
f437d0a3 1963 gen_op_mov_reg_T0(ot, op1);
b6abf97d 1964 }
1e4840bf 1965
f437d0a3
RH
1966 gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, count, is_right);
1967 tcg_temp_free(count);
b6abf97d
FB
1968}
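/* In the word-sized (default) case above, a masked count of 0 would make
   the secondary shift of cpu_T[1] by mask+1 bits out of range for TCG, so
   the movcond forces cpu_T[1] to 0 in that case; the final OR then leaves
   the destination unchanged, and the zero-count flag behaviour is handled
   by gen_shift_flags(). */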
1969
1970static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
1971{
1972 if (s != OR_TMP1)
1973 gen_op_mov_TN_reg(ot, 1, s);
1974 switch(op) {
1975 case OP_ROL:
1976 gen_rot_rm_T1(s1, ot, d, 0);
1977 break;
1978 case OP_ROR:
1979 gen_rot_rm_T1(s1, ot, d, 1);
1980 break;
1981 case OP_SHL:
1982 case OP_SHL1:
1983 gen_shift_rm_T1(s1, ot, d, 0, 0);
1984 break;
1985 case OP_SHR:
1986 gen_shift_rm_T1(s1, ot, d, 1, 0);
1987 break;
1988 case OP_SAR:
1989 gen_shift_rm_T1(s1, ot, d, 1, 1);
1990 break;
1991 case OP_RCL:
1992 gen_rotc_rm_T1(s1, ot, d, 0);
1993 break;
1994 case OP_RCR:
1995 gen_rotc_rm_T1(s1, ot, d, 1);
1996 break;
1997 }
2c0262af
FB
1998}
1999
2000static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
2001{
c1c37968 2002 switch(op) {
8cd6345d 2003 case OP_ROL:
2004 gen_rot_rm_im(s1, ot, d, c, 0);
2005 break;
2006 case OP_ROR:
2007 gen_rot_rm_im(s1, ot, d, c, 1);
2008 break;
c1c37968
FB
2009 case OP_SHL:
2010 case OP_SHL1:
2011 gen_shift_rm_im(s1, ot, d, c, 0, 0);
2012 break;
2013 case OP_SHR:
2014 gen_shift_rm_im(s1, ot, d, c, 1, 0);
2015 break;
2016 case OP_SAR:
2017 gen_shift_rm_im(s1, ot, d, c, 1, 1);
2018 break;
2019 default:
2020 /* currently not optimized */
2021 gen_op_movl_T1_im(c);
2022 gen_shift(s1, op, ot, d, OR_TMP1);
2023 break;
2024 }
2c0262af
FB
2025}
2026
0af10c86
BS
2027static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm,
2028 int *reg_ptr, int *offset_ptr)
2c0262af 2029{
14ce26e7 2030 target_long disp;
2c0262af 2031 int havesib;
14ce26e7 2032 int base;
2c0262af
FB
2033 int index;
2034 int scale;
2035 int opreg;
2036 int mod, rm, code, override, must_add_seg;
7865eec4 2037 TCGv sum;
2c0262af
FB
2038
2039 override = s->override;
2040 must_add_seg = s->addseg;
2041 if (override >= 0)
2042 must_add_seg = 1;
2043 mod = (modrm >> 6) & 3;
2044 rm = modrm & 7;
2045
2046 if (s->aflag) {
2c0262af
FB
2047 havesib = 0;
2048 base = rm;
7865eec4 2049 index = -1;
2c0262af 2050 scale = 0;
3b46e624 2051
2c0262af
FB
2052 if (base == 4) {
2053 havesib = 1;
0af10c86 2054 code = cpu_ldub_code(env, s->pc++);
2c0262af 2055 scale = (code >> 6) & 3;
14ce26e7 2056 index = ((code >> 3) & 7) | REX_X(s);
7865eec4
RH
2057 if (index == 4) {
2058 index = -1; /* no index */
2059 }
14ce26e7 2060 base = (code & 7);
2c0262af 2061 }
14ce26e7 2062 base |= REX_B(s);
2c0262af
FB
2063
2064 switch (mod) {
2065 case 0:
14ce26e7 2066 if ((base & 7) == 5) {
2c0262af 2067 base = -1;
0af10c86 2068 disp = (int32_t)cpu_ldl_code(env, s->pc);
2c0262af 2069 s->pc += 4;
14ce26e7
FB
2070 if (CODE64(s) && !havesib) {
2071 disp += s->pc + s->rip_offset;
2072 }
2c0262af
FB
2073 } else {
2074 disp = 0;
2075 }
2076 break;
2077 case 1:
0af10c86 2078 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2c0262af
FB
2079 break;
2080 default:
2081 case 2:
0af10c86 2082 disp = (int32_t)cpu_ldl_code(env, s->pc);
2c0262af
FB
2083 s->pc += 4;
2084 break;
2085 }
3b46e624 2086
7865eec4
RH
2087 /* For correct popl handling with esp. */
2088 if (base == R_ESP && s->popl_esp_hack) {
2089 disp += s->popl_esp_hack;
2090 }
2091
2092 /* Compute the address, with a minimum number of TCG ops. */
2093 TCGV_UNUSED(sum);
2094 if (index >= 0) {
2095 if (scale == 0) {
2096 sum = cpu_regs[index];
2097 } else {
2098 tcg_gen_shli_tl(cpu_A0, cpu_regs[index], scale);
2099 sum = cpu_A0;
14ce26e7 2100 }
7865eec4
RH
2101 if (base >= 0) {
2102 tcg_gen_add_tl(cpu_A0, sum, cpu_regs[base]);
2103 sum = cpu_A0;
14ce26e7 2104 }
7865eec4
RH
2105 } else if (base >= 0) {
2106 sum = cpu_regs[base];
2c0262af 2107 }
7865eec4
RH
2108 if (TCGV_IS_UNUSED(sum)) {
2109 tcg_gen_movi_tl(cpu_A0, disp);
2110 } else {
2111 tcg_gen_addi_tl(cpu_A0, sum, disp);
2c0262af 2112 }
7865eec4 2113
2c0262af
FB
2114 if (must_add_seg) {
2115 if (override < 0) {
7865eec4 2116 if (base == R_EBP || base == R_ESP) {
2c0262af 2117 override = R_SS;
7865eec4 2118 } else {
2c0262af 2119 override = R_DS;
7865eec4 2120 }
2c0262af 2121 }
7865eec4
RH
2122
2123 tcg_gen_ld_tl(cpu_tmp0, cpu_env,
2124 offsetof(CPUX86State, segs[override].base));
2125 if (CODE64(s)) {
2126 if (s->aflag != 2) {
2127 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
2128 }
2129 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
2130 goto done;
14ce26e7 2131 }
7865eec4
RH
2132
2133 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
2134 }
2135
2136 if (s->aflag != 2) {
2137 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
2c0262af
FB
2138 }
2139 } else {
2140 switch (mod) {
2141 case 0:
2142 if (rm == 6) {
0af10c86 2143 disp = cpu_lduw_code(env, s->pc);
2c0262af
FB
2144 s->pc += 2;
2145 gen_op_movl_A0_im(disp);
2146 rm = 0; /* avoid SS override */
2147 goto no_rm;
2148 } else {
2149 disp = 0;
2150 }
2151 break;
2152 case 1:
0af10c86 2153 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2c0262af
FB
2154 break;
2155 default:
2156 case 2:
0af10c86 2157 disp = cpu_lduw_code(env, s->pc);
2c0262af
FB
2158 s->pc += 2;
2159 break;
2160 }
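        /* 16-bit addressing: rm selects one of the eight classic
           base/index pairs BX+SI, BX+DI, BP+SI, BP+DI, SI, DI, BP, BX;
           e.g. modrm 0x42 (mod=1, rm=2) is BP+SI+disp8 and gets an
           implicit SS segment below. */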
2161 switch(rm) {
2162 case 0:
57fec1fe
FB
2163 gen_op_movl_A0_reg(R_EBX);
2164 gen_op_addl_A0_reg_sN(0, R_ESI);
2c0262af
FB
2165 break;
2166 case 1:
57fec1fe
FB
2167 gen_op_movl_A0_reg(R_EBX);
2168 gen_op_addl_A0_reg_sN(0, R_EDI);
2c0262af
FB
2169 break;
2170 case 2:
57fec1fe
FB
2171 gen_op_movl_A0_reg(R_EBP);
2172 gen_op_addl_A0_reg_sN(0, R_ESI);
2c0262af
FB
2173 break;
2174 case 3:
57fec1fe
FB
2175 gen_op_movl_A0_reg(R_EBP);
2176 gen_op_addl_A0_reg_sN(0, R_EDI);
2c0262af
FB
2177 break;
2178 case 4:
57fec1fe 2179 gen_op_movl_A0_reg(R_ESI);
2c0262af
FB
2180 break;
2181 case 5:
57fec1fe 2182 gen_op_movl_A0_reg(R_EDI);
2c0262af
FB
2183 break;
2184 case 6:
57fec1fe 2185 gen_op_movl_A0_reg(R_EBP);
2c0262af
FB
2186 break;
2187 default:
2188 case 7:
57fec1fe 2189 gen_op_movl_A0_reg(R_EBX);
2c0262af
FB
2190 break;
2191 }
2192 if (disp != 0)
2193 gen_op_addl_A0_im(disp);
2194 gen_op_andl_A0_ffff();
2195 no_rm:
2196 if (must_add_seg) {
2197 if (override < 0) {
2198 if (rm == 2 || rm == 3 || rm == 6)
2199 override = R_SS;
2200 else
2201 override = R_DS;
2202 }
7162ab21 2203 gen_op_addl_A0_seg(s, override);
2c0262af
FB
2204 }
2205 }
2206
7865eec4 2207 done:
2c0262af
FB
2208 opreg = OR_A0;
2209 disp = 0;
2210 *reg_ptr = opreg;
2211 *offset_ptr = disp;
2212}
2213
0af10c86 2214static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
e17a36ce
FB
2215{
2216 int mod, rm, base, code;
2217
2218 mod = (modrm >> 6) & 3;
2219 if (mod == 3)
2220 return;
2221 rm = modrm & 7;
2222
2223 if (s->aflag) {
2224
2225 base = rm;
3b46e624 2226
e17a36ce 2227 if (base == 4) {
0af10c86 2228 code = cpu_ldub_code(env, s->pc++);
e17a36ce
FB
2229 base = (code & 7);
2230 }
3b46e624 2231
e17a36ce
FB
2232 switch (mod) {
2233 case 0:
2234 if (base == 5) {
2235 s->pc += 4;
2236 }
2237 break;
2238 case 1:
2239 s->pc++;
2240 break;
2241 default:
2242 case 2:
2243 s->pc += 4;
2244 break;
2245 }
2246 } else {
2247 switch (mod) {
2248 case 0:
2249 if (rm == 6) {
2250 s->pc += 2;
2251 }
2252 break;
2253 case 1:
2254 s->pc++;
2255 break;
2256 default:
2257 case 2:
2258 s->pc += 2;
2259 break;
2260 }
2261 }
2262}
2263
664e0f19
FB
2264/* used for LEA and MOV AX, mem */
2265static void gen_add_A0_ds_seg(DisasContext *s)
2266{
2267 int override, must_add_seg;
2268 must_add_seg = s->addseg;
2269 override = R_DS;
2270 if (s->override >= 0) {
2271 override = s->override;
2272 must_add_seg = 1;
664e0f19
FB
2273 }
2274 if (must_add_seg) {
8f091a59
FB
2275#ifdef TARGET_X86_64
2276 if (CODE64(s)) {
57fec1fe 2277 gen_op_addq_A0_seg(override);
5fafdf24 2278 } else
8f091a59
FB
2279#endif
2280 {
7162ab21 2281 gen_op_addl_A0_seg(s, override);
8f091a59 2282 }
664e0f19
FB
2283 }
2284}
2285
222a3336 2286/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2c0262af 2287 OR_TMP0 */
0af10c86
BS
2288static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2289 int ot, int reg, int is_store)
2c0262af
FB
2290{
2291 int mod, rm, opreg, disp;
2292
2293 mod = (modrm >> 6) & 3;
14ce26e7 2294 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
2295 if (mod == 3) {
2296 if (is_store) {
2297 if (reg != OR_TMP0)
57fec1fe
FB
2298 gen_op_mov_TN_reg(ot, 0, reg);
2299 gen_op_mov_reg_T0(ot, rm);
2c0262af 2300 } else {
57fec1fe 2301 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af 2302 if (reg != OR_TMP0)
57fec1fe 2303 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
2304 }
2305 } else {
0af10c86 2306 gen_lea_modrm(env, s, modrm, &opreg, &disp);
2c0262af
FB
2307 if (is_store) {
2308 if (reg != OR_TMP0)
57fec1fe 2309 gen_op_mov_TN_reg(ot, 0, reg);
323d1876 2310 gen_op_st_T0_A0(s, ot);
2c0262af 2311 } else {
323d1876 2312 gen_op_ld_T0_A0(s, ot);
2c0262af 2313 if (reg != OR_TMP0)
57fec1fe 2314 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
2315 }
2316 }
2317}
2318
0af10c86 2319static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, int ot)
2c0262af
FB
2320{
2321 uint32_t ret;
2322
2323 switch(ot) {
4ba9938c 2324 case MO_8:
0af10c86 2325 ret = cpu_ldub_code(env, s->pc);
2c0262af
FB
2326 s->pc++;
2327 break;
4ba9938c 2328 case MO_16:
0af10c86 2329 ret = cpu_lduw_code(env, s->pc);
2c0262af
FB
2330 s->pc += 2;
2331 break;
2332 default:
4ba9938c 2333 case MO_32:
0af10c86 2334 ret = cpu_ldl_code(env, s->pc);
2c0262af
FB
2335 s->pc += 4;
2336 break;
2337 }
2338 return ret;
2339}
2340
14ce26e7
FB
2341static inline int insn_const_size(unsigned int ot)
2342{
4ba9938c 2343 if (ot <= MO_32) {
14ce26e7 2344 return 1 << ot;
4ba9938c 2345 } else {
14ce26e7 2346 return 4;
4ba9938c 2347 }
14ce26e7
FB
2348}
2349
6e256c93
FB
2350static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2351{
2352 TranslationBlock *tb;
2353 target_ulong pc;
2354
2355 pc = s->cs_base + eip;
2356 tb = s->tb;
2357 /* NOTE: we handle the case where the TB spans two pages here */
2358 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2359 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2360 /* jump to same page: we can use a direct jump */
57fec1fe 2361 tcg_gen_goto_tb(tb_num);
6e256c93 2362 gen_jmp_im(eip);
8cfd0495 2363 tcg_gen_exit_tb((uintptr_t)tb + tb_num);
6e256c93
FB
2364 } else {
2365 /* jump to another page: currently not optimized */
2366 gen_jmp_im(eip);
2367 gen_eob(s);
2368 }
2369}
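/* Direct TB chaining above is only used when the jump target stays on the
   page of the TB start or on the page holding the last byte of the current
   instruction; keeping chained jumps within known pages is what lets
   page-granular invalidation of translated code stay correct.  Cross-page
   jumps fall back to gen_jmp_im()/gen_eob(). */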
2370
5fafdf24 2371static inline void gen_jcc(DisasContext *s, int b,
14ce26e7 2372 target_ulong val, target_ulong next_eip)
2c0262af 2373{
b27fc131 2374 int l1, l2;
3b46e624 2375
2c0262af 2376 if (s->jmp_opt) {
14ce26e7 2377 l1 = gen_new_label();
b27fc131 2378 gen_jcc1(s, b, l1);
dc259201 2379
6e256c93 2380 gen_goto_tb(s, 0, next_eip);
14ce26e7
FB
2381
2382 gen_set_label(l1);
6e256c93 2383 gen_goto_tb(s, 1, val);
5779406a 2384 s->is_jmp = DISAS_TB_JUMP;
2c0262af 2385 } else {
14ce26e7
FB
2386 l1 = gen_new_label();
2387 l2 = gen_new_label();
b27fc131 2388 gen_jcc1(s, b, l1);
8e1c85e3 2389
14ce26e7 2390 gen_jmp_im(next_eip);
8e1c85e3
FB
2391 tcg_gen_br(l2);
2392
14ce26e7
FB
2393 gen_set_label(l1);
2394 gen_jmp_im(val);
2395 gen_set_label(l2);
2c0262af
FB
2396 gen_eob(s);
2397 }
2398}
2399
f32d3781
PB
2400static void gen_cmovcc1(CPUX86State *env, DisasContext *s, int ot, int b,
2401 int modrm, int reg)
2402{
57eb0cc8 2403 CCPrepare cc;
f32d3781 2404
57eb0cc8 2405 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
f32d3781 2406
57eb0cc8
RH
2407 cc = gen_prepare_cc(s, b, cpu_T[1]);
2408 if (cc.mask != -1) {
2409 TCGv t0 = tcg_temp_new();
2410 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2411 cc.reg = t0;
2412 }
2413 if (!cc.use_reg2) {
2414 cc.reg2 = tcg_const_tl(cc.imm);
f32d3781
PB
2415 }
2416
57eb0cc8
RH
2417 tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
2418 cpu_T[0], cpu_regs[reg]);
2419 gen_op_mov_reg_T0(ot, reg);
2420
2421 if (cc.mask != -1) {
2422 tcg_temp_free(cc.reg);
2423 }
2424 if (!cc.use_reg2) {
2425 tcg_temp_free(cc.reg2);
2426 }
f32d3781
PB
2427}
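/* CMOV is emitted without branches: gen_prepare_cc() reduces condition 'b'
   to a comparison of cc.reg against cc.reg2 (masking first when needed),
   and the movcond selects between the freshly loaded operand and the
   current value of the destination register. */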
2428
3bd7da9e
FB
2429static inline void gen_op_movl_T0_seg(int seg_reg)
2430{
2431 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2432 offsetof(CPUX86State,segs[seg_reg].selector));
2433}
2434
2435static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2436{
2437 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2438 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2439 offsetof(CPUX86State,segs[seg_reg].selector));
2440 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2441 tcg_gen_st_tl(cpu_T[0], cpu_env,
2442 offsetof(CPUX86State,segs[seg_reg].base));
2443}
2444
2c0262af
FB
2445/* move T0 to seg_reg and compute if the CPU state may change. Never
2446 call this function with seg_reg == R_CS */
14ce26e7 2447static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2c0262af 2448{
3415a4dd
FB
2449 if (s->pe && !s->vm86) {
2450 /* XXX: optimize by finding processor state dynamically */
773cdfcc 2451 gen_update_cc_op(s);
14ce26e7 2452 gen_jmp_im(cur_eip);
b6abf97d 2453 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 2454 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
dc196a57
FB
2455 /* abort translation because the addseg value may change or
2456 because ss32 may change. For R_SS, translation must always
2457 stop, as special handling must be done to disable hardware
2458 interrupts for the next instruction */
2459 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
5779406a 2460 s->is_jmp = DISAS_TB_JUMP;
3415a4dd 2461 } else {
3bd7da9e 2462 gen_op_movl_seg_T0_vm(seg_reg);
dc196a57 2463 if (seg_reg == R_SS)
5779406a 2464 s->is_jmp = DISAS_TB_JUMP;
3415a4dd 2465 }
2c0262af
FB
2466}
2467
0573fbfc
TS
2468static inline int svm_is_rep(int prefixes)
2469{
2470 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2471}
2472
872929aa 2473static inline void
0573fbfc 2474gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
b8b6a50b 2475 uint32_t type, uint64_t param)
0573fbfc 2476{
872929aa
FB
2477 /* no SVM activated; fast case */
2478 if (likely(!(s->flags & HF_SVMI_MASK)))
2479 return;
773cdfcc 2480 gen_update_cc_op(s);
872929aa 2481 gen_jmp_im(pc_start - s->cs_base);
052e80d5 2482 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
a7812ae4 2483 tcg_const_i64(param));
0573fbfc
TS
2484}
2485
872929aa 2486static inline void
0573fbfc
TS
2487gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2488{
872929aa 2489 gen_svm_check_intercept_param(s, pc_start, type, 0);
0573fbfc
TS
2490}
2491
4f31916f
FB
2492static inline void gen_stack_update(DisasContext *s, int addend)
2493{
14ce26e7
FB
2494#ifdef TARGET_X86_64
2495 if (CODE64(s)) {
6e0d8677 2496 gen_op_add_reg_im(2, R_ESP, addend);
14ce26e7
FB
2497 } else
2498#endif
4f31916f 2499 if (s->ss32) {
6e0d8677 2500 gen_op_add_reg_im(1, R_ESP, addend);
4f31916f 2501 } else {
6e0d8677 2502 gen_op_add_reg_im(0, R_ESP, addend);
4f31916f
FB
2503 }
2504}
2505
2c0262af
FB
2506/* generate a push. It depends on ss32, addseg and dflag */
2507static void gen_push_T0(DisasContext *s)
2508{
14ce26e7
FB
2509#ifdef TARGET_X86_64
2510 if (CODE64(s)) {
57fec1fe 2511 gen_op_movq_A0_reg(R_ESP);
8f091a59 2512 if (s->dflag) {
57fec1fe 2513 gen_op_addq_A0_im(-8);
4ba9938c 2514 gen_op_st_T0_A0(s, MO_64);
8f091a59 2515 } else {
57fec1fe 2516 gen_op_addq_A0_im(-2);
4ba9938c 2517 gen_op_st_T0_A0(s, MO_16);
8f091a59 2518 }
57fec1fe 2519 gen_op_mov_reg_A0(2, R_ESP);
5fafdf24 2520 } else
14ce26e7
FB
2521#endif
2522 {
57fec1fe 2523 gen_op_movl_A0_reg(R_ESP);
14ce26e7 2524 if (!s->dflag)
57fec1fe 2525 gen_op_addl_A0_im(-2);
14ce26e7 2526 else
57fec1fe 2527 gen_op_addl_A0_im(-4);
14ce26e7
FB
2528 if (s->ss32) {
2529 if (s->addseg) {
bbf662ee 2530 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
7162ab21 2531 gen_op_addl_A0_seg(s, R_SS);
14ce26e7
FB
2532 }
2533 } else {
2534 gen_op_andl_A0_ffff();
bbf662ee 2535 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
7162ab21 2536 gen_op_addl_A0_seg(s, R_SS);
2c0262af 2537 }
323d1876 2538 gen_op_st_T0_A0(s, s->dflag + 1);
14ce26e7 2539 if (s->ss32 && !s->addseg)
57fec1fe 2540 gen_op_mov_reg_A0(1, R_ESP);
14ce26e7 2541 else
57fec1fe 2542 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
2c0262af
FB
2543 }
2544}
2545
4f31916f
FB
2546/* generate a push. It depends on ss32, addseg and dflag */
2547/* slower version for T1, only used for call Ev */
2548static void gen_push_T1(DisasContext *s)
2c0262af 2549{
14ce26e7
FB
2550#ifdef TARGET_X86_64
2551 if (CODE64(s)) {
57fec1fe 2552 gen_op_movq_A0_reg(R_ESP);
8f091a59 2553 if (s->dflag) {
57fec1fe 2554 gen_op_addq_A0_im(-8);
4ba9938c 2555 gen_op_st_T1_A0(s, MO_64);
8f091a59 2556 } else {
57fec1fe 2557 gen_op_addq_A0_im(-2);
4ba9938c 2558 gen_op_st_T1_A0(s, MO_16);
8f091a59 2559 }
57fec1fe 2560 gen_op_mov_reg_A0(2, R_ESP);
5fafdf24 2561 } else
14ce26e7
FB
2562#endif
2563 {
57fec1fe 2564 gen_op_movl_A0_reg(R_ESP);
14ce26e7 2565 if (!s->dflag)
57fec1fe 2566 gen_op_addl_A0_im(-2);
14ce26e7 2567 else
57fec1fe 2568 gen_op_addl_A0_im(-4);
14ce26e7
FB
2569 if (s->ss32) {
2570 if (s->addseg) {
7162ab21 2571 gen_op_addl_A0_seg(s, R_SS);
14ce26e7
FB
2572 }
2573 } else {
2574 gen_op_andl_A0_ffff();
7162ab21 2575 gen_op_addl_A0_seg(s, R_SS);
2c0262af 2576 }
323d1876 2577 gen_op_st_T1_A0(s, s->dflag + 1);
3b46e624 2578
14ce26e7 2579 if (s->ss32 && !s->addseg)
57fec1fe 2580 gen_op_mov_reg_A0(1, R_ESP);
14ce26e7
FB
2581 else
2582 gen_stack_update(s, (-2) << s->dflag);
2c0262af
FB
2583 }
2584}
2585
4f31916f
FB
2586/* two step pop is necessary for precise exceptions */
2587static void gen_pop_T0(DisasContext *s)
2c0262af 2588{
14ce26e7
FB
2589#ifdef TARGET_X86_64
2590 if (CODE64(s)) {
57fec1fe 2591 gen_op_movq_A0_reg(R_ESP);
4ba9938c 2592 gen_op_ld_T0_A0(s, s->dflag ? MO_64 : MO_16);
5fafdf24 2593 } else
14ce26e7
FB
2594#endif
2595 {
57fec1fe 2596 gen_op_movl_A0_reg(R_ESP);
14ce26e7
FB
2597 if (s->ss32) {
2598 if (s->addseg)
7162ab21 2599 gen_op_addl_A0_seg(s, R_SS);
14ce26e7
FB
2600 } else {
2601 gen_op_andl_A0_ffff();
7162ab21 2602 gen_op_addl_A0_seg(s, R_SS);
14ce26e7 2603 }
323d1876 2604 gen_op_ld_T0_A0(s, s->dflag + 1);
2c0262af
FB
2605 }
2606}
2607
2608static void gen_pop_update(DisasContext *s)
2609{
14ce26e7 2610#ifdef TARGET_X86_64
8f091a59 2611 if (CODE64(s) && s->dflag) {
14ce26e7
FB
2612 gen_stack_update(s, 8);
2613 } else
2614#endif
2615 {
2616 gen_stack_update(s, 2 << s->dflag);
2617 }
2c0262af
FB
2618}
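/* The pop is deliberately split in two: gen_pop_T0() performs the (possibly
   faulting) memory load without touching ESP, and only gen_pop_update()
   adjusts ESP afterwards, so a fault on the load leaves the architectural
   stack pointer unmodified. */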
2619
2620static void gen_stack_A0(DisasContext *s)
2621{
57fec1fe 2622 gen_op_movl_A0_reg(R_ESP);
2c0262af
FB
2623 if (!s->ss32)
2624 gen_op_andl_A0_ffff();
bbf662ee 2625 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2c0262af 2626 if (s->addseg)
7162ab21 2627 gen_op_addl_A0_seg(s, R_SS);
2c0262af
FB
2628}
2629
2630/* NOTE: wrap around in 16 bit not fully handled */
2631static void gen_pusha(DisasContext *s)
2632{
2633 int i;
57fec1fe 2634 gen_op_movl_A0_reg(R_ESP);
2c0262af
FB
2635 gen_op_addl_A0_im(-16 << s->dflag);
2636 if (!s->ss32)
2637 gen_op_andl_A0_ffff();
bbf662ee 2638 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2c0262af 2639 if (s->addseg)
7162ab21 2640 gen_op_addl_A0_seg(s, R_SS);
2c0262af 2641 for(i = 0;i < 8; i++) {
4ba9938c
RH
2642 gen_op_mov_TN_reg(MO_32, 0, 7 - i);
2643 gen_op_st_T0_A0(s, MO_16 + s->dflag);
2c0262af
FB
2644 gen_op_addl_A0_im(2 << s->dflag);
2645 }
4ba9938c 2646 gen_op_mov_reg_T1(MO_16 + s->ss32, R_ESP);
2c0262af
FB
2647}
2648
2649/* NOTE: wrap around in 16 bit not fully handled */
2650static void gen_popa(DisasContext *s)
2651{
2652 int i;
57fec1fe 2653 gen_op_movl_A0_reg(R_ESP);
2c0262af
FB
2654 if (!s->ss32)
2655 gen_op_andl_A0_ffff();
bbf662ee
FB
2656 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2657 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2c0262af 2658 if (s->addseg)
7162ab21 2659 gen_op_addl_A0_seg(s, R_SS);
2c0262af
FB
2660 for(i = 0;i < 8; i++) {
2661 /* ESP is not reloaded */
2662 if (i != 3) {
4ba9938c
RH
2663 gen_op_ld_T0_A0(s, MO_16 + s->dflag);
2664 gen_op_mov_reg_T0(MO_16 + s->dflag, 7 - i);
2c0262af
FB
2665 }
2666 gen_op_addl_A0_im(2 << s->dflag);
2667 }
4ba9938c 2668 gen_op_mov_reg_T1(MO_16 + s->ss32, R_ESP);
2c0262af
FB
2669}
2670
2c0262af
FB
2671static void gen_enter(DisasContext *s, int esp_addend, int level)
2672{
61a8c4ec 2673 int ot, opsize;
2c0262af 2674
2c0262af 2675 level &= 0x1f;
8f091a59
FB
2676#ifdef TARGET_X86_64
2677 if (CODE64(s)) {
4ba9938c 2678 ot = s->dflag ? MO_64 : MO_16;
8f091a59 2679 opsize = 1 << ot;
3b46e624 2680
57fec1fe 2681 gen_op_movl_A0_reg(R_ESP);
8f091a59 2682 gen_op_addq_A0_im(-opsize);
bbf662ee 2683 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
8f091a59
FB
2684
2685 /* push bp */
4ba9938c 2686 gen_op_mov_TN_reg(MO_32, 0, R_EBP);
323d1876 2687 gen_op_st_T0_A0(s, ot);
8f091a59 2688 if (level) {
b5b38f61 2689 /* XXX: must save state */
2999a0b2 2690 gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
4ba9938c 2691 tcg_const_i32((ot == MO_64)),
a7812ae4 2692 cpu_T[1]);
8f091a59 2693 }
57fec1fe 2694 gen_op_mov_reg_T1(ot, R_EBP);
bbf662ee 2695 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
4ba9938c 2696 gen_op_mov_reg_T1(MO_64, R_ESP);
5fafdf24 2697 } else
8f091a59
FB
2698#endif
2699 {
4ba9938c 2700 ot = s->dflag + MO_16;
8f091a59 2701 opsize = 2 << s->dflag;
3b46e624 2702
57fec1fe 2703 gen_op_movl_A0_reg(R_ESP);
8f091a59
FB
2704 gen_op_addl_A0_im(-opsize);
2705 if (!s->ss32)
2706 gen_op_andl_A0_ffff();
bbf662ee 2707 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
8f091a59 2708 if (s->addseg)
7162ab21 2709 gen_op_addl_A0_seg(s, R_SS);
8f091a59 2710 /* push bp */
4ba9938c 2711 gen_op_mov_TN_reg(MO_32, 0, R_EBP);
323d1876 2712 gen_op_st_T0_A0(s, ot);
8f091a59 2713 if (level) {
b5b38f61 2714 /* XXX: must save state */
2999a0b2 2715 gen_helper_enter_level(cpu_env, tcg_const_i32(level),
a7812ae4
PB
2716 tcg_const_i32(s->dflag),
2717 cpu_T[1]);
8f091a59 2718 }
57fec1fe 2719 gen_op_mov_reg_T1(ot, R_EBP);
bbf662ee 2720 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
4ba9938c 2721 gen_op_mov_reg_T1(MO_16 + s->ss32, R_ESP);
2c0262af 2722 }
2c0262af
FB
2723}
2724
14ce26e7 2725static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2c0262af 2726{
773cdfcc 2727 gen_update_cc_op(s);
14ce26e7 2728 gen_jmp_im(cur_eip);
77b2bc2c 2729 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
5779406a 2730 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2731}
2732
2733/* an interrupt is different from an exception because of the
7f75ffd3 2734 privilege checks */
5fafdf24 2735static void gen_interrupt(DisasContext *s, int intno,
14ce26e7 2736 target_ulong cur_eip, target_ulong next_eip)
2c0262af 2737{
773cdfcc 2738 gen_update_cc_op(s);
14ce26e7 2739 gen_jmp_im(cur_eip);
77b2bc2c 2740 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
a7812ae4 2741 tcg_const_i32(next_eip - cur_eip));
5779406a 2742 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2743}
2744
14ce26e7 2745static void gen_debug(DisasContext *s, target_ulong cur_eip)
2c0262af 2746{
773cdfcc 2747 gen_update_cc_op(s);
14ce26e7 2748 gen_jmp_im(cur_eip);
4a7443be 2749 gen_helper_debug(cpu_env);
5779406a 2750 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2751}
2752
2753/* generate a generic end of block. Trace exception is also generated
2754 if needed */
2755static void gen_eob(DisasContext *s)
2756{
773cdfcc 2757 gen_update_cc_op(s);
a2cc3b24 2758 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
f0967a1a 2759 gen_helper_reset_inhibit_irq(cpu_env);
a2cc3b24 2760 }
a2397807 2761 if (s->tb->flags & HF_RF_MASK) {
f0967a1a 2762 gen_helper_reset_rf(cpu_env);
a2397807 2763 }
34865134 2764 if (s->singlestep_enabled) {
4a7443be 2765 gen_helper_debug(cpu_env);
34865134 2766 } else if (s->tf) {
4a7443be 2767 gen_helper_single_step(cpu_env);
2c0262af 2768 } else {
57fec1fe 2769 tcg_gen_exit_tb(0);
2c0262af 2770 }
5779406a 2771 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2772}
2773
2774/* generate a jump to eip. No segment change must happen before this, as a
2775 direct jump to the next block may occur */
14ce26e7 2776static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2c0262af 2777{
a3251186
RH
2778 gen_update_cc_op(s);
2779 set_cc_op(s, CC_OP_DYNAMIC);
2c0262af 2780 if (s->jmp_opt) {
6e256c93 2781 gen_goto_tb(s, tb_num, eip);
5779406a 2782 s->is_jmp = DISAS_TB_JUMP;
2c0262af 2783 } else {
14ce26e7 2784 gen_jmp_im(eip);
2c0262af
FB
2785 gen_eob(s);
2786 }
2787}
2788
14ce26e7
FB
2789static void gen_jmp(DisasContext *s, target_ulong eip)
2790{
2791 gen_jmp_tb(s, eip, 0);
2792}
2793
323d1876 2794static inline void gen_ldq_env_A0(DisasContext *s, int offset)
8686c490 2795{
3c5f4116 2796 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
b6abf97d 2797 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
8686c490 2798}
664e0f19 2799
323d1876 2800static inline void gen_stq_env_A0(DisasContext *s, int offset)
8686c490 2801{
b6abf97d 2802 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
3523e4bd 2803 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
8686c490 2804}
664e0f19 2805
323d1876 2806static inline void gen_ldo_env_A0(DisasContext *s, int offset)
8686c490 2807{
5c42a7cd 2808 int mem_index = s->mem_index;
3c5f4116 2809 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
b6abf97d 2810 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
8686c490 2811 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
3c5f4116 2812 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
b6abf97d 2813 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
8686c490 2814}
14ce26e7 2815
323d1876 2816static inline void gen_sto_env_A0(DisasContext *s, int offset)
8686c490 2817{
5c42a7cd 2818 int mem_index = s->mem_index;
b6abf97d 2819 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
3523e4bd 2820 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
8686c490 2821 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
b6abf97d 2822 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
3523e4bd 2823 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
8686c490 2824}
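/* The 128-bit XMM load/store helpers above are emitted as two little-endian
   64-bit guest accesses at A0 and A0 + 8; no atomicity across the two
   halves is implied. */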
14ce26e7 2825
5af45186
FB
2826static inline void gen_op_movo(int d_offset, int s_offset)
2827{
b6abf97d
FB
2828 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2829 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2830 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2831 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
5af45186
FB
2832}
2833
2834static inline void gen_op_movq(int d_offset, int s_offset)
2835{
b6abf97d
FB
2836 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2837 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
5af45186
FB
2838}
2839
2840static inline void gen_op_movl(int d_offset, int s_offset)
2841{
b6abf97d
FB
2842 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2843 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
5af45186
FB
2844}
2845
2846static inline void gen_op_movq_env_0(int d_offset)
2847{
b6abf97d
FB
2848 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2849 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
5af45186 2850}
664e0f19 2851
d3eb5eae
BS
2852typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2853typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2854typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2855typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2856typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2857typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2858 TCGv_i32 val);
c4baa050 2859typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
d3eb5eae
BS
2860typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2861 TCGv val);
c4baa050 2862
5af45186
FB
2863#define SSE_SPECIAL ((void *)1)
2864#define SSE_DUMMY ((void *)2)
664e0f19 2865
a7812ae4
PB
2866#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2867#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2868 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
5af45186 2869
d3eb5eae 2870static const SSEFunc_0_epp sse_op_table1[256][4] = {
a35f3ec7
AJ
2871 /* 3DNow! extensions */
2872 [0x0e] = { SSE_DUMMY }, /* femms */
2873 [0x0f] = { SSE_DUMMY }, /* pf... */
664e0f19
FB
2874 /* pure SSE operations */
2875 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2876 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
465e9838 2877 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
664e0f19 2878 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
a7812ae4
PB
2879 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2880 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
664e0f19
FB
2881 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2882 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2883
2884 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2885 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2886 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
d9f4bb27 2887 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
664e0f19
FB
2888 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2889 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
a7812ae4
PB
2890 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2891 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
664e0f19
FB
2892 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2893 [0x51] = SSE_FOP(sqrt),
a7812ae4
PB
2894 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2895 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2896 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2897 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2898 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2899 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
664e0f19
FB
2900 [0x58] = SSE_FOP(add),
2901 [0x59] = SSE_FOP(mul),
a7812ae4
PB
2902 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2903 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2904 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
664e0f19
FB
2905 [0x5c] = SSE_FOP(sub),
2906 [0x5d] = SSE_FOP(min),
2907 [0x5e] = SSE_FOP(div),
2908 [0x5f] = SSE_FOP(max),
2909
2910 [0xc2] = SSE_FOP(cmpeq),
d3eb5eae
BS
2911 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2912 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
664e0f19 2913
7073fbad
RH
2914 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2915 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2916 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
4242b1bd 2917
664e0f19
FB
2918 /* MMX ops and their SSE extensions */
2919 [0x60] = MMX_OP2(punpcklbw),
2920 [0x61] = MMX_OP2(punpcklwd),
2921 [0x62] = MMX_OP2(punpckldq),
2922 [0x63] = MMX_OP2(packsswb),
2923 [0x64] = MMX_OP2(pcmpgtb),
2924 [0x65] = MMX_OP2(pcmpgtw),
2925 [0x66] = MMX_OP2(pcmpgtl),
2926 [0x67] = MMX_OP2(packuswb),
2927 [0x68] = MMX_OP2(punpckhbw),
2928 [0x69] = MMX_OP2(punpckhwd),
2929 [0x6a] = MMX_OP2(punpckhdq),
2930 [0x6b] = MMX_OP2(packssdw),
a7812ae4
PB
2931 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2932 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
664e0f19
FB
2933 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2934 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
d3eb5eae
BS
2935 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2936 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2937 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2938 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
664e0f19
FB
2939 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2940 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2941 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2942 [0x74] = MMX_OP2(pcmpeqb),
2943 [0x75] = MMX_OP2(pcmpeqw),
2944 [0x76] = MMX_OP2(pcmpeql),
a35f3ec7 2945 [0x77] = { SSE_DUMMY }, /* emms */
d9f4bb27
AP
2946 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2947 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
a7812ae4
PB
2948 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2949 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
664e0f19
FB
2950 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
2951 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2952 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2953 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
a7812ae4 2954 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
664e0f19
FB
2955 [0xd1] = MMX_OP2(psrlw),
2956 [0xd2] = MMX_OP2(psrld),
2957 [0xd3] = MMX_OP2(psrlq),
2958 [0xd4] = MMX_OP2(paddq),
2959 [0xd5] = MMX_OP2(pmullw),
2960 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2961 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2962 [0xd8] = MMX_OP2(psubusb),
2963 [0xd9] = MMX_OP2(psubusw),
2964 [0xda] = MMX_OP2(pminub),
2965 [0xdb] = MMX_OP2(pand),
2966 [0xdc] = MMX_OP2(paddusb),
2967 [0xdd] = MMX_OP2(paddusw),
2968 [0xde] = MMX_OP2(pmaxub),
2969 [0xdf] = MMX_OP2(pandn),
2970 [0xe0] = MMX_OP2(pavgb),
2971 [0xe1] = MMX_OP2(psraw),
2972 [0xe2] = MMX_OP2(psrad),
2973 [0xe3] = MMX_OP2(pavgw),
2974 [0xe4] = MMX_OP2(pmulhuw),
2975 [0xe5] = MMX_OP2(pmulhw),
a7812ae4 2976 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
664e0f19
FB
2977 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2978 [0xe8] = MMX_OP2(psubsb),
2979 [0xe9] = MMX_OP2(psubsw),
2980 [0xea] = MMX_OP2(pminsw),
2981 [0xeb] = MMX_OP2(por),
2982 [0xec] = MMX_OP2(paddsb),
2983 [0xed] = MMX_OP2(paddsw),
2984 [0xee] = MMX_OP2(pmaxsw),
2985 [0xef] = MMX_OP2(pxor),
465e9838 2986 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
664e0f19
FB
2987 [0xf1] = MMX_OP2(psllw),
2988 [0xf2] = MMX_OP2(pslld),
2989 [0xf3] = MMX_OP2(psllq),
2990 [0xf4] = MMX_OP2(pmuludq),
2991 [0xf5] = MMX_OP2(pmaddwd),
2992 [0xf6] = MMX_OP2(psadbw),
d3eb5eae
BS
2993 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
2994 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
664e0f19
FB
2995 [0xf8] = MMX_OP2(psubb),
2996 [0xf9] = MMX_OP2(psubw),
2997 [0xfa] = MMX_OP2(psubl),
2998 [0xfb] = MMX_OP2(psubq),
2999 [0xfc] = MMX_OP2(paddb),
3000 [0xfd] = MMX_OP2(paddw),
3001 [0xfe] = MMX_OP2(paddl),
3002};
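/* sse_op_table1 is indexed by the second opcode byte and by the mandatory
   prefix: column 0 = no prefix, 1 = 0x66, 2 = 0xF3, 3 = 0xF2, matching the
   b1 value computed in gen_sse() below.  For example, 0x66 0x0F 0x58
   (addpd) selects sse_op_table1[0x58][1] == gen_helper_addpd via the
   SSE_FOP(add) row.  SSE_SPECIAL and SSE_DUMMY entries are decoded by
   explicit cases instead of a direct helper call. */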
3003
d3eb5eae 3004static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
664e0f19
FB
3005 [0 + 2] = MMX_OP2(psrlw),
3006 [0 + 4] = MMX_OP2(psraw),
3007 [0 + 6] = MMX_OP2(psllw),
3008 [8 + 2] = MMX_OP2(psrld),
3009 [8 + 4] = MMX_OP2(psrad),
3010 [8 + 6] = MMX_OP2(pslld),
3011 [16 + 2] = MMX_OP2(psrlq),
a7812ae4 3012 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
664e0f19 3013 [16 + 6] = MMX_OP2(psllq),
a7812ae4 3014 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
664e0f19
FB
3015};
3016
d3eb5eae 3017static const SSEFunc_0_epi sse_op_table3ai[] = {
a7812ae4 3018 gen_helper_cvtsi2ss,
11f8cdbc 3019 gen_helper_cvtsi2sd
c4baa050 3020};
a7812ae4 3021
11f8cdbc 3022#ifdef TARGET_X86_64
d3eb5eae 3023static const SSEFunc_0_epl sse_op_table3aq[] = {
11f8cdbc
SW
3024 gen_helper_cvtsq2ss,
3025 gen_helper_cvtsq2sd
3026};
3027#endif
3028
d3eb5eae 3029static const SSEFunc_i_ep sse_op_table3bi[] = {
a7812ae4 3030 gen_helper_cvttss2si,
a7812ae4 3031 gen_helper_cvtss2si,
bedc2ac1 3032 gen_helper_cvttsd2si,
11f8cdbc 3033 gen_helper_cvtsd2si
664e0f19 3034};
3b46e624 3035
11f8cdbc 3036#ifdef TARGET_X86_64
d3eb5eae 3037static const SSEFunc_l_ep sse_op_table3bq[] = {
11f8cdbc 3038 gen_helper_cvttss2sq,
11f8cdbc 3039 gen_helper_cvtss2sq,
bedc2ac1 3040 gen_helper_cvttsd2sq,
11f8cdbc
SW
3041 gen_helper_cvtsd2sq
3042};
3043#endif
3044
d3eb5eae 3045static const SSEFunc_0_epp sse_op_table4[8][4] = {
664e0f19
FB
3046 SSE_FOP(cmpeq),
3047 SSE_FOP(cmplt),
3048 SSE_FOP(cmple),
3049 SSE_FOP(cmpunord),
3050 SSE_FOP(cmpneq),
3051 SSE_FOP(cmpnlt),
3052 SSE_FOP(cmpnle),
3053 SSE_FOP(cmpord),
3054};
3b46e624 3055
d3eb5eae 3056static const SSEFunc_0_epp sse_op_table5[256] = {
a7812ae4
PB
3057 [0x0c] = gen_helper_pi2fw,
3058 [0x0d] = gen_helper_pi2fd,
3059 [0x1c] = gen_helper_pf2iw,
3060 [0x1d] = gen_helper_pf2id,
3061 [0x8a] = gen_helper_pfnacc,
3062 [0x8e] = gen_helper_pfpnacc,
3063 [0x90] = gen_helper_pfcmpge,
3064 [0x94] = gen_helper_pfmin,
3065 [0x96] = gen_helper_pfrcp,
3066 [0x97] = gen_helper_pfrsqrt,
3067 [0x9a] = gen_helper_pfsub,
3068 [0x9e] = gen_helper_pfadd,
3069 [0xa0] = gen_helper_pfcmpgt,
3070 [0xa4] = gen_helper_pfmax,
3071 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
3072 [0xa7] = gen_helper_movq, /* pfrsqit1 */
3073 [0xaa] = gen_helper_pfsubr,
3074 [0xae] = gen_helper_pfacc,
3075 [0xb0] = gen_helper_pfcmpeq,
3076 [0xb4] = gen_helper_pfmul,
3077 [0xb6] = gen_helper_movq, /* pfrcpit2 */
3078 [0xb7] = gen_helper_pmulhrw_mmx,
3079 [0xbb] = gen_helper_pswapd,
3080 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
a35f3ec7
AJ
3081};
3082
d3eb5eae
BS
3083struct SSEOpHelper_epp {
3084 SSEFunc_0_epp op[2];
c4baa050
BS
3085 uint32_t ext_mask;
3086};
3087
d3eb5eae
BS
3088struct SSEOpHelper_eppi {
3089 SSEFunc_0_eppi op[2];
c4baa050 3090 uint32_t ext_mask;
222a3336 3091};
c4baa050 3092
222a3336 3093#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
a7812ae4
PB
3094#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3095#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
222a3336 3096#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
e71827bc
AJ
3097#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
3098 CPUID_EXT_PCLMULQDQ }
d640045a 3099#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
c4baa050 3100
d3eb5eae 3101static const struct SSEOpHelper_epp sse_op_table6[256] = {
222a3336
AZ
3102 [0x00] = SSSE3_OP(pshufb),
3103 [0x01] = SSSE3_OP(phaddw),
3104 [0x02] = SSSE3_OP(phaddd),
3105 [0x03] = SSSE3_OP(phaddsw),
3106 [0x04] = SSSE3_OP(pmaddubsw),
3107 [0x05] = SSSE3_OP(phsubw),
3108 [0x06] = SSSE3_OP(phsubd),
3109 [0x07] = SSSE3_OP(phsubsw),
3110 [0x08] = SSSE3_OP(psignb),
3111 [0x09] = SSSE3_OP(psignw),
3112 [0x0a] = SSSE3_OP(psignd),
3113 [0x0b] = SSSE3_OP(pmulhrsw),
3114 [0x10] = SSE41_OP(pblendvb),
3115 [0x14] = SSE41_OP(blendvps),
3116 [0x15] = SSE41_OP(blendvpd),
3117 [0x17] = SSE41_OP(ptest),
3118 [0x1c] = SSSE3_OP(pabsb),
3119 [0x1d] = SSSE3_OP(pabsw),
3120 [0x1e] = SSSE3_OP(pabsd),
3121 [0x20] = SSE41_OP(pmovsxbw),
3122 [0x21] = SSE41_OP(pmovsxbd),
3123 [0x22] = SSE41_OP(pmovsxbq),
3124 [0x23] = SSE41_OP(pmovsxwd),
3125 [0x24] = SSE41_OP(pmovsxwq),
3126 [0x25] = SSE41_OP(pmovsxdq),
3127 [0x28] = SSE41_OP(pmuldq),
3128 [0x29] = SSE41_OP(pcmpeqq),
3129 [0x2a] = SSE41_SPECIAL, /* movntqda */
3130 [0x2b] = SSE41_OP(packusdw),
3131 [0x30] = SSE41_OP(pmovzxbw),
3132 [0x31] = SSE41_OP(pmovzxbd),
3133 [0x32] = SSE41_OP(pmovzxbq),
3134 [0x33] = SSE41_OP(pmovzxwd),
3135 [0x34] = SSE41_OP(pmovzxwq),
3136 [0x35] = SSE41_OP(pmovzxdq),
3137 [0x37] = SSE42_OP(pcmpgtq),
3138 [0x38] = SSE41_OP(pminsb),
3139 [0x39] = SSE41_OP(pminsd),
3140 [0x3a] = SSE41_OP(pminuw),
3141 [0x3b] = SSE41_OP(pminud),
3142 [0x3c] = SSE41_OP(pmaxsb),
3143 [0x3d] = SSE41_OP(pmaxsd),
3144 [0x3e] = SSE41_OP(pmaxuw),
3145 [0x3f] = SSE41_OP(pmaxud),
3146 [0x40] = SSE41_OP(pmulld),
3147 [0x41] = SSE41_OP(phminposuw),
d640045a
AJ
3148 [0xdb] = AESNI_OP(aesimc),
3149 [0xdc] = AESNI_OP(aesenc),
3150 [0xdd] = AESNI_OP(aesenclast),
3151 [0xde] = AESNI_OP(aesdec),
3152 [0xdf] = AESNI_OP(aesdeclast),
4242b1bd
AZ
3153};
3154
d3eb5eae 3155static const struct SSEOpHelper_eppi sse_op_table7[256] = {
222a3336
AZ
3156 [0x08] = SSE41_OP(roundps),
3157 [0x09] = SSE41_OP(roundpd),
3158 [0x0a] = SSE41_OP(roundss),
3159 [0x0b] = SSE41_OP(roundsd),
3160 [0x0c] = SSE41_OP(blendps),
3161 [0x0d] = SSE41_OP(blendpd),
3162 [0x0e] = SSE41_OP(pblendw),
3163 [0x0f] = SSSE3_OP(palignr),
3164 [0x14] = SSE41_SPECIAL, /* pextrb */
3165 [0x15] = SSE41_SPECIAL, /* pextrw */
3166 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
3167 [0x17] = SSE41_SPECIAL, /* extractps */
3168 [0x20] = SSE41_SPECIAL, /* pinsrb */
3169 [0x21] = SSE41_SPECIAL, /* insertps */
3170 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
3171 [0x40] = SSE41_OP(dpps),
3172 [0x41] = SSE41_OP(dppd),
3173 [0x42] = SSE41_OP(mpsadbw),
e71827bc 3174 [0x44] = PCLMULQDQ_OP(pclmulqdq),
222a3336
AZ
3175 [0x60] = SSE42_OP(pcmpestrm),
3176 [0x61] = SSE42_OP(pcmpestri),
3177 [0x62] = SSE42_OP(pcmpistrm),
3178 [0x63] = SSE42_OP(pcmpistri),
d640045a 3179 [0xdf] = AESNI_OP(aeskeygenassist),
4242b1bd
AZ
3180};
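/* sse_op_table6/7 pair each helper with the CPUID feature bit (ext_mask)
   that gates the instruction, via the SSSE3_OP / SSE41_OP / SSE42_OP /
   PCLMULQDQ_OP / AESNI_OP macros above; the 0x38/0x3a decode path is
   expected to check ext_mask against the guest CPUID features before
   dispatching. */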
3181
0af10c86
BS
3182static void gen_sse(CPUX86State *env, DisasContext *s, int b,
3183 target_ulong pc_start, int rex_r)
664e0f19
FB
3184{
3185 int b1, op1_offset, op2_offset, is_xmm, val, ot;
3186 int modrm, mod, rm, reg, reg_addr, offset_addr;
d3eb5eae
BS
3187 SSEFunc_0_epp sse_fn_epp;
3188 SSEFunc_0_eppi sse_fn_eppi;
c4baa050 3189 SSEFunc_0_ppi sse_fn_ppi;
d3eb5eae 3190 SSEFunc_0_eppt sse_fn_eppt;
664e0f19
FB
3191
3192 b &= 0xff;
5fafdf24 3193 if (s->prefix & PREFIX_DATA)
664e0f19 3194 b1 = 1;
5fafdf24 3195 else if (s->prefix & PREFIX_REPZ)
664e0f19 3196 b1 = 2;
5fafdf24 3197 else if (s->prefix & PREFIX_REPNZ)
664e0f19
FB
3198 b1 = 3;
3199 else
3200 b1 = 0;
d3eb5eae
BS
3201 sse_fn_epp = sse_op_table1[b][b1];
3202 if (!sse_fn_epp) {
664e0f19 3203 goto illegal_op;
c4baa050 3204 }
a35f3ec7 3205 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
664e0f19
FB
3206 is_xmm = 1;
3207 } else {
3208 if (b1 == 0) {
3209 /* MMX case */
3210 is_xmm = 0;
3211 } else {
3212 is_xmm = 1;
3213 }
3214 }
3215 /* simple MMX/SSE operation */
3216 if (s->flags & HF_TS_MASK) {
3217 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3218 return;
3219 }
3220 if (s->flags & HF_EM_MASK) {
3221 illegal_op:
3222 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3223 return;
3224 }
3225 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
4242b1bd
AZ
3226 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3227 goto illegal_op;
e771edab
AJ
3228 if (b == 0x0e) {
3229 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3230 goto illegal_op;
3231 /* femms */
d3eb5eae 3232 gen_helper_emms(cpu_env);
e771edab
AJ
3233 return;
3234 }
3235 if (b == 0x77) {
3236 /* emms */
d3eb5eae 3237 gen_helper_emms(cpu_env);
664e0f19
FB
3238 return;
3239 }
3240 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3241 the static cpu state) */
3242 if (!is_xmm) {
d3eb5eae 3243 gen_helper_enter_mmx(cpu_env);
664e0f19
FB
3244 }
3245
0af10c86 3246 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3247 reg = ((modrm >> 3) & 7);
3248 if (is_xmm)
3249 reg |= rex_r;
3250 mod = (modrm >> 6) & 3;
d3eb5eae 3251 if (sse_fn_epp == SSE_SPECIAL) {
664e0f19
FB
3252 b |= (b1 << 8);
3253 switch(b) {
3254 case 0x0e7: /* movntq */
5fafdf24 3255 if (mod == 3)
664e0f19 3256 goto illegal_op;
0af10c86 3257 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876 3258 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
664e0f19
FB
3259 break;
3260 case 0x1e7: /* movntdq */
3261 case 0x02b: /* movntps */
3262 case 0x12b: /* movntpd */
2e21e749
T
3263 if (mod == 3)
3264 goto illegal_op;
0af10c86 3265 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876 3266 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
2e21e749 3267 break;
465e9838
FB
3268 case 0x3f0: /* lddqu */
3269 if (mod == 3)
664e0f19 3270 goto illegal_op;
0af10c86 3271 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876 3272 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19 3273 break;
d9f4bb27
AP
3274 case 0x22b: /* movntss */
3275 case 0x32b: /* movntsd */
3276 if (mod == 3)
3277 goto illegal_op;
0af10c86 3278 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
d9f4bb27 3279 if (b1 & 1) {
323d1876 3280 gen_stq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
d9f4bb27
AP
3281 } else {
3282 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3283 xmm_regs[reg].XMM_L(0)));
4ba9938c 3284 gen_op_st_T0_A0(s, MO_32);
d9f4bb27
AP
3285 }
3286 break;
664e0f19 3287 case 0x6e: /* movd mm, ea */
dabd98dd
FB
3288#ifdef TARGET_X86_64
3289 if (s->dflag == 2) {
4ba9938c 3290 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
5af45186 3291 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
5fafdf24 3292 } else
dabd98dd
FB
3293#endif
3294 {
4ba9938c 3295 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
5af45186
FB
3296 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3297 offsetof(CPUX86State,fpregs[reg].mmx));
a7812ae4
PB
3298 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3299 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
dabd98dd 3300 }
664e0f19
FB
3301 break;
3302 case 0x16e: /* movd xmm, ea */
dabd98dd
FB
3303#ifdef TARGET_X86_64
3304 if (s->dflag == 2) {
4ba9938c 3305 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
5af45186
FB
3306 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3307 offsetof(CPUX86State,xmm_regs[reg]));
a7812ae4 3308 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
5fafdf24 3309 } else
dabd98dd
FB
3310#endif
3311 {
4ba9938c 3312 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
5af45186
FB
3313 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3314 offsetof(CPUX86State,xmm_regs[reg]));
b6abf97d 3315 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
a7812ae4 3316 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
dabd98dd 3317 }
664e0f19
FB
3318 break;
3319 case 0x6f: /* movq mm, ea */
3320 if (mod != 3) {
0af10c86 3321 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876 3322 gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
664e0f19
FB
3323 } else {
3324 rm = (modrm & 7);
b6abf97d 3325 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
5af45186 3326 offsetof(CPUX86State,fpregs[rm].mmx));
b6abf97d 3327 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
5af45186 3328 offsetof(CPUX86State,fpregs[reg].mmx));
664e0f19
FB
3329 }
3330 break;
3331 case 0x010: /* movups */
3332 case 0x110: /* movupd */
3333 case 0x028: /* movaps */
3334 case 0x128: /* movapd */
3335 case 0x16f: /* movdqa xmm, ea */
3336 case 0x26f: /* movdqu xmm, ea */
3337 if (mod != 3) {
0af10c86 3338 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876 3339 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19
FB
3340 } else {
3341 rm = (modrm & 7) | REX_B(s);
3342 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3343 offsetof(CPUX86State,xmm_regs[rm]));
3344 }
3345 break;
3346 case 0x210: /* movss xmm, ea */
3347 if (mod != 3) {
0af10c86 3348 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4ba9938c 3349 gen_op_ld_T0_A0(s, MO_32);
651ba608 3350 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
664e0f19 3351 gen_op_movl_T0_0();
651ba608
FB
3352 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3353 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3354 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
664e0f19
FB
3355 } else {
3356 rm = (modrm & 7) | REX_B(s);
3357 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3358 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3359 }
3360 break;
3361 case 0x310: /* movsd xmm, ea */
3362 if (mod != 3) {
0af10c86 3363 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876
RH
3364 gen_ldq_env_A0(s, offsetof(CPUX86State,
3365 xmm_regs[reg].XMM_Q(0)));
664e0f19 3366 gen_op_movl_T0_0();
651ba608
FB
3367 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3368 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
664e0f19
FB
3369 } else {
3370 rm = (modrm & 7) | REX_B(s);
3371 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3372 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3373 }
3374 break;
3375 case 0x012: /* movlps */
3376 case 0x112: /* movlpd */
3377 if (mod != 3) {
0af10c86 3378 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876
RH
3379 gen_ldq_env_A0(s, offsetof(CPUX86State,
3380 xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3381 } else {
3382 /* movhlps */
3383 rm = (modrm & 7) | REX_B(s);
3384 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3385 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3386 }
3387 break;
465e9838
FB
3388 case 0x212: /* movsldup */
3389 if (mod != 3) {
0af10c86 3390 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876 3391 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
465e9838
FB
3392 } else {
3393 rm = (modrm & 7) | REX_B(s);
3394 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3395 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3396 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3397 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3398 }
3399 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3400 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3401 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3402 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3403 break;
3404 case 0x312: /* movddup */
3405 if (mod != 3) {
0af10c86 3406 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876
RH
3407 gen_ldq_env_A0(s, offsetof(CPUX86State,
3408 xmm_regs[reg].XMM_Q(0)));
465e9838
FB
3409 } else {
3410 rm = (modrm & 7) | REX_B(s);
3411 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3412 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3413 }
3414 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
ba6526df 3415 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
465e9838 3416 break;
664e0f19
FB
3417 case 0x016: /* movhps */
3418 case 0x116: /* movhpd */
3419 if (mod != 3) {
0af10c86 3420 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876
RH
3421 gen_ldq_env_A0(s, offsetof(CPUX86State,
3422 xmm_regs[reg].XMM_Q(1)));
664e0f19
FB
3423 } else {
3424 /* movlhps */
3425 rm = (modrm & 7) | REX_B(s);
3426 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3427 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3428 }
3429 break;
3430 case 0x216: /* movshdup */
3431 if (mod != 3) {
0af10c86 3432 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876 3433 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19
FB
3434 } else {
3435 rm = (modrm & 7) | REX_B(s);
3436 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3437 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3438 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3439 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3440 }
3441 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3442 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3443 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3444 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3445 break;
d9f4bb27
AP
3446 case 0x178:
3447 case 0x378:
3448 {
3449 int bit_index, field_length;
3450
3451 if (b1 == 1 && reg != 0)
3452 goto illegal_op;
0af10c86
BS
3453 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3454 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
d9f4bb27
AP
3455 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3456 offsetof(CPUX86State,xmm_regs[reg]));
3457 if (b1 == 1)
d3eb5eae
BS
3458 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3459 tcg_const_i32(bit_index),
3460 tcg_const_i32(field_length));
d9f4bb27 3461 else
d3eb5eae
BS
3462 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3463 tcg_const_i32(bit_index),
3464 tcg_const_i32(field_length));
d9f4bb27
AP
3465 }
3466 break;
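/* These are the SSE4a EXTRQ (66 0f 78) and INSERTQ (f2 0f 78) immediate
   forms: the two immediate bytes read above give the field length and the
   starting bit index, each masked to 6 bits before being passed to the
   helper. */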
664e0f19 3467 case 0x7e: /* movd ea, mm */
dabd98dd
FB
3468#ifdef TARGET_X86_64
3469 if (s->dflag == 2) {
5af45186
FB
3470 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3471 offsetof(CPUX86State,fpregs[reg].mmx));
4ba9938c 3472 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
5fafdf24 3473 } else
dabd98dd
FB
3474#endif
3475 {
5af45186
FB
3476 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3477 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
4ba9938c 3478 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
dabd98dd 3479 }
664e0f19
FB
3480 break;
3481 case 0x17e: /* movd ea, xmm */
dabd98dd
FB
3482#ifdef TARGET_X86_64
3483 if (s->dflag == 2) {
5af45186
FB
3484 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3485 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
4ba9938c 3486 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
5fafdf24 3487 } else
dabd98dd
FB
3488#endif
3489 {
5af45186
FB
3490 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3491 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
4ba9938c 3492 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
dabd98dd 3493 }
664e0f19
FB
3494 break;
3495 case 0x27e: /* movq xmm, ea */
3496 if (mod != 3) {
0af10c86 3497 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876
RH
3498 gen_ldq_env_A0(s, offsetof(CPUX86State,
3499 xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3500 } else {
3501 rm = (modrm & 7) | REX_B(s);
3502 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3503 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3504 }
3505 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3506 break;
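/* F3 0F 7E moves one quadword into the low half of the XMM destination
   and clears the upper quadword, as done just above. */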
3507 case 0x7f: /* movq ea, mm */
3508 if (mod != 3) {
0af10c86 3509 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876 3510 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
664e0f19
FB
3511 } else {
3512 rm = (modrm & 7);
3513 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3514 offsetof(CPUX86State,fpregs[reg].mmx));
3515 }
3516 break;
3517 case 0x011: /* movups */
3518 case 0x111: /* movupd */
3519 case 0x029: /* movaps */
3520 case 0x129: /* movapd */
3521 case 0x17f: /* movdqa ea, xmm */
3522 case 0x27f: /* movdqu ea, xmm */
3523 if (mod != 3) {
0af10c86 3524 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876 3525 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19
FB
3526 } else {
3527 rm = (modrm & 7) | REX_B(s);
3528 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3529 offsetof(CPUX86State,xmm_regs[reg]));
3530 }
3531 break;
3532 case 0x211: /* movss ea, xmm */
3533 if (mod != 3) {
0af10c86 3534 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
651ba608 3535 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
4ba9938c 3536 gen_op_st_T0_A0(s, MO_32);
664e0f19
FB
3537 } else {
3538 rm = (modrm & 7) | REX_B(s);
3539 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3540 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3541 }
3542 break;
3543 case 0x311: /* movsd ea, xmm */
3544 if (mod != 3) {
0af10c86 3545 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876
RH
3546 gen_stq_env_A0(s, offsetof(CPUX86State,
3547 xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3548 } else {
3549 rm = (modrm & 7) | REX_B(s);
3550 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3551 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3552 }
3553 break;
3554 case 0x013: /* movlps */
3555 case 0x113: /* movlpd */
3556 if (mod != 3) {
0af10c86 3557 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876
RH
3558 gen_stq_env_A0(s, offsetof(CPUX86State,
3559 xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3560 } else {
3561 goto illegal_op;
3562 }
3563 break;
3564 case 0x017: /* movhps */
3565 case 0x117: /* movhpd */
3566 if (mod != 3) {
0af10c86 3567 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876
RH
3568 gen_stq_env_A0(s, offsetof(CPUX86State,
3569 xmm_regs[reg].XMM_Q(1)));
664e0f19
FB
3570 } else {
3571 goto illegal_op;
3572 }
3573 break;
3574 case 0x71: /* shift mm, im */
3575 case 0x72:
3576 case 0x73:
3577 case 0x171: /* shift xmm, im */
3578 case 0x172:
3579 case 0x173:
c045af25
AK
3580 if (b1 >= 2) {
3581 goto illegal_op;
3582 }
0af10c86 3583 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3584 if (is_xmm) {
3585 gen_op_movl_T0_im(val);
651ba608 3586 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
664e0f19 3587 gen_op_movl_T0_0();
651ba608 3588 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
664e0f19
FB
3589 op1_offset = offsetof(CPUX86State,xmm_t0);
3590 } else {
3591 gen_op_movl_T0_im(val);
651ba608 3592 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
664e0f19 3593 gen_op_movl_T0_0();
651ba608 3594 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
664e0f19
FB
3595 op1_offset = offsetof(CPUX86State,mmx_t0);
3596 }
d3eb5eae
BS
3597 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3598 (((modrm >> 3)) & 7)][b1];
3599 if (!sse_fn_epp) {
664e0f19 3600 goto illegal_op;
c4baa050 3601 }
664e0f19
FB
3602 if (is_xmm) {
3603 rm = (modrm & 7) | REX_B(s);
3604 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3605 } else {
3606 rm = (modrm & 7);
3607 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3608 }
5af45186
FB
3609 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3610 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
d3eb5eae 3611 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3612 break;
3613 case 0x050: /* movmskps */
664e0f19 3614 rm = (modrm & 7) | REX_B(s);
5af45186
FB
3615 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3616 offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3617 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
b6abf97d 3618 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4ba9938c 3619 gen_op_mov_reg_T0(MO_32, reg);
664e0f19
FB
3620 break;
3621 case 0x150: /* movmskpd */
664e0f19 3622 rm = (modrm & 7) | REX_B(s);
5af45186
FB
3623 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3624 offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3625 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
b6abf97d 3626 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4ba9938c 3627 gen_op_mov_reg_T0(MO_32, reg);
664e0f19
FB
3628 break;
3629 case 0x02a: /* cvtpi2ps */
3630 case 0x12a: /* cvtpi2pd */
d3eb5eae 3631 gen_helper_enter_mmx(cpu_env);
664e0f19 3632 if (mod != 3) {
0af10c86 3633 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
664e0f19 3634 op2_offset = offsetof(CPUX86State,mmx_t0);
323d1876 3635 gen_ldq_env_A0(s, op2_offset);
664e0f19
FB
3636 } else {
3637 rm = (modrm & 7);
3638 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3639 }
3640 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
5af45186
FB
3641 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3642 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
664e0f19
FB
3643 switch(b >> 8) {
3644 case 0x0:
d3eb5eae 3645 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3646 break;
3647 default:
3648 case 0x1:
d3eb5eae 3649 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3650 break;
3651 }
3652 break;
3653 case 0x22a: /* cvtsi2ss */
3654 case 0x32a: /* cvtsi2sd */
4ba9938c 3655 ot = (s->dflag == 2) ? MO_64 : MO_32;
0af10c86 3656 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
664e0f19 3657 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
5af45186 3658 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4ba9938c 3659 if (ot == MO_32) {
d3eb5eae 3660 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
28e10711 3661 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 3662 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
28e10711 3663 } else {
11f8cdbc 3664#ifdef TARGET_X86_64
d3eb5eae
BS
3665 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3666 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
11f8cdbc
SW
3667#else
3668 goto illegal_op;
3669#endif
28e10711 3670 }
664e0f19
FB
3671 break;
3672 case 0x02c: /* cvttps2pi */
3673 case 0x12c: /* cvttpd2pi */
3674 case 0x02d: /* cvtps2pi */
3675 case 0x12d: /* cvtpd2pi */
d3eb5eae 3676 gen_helper_enter_mmx(cpu_env);
664e0f19 3677 if (mod != 3) {
0af10c86 3678 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
664e0f19 3679 op2_offset = offsetof(CPUX86State,xmm_t0);
323d1876 3680 gen_ldo_env_A0(s, op2_offset);
664e0f19
FB
3681 } else {
3682 rm = (modrm & 7) | REX_B(s);
3683 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3684 }
3685 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
5af45186
FB
3686 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3687 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
664e0f19
FB
3688 switch(b) {
3689 case 0x02c:
d3eb5eae 3690 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3691 break;
3692 case 0x12c:
d3eb5eae 3693 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3694 break;
3695 case 0x02d:
d3eb5eae 3696 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3697 break;
3698 case 0x12d:
d3eb5eae 3699 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3700 break;
3701 }
3702 break;
3703 case 0x22c: /* cvttss2si */
3704 case 0x32c: /* cvttsd2si */
3705 case 0x22d: /* cvtss2si */
3706 case 0x32d: /* cvtsd2si */
4ba9938c 3707 ot = (s->dflag == 2) ? MO_64 : MO_32;
31313213 3708 if (mod != 3) {
0af10c86 3709 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
31313213 3710 if ((b >> 8) & 1) {
323d1876 3711 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_Q(0)));
31313213 3712 } else {
4ba9938c 3713 gen_op_ld_T0_A0(s, MO_32);
651ba608 3714 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
31313213
FB
3715 }
3716 op2_offset = offsetof(CPUX86State,xmm_t0);
3717 } else {
3718 rm = (modrm & 7) | REX_B(s);
3719 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3720 }
5af45186 3721 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
4ba9938c 3722 if (ot == MO_32) {
d3eb5eae 3723 SSEFunc_i_ep sse_fn_i_ep =
bedc2ac1 3724 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
d3eb5eae 3725 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
b6abf97d 3726 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5af45186 3727 } else {
11f8cdbc 3728#ifdef TARGET_X86_64
d3eb5eae 3729 SSEFunc_l_ep sse_fn_l_ep =
bedc2ac1 3730 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
d3eb5eae 3731 sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
11f8cdbc
SW
3732#else
3733 goto illegal_op;
3734#endif
5af45186 3735 }
57fec1fe 3736 gen_op_mov_reg_T0(ot, reg);
664e0f19
FB
3737 break;
3738 case 0xc4: /* pinsrw */
5fafdf24 3739 case 0x1c4:
d1e42c5c 3740 s->rip_offset = 1;
4ba9938c 3741 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
0af10c86 3742 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3743 if (b1) {
3744 val &= 7;
5af45186
FB
3745 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3746 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
664e0f19
FB
3747 } else {
3748 val &= 3;
5af45186
FB
3749 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3750 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
664e0f19
FB
3751 }
3752 break;
3753 case 0xc5: /* pextrw */
5fafdf24 3754 case 0x1c5:
664e0f19
FB
3755 if (mod != 3)
3756 goto illegal_op;
4ba9938c 3757 ot = (s->dflag == 2) ? MO_64 : MO_32;
0af10c86 3758 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3759 if (b1) {
3760 val &= 7;
3761 rm = (modrm & 7) | REX_B(s);
5af45186
FB
3762 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3763 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
664e0f19
FB
3764 } else {
3765 val &= 3;
3766 rm = (modrm & 7);
5af45186
FB
3767 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3768 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
664e0f19
FB
3769 }
3770 reg = ((modrm >> 3) & 7) | rex_r;
6dc2d0da 3771 gen_op_mov_reg_T0(ot, reg);
664e0f19
FB
3772 break;
3773 case 0x1d6: /* movq ea, xmm */
3774 if (mod != 3) {
0af10c86 3775 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876
RH
3776 gen_stq_env_A0(s, offsetof(CPUX86State,
3777 xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3778 } else {
3779 rm = (modrm & 7) | REX_B(s);
3780 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3781 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3782 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3783 }
3784 break;
3785 case 0x2d6: /* movq2dq */
d3eb5eae 3786 gen_helper_enter_mmx(cpu_env);
480c1cdb
FB
3787 rm = (modrm & 7);
3788 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3789 offsetof(CPUX86State,fpregs[rm].mmx));
3790 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
664e0f19
FB
3791 break;
3792 case 0x3d6: /* movdq2q */
d3eb5eae 3793 gen_helper_enter_mmx(cpu_env);
480c1cdb
FB
3794 rm = (modrm & 7) | REX_B(s);
3795 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3796 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
664e0f19
FB
3797 break;
3798 case 0xd7: /* pmovmskb */
3799 case 0x1d7:
3800 if (mod != 3)
3801 goto illegal_op;
3802 if (b1) {
3803 rm = (modrm & 7) | REX_B(s);
5af45186 3804 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3805 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
664e0f19
FB
3806 } else {
3807 rm = (modrm & 7);
5af45186 3808 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
d3eb5eae 3809 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
664e0f19 3810 }
b6abf97d 3811 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
664e0f19 3812 reg = ((modrm >> 3) & 7) | rex_r;
4ba9938c 3813 gen_op_mov_reg_T0(MO_32, reg);
664e0f19 3814 break;
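/* PMOVMSKB gathers the most significant bit of each byte of the MMX or
   XMM source into the low bits of a general-purpose register. */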
111994ee 3815
4242b1bd 3816 case 0x138:
000cacf6 3817 case 0x038:
4242b1bd 3818 b = modrm;
111994ee
RH
3819 if ((b & 0xf0) == 0xf0) {
3820 goto do_0f_38_fx;
3821 }
0af10c86 3822 modrm = cpu_ldub_code(env, s->pc++);
4242b1bd
AZ
3823 rm = modrm & 7;
3824 reg = ((modrm >> 3) & 7) | rex_r;
3825 mod = (modrm >> 6) & 3;
c045af25
AK
3826 if (b1 >= 2) {
3827 goto illegal_op;
3828 }
4242b1bd 3829
d3eb5eae
BS
3830 sse_fn_epp = sse_op_table6[b].op[b1];
3831 if (!sse_fn_epp) {
4242b1bd 3832 goto illegal_op;
c4baa050 3833 }
222a3336
AZ
3834 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3835 goto illegal_op;
4242b1bd
AZ
3836
3837 if (b1) {
3838 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3839 if (mod == 3) {
3840 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3841 } else {
3842 op2_offset = offsetof(CPUX86State,xmm_t0);
0af10c86 3843 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
222a3336
AZ
3844 switch (b) {
3845 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3846 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3847 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
323d1876 3848 gen_ldq_env_A0(s, op2_offset +
222a3336
AZ
3849 offsetof(XMMReg, XMM_Q(0)));
3850 break;
3851 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3852 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3c5f4116
RH
3853 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
3854 s->mem_index, MO_LEUL);
222a3336
AZ
3855 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3856 offsetof(XMMReg, XMM_L(0)));
3857 break;
3858 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3c5f4116
RH
3859 tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
3860 s->mem_index, MO_LEUW);
222a3336
AZ
3861 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3862 offsetof(XMMReg, XMM_W(0)));
3863 break;
3864 case 0x2a: /* movntdqa */
323d1876 3865 gen_ldo_env_A0(s, op1_offset);
222a3336
AZ
3866 return;
3867 default:
323d1876 3868 gen_ldo_env_A0(s, op2_offset);
222a3336 3869 }
4242b1bd
AZ
3870 }
3871 } else {
3872 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3873 if (mod == 3) {
3874 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3875 } else {
3876 op2_offset = offsetof(CPUX86State,mmx_t0);
0af10c86 3877 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876 3878 gen_ldq_env_A0(s, op2_offset);
4242b1bd
AZ
3879 }
3880 }
d3eb5eae 3881 if (sse_fn_epp == SSE_SPECIAL) {
222a3336 3882 goto illegal_op;
c4baa050 3883 }
222a3336 3884
4242b1bd
AZ
3885 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3886 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 3887 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
222a3336 3888
3ca51d07
RH
3889 if (b == 0x17) {
3890 set_cc_op(s, CC_OP_EFLAGS);
3891 }
4242b1bd 3892 break;
111994ee
RH
3893
3894 case 0x238:
3895 case 0x338:
3896 do_0f_38_fx:
3897 /* Various integer extensions at 0f 38 f[0-f]. */
3898 b = modrm | (b1 << 8);
0af10c86 3899 modrm = cpu_ldub_code(env, s->pc++);
222a3336
AZ
3900 reg = ((modrm >> 3) & 7) | rex_r;
3901
111994ee
RH
3902 switch (b) {
3903 case 0x3f0: /* crc32 Gd,Eb */
3904 case 0x3f1: /* crc32 Gd,Ey */
3905 do_crc32:
3906 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3907 goto illegal_op;
3908 }
3909 if ((b & 0xff) == 0xf0) {
4ba9938c 3910 ot = MO_8;
111994ee 3911 } else if (s->dflag != 2) {
4ba9938c 3912 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
111994ee 3913 } else {
4ba9938c 3914 ot = MO_64;
111994ee 3915 }
4242b1bd 3916
4ba9938c 3917 gen_op_mov_TN_reg(MO_32, 0, reg);
111994ee
RH
3918 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3919 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3920 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3921 cpu_T[0], tcg_const_i32(8 << ot));
222a3336 3922
4ba9938c 3923 ot = (s->dflag == 2) ? MO_64 : MO_32;
111994ee
RH
3924 gen_op_mov_reg_T0(ot, reg);
3925 break;
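/* CRC32 accumulates the CRC-32C (Castagnoli) checksum of the source
   operand into the destination register; the 8 << ot argument tells the
   helper how many bits of source data to consume. */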
222a3336 3926
111994ee
RH
3927 case 0x1f0: /* crc32 or movbe */
3928 case 0x1f1:
3929 /* For these insns, the f2 prefix is supposed to have priority
3930 over the 66 prefix, but that is not what the code setting b1
3931 above implements. */
3932 if (s->prefix & PREFIX_REPNZ) {
3933 goto do_crc32;
3934 }
3935 /* FALLTHRU */
3936 case 0x0f0: /* movbe Gy,My */
3937 case 0x0f1: /* movbe My,Gy */
3938 if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3939 goto illegal_op;
3940 }
3941 if (s->dflag != 2) {
4ba9938c 3942 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
111994ee 3943 } else {
4ba9938c 3944 ot = MO_64;
111994ee
RH
3945 }
3946
3947 /* Load the data incoming to the bswap. Note that the TCG
3948 implementation of bswap requires the input be zero
3949 extended. In the case of the loads, we simply know that
3950 gen_op_ld_v via gen_ldst_modrm does that already. */
3951 if ((b & 1) == 0) {
3952 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3953 } else {
3954 switch (ot) {
4ba9938c 3955 case MO_16:
111994ee
RH
3956 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[reg]);
3957 break;
3958 default:
3959 tcg_gen_ext32u_tl(cpu_T[0], cpu_regs[reg]);
3960 break;
4ba9938c 3961 case MO_64:
111994ee
RH
3962 tcg_gen_mov_tl(cpu_T[0], cpu_regs[reg]);
3963 break;
3964 }
3965 }
3966
3967 switch (ot) {
4ba9938c 3968 case MO_16:
111994ee
RH
3969 tcg_gen_bswap16_tl(cpu_T[0], cpu_T[0]);
3970 break;
3971 default:
3972 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
3973 break;
3974#ifdef TARGET_X86_64
4ba9938c 3975 case MO_64:
111994ee
RH
3976 tcg_gen_bswap64_tl(cpu_T[0], cpu_T[0]);
3977 break;
3978#endif
3979 }
3980
3981 if ((b & 1) == 0) {
3982 gen_op_mov_reg_T0(ot, reg);
3983 } else {
3984 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
3985 }
3986 break;
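/* MOVBE is a move combined with a byte swap: loading the memory bytes
   11 22 33 44 into a 32-bit register yields 0x11223344 rather than the
   usual little-endian 0x44332211. */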
3987
7073fbad
RH
3988 case 0x0f2: /* andn Gy, By, Ey */
3989 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3990 || !(s->prefix & PREFIX_VEX)
3991 || s->vex_l != 0) {
3992 goto illegal_op;
3993 }
4ba9938c 3994 ot = s->dflag == 2 ? MO_64 : MO_32;
7073fbad
RH
3995 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3996 tcg_gen_andc_tl(cpu_T[0], cpu_regs[s->vex_v], cpu_T[0]);
3997 gen_op_mov_reg_T0(ot, reg);
3998 gen_op_update1_cc();
3999 set_cc_op(s, CC_OP_LOGICB + ot);
4000 break;
4001
c7ab7565
RH
4002 case 0x0f7: /* bextr Gy, Ey, By */
4003 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4004 || !(s->prefix & PREFIX_VEX)
4005 || s->vex_l != 0) {
4006 goto illegal_op;
4007 }
4ba9938c 4008 ot = s->dflag == 2 ? MO_64 : MO_32;
c7ab7565
RH
4009 {
4010 TCGv bound, zero;
4011
4012 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4013 /* Extract START, and shift the operand.
4014 Shifts larger than operand size get zeros. */
4015 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
4016 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_A0);
4017
4ba9938c 4018 bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
c7ab7565
RH
4019 zero = tcg_const_tl(0);
4020 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T[0], cpu_A0, bound,
4021 cpu_T[0], zero);
4022 tcg_temp_free(zero);
4023
4024 /* Extract the LEN into a mask. Lengths larger than
4025 operand size get all ones. */
4026 tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
4027 tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
4028 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
4029 cpu_A0, bound);
4030 tcg_temp_free(bound);
4031 tcg_gen_movi_tl(cpu_T[1], 1);
4032 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_A0);
4033 tcg_gen_subi_tl(cpu_T[1], cpu_T[1], 1);
4034 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4035
4036 gen_op_mov_reg_T0(ot, reg);
4037 gen_op_update1_cc();
4038 set_cc_op(s, CC_OP_LOGICB + ot);
4039 }
4040 break;
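/* BEXTR takes the start bit from By[7:0] and the field length from
   By[15:8]; e.g. with By = 0x0408 the result is (Ey >> 8) & 0xf. */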
4041
02ea1e6b
RH
4042 case 0x0f5: /* bzhi Gy, Ey, By */
4043 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4044 || !(s->prefix & PREFIX_VEX)
4045 || s->vex_l != 0) {
4046 goto illegal_op;
4047 }
4ba9938c 4048 ot = s->dflag == 2 ? MO_64 : MO_32;
02ea1e6b
RH
4049 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4050 tcg_gen_ext8u_tl(cpu_T[1], cpu_regs[s->vex_v]);
4051 {
4ba9938c 4052 TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
02ea1e6b
RH
4053 /* Note that since we're using BMILG (in order to get O
4054 cleared) we need to store the inverse into C. */
4055 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
4056 cpu_T[1], bound);
4057 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T[1], cpu_T[1],
4058 bound, bound, cpu_T[1]);
4059 tcg_temp_free(bound);
4060 }
4061 tcg_gen_movi_tl(cpu_A0, -1);
4062 tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T[1]);
4063 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_A0);
4064 gen_op_mov_reg_T0(ot, reg);
4065 gen_op_update1_cc();
4066 set_cc_op(s, CC_OP_BMILGB + ot);
4067 break;
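/* BZHI copies Ey and zeroes every bit at position By[7:0] and above. */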
4068
5f1f4b17
RH
4069 case 0x3f6: /* mulx By, Gy, rdx, Ey */
4070 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4071 || !(s->prefix & PREFIX_VEX)
4072 || s->vex_l != 0) {
4073 goto illegal_op;
4074 }
4ba9938c 4075 ot = s->dflag == 2 ? MO_64 : MO_32;
5f1f4b17
RH
4076 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4077 switch (ot) {
5f1f4b17 4078 default:
a4bcea3d
RH
4079 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4080 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
4081 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4082 cpu_tmp2_i32, cpu_tmp3_i32);
4083 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
4084 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
5f1f4b17
RH
4085 break;
4086#ifdef TARGET_X86_64
4ba9938c 4087 case MO_64:
a4bcea3d
RH
4088 tcg_gen_mulu2_i64(cpu_regs[s->vex_v], cpu_regs[reg],
4089 cpu_T[0], cpu_regs[R_EDX]);
5f1f4b17
RH
4090 break;
4091#endif
4092 }
4093 break;
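/* MULX is an unsigned widening multiply of rDX by the r/m operand: the
   low half goes to the vvvv register and the high half to the ModRM reg
   field, and no flags are modified. */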
4094
0592f74a
RH
4095 case 0x3f5: /* pdep Gy, By, Ey */
4096 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4097 || !(s->prefix & PREFIX_VEX)
4098 || s->vex_l != 0) {
4099 goto illegal_op;
4100 }
4ba9938c 4101 ot = s->dflag == 2 ? MO_64 : MO_32;
0592f74a
RH
4102 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4103 /* Note that by zero-extending the mask operand, we
4104 automatically handle zero-extending the result. */
4105 if (s->dflag == 2) {
4106 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
4107 } else {
4108 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
4109 }
4110 gen_helper_pdep(cpu_regs[reg], cpu_T[0], cpu_T[1]);
4111 break;
4112
4113 case 0x2f5: /* pext Gy, By, Ey */
4114 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4115 || !(s->prefix & PREFIX_VEX)
4116 || s->vex_l != 0) {
4117 goto illegal_op;
4118 }
4ba9938c 4119 ot = s->dflag == 2 ? MO_64 : MO_32;
0592f74a
RH
4120 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4121 /* Note that by zero-extending the mask operand, we
4122 automatically handle zero-extending the result. */
4123 if (s->dflag == 2) {
4124 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
4125 } else {
4126 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
4127 }
4128 gen_helper_pext(cpu_regs[reg], cpu_T[0], cpu_T[1]);
4129 break;
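/* PDEP scatters the contiguous low-order bits of the value operand into
   the bit positions selected by the mask; PEXT is the inverse, gathering
   the masked bits back into the low-order bits. For example, depositing
   the value 0b11 with mask 0b1010 gives 0b1010, and extracting with the
   same mask recovers 0b11. */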
4130
cd7f97ca
RH
4131 case 0x1f6: /* adcx Gy, Ey */
4132 case 0x2f6: /* adox Gy, Ey */
4133 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
4134 goto illegal_op;
4135 } else {
76f13133 4136 TCGv carry_in, carry_out, zero;
cd7f97ca
RH
4137 int end_op;
4138
4ba9938c 4139 ot = (s->dflag == 2 ? MO_64 : MO_32);
cd7f97ca
RH
4140 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4141
4142 /* Re-use the carry-out from a previous round. */
4143 TCGV_UNUSED(carry_in);
4144 carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
4145 switch (s->cc_op) {
4146 case CC_OP_ADCX:
4147 if (b == 0x1f6) {
4148 carry_in = cpu_cc_dst;
4149 end_op = CC_OP_ADCX;
4150 } else {
4151 end_op = CC_OP_ADCOX;
4152 }
4153 break;
4154 case CC_OP_ADOX:
4155 if (b == 0x1f6) {
4156 end_op = CC_OP_ADCOX;
4157 } else {
4158 carry_in = cpu_cc_src2;
4159 end_op = CC_OP_ADOX;
4160 }
4161 break;
4162 case CC_OP_ADCOX:
4163 end_op = CC_OP_ADCOX;
4164 carry_in = carry_out;
4165 break;
4166 default:
c53de1a2 4167 end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
cd7f97ca
RH
4168 break;
4169 }
4170 /* If we can't reuse carry-out, get it out of EFLAGS. */
4171 if (TCGV_IS_UNUSED(carry_in)) {
4172 if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
4173 gen_compute_eflags(s);
4174 }
4175 carry_in = cpu_tmp0;
4176 tcg_gen_shri_tl(carry_in, cpu_cc_src,
4177 ctz32(b == 0x1f6 ? CC_C : CC_O));
4178 tcg_gen_andi_tl(carry_in, carry_in, 1);
4179 }
4180
4181 switch (ot) {
4182#ifdef TARGET_X86_64
4ba9938c 4183 case MO_32:
cd7f97ca
RH
4184 /* If we know TL is 64-bit, and we want a 32-bit
4185 result, just do everything in 64-bit arithmetic. */
4186 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
4187 tcg_gen_ext32u_i64(cpu_T[0], cpu_T[0]);
4188 tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_regs[reg]);
4189 tcg_gen_add_i64(cpu_T[0], cpu_T[0], carry_in);
4190 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T[0]);
4191 tcg_gen_shri_i64(carry_out, cpu_T[0], 32);
4192 break;
4193#endif
4194 default:
4195 /* Otherwise compute the carry-out in two steps. */
76f13133
RH
4196 zero = tcg_const_tl(0);
4197 tcg_gen_add2_tl(cpu_T[0], carry_out,
4198 cpu_T[0], zero,
4199 carry_in, zero);
4200 tcg_gen_add2_tl(cpu_regs[reg], carry_out,
4201 cpu_regs[reg], carry_out,
4202 cpu_T[0], zero);
4203 tcg_temp_free(zero);
cd7f97ca
RH
4204 break;
4205 }
cd7f97ca
RH
4206 set_cc_op(s, end_op);
4207 }
4208 break;
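/* ADCX and ADOX are add-with-carry variants that use only CF and only OF
   respectively as the carry bit, so two independent carry chains (as in
   multi-precision multiplication) can be interleaved; the ADCX/ADOX/ADCOX
   cc_op states above track which flag currently holds a live carry. */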
4209
4a554890
RH
4210 case 0x1f7: /* shlx Gy, Ey, By */
4211 case 0x2f7: /* sarx Gy, Ey, By */
4212 case 0x3f7: /* shrx Gy, Ey, By */
4213 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4214 || !(s->prefix & PREFIX_VEX)
4215 || s->vex_l != 0) {
4216 goto illegal_op;
4217 }
4ba9938c 4218 ot = (s->dflag == 2 ? MO_64 : MO_32);
4a554890 4219 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4ba9938c 4220 if (ot == MO_64) {
4a554890
RH
4221 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 63);
4222 } else {
4223 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 31);
4224 }
4225 if (b == 0x1f7) {
4226 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4227 } else if (b == 0x2f7) {
4ba9938c 4228 if (ot != MO_64) {
4a554890
RH
4229 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4230 }
4231 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4232 } else {
4ba9938c 4233 if (ot != MO_64) {
4a554890
RH
4234 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4235 }
4236 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4237 }
4238 gen_op_mov_reg_T0(ot, reg);
4239 break;
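/* SHLX/SARX/SHRX shift by the count held in the vvvv register, masked to
   5 or 6 bits as above, and unlike the legacy shift instructions they
   leave EFLAGS untouched. */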
4240
bc4b43dc
RH
4241 case 0x0f3:
4242 case 0x1f3:
4243 case 0x2f3:
4244 case 0x3f3: /* Group 17 */
4245 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4246 || !(s->prefix & PREFIX_VEX)
4247 || s->vex_l != 0) {
4248 goto illegal_op;
4249 }
4ba9938c 4250 ot = s->dflag == 2 ? MO_64 : MO_32;
bc4b43dc
RH
4251 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4252
4253 switch (reg & 7) {
4254 case 1: /* blsr By,Ey */
4255 tcg_gen_neg_tl(cpu_T[1], cpu_T[0]);
4256 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4257 gen_op_mov_reg_T0(ot, s->vex_v);
4258 gen_op_update2_cc();
4259 set_cc_op(s, CC_OP_BMILGB + ot);
4260 break;
4261
4262 case 2: /* blsmsk By,Ey */
4263 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4264 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4265 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4266 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4267 set_cc_op(s, CC_OP_BMILGB + ot);
4268 break;
4269
4270 case 3: /* blsi By, Ey */
4271 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4272 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4273 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4274 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4275 set_cc_op(s, CC_OP_BMILGB + ot);
4276 break;
4277
4278 default:
4279 goto illegal_op;
4280 }
4281 break;
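/* Architecturally the group 17 bit tricks are: BLSR clears the lowest set
   bit (x & (x - 1)), BLSMSK builds a mask up to and including the lowest
   set bit (x ^ (x - 1)), and BLSI isolates the lowest set bit (x & -x). */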
4282
111994ee
RH
4283 default:
4284 goto illegal_op;
4285 }
222a3336 4286 break;
111994ee 4287
222a3336
AZ
4288 case 0x03a:
4289 case 0x13a:
4242b1bd 4290 b = modrm;
0af10c86 4291 modrm = cpu_ldub_code(env, s->pc++);
4242b1bd
AZ
4292 rm = modrm & 7;
4293 reg = ((modrm >> 3) & 7) | rex_r;
4294 mod = (modrm >> 6) & 3;
c045af25
AK
4295 if (b1 >= 2) {
4296 goto illegal_op;
4297 }
4242b1bd 4298
d3eb5eae
BS
4299 sse_fn_eppi = sse_op_table7[b].op[b1];
4300 if (!sse_fn_eppi) {
4242b1bd 4301 goto illegal_op;
c4baa050 4302 }
222a3336
AZ
4303 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
4304 goto illegal_op;
4305
d3eb5eae 4306 if (sse_fn_eppi == SSE_SPECIAL) {
4ba9938c 4307 ot = (s->dflag == 2) ? MO_64 : MO_32;
222a3336
AZ
4308 rm = (modrm & 7) | REX_B(s);
4309 if (mod != 3)
0af10c86 4310 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
222a3336 4311 reg = ((modrm >> 3) & 7) | rex_r;
0af10c86 4312 val = cpu_ldub_code(env, s->pc++);
222a3336
AZ
4313 switch (b) {
4314 case 0x14: /* pextrb */
4315 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4316 xmm_regs[reg].XMM_B(val & 15)));
3523e4bd 4317 if (mod == 3) {
222a3336 4318 gen_op_mov_reg_T0(ot, rm);
3523e4bd
RH
4319 } else {
4320 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4321 s->mem_index, MO_UB);
4322 }
222a3336
AZ
4323 break;
4324 case 0x15: /* pextrw */
4325 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4326 xmm_regs[reg].XMM_W(val & 7)));
3523e4bd 4327 if (mod == 3) {
222a3336 4328 gen_op_mov_reg_T0(ot, rm);
3523e4bd
RH
4329 } else {
4330 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4331 s->mem_index, MO_LEUW);
4332 }
222a3336
AZ
4333 break;
4334 case 0x16:
4ba9938c 4335 if (ot == MO_32) { /* pextrd */
222a3336
AZ
4336 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4337 offsetof(CPUX86State,
4338 xmm_regs[reg].XMM_L(val & 3)));
a7812ae4 4339 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3523e4bd 4340 if (mod == 3) {
a7812ae4 4341 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
3523e4bd
RH
4342 } else {
4343 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4344 s->mem_index, MO_LEUL);
4345 }
222a3336 4346 } else { /* pextrq */
a7812ae4 4347#ifdef TARGET_X86_64
222a3336
AZ
4348 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4349 offsetof(CPUX86State,
4350 xmm_regs[reg].XMM_Q(val & 1)));
3523e4bd 4351 if (mod == 3) {
222a3336 4352 gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
3523e4bd
RH
4353 } else {
4354 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
4355 s->mem_index, MO_LEQ);
4356 }
a7812ae4
PB
4357#else
4358 goto illegal_op;
4359#endif
222a3336
AZ
4360 }
4361 break;
4362 case 0x17: /* extractps */
4363 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4364 xmm_regs[reg].XMM_L(val & 3)));
3523e4bd 4365 if (mod == 3) {
222a3336 4366 gen_op_mov_reg_T0(ot, rm);
3523e4bd
RH
4367 } else {
4368 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4369 s->mem_index, MO_LEUL);
4370 }
222a3336
AZ
4371 break;
4372 case 0x20: /* pinsrb */
3c5f4116 4373 if (mod == 3) {
4ba9938c 4374 gen_op_mov_TN_reg(MO_32, 0, rm);
3c5f4116
RH
4375 } else {
4376 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
4377 s->mem_index, MO_UB);
4378 }
34c6addd 4379 tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
222a3336
AZ
4380 xmm_regs[reg].XMM_B(val & 15)));
4381 break;
4382 case 0x21: /* insertps */
a7812ae4 4383 if (mod == 3) {
222a3336
AZ
4384 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4385 offsetof(CPUX86State,xmm_regs[rm]
4386 .XMM_L((val >> 6) & 3)));
a7812ae4 4387 } else {
3c5f4116
RH
4388 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4389 s->mem_index, MO_LEUL);
a7812ae4 4390 }
222a3336
AZ
4391 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4392 offsetof(CPUX86State,xmm_regs[reg]
4393 .XMM_L((val >> 4) & 3)));
4394 if ((val >> 0) & 1)
4395 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4396 cpu_env, offsetof(CPUX86State,
4397 xmm_regs[reg].XMM_L(0)));
4398 if ((val >> 1) & 1)
4399 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4400 cpu_env, offsetof(CPUX86State,
4401 xmm_regs[reg].XMM_L(1)));
4402 if ((val >> 2) & 1)
4403 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4404 cpu_env, offsetof(CPUX86State,
4405 xmm_regs[reg].XMM_L(2)));
4406 if ((val >> 3) & 1)
4407 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4408 cpu_env, offsetof(CPUX86State,
4409 xmm_regs[reg].XMM_L(3)));
4410 break;
4411 case 0x22:
4ba9938c 4412 if (ot == MO_32) { /* pinsrd */
3c5f4116 4413 if (mod == 3) {
a7812ae4 4414 gen_op_mov_v_reg(ot, cpu_tmp0, rm);
3c5f4116
RH
4415 } else {
4416 tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
4417 s->mem_index, MO_LEUL);
4418 }
a7812ae4 4419 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
222a3336
AZ
4420 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4421 offsetof(CPUX86State,
4422 xmm_regs[reg].XMM_L(val & 3)));
4423 } else { /* pinsrq */
a7812ae4 4424#ifdef TARGET_X86_64
3c5f4116 4425 if (mod == 3) {
222a3336 4426 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
3c5f4116
RH
4427 } else {
4428 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
4429 s->mem_index, MO_LEQ);
4430 }
222a3336
AZ
4431 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4432 offsetof(CPUX86State,
4433 xmm_regs[reg].XMM_Q(val & 1)));
a7812ae4
PB
4434#else
4435 goto illegal_op;
4436#endif
222a3336
AZ
4437 }
4438 break;
4439 }
4440 return;
4441 }
4242b1bd
AZ
4442
4443 if (b1) {
4444 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4445 if (mod == 3) {
4446 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4447 } else {
4448 op2_offset = offsetof(CPUX86State,xmm_t0);
0af10c86 4449 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876 4450 gen_ldo_env_A0(s, op2_offset);
4242b1bd
AZ
4451 }
4452 } else {
4453 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4454 if (mod == 3) {
4455 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4456 } else {
4457 op2_offset = offsetof(CPUX86State,mmx_t0);
0af10c86 4458 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876 4459 gen_ldq_env_A0(s, op2_offset);
4242b1bd
AZ
4460 }
4461 }
0af10c86 4462 val = cpu_ldub_code(env, s->pc++);
4242b1bd 4463
222a3336 4464 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
3ca51d07 4465 set_cc_op(s, CC_OP_EFLAGS);
222a3336
AZ
4466
4467 if (s->dflag == 2)
4468 /* The helper must use entire 64-bit gp registers */
4469 val |= 1 << 8;
4470 }
4471
4242b1bd
AZ
4472 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4473 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4474 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4242b1bd 4475 break;
e2c3c2c5
RH
4476
4477 case 0x33a:
4478 /* Various integer extensions at 0f 3a f[0-f]. */
4479 b = modrm | (b1 << 8);
4480 modrm = cpu_ldub_code(env, s->pc++);
4481 reg = ((modrm >> 3) & 7) | rex_r;
4482
4483 switch (b) {
4484 case 0x3f0: /* rorx Gy,Ey, Ib */
4485 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4486 || !(s->prefix & PREFIX_VEX)
4487 || s->vex_l != 0) {
4488 goto illegal_op;
4489 }
4ba9938c 4490 ot = s->dflag == 2 ? MO_64 : MO_32;
e2c3c2c5
RH
4491 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4492 b = cpu_ldub_code(env, s->pc++);
4ba9938c 4493 if (ot == MO_64) {
e2c3c2c5
RH
4494 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], b & 63);
4495 } else {
4496 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4497 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
4498 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4499 }
4500 gen_op_mov_reg_T0(ot, reg);
4501 break;
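/* RORX rotates right by an immediate count, masked to the operand width,
   and leaves the flags unchanged. */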
4502
4503 default:
4504 goto illegal_op;
4505 }
4506 break;
4507
664e0f19
FB
4508 default:
4509 goto illegal_op;
4510 }
4511 } else {
4512 /* generic MMX or SSE operation */
d1e42c5c 4513 switch(b) {
d1e42c5c
FB
4514 case 0x70: /* pshufx insn */
4515 case 0xc6: /* pshufx insn */
4516 case 0xc2: /* compare insns */
4517 s->rip_offset = 1;
4518 break;
4519 default:
4520 break;
664e0f19
FB
4521 }
4522 if (is_xmm) {
4523 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4524 if (mod != 3) {
0af10c86 4525 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
664e0f19 4526 op2_offset = offsetof(CPUX86State,xmm_t0);
480c1cdb 4527 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
664e0f19
FB
4528 b == 0xc2)) {
4529 /* specific case for SSE single instructions */
4530 if (b1 == 2) {
4531 /* 32 bit access */
4ba9938c 4532 gen_op_ld_T0_A0(s, MO_32);
651ba608 4533 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
664e0f19
FB
4534 } else {
4535 /* 64 bit access */
323d1876
RH
4536 gen_ldq_env_A0(s, offsetof(CPUX86State,
4537 xmm_t0.XMM_D(0)));
664e0f19
FB
4538 }
4539 } else {
323d1876 4540 gen_ldo_env_A0(s, op2_offset);
664e0f19
FB
4541 }
4542 } else {
4543 rm = (modrm & 7) | REX_B(s);
4544 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4545 }
4546 } else {
4547 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4548 if (mod != 3) {
0af10c86 4549 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
664e0f19 4550 op2_offset = offsetof(CPUX86State,mmx_t0);
323d1876 4551 gen_ldq_env_A0(s, op2_offset);
664e0f19
FB
4552 } else {
4553 rm = (modrm & 7);
4554 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4555 }
4556 }
4557 switch(b) {
a35f3ec7 4558 case 0x0f: /* 3DNow! data insns */
e771edab
AJ
4559 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4560 goto illegal_op;
0af10c86 4561 val = cpu_ldub_code(env, s->pc++);
d3eb5eae
BS
4562 sse_fn_epp = sse_op_table5[val];
4563 if (!sse_fn_epp) {
a35f3ec7 4564 goto illegal_op;
c4baa050 4565 }
5af45186
FB
4566 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4567 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4568 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
a35f3ec7 4569 break;
664e0f19
FB
4570 case 0x70: /* pshufx insn */
4571 case 0xc6: /* pshufx insn */
0af10c86 4572 val = cpu_ldub_code(env, s->pc++);
5af45186
FB
4573 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4574 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
c4baa050 4575 /* XXX: introduce a new table? */
d3eb5eae 4576 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
c4baa050 4577 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
664e0f19
FB
4578 break;
4579 case 0xc2:
4580 /* compare insns */
0af10c86 4581 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
4582 if (val >= 8)
4583 goto illegal_op;
d3eb5eae 4584 sse_fn_epp = sse_op_table4[val][b1];
c4baa050 4585
5af45186
FB
4586 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4587 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4588 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19 4589 break;
b8b6a50b
FB
4590 case 0xf7:
4591 /* maskmov : we must prepare A0 */
4592 if (mod != 3)
4593 goto illegal_op;
4594#ifdef TARGET_X86_64
4595 if (s->aflag == 2) {
4596 gen_op_movq_A0_reg(R_EDI);
4597 } else
4598#endif
4599 {
4600 gen_op_movl_A0_reg(R_EDI);
4601 if (s->aflag == 0)
4602 gen_op_andl_A0_ffff();
4603 }
4604 gen_add_A0_ds_seg(s);
4605
4606 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4607 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
c4baa050 4608 /* XXX: introduce a new table? */
d3eb5eae
BS
4609 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4610 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
b8b6a50b 4611 break;
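/* MASKMOVQ/MASKMOVDQU store the bytes selected by the mask operand to the
   implicit destination [DS:rDI], which is why A0 is built from EDI here
   instead of from the ModRM byte. */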
664e0f19 4612 default:
5af45186
FB
4613 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4614 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4615 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
4616 break;
4617 }
4618 if (b == 0x2e || b == 0x2f) {
3ca51d07 4619 set_cc_op(s, CC_OP_EFLAGS);
664e0f19
FB
4620 }
4621 }
4622}
4623
2c0262af
FB
4624/* convert one instruction. s->is_jmp is set if the translation must
4625 be stopped. Return the next pc value */
0af10c86
BS
4626static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4627 target_ulong pc_start)
2c0262af
FB
4628{
4629 int b, prefixes, aflag, dflag;
4630 int shift, ot;
4631 int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
14ce26e7
FB
4632 target_ulong next_eip, tval;
4633 int rex_w, rex_r;
2c0262af 4634
fdefe51c 4635 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
70cff25e 4636 tcg_gen_debug_insn_start(pc_start);
fdefe51c 4637 }
2c0262af
FB
4638 s->pc = pc_start;
4639 prefixes = 0;
2c0262af 4640 s->override = -1;
14ce26e7
FB
4641 rex_w = -1;
4642 rex_r = 0;
4643#ifdef TARGET_X86_64
4644 s->rex_x = 0;
4645 s->rex_b = 0;
5fafdf24 4646 x86_64_hregs = 0;
14ce26e7
FB
4647#endif
4648 s->rip_offset = 0; /* for relative ip address */
701ed211
RH
4649 s->vex_l = 0;
4650 s->vex_v = 0;
2c0262af 4651 next_byte:
0af10c86 4652 b = cpu_ldub_code(env, s->pc);
2c0262af 4653 s->pc++;
4a6fd938
RH
4654 /* Collect prefixes. */
4655 switch (b) {
4656 case 0xf3:
4657 prefixes |= PREFIX_REPZ;
4658 goto next_byte;
4659 case 0xf2:
4660 prefixes |= PREFIX_REPNZ;
4661 goto next_byte;
4662 case 0xf0:
4663 prefixes |= PREFIX_LOCK;
4664 goto next_byte;
4665 case 0x2e:
4666 s->override = R_CS;
4667 goto next_byte;
4668 case 0x36:
4669 s->override = R_SS;
4670 goto next_byte;
4671 case 0x3e:
4672 s->override = R_DS;
4673 goto next_byte;
4674 case 0x26:
4675 s->override = R_ES;
4676 goto next_byte;
4677 case 0x64:
4678 s->override = R_FS;
4679 goto next_byte;
4680 case 0x65:
4681 s->override = R_GS;
4682 goto next_byte;
4683 case 0x66:
4684 prefixes |= PREFIX_DATA;
4685 goto next_byte;
4686 case 0x67:
4687 prefixes |= PREFIX_ADR;
4688 goto next_byte;
14ce26e7 4689#ifdef TARGET_X86_64
4a6fd938
RH
4690 case 0x40 ... 0x4f:
4691 if (CODE64(s)) {
14ce26e7
FB
4692 /* REX prefix */
4693 rex_w = (b >> 3) & 1;
4694 rex_r = (b & 0x4) << 1;
4695 s->rex_x = (b & 0x2) << 2;
4696 REX_B(s) = (b & 0x1) << 3;
4697 x86_64_hregs = 1; /* select uniform byte register addressing */
4698 goto next_byte;
4699 }
4a6fd938
RH
4700 break;
4701#endif
701ed211
RH
4702 case 0xc5: /* 2-byte VEX */
4703 case 0xc4: /* 3-byte VEX */
4704 /* VEX prefixes cannot be used except in 32-bit mode.
4705 Otherwise the instruction is LES or LDS. */
4706 if (s->code32 && !s->vm86) {
4707 static const int pp_prefix[4] = {
4708 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4709 };
4710 int vex3, vex2 = cpu_ldub_code(env, s->pc);
4711
4712 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4713 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4714 otherwise the instruction is LES or LDS. */
4715 break;
4716 }
4717 s->pc++;
4718
085d8134 4719 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
701ed211
RH
4720 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4721 | PREFIX_LOCK | PREFIX_DATA)) {
4722 goto illegal_op;
4723 }
4724#ifdef TARGET_X86_64
4725 if (x86_64_hregs) {
4726 goto illegal_op;
4727 }
4728#endif
4729 rex_r = (~vex2 >> 4) & 8;
4730 if (b == 0xc5) {
4731 vex3 = vex2;
4732 b = cpu_ldub_code(env, s->pc++);
4733 } else {
4734#ifdef TARGET_X86_64
4735 s->rex_x = (~vex2 >> 3) & 8;
4736 s->rex_b = (~vex2 >> 2) & 8;
4737#endif
4738 vex3 = cpu_ldub_code(env, s->pc++);
4739 rex_w = (vex3 >> 7) & 1;
4740 switch (vex2 & 0x1f) {
4741 case 0x01: /* Implied 0f leading opcode bytes. */
4742 b = cpu_ldub_code(env, s->pc++) | 0x100;
4743 break;
4744 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4745 b = 0x138;
4746 break;
4747 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4748 b = 0x13a;
4749 break;
4750 default: /* Reserved for future use. */
4751 goto illegal_op;
4752 }
4753 }
4754 s->vex_v = (~vex3 >> 3) & 0xf;
4755 s->vex_l = (vex3 >> 2) & 1;
4756 prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4757 }
4758 break;
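/* The 2-byte VEX prefix (c5) packs R, vvvv, L and pp into a single byte;
   the 3-byte form (c4) carries R, X, B and the opcode-map selector in its
   first byte and W, vvvv, L, pp in its second. R, X, B and vvvv are stored
   inverted in the encoding, hence the ~ operations above. */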
4a6fd938
RH
4759 }
4760
4761 /* Post-process prefixes. */
4a6fd938 4762 if (CODE64(s)) {
dec3fc96
RH
4763 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4764 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4765 over 0x66 if both are present. */
4766 dflag = (rex_w > 0 ? 2 : prefixes & PREFIX_DATA ? 0 : 1);
4767 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4768 aflag = (prefixes & PREFIX_ADR ? 1 : 2);
4769 } else {
4770 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4771 dflag = s->code32;
4772 if (prefixes & PREFIX_DATA) {
4773 dflag ^= 1;
14ce26e7 4774 }
dec3fc96
RH
4775 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4776 aflag = s->code32;
4777 if (prefixes & PREFIX_ADR) {
4778 aflag ^= 1;
14ce26e7 4779 }
2c0262af
FB
4780 }
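/* From here on dflag and aflag encode the operand and address size as
   0 = 16-bit, 1 = 32-bit, 2 = 64-bit. */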
4781
2c0262af
FB
4782 s->prefix = prefixes;
4783 s->aflag = aflag;
4784 s->dflag = dflag;
4785
4786 /* lock generation */
4787 if (prefixes & PREFIX_LOCK)
a7812ae4 4788 gen_helper_lock();
2c0262af
FB
4789
4790 /* now check op code */
4791 reswitch:
4792 switch(b) {
4793 case 0x0f:
4794 /**************************/
4795 /* extended op code */
0af10c86 4796 b = cpu_ldub_code(env, s->pc++) | 0x100;
2c0262af 4797 goto reswitch;
3b46e624 4798
2c0262af
FB
4799 /**************************/
4800 /* arith & logic */
4801 case 0x00 ... 0x05:
4802 case 0x08 ... 0x0d:
4803 case 0x10 ... 0x15:
4804 case 0x18 ... 0x1d:
4805 case 0x20 ... 0x25:
4806 case 0x28 ... 0x2d:
4807 case 0x30 ... 0x35:
4808 case 0x38 ... 0x3d:
4809 {
4810 int op, f, val;
4811 op = (b >> 3) & 7;
4812 f = (b >> 1) & 3;
4813
4814 if ((b & 1) == 0)
4ba9938c 4815 ot = MO_8;
2c0262af 4816 else
4ba9938c 4817 ot = dflag + MO_16;
3b46e624 4818
2c0262af
FB
4819 switch(f) {
4820 case 0: /* OP Ev, Gv */
0af10c86 4821 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 4822 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 4823 mod = (modrm >> 6) & 3;
14ce26e7 4824 rm = (modrm & 7) | REX_B(s);
2c0262af 4825 if (mod != 3) {
0af10c86 4826 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af
FB
4827 opreg = OR_TMP0;
4828 } else if (op == OP_XORL && rm == reg) {
4829 xor_zero:
4830 /* xor reg, reg optimisation */
436ff2d2 4831 set_cc_op(s, CC_OP_CLR);
2c0262af 4832 gen_op_movl_T0_0();
57fec1fe 4833 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
4834 break;
4835 } else {
4836 opreg = rm;
4837 }
57fec1fe 4838 gen_op_mov_TN_reg(ot, 1, reg);
2c0262af
FB
4839 gen_op(s, op, ot, opreg);
4840 break;
4841 case 1: /* OP Gv, Ev */
0af10c86 4842 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4843 mod = (modrm >> 6) & 3;
14ce26e7
FB
4844 reg = ((modrm >> 3) & 7) | rex_r;
4845 rm = (modrm & 7) | REX_B(s);
2c0262af 4846 if (mod != 3) {
0af10c86 4847 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876 4848 gen_op_ld_T1_A0(s, ot);
2c0262af
FB
4849 } else if (op == OP_XORL && rm == reg) {
4850 goto xor_zero;
4851 } else {
57fec1fe 4852 gen_op_mov_TN_reg(ot, 1, rm);
2c0262af
FB
4853 }
4854 gen_op(s, op, ot, reg);
4855 break;
4856 case 2: /* OP A, Iv */
0af10c86 4857 val = insn_get(env, s, ot);
2c0262af
FB
4858 gen_op_movl_T1_im(val);
4859 gen_op(s, op, ot, OR_EAX);
4860 break;
4861 }
4862 }
4863 break;
4864
ec9d6075
FB
4865 case 0x82:
4866 if (CODE64(s))
4867 goto illegal_op;
2c0262af
FB
4868 case 0x80: /* GRP1 */
4869 case 0x81:
4870 case 0x83:
4871 {
4872 int val;
4873
4874 if ((b & 1) == 0)
4ba9938c 4875 ot = MO_8;
2c0262af 4876 else
4ba9938c 4877 ot = dflag + MO_16;
3b46e624 4878
0af10c86 4879 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4880 mod = (modrm >> 6) & 3;
14ce26e7 4881 rm = (modrm & 7) | REX_B(s);
2c0262af 4882 op = (modrm >> 3) & 7;
3b46e624 4883
2c0262af 4884 if (mod != 3) {
14ce26e7
FB
4885 if (b == 0x83)
4886 s->rip_offset = 1;
4887 else
4888 s->rip_offset = insn_const_size(ot);
0af10c86 4889 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af
FB
4890 opreg = OR_TMP0;
4891 } else {
14ce26e7 4892 opreg = rm;
2c0262af
FB
4893 }
4894
4895 switch(b) {
4896 default:
4897 case 0x80:
4898 case 0x81:
d64477af 4899 case 0x82:
0af10c86 4900 val = insn_get(env, s, ot);
2c0262af
FB
4901 break;
4902 case 0x83:
4ba9938c 4903 val = (int8_t)insn_get(env, s, MO_8);
2c0262af
FB
4904 break;
4905 }
4906 gen_op_movl_T1_im(val);
4907 gen_op(s, op, ot, opreg);
4908 }
4909 break;
4910
4911 /**************************/
4912 /* inc, dec, and other misc arith */
4913 case 0x40 ... 0x47: /* inc Gv */
4ba9938c 4914 ot = dflag ? MO_32 : MO_16;
2c0262af
FB
4915 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4916 break;
4917 case 0x48 ... 0x4f: /* dec Gv */
4ba9938c 4918 ot = dflag ? MO_32 : MO_16;
2c0262af
FB
4919 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4920 break;
4921 case 0xf6: /* GRP3 */
4922 case 0xf7:
4923 if ((b & 1) == 0)
4ba9938c 4924 ot = MO_8;
2c0262af 4925 else
4ba9938c 4926 ot = dflag + MO_16;
2c0262af 4927
0af10c86 4928 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4929 mod = (modrm >> 6) & 3;
14ce26e7 4930 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
4931 op = (modrm >> 3) & 7;
4932 if (mod != 3) {
14ce26e7
FB
4933 if (op == 0)
4934 s->rip_offset = insn_const_size(ot);
0af10c86 4935 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876 4936 gen_op_ld_T0_A0(s, ot);
2c0262af 4937 } else {
57fec1fe 4938 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af
FB
4939 }
4940
4941 switch(op) {
4942 case 0: /* test */
0af10c86 4943 val = insn_get(env, s, ot);
2c0262af
FB
4944 gen_op_movl_T1_im(val);
4945 gen_op_testl_T0_T1_cc();
3ca51d07 4946 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af
FB
4947 break;
4948 case 2: /* not */
b6abf97d 4949 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
2c0262af 4950 if (mod != 3) {
323d1876 4951 gen_op_st_T0_A0(s, ot);
2c0262af 4952 } else {
57fec1fe 4953 gen_op_mov_reg_T0(ot, rm);
2c0262af
FB
4954 }
4955 break;
4956 case 3: /* neg */
b6abf97d 4957 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
2c0262af 4958 if (mod != 3) {
323d1876 4959 gen_op_st_T0_A0(s, ot);
2c0262af 4960 } else {
57fec1fe 4961 gen_op_mov_reg_T0(ot, rm);
2c0262af
FB
4962 }
4963 gen_op_update_neg_cc();
3ca51d07 4964 set_cc_op(s, CC_OP_SUBB + ot);
2c0262af
FB
4965 break;
4966 case 4: /* mul */
4967 switch(ot) {
4ba9938c
RH
4968 case MO_8:
4969 gen_op_mov_TN_reg(MO_8, 1, R_EAX);
0211e5af
FB
4970 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4971 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4972 /* XXX: use 32 bit mul which could be faster */
4973 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4ba9938c 4974 gen_op_mov_reg_T0(MO_16, R_EAX);
0211e5af
FB
4975 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4976 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
3ca51d07 4977 set_cc_op(s, CC_OP_MULB);
2c0262af 4978 break;
4ba9938c
RH
4979 case MO_16:
4980 gen_op_mov_TN_reg(MO_16, 1, R_EAX);
0211e5af
FB
4981 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4982 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4983 /* XXX: use 32 bit mul which could be faster */
4984 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4ba9938c 4985 gen_op_mov_reg_T0(MO_16, R_EAX);
0211e5af
FB
4986 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4987 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4ba9938c 4988 gen_op_mov_reg_T0(MO_16, R_EDX);
0211e5af 4989 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
3ca51d07 4990 set_cc_op(s, CC_OP_MULW);
2c0262af
FB
4991 break;
4992 default:
4ba9938c 4993 case MO_32:
a4bcea3d
RH
4994 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4995 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4996 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4997 cpu_tmp2_i32, cpu_tmp3_i32);
4998 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4999 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
5000 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
5001 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 5002 set_cc_op(s, CC_OP_MULL);
2c0262af 5003 break;
14ce26e7 5004#ifdef TARGET_X86_64
4ba9938c 5005 case MO_64:
a4bcea3d
RH
5006 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
5007 cpu_T[0], cpu_regs[R_EAX]);
5008 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
5009 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 5010 set_cc_op(s, CC_OP_MULQ);
14ce26e7
FB
5011 break;
5012#endif
2c0262af 5013 }
2c0262af
FB
5014 break;
5015 case 5: /* imul */
5016 switch(ot) {
4ba9938c
RH
5017 case MO_8:
5018 gen_op_mov_TN_reg(MO_8, 1, R_EAX);
0211e5af
FB
5019 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5020 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
5021 /* XXX: use 32 bit mul which could be faster */
5022 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4ba9938c 5023 gen_op_mov_reg_T0(MO_16, R_EAX);
0211e5af
FB
5024 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5025 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
5026 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
3ca51d07 5027 set_cc_op(s, CC_OP_MULB);
2c0262af 5028 break;
4ba9938c
RH
5029 case MO_16:
5030 gen_op_mov_TN_reg(MO_16, 1, R_EAX);
0211e5af
FB
5031 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5032 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5033 /* XXX: use 32 bit mul which could be faster */
5034 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4ba9938c 5035 gen_op_mov_reg_T0(MO_16, R_EAX);
0211e5af
FB
5036 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5037 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5038 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
5039 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4ba9938c 5040 gen_op_mov_reg_T0(MO_16, R_EDX);
3ca51d07 5041 set_cc_op(s, CC_OP_MULW);
2c0262af
FB
5042 break;
5043 default:
4ba9938c 5044 case MO_32:
a4bcea3d
RH
5045 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5046 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
5047 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5048 cpu_tmp2_i32, cpu_tmp3_i32);
5049 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
5050 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
5051 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5052 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
5053 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5054 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
3ca51d07 5055 set_cc_op(s, CC_OP_MULL);
2c0262af 5056 break;
14ce26e7 5057#ifdef TARGET_X86_64
4ba9938c 5058 case MO_64:
a4bcea3d
RH
5059 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
5060 cpu_T[0], cpu_regs[R_EAX]);
5061 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
5062 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
5063 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 5064 set_cc_op(s, CC_OP_MULQ);
14ce26e7
FB
5065 break;
5066#endif
2c0262af 5067 }
2c0262af
FB
5068 break;
5069 case 6: /* div */
5070 switch(ot) {
4ba9938c 5071 case MO_8:
14ce26e7 5072 gen_jmp_im(pc_start - s->cs_base);
7923057b 5073 gen_helper_divb_AL(cpu_env, cpu_T[0]);
2c0262af 5074 break;
4ba9938c 5075 case MO_16:
14ce26e7 5076 gen_jmp_im(pc_start - s->cs_base);
7923057b 5077 gen_helper_divw_AX(cpu_env, cpu_T[0]);
2c0262af
FB
5078 break;
5079 default:
4ba9938c 5080 case MO_32:
14ce26e7 5081 gen_jmp_im(pc_start - s->cs_base);
7923057b 5082 gen_helper_divl_EAX(cpu_env, cpu_T[0]);
14ce26e7
FB
5083 break;
5084#ifdef TARGET_X86_64
4ba9938c 5085 case MO_64:
14ce26e7 5086 gen_jmp_im(pc_start - s->cs_base);
7923057b 5087 gen_helper_divq_EAX(cpu_env, cpu_T[0]);
2c0262af 5088 break;
14ce26e7 5089#endif
2c0262af
FB
5090 }
5091 break;
5092 case 7: /* idiv */
5093 switch(ot) {
4ba9938c 5094 case MO_8:
14ce26e7 5095 gen_jmp_im(pc_start - s->cs_base);
7923057b 5096 gen_helper_idivb_AL(cpu_env, cpu_T[0]);
2c0262af 5097 break;
4ba9938c 5098 case MO_16:
14ce26e7 5099 gen_jmp_im(pc_start - s->cs_base);
7923057b 5100 gen_helper_idivw_AX(cpu_env, cpu_T[0]);
2c0262af
FB
5101 break;
5102 default:
4ba9938c 5103 case MO_32:
14ce26e7 5104 gen_jmp_im(pc_start - s->cs_base);
7923057b 5105 gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
14ce26e7
FB
5106 break;
5107#ifdef TARGET_X86_64
4ba9938c 5108 case MO_64:
14ce26e7 5109 gen_jmp_im(pc_start - s->cs_base);
7923057b 5110 gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
2c0262af 5111 break;
14ce26e7 5112#endif
2c0262af
FB
5113 }
5114 break;
5115 default:
5116 goto illegal_op;
5117 }
5118 break;
5119
5120 case 0xfe: /* GRP4 */
5121 case 0xff: /* GRP5 */
5122 if ((b & 1) == 0)
4ba9938c 5123 ot = MO_8;
2c0262af 5124 else
4ba9938c 5125 ot = dflag + MO_16;
2c0262af 5126
0af10c86 5127 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5128 mod = (modrm >> 6) & 3;
14ce26e7 5129 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
5130 op = (modrm >> 3) & 7;
5131 if (op >= 2 && b == 0xfe) {
5132 goto illegal_op;
5133 }
14ce26e7 5134 if (CODE64(s)) {
aba9d61e 5135 if (op == 2 || op == 4) {
14ce26e7 5136 /* operand size for jumps is 64 bit */
4ba9938c 5137 ot = MO_64;
aba9d61e 5138 } else if (op == 3 || op == 5) {
4ba9938c 5139 ot = dflag ? MO_32 + (rex_w == 1) : MO_16;
14ce26e7
FB
5140 } else if (op == 6) {
5141 /* default push size is 64 bit */
4ba9938c 5142 ot = dflag ? MO_64 : MO_16;
14ce26e7
FB
5143 }
5144 }
2c0262af 5145 if (mod != 3) {
0af10c86 5146 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af 5147 if (op >= 2 && op != 3 && op != 5)
323d1876 5148 gen_op_ld_T0_A0(s, ot);
2c0262af 5149 } else {
57fec1fe 5150 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af
FB
5151 }
5152
5153 switch(op) {
5154 case 0: /* inc Ev */
5155 if (mod != 3)
5156 opreg = OR_TMP0;
5157 else
5158 opreg = rm;
5159 gen_inc(s, ot, opreg, 1);
5160 break;
5161 case 1: /* dec Ev */
5162 if (mod != 3)
5163 opreg = OR_TMP0;
5164 else
5165 opreg = rm;
5166 gen_inc(s, ot, opreg, -1);
5167 break;
5168 case 2: /* call Ev */
4f31916f 5169 /* XXX: optimize if memory (no 'and' is necessary) */
2c0262af
FB
5170 if (s->dflag == 0)
5171 gen_op_andl_T0_ffff();
2c0262af 5172 next_eip = s->pc - s->cs_base;
1ef38687 5173 gen_movtl_T1_im(next_eip);
4f31916f
FB
5174 gen_push_T1(s);
5175 gen_op_jmp_T0();
2c0262af
FB
5176 gen_eob(s);
5177 break;
61382a50 5178 case 3: /* lcall Ev */
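/* far pointer in memory: the offset comes first, the 16-bit selector
   follows it 2, 4 or 8 bytes later depending on the operand size */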
323d1876 5179 gen_op_ld_T1_A0(s, ot);
4ba9938c
RH
5180 gen_add_A0_im(s, 1 << (ot - MO_16 + 1));
5181 gen_op_ldu_T0_A0(s, MO_16);
2c0262af
FB
5182 do_lcall:
5183 if (s->pe && !s->vm86) {
773cdfcc 5184 gen_update_cc_op(s);
14ce26e7 5185 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 5186 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2
BS
5187 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
5188 tcg_const_i32(dflag),
a7812ae4 5189 tcg_const_i32(s->pc - pc_start));
2c0262af 5190 } else {
b6abf97d 5191 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2
BS
5192 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
5193 tcg_const_i32(dflag),
a7812ae4 5194 tcg_const_i32(s->pc - s->cs_base));
2c0262af
FB
5195 }
5196 gen_eob(s);
5197 break;
5198 case 4: /* jmp Ev */
5199 if (s->dflag == 0)
5200 gen_op_andl_T0_ffff();
5201 gen_op_jmp_T0();
5202 gen_eob(s);
5203 break;
5204 case 5: /* ljmp Ev */
323d1876 5205 gen_op_ld_T1_A0(s, ot);
4ba9938c
RH
5206 gen_add_A0_im(s, 1 << (ot - MO_16 + 1));
5207 gen_op_ldu_T0_A0(s, MO_16);
2c0262af
FB
5208 do_ljmp:
5209 if (s->pe && !s->vm86) {
773cdfcc 5210 gen_update_cc_op(s);
14ce26e7 5211 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 5212 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 5213 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
a7812ae4 5214 tcg_const_i32(s->pc - pc_start));
2c0262af 5215 } else {
3bd7da9e 5216 gen_op_movl_seg_T0_vm(R_CS);
2c0262af
FB
5217 gen_op_movl_T0_T1();
5218 gen_op_jmp_T0();
5219 }
5220 gen_eob(s);
5221 break;
5222 case 6: /* push Ev */
5223 gen_push_T0(s);
5224 break;
5225 default:
5226 goto illegal_op;
5227 }
5228 break;
5229
5230 case 0x84: /* test Ev, Gv */
5fafdf24 5231 case 0x85:
2c0262af 5232 if ((b & 1) == 0)
4ba9938c 5233 ot = MO_8;
2c0262af 5234 else
4ba9938c 5235 ot = dflag + MO_16;
2c0262af 5236
0af10c86 5237 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5238 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5239
0af10c86 5240 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
57fec1fe 5241 gen_op_mov_TN_reg(ot, 1, reg);
2c0262af 5242 gen_op_testl_T0_T1_cc();
3ca51d07 5243 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af 5244 break;
3b46e624 5245
2c0262af
FB
5246 case 0xa8: /* test eAX, Iv */
5247 case 0xa9:
5248 if ((b & 1) == 0)
4ba9938c 5249 ot = MO_8;
2c0262af 5250 else
4ba9938c 5251 ot = dflag + MO_16;
0af10c86 5252 val = insn_get(env, s, ot);
2c0262af 5253
57fec1fe 5254 gen_op_mov_TN_reg(ot, 0, OR_EAX);
2c0262af
FB
5255 gen_op_movl_T1_im(val);
5256 gen_op_testl_T0_T1_cc();
3ca51d07 5257 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af 5258 break;
3b46e624 5259
2c0262af 5260 case 0x98: /* CWDE/CBW */
14ce26e7
FB
5261#ifdef TARGET_X86_64
5262 if (dflag == 2) {
4ba9938c 5263 gen_op_mov_TN_reg(MO_32, 0, R_EAX);
e108dd01 5264 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4ba9938c 5265 gen_op_mov_reg_T0(MO_64, R_EAX);
14ce26e7
FB
5266 } else
5267#endif
e108dd01 5268 if (dflag == 1) {
4ba9938c 5269 gen_op_mov_TN_reg(MO_16, 0, R_EAX);
e108dd01 5270 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4ba9938c 5271 gen_op_mov_reg_T0(MO_32, R_EAX);
e108dd01 5272 } else {
4ba9938c 5273 gen_op_mov_TN_reg(MO_8, 0, R_EAX);
e108dd01 5274 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4ba9938c 5275 gen_op_mov_reg_T0(MO_16, R_EAX);
e108dd01 5276 }
2c0262af
FB
5277 break;
5278 case 0x99: /* CDQ/CWD */
14ce26e7
FB
5279#ifdef TARGET_X86_64
5280 if (dflag == 2) {
4ba9938c 5281 gen_op_mov_TN_reg(MO_64, 0, R_EAX);
e108dd01 5282 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
4ba9938c 5283 gen_op_mov_reg_T0(MO_64, R_EDX);
14ce26e7
FB
5284 } else
5285#endif
e108dd01 5286 if (dflag == 1) {
4ba9938c 5287 gen_op_mov_TN_reg(MO_32, 0, R_EAX);
e108dd01
FB
5288 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5289 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
4ba9938c 5290 gen_op_mov_reg_T0(MO_32, R_EDX);
e108dd01 5291 } else {
4ba9938c 5292 gen_op_mov_TN_reg(MO_16, 0, R_EAX);
e108dd01
FB
5293 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5294 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
4ba9938c 5295 gen_op_mov_reg_T0(MO_16, R_EDX);
e108dd01 5296 }
2c0262af
FB
5297 break;
5298 case 0x1af: /* imul Gv, Ev */
5299 case 0x69: /* imul Gv, Ev, I */
5300 case 0x6b:
4ba9938c 5301 ot = dflag + MO_16;
0af10c86 5302 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
5303 reg = ((modrm >> 3) & 7) | rex_r;
5304 if (b == 0x69)
5305 s->rip_offset = insn_const_size(ot);
5306 else if (b == 0x6b)
5307 s->rip_offset = 1;
0af10c86 5308 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2c0262af 5309 if (b == 0x69) {
0af10c86 5310 val = insn_get(env, s, ot);
2c0262af
FB
5311 gen_op_movl_T1_im(val);
5312 } else if (b == 0x6b) {
4ba9938c 5313 val = (int8_t)insn_get(env, s, MO_8);
2c0262af
FB
5314 gen_op_movl_T1_im(val);
5315 } else {
57fec1fe 5316 gen_op_mov_TN_reg(ot, 1, reg);
2c0262af 5317 }
a4bcea3d 5318 switch (ot) {
0211e5af 5319#ifdef TARGET_X86_64
4ba9938c 5320 case MO_64:
a4bcea3d
RH
5321 tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]);
5322 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5323 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
5324 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
5325 break;
0211e5af 5326#endif
4ba9938c 5327 case MO_32:
a4bcea3d
RH
5328 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5329 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5330 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5331 cpu_tmp2_i32, cpu_tmp3_i32);
5332 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
5333 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5334 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5335 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5336 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
5337 break;
5338 default:
0211e5af
FB
5339 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5340 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5341 /* XXX: use 32 bit mul which could be faster */
5342 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5343 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5344 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5345 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
a4bcea3d
RH
5346 gen_op_mov_reg_T0(ot, reg);
5347 break;
2c0262af 5348 }
3ca51d07 5349 set_cc_op(s, CC_OP_MULB + ot);
2c0262af
FB
5350 break;
5351 case 0x1c0:
5352 case 0x1c1: /* xadd Ev, Gv */
5353 if ((b & 1) == 0)
4ba9938c 5354 ot = MO_8;
2c0262af 5355 else
4ba9938c 5356 ot = dflag + MO_16;
0af10c86 5357 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5358 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5359 mod = (modrm >> 6) & 3;
5360 if (mod == 3) {
14ce26e7 5361 rm = (modrm & 7) | REX_B(s);
57fec1fe
FB
5362 gen_op_mov_TN_reg(ot, 0, reg);
5363 gen_op_mov_TN_reg(ot, 1, rm);
2c0262af 5364 gen_op_addl_T0_T1();
57fec1fe
FB
5365 gen_op_mov_reg_T1(ot, reg);
5366 gen_op_mov_reg_T0(ot, rm);
2c0262af 5367 } else {
0af10c86 5368 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
57fec1fe 5369 gen_op_mov_TN_reg(ot, 0, reg);
323d1876 5370 gen_op_ld_T1_A0(s, ot);
2c0262af 5371 gen_op_addl_T0_T1();
323d1876 5372 gen_op_st_T0_A0(s, ot);
57fec1fe 5373 gen_op_mov_reg_T1(ot, reg);
2c0262af
FB
5374 }
5375 gen_op_update2_cc();
3ca51d07 5376 set_cc_op(s, CC_OP_ADDB + ot);
2c0262af
FB
5377 break;
5378 case 0x1b0:
5379 case 0x1b1: /* cmpxchg Ev, Gv */
cad3a37d 5380 {
1130328e 5381 int label1, label2;
1e4840bf 5382 TCGv t0, t1, t2, a0;
cad3a37d
FB
5383
5384 if ((b & 1) == 0)
4ba9938c 5385 ot = MO_8;
cad3a37d 5386 else
4ba9938c 5387 ot = dflag + MO_16;
0af10c86 5388 modrm = cpu_ldub_code(env, s->pc++);
cad3a37d
FB
5389 reg = ((modrm >> 3) & 7) | rex_r;
5390 mod = (modrm >> 6) & 3;
a7812ae4
PB
5391 t0 = tcg_temp_local_new();
5392 t1 = tcg_temp_local_new();
5393 t2 = tcg_temp_local_new();
5394 a0 = tcg_temp_local_new();
1e4840bf 5395 gen_op_mov_v_reg(ot, t1, reg);
cad3a37d
FB
5396 if (mod == 3) {
5397 rm = (modrm & 7) | REX_B(s);
1e4840bf 5398 gen_op_mov_v_reg(ot, t0, rm);
cad3a37d 5399 } else {
0af10c86 5400 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
1e4840bf 5401 tcg_gen_mov_tl(a0, cpu_A0);
323d1876 5402 gen_op_ld_v(s, ot, t0, a0);
cad3a37d
FB
5403 rm = 0; /* avoid warning */
5404 }
5405 label1 = gen_new_label();
a3251186
RH
5406 tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5407 gen_extu(ot, t0);
1e4840bf 5408 gen_extu(ot, t2);
a3251186 5409 tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
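/* label1: the accumulator matched the destination, so the source (t1)
   is written to it; the fall-through path instead copies the old
   destination value (t0) into EAX. */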
f7e80adf 5410 label2 = gen_new_label();
cad3a37d 5411 if (mod == 3) {
1e4840bf 5412 gen_op_mov_reg_v(ot, R_EAX, t0);
1130328e
FB
5413 tcg_gen_br(label2);
5414 gen_set_label(label1);
1e4840bf 5415 gen_op_mov_reg_v(ot, rm, t1);
cad3a37d 5416 } else {
f7e80adf
AG
5417 /* perform no-op store cycle like physical cpu; must be
5418 before changing accumulator to ensure idempotency if
5419 the store faults and the instruction is restarted */
323d1876 5420 gen_op_st_v(s, ot, t0, a0);
1e4840bf 5421 gen_op_mov_reg_v(ot, R_EAX, t0);
f7e80adf 5422 tcg_gen_br(label2);
1130328e 5423 gen_set_label(label1);
323d1876 5424 gen_op_st_v(s, ot, t1, a0);
cad3a37d 5425 }
f7e80adf 5426 gen_set_label(label2);
1e4840bf 5427 tcg_gen_mov_tl(cpu_cc_src, t0);
a3251186
RH
5428 tcg_gen_mov_tl(cpu_cc_srcT, t2);
5429 tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
3ca51d07 5430 set_cc_op(s, CC_OP_SUBB + ot);
1e4840bf
FB
5431 tcg_temp_free(t0);
5432 tcg_temp_free(t1);
5433 tcg_temp_free(t2);
5434 tcg_temp_free(a0);
2c0262af 5435 }
2c0262af
FB
5436 break;
5437 case 0x1c7: /* cmpxchg8b */
0af10c86 5438 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5439 mod = (modrm >> 6) & 3;
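/* only the /1 encoding with a memory operand is valid */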
71c3558e 5440 if ((mod == 3) || ((modrm & 0x38) != 0x8))
2c0262af 5441 goto illegal_op;
1b9d9ebb
FB
5442#ifdef TARGET_X86_64
5443 if (dflag == 2) {
5444 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5445 goto illegal_op;
5446 gen_jmp_im(pc_start - s->cs_base);
773cdfcc 5447 gen_update_cc_op(s);
0af10c86 5448 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
92fc4b58 5449 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
1b9d9ebb
FB
5450 } else
5451#endif
5452 {
5453 if (!(s->cpuid_features & CPUID_CX8))
5454 goto illegal_op;
5455 gen_jmp_im(pc_start - s->cs_base);
773cdfcc 5456 gen_update_cc_op(s);
0af10c86 5457 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
92fc4b58 5458 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
1b9d9ebb 5459 }
3ca51d07 5460 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 5461 break;
3b46e624 5462
2c0262af
FB
5463 /**************************/
5464 /* push/pop */
5465 case 0x50 ... 0x57: /* push */
4ba9938c 5466 gen_op_mov_TN_reg(MO_32, 0, (b & 7) | REX_B(s));
2c0262af
FB
5467 gen_push_T0(s);
5468 break;
5469 case 0x58 ... 0x5f: /* pop */
14ce26e7 5470 if (CODE64(s)) {
4ba9938c 5471 ot = dflag ? MO_64 : MO_16;
14ce26e7 5472 } else {
4ba9938c 5473 ot = dflag + MO_16;
14ce26e7 5474 }
2c0262af 5475 gen_pop_T0(s);
77729c24 5476 /* NOTE: order is important for pop %sp */
2c0262af 5477 gen_pop_update(s);
57fec1fe 5478 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
2c0262af
FB
5479 break;
5480 case 0x60: /* pusha */
14ce26e7
FB
5481 if (CODE64(s))
5482 goto illegal_op;
2c0262af
FB
5483 gen_pusha(s);
5484 break;
5485 case 0x61: /* popa */
14ce26e7
FB
5486 if (CODE64(s))
5487 goto illegal_op;
2c0262af
FB
5488 gen_popa(s);
5489 break;
5490 case 0x68: /* push Iv */
5491 case 0x6a:
14ce26e7 5492 if (CODE64(s)) {
4ba9938c 5493 ot = dflag ? MO_64 : MO_16;
14ce26e7 5494 } else {
4ba9938c 5495 ot = dflag + MO_16;
14ce26e7 5496 }
2c0262af 5497 if (b == 0x68)
0af10c86 5498 val = insn_get(env, s, ot);
2c0262af 5499 else
4ba9938c 5500 val = (int8_t)insn_get(env, s, MO_8);
2c0262af
FB
5501 gen_op_movl_T0_im(val);
5502 gen_push_T0(s);
5503 break;
5504 case 0x8f: /* pop Ev */
14ce26e7 5505 if (CODE64(s)) {
4ba9938c 5506 ot = dflag ? MO_64 : MO_16;
14ce26e7 5507 } else {
4ba9938c 5508 ot = dflag + MO_16;
14ce26e7 5509 }
0af10c86 5510 modrm = cpu_ldub_code(env, s->pc++);
77729c24 5511 mod = (modrm >> 6) & 3;
2c0262af 5512 gen_pop_T0(s);
77729c24
FB
5513 if (mod == 3) {
5514 /* NOTE: order is important for pop %sp */
5515 gen_pop_update(s);
14ce26e7 5516 rm = (modrm & 7) | REX_B(s);
57fec1fe 5517 gen_op_mov_reg_T0(ot, rm);
77729c24
FB
5518 } else {
5519 /* NOTE: order is important too for MMU exceptions */
14ce26e7 5520 s->popl_esp_hack = 1 << ot;
0af10c86 5521 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
77729c24
FB
5522 s->popl_esp_hack = 0;
5523 gen_pop_update(s);
5524 }
2c0262af
FB
5525 break;
5526 case 0xc8: /* enter */
5527 {
5528 int level;
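/* enter takes a 16-bit frame size followed by an 8-bit nesting level */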
0af10c86 5529 val = cpu_lduw_code(env, s->pc);
2c0262af 5530 s->pc += 2;
0af10c86 5531 level = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5532 gen_enter(s, val, level);
5533 }
5534 break;
5535 case 0xc9: /* leave */
5536 /* XXX: exception not precise (ESP is updated before potential exception) */
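/* leave: SP <- BP, then BP <- pop(), using the 64/32/16-bit stack
   registers according to the current stack size */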
14ce26e7 5537 if (CODE64(s)) {
4ba9938c
RH
5538 gen_op_mov_TN_reg(MO_64, 0, R_EBP);
5539 gen_op_mov_reg_T0(MO_64, R_ESP);
14ce26e7 5540 } else if (s->ss32) {
4ba9938c
RH
5541 gen_op_mov_TN_reg(MO_32, 0, R_EBP);
5542 gen_op_mov_reg_T0(MO_32, R_ESP);
2c0262af 5543 } else {
4ba9938c
RH
5544 gen_op_mov_TN_reg(MO_16, 0, R_EBP);
5545 gen_op_mov_reg_T0(MO_16, R_ESP);
2c0262af
FB
5546 }
5547 gen_pop_T0(s);
14ce26e7 5548 if (CODE64(s)) {
4ba9938c 5549 ot = dflag ? MO_64 : MO_16;
14ce26e7 5550 } else {
4ba9938c 5551 ot = dflag + MO_16;
14ce26e7 5552 }
57fec1fe 5553 gen_op_mov_reg_T0(ot, R_EBP);
2c0262af
FB
5554 gen_pop_update(s);
5555 break;
5556 case 0x06: /* push es */
5557 case 0x0e: /* push cs */
5558 case 0x16: /* push ss */
5559 case 0x1e: /* push ds */
14ce26e7
FB
5560 if (CODE64(s))
5561 goto illegal_op;
2c0262af
FB
5562 gen_op_movl_T0_seg(b >> 3);
5563 gen_push_T0(s);
5564 break;
5565 case 0x1a0: /* push fs */
5566 case 0x1a8: /* push gs */
5567 gen_op_movl_T0_seg((b >> 3) & 7);
5568 gen_push_T0(s);
5569 break;
5570 case 0x07: /* pop es */
5571 case 0x17: /* pop ss */
5572 case 0x1f: /* pop ds */
14ce26e7
FB
5573 if (CODE64(s))
5574 goto illegal_op;
2c0262af
FB
5575 reg = b >> 3;
5576 gen_pop_T0(s);
5577 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5578 gen_pop_update(s);
5579 if (reg == R_SS) {
a2cc3b24
FB
5580 /* if reg == SS, inhibit interrupts/trace. */
5581 /* If several instructions disable interrupts, only the
5582 _first_ does it */
5583 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
f0967a1a 5584 gen_helper_set_inhibit_irq(cpu_env);
2c0262af
FB
5585 s->tf = 0;
5586 }
5587 if (s->is_jmp) {
14ce26e7 5588 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5589 gen_eob(s);
5590 }
5591 break;
5592 case 0x1a1: /* pop fs */
5593 case 0x1a9: /* pop gs */
5594 gen_pop_T0(s);
5595 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5596 gen_pop_update(s);
5597 if (s->is_jmp) {
14ce26e7 5598 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5599 gen_eob(s);
5600 }
5601 break;
5602
5603 /**************************/
5604 /* mov */
5605 case 0x88:
5606 case 0x89: /* mov Gv, Ev */
5607 if ((b & 1) == 0)
4ba9938c 5608 ot = MO_8;
2c0262af 5609 else
4ba9938c 5610 ot = dflag + MO_16;
0af10c86 5611 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5612 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5613
2c0262af 5614 /* generate a generic store */
0af10c86 5615 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
2c0262af
FB
5616 break;
5617 case 0xc6:
5618 case 0xc7: /* mov Ev, Iv */
5619 if ((b & 1) == 0)
4ba9938c 5620 ot = MO_8;
2c0262af 5621 else
4ba9938c 5622 ot = dflag + MO_16;
0af10c86 5623 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5624 mod = (modrm >> 6) & 3;
14ce26e7
FB
5625 if (mod != 3) {
5626 s->rip_offset = insn_const_size(ot);
0af10c86 5627 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
14ce26e7 5628 }
0af10c86 5629 val = insn_get(env, s, ot);
2c0262af
FB
5630 gen_op_movl_T0_im(val);
5631 if (mod != 3)
323d1876 5632 gen_op_st_T0_A0(s, ot);
2c0262af 5633 else
57fec1fe 5634 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
2c0262af
FB
5635 break;
5636 case 0x8a:
5637 case 0x8b: /* mov Ev, Gv */
5638 if ((b & 1) == 0)
4ba9938c 5639 ot = MO_8;
2c0262af 5640 else
4ba9938c 5641 ot = MO_16 + dflag;
0af10c86 5642 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5643 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5644
0af10c86 5645 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
57fec1fe 5646 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
5647 break;
5648 case 0x8e: /* mov seg, Gv */
0af10c86 5649 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5650 reg = (modrm >> 3) & 7;
5651 if (reg >= 6 || reg == R_CS)
5652 goto illegal_op;
4ba9938c 5653 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
2c0262af
FB
5654 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5655 if (reg == R_SS) {
5656 /* if reg == SS, inhibit interrupts/trace */
a2cc3b24
FB
5657 /* If several instructions disable interrupts, only the
5658 _first_ does it */
5659 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
f0967a1a 5660 gen_helper_set_inhibit_irq(cpu_env);
2c0262af
FB
5661 s->tf = 0;
5662 }
5663 if (s->is_jmp) {
14ce26e7 5664 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5665 gen_eob(s);
5666 }
5667 break;
5668 case 0x8c: /* mov Gv, seg */
0af10c86 5669 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5670 reg = (modrm >> 3) & 7;
5671 mod = (modrm >> 6) & 3;
5672 if (reg >= 6)
5673 goto illegal_op;
5674 gen_op_movl_T0_seg(reg);
14ce26e7 5675 if (mod == 3)
4ba9938c 5676 ot = MO_16 + dflag;
14ce26e7 5677 else
4ba9938c 5678 ot = MO_16;
0af10c86 5679 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
5680 break;
5681
5682 case 0x1b6: /* movzbS Gv, Eb */
5683 case 0x1b7: /* movzwS Gv, Ew */
5684 case 0x1be: /* movsbS Gv, Eb */
5685 case 0x1bf: /* movswS Gv, Ew */
5686 {
5687 int d_ot;
5688 /* d_ot is the size of destination */
4ba9938c 5689 d_ot = dflag + MO_16;
2c0262af 5690 /* ot is the size of source */
4ba9938c 5691 ot = (b & 1) + MO_8;
0af10c86 5692 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5693 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 5694 mod = (modrm >> 6) & 3;
14ce26e7 5695 rm = (modrm & 7) | REX_B(s);
3b46e624 5696
2c0262af 5697 if (mod == 3) {
57fec1fe 5698 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af 5699 switch(ot | (b & 8)) {
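/* bit 3 of the opcode selects sign extension (movsx) rather than
   zero extension (movzx) */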
4ba9938c 5700 case MO_8:
e108dd01 5701 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
2c0262af 5702 break;
4ba9938c 5703 case MO_8 | 8:
e108dd01 5704 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
2c0262af 5705 break;
4ba9938c 5706 case MO_16:
e108dd01 5707 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
2c0262af
FB
5708 break;
5709 default:
4ba9938c 5710 case MO_16 | 8:
e108dd01 5711 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
2c0262af
FB
5712 break;
5713 }
57fec1fe 5714 gen_op_mov_reg_T0(d_ot, reg);
2c0262af 5715 } else {
0af10c86 5716 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af 5717 if (b & 8) {
323d1876 5718 gen_op_lds_T0_A0(s, ot);
2c0262af 5719 } else {
323d1876 5720 gen_op_ldu_T0_A0(s, ot);
2c0262af 5721 }
57fec1fe 5722 gen_op_mov_reg_T0(d_ot, reg);
2c0262af
FB
5723 }
5724 }
5725 break;
5726
5727 case 0x8d: /* lea */
4ba9938c 5728 ot = dflag + MO_16;
0af10c86 5729 modrm = cpu_ldub_code(env, s->pc++);
3a1d9b8b
FB
5730 mod = (modrm >> 6) & 3;
5731 if (mod == 3)
5732 goto illegal_op;
14ce26e7 5733 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5734 /* we must ensure that no segment is added */
5735 s->override = -1;
5736 val = s->addseg;
5737 s->addseg = 0;
0af10c86 5738 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af 5739 s->addseg = val;
4ba9938c 5740 gen_op_mov_reg_A0(ot - MO_16, reg);
2c0262af 5741 break;
3b46e624 5742
2c0262af
FB
5743 case 0xa0: /* mov EAX, Ov */
5744 case 0xa1:
5745 case 0xa2: /* mov Ov, EAX */
5746 case 0xa3:
2c0262af 5747 {
14ce26e7
FB
5748 target_ulong offset_addr;
5749
5750 if ((b & 1) == 0)
4ba9938c 5751 ot = MO_8;
14ce26e7 5752 else
4ba9938c 5753 ot = dflag + MO_16;
14ce26e7 5754#ifdef TARGET_X86_64
8f091a59 5755 if (s->aflag == 2) {
0af10c86 5756 offset_addr = cpu_ldq_code(env, s->pc);
14ce26e7 5757 s->pc += 8;
57fec1fe 5758 gen_op_movq_A0_im(offset_addr);
5fafdf24 5759 } else
14ce26e7
FB
5760#endif
5761 {
5762 if (s->aflag) {
4ba9938c 5763 offset_addr = insn_get(env, s, MO_32);
14ce26e7 5764 } else {
4ba9938c 5765 offset_addr = insn_get(env, s, MO_16);
14ce26e7
FB
5766 }
5767 gen_op_movl_A0_im(offset_addr);
5768 }
664e0f19 5769 gen_add_A0_ds_seg(s);
14ce26e7 5770 if ((b & 2) == 0) {
323d1876 5771 gen_op_ld_T0_A0(s, ot);
57fec1fe 5772 gen_op_mov_reg_T0(ot, R_EAX);
14ce26e7 5773 } else {
57fec1fe 5774 gen_op_mov_TN_reg(ot, 0, R_EAX);
323d1876 5775 gen_op_st_T0_A0(s, ot);
2c0262af
FB
5776 }
5777 }
2c0262af
FB
5778 break;
5779 case 0xd7: /* xlat */
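/* xlat: AL <- byte at seg:[(E/R)BX + zero-extended AL], DS by default */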
14ce26e7 5780#ifdef TARGET_X86_64
8f091a59 5781 if (s->aflag == 2) {
57fec1fe 5782 gen_op_movq_A0_reg(R_EBX);
4ba9938c 5783 gen_op_mov_TN_reg(MO_64, 0, R_EAX);
bbf662ee
FB
5784 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5785 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5fafdf24 5786 } else
14ce26e7
FB
5787#endif
5788 {
57fec1fe 5789 gen_op_movl_A0_reg(R_EBX);
4ba9938c 5790 gen_op_mov_TN_reg(MO_32, 0, R_EAX);
bbf662ee
FB
5791 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5792 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
14ce26e7
FB
5793 if (s->aflag == 0)
5794 gen_op_andl_A0_ffff();
bbf662ee
FB
5795 else
5796 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
14ce26e7 5797 }
664e0f19 5798 gen_add_A0_ds_seg(s);
4ba9938c
RH
5799 gen_op_ldu_T0_A0(s, MO_8);
5800 gen_op_mov_reg_T0(MO_8, R_EAX);
2c0262af
FB
5801 break;
5802 case 0xb0 ... 0xb7: /* mov R, Ib */
4ba9938c 5803 val = insn_get(env, s, MO_8);
2c0262af 5804 gen_op_movl_T0_im(val);
4ba9938c 5805 gen_op_mov_reg_T0(MO_8, (b & 7) | REX_B(s));
2c0262af
FB
5806 break;
5807 case 0xb8 ... 0xbf: /* mov R, Iv */
14ce26e7
FB
5808#ifdef TARGET_X86_64
5809 if (dflag == 2) {
5810 uint64_t tmp;
5811 /* 64 bit case */
0af10c86 5812 tmp = cpu_ldq_code(env, s->pc);
14ce26e7
FB
5813 s->pc += 8;
5814 reg = (b & 7) | REX_B(s);
5815 gen_movtl_T0_im(tmp);
4ba9938c 5816 gen_op_mov_reg_T0(MO_64, reg);
5fafdf24 5817 } else
14ce26e7
FB
5818#endif
5819 {
4ba9938c 5820 ot = dflag ? MO_32 : MO_16;
0af10c86 5821 val = insn_get(env, s, ot);
14ce26e7
FB
5822 reg = (b & 7) | REX_B(s);
5823 gen_op_movl_T0_im(val);
57fec1fe 5824 gen_op_mov_reg_T0(ot, reg);
14ce26e7 5825 }
2c0262af
FB
5826 break;
5827
5828 case 0x91 ... 0x97: /* xchg R, EAX */
7418027e 5829 do_xchg_reg_eax:
4ba9938c 5830 ot = dflag + MO_16;
14ce26e7 5831 reg = (b & 7) | REX_B(s);
2c0262af
FB
5832 rm = R_EAX;
5833 goto do_xchg_reg;
5834 case 0x86:
5835 case 0x87: /* xchg Ev, Gv */
5836 if ((b & 1) == 0)
4ba9938c 5837 ot = MO_8;
2c0262af 5838 else
4ba9938c 5839 ot = dflag + MO_16;
0af10c86 5840 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5841 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5842 mod = (modrm >> 6) & 3;
5843 if (mod == 3) {
14ce26e7 5844 rm = (modrm & 7) | REX_B(s);
2c0262af 5845 do_xchg_reg:
57fec1fe
FB
5846 gen_op_mov_TN_reg(ot, 0, reg);
5847 gen_op_mov_TN_reg(ot, 1, rm);
5848 gen_op_mov_reg_T0(ot, rm);
5849 gen_op_mov_reg_T1(ot, reg);
2c0262af 5850 } else {
0af10c86 5851 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
57fec1fe 5852 gen_op_mov_TN_reg(ot, 0, reg);
2c0262af
FB
5853 /* for xchg, lock is implicit */
5854 if (!(prefixes & PREFIX_LOCK))
a7812ae4 5855 gen_helper_lock();
323d1876
RH
5856 gen_op_ld_T1_A0(s, ot);
5857 gen_op_st_T0_A0(s, ot);
2c0262af 5858 if (!(prefixes & PREFIX_LOCK))
a7812ae4 5859 gen_helper_unlock();
57fec1fe 5860 gen_op_mov_reg_T1(ot, reg);
2c0262af
FB
5861 }
5862 break;
5863 case 0xc4: /* les Gv */
701ed211 5864 /* In CODE64 this is VEX3; see above. */
2c0262af
FB
5865 op = R_ES;
5866 goto do_lxx;
5867 case 0xc5: /* lds Gv */
701ed211 5868 /* In CODE64 this is VEX2; see above. */
2c0262af
FB
5869 op = R_DS;
5870 goto do_lxx;
5871 case 0x1b2: /* lss Gv */
5872 op = R_SS;
5873 goto do_lxx;
5874 case 0x1b4: /* lfs Gv */
5875 op = R_FS;
5876 goto do_lxx;
5877 case 0x1b5: /* lgs Gv */
5878 op = R_GS;
5879 do_lxx:
4ba9938c 5880 ot = dflag ? MO_32 : MO_16;
0af10c86 5881 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5882 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5883 mod = (modrm >> 6) & 3;
5884 if (mod == 3)
5885 goto illegal_op;
0af10c86 5886 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876 5887 gen_op_ld_T1_A0(s, ot);
4ba9938c 5888 gen_add_A0_im(s, 1 << (ot - MO_16 + 1));
2c0262af 5889 /* load the segment first to handle exceptions properly */
4ba9938c 5890 gen_op_ldu_T0_A0(s, MO_16);
2c0262af
FB
5891 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5892 /* then put the data */
57fec1fe 5893 gen_op_mov_reg_T1(ot, reg);
2c0262af 5894 if (s->is_jmp) {
14ce26e7 5895 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5896 gen_eob(s);
5897 }
5898 break;
3b46e624 5899
2c0262af
FB
5900 /************************/
5901 /* shifts */
5902 case 0xc0:
5903 case 0xc1:
5904 /* shift Ev,Ib */
5905 shift = 2;
5906 grp2:
5907 {
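/* shift group: shift == 0 -> count in CL, shift == 1 -> count is 1,
   shift == 2 -> count is an immediate byte fetched after the operand */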
5908 if ((b & 1) == 0)
4ba9938c 5909 ot = MO_8;
2c0262af 5910 else
4ba9938c 5911 ot = dflag + MO_16;
3b46e624 5912
0af10c86 5913 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5914 mod = (modrm >> 6) & 3;
2c0262af 5915 op = (modrm >> 3) & 7;
3b46e624 5916
2c0262af 5917 if (mod != 3) {
14ce26e7
FB
5918 if (shift == 2) {
5919 s->rip_offset = 1;
5920 }
0af10c86 5921 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af
FB
5922 opreg = OR_TMP0;
5923 } else {
14ce26e7 5924 opreg = (modrm & 7) | REX_B(s);
2c0262af
FB
5925 }
5926
5927 /* variable shift count taken from CL */
5928 if (shift == 0) {
5929 gen_shift(s, op, ot, opreg, OR_ECX);
5930 } else {
5931 if (shift == 2) {
0af10c86 5932 shift = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5933 }
5934 gen_shifti(s, op, ot, opreg, shift);
5935 }
5936 }
5937 break;
5938 case 0xd0:
5939 case 0xd1:
5940 /* shift Ev,1 */
5941 shift = 1;
5942 goto grp2;
5943 case 0xd2:
5944 case 0xd3:
5945 /* shift Ev,cl */
5946 shift = 0;
5947 goto grp2;
5948
5949 case 0x1a4: /* shld imm */
5950 op = 0;
5951 shift = 1;
5952 goto do_shiftd;
5953 case 0x1a5: /* shld cl */
5954 op = 0;
5955 shift = 0;
5956 goto do_shiftd;
5957 case 0x1ac: /* shrd imm */
5958 op = 1;
5959 shift = 1;
5960 goto do_shiftd;
5961 case 0x1ad: /* shrd cl */
5962 op = 1;
5963 shift = 0;
5964 do_shiftd:
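/* shld/shrd: op selects the direction (0 = shld, 1 = shrd);
   shift != 0 means the count is an immediate byte, otherwise it is CL */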
4ba9938c 5965 ot = dflag + MO_16;
0af10c86 5966 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5967 mod = (modrm >> 6) & 3;
14ce26e7
FB
5968 rm = (modrm & 7) | REX_B(s);
5969 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 5970 if (mod != 3) {
0af10c86 5971 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
b6abf97d 5972 opreg = OR_TMP0;
2c0262af 5973 } else {
b6abf97d 5974 opreg = rm;
2c0262af 5975 }
57fec1fe 5976 gen_op_mov_TN_reg(ot, 1, reg);
3b46e624 5977
2c0262af 5978 if (shift) {
3b9d3cf1
PB
5979 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5980 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5981 tcg_temp_free(imm);
2c0262af 5982 } else {
3b9d3cf1 5983 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
2c0262af
FB
5984 }
5985 break;
5986
5987 /************************/
5988 /* floats */
5fafdf24 5989 case 0xd8 ... 0xdf:
7eee2a50
FB
5990 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5991 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5992 /* XXX: what to do if illegal op ? */
5993 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5994 break;
5995 }
0af10c86 5996 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5997 mod = (modrm >> 6) & 3;
5998 rm = modrm & 7;
5999 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
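/* build a 6-bit index from the low three opcode bits and the ModRM reg
   field; the switches below decode the x87 operation from it */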
2c0262af
FB
6000 if (mod != 3) {
6001 /* memory op */
0af10c86 6002 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af
FB
6003 switch(op) {
6004 case 0x00 ... 0x07: /* fxxxs */
6005 case 0x10 ... 0x17: /* fixxxl */
6006 case 0x20 ... 0x27: /* fxxxl */
6007 case 0x30 ... 0x37: /* fixxx */
6008 {
6009 int op1;
6010 op1 = op & 7;
6011
6012 switch(op >> 4) {
6013 case 0:
4ba9938c 6014 gen_op_ld_T0_A0(s, MO_32);
b6abf97d 6015 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6016 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6017 break;
6018 case 1:
4ba9938c 6019 gen_op_ld_T0_A0(s, MO_32);
b6abf97d 6020 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6021 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6022 break;
6023 case 2:
3c5f4116
RH
6024 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
6025 s->mem_index, MO_LEQ);
d3eb5eae 6026 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
6027 break;
6028 case 3:
6029 default:
4ba9938c 6030 gen_op_lds_T0_A0(s, MO_16);
b6abf97d 6031 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6032 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6033 break;
6034 }
3b46e624 6035
a7812ae4 6036 gen_helper_fp_arith_ST0_FT0(op1);
2c0262af
FB
6037 if (op1 == 3) {
6038 /* fcomp needs pop */
d3eb5eae 6039 gen_helper_fpop(cpu_env);
2c0262af
FB
6040 }
6041 }
6042 break;
6043 case 0x08: /* flds */
6044 case 0x0a: /* fsts */
6045 case 0x0b: /* fstps */
465e9838
FB
6046 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
6047 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
6048 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
2c0262af
FB
6049 switch(op & 7) {
6050 case 0:
6051 switch(op >> 4) {
6052 case 0:
4ba9938c 6053 gen_op_ld_T0_A0(s, MO_32);
b6abf97d 6054 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6055 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6056 break;
6057 case 1:
4ba9938c 6058 gen_op_ld_T0_A0(s, MO_32);
b6abf97d 6059 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6060 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6061 break;
6062 case 2:
3c5f4116
RH
6063 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
6064 s->mem_index, MO_LEQ);
d3eb5eae 6065 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
6066 break;
6067 case 3:
6068 default:
4ba9938c 6069 gen_op_lds_T0_A0(s, MO_16);
b6abf97d 6070 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6071 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6072 break;
6073 }
6074 break;
465e9838 6075 case 1:
19e6c4b8 6076 /* XXX: the corresponding CPUID bit (SSE3, for fisttp) must be tested! */
465e9838
FB
6077 switch(op >> 4) {
6078 case 1:
d3eb5eae 6079 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
b6abf97d 6080 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4ba9938c 6081 gen_op_st_T0_A0(s, MO_32);
465e9838
FB
6082 break;
6083 case 2:
d3eb5eae 6084 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
3523e4bd
RH
6085 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
6086 s->mem_index, MO_LEQ);
465e9838
FB
6087 break;
6088 case 3:
6089 default:
d3eb5eae 6090 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
b6abf97d 6091 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4ba9938c 6092 gen_op_st_T0_A0(s, MO_16);
19e6c4b8 6093 break;
465e9838 6094 }
d3eb5eae 6095 gen_helper_fpop(cpu_env);
465e9838 6096 break;
2c0262af
FB
6097 default:
6098 switch(op >> 4) {
6099 case 0:
d3eb5eae 6100 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
b6abf97d 6101 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4ba9938c 6102 gen_op_st_T0_A0(s, MO_32);
2c0262af
FB
6103 break;
6104 case 1:
d3eb5eae 6105 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
b6abf97d 6106 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4ba9938c 6107 gen_op_st_T0_A0(s, MO_32);
2c0262af
FB
6108 break;
6109 case 2:
d3eb5eae 6110 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
3523e4bd
RH
6111 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
6112 s->mem_index, MO_LEQ);
2c0262af
FB
6113 break;
6114 case 3:
6115 default:
d3eb5eae 6116 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
b6abf97d 6117 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4ba9938c 6118 gen_op_st_T0_A0(s, MO_16);
2c0262af
FB
6119 break;
6120 }
6121 if ((op & 7) == 3)
d3eb5eae 6122 gen_helper_fpop(cpu_env);
2c0262af
FB
6123 break;
6124 }
6125 break;
6126 case 0x0c: /* fldenv mem */
773cdfcc 6127 gen_update_cc_op(s);
19e6c4b8 6128 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6129 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
2c0262af
FB
6130 break;
6131 case 0x0d: /* fldcw mem */
4ba9938c 6132 gen_op_ld_T0_A0(s, MO_16);
b6abf97d 6133 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 6134 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
2c0262af
FB
6135 break;
6136 case 0x0e: /* fnstenv mem */
773cdfcc 6137 gen_update_cc_op(s);
19e6c4b8 6138 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6139 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
2c0262af
FB
6140 break;
6141 case 0x0f: /* fnstcw mem */
d3eb5eae 6142 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
b6abf97d 6143 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4ba9938c 6144 gen_op_st_T0_A0(s, MO_16);
2c0262af
FB
6145 break;
6146 case 0x1d: /* fldt mem */
773cdfcc 6147 gen_update_cc_op(s);
19e6c4b8 6148 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6149 gen_helper_fldt_ST0(cpu_env, cpu_A0);
2c0262af
FB
6150 break;
6151 case 0x1f: /* fstpt mem */
773cdfcc 6152 gen_update_cc_op(s);
19e6c4b8 6153 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae
BS
6154 gen_helper_fstt_ST0(cpu_env, cpu_A0);
6155 gen_helper_fpop(cpu_env);
2c0262af
FB
6156 break;
6157 case 0x2c: /* frstor mem */
773cdfcc 6158 gen_update_cc_op(s);
19e6c4b8 6159 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6160 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
2c0262af
FB
6161 break;
6162 case 0x2e: /* fnsave mem */
773cdfcc 6163 gen_update_cc_op(s);
19e6c4b8 6164 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6165 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
2c0262af
FB
6166 break;
6167 case 0x2f: /* fnstsw mem */
d3eb5eae 6168 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
b6abf97d 6169 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4ba9938c 6170 gen_op_st_T0_A0(s, MO_16);
2c0262af
FB
6171 break;
6172 case 0x3c: /* fbld */
773cdfcc 6173 gen_update_cc_op(s);
19e6c4b8 6174 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6175 gen_helper_fbld_ST0(cpu_env, cpu_A0);
2c0262af
FB
6176 break;
6177 case 0x3e: /* fbstp */
773cdfcc 6178 gen_update_cc_op(s);
19e6c4b8 6179 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae
BS
6180 gen_helper_fbst_ST0(cpu_env, cpu_A0);
6181 gen_helper_fpop(cpu_env);
2c0262af
FB
6182 break;
6183 case 0x3d: /* fildll */
3c5f4116 6184 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
d3eb5eae 6185 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
6186 break;
6187 case 0x3f: /* fistpll */
d3eb5eae 6188 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
3523e4bd 6189 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
d3eb5eae 6190 gen_helper_fpop(cpu_env);
2c0262af
FB
6191 break;
6192 default:
6193 goto illegal_op;
6194 }
6195 } else {
6196 /* register float ops */
6197 opreg = rm;
6198
6199 switch(op) {
6200 case 0x08: /* fld sti */
d3eb5eae
BS
6201 gen_helper_fpush(cpu_env);
6202 gen_helper_fmov_ST0_STN(cpu_env,
6203 tcg_const_i32((opreg + 1) & 7));
2c0262af
FB
6204 break;
6205 case 0x09: /* fxchg sti */
c169c906
FB
6206 case 0x29: /* fxchg4 sti, undocumented op */
6207 case 0x39: /* fxchg7 sti, undocumented op */
d3eb5eae 6208 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
2c0262af
FB
6209 break;
6210 case 0x0a: /* grp d9/2 */
6211 switch(rm) {
6212 case 0: /* fnop */
023fe10d 6213 /* check exceptions (FreeBSD FPU probe) */
773cdfcc 6214 gen_update_cc_op(s);
14ce26e7 6215 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 6216 gen_helper_fwait(cpu_env);
2c0262af
FB
6217 break;
6218 default:
6219 goto illegal_op;
6220 }
6221 break;
6222 case 0x0c: /* grp d9/4 */
6223 switch(rm) {
6224 case 0: /* fchs */
d3eb5eae 6225 gen_helper_fchs_ST0(cpu_env);
2c0262af
FB
6226 break;
6227 case 1: /* fabs */
d3eb5eae 6228 gen_helper_fabs_ST0(cpu_env);
2c0262af
FB
6229 break;
6230 case 4: /* ftst */
d3eb5eae
BS
6231 gen_helper_fldz_FT0(cpu_env);
6232 gen_helper_fcom_ST0_FT0(cpu_env);
2c0262af
FB
6233 break;
6234 case 5: /* fxam */
d3eb5eae 6235 gen_helper_fxam_ST0(cpu_env);
2c0262af
FB
6236 break;
6237 default:
6238 goto illegal_op;
6239 }
6240 break;
6241 case 0x0d: /* grp d9/5 */
6242 {
6243 switch(rm) {
6244 case 0:
d3eb5eae
BS
6245 gen_helper_fpush(cpu_env);
6246 gen_helper_fld1_ST0(cpu_env);
2c0262af
FB
6247 break;
6248 case 1:
d3eb5eae
BS
6249 gen_helper_fpush(cpu_env);
6250 gen_helper_fldl2t_ST0(cpu_env);
2c0262af
FB
6251 break;
6252 case 2:
d3eb5eae
BS
6253 gen_helper_fpush(cpu_env);
6254 gen_helper_fldl2e_ST0(cpu_env);
2c0262af
FB
6255 break;
6256 case 3:
d3eb5eae
BS
6257 gen_helper_fpush(cpu_env);
6258 gen_helper_fldpi_ST0(cpu_env);
2c0262af
FB
6259 break;
6260 case 4:
d3eb5eae
BS
6261 gen_helper_fpush(cpu_env);
6262 gen_helper_fldlg2_ST0(cpu_env);
2c0262af
FB
6263 break;
6264 case 5:
d3eb5eae
BS
6265 gen_helper_fpush(cpu_env);
6266 gen_helper_fldln2_ST0(cpu_env);
2c0262af
FB
6267 break;
6268 case 6:
d3eb5eae
BS
6269 gen_helper_fpush(cpu_env);
6270 gen_helper_fldz_ST0(cpu_env);
2c0262af
FB
6271 break;
6272 default:
6273 goto illegal_op;
6274 }
6275 }
6276 break;
6277 case 0x0e: /* grp d9/6 */
6278 switch(rm) {
6279 case 0: /* f2xm1 */
d3eb5eae 6280 gen_helper_f2xm1(cpu_env);
2c0262af
FB
6281 break;
6282 case 1: /* fyl2x */
d3eb5eae 6283 gen_helper_fyl2x(cpu_env);
2c0262af
FB
6284 break;
6285 case 2: /* fptan */
d3eb5eae 6286 gen_helper_fptan(cpu_env);
2c0262af
FB
6287 break;
6288 case 3: /* fpatan */
d3eb5eae 6289 gen_helper_fpatan(cpu_env);
2c0262af
FB
6290 break;
6291 case 4: /* fxtract */
d3eb5eae 6292 gen_helper_fxtract(cpu_env);
2c0262af
FB
6293 break;
6294 case 5: /* fprem1 */
d3eb5eae 6295 gen_helper_fprem1(cpu_env);
2c0262af
FB
6296 break;
6297 case 6: /* fdecstp */
d3eb5eae 6298 gen_helper_fdecstp(cpu_env);
2c0262af
FB
6299 break;
6300 default:
6301 case 7: /* fincstp */
d3eb5eae 6302 gen_helper_fincstp(cpu_env);
2c0262af
FB
6303 break;
6304 }
6305 break;
6306 case 0x0f: /* grp d9/7 */
6307 switch(rm) {
6308 case 0: /* fprem */
d3eb5eae 6309 gen_helper_fprem(cpu_env);
2c0262af
FB
6310 break;
6311 case 1: /* fyl2xp1 */
d3eb5eae 6312 gen_helper_fyl2xp1(cpu_env);
2c0262af
FB
6313 break;
6314 case 2: /* fsqrt */
d3eb5eae 6315 gen_helper_fsqrt(cpu_env);
2c0262af
FB
6316 break;
6317 case 3: /* fsincos */
d3eb5eae 6318 gen_helper_fsincos(cpu_env);
2c0262af
FB
6319 break;
6320 case 5: /* fscale */
d3eb5eae 6321 gen_helper_fscale(cpu_env);
2c0262af
FB
6322 break;
6323 case 4: /* frndint */
d3eb5eae 6324 gen_helper_frndint(cpu_env);
2c0262af
FB
6325 break;
6326 case 6: /* fsin */
d3eb5eae 6327 gen_helper_fsin(cpu_env);
2c0262af
FB
6328 break;
6329 default:
6330 case 7: /* fcos */
d3eb5eae 6331 gen_helper_fcos(cpu_env);
2c0262af
FB
6332 break;
6333 }
6334 break;
6335 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6336 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6337 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6338 {
6339 int op1;
3b46e624 6340
2c0262af
FB
6341 op1 = op & 7;
6342 if (op >= 0x20) {
a7812ae4 6343 gen_helper_fp_arith_STN_ST0(op1, opreg);
2c0262af 6344 if (op >= 0x30)
d3eb5eae 6345 gen_helper_fpop(cpu_env);
2c0262af 6346 } else {
d3eb5eae 6347 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
a7812ae4 6348 gen_helper_fp_arith_ST0_FT0(op1);
2c0262af
FB
6349 }
6350 }
6351 break;
6352 case 0x02: /* fcom */
c169c906 6353 case 0x22: /* fcom2, undocumented op */
d3eb5eae
BS
6354 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6355 gen_helper_fcom_ST0_FT0(cpu_env);
2c0262af
FB
6356 break;
6357 case 0x03: /* fcomp */
c169c906
FB
6358 case 0x23: /* fcomp3, undocumented op */
6359 case 0x32: /* fcomp5, undocumented op */
d3eb5eae
BS
6360 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6361 gen_helper_fcom_ST0_FT0(cpu_env);
6362 gen_helper_fpop(cpu_env);
2c0262af
FB
6363 break;
6364 case 0x15: /* da/5 */
6365 switch(rm) {
6366 case 1: /* fucompp */
d3eb5eae
BS
6367 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6368 gen_helper_fucom_ST0_FT0(cpu_env);
6369 gen_helper_fpop(cpu_env);
6370 gen_helper_fpop(cpu_env);
2c0262af
FB
6371 break;
6372 default:
6373 goto illegal_op;
6374 }
6375 break;
6376 case 0x1c:
6377 switch(rm) {
6378 case 0: /* feni (287 only, just do nop here) */
6379 break;
6380 case 1: /* fdisi (287 only, just do nop here) */
6381 break;
6382 case 2: /* fclex */
d3eb5eae 6383 gen_helper_fclex(cpu_env);
2c0262af
FB
6384 break;
6385 case 3: /* fninit */
d3eb5eae 6386 gen_helper_fninit(cpu_env);
2c0262af
FB
6387 break;
6388 case 4: /* fsetpm (287 only, just do nop here) */
6389 break;
6390 default:
6391 goto illegal_op;
6392 }
6393 break;
6394 case 0x1d: /* fucomi */
bff93281
PM
6395 if (!(s->cpuid_features & CPUID_CMOV)) {
6396 goto illegal_op;
6397 }
773cdfcc 6398 gen_update_cc_op(s);
d3eb5eae
BS
6399 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6400 gen_helper_fucomi_ST0_FT0(cpu_env);
3ca51d07 6401 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6402 break;
6403 case 0x1e: /* fcomi */
bff93281
PM
6404 if (!(s->cpuid_features & CPUID_CMOV)) {
6405 goto illegal_op;
6406 }
773cdfcc 6407 gen_update_cc_op(s);
d3eb5eae
BS
6408 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6409 gen_helper_fcomi_ST0_FT0(cpu_env);
3ca51d07 6410 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 6411 break;
658c8bda 6412 case 0x28: /* ffree sti */
d3eb5eae 6413 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
5fafdf24 6414 break;
2c0262af 6415 case 0x2a: /* fst sti */
d3eb5eae 6416 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
2c0262af
FB
6417 break;
6418 case 0x2b: /* fstp sti */
c169c906
FB
6419 case 0x0b: /* fstp1 sti, undocumented op */
6420 case 0x3a: /* fstp8 sti, undocumented op */
6421 case 0x3b: /* fstp9 sti, undocumented op */
d3eb5eae
BS
6422 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6423 gen_helper_fpop(cpu_env);
2c0262af
FB
6424 break;
6425 case 0x2c: /* fucom st(i) */
d3eb5eae
BS
6426 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6427 gen_helper_fucom_ST0_FT0(cpu_env);
2c0262af
FB
6428 break;
6429 case 0x2d: /* fucomp st(i) */
d3eb5eae
BS
6430 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6431 gen_helper_fucom_ST0_FT0(cpu_env);
6432 gen_helper_fpop(cpu_env);
2c0262af
FB
6433 break;
6434 case 0x33: /* de/3 */
6435 switch(rm) {
6436 case 1: /* fcompp */
d3eb5eae
BS
6437 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6438 gen_helper_fcom_ST0_FT0(cpu_env);
6439 gen_helper_fpop(cpu_env);
6440 gen_helper_fpop(cpu_env);
2c0262af
FB
6441 break;
6442 default:
6443 goto illegal_op;
6444 }
6445 break;
c169c906 6446 case 0x38: /* ffreep sti, undocumented op */
d3eb5eae
BS
6447 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6448 gen_helper_fpop(cpu_env);
c169c906 6449 break;
2c0262af
FB
6450 case 0x3c: /* df/4 */
6451 switch(rm) {
6452 case 0:
d3eb5eae 6453 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
b6abf97d 6454 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4ba9938c 6455 gen_op_mov_reg_T0(MO_16, R_EAX);
2c0262af
FB
6456 break;
6457 default:
6458 goto illegal_op;
6459 }
6460 break;
6461 case 0x3d: /* fucomip */
bff93281
PM
6462 if (!(s->cpuid_features & CPUID_CMOV)) {
6463 goto illegal_op;
6464 }
773cdfcc 6465 gen_update_cc_op(s);
d3eb5eae
BS
6466 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6467 gen_helper_fucomi_ST0_FT0(cpu_env);
6468 gen_helper_fpop(cpu_env);
3ca51d07 6469 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6470 break;
6471 case 0x3e: /* fcomip */
bff93281
PM
6472 if (!(s->cpuid_features & CPUID_CMOV)) {
6473 goto illegal_op;
6474 }
773cdfcc 6475 gen_update_cc_op(s);
d3eb5eae
BS
6476 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6477 gen_helper_fcomi_ST0_FT0(cpu_env);
6478 gen_helper_fpop(cpu_env);
3ca51d07 6479 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 6480 break;
a2cc3b24
FB
6481 case 0x10 ... 0x13: /* fcmovxx */
6482 case 0x18 ... 0x1b:
6483 {
19e6c4b8 6484 int op1, l1;
d70040bc 6485 static const uint8_t fcmov_cc[8] = {
a2cc3b24
FB
6486 (JCC_B << 1),
6487 (JCC_Z << 1),
6488 (JCC_BE << 1),
6489 (JCC_P << 1),
6490 };
bff93281
PM
6491
6492 if (!(s->cpuid_features & CPUID_CMOV)) {
6493 goto illegal_op;
6494 }
1e4840bf 6495 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
19e6c4b8 6496 l1 = gen_new_label();
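/* branch over the move when the inverted condition holds, so ST0 is
   only replaced when the fcmov condition is actually true */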
dc259201 6497 gen_jcc1_noeob(s, op1, l1);
d3eb5eae 6498 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
19e6c4b8 6499 gen_set_label(l1);
a2cc3b24
FB
6500 }
6501 break;
2c0262af
FB
6502 default:
6503 goto illegal_op;
6504 }
6505 }
6506 break;
6507 /************************/
6508 /* string ops */
6509
6510 case 0xa4: /* movsS */
6511 case 0xa5:
6512 if ((b & 1) == 0)
4ba9938c 6513 ot = MO_8;
2c0262af 6514 else
4ba9938c 6515 ot = dflag + MO_16;
2c0262af
FB
6516
6517 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6518 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6519 } else {
6520 gen_movs(s, ot);
6521 }
6522 break;
3b46e624 6523
2c0262af
FB
6524 case 0xaa: /* stosS */
6525 case 0xab:
6526 if ((b & 1) == 0)
4ba9938c 6527 ot = MO_8;
2c0262af 6528 else
4ba9938c 6529 ot = dflag + MO_16;
2c0262af
FB
6530
6531 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6532 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6533 } else {
6534 gen_stos(s, ot);
6535 }
6536 break;
6537 case 0xac: /* lodsS */
6538 case 0xad:
6539 if ((b & 1) == 0)
4ba9938c 6540 ot = MO_8;
2c0262af 6541 else
4ba9938c 6542 ot = dflag + MO_16;
2c0262af
FB
6543 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6544 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6545 } else {
6546 gen_lods(s, ot);
6547 }
6548 break;
6549 case 0xae: /* scasS */
6550 case 0xaf:
6551 if ((b & 1) == 0)
4ba9938c 6552 ot = MO_8;
2c0262af 6553 else
4ba9938c 6554 ot = dflag + MO_16;
2c0262af
FB
6555 if (prefixes & PREFIX_REPNZ) {
6556 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6557 } else if (prefixes & PREFIX_REPZ) {
6558 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6559 } else {
6560 gen_scas(s, ot);
2c0262af
FB
6561 }
6562 break;
6563
6564 case 0xa6: /* cmpsS */
6565 case 0xa7:
6566 if ((b & 1) == 0)
4ba9938c 6567 ot = MO_8;
2c0262af 6568 else
4ba9938c 6569 ot = dflag + MO_16;
2c0262af
FB
6570 if (prefixes & PREFIX_REPNZ) {
6571 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6572 } else if (prefixes & PREFIX_REPZ) {
6573 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6574 } else {
6575 gen_cmps(s, ot);
2c0262af
FB
6576 }
6577 break;
6578 case 0x6c: /* insS */
6579 case 0x6d:
f115e911 6580 if ((b & 1) == 0)
4ba9938c 6581 ot = MO_8;
f115e911 6582 else
4ba9938c
RH
6583 ot = dflag ? MO_32 : MO_16;
6584 gen_op_mov_TN_reg(MO_16, 0, R_EDX);
0573fbfc 6585 gen_op_andl_T0_ffff();
b8b6a50b
FB
6586 gen_check_io(s, ot, pc_start - s->cs_base,
6587 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
f115e911
FB
6588 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6589 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
2c0262af 6590 } else {
f115e911 6591 gen_ins(s, ot);
2e70f6ef
PB
6592 if (use_icount) {
6593 gen_jmp(s, s->pc - s->cs_base);
6594 }
2c0262af
FB
6595 }
6596 break;
6597 case 0x6e: /* outsS */
6598 case 0x6f:
f115e911 6599 if ((b & 1) == 0)
4ba9938c 6600 ot = MO_8;
f115e911 6601 else
4ba9938c
RH
6602 ot = dflag ? MO_32 : MO_16;
6603 gen_op_mov_TN_reg(MO_16, 0, R_EDX);
0573fbfc 6604 gen_op_andl_T0_ffff();
b8b6a50b
FB
6605 gen_check_io(s, ot, pc_start - s->cs_base,
6606 svm_is_rep(prefixes) | 4);
f115e911
FB
6607 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6608 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
2c0262af 6609 } else {
f115e911 6610 gen_outs(s, ot);
2e70f6ef
PB
6611 if (use_icount) {
6612 gen_jmp(s, s->pc - s->cs_base);
6613 }
2c0262af
FB
6614 }
6615 break;
6616
6617 /************************/
6618 /* port I/O */
0573fbfc 6619
2c0262af
FB
6620 case 0xe4:
6621 case 0xe5:
f115e911 6622 if ((b & 1) == 0)
4ba9938c 6623 ot = MO_8;
f115e911 6624 else
4ba9938c 6625 ot = dflag ? MO_32 : MO_16;
0af10c86 6626 val = cpu_ldub_code(env, s->pc++);
f115e911 6627 gen_op_movl_T0_im(val);
b8b6a50b
FB
6628 gen_check_io(s, ot, pc_start - s->cs_base,
6629 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
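/* with icount enabled, the I/O access must sit between gen_io_start and
   gen_io_end and the TB must end right after, so the instruction
   counter stays exact */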
2e70f6ef
PB
6630 if (use_icount)
6631 gen_io_start();
b6abf97d 6632 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
a7812ae4 6633 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
57fec1fe 6634 gen_op_mov_reg_T1(ot, R_EAX);
2e70f6ef
PB
6635 if (use_icount) {
6636 gen_io_end();
6637 gen_jmp(s, s->pc - s->cs_base);
6638 }
2c0262af
FB
6639 break;
6640 case 0xe6:
6641 case 0xe7:
f115e911 6642 if ((b & 1) == 0)
4ba9938c 6643 ot = MO_8;
f115e911 6644 else
4ba9938c 6645 ot = dflag ? MO_32 : MO_16;
0af10c86 6646 val = cpu_ldub_code(env, s->pc++);
f115e911 6647 gen_op_movl_T0_im(val);
b8b6a50b
FB
6648 gen_check_io(s, ot, pc_start - s->cs_base,
6649 svm_is_rep(prefixes));
57fec1fe 6650 gen_op_mov_TN_reg(ot, 1, R_EAX);
b8b6a50b 6651
2e70f6ef
PB
6652 if (use_icount)
6653 gen_io_start();
b6abf97d 6654 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
b6abf97d 6655 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
a7812ae4 6656 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
2e70f6ef
PB
6657 if (use_icount) {
6658 gen_io_end();
6659 gen_jmp(s, s->pc - s->cs_base);
6660 }
2c0262af
FB
6661 break;
6662 case 0xec:
6663 case 0xed:
f115e911 6664 if ((b & 1) == 0)
4ba9938c 6665 ot = MO_8;
f115e911 6666 else
4ba9938c
RH
6667 ot = dflag ? MO_32 : MO_16;
6668 gen_op_mov_TN_reg(MO_16, 0, R_EDX);
4f31916f 6669 gen_op_andl_T0_ffff();
b8b6a50b
FB
6670 gen_check_io(s, ot, pc_start - s->cs_base,
6671 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
2e70f6ef
PB
6672 if (use_icount)
6673 gen_io_start();
b6abf97d 6674 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
a7812ae4 6675 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
57fec1fe 6676 gen_op_mov_reg_T1(ot, R_EAX);
2e70f6ef
PB
6677 if (use_icount) {
6678 gen_io_end();
6679 gen_jmp(s, s->pc - s->cs_base);
6680 }
2c0262af
FB
6681 break;
6682 case 0xee:
6683 case 0xef:
f115e911 6684 if ((b & 1) == 0)
4ba9938c 6685 ot = MO_8;
f115e911 6686 else
4ba9938c
RH
6687 ot = dflag ? MO_32 : MO_16;
6688 gen_op_mov_TN_reg(MO_16, 0, R_EDX);
4f31916f 6689 gen_op_andl_T0_ffff();
b8b6a50b
FB
6690 gen_check_io(s, ot, pc_start - s->cs_base,
6691 svm_is_rep(prefixes));
57fec1fe 6692 gen_op_mov_TN_reg(ot, 1, R_EAX);
b8b6a50b 6693
2e70f6ef
PB
6694 if (use_icount)
6695 gen_io_start();
b6abf97d 6696 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
b6abf97d 6697 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
a7812ae4 6698 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
2e70f6ef
PB
6699 if (use_icount) {
6700 gen_io_end();
6701 gen_jmp(s, s->pc - s->cs_base);
6702 }
2c0262af
FB
6703 break;
6704
6705 /************************/
6706 /* control */
6707 case 0xc2: /* ret im */
0af10c86 6708 val = cpu_ldsw_code(env, s->pc);
2c0262af
FB
6709 s->pc += 2;
6710 gen_pop_T0(s);
8f091a59
FB
6711 if (CODE64(s) && s->dflag)
6712 s->dflag = 2;
2c0262af
FB
6713 gen_stack_update(s, val + (2 << s->dflag));
6714 if (s->dflag == 0)
6715 gen_op_andl_T0_ffff();
6716 gen_op_jmp_T0();
6717 gen_eob(s);
6718 break;
6719 case 0xc3: /* ret */
6720 gen_pop_T0(s);
6721 gen_pop_update(s);
6722 if (s->dflag == 0)
6723 gen_op_andl_T0_ffff();
6724 gen_op_jmp_T0();
6725 gen_eob(s);
6726 break;
6727 case 0xca: /* lret im */
0af10c86 6728 val = cpu_ldsw_code(env, s->pc);
2c0262af
FB
6729 s->pc += 2;
6730 do_lret:
6731 if (s->pe && !s->vm86) {
773cdfcc 6732 gen_update_cc_op(s);
14ce26e7 6733 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 6734 gen_helper_lret_protected(cpu_env, tcg_const_i32(s->dflag),
a7812ae4 6735 tcg_const_i32(val));
2c0262af
FB
6736 } else {
6737 gen_stack_A0(s);
6738 /* pop offset */
323d1876 6739 gen_op_ld_T0_A0(s, 1 + s->dflag);
2c0262af
FB
6740 if (s->dflag == 0)
6741 gen_op_andl_T0_ffff();
6742 /* NOTE: keeping EIP updated is not a problem in case of
6743 exception */
6744 gen_op_jmp_T0();
6745 /* pop selector */
6746 gen_op_addl_A0_im(2 << s->dflag);
323d1876 6747 gen_op_ld_T0_A0(s, 1 + s->dflag);
3bd7da9e 6748 gen_op_movl_seg_T0_vm(R_CS);
2c0262af
FB
6749 /* add stack offset */
6750 gen_stack_update(s, val + (4 << s->dflag));
6751 }
6752 gen_eob(s);
6753 break;
6754 case 0xcb: /* lret */
6755 val = 0;
6756 goto do_lret;
6757 case 0xcf: /* iret */
872929aa 6758 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
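/* iret: real mode and vm86 with IOPL 3 use the real-mode helper,
   vm86 with IOPL < 3 raises #GP, protected mode uses the full
   protected-mode helper */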
2c0262af
FB
6759 if (!s->pe) {
6760 /* real mode */
2999a0b2 6761 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
3ca51d07 6762 set_cc_op(s, CC_OP_EFLAGS);
f115e911
FB
6763 } else if (s->vm86) {
6764 if (s->iopl != 3) {
6765 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6766 } else {
2999a0b2 6767 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
3ca51d07 6768 set_cc_op(s, CC_OP_EFLAGS);
f115e911 6769 }
2c0262af 6770 } else {
773cdfcc 6771 gen_update_cc_op(s);
14ce26e7 6772 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 6773 gen_helper_iret_protected(cpu_env, tcg_const_i32(s->dflag),
a7812ae4 6774 tcg_const_i32(s->pc - s->cs_base));
3ca51d07 6775 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6776 }
6777 gen_eob(s);
6778 break;
6779 case 0xe8: /* call im */
6780 {
14ce26e7 6781 if (dflag)
4ba9938c 6782 tval = (int32_t)insn_get(env, s, MO_32);
14ce26e7 6783 else
4ba9938c 6784 tval = (int16_t)insn_get(env, s, MO_16);
2c0262af 6785 next_eip = s->pc - s->cs_base;
14ce26e7 6786 tval += next_eip;
2c0262af 6787 if (s->dflag == 0)
14ce26e7 6788 tval &= 0xffff;
99596385
AJ
6789 else if(!CODE64(s))
6790 tval &= 0xffffffff;
14ce26e7 6791 gen_movtl_T0_im(next_eip);
2c0262af 6792 gen_push_T0(s);
14ce26e7 6793 gen_jmp(s, tval);
2c0262af
FB
6794 }
6795 break;
6796 case 0x9a: /* lcall im */
6797 {
6798 unsigned int selector, offset;
3b46e624 6799
14ce26e7
FB
6800 if (CODE64(s))
6801 goto illegal_op;
4ba9938c 6802 ot = dflag ? MO_32 : MO_16;
0af10c86 6803 offset = insn_get(env, s, ot);
4ba9938c 6804 selector = insn_get(env, s, MO_16);
3b46e624 6805
2c0262af 6806 gen_op_movl_T0_im(selector);
14ce26e7 6807 gen_op_movl_T1_imu(offset);
2c0262af
FB
6808 }
6809 goto do_lcall;
ecada8a2 6810 case 0xe9: /* jmp im */
14ce26e7 6811 if (dflag)
4ba9938c 6812 tval = (int32_t)insn_get(env, s, MO_32);
14ce26e7 6813 else
4ba9938c 6814 tval = (int16_t)insn_get(env, s, MO_16);
14ce26e7 6815 tval += s->pc - s->cs_base;
2c0262af 6816 if (s->dflag == 0)
14ce26e7 6817 tval &= 0xffff;
32938e12
AJ
6818 else if(!CODE64(s))
6819 tval &= 0xffffffff;
14ce26e7 6820 gen_jmp(s, tval);
2c0262af
FB
6821 break;
6822 case 0xea: /* ljmp im */
6823 {
6824 unsigned int selector, offset;
6825
14ce26e7
FB
6826 if (CODE64(s))
6827 goto illegal_op;
4ba9938c 6828 ot = dflag ? MO_32 : MO_16;
0af10c86 6829 offset = insn_get(env, s, ot);
4ba9938c 6830 selector = insn_get(env, s, MO_16);
3b46e624 6831
2c0262af 6832 gen_op_movl_T0_im(selector);
14ce26e7 6833 gen_op_movl_T1_imu(offset);
2c0262af
FB
6834 }
6835 goto do_ljmp;
6836 case 0xeb: /* jmp Jb */
4ba9938c 6837 tval = (int8_t)insn_get(env, s, MO_8);
14ce26e7 6838 tval += s->pc - s->cs_base;
2c0262af 6839 if (s->dflag == 0)
14ce26e7
FB
6840 tval &= 0xffff;
6841 gen_jmp(s, tval);
2c0262af
FB
6842 break;
6843 case 0x70 ... 0x7f: /* jcc Jb */
4ba9938c 6844 tval = (int8_t)insn_get(env, s, MO_8);
2c0262af
FB
6845 goto do_jcc;
6846 case 0x180 ... 0x18f: /* jcc Jv */
6847 if (dflag) {
4ba9938c 6848 tval = (int32_t)insn_get(env, s, MO_32);
2c0262af 6849 } else {
4ba9938c 6850 tval = (int16_t)insn_get(env, s, MO_16);
2c0262af
FB
6851 }
6852 do_jcc:
6853 next_eip = s->pc - s->cs_base;
14ce26e7 6854 tval += next_eip;
2c0262af 6855 if (s->dflag == 0)
14ce26e7
FB
6856 tval &= 0xffff;
6857 gen_jcc(s, b, tval, next_eip);
2c0262af
FB
6858 break;
6859
6860 case 0x190 ... 0x19f: /* setcc Gv */
0af10c86 6861 modrm = cpu_ldub_code(env, s->pc++);
cc8b6f5b 6862 gen_setcc1(s, b, cpu_T[0]);
4ba9938c 6863 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
2c0262af
FB
6864 break;
6865 case 0x140 ... 0x14f: /* cmov Gv, Ev */
bff93281
PM
6866 if (!(s->cpuid_features & CPUID_CMOV)) {
6867 goto illegal_op;
6868 }
4ba9938c 6869 ot = dflag + MO_16;
f32d3781
PB
6870 modrm = cpu_ldub_code(env, s->pc++);
6871 reg = ((modrm >> 3) & 7) | rex_r;
6872 gen_cmovcc1(env, s, ot, b, modrm, reg);
2c0262af 6873 break;
3b46e624 6874
2c0262af
FB
6875 /************************/
6876 /* flags */
6877 case 0x9c: /* pushf */
872929aa 6878 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
2c0262af
FB
6879 if (s->vm86 && s->iopl != 3) {
6880 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6881 } else {
773cdfcc 6882 gen_update_cc_op(s);
f0967a1a 6883 gen_helper_read_eflags(cpu_T[0], cpu_env);
2c0262af
FB
6884 gen_push_T0(s);
6885 }
6886 break;
6887 case 0x9d: /* popf */
872929aa 6888 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
2c0262af
FB
6889 if (s->vm86 && s->iopl != 3) {
6890 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6891 } else {
6892 gen_pop_T0(s);
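/* the set of writable EFLAGS bits depends on privilege: CPL 0 may change
   IOPL and IF, CPL <= IOPL may change IF only, anything else may change
   neither; the 16-bit form additionally masks the high half */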
6893 if (s->cpl == 0) {
6894 if (s->dflag) {
f0967a1a
BS
6895 gen_helper_write_eflags(cpu_env, cpu_T[0],
6896 tcg_const_i32((TF_MASK | AC_MASK |
6897 ID_MASK | NT_MASK |
6898 IF_MASK |
6899 IOPL_MASK)));
2c0262af 6900 } else {
f0967a1a
BS
6901 gen_helper_write_eflags(cpu_env, cpu_T[0],
6902 tcg_const_i32((TF_MASK | AC_MASK |
6903 ID_MASK | NT_MASK |
6904 IF_MASK | IOPL_MASK)
6905 & 0xffff));
2c0262af
FB
6906 }
6907 } else {
4136f33c
FB
6908 if (s->cpl <= s->iopl) {
6909 if (s->dflag) {
f0967a1a
BS
6910 gen_helper_write_eflags(cpu_env, cpu_T[0],
6911 tcg_const_i32((TF_MASK |
6912 AC_MASK |
6913 ID_MASK |
6914 NT_MASK |
6915 IF_MASK)));
4136f33c 6916 } else {
f0967a1a
BS
6917 gen_helper_write_eflags(cpu_env, cpu_T[0],
6918 tcg_const_i32((TF_MASK |
6919 AC_MASK |
6920 ID_MASK |
6921 NT_MASK |
6922 IF_MASK)
6923 & 0xffff));
4136f33c 6924 }
2c0262af 6925 } else {
4136f33c 6926 if (s->dflag) {
f0967a1a
BS
6927 gen_helper_write_eflags(cpu_env, cpu_T[0],
6928 tcg_const_i32((TF_MASK | AC_MASK |
6929 ID_MASK | NT_MASK)));
4136f33c 6930 } else {
f0967a1a
BS
6931 gen_helper_write_eflags(cpu_env, cpu_T[0],
6932 tcg_const_i32((TF_MASK | AC_MASK |
6933 ID_MASK | NT_MASK)
6934 & 0xffff));
4136f33c 6935 }
2c0262af
FB
6936 }
6937 }
6938 gen_pop_update(s);
3ca51d07 6939 set_cc_op(s, CC_OP_EFLAGS);
a9321a4d 6940 /* abort translation because TF/AC flag may change */
14ce26e7 6941 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
6942 gen_eob(s);
6943 }
6944 break;
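The nested if/else in the popf case above only selects which EFLAGS bits the write is allowed to touch, based on CPL, IOPL and operand size. A compact sketch of that mask selection, assuming the architectural EFLAGS bit positions:

#include <stdint.h>

/* Architectural EFLAGS bits (same values the CPU defines). */
#define X_TF   0x00000100
#define X_IF   0x00000200
#define X_IOPL 0x00003000
#define X_NT   0x00004000
#define X_AC   0x00040000
#define X_ID   0x00200000

/* Illustrative only: the set of EFLAGS bits POPF may update.
   CPL 0 may touch IF and IOPL; CPL <= IOPL may touch IF but not IOPL;
   otherwise neither.  A 16-bit POPF keeps only the low word. */
uint32_t popf_write_mask(int cpl, int iopl, int dflag16)
{
    uint32_t mask = X_TF | X_AC | X_ID | X_NT;
    if (cpl == 0) {
        mask |= X_IF | X_IOPL;
    } else if (cpl <= iopl) {
        mask |= X_IF;
    }
    return dflag16 ? (mask & 0xffff) : mask;
}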
6945 case 0x9e: /* sahf */
12e26b75 6946 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
14ce26e7 6947 goto illegal_op;
4ba9938c 6948 gen_op_mov_TN_reg(MO_8, 0, R_AH);
d229edce 6949 gen_compute_eflags(s);
bd7a7b33
FB
6950 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6951 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6952 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
2c0262af
FB
6953 break;
6954 case 0x9f: /* lahf */
12e26b75 6955 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
14ce26e7 6956 goto illegal_op;
d229edce 6957 gen_compute_eflags(s);
bd7a7b33 6958 /* Note: gen_compute_eflags() only gives the condition codes */
d229edce 6959 tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
4ba9938c 6960 gen_op_mov_reg_T0(MO_8, R_AH);
2c0262af
FB
6961 break;
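sahf and lahf above move only the low flags byte (SF/ZF/AF/PF/CF plus the always-set bit 1) between AH and the flags state; the TCG code expresses the same thing through cpu_cc_src. A plain-C restatement of the architectural semantics, with illustrative names:

#include <stdint.h>

#define F_CF 0x01
#define F_PF 0x04
#define F_AF 0x10
#define F_ZF 0x40
#define F_SF 0x80
#define LAHF_BITS (F_SF | F_ZF | F_AF | F_PF | F_CF)

/* LAHF: AH = low flags byte, with the reserved bit 1 always set. */
uint8_t do_lahf(uint32_t eflags)
{
    return (eflags & LAHF_BITS) | 0x02;
}

/* SAHF: replace SF/ZF/AF/PF/CF with the corresponding bits of AH,
   leaving every other flag (including OF) untouched. */
uint32_t do_sahf(uint32_t eflags, uint8_t ah)
{
    return (eflags & ~(uint32_t)LAHF_BITS) | (ah & LAHF_BITS);
}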
6962 case 0xf5: /* cmc */
d229edce 6963 gen_compute_eflags(s);
bd7a7b33 6964 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
2c0262af
FB
6965 break;
6966 case 0xf8: /* clc */
d229edce 6967 gen_compute_eflags(s);
bd7a7b33 6968 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
2c0262af
FB
6969 break;
6970 case 0xf9: /* stc */
d229edce 6971 gen_compute_eflags(s);
bd7a7b33 6972 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
2c0262af
FB
6973 break;
6974 case 0xfc: /* cld */
b6abf97d 6975 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
317ac620 6976 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
2c0262af
FB
6977 break;
6978 case 0xfd: /* std */
b6abf97d 6979 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
317ac620 6980 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
2c0262af
FB
6981 break;
6982
6983 /************************/
6984 /* bit operations */
6985 case 0x1ba: /* bt/bts/btr/btc Gv, im */
4ba9938c 6986 ot = dflag + MO_16;
0af10c86 6987 modrm = cpu_ldub_code(env, s->pc++);
33698e5f 6988 op = (modrm >> 3) & 7;
2c0262af 6989 mod = (modrm >> 6) & 3;
14ce26e7 6990 rm = (modrm & 7) | REX_B(s);
2c0262af 6991 if (mod != 3) {
14ce26e7 6992 s->rip_offset = 1;
0af10c86 6993 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876 6994 gen_op_ld_T0_A0(s, ot);
2c0262af 6995 } else {
57fec1fe 6996 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af
FB
6997 }
6998 /* load shift */
0af10c86 6999 val = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7000 gen_op_movl_T1_im(val);
7001 if (op < 4)
7002 goto illegal_op;
7003 op -= 4;
f484d386 7004 goto bt_op;
2c0262af
FB
7005 case 0x1a3: /* bt Gv, Ev */
7006 op = 0;
7007 goto do_btx;
7008 case 0x1ab: /* bts */
7009 op = 1;
7010 goto do_btx;
7011 case 0x1b3: /* btr */
7012 op = 2;
7013 goto do_btx;
7014 case 0x1bb: /* btc */
7015 op = 3;
7016 do_btx:
4ba9938c 7017 ot = dflag + MO_16;
0af10c86 7018 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 7019 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 7020 mod = (modrm >> 6) & 3;
14ce26e7 7021 rm = (modrm & 7) | REX_B(s);
4ba9938c 7022 gen_op_mov_TN_reg(MO_32, 1, reg);
2c0262af 7023 if (mod != 3) {
0af10c86 7024 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af 7025 /* specific case: we need to add a displacement */
f484d386
FB
7026 gen_exts(ot, cpu_T[1]);
7027 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
7028 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
7029 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
323d1876 7030 gen_op_ld_T0_A0(s, ot);
2c0262af 7031 } else {
57fec1fe 7032 gen_op_mov_TN_reg(ot, 0, rm);
2c0262af 7033 }
f484d386
FB
7034 bt_op:
7035 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
7036 switch(op) {
7037 case 0:
7038 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
7039 tcg_gen_movi_tl(cpu_cc_dst, 0);
7040 break;
7041 case 1:
7042 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
7043 tcg_gen_movi_tl(cpu_tmp0, 1);
7044 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
7045 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
7046 break;
7047 case 2:
7048 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
7049 tcg_gen_movi_tl(cpu_tmp0, 1);
7050 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
7051 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
7052 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
7053 break;
7054 default:
7055 case 3:
7056 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
7057 tcg_gen_movi_tl(cpu_tmp0, 1);
7058 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
7059 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
7060 break;
7061 }
3ca51d07 7062 set_cc_op(s, CC_OP_SARB + ot);
2c0262af
FB
7063 if (op != 0) {
7064 if (mod != 3)
323d1876 7065 gen_op_st_T0_A0(s, ot);
2c0262af 7066 else
57fec1fe 7067 gen_op_mov_reg_T0(ot, rm);
f484d386
FB
7068 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
7069 tcg_gen_movi_tl(cpu_cc_dst, 0);
2c0262af
FB
7070 }
7071 break;
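Every form of bt/bts/btr/btc above reduces to the same pattern: take the bit index modulo the operand width, record the selected bit for CF, and for bts/btr/btc fold a one-bit mask back into the operand with or, and-not, or xor. A sketch of the register-operand case (the memory form above additionally adds the sign-extended word offset of the index to the effective address first); names are illustrative:

#include <stdint.h>

/* Illustrative BT/BTS/BTR/BTC on a register operand of 'bits' width.
   Returns the new operand value; *cf receives the tested bit. */
uint64_t bit_test_op(int op /* 0=bt 1=bts 2=btr 3=btc */,
                     uint64_t val, uint64_t index, int bits, int *cf)
{
    index &= bits - 1;                 /* bit index is taken mod width */
    uint64_t mask = (uint64_t)1 << index;

    *cf = (val >> index) & 1;          /* CF = original bit */
    switch (op) {
    case 0: /* bt: no modification */  break;
    case 1: val |= mask;               break;   /* bts: set    */
    case 2: val &= ~mask;              break;   /* btr: reset  */
    case 3: val ^= mask;               break;   /* btc: toggle */
    }
    return val;
}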
321c5351
RH
7072 case 0x1bc: /* bsf / tzcnt */
7073 case 0x1bd: /* bsr / lzcnt */
4ba9938c 7074 ot = dflag + MO_16;
321c5351
RH
7075 modrm = cpu_ldub_code(env, s->pc++);
7076 reg = ((modrm >> 3) & 7) | rex_r;
7077 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
7078 gen_extu(ot, cpu_T[0]);
7079
7080 /* Note that lzcnt and tzcnt are in different extensions. */
7081 if ((prefixes & PREFIX_REPZ)
7082 && (b & 1
7083 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
7084 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
7085 int size = 8 << ot;
7086 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
7087 if (b & 1) {
7088 /* For lzcnt, reduce the target_ulong result by the
7089 number of zeros that we expect to find at the top. */
7090 gen_helper_clz(cpu_T[0], cpu_T[0]);
7091 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
6191b059 7092 } else {
321c5351
RH
7093 /* For tzcnt, a zero input must return the operand size:
7094 force all bits outside the operand size to 1. */
7095 target_ulong mask = (target_ulong)-2 << (size - 1);
7096 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
7097 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6191b059 7098 }
321c5351
RH
7099 /* For lzcnt/tzcnt, C and Z bits are defined and are
7100 related to the result. */
7101 gen_op_update1_cc();
7102 set_cc_op(s, CC_OP_BMILGB + ot);
7103 } else {
7104 /* For bsr/bsf, only the Z bit is defined and it is related
7105 to the input and not the result. */
7106 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
7107 set_cc_op(s, CC_OP_LOGICB + ot);
7108 if (b & 1) {
7109 /* For bsr, return the bit index of the first 1 bit,
7110 not the count of leading zeros. */
7111 gen_helper_clz(cpu_T[0], cpu_T[0]);
7112 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
7113 } else {
7114 gen_helper_ctz(cpu_T[0], cpu_T[0]);
7115 }
7116 /* ??? The manual says that the output is undefined when the
7117 input is zero, but real hardware leaves it unchanged, and
7118 real programs appear to depend on that. */
7119 tcg_gen_movi_tl(cpu_tmp0, 0);
7120 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
7121 cpu_regs[reg], cpu_T[0]);
6191b059 7122 }
321c5351 7123 gen_op_mov_reg_T0(ot, reg);
2c0262af
FB
7124 break;
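The lzcnt/tzcnt path above relies on two tricks: clz over the full target_ulong minus the excess width yields lzcnt, and for tzcnt the mask (target_ulong)-2 << (size - 1) sets every bit at position >= size, so a zero input naturally counts up to the operand size. A small worked example, using a compiler builtin purely as a stand-in for the ctz helper:

#include <stdint.h>
#include <stdio.h>

/* Illustrative tzcnt over a 'size'-bit operand held in a 64-bit word,
   mirroring the mask trick above.  Valid for size < 64 here; the
   helper used by the translator also handles the full-width case. */
static unsigned tzcnt_sized(uint64_t val, unsigned size)
{
    uint64_t mask = (uint64_t)-2 << (size - 1);  /* bits >= size are set */
    return __builtin_ctzll(val | mask);
}

int main(void)
{
    /* 16-bit operand: mask = 0xffffffffffff0000 */
    printf("%u\n", tzcnt_sized(0x0008, 16));  /* 3 */
    printf("%u\n", tzcnt_sized(0x0000, 16));  /* 16: zero input returns width */
    return 0;
}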
7125 /************************/
7126 /* bcd */
7127 case 0x27: /* daa */
14ce26e7
FB
7128 if (CODE64(s))
7129 goto illegal_op;
773cdfcc 7130 gen_update_cc_op(s);
7923057b 7131 gen_helper_daa(cpu_env);
3ca51d07 7132 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
7133 break;
7134 case 0x2f: /* das */
14ce26e7
FB
7135 if (CODE64(s))
7136 goto illegal_op;
773cdfcc 7137 gen_update_cc_op(s);
7923057b 7138 gen_helper_das(cpu_env);
3ca51d07 7139 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
7140 break;
7141 case 0x37: /* aaa */
14ce26e7
FB
7142 if (CODE64(s))
7143 goto illegal_op;
773cdfcc 7144 gen_update_cc_op(s);
7923057b 7145 gen_helper_aaa(cpu_env);
3ca51d07 7146 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
7147 break;
7148 case 0x3f: /* aas */
14ce26e7
FB
7149 if (CODE64(s))
7150 goto illegal_op;
773cdfcc 7151 gen_update_cc_op(s);
7923057b 7152 gen_helper_aas(cpu_env);
3ca51d07 7153 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
7154 break;
7155 case 0xd4: /* aam */
14ce26e7
FB
7156 if (CODE64(s))
7157 goto illegal_op;
0af10c86 7158 val = cpu_ldub_code(env, s->pc++);
b6d7c3db
TS
7159 if (val == 0) {
7160 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
7161 } else {
7923057b 7162 gen_helper_aam(cpu_env, tcg_const_i32(val));
3ca51d07 7163 set_cc_op(s, CC_OP_LOGICB);
b6d7c3db 7164 }
2c0262af
FB
7165 break;
7166 case 0xd5: /* aad */
14ce26e7
FB
7167 if (CODE64(s))
7168 goto illegal_op;
0af10c86 7169 val = cpu_ldub_code(env, s->pc++);
7923057b 7170 gen_helper_aad(cpu_env, tcg_const_i32(val));
3ca51d07 7171 set_cc_op(s, CC_OP_LOGICB);
2c0262af
FB
7172 break;
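aam and aad above are the two ASCII-adjust instructions that take an explicit base (normally 10): aam splits AL into quotient and remainder, aad folds AH back into AL. A plain-C restatement, with AL/AH passed explicitly purely for illustration:

#include <stdint.h>

/* Illustrative AAM: AH = AL / base, AL = AL % base.
   base == 0 raises #DE; the translator checks this above (EXCP00_DIVZ). */
void do_aam(uint8_t *al, uint8_t *ah, uint8_t base)
{
    uint8_t v = *al;
    *ah = v / base;
    *al = v % base;
}

/* Illustrative AAD: AL = (AL + AH * base) & 0xff, AH = 0. */
void do_aad(uint8_t *al, uint8_t *ah, uint8_t base)
{
    *al = (uint8_t)(*al + *ah * base);
    *ah = 0;
}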
7173 /************************/
7174 /* misc */
7175 case 0x90: /* nop */
ab1f142b 7176 /* XXX: correct lock test for all insn */
7418027e 7177 if (prefixes & PREFIX_LOCK) {
ab1f142b 7178 goto illegal_op;
7418027e
RH
7179 }
7180 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
7181 if (REX_B(s)) {
7182 goto do_xchg_reg_eax;
7183 }
0573fbfc 7184 if (prefixes & PREFIX_REPZ) {
81f3053b
PB
7185 gen_update_cc_op(s);
7186 gen_jmp_im(pc_start - s->cs_base);
7187 gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
7188 s->is_jmp = DISAS_TB_JUMP;
0573fbfc 7189 }
2c0262af
FB
7190 break;
7191 case 0x9b: /* fwait */
5fafdf24 7192 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
7eee2a50
FB
7193 (HF_MP_MASK | HF_TS_MASK)) {
7194 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
2ee73ac3 7195 } else {
773cdfcc 7196 gen_update_cc_op(s);
14ce26e7 7197 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 7198 gen_helper_fwait(cpu_env);
7eee2a50 7199 }
2c0262af
FB
7200 break;
7201 case 0xcc: /* int3 */
7202 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
7203 break;
7204 case 0xcd: /* int N */
0af10c86 7205 val = cpu_ldub_code(env, s->pc++);
f115e911 7206 if (s->vm86 && s->iopl != 3) {
5fafdf24 7207 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
f115e911
FB
7208 } else {
7209 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
7210 }
2c0262af
FB
7211 break;
7212 case 0xce: /* into */
14ce26e7
FB
7213 if (CODE64(s))
7214 goto illegal_op;
773cdfcc 7215 gen_update_cc_op(s);
a8ede8ba 7216 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7217 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
2c0262af 7218 break;
0b97134b 7219#ifdef WANT_ICEBP
2c0262af 7220 case 0xf1: /* icebp (undocumented, exits to external debugger) */
872929aa 7221 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
aba9d61e 7222#if 1
2c0262af 7223 gen_debug(s, pc_start - s->cs_base);
aba9d61e
FB
7224#else
7225 /* start debug */
0af10c86 7226 tb_flush(env);
24537a01 7227 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
aba9d61e 7228#endif
2c0262af 7229 break;
0b97134b 7230#endif
2c0262af
FB
7231 case 0xfa: /* cli */
7232 if (!s->vm86) {
7233 if (s->cpl <= s->iopl) {
f0967a1a 7234 gen_helper_cli(cpu_env);
2c0262af
FB
7235 } else {
7236 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7237 }
7238 } else {
7239 if (s->iopl == 3) {
f0967a1a 7240 gen_helper_cli(cpu_env);
2c0262af
FB
7241 } else {
7242 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7243 }
7244 }
7245 break;
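The cli case above (and sti below) boils down to a single privilege test: outside vm86 mode the current privilege level must not exceed IOPL, and inside vm86 mode IOPL must be 3. As a one-line predicate (the VME/PVI refinements are not modelled here, matching the checks above):

#include <stdbool.h>

/* Illustrative privilege test for CLI/STI as decoded above. */
bool cli_sti_allowed(bool vm86, int cpl, int iopl)
{
    return vm86 ? (iopl == 3) : (cpl <= iopl);
}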
7246 case 0xfb: /* sti */
7247 if (!s->vm86) {
7248 if (s->cpl <= s->iopl) {
7249 gen_sti:
f0967a1a 7250 gen_helper_sti(cpu_env);
2c0262af 7251 /* interrupts are enabled only from the first insn after sti */
a2cc3b24
FB
7252 /* If several instructions disable interrupts, only the
7253 _first_ does it */
7254 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
f0967a1a 7255 gen_helper_set_inhibit_irq(cpu_env);
2c0262af 7256 /* give a chance to handle pending irqs */
14ce26e7 7257 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
7258 gen_eob(s);
7259 } else {
7260 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7261 }
7262 } else {
7263 if (s->iopl == 3) {
7264 goto gen_sti;
7265 } else {
7266 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7267 }
7268 }
7269 break;
7270 case 0x62: /* bound */
14ce26e7
FB
7271 if (CODE64(s))
7272 goto illegal_op;
4ba9938c 7273 ot = dflag ? MO_32 : MO_16;
0af10c86 7274 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7275 reg = (modrm >> 3) & 7;
7276 mod = (modrm >> 6) & 3;
7277 if (mod == 3)
7278 goto illegal_op;
57fec1fe 7279 gen_op_mov_TN_reg(ot, 0, reg);
0af10c86 7280 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
14ce26e7 7281 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 7282 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4ba9938c 7283 if (ot == MO_16) {
92fc4b58
BS
7284 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
7285 } else {
7286 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
7287 }
2c0262af
FB
7288 break;
7289 case 0x1c8 ... 0x1cf: /* bswap reg */
14ce26e7
FB
7290 reg = (b & 7) | REX_B(s);
7291#ifdef TARGET_X86_64
7292 if (dflag == 2) {
4ba9938c 7293 gen_op_mov_TN_reg(MO_64, 0, reg);
66896cb8 7294 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
4ba9938c 7295 gen_op_mov_reg_T0(MO_64, reg);
5fafdf24 7296 } else
8777643e 7297#endif
57fec1fe 7298 {
4ba9938c 7299 gen_op_mov_TN_reg(MO_32, 0, reg);
8777643e
AJ
7300 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
7301 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
4ba9938c 7302 gen_op_mov_reg_T0(MO_32, reg);
14ce26e7 7303 }
2c0262af
FB
7304 break;
7305 case 0xd6: /* salc */
14ce26e7
FB
7306 if (CODE64(s))
7307 goto illegal_op;
cc8b6f5b 7308 gen_compute_eflags_c(s, cpu_T[0]);
bd7a7b33 7309 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4ba9938c 7310 gen_op_mov_reg_T0(MO_8, R_EAX);
2c0262af
FB
7311 break;
7312 case 0xe0: /* loopnz */
7313 case 0xe1: /* loopz */
2c0262af
FB
7314 case 0xe2: /* loop */
7315 case 0xe3: /* jecxz */
14ce26e7 7316 {
6e0d8677 7317 int l1, l2, l3;
14ce26e7 7318
4ba9938c 7319 tval = (int8_t)insn_get(env, s, MO_8);
14ce26e7
FB
7320 next_eip = s->pc - s->cs_base;
7321 tval += next_eip;
7322 if (s->dflag == 0)
7323 tval &= 0xffff;
3b46e624 7324
14ce26e7
FB
7325 l1 = gen_new_label();
7326 l2 = gen_new_label();
6e0d8677 7327 l3 = gen_new_label();
14ce26e7 7328 b &= 3;
6e0d8677
FB
7329 switch(b) {
7330 case 0: /* loopnz */
7331 case 1: /* loopz */
6e0d8677
FB
7332 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7333 gen_op_jz_ecx(s->aflag, l3);
5bdb91b0 7334 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
6e0d8677
FB
7335 break;
7336 case 2: /* loop */
7337 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7338 gen_op_jnz_ecx(s->aflag, l1);
7339 break;
7340 default:
7341 case 3: /* jcxz */
7342 gen_op_jz_ecx(s->aflag, l1);
7343 break;
14ce26e7
FB
7344 }
7345
6e0d8677 7346 gen_set_label(l3);
14ce26e7 7347 gen_jmp_im(next_eip);
8e1c85e3 7348 tcg_gen_br(l2);
6e0d8677 7349
14ce26e7
FB
7350 gen_set_label(l1);
7351 gen_jmp_im(tval);
7352 gen_set_label(l2);
7353 gen_eob(s);
7354 }
2c0262af
FB
7355 break;
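The three labels above implement the loop family: l1 is the taken target, l3/l2 fall through to the next instruction, and every variant except jecxz first decrements the count register. The generated condition corresponds to this plain-C predicate (address-size truncation of the count register is elided; names are illustrative):

#include <stdint.h>
#include <stdbool.h>

/* Illustrative taken-condition for loopnz/loopz/loop/jecxz, where 'b'
   is the low two opcode bits as in the switch above and zf is the
   current ZF value. */
bool loop_taken(int b, uint64_t *ecx, bool zf)
{
    switch (b) {
    case 0: /* loopnz */ return --*ecx != 0 && !zf;
    case 1: /* loopz  */ return --*ecx != 0 && zf;
    case 2: /* loop   */ return --*ecx != 0;
    default:
    case 3: /* jecxz  */ return *ecx == 0;
    }
}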
7356 case 0x130: /* wrmsr */
7357 case 0x132: /* rdmsr */
7358 if (s->cpl != 0) {
7359 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7360 } else {
773cdfcc 7361 gen_update_cc_op(s);
872929aa 7362 gen_jmp_im(pc_start - s->cs_base);
0573fbfc 7363 if (b & 2) {
4a7443be 7364 gen_helper_rdmsr(cpu_env);
0573fbfc 7365 } else {
4a7443be 7366 gen_helper_wrmsr(cpu_env);
0573fbfc 7367 }
2c0262af
FB
7368 }
7369 break;
7370 case 0x131: /* rdtsc */
773cdfcc 7371 gen_update_cc_op(s);
ecada8a2 7372 gen_jmp_im(pc_start - s->cs_base);
efade670
PB
7373 if (use_icount)
7374 gen_io_start();
4a7443be 7375 gen_helper_rdtsc(cpu_env);
efade670
PB
7376 if (use_icount) {
7377 gen_io_end();
7378 gen_jmp(s, s->pc - s->cs_base);
7379 }
2c0262af 7380 break;
df01e0fc 7381 case 0x133: /* rdpmc */
773cdfcc 7382 gen_update_cc_op(s);
df01e0fc 7383 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7384 gen_helper_rdpmc(cpu_env);
df01e0fc 7385 break;
023fe10d 7386 case 0x134: /* sysenter */
2436b61a 7387 /* For Intel SYSENTER is valid on 64-bit */
0af10c86 7388 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
14ce26e7 7389 goto illegal_op;
023fe10d
FB
7390 if (!s->pe) {
7391 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7392 } else {
728d803b 7393 gen_update_cc_op(s);
14ce26e7 7394 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7395 gen_helper_sysenter(cpu_env);
023fe10d
FB
7396 gen_eob(s);
7397 }
7398 break;
7399 case 0x135: /* sysexit */
2436b61a 7400 /* For Intel SYSEXIT is valid on 64-bit */
0af10c86 7401 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
14ce26e7 7402 goto illegal_op;
023fe10d
FB
7403 if (!s->pe) {
7404 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7405 } else {
728d803b 7406 gen_update_cc_op(s);
14ce26e7 7407 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7408 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag));
023fe10d
FB
7409 gen_eob(s);
7410 }
7411 break;
14ce26e7
FB
7412#ifdef TARGET_X86_64
7413 case 0x105: /* syscall */
7414 /* XXX: is it usable in real mode ? */
728d803b 7415 gen_update_cc_op(s);
14ce26e7 7416 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7417 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
14ce26e7
FB
7418 gen_eob(s);
7419 break;
7420 case 0x107: /* sysret */
7421 if (!s->pe) {
7422 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7423 } else {
728d803b 7424 gen_update_cc_op(s);
14ce26e7 7425 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7426 gen_helper_sysret(cpu_env, tcg_const_i32(s->dflag));
aba9d61e 7427 /* condition codes are modified only in long mode */
3ca51d07
RH
7428 if (s->lma) {
7429 set_cc_op(s, CC_OP_EFLAGS);
7430 }
14ce26e7
FB
7431 gen_eob(s);
7432 }
7433 break;
7434#endif
2c0262af 7435 case 0x1a2: /* cpuid */
773cdfcc 7436 gen_update_cc_op(s);
9575cb94 7437 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7438 gen_helper_cpuid(cpu_env);
2c0262af
FB
7439 break;
7440 case 0xf4: /* hlt */
7441 if (s->cpl != 0) {
7442 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7443 } else {
773cdfcc 7444 gen_update_cc_op(s);
94451178 7445 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7446 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
5779406a 7447 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
7448 }
7449 break;
7450 case 0x100:
0af10c86 7451 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7452 mod = (modrm >> 6) & 3;
7453 op = (modrm >> 3) & 7;
7454 switch(op) {
7455 case 0: /* sldt */
f115e911
FB
7456 if (!s->pe || s->vm86)
7457 goto illegal_op;
872929aa 7458 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
651ba608 7459 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
4ba9938c 7460 ot = MO_16;
2c0262af
FB
7461 if (mod == 3)
7462 ot += s->dflag;
0af10c86 7463 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
7464 break;
7465 case 2: /* lldt */
f115e911
FB
7466 if (!s->pe || s->vm86)
7467 goto illegal_op;
2c0262af
FB
7468 if (s->cpl != 0) {
7469 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7470 } else {
872929aa 7471 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
4ba9938c 7472 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
14ce26e7 7473 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 7474 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 7475 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
2c0262af
FB
7476 }
7477 break;
7478 case 1: /* str */
f115e911
FB
7479 if (!s->pe || s->vm86)
7480 goto illegal_op;
872929aa 7481 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
651ba608 7482 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
4ba9938c 7483 ot = MO_16;
2c0262af
FB
7484 if (mod == 3)
7485 ot += s->dflag;
0af10c86 7486 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
7487 break;
7488 case 3: /* ltr */
f115e911
FB
7489 if (!s->pe || s->vm86)
7490 goto illegal_op;
2c0262af
FB
7491 if (s->cpl != 0) {
7492 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7493 } else {
872929aa 7494 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
4ba9938c 7495 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
14ce26e7 7496 gen_jmp_im(pc_start - s->cs_base);
b6abf97d 7497 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 7498 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
2c0262af
FB
7499 }
7500 break;
7501 case 4: /* verr */
7502 case 5: /* verw */
f115e911
FB
7503 if (!s->pe || s->vm86)
7504 goto illegal_op;
4ba9938c 7505 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
773cdfcc 7506 gen_update_cc_op(s);
2999a0b2
BS
7507 if (op == 4) {
7508 gen_helper_verr(cpu_env, cpu_T[0]);
7509 } else {
7510 gen_helper_verw(cpu_env, cpu_T[0]);
7511 }
3ca51d07 7512 set_cc_op(s, CC_OP_EFLAGS);
f115e911 7513 break;
2c0262af
FB
7514 default:
7515 goto illegal_op;
7516 }
7517 break;
7518 case 0x101:
0af10c86 7519 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7520 mod = (modrm >> 6) & 3;
7521 op = (modrm >> 3) & 7;
3d7374c5 7522 rm = modrm & 7;
2c0262af
FB
7523 switch(op) {
7524 case 0: /* sgdt */
2c0262af
FB
7525 if (mod == 3)
7526 goto illegal_op;
872929aa 7527 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
0af10c86 7528 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
651ba608 7529 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
4ba9938c 7530 gen_op_st_T0_A0(s, MO_16);
aba9d61e 7531 gen_add_A0_im(s, 2);
651ba608 7532 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
2c0262af
FB
7533 if (!s->dflag)
7534 gen_op_andl_T0_im(0xffffff);
4ba9938c 7535 gen_op_st_T0_A0(s, CODE64(s) + MO_32);
2c0262af 7536 break;
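sgdt above stores a pseudo-descriptor: the 16-bit limit first, then the base, with the base truncated to 24 bits under a 16-bit operand size. A byte-level sketch of the legacy-mode layout (the 64-bit path above stores an 8-byte base instead); names are illustrative:

#include <stdint.h>

/* Illustrative legacy-mode SGDT store: 2-byte limit at offset 0,
   4-byte base at offset 2, base masked to 24 bits for 16-bit opsize. */
void store_gdtr(uint8_t *mem, uint16_t limit, uint32_t base, int dflag16)
{
    if (dflag16) {
        base &= 0x00ffffff;
    }
    mem[0] = limit & 0xff;
    mem[1] = limit >> 8;
    mem[2] = base & 0xff;
    mem[3] = (base >> 8) & 0xff;
    mem[4] = (base >> 16) & 0xff;
    mem[5] = (base >> 24) & 0xff;
}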
3d7374c5
FB
7537 case 1:
7538 if (mod == 3) {
7539 switch (rm) {
7540 case 0: /* monitor */
7541 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7542 s->cpl != 0)
7543 goto illegal_op;
773cdfcc 7544 gen_update_cc_op(s);
3d7374c5
FB
7545 gen_jmp_im(pc_start - s->cs_base);
7546#ifdef TARGET_X86_64
7547 if (s->aflag == 2) {
bbf662ee 7548 gen_op_movq_A0_reg(R_EAX);
5fafdf24 7549 } else
3d7374c5
FB
7550#endif
7551 {
bbf662ee 7552 gen_op_movl_A0_reg(R_EAX);
3d7374c5
FB
7553 if (s->aflag == 0)
7554 gen_op_andl_A0_ffff();
7555 }
7556 gen_add_A0_ds_seg(s);
4a7443be 7557 gen_helper_monitor(cpu_env, cpu_A0);
3d7374c5
FB
7558 break;
7559 case 1: /* mwait */
7560 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7561 s->cpl != 0)
7562 goto illegal_op;
728d803b 7563 gen_update_cc_op(s);
94451178 7564 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7565 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
3d7374c5
FB
7566 gen_eob(s);
7567 break;
a9321a4d
PA
7568 case 2: /* clac */
7569 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7570 s->cpl != 0) {
7571 goto illegal_op;
7572 }
7573 gen_helper_clac(cpu_env);
7574 gen_jmp_im(s->pc - s->cs_base);
7575 gen_eob(s);
7576 break;
7577 case 3: /* stac */
7578 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7579 s->cpl != 0) {
7580 goto illegal_op;
7581 }
7582 gen_helper_stac(cpu_env);
7583 gen_jmp_im(s->pc - s->cs_base);
7584 gen_eob(s);
7585 break;
3d7374c5
FB
7586 default:
7587 goto illegal_op;
7588 }
7589 } else { /* sidt */
872929aa 7590 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
0af10c86 7591 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
651ba608 7592 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
4ba9938c 7593 gen_op_st_T0_A0(s, MO_16);
3d7374c5 7594 gen_add_A0_im(s, 2);
651ba608 7595 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
3d7374c5
FB
7596 if (!s->dflag)
7597 gen_op_andl_T0_im(0xffffff);
4ba9938c 7598 gen_op_st_T0_A0(s, CODE64(s) + MO_32);
3d7374c5
FB
7599 }
7600 break;
2c0262af
FB
7601 case 2: /* lgdt */
7602 case 3: /* lidt */
0573fbfc 7603 if (mod == 3) {
773cdfcc 7604 gen_update_cc_op(s);
872929aa 7605 gen_jmp_im(pc_start - s->cs_base);
0573fbfc
TS
7606 switch(rm) {
7607 case 0: /* VMRUN */
872929aa
FB
7608 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7609 goto illegal_op;
7610 if (s->cpl != 0) {
7611 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
0573fbfc 7612 break;
872929aa 7613 } else {
052e80d5 7614 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag),
a7812ae4 7615 tcg_const_i32(s->pc - pc_start));
db620f46 7616 tcg_gen_exit_tb(0);
5779406a 7617 s->is_jmp = DISAS_TB_JUMP;
872929aa 7618 }
0573fbfc
TS
7619 break;
7620 case 1: /* VMMCALL */
872929aa
FB
7621 if (!(s->flags & HF_SVME_MASK))
7622 goto illegal_op;
052e80d5 7623 gen_helper_vmmcall(cpu_env);
0573fbfc
TS
7624 break;
7625 case 2: /* VMLOAD */
872929aa
FB
7626 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7627 goto illegal_op;
7628 if (s->cpl != 0) {
7629 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7630 break;
7631 } else {
052e80d5 7632 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag));
872929aa 7633 }
0573fbfc
TS
7634 break;
7635 case 3: /* VMSAVE */
872929aa
FB
7636 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7637 goto illegal_op;
7638 if (s->cpl != 0) {
7639 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7640 break;
7641 } else {
052e80d5 7642 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag));
872929aa 7643 }
0573fbfc
TS
7644 break;
7645 case 4: /* STGI */
872929aa
FB
7646 if ((!(s->flags & HF_SVME_MASK) &&
7647 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7648 !s->pe)
7649 goto illegal_op;
7650 if (s->cpl != 0) {
7651 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7652 break;
7653 } else {
052e80d5 7654 gen_helper_stgi(cpu_env);
872929aa 7655 }
0573fbfc
TS
7656 break;
7657 case 5: /* CLGI */
872929aa
FB
7658 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7659 goto illegal_op;
7660 if (s->cpl != 0) {
7661 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7662 break;
7663 } else {
052e80d5 7664 gen_helper_clgi(cpu_env);
872929aa 7665 }
0573fbfc
TS
7666 break;
7667 case 6: /* SKINIT */
872929aa
FB
7668 if ((!(s->flags & HF_SVME_MASK) &&
7669 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7670 !s->pe)
7671 goto illegal_op;
052e80d5 7672 gen_helper_skinit(cpu_env);
0573fbfc
TS
7673 break;
7674 case 7: /* INVLPGA */
872929aa
FB
7675 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7676 goto illegal_op;
7677 if (s->cpl != 0) {
7678 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7679 break;
7680 } else {
052e80d5 7681 gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag));
872929aa 7682 }
0573fbfc
TS
7683 break;
7684 default:
7685 goto illegal_op;
7686 }
7687 } else if (s->cpl != 0) {
2c0262af
FB
7688 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7689 } else {
872929aa
FB
7690 gen_svm_check_intercept(s, pc_start,
7691 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
0af10c86 7692 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4ba9938c 7693 gen_op_ld_T1_A0(s, MO_16);
aba9d61e 7694 gen_add_A0_im(s, 2);
4ba9938c 7695 gen_op_ld_T0_A0(s, CODE64(s) + MO_32);
2c0262af
FB
7696 if (!s->dflag)
7697 gen_op_andl_T0_im(0xffffff);
7698 if (op == 2) {
651ba608
FB
7699 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7700 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
2c0262af 7701 } else {
651ba608
FB
7702 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7703 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
2c0262af
FB
7704 }
7705 }
7706 break;
7707 case 4: /* smsw */
872929aa 7708 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
e2542fe2 7709#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
f60d2728 7710 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7711#else
651ba608 7712 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
f60d2728 7713#endif
4ba9938c 7714 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 1);
2c0262af
FB
7715 break;
7716 case 6: /* lmsw */
7717 if (s->cpl != 0) {
7718 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7719 } else {
872929aa 7720 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
4ba9938c 7721 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
4a7443be 7722 gen_helper_lmsw(cpu_env, cpu_T[0]);
14ce26e7 7723 gen_jmp_im(s->pc - s->cs_base);
d71b9a8b 7724 gen_eob(s);
2c0262af
FB
7725 }
7726 break;
1b050077
AP
7727 case 7:
7728 if (mod != 3) { /* invlpg */
7729 if (s->cpl != 0) {
7730 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7731 } else {
773cdfcc 7732 gen_update_cc_op(s);
1b050077 7733 gen_jmp_im(pc_start - s->cs_base);
0af10c86 7734 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4a7443be 7735 gen_helper_invlpg(cpu_env, cpu_A0);
1b050077
AP
7736 gen_jmp_im(s->pc - s->cs_base);
7737 gen_eob(s);
7738 }
2c0262af 7739 } else {
1b050077
AP
7740 switch (rm) {
7741 case 0: /* swapgs */
14ce26e7 7742#ifdef TARGET_X86_64
1b050077
AP
7743 if (CODE64(s)) {
7744 if (s->cpl != 0) {
7745 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7746 } else {
7747 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7748 offsetof(CPUX86State,segs[R_GS].base));
7749 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7750 offsetof(CPUX86State,kernelgsbase));
7751 tcg_gen_st_tl(cpu_T[1], cpu_env,
7752 offsetof(CPUX86State,segs[R_GS].base));
7753 tcg_gen_st_tl(cpu_T[0], cpu_env,
7754 offsetof(CPUX86State,kernelgsbase));
7755 }
5fafdf24 7756 } else
14ce26e7
FB
7757#endif
7758 {
7759 goto illegal_op;
7760 }
1b050077
AP
7761 break;
7762 case 1: /* rdtscp */
7763 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7764 goto illegal_op;
773cdfcc 7765 gen_update_cc_op(s);
9575cb94 7766 gen_jmp_im(pc_start - s->cs_base);
1b050077
AP
7767 if (use_icount)
7768 gen_io_start();
4a7443be 7769 gen_helper_rdtscp(cpu_env);
1b050077
AP
7770 if (use_icount) {
7771 gen_io_end();
7772 gen_jmp(s, s->pc - s->cs_base);
7773 }
7774 break;
7775 default:
7776 goto illegal_op;
14ce26e7 7777 }
2c0262af
FB
7778 }
7779 break;
7780 default:
7781 goto illegal_op;
7782 }
7783 break;
3415a4dd
FB
7784 case 0x108: /* invd */
7785 case 0x109: /* wbinvd */
7786 if (s->cpl != 0) {
7787 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7788 } else {
872929aa 7789 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
3415a4dd
FB
7790 /* nothing to do */
7791 }
7792 break;
14ce26e7
FB
7793 case 0x63: /* arpl or movslS (x86_64) */
7794#ifdef TARGET_X86_64
7795 if (CODE64(s)) {
7796 int d_ot;
7797 /* d_ot is the size of destination */
4ba9938c 7798 d_ot = dflag + MO_16;
14ce26e7 7799
0af10c86 7800 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
7801 reg = ((modrm >> 3) & 7) | rex_r;
7802 mod = (modrm >> 6) & 3;
7803 rm = (modrm & 7) | REX_B(s);
3b46e624 7804
14ce26e7 7805 if (mod == 3) {
4ba9938c 7806 gen_op_mov_TN_reg(MO_32, 0, rm);
14ce26e7 7807 /* sign extend */
4ba9938c 7808 if (d_ot == MO_64) {
e108dd01 7809 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4ba9938c 7810 }
57fec1fe 7811 gen_op_mov_reg_T0(d_ot, reg);
14ce26e7 7812 } else {
0af10c86 7813 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4ba9938c
RH
7814 if (d_ot == MO_64) {
7815 gen_op_lds_T0_A0(s, MO_32);
14ce26e7 7816 } else {
4ba9938c 7817 gen_op_ld_T0_A0(s, MO_32);
14ce26e7 7818 }
57fec1fe 7819 gen_op_mov_reg_T0(d_ot, reg);
14ce26e7 7820 }
5fafdf24 7821 } else
14ce26e7
FB
7822#endif
7823 {
3bd7da9e 7824 int label1;
49d9fdcc 7825 TCGv t0, t1, t2, a0;
1e4840bf 7826
14ce26e7
FB
7827 if (!s->pe || s->vm86)
7828 goto illegal_op;
a7812ae4
PB
7829 t0 = tcg_temp_local_new();
7830 t1 = tcg_temp_local_new();
7831 t2 = tcg_temp_local_new();
4ba9938c 7832 ot = MO_16;
0af10c86 7833 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
7834 reg = (modrm >> 3) & 7;
7835 mod = (modrm >> 6) & 3;
7836 rm = modrm & 7;
7837 if (mod != 3) {
0af10c86 7838 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
323d1876 7839 gen_op_ld_v(s, ot, t0, cpu_A0);
49d9fdcc
LD
7840 a0 = tcg_temp_local_new();
7841 tcg_gen_mov_tl(a0, cpu_A0);
14ce26e7 7842 } else {
1e4840bf 7843 gen_op_mov_v_reg(ot, t0, rm);
49d9fdcc 7844 TCGV_UNUSED(a0);
14ce26e7 7845 }
1e4840bf
FB
7846 gen_op_mov_v_reg(ot, t1, reg);
7847 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7848 tcg_gen_andi_tl(t1, t1, 3);
7849 tcg_gen_movi_tl(t2, 0);
3bd7da9e 7850 label1 = gen_new_label();
1e4840bf
FB
7851 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7852 tcg_gen_andi_tl(t0, t0, ~3);
7853 tcg_gen_or_tl(t0, t0, t1);
7854 tcg_gen_movi_tl(t2, CC_Z);
3bd7da9e 7855 gen_set_label(label1);
14ce26e7 7856 if (mod != 3) {
323d1876 7857 gen_op_st_v(s, ot, t0, a0);
49d9fdcc
LD
7858 tcg_temp_free(a0);
7859 } else {
1e4840bf 7860 gen_op_mov_reg_v(ot, rm, t0);
14ce26e7 7861 }
d229edce 7862 gen_compute_eflags(s);
3bd7da9e 7863 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
1e4840bf 7864 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
1e4840bf
FB
7865 tcg_temp_free(t0);
7866 tcg_temp_free(t1);
7867 tcg_temp_free(t2);
f115e911 7868 }
f115e911 7869 break;
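The brcond/label sequence above is arpl: if the destination selector's RPL (its low two bits) is lower than the source's, raise it to match and set ZF, otherwise clear ZF and leave the selector alone. As straight-line C with illustrative names:

#include <stdint.h>
#include <stdbool.h>

/* Illustrative ARPL dst, src on 16-bit selectors.
   Returns the new destination; *zf reports whether RPL was adjusted. */
uint16_t do_arpl(uint16_t dst, uint16_t src, bool *zf)
{
    if ((dst & 3) < (src & 3)) {
        dst = (dst & ~3) | (src & 3);
        *zf = true;
    } else {
        *zf = false;
    }
    return dst;
}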
2c0262af
FB
7870 case 0x102: /* lar */
7871 case 0x103: /* lsl */
cec6843e
FB
7872 {
7873 int label1;
1e4840bf 7874 TCGv t0;
cec6843e
FB
7875 if (!s->pe || s->vm86)
7876 goto illegal_op;
4ba9938c 7877 ot = dflag ? MO_32 : MO_16;
0af10c86 7878 modrm = cpu_ldub_code(env, s->pc++);
cec6843e 7879 reg = ((modrm >> 3) & 7) | rex_r;
4ba9938c 7880 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
a7812ae4 7881 t0 = tcg_temp_local_new();
773cdfcc 7882 gen_update_cc_op(s);
2999a0b2
BS
7883 if (b == 0x102) {
7884 gen_helper_lar(t0, cpu_env, cpu_T[0]);
7885 } else {
7886 gen_helper_lsl(t0, cpu_env, cpu_T[0]);
7887 }
cec6843e
FB
7888 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7889 label1 = gen_new_label();
cb63669a 7890 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
1e4840bf 7891 gen_op_mov_reg_v(ot, reg, t0);
cec6843e 7892 gen_set_label(label1);
3ca51d07 7893 set_cc_op(s, CC_OP_EFLAGS);
1e4840bf 7894 tcg_temp_free(t0);
cec6843e 7895 }
2c0262af
FB
7896 break;
7897 case 0x118:
0af10c86 7898 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7899 mod = (modrm >> 6) & 3;
7900 op = (modrm >> 3) & 7;
7901 switch(op) {
7902 case 0: /* prefetchnta */
7903 case 1: /* prefetcht0 */
7904 case 2: /* prefetcht1 */
7905 case 3: /* prefetcht2 */
7906 if (mod == 3)
7907 goto illegal_op;
0af10c86 7908 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
2c0262af
FB
7909 /* nothing more to do */
7910 break;
e17a36ce 7911 default: /* nop (multi byte) */
0af10c86 7912 gen_nop_modrm(env, s, modrm);
e17a36ce 7913 break;
2c0262af
FB
7914 }
7915 break;
e17a36ce 7916 case 0x119 ... 0x11f: /* nop (multi byte) */
0af10c86
BS
7917 modrm = cpu_ldub_code(env, s->pc++);
7918 gen_nop_modrm(env, s, modrm);
e17a36ce 7919 break;
2c0262af
FB
7920 case 0x120: /* mov reg, crN */
7921 case 0x122: /* mov crN, reg */
7922 if (s->cpl != 0) {
7923 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7924 } else {
0af10c86 7925 modrm = cpu_ldub_code(env, s->pc++);
5c73b757
MO
7926 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7927 * AMD documentation (24594.pdf) and testing of
7928 * intel 386 and 486 processors all show that the mod bits
7929 * are assumed to be 1's, regardless of actual values.
7930 */
14ce26e7
FB
7931 rm = (modrm & 7) | REX_B(s);
7932 reg = ((modrm >> 3) & 7) | rex_r;
7933 if (CODE64(s))
4ba9938c 7934 ot = MO_64;
14ce26e7 7935 else
4ba9938c 7936 ot = MO_32;
ccd59d09
AP
7937 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7938 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7939 reg = 8;
7940 }
2c0262af
FB
7941 switch(reg) {
7942 case 0:
7943 case 2:
7944 case 3:
7945 case 4:
9230e66e 7946 case 8:
773cdfcc 7947 gen_update_cc_op(s);
872929aa 7948 gen_jmp_im(pc_start - s->cs_base);
2c0262af 7949 if (b & 2) {
57fec1fe 7950 gen_op_mov_TN_reg(ot, 0, rm);
4a7443be
BS
7951 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7952 cpu_T[0]);
14ce26e7 7953 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
7954 gen_eob(s);
7955 } else {
4a7443be 7956 gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
57fec1fe 7957 gen_op_mov_reg_T0(ot, rm);
2c0262af
FB
7958 }
7959 break;
7960 default:
7961 goto illegal_op;
7962 }
7963 }
7964 break;
7965 case 0x121: /* mov reg, drN */
7966 case 0x123: /* mov drN, reg */
7967 if (s->cpl != 0) {
7968 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7969 } else {
0af10c86 7970 modrm = cpu_ldub_code(env, s->pc++);
5c73b757
MO
7971 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7972 * AMD documentation (24594.pdf) and testing of
7973 * intel 386 and 486 processors all show that the mod bits
7974 * are assumed to be 1's, regardless of actual values.
7975 */
14ce26e7
FB
7976 rm = (modrm & 7) | REX_B(s);
7977 reg = ((modrm >> 3) & 7) | rex_r;
7978 if (CODE64(s))
4ba9938c 7979 ot = MO_64;
14ce26e7 7980 else
4ba9938c 7981 ot = MO_32;
2c0262af 7982 /* XXX: do it dynamically with CR4.DE bit */
14ce26e7 7983 if (reg == 4 || reg == 5 || reg >= 8)
2c0262af
FB
7984 goto illegal_op;
7985 if (b & 2) {
0573fbfc 7986 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
57fec1fe 7987 gen_op_mov_TN_reg(ot, 0, rm);
4a7443be 7988 gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
14ce26e7 7989 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
7990 gen_eob(s);
7991 } else {
0573fbfc 7992 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
651ba608 7993 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
57fec1fe 7994 gen_op_mov_reg_T0(ot, rm);
2c0262af
FB
7995 }
7996 }
7997 break;
7998 case 0x106: /* clts */
7999 if (s->cpl != 0) {
8000 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
8001 } else {
0573fbfc 8002 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
f0967a1a 8003 gen_helper_clts(cpu_env);
7eee2a50 8004 /* abort block because static cpu state changed */
14ce26e7 8005 gen_jmp_im(s->pc - s->cs_base);
7eee2a50 8006 gen_eob(s);
2c0262af
FB
8007 }
8008 break;
222a3336 8009 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
664e0f19
FB
8010 case 0x1c3: /* MOVNTI reg, mem */
8011 if (!(s->cpuid_features & CPUID_SSE2))
14ce26e7 8012 goto illegal_op;
4ba9938c 8013 ot = s->dflag == 2 ? MO_64 : MO_32;
0af10c86 8014 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
8015 mod = (modrm >> 6) & 3;
8016 if (mod == 3)
8017 goto illegal_op;
8018 reg = ((modrm >> 3) & 7) | rex_r;
8019 /* generate a generic store */
0af10c86 8020 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
14ce26e7 8021 break;
664e0f19 8022 case 0x1ae:
0af10c86 8023 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
8024 mod = (modrm >> 6) & 3;
8025 op = (modrm >> 3) & 7;
8026 switch(op) {
8027 case 0: /* fxsave */
5fafdf24 8028 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
09d85fb8 8029 (s->prefix & PREFIX_LOCK))
14ce26e7 8030 goto illegal_op;
09d85fb8 8031 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
0fd14b72
FB
8032 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
8033 break;
8034 }
0af10c86 8035 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
773cdfcc 8036 gen_update_cc_op(s);
19e6c4b8 8037 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae 8038 gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32((s->dflag == 2)));
664e0f19
FB
8039 break;
8040 case 1: /* fxrstor */
5fafdf24 8041 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
09d85fb8 8042 (s->prefix & PREFIX_LOCK))
14ce26e7 8043 goto illegal_op;
09d85fb8 8044 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
0fd14b72
FB
8045 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
8046 break;
8047 }
0af10c86 8048 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
773cdfcc 8049 gen_update_cc_op(s);
19e6c4b8 8050 gen_jmp_im(pc_start - s->cs_base);
d3eb5eae
BS
8051 gen_helper_fxrstor(cpu_env, cpu_A0,
8052 tcg_const_i32((s->dflag == 2)));
664e0f19
FB
8053 break;
8054 case 2: /* ldmxcsr */
8055 case 3: /* stmxcsr */
8056 if (s->flags & HF_TS_MASK) {
8057 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
8058 break;
14ce26e7 8059 }
664e0f19
FB
8060 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
8061 mod == 3)
14ce26e7 8062 goto illegal_op;
0af10c86 8063 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
664e0f19 8064 if (op == 2) {
4ba9938c 8065 gen_op_ld_T0_A0(s, MO_32);
20f8bd48 8066 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 8067 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
14ce26e7 8068 } else {
651ba608 8069 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
4ba9938c 8070 gen_op_st_T0_A0(s, MO_32);
14ce26e7 8071 }
664e0f19
FB
8072 break;
8073 case 5: /* lfence */
8074 case 6: /* mfence */
8001c294 8075 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
664e0f19
FB
8076 goto illegal_op;
8077 break;
8f091a59
FB
8078 case 7: /* sfence / clflush */
8079 if ((modrm & 0xc7) == 0xc0) {
8080 /* sfence */
a35f3ec7 8081 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
8f091a59
FB
8082 if (!(s->cpuid_features & CPUID_SSE))
8083 goto illegal_op;
8084 } else {
8085 /* clflush */
8086 if (!(s->cpuid_features & CPUID_CLFLUSH))
8087 goto illegal_op;
0af10c86 8088 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8f091a59
FB
8089 }
8090 break;
664e0f19 8091 default:
14ce26e7
FB
8092 goto illegal_op;
8093 }
8094 break;
a35f3ec7 8095 case 0x10d: /* 3DNow! prefetch(w) */
0af10c86 8096 modrm = cpu_ldub_code(env, s->pc++);
a35f3ec7
AJ
8097 mod = (modrm >> 6) & 3;
8098 if (mod == 3)
8099 goto illegal_op;
0af10c86 8100 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8f091a59
FB
8101 /* ignore for now */
8102 break;
3b21e03e 8103 case 0x1aa: /* rsm */
872929aa 8104 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
3b21e03e
FB
8105 if (!(s->flags & HF_SMM_MASK))
8106 goto illegal_op;
728d803b 8107 gen_update_cc_op(s);
3b21e03e 8108 gen_jmp_im(s->pc - s->cs_base);
608badfc 8109 gen_helper_rsm(cpu_env);
3b21e03e
FB
8110 gen_eob(s);
8111 break;
222a3336
AZ
8112 case 0x1b8: /* SSE4.2 popcnt */
8113 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
8114 PREFIX_REPZ)
8115 goto illegal_op;
8116 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
8117 goto illegal_op;
8118
0af10c86 8119 modrm = cpu_ldub_code(env, s->pc++);
8b4a3df8 8120 reg = ((modrm >> 3) & 7) | rex_r;
222a3336
AZ
8121
8122 if (s->prefix & PREFIX_DATA)
4ba9938c 8123 ot = MO_16;
222a3336 8124 else if (s->dflag != 2)
4ba9938c 8125 ot = MO_32;
222a3336 8126 else
4ba9938c 8127 ot = MO_64;
222a3336 8128
0af10c86 8129 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
d3eb5eae 8130 gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
222a3336 8131 gen_op_mov_reg_T0(ot, reg);
fdb0d09d 8132
3ca51d07 8133 set_cc_op(s, CC_OP_EFLAGS);
222a3336 8134 break;
a35f3ec7
AJ
8135 case 0x10e ... 0x10f:
8136 /* 3DNow! instructions, ignore prefixes */
8137 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
664e0f19
FB
8138 case 0x110 ... 0x117:
8139 case 0x128 ... 0x12f:
4242b1bd 8140 case 0x138 ... 0x13a:
d9f4bb27 8141 case 0x150 ... 0x179:
664e0f19
FB
8142 case 0x17c ... 0x17f:
8143 case 0x1c2:
8144 case 0x1c4 ... 0x1c6:
8145 case 0x1d0 ... 0x1fe:
0af10c86 8146 gen_sse(env, s, b, pc_start, rex_r);
664e0f19 8147 break;
2c0262af
FB
8148 default:
8149 goto illegal_op;
8150 }
8151 /* lock generation */
8152 if (s->prefix & PREFIX_LOCK)
a7812ae4 8153 gen_helper_unlock();
2c0262af
FB
8154 return s->pc;
8155 illegal_op:
ab1f142b 8156 if (s->prefix & PREFIX_LOCK)
a7812ae4 8157 gen_helper_unlock();
2c0262af
FB
8158 /* XXX: ensure that no lock was generated */
8159 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
8160 return s->pc;
8161}
8162
2c0262af
FB
8163void optimize_flags_init(void)
8164{
a7812ae4
PB
8165 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
8166 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8167 offsetof(CPUX86State, cc_op), "cc_op");
317ac620 8168 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
a7812ae4 8169 "cc_dst");
a3251186
RH
8170 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
8171 "cc_src");
988c3eb0
RH
8172 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src2),
8173 "cc_src2");
437a88a5 8174
cc739bb0
LD
8175#ifdef TARGET_X86_64
8176 cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8177 offsetof(CPUX86State, regs[R_EAX]), "rax");
cc739bb0 8178 cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8179 offsetof(CPUX86State, regs[R_ECX]), "rcx");
cc739bb0 8180 cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8181 offsetof(CPUX86State, regs[R_EDX]), "rdx");
cc739bb0 8182 cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8183 offsetof(CPUX86State, regs[R_EBX]), "rbx");
cc739bb0 8184 cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8185 offsetof(CPUX86State, regs[R_ESP]), "rsp");
cc739bb0 8186 cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8187 offsetof(CPUX86State, regs[R_EBP]), "rbp");
cc739bb0 8188 cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8189 offsetof(CPUX86State, regs[R_ESI]), "rsi");
cc739bb0 8190 cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8191 offsetof(CPUX86State, regs[R_EDI]), "rdi");
cc739bb0 8192 cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8193 offsetof(CPUX86State, regs[8]), "r8");
cc739bb0 8194 cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8195 offsetof(CPUX86State, regs[9]), "r9");
cc739bb0 8196 cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8197 offsetof(CPUX86State, regs[10]), "r10");
cc739bb0 8198 cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8199 offsetof(CPUX86State, regs[11]), "r11");
cc739bb0 8200 cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8201 offsetof(CPUX86State, regs[12]), "r12");
cc739bb0 8202 cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8203 offsetof(CPUX86State, regs[13]), "r13");
cc739bb0 8204 cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8205 offsetof(CPUX86State, regs[14]), "r14");
cc739bb0 8206 cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
317ac620 8207 offsetof(CPUX86State, regs[15]), "r15");
cc739bb0
LD
8208#else
8209 cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8210 offsetof(CPUX86State, regs[R_EAX]), "eax");
cc739bb0 8211 cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8212 offsetof(CPUX86State, regs[R_ECX]), "ecx");
cc739bb0 8213 cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8214 offsetof(CPUX86State, regs[R_EDX]), "edx");
cc739bb0 8215 cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8216 offsetof(CPUX86State, regs[R_EBX]), "ebx");
cc739bb0 8217 cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8218 offsetof(CPUX86State, regs[R_ESP]), "esp");
cc739bb0 8219 cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8220 offsetof(CPUX86State, regs[R_EBP]), "ebp");
cc739bb0 8221 cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8222 offsetof(CPUX86State, regs[R_ESI]), "esi");
cc739bb0 8223 cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 8224 offsetof(CPUX86State, regs[R_EDI]), "edi");
cc739bb0 8225#endif
2c0262af
FB
8226}
8227
8228/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8229 basic block 'tb'. If search_pc is TRUE, also generate PC
8230 information for each intermediate instruction. */
467215c2 8231static inline void gen_intermediate_code_internal(X86CPU *cpu,
2cfc5f17 8232 TranslationBlock *tb,
467215c2 8233 bool search_pc)
2c0262af 8234{
ed2803da 8235 CPUState *cs = CPU(cpu);
467215c2 8236 CPUX86State *env = &cpu->env;
2c0262af 8237 DisasContext dc1, *dc = &dc1;
14ce26e7 8238 target_ulong pc_ptr;
2c0262af 8239 uint16_t *gen_opc_end;
a1d1bb31 8240 CPUBreakpoint *bp;
7f5b7d3e 8241 int j, lj;
c068688b 8242 uint64_t flags;
14ce26e7
FB
8243 target_ulong pc_start;
8244 target_ulong cs_base;
2e70f6ef
PB
8245 int num_insns;
8246 int max_insns;
3b46e624 8247
2c0262af 8248 /* generate intermediate code */
14ce26e7
FB
8249 pc_start = tb->pc;
8250 cs_base = tb->cs_base;
2c0262af 8251 flags = tb->flags;
3a1d9b8b 8252
4f31916f 8253 dc->pe = (flags >> HF_PE_SHIFT) & 1;
2c0262af
FB
8254 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
8255 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
8256 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
8257 dc->f_st = 0;
8258 dc->vm86 = (flags >> VM_SHIFT) & 1;
8259 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
8260 dc->iopl = (flags >> IOPL_SHIFT) & 3;
8261 dc->tf = (flags >> TF_SHIFT) & 1;
ed2803da 8262 dc->singlestep_enabled = cs->singlestep_enabled;
2c0262af 8263 dc->cc_op = CC_OP_DYNAMIC;
e207582f 8264 dc->cc_op_dirty = false;
2c0262af
FB
8265 dc->cs_base = cs_base;
8266 dc->tb = tb;
8267 dc->popl_esp_hack = 0;
8268 /* select memory access functions */
8269 dc->mem_index = 0;
8270 if (flags & HF_SOFTMMU_MASK) {
5c42a7cd 8271 dc->mem_index = cpu_mmu_index(env);
2c0262af 8272 }
0514ef2f
EH
8273 dc->cpuid_features = env->features[FEAT_1_EDX];
8274 dc->cpuid_ext_features = env->features[FEAT_1_ECX];
8275 dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
8276 dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
8277 dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
14ce26e7
FB
8278#ifdef TARGET_X86_64
8279 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
8280 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
8281#endif
7eee2a50 8282 dc->flags = flags;
ed2803da 8283 dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
a2cc3b24 8284 (flags & HF_INHIBIT_IRQ_MASK)
415fa2ea 8285#ifndef CONFIG_SOFTMMU
2c0262af
FB
8286 || (flags & HF_SOFTMMU_MASK)
8287#endif
8288 );
4f31916f
FB
8289#if 0
8290 /* check addseg logic */
dc196a57 8291 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
4f31916f
FB
8292 printf("ERROR addseg\n");
8293#endif
8294
a7812ae4
PB
8295 cpu_T[0] = tcg_temp_new();
8296 cpu_T[1] = tcg_temp_new();
8297 cpu_A0 = tcg_temp_new();
a7812ae4
PB
8298
8299 cpu_tmp0 = tcg_temp_new();
8300 cpu_tmp1_i64 = tcg_temp_new_i64();
8301 cpu_tmp2_i32 = tcg_temp_new_i32();
8302 cpu_tmp3_i32 = tcg_temp_new_i32();
8303 cpu_tmp4 = tcg_temp_new();
a7812ae4
PB
8304 cpu_ptr0 = tcg_temp_new_ptr();
8305 cpu_ptr1 = tcg_temp_new_ptr();
a3251186 8306 cpu_cc_srcT = tcg_temp_local_new();
57fec1fe 8307
92414b31 8308 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8309
8310 dc->is_jmp = DISAS_NEXT;
8311 pc_ptr = pc_start;
8312 lj = -1;
2e70f6ef
PB
8313 num_insns = 0;
8314 max_insns = tb->cflags & CF_COUNT_MASK;
8315 if (max_insns == 0)
8316 max_insns = CF_COUNT_MASK;
2c0262af 8317
806f352d 8318 gen_tb_start();
2c0262af 8319 for(;;) {
72cf2d4f
BS
8320 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
8321 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a2397807
JK
8322 if (bp->pc == pc_ptr &&
8323 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
2c0262af
FB
8324 gen_debug(dc, pc_ptr - dc->cs_base);
8325 break;
8326 }
8327 }
8328 }
8329 if (search_pc) {
92414b31 8330 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
8331 if (lj < j) {
8332 lj++;
8333 while (lj < j)
ab1103de 8334 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 8335 }
25983cad 8336 tcg_ctx.gen_opc_pc[lj] = pc_ptr;
2c0262af 8337 gen_opc_cc_op[lj] = dc->cc_op;
ab1103de 8338 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 8339 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 8340 }
2e70f6ef
PB
8341 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8342 gen_io_start();
8343
0af10c86 8344 pc_ptr = disas_insn(env, dc, pc_ptr);
2e70f6ef 8345 num_insns++;
2c0262af
FB
8346 /* stop translation if indicated */
8347 if (dc->is_jmp)
8348 break;
8349 /* in single-step mode, we generate only one instruction and
8350 then generate an exception */
a2cc3b24
FB
8351 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8352 the flag and abort the translation to give the irqs a
8353 chance to happen */
5fafdf24 8354 if (dc->tf || dc->singlestep_enabled ||
2e70f6ef 8355 (flags & HF_INHIBIT_IRQ_MASK)) {
14ce26e7 8356 gen_jmp_im(pc_ptr - dc->cs_base);
2c0262af
FB
8357 gen_eob(dc);
8358 break;
8359 }
8360 /* if too long translation, stop generation too */
efd7f486 8361 if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
2e70f6ef
PB
8362 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
8363 num_insns >= max_insns) {
14ce26e7 8364 gen_jmp_im(pc_ptr - dc->cs_base);
2c0262af
FB
8365 gen_eob(dc);
8366 break;
8367 }
1b530a6d
AJ
8368 if (singlestep) {
8369 gen_jmp_im(pc_ptr - dc->cs_base);
8370 gen_eob(dc);
8371 break;
8372 }
2c0262af 8373 }
2e70f6ef
PB
8374 if (tb->cflags & CF_LAST_IO)
8375 gen_io_end();
806f352d 8376 gen_tb_end(tb, num_insns);
efd7f486 8377 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
8378 /* don't forget to fill in the last values */
8379 if (search_pc) {
92414b31 8380 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
8381 lj++;
8382 while (lj <= j)
ab1103de 8383 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 8384 }
3b46e624 8385
2c0262af 8386#ifdef DEBUG_DISAS
8fec2b8c 8387 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
14ce26e7 8388 int disas_flags;
93fcfe39
AL
8389 qemu_log("----------------\n");
8390 qemu_log("IN: %s\n", lookup_symbol(pc_start));
14ce26e7
FB
8391#ifdef TARGET_X86_64
8392 if (dc->code64)
8393 disas_flags = 2;
8394 else
8395#endif
8396 disas_flags = !dc->code32;
f4359b9f 8397 log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
93fcfe39 8398 qemu_log("\n");
2c0262af
FB
8399 }
8400#endif
8401
2e70f6ef 8402 if (!search_pc) {
2c0262af 8403 tb->size = pc_ptr - pc_start;
2e70f6ef
PB
8404 tb->icount = num_insns;
8405 }
2c0262af
FB
8406}
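The translation loop above ends a block under a handful of conditions: the instruction itself requested it, TF/single-step/IRQ-inhibit force one instruction per block, or the opcode buffer, page window or icount budget runs out. A condensed restatement of that decision, with illustrative parameter names (not part of translate.c):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: when the translation loop stops appending insns. */
bool tb_should_stop(bool is_jmp, bool tf, bool cpu_singlestep,
                    bool irq_inhibit, bool opbuf_full,
                    uint64_t pc_ptr, uint64_t pc_start,
                    int num_insns, int max_insns,
                    unsigned page_size, bool global_singlestep)
{
    if (is_jmp) {
        return true;                       /* insn ended the block itself */
    }
    if (tf || cpu_singlestep || irq_inhibit) {
        return true;                       /* one insn per TB in these modes */
    }
    if (opbuf_full
        || (pc_ptr - pc_start) >= (page_size - 32)
        || num_insns >= max_insns) {
        return true;                       /* translation budget exhausted */
    }
    return global_singlestep;
}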
8407
317ac620 8408void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
2c0262af 8409{
467215c2 8410 gen_intermediate_code_internal(x86_env_get_cpu(env), tb, false);
2c0262af
FB
8411}
8412
317ac620 8413void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
2c0262af 8414{
467215c2 8415 gen_intermediate_code_internal(x86_env_get_cpu(env), tb, true);
2c0262af
FB
8416}
8417
317ac620 8418void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
d2856f1a
AJ
8419{
8420 int cc_op;
8421#ifdef DEBUG_DISAS
8fec2b8c 8422 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
d2856f1a 8423 int i;
93fcfe39 8424 qemu_log("RESTORE:\n");
d2856f1a 8425 for(i = 0;i <= pc_pos; i++) {
ab1103de 8426 if (tcg_ctx.gen_opc_instr_start[i]) {
25983cad
EV
8427 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
8428 tcg_ctx.gen_opc_pc[i]);
d2856f1a
AJ
8429 }
8430 }
e87b7cb0 8431 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
25983cad 8432 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
d2856f1a
AJ
8433 (uint32_t)tb->cs_base);
8434 }
8435#endif
25983cad 8436 env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
d2856f1a
AJ
8437 cc_op = gen_opc_cc_op[pc_pos];
8438 if (cc_op != CC_OP_DYNAMIC)
8439 env->cc_op = cc_op;
8440}