git.proxmox.com Git - mirror_qemu.git/blame - target/i386/tcg/translate.c
target/i386: do not use s->tmp4 for push
2c0262af
FB
1/*
2 * i386 translation
5fafdf24 3 *
2c0262af
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
d9ff33ad 9 * version 2.1 of the License, or (at your option) any later version.
2c0262af
FB
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 18 */
b6a0aa05 19#include "qemu/osdep.h"
2c0262af 20
bec93d72 21#include "qemu/host-utils.h"
2c0262af 22#include "cpu.h"
76cad711 23#include "disas/disas.h"
63c91552 24#include "exec/exec-all.h"
dcb32f1d 25#include "tcg/tcg-op.h"
20581aad 26#include "tcg/tcg-op-gvec.h"
f08b6170 27#include "exec/cpu_ldst.h"
77fc6f5e 28#include "exec/translator.h"
2872b0f3 29#include "fpu/softfloat.h"
2c0262af 30
2ef6175a
RH
31#include "exec/helper-proto.h"
32#include "exec/helper-gen.h"
ed69e831 33#include "helper-tcg.h"
a7812ae4 34
508127e2 35#include "exec/log.h"
a7e30d84 36
d53106c9
RH
37#define HELPER_H "helper.h"
38#include "exec/helper-info.c.inc"
39#undef HELPER_H
40
41
2c0262af
FB
42#define PREFIX_REPZ 0x01
43#define PREFIX_REPNZ 0x02
44#define PREFIX_LOCK 0x04
45#define PREFIX_DATA 0x08
46#define PREFIX_ADR 0x10
701ed211 47#define PREFIX_VEX 0x20
1e92b727 48#define PREFIX_REX 0x40
2c0262af 49
bec93d72
RH
50#ifdef TARGET_X86_64
51# define ctztl ctz64
52# define clztl clz64
53#else
54# define ctztl ctz32
55# define clztl clz32
56#endif
57
1906b2af 58/* For a switch indexed by MODRM, match all memory operands for a given OP. */
880f8486 59#define CASE_MODRM_MEM_OP(OP) \
1906b2af
RH
60 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
61 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
62 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
63
880f8486
PB
64#define CASE_MODRM_OP(OP) \
65 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
66 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
67 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
68 case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
69
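/*
 * The ModRM byte is laid out as mod[7:6] reg[5:3] rm[2:0], so
 * CASE_MODRM_MEM_OP(OP) matches the three memory forms (mod = 0..2) whose
 * reg/opcode-extension field equals OP, for every rm value, while
 * CASE_MODRM_OP also accepts the register form (mod = 3).  For example,
 * CASE_MODRM_MEM_OP(1) expands (numerically) to:
 *
 *     case 0x08 ... 0x0f:
 *     case 0x48 ... 0x4f:
 *     case 0x88 ... 0x8f
 */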
57fec1fe
FB
70//#define MACRO_TEST 1
71
57fec1fe 72/* global register indexes */
93a3e108 73static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
f771ca6a 74static TCGv cpu_eip;
a7812ae4 75static TCGv_i32 cpu_cc_op;
cc739bb0 76static TCGv cpu_regs[CPU_NB_REGS];
3558f805 77static TCGv cpu_seg_base[6];
149b427b
RH
78static TCGv_i64 cpu_bndl[4];
79static TCGv_i64 cpu_bndu[4];
fbd80f02 80
2c0262af 81typedef struct DisasContext {
6cf147aa
LV
82 DisasContextBase base;
83
a6f62100 84 target_ulong pc; /* pc = eip + cs_base */
a6f62100 85 target_ulong cs_base; /* base of CS segment */
e3a79e0e 86 target_ulong pc_save;
a6f62100 87
14776ab5
TN
88 MemOp aflag;
89 MemOp dflag;
a6f62100
RH
90
91 int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
92 uint8_t prefix;
01b9d8c1 93
b3e22b23
PB
94 bool has_modrm;
95 uint8_t modrm;
96
01b9d8c1
RH
97#ifndef CONFIG_USER_ONLY
98 uint8_t cpl; /* code priv level */
0ab011cc 99 uint8_t iopl; /* i/o priv level */
01b9d8c1 100#endif
a6f62100
RH
101 uint8_t vex_l; /* vex vector length */
102 uint8_t vex_v; /* vex vvvv register, without 1's complement. */
103 uint8_t popl_esp_hack; /* for correct popl with esp base handling */
104 uint8_t rip_offset; /* only used in x86_64, but left for simplicity */
01b9d8c1 105
14ce26e7 106#ifdef TARGET_X86_64
bbdb4237 107 uint8_t rex_r;
915ffe89
RH
108 uint8_t rex_x;
109 uint8_t rex_b;
14ce26e7 110#endif
a61ef762 111 bool vex_w; /* used by AVX even on 32-bit processors */
305d08e5
RH
112 bool jmp_opt; /* use direct block chaining for direct jumps */
113 bool repz_opt; /* optimize jumps within repz instructions */
a6f62100
RH
114 bool cc_op_dirty;
115
116 CCOp cc_op; /* current CC operation */
2c0262af 117 int mem_index; /* select memory access functions */
c6ad6f44 118 uint32_t flags; /* all execution flags */
14ce26e7 119 int cpuid_features;
3d7374c5 120 int cpuid_ext_features;
e771edab 121 int cpuid_ext2_features;
12e26b75 122 int cpuid_ext3_features;
a9321a4d 123 int cpuid_7_0_ebx_features;
268dc464 124 int cpuid_7_0_ecx_features;
c9cfe8f9 125 int cpuid_xsave_features;
93a3e108
EC
126
127 /* TCG local temps */
128 TCGv cc_srcT;
6b672b5d 129 TCGv A0;
c66f9727 130 TCGv T0;
b48597b0 131 TCGv T1;
93a3e108 132
fbd80f02
EC
133 /* TCG local register indexes (only used inside old micro ops) */
134 TCGv tmp0;
5022f28f 135 TCGv tmp4;
6bd48f6f 136 TCGv_i32 tmp2_i32;
4f82446d 137 TCGv_i32 tmp3_i32;
776678b2 138 TCGv_i64 tmp1_i64;
fbd80f02 139
b066c537 140 sigjmp_buf jmpbuf;
95093668 141 TCGOp *prev_insn_end;
2c0262af
FB
142} DisasContext;
143
200ef603
RH
144#define DISAS_EOB_ONLY DISAS_TARGET_0
145#define DISAS_EOB_NEXT DISAS_TARGET_1
146#define DISAS_EOB_INHIBIT_IRQ DISAS_TARGET_2
faf9ea5f 147#define DISAS_JUMP DISAS_TARGET_3
200ef603 148
d75f9129
RH
149/* The environment in which user-only runs is constrained. */
150#ifdef CONFIG_USER_ONLY
151#define PE(S) true
01b9d8c1 152#define CPL(S) 3
0ab011cc 153#define IOPL(S) 0
5d223889 154#define SVME(S) false
b322b3af 155#define GUEST(S) false
d75f9129
RH
156#else
157#define PE(S) (((S)->flags & HF_PE_MASK) != 0)
01b9d8c1 158#define CPL(S) ((S)->cpl)
0ab011cc 159#define IOPL(S) ((S)->iopl)
5d223889 160#define SVME(S) (((S)->flags & HF_SVME_MASK) != 0)
b322b3af 161#define GUEST(S) (((S)->flags & HF_GUEST_MASK) != 0)
d75f9129 162#endif
f8a35846
RH
163#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
164#define VM86(S) false
9996dcfd 165#define CODE32(S) true
b40a47a1 166#define SS32(S) true
beedb93c 167#define ADDSEG(S) false
f8a35846
RH
168#else
169#define VM86(S) (((S)->flags & HF_VM_MASK) != 0)
9996dcfd 170#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
b40a47a1 171#define SS32(S) (((S)->flags & HF_SS32_MASK) != 0)
beedb93c 172#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
f8a35846 173#endif
eec7d0f8
RH
174#if !defined(TARGET_X86_64)
175#define CODE64(S) false
176#elif defined(CONFIG_USER_ONLY)
177#define CODE64(S) true
178#else
179#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
40a205da 180#endif
1da389c5 181#if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
73e90dc4 182#define LMA(S) (((S)->flags & HF_LMA_MASK) != 0)
1da389c5
PMD
183#else
184#define LMA(S) false
eec7d0f8 185#endif
d75f9129 186
1e92b727
RH
187#ifdef TARGET_X86_64
188#define REX_PREFIX(S) (((S)->prefix & PREFIX_REX) != 0)
a61ef762 189#define REX_W(S) ((S)->vex_w)
bbdb4237 190#define REX_R(S) ((S)->rex_r + 0)
915ffe89
RH
191#define REX_X(S) ((S)->rex_x + 0)
192#define REX_B(S) ((S)->rex_b + 0)
1e92b727
RH
193#else
194#define REX_PREFIX(S) false
8ab1e486 195#define REX_W(S) false
bbdb4237 196#define REX_R(S) 0
915ffe89
RH
197#define REX_X(S) 0
198#define REX_B(S) 0
1e92b727
RH
199#endif
200
9f55e5a9
RH
201/*
202 * Many sysemu-only helpers are not reachable for user-only.
 203 * Define stub generators here, so that we need neither sprinkle
 204 * ifdefs through the translator nor provide the helper function.
205 */
206#define STUB_HELPER(NAME, ...) \
207 static inline void gen_helper_##NAME(__VA_ARGS__) \
208 { qemu_build_not_reached(); }
209
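/*
 * As a concrete illustration, STUB_HELPER(clgi, TCGv_env env) expands to
 *
 *     static inline void gen_helper_clgi(TCGv_env env)
 *     { qemu_build_not_reached(); }
 *
 * so a user-only build compiles the translator unchanged, and any path
 * that would actually emit such a call is proven unreachable at build time.
 */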
210#ifdef CONFIG_USER_ONLY
8d6806c7 211STUB_HELPER(clgi, TCGv_env env)
35e5a5d5 212STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
4ea2449b 213STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs)
7fb7c423
RH
214STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
215STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
216STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
4ea2449b
RH
217STUB_HELPER(monitor, TCGv_env env, TCGv addr)
218STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
7fb7c423
RH
219STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
220STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
221STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
f7803b77
RH
222STUB_HELPER(rdmsr, TCGv_env env)
223STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
533883fd 224STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
9f55e5a9 225STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
8d6806c7 226STUB_HELPER(stgi, TCGv_env env)
d051ea04 227STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
8d6806c7
RH
228STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
229STUB_HELPER(vmmcall, TCGv_env env)
230STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
231STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
f7803b77
RH
232STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
233STUB_HELPER(wrmsr, TCGv_env env)
9f55e5a9
RH
234#endif
235
2c0262af 236static void gen_eob(DisasContext *s);
faf9ea5f 237static void gen_jr(DisasContext *s);
8760ded6 238static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
2255da49 239static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
14776ab5 240static void gen_op(DisasContext *s1, int op, MemOp ot, int d);
d76b9c6f 241static void gen_exception_gpf(DisasContext *s);
2c0262af
FB
242
243/* i386 arith/logic operations */
244enum {
5fafdf24
TS
245 OP_ADDL,
246 OP_ORL,
247 OP_ADCL,
2c0262af 248 OP_SBBL,
5fafdf24
TS
249 OP_ANDL,
250 OP_SUBL,
251 OP_XORL,
2c0262af
FB
252 OP_CMPL,
253};
254
255/* i386 shift ops */
256enum {
5fafdf24
TS
257 OP_ROL,
258 OP_ROR,
259 OP_RCL,
260 OP_RCR,
261 OP_SHL,
262 OP_SHR,
2c0262af
FB
263 OP_SHL1, /* undocumented */
264 OP_SAR = 7,
265};
266
8e1c85e3
FB
267enum {
268 JCC_O,
269 JCC_B,
270 JCC_Z,
271 JCC_BE,
272 JCC_S,
273 JCC_P,
274 JCC_L,
275 JCC_LE,
276};
277
2c0262af
FB
278enum {
279 /* I386 int registers */
280 OR_EAX, /* MUST be even numbered */
281 OR_ECX,
282 OR_EDX,
283 OR_EBX,
284 OR_ESP,
285 OR_EBP,
286 OR_ESI,
287 OR_EDI,
14ce26e7
FB
288
289 OR_TMP0 = 16, /* temporary operand register */
2c0262af
FB
290 OR_TMP1,
291 OR_A0, /* temporary register used when doing address evaluation */
2c0262af
FB
292};
293
b666265b 294enum {
a3251186
RH
295 USES_CC_DST = 1,
296 USES_CC_SRC = 2,
988c3eb0
RH
297 USES_CC_SRC2 = 4,
298 USES_CC_SRCT = 8,
b666265b
RH
299};
300
301/* Bit set if the global variable is live after setting CC_OP to X. */
302static const uint8_t cc_op_live[CC_OP_NB] = {
988c3eb0 303 [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
b666265b
RH
304 [CC_OP_EFLAGS] = USES_CC_SRC,
305 [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
306 [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
988c3eb0 307 [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
a3251186 308 [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
988c3eb0 309 [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
b666265b
RH
310 [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
311 [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
312 [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
313 [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
314 [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
bc4b43dc 315 [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
cd7f97ca
RH
316 [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
317 [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
318 [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
436ff2d2 319 [CC_OP_CLR] = 0,
4885c3c4 320 [CC_OP_POPCNT] = USES_CC_SRC,
b666265b
RH
321};
322
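/*
 * Example of how this table is used: if the current cc_op is in
 * CC_OP_SUBB..CC_OP_SUBQ and the new one is in CC_OP_LOGICB..CC_OP_LOGICQ,
 * only USES_CC_DST remains live, so set_cc_op() below may discard
 * cpu_cc_src and cc_srcT rather than keep dead values alive across the TB.
 */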
e207582f 323static void set_cc_op(DisasContext *s, CCOp op)
3ca51d07 324{
b666265b
RH
325 int dead;
326
327 if (s->cc_op == op) {
328 return;
329 }
330
331 /* Discard CC computation that will no longer be used. */
332 dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
333 if (dead & USES_CC_DST) {
334 tcg_gen_discard_tl(cpu_cc_dst);
e207582f 335 }
b666265b
RH
336 if (dead & USES_CC_SRC) {
337 tcg_gen_discard_tl(cpu_cc_src);
338 }
988c3eb0
RH
339 if (dead & USES_CC_SRC2) {
340 tcg_gen_discard_tl(cpu_cc_src2);
341 }
a3251186 342 if (dead & USES_CC_SRCT) {
93a3e108 343 tcg_gen_discard_tl(s->cc_srcT);
a3251186 344 }
b666265b 345
e2f515cf
RH
346 if (op == CC_OP_DYNAMIC) {
347 /* The DYNAMIC setting is translator only, and should never be
348 stored. Thus we always consider it clean. */
349 s->cc_op_dirty = false;
350 } else {
351 /* Discard any computed CC_OP value (see shifts). */
352 if (s->cc_op == CC_OP_DYNAMIC) {
353 tcg_gen_discard_i32(cpu_cc_op);
354 }
355 s->cc_op_dirty = true;
356 }
b666265b 357 s->cc_op = op;
e207582f
RH
358}
359
e207582f
RH
360static void gen_update_cc_op(DisasContext *s)
361{
362 if (s->cc_op_dirty) {
773cdfcc 363 tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
e207582f
RH
364 s->cc_op_dirty = false;
365 }
3ca51d07
RH
366}
367
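/*
 * Typical pattern for the lazy flags scheme (a sketch of the flow, not a
 * quote from any one place in this file): an arithmetic op stores its
 * operands/result in cc_dst, cc_src(2) and records the operation with
 * set_cc_op(); the flags themselves are only materialized when a consumer
 * such as gen_compute_eflags() or a conditional branch asks for them, and
 * gen_update_cc_op() spills the pending cc_op value to the CPU state
 * before anything that may fault or leave the TB.
 */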
14ce26e7
FB
368#ifdef TARGET_X86_64
369
370#define NB_OP_SIZES 4
371
14ce26e7
FB
372#else /* !TARGET_X86_64 */
373
374#define NB_OP_SIZES 3
375
14ce26e7
FB
376#endif /* !TARGET_X86_64 */
377
e03b5686 378#if HOST_BIG_ENDIAN
57fec1fe
FB
379#define REG_B_OFFSET (sizeof(target_ulong) - 1)
380#define REG_H_OFFSET (sizeof(target_ulong) - 2)
381#define REG_W_OFFSET (sizeof(target_ulong) - 2)
382#define REG_L_OFFSET (sizeof(target_ulong) - 4)
383#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
14ce26e7 384#else
57fec1fe
FB
385#define REG_B_OFFSET 0
386#define REG_H_OFFSET 1
387#define REG_W_OFFSET 0
388#define REG_L_OFFSET 0
389#define REG_LH_OFFSET 4
14ce26e7 390#endif
57fec1fe 391
96d7073f
PM
392/* In instruction encodings for byte register accesses the
393 * register number usually indicates "low 8 bits of register N";
394 * however there are some special cases where N 4..7 indicates
395 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
396 * true for this special case, false otherwise.
397 */
1dbe15ef 398static inline bool byte_reg_is_xH(DisasContext *s, int reg)
96d7073f 399{
1e92b727
RH
400 /* Any time the REX prefix is present, byte registers are uniform */
401 if (reg < 4 || REX_PREFIX(s)) {
96d7073f
PM
402 return false;
403 }
96d7073f
PM
404 return true;
405}
406
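/*
 * Concretely: without a REX prefix, byte-register encodings 0..7 select
 * AL, CL, DL, BL, AH, CH, DH, BH, so byte_reg_is_xH() returns true for
 * 4..7 and the access targets bits 15..8 of cpu_regs[reg - 4].  With any
 * REX prefix the same encodings select AL..BL, SPL, BPL, SIL, DIL and the
 * function returns false.
 */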
ab4e4aec 407/* Select the size of a push/pop operation. */
14776ab5 408static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
ab4e4aec
RH
409{
410 if (CODE64(s)) {
411 return ot == MO_16 ? MO_16 : MO_64;
412 } else {
413 return ot;
414 }
415}
416
64ae256c 417/* Select the size of the stack pointer. */
14776ab5 418static inline MemOp mo_stacksize(DisasContext *s)
64ae256c 419{
b40a47a1 420 return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
64ae256c
RH
421}
422
ab4e4aec 423/* Select only size 64 else 32. Used for SSE operand sizes. */
14776ab5 424static inline MemOp mo_64_32(MemOp ot)
ab4e4aec
RH
425{
426#ifdef TARGET_X86_64
427 return ot == MO_64 ? MO_64 : MO_32;
428#else
429 return MO_32;
430#endif
431}
432
433/* Select size 8 if lsb of B is clear, else OT. Used for decoding
434 byte vs word opcodes. */
14776ab5 435static inline MemOp mo_b_d(int b, MemOp ot)
ab4e4aec
RH
436{
437 return b & 1 ? ot : MO_8;
438}
439
440/* Select size 8 if lsb of B is clear, else OT capped at 32.
441 Used for decoding operand size of port opcodes. */
14776ab5 442static inline MemOp mo_b_d32(int b, MemOp ot)
ab4e4aec
RH
443{
444 return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
445}
446
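/*
 * Example: for "0x88 /r" (MOV Eb,Gb) the low opcode bit is clear and
 * mo_b_d() yields MO_8, while "0x89 /r" (MOV Ev,Gv) keeps the current
 * operand size.  mo_b_d32() does the same for IN/OUT, whose operand size
 * never exceeds 32 bits even in 64-bit mode.
 */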
d1bb978b
PB
447/* Compute the result of writing t0 to the OT-sized register REG.
448 *
449 * If DEST is NULL, store the result into the register and return the
450 * register's TCGv.
451 *
452 * If DEST is not NULL, store the result into DEST and return the
453 * register's TCGv.
454 */
455static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
57fec1fe
FB
456{
457 switch(ot) {
4ba9938c 458 case MO_8:
d1bb978b
PB
459 if (byte_reg_is_xH(s, reg)) {
460 dest = dest ? dest : cpu_regs[reg - 4];
461 tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
462 return cpu_regs[reg - 4];
57fec1fe 463 }
d1bb978b
PB
464 dest = dest ? dest : cpu_regs[reg];
465 tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
57fec1fe 466 break;
4ba9938c 467 case MO_16:
d1bb978b
PB
468 dest = dest ? dest : cpu_regs[reg];
469 tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
57fec1fe 470 break;
4ba9938c 471 case MO_32:
cc739bb0
LD
472 /* For x86_64, this sets the higher half of register to zero.
473 For i386, this is equivalent to a mov. */
d1bb978b
PB
474 dest = dest ? dest : cpu_regs[reg];
475 tcg_gen_ext32u_tl(dest, t0);
57fec1fe 476 break;
cc739bb0 477#ifdef TARGET_X86_64
4ba9938c 478 case MO_64:
d1bb978b
PB
479 dest = dest ? dest : cpu_regs[reg];
480 tcg_gen_mov_tl(dest, t0);
57fec1fe 481 break;
14ce26e7 482#endif
d67dc9e6 483 default:
732e89f4 484 g_assert_not_reached();
57fec1fe 485 }
d1bb978b
PB
486 return cpu_regs[reg];
487}
488
489static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
490{
491 gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
57fec1fe 492}
2c0262af 493
1dbe15ef 494static inline
14776ab5 495void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
57fec1fe 496{
1dbe15ef 497 if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
04fc2f1c 498 tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
96d7073f 499 } else {
cc739bb0 500 tcg_gen_mov_tl(t0, cpu_regs[reg]);
57fec1fe
FB
501 }
502}
503
57fec1fe
FB
504static void gen_add_A0_im(DisasContext *s, int val)
505{
6b672b5d 506 tcg_gen_addi_tl(s->A0, s->A0, val);
4e85057b 507 if (!CODE64(s)) {
6b672b5d 508 tcg_gen_ext32u_tl(s->A0, s->A0);
4e85057b 509 }
57fec1fe 510}
2c0262af 511
e3a79e0e 512static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
57fec1fe 513{
f771ca6a 514 tcg_gen_mov_tl(cpu_eip, dest);
e3a79e0e 515 s->pc_save = -1;
57fec1fe
FB
516}
517
fbd80f02 518static inline
14776ab5 519void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
57fec1fe 520{
fbd80f02 521 tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
1dbe15ef 522 gen_op_mov_reg_v(s, size, reg, s->tmp0);
57fec1fe
FB
523}
524
c0099cd4 525static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
57fec1fe 526{
c0099cd4 527 tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val);
1dbe15ef 528 gen_op_mov_reg_v(s, size, reg, s->tmp0);
6e0d8677 529}
57fec1fe 530
323d1876 531static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
57fec1fe 532{
3c5f4116 533 tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
57fec1fe 534}
2c0262af 535
323d1876 536static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
57fec1fe 537{
3523e4bd 538 tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
57fec1fe 539}
4f31916f 540
d4faa3e0
RH
541static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
542{
543 if (d == OR_TMP0) {
c66f9727 544 gen_op_st_v(s, idx, s->T0, s->A0);
d4faa3e0 545 } else {
1dbe15ef 546 gen_op_mov_reg_v(s, idx, d, s->T0);
d4faa3e0
RH
547 }
548}
549
65e4af23
RH
550static void gen_update_eip_cur(DisasContext *s)
551{
e3a79e0e 552 assert(s->pc_save != -1);
2e3afe8e 553 if (tb_cflags(s->base.tb) & CF_PCREL) {
e3a79e0e 554 tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
b5e0d5d2
RH
555 } else if (CODE64(s)) {
556 tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
e3a79e0e 557 } else {
b5e0d5d2 558 tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
e3a79e0e
RH
559 }
560 s->pc_save = s->base.pc_next;
14ce26e7
FB
561}
562
09e99df4
RH
563static void gen_update_eip_next(DisasContext *s)
564{
e3a79e0e 565 assert(s->pc_save != -1);
2e3afe8e 566 if (tb_cflags(s->base.tb) & CF_PCREL) {
e3a79e0e 567 tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
b5e0d5d2
RH
568 } else if (CODE64(s)) {
569 tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
e3a79e0e 570 } else {
b5e0d5d2 571 tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
e3a79e0e
RH
572 }
573 s->pc_save = s->pc;
09e99df4
RH
574}
575
ad1d6f07
RH
576static int cur_insn_len(DisasContext *s)
577{
578 return s->pc - s->base.pc_next;
579}
580
581static TCGv_i32 cur_insn_len_i32(DisasContext *s)
582{
583 return tcg_constant_i32(cur_insn_len(s));
584}
585
9e599bf7
RH
586static TCGv_i32 eip_next_i32(DisasContext *s)
587{
e3a79e0e 588 assert(s->pc_save != -1);
9e599bf7
RH
589 /*
590 * This function has two users: lcall_real (always 16-bit mode), and
591 * iret_protected (16, 32, or 64-bit mode). IRET only uses the value
592 * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
593 * why passing a 32-bit value isn't broken. To avoid using this where
594 * we shouldn't, return -1 in 64-bit mode so that execution goes into
595 * the weeds quickly.
596 */
597 if (CODE64(s)) {
598 return tcg_constant_i32(-1);
599 }
2e3afe8e 600 if (tb_cflags(s->base.tb) & CF_PCREL) {
e3a79e0e
RH
601 TCGv_i32 ret = tcg_temp_new_i32();
602 tcg_gen_trunc_tl_i32(ret, cpu_eip);
603 tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
604 return ret;
605 } else {
606 return tcg_constant_i32(s->pc - s->cs_base);
607 }
9e599bf7
RH
608}
609
610static TCGv eip_next_tl(DisasContext *s)
611{
e3a79e0e 612 assert(s->pc_save != -1);
2e3afe8e 613 if (tb_cflags(s->base.tb) & CF_PCREL) {
e3a79e0e
RH
614 TCGv ret = tcg_temp_new();
615 tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
616 return ret;
b5e0d5d2
RH
617 } else if (CODE64(s)) {
618 return tcg_constant_tl(s->pc);
e3a79e0e 619 } else {
b5e0d5d2 620 return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
e3a79e0e 621 }
9e599bf7
RH
622}
623
75ec746a
RH
624static TCGv eip_cur_tl(DisasContext *s)
625{
e3a79e0e 626 assert(s->pc_save != -1);
2e3afe8e 627 if (tb_cflags(s->base.tb) & CF_PCREL) {
e3a79e0e
RH
628 TCGv ret = tcg_temp_new();
629 tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
630 return ret;
b5e0d5d2
RH
631 } else if (CODE64(s)) {
632 return tcg_constant_tl(s->base.pc_next);
e3a79e0e 633 } else {
b5e0d5d2 634 return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
e3a79e0e 635 }
75ec746a
RH
636}
637
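/*
 * The three helpers above differ only in which program counter they
 * produce (next insn as i32, next insn, current insn).  With CF_PCREL the
 * code is generated position-independently, so the absolute EIP is not
 * known at translate time and the value is derived from cpu_eip plus a
 * displacement computed from pc_save; without CF_PCREL the address is a
 * compile-time constant of the TB and a tcg constant suffices.
 */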
24c0573b 638/* Compute SEG:REG into DEST. SEG is selected from the override segment
ca2f29f5
RH
639 (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to
640 indicate no override. */
24c0573b
PB
641static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
642 int def_seg, int ovr_seg)
2c0262af 643{
ca2f29f5 644 switch (aflag) {
14ce26e7 645#ifdef TARGET_X86_64
1d71ddb1 646 case MO_64:
ca2f29f5 647 if (ovr_seg < 0) {
24c0573b 648 tcg_gen_mov_tl(dest, a0);
ca2f29f5 649 return;
14ce26e7 650 }
1d71ddb1 651 break;
14ce26e7 652#endif
1d71ddb1 653 case MO_32:
2c0262af 654 /* 32 bit address */
beedb93c 655 if (ovr_seg < 0 && ADDSEG(s)) {
620abfb0
PB
656 ovr_seg = def_seg;
657 }
ca2f29f5 658 if (ovr_seg < 0) {
24c0573b 659 tcg_gen_ext32u_tl(dest, a0);
620abfb0 660 return;
2c0262af 661 }
1d71ddb1
RH
662 break;
663 case MO_16:
ca2f29f5 664 /* 16 bit address */
24c0573b
PB
665 tcg_gen_ext16u_tl(dest, a0);
666 a0 = dest;
e2e02a82 667 if (ovr_seg < 0) {
beedb93c 668 if (ADDSEG(s)) {
e2e02a82
PB
669 ovr_seg = def_seg;
670 } else {
671 return;
672 }
673 }
1d71ddb1
RH
674 break;
675 default:
732e89f4 676 g_assert_not_reached();
2c0262af 677 }
2c0262af 678
ca2f29f5 679 if (ovr_seg >= 0) {
3558f805 680 TCGv seg = cpu_seg_base[ovr_seg];
ca2f29f5
RH
681
682 if (aflag == MO_64) {
24c0573b 683 tcg_gen_add_tl(dest, a0, seg);
ca2f29f5 684 } else if (CODE64(s)) {
24c0573b
PB
685 tcg_gen_ext32u_tl(dest, a0);
686 tcg_gen_add_tl(dest, dest, seg);
2c0262af 687 } else {
24c0573b
PB
688 tcg_gen_add_tl(dest, a0, seg);
689 tcg_gen_ext32u_tl(dest, dest);
2c0262af 690 }
2c0262af
FB
691 }
692}
693
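/*
 * Address formation sketch for the helper above: a 16-bit access such as
 * "mov ax, [bx+si]" arrives with aflag == MO_16, a0 = BX+SI,
 * def_seg == R_DS and ovr_seg == -1; the offset is zero-extended to
 * 16 bits and, if HF_ADDSEG is set, cpu_seg_base[R_DS] is added and the
 * sum truncated to 32 bits.  In 64-bit mode the offset is used as-is
 * unless an override names a segment whose base is still honoured
 * (in practice FS or GS).
 */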
24c0573b
PB
694static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
695 int def_seg, int ovr_seg)
696{
697 gen_lea_v_seg_dest(s, aflag, s->A0, a0, def_seg, ovr_seg);
698}
699
ca2f29f5
RH
700static inline void gen_string_movl_A0_ESI(DisasContext *s)
701{
77ebcad0 702 gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
ca2f29f5
RH
703}
704
705static inline void gen_string_movl_A0_EDI(DisasContext *s)
706{
77ebcad0 707 gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
ca2f29f5
RH
708}
709
c0099cd4 710static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot)
6e0d8677 711{
c0099cd4
PB
712 TCGv dshift = tcg_temp_new();
713 tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
714 tcg_gen_shli_tl(dshift, dshift, ot);
715 return dshift;
2c0262af
FB
716};
717
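/*
 * gen_compute_Dshift() turns EFLAGS.DF into the per-iteration stride for
 * string ops: env->df is kept as +1 or -1, so shifting it left by the
 * MemOp gives e.g. +4/-4 for a 32-bit element, which the callers then add
 * to ESI/EDI.
 */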
14776ab5 718static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
6e0d8677 719{
23f3d586 720 if (size == MO_TL) {
d824df34 721 return src;
6e0d8677 722 }
23f3d586
RH
723 tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
724 return dst;
6e0d8677 725}
3b46e624 726
14776ab5 727static void gen_extu(MemOp ot, TCGv reg)
d824df34
PB
728{
729 gen_ext_tl(reg, reg, ot, false);
730}
731
14776ab5 732static void gen_exts(MemOp ot, TCGv reg)
6e0d8677 733{
d824df34 734 gen_ext_tl(reg, reg, ot, true);
6e0d8677 735}
2c0262af 736
0ebacb5d 737static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
6e0d8677 738{
fbd80f02 739 tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]);
0ebacb5d
RH
740 gen_extu(s->aflag, s->tmp0);
741 tcg_gen_brcondi_tl(cond, s->tmp0, 0, label1);
6e0d8677
FB
742}
743
0ebacb5d 744static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
6e0d8677 745{
0ebacb5d
RH
746 gen_op_j_ecx(s, TCG_COND_EQ, label1);
747}
748
749static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
750{
751 gen_op_j_ecx(s, TCG_COND_NE, label1);
6e0d8677 752}
2c0262af 753
14776ab5 754static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
a7812ae4
PB
755{
756 switch (ot) {
4ba9938c 757 case MO_8:
ad75a51e 758 gen_helper_inb(v, tcg_env, n);
93ab25d7 759 break;
4ba9938c 760 case MO_16:
ad75a51e 761 gen_helper_inw(v, tcg_env, n);
93ab25d7 762 break;
4ba9938c 763 case MO_32:
ad75a51e 764 gen_helper_inl(v, tcg_env, n);
93ab25d7 765 break;
d67dc9e6 766 default:
732e89f4 767 g_assert_not_reached();
a7812ae4 768 }
a7812ae4 769}
2c0262af 770
14776ab5 771static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
a7812ae4
PB
772{
773 switch (ot) {
4ba9938c 774 case MO_8:
ad75a51e 775 gen_helper_outb(tcg_env, v, n);
93ab25d7 776 break;
4ba9938c 777 case MO_16:
ad75a51e 778 gen_helper_outw(tcg_env, v, n);
93ab25d7 779 break;
4ba9938c 780 case MO_32:
ad75a51e 781 gen_helper_outl(tcg_env, v, n);
93ab25d7 782 break;
d67dc9e6 783 default:
732e89f4 784 g_assert_not_reached();
a7812ae4 785 }
a7812ae4 786}
f115e911 787
1bca40fe
RH
788/*
789 * Validate that access to [port, port + 1<<ot) is allowed.
790 * Raise #GP, or VMM exit if not.
791 */
792static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
793 uint32_t svm_flags)
f115e911 794{
d76b9c6f
RH
795#ifdef CONFIG_USER_ONLY
796 /*
797 * We do not implement the ioperm(2) syscall, so the TSS check
798 * will always fail.
799 */
800 gen_exception_gpf(s);
801 return false;
802#else
f8a35846 803 if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
ad75a51e 804 gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
b8b6a50b 805 }
b322b3af 806 if (GUEST(s)) {
100ec099 807 gen_update_cc_op(s);
65e4af23 808 gen_update_eip_cur(s);
bc2e436d
RH
809 if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
810 svm_flags |= SVM_IOIO_REP_MASK;
811 }
812 svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
ad75a51e 813 gen_helper_svm_check_io(tcg_env, port,
bc2e436d 814 tcg_constant_i32(svm_flags),
ad1d6f07 815 cur_insn_len_i32(s));
f115e911 816 }
bc2e436d 817 return true;
d76b9c6f 818#endif
f115e911
FB
819}
820
122e6d7b 821static void gen_movs(DisasContext *s, MemOp ot)
2c0262af 822{
c0099cd4
PB
823 TCGv dshift;
824
2c0262af 825 gen_string_movl_A0_ESI(s);
c66f9727 826 gen_op_ld_v(s, ot, s->T0, s->A0);
2c0262af 827 gen_string_movl_A0_EDI(s);
c66f9727 828 gen_op_st_v(s, ot, s->T0, s->A0);
c0099cd4
PB
829
830 dshift = gen_compute_Dshift(s, ot);
831 gen_op_add_reg(s, s->aflag, R_ESI, dshift);
832 gen_op_add_reg(s, s->aflag, R_EDI, dshift);
2c0262af
FB
833}
834
c66f9727 835static void gen_op_update1_cc(DisasContext *s)
b6abf97d 836{
c66f9727 837 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
b6abf97d
FB
838}
839
c66f9727 840static void gen_op_update2_cc(DisasContext *s)
b6abf97d 841{
b48597b0 842 tcg_gen_mov_tl(cpu_cc_src, s->T1);
c66f9727 843 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
b6abf97d
FB
844}
845
c66f9727 846static void gen_op_update3_cc(DisasContext *s, TCGv reg)
988c3eb0
RH
847{
848 tcg_gen_mov_tl(cpu_cc_src2, reg);
b48597b0 849 tcg_gen_mov_tl(cpu_cc_src, s->T1);
c66f9727 850 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
988c3eb0
RH
851}
852
c66f9727 853static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
b6abf97d 854{
b48597b0 855 tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);
b6abf97d
FB
856}
857
93a3e108 858static void gen_op_update_neg_cc(DisasContext *s)
b6abf97d 859{
c66f9727
EC
860 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
861 tcg_gen_neg_tl(cpu_cc_src, s->T0);
93a3e108 862 tcg_gen_movi_tl(s->cc_srcT, 0);
b6abf97d
FB
863}
864
80e55f54
PB
865/* compute all eflags to reg */
866static void gen_mov_eflags(DisasContext *s, TCGv reg)
8e1c85e3 867{
80e55f54
PB
868 TCGv dst, src1, src2;
869 TCGv_i32 cc_op;
db9f2597
RH
870 int live, dead;
871
d229edce 872 if (s->cc_op == CC_OP_EFLAGS) {
80e55f54 873 tcg_gen_mov_tl(reg, cpu_cc_src);
d229edce
RH
874 return;
875 }
436ff2d2 876 if (s->cc_op == CC_OP_CLR) {
80e55f54 877 tcg_gen_movi_tl(reg, CC_Z | CC_P);
436ff2d2
RH
878 return;
879 }
db9f2597 880
db9f2597
RH
881 dst = cpu_cc_dst;
882 src1 = cpu_cc_src;
988c3eb0 883 src2 = cpu_cc_src2;
db9f2597
RH
884
885 /* Take care to not read values that are not live. */
886 live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
988c3eb0 887 dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
db9f2597 888 if (dead) {
80e55f54 889 TCGv zero = tcg_constant_tl(0);
db9f2597
RH
890 if (dead & USES_CC_DST) {
891 dst = zero;
892 }
893 if (dead & USES_CC_SRC) {
894 src1 = zero;
895 }
988c3eb0
RH
896 if (dead & USES_CC_SRC2) {
897 src2 = zero;
898 }
db9f2597
RH
899 }
900
80e55f54
PB
901 if (s->cc_op != CC_OP_DYNAMIC) {
902 cc_op = tcg_constant_i32(s->cc_op);
903 } else {
904 cc_op = cpu_cc_op;
905 }
906 gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
907}
908
909/* compute all eflags to cc_src */
910static void gen_compute_eflags(DisasContext *s)
911{
912 gen_mov_eflags(s, cpu_cc_src);
d229edce 913 set_cc_op(s, CC_OP_EFLAGS);
8e1c85e3
FB
914}
915
bec93d72
RH
916typedef struct CCPrepare {
917 TCGCond cond;
918 TCGv reg;
919 TCGv reg2;
920 target_ulong imm;
921 target_ulong mask;
922 bool use_reg2;
923 bool no_setcond;
924} CCPrepare;
925
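/*
 * A CCPrepare describes a condition without forcing its evaluation:
 * "cond holds of (reg [vs. reg2/imm]) after masking with mask".  For
 * instance, gen_prepare_eflags_p() returns { TCG_COND_NE, cpu_cc_src,
 * .mask = CC_P }, and consumers such as gen_setcc1() or gen_jcc1() pick
 * the cheapest lowering: a setcond, a shift+and, or a brcond.
 */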
06847f1f 926/* compute eflags.C to reg */
bec93d72 927static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
06847f1f
RH
928{
929 TCGv t0, t1;
bec93d72 930 int size, shift;
06847f1f
RH
931
932 switch (s->cc_op) {
933 case CC_OP_SUBB ... CC_OP_SUBQ:
a3251186 934 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
06847f1f 935 size = s->cc_op - CC_OP_SUBB;
fbd80f02 936 t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
06847f1f 937 /* If no temporary was used, be careful not to alias t1 and t0. */
fbd80f02 938 t0 = t1 == cpu_cc_src ? s->tmp0 : reg;
93a3e108 939 tcg_gen_mov_tl(t0, s->cc_srcT);
06847f1f
RH
940 gen_extu(size, t0);
941 goto add_sub;
942
943 case CC_OP_ADDB ... CC_OP_ADDQ:
944 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
945 size = s->cc_op - CC_OP_ADDB;
fbd80f02 946 t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
06847f1f
RH
947 t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
948 add_sub:
bec93d72
RH
949 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
950 .reg2 = t1, .mask = -1, .use_reg2 = true };
06847f1f 951
06847f1f 952 case CC_OP_LOGICB ... CC_OP_LOGICQ:
436ff2d2 953 case CC_OP_CLR:
4885c3c4 954 case CC_OP_POPCNT:
bec93d72 955 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
06847f1f
RH
956
957 case CC_OP_INCB ... CC_OP_INCQ:
958 case CC_OP_DECB ... CC_OP_DECQ:
bec93d72
RH
959 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
960 .mask = -1, .no_setcond = true };
06847f1f
RH
961
962 case CC_OP_SHLB ... CC_OP_SHLQ:
963 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
964 size = s->cc_op - CC_OP_SHLB;
bec93d72
RH
965 shift = (8 << size) - 1;
966 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
967 .mask = (target_ulong)1 << shift };
06847f1f
RH
968
969 case CC_OP_MULB ... CC_OP_MULQ:
bec93d72
RH
970 return (CCPrepare) { .cond = TCG_COND_NE,
971 .reg = cpu_cc_src, .mask = -1 };
06847f1f 972
bc4b43dc
RH
973 case CC_OP_BMILGB ... CC_OP_BMILGQ:
974 size = s->cc_op - CC_OP_BMILGB;
975 t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
976 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
977
cd7f97ca
RH
978 case CC_OP_ADCX:
979 case CC_OP_ADCOX:
980 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
981 .mask = -1, .no_setcond = true };
982
06847f1f
RH
983 case CC_OP_EFLAGS:
984 case CC_OP_SARB ... CC_OP_SARQ:
985 /* CC_SRC & 1 */
bec93d72
RH
986 return (CCPrepare) { .cond = TCG_COND_NE,
987 .reg = cpu_cc_src, .mask = CC_C };
06847f1f
RH
988
989 default:
990 /* The need to compute only C from CC_OP_DYNAMIC is important
991 in efficiently implementing e.g. INC at the start of a TB. */
992 gen_update_cc_op(s);
988c3eb0
RH
993 gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
994 cpu_cc_src2, cpu_cc_op);
bec93d72
RH
995 return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
996 .mask = -1, .no_setcond = true };
06847f1f
RH
997 }
998}
999
1608ecca 1000/* compute eflags.P to reg */
bec93d72 1001static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
1608ecca 1002{
d229edce 1003 gen_compute_eflags(s);
bec93d72
RH
1004 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1005 .mask = CC_P };
1608ecca
PB
1006}
1007
1008/* compute eflags.S to reg */
bec93d72 1009static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
1608ecca 1010{
086c4077
RH
1011 switch (s->cc_op) {
1012 case CC_OP_DYNAMIC:
1013 gen_compute_eflags(s);
1014 /* FALLTHRU */
1015 case CC_OP_EFLAGS:
cd7f97ca
RH
1016 case CC_OP_ADCX:
1017 case CC_OP_ADOX:
1018 case CC_OP_ADCOX:
bec93d72
RH
1019 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1020 .mask = CC_S };
436ff2d2 1021 case CC_OP_CLR:
4885c3c4 1022 case CC_OP_POPCNT:
436ff2d2 1023 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
086c4077
RH
1024 default:
1025 {
14776ab5 1026 MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
086c4077 1027 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
bec93d72 1028 return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
086c4077 1029 }
086c4077 1030 }
1608ecca
PB
1031}
1032
1033/* compute eflags.O to reg */
bec93d72 1034static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
1608ecca 1035{
cd7f97ca
RH
1036 switch (s->cc_op) {
1037 case CC_OP_ADOX:
1038 case CC_OP_ADCOX:
1039 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
1040 .mask = -1, .no_setcond = true };
436ff2d2 1041 case CC_OP_CLR:
4885c3c4 1042 case CC_OP_POPCNT:
436ff2d2 1043 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
1e7dde80
PB
1044 case CC_OP_MULB ... CC_OP_MULQ:
1045 return (CCPrepare) { .cond = TCG_COND_NE,
1046 .reg = cpu_cc_src, .mask = -1 };
cd7f97ca
RH
1047 default:
1048 gen_compute_eflags(s);
1049 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1050 .mask = CC_O };
1051 }
1608ecca
PB
1052}
1053
1054/* compute eflags.Z to reg */
bec93d72 1055static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
1608ecca 1056{
086c4077
RH
1057 switch (s->cc_op) {
1058 case CC_OP_DYNAMIC:
1059 gen_compute_eflags(s);
1060 /* FALLTHRU */
1061 case CC_OP_EFLAGS:
cd7f97ca
RH
1062 case CC_OP_ADCX:
1063 case CC_OP_ADOX:
1064 case CC_OP_ADCOX:
bec93d72
RH
1065 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1066 .mask = CC_Z };
436ff2d2
RH
1067 case CC_OP_CLR:
1068 return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
4885c3c4
RH
1069 case CC_OP_POPCNT:
1070 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
1071 .mask = -1 };
086c4077
RH
1072 default:
1073 {
14776ab5 1074 MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
086c4077 1075 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
bec93d72 1076 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
086c4077 1077 }
bec93d72
RH
1078 }
1079}
1080
c365395e 1081/* perform a conditional store into register 'reg' according to jump opcode
bad5cfcd 1082 value 'b'. In the fast case, T0 is guaranteed not to be used. */
276e6b5f 1083static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
8e1c85e3 1084{
d67dc9e6 1085 int inv, jcc_op, cond;
14776ab5 1086 MemOp size;
276e6b5f 1087 CCPrepare cc;
c365395e
PB
1088 TCGv t0;
1089
1090 inv = b & 1;
8e1c85e3 1091 jcc_op = (b >> 1) & 7;
c365395e
PB
1092
1093 switch (s->cc_op) {
69d1aa31
RH
1094 case CC_OP_SUBB ... CC_OP_SUBQ:
1095 /* We optimize relational operators for the cmp/jcc case. */
c365395e
PB
1096 size = s->cc_op - CC_OP_SUBB;
1097 switch (jcc_op) {
1098 case JCC_BE:
5022f28f
EC
1099 tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
1100 gen_extu(size, s->tmp4);
fbd80f02 1101 t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
5022f28f 1102 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4,
276e6b5f 1103 .reg2 = t0, .mask = -1, .use_reg2 = true };
c365395e 1104 break;
8e1c85e3 1105
c365395e 1106 case JCC_L:
276e6b5f 1107 cond = TCG_COND_LT;
c365395e
PB
1108 goto fast_jcc_l;
1109 case JCC_LE:
276e6b5f 1110 cond = TCG_COND_LE;
c365395e 1111 fast_jcc_l:
5022f28f
EC
1112 tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
1113 gen_exts(size, s->tmp4);
fbd80f02 1114 t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true);
5022f28f 1115 cc = (CCPrepare) { .cond = cond, .reg = s->tmp4,
276e6b5f 1116 .reg2 = t0, .mask = -1, .use_reg2 = true };
c365395e 1117 break;
8e1c85e3 1118
c365395e 1119 default:
8e1c85e3 1120 goto slow_jcc;
c365395e 1121 }
8e1c85e3 1122 break;
c365395e 1123
8e1c85e3
FB
1124 default:
1125 slow_jcc:
69d1aa31
RH
1126 /* This actually generates good code for JC, JZ and JS. */
1127 switch (jcc_op) {
1128 case JCC_O:
1129 cc = gen_prepare_eflags_o(s, reg);
1130 break;
1131 case JCC_B:
1132 cc = gen_prepare_eflags_c(s, reg);
1133 break;
1134 case JCC_Z:
1135 cc = gen_prepare_eflags_z(s, reg);
1136 break;
1137 case JCC_BE:
1138 gen_compute_eflags(s);
1139 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1140 .mask = CC_Z | CC_C };
1141 break;
1142 case JCC_S:
1143 cc = gen_prepare_eflags_s(s, reg);
1144 break;
1145 case JCC_P:
1146 cc = gen_prepare_eflags_p(s, reg);
1147 break;
1148 case JCC_L:
1149 gen_compute_eflags(s);
11f4e8f8 1150 if (reg == cpu_cc_src) {
fbd80f02 1151 reg = s->tmp0;
69d1aa31 1152 }
6032627f 1153 tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
69d1aa31 1154 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
6032627f 1155 .mask = CC_O };
69d1aa31
RH
1156 break;
1157 default:
1158 case JCC_LE:
1159 gen_compute_eflags(s);
11f4e8f8 1160 if (reg == cpu_cc_src) {
fbd80f02 1161 reg = s->tmp0;
69d1aa31 1162 }
6032627f 1163 tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
69d1aa31 1164 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
6032627f 1165 .mask = CC_O | CC_Z };
69d1aa31
RH
1166 break;
1167 }
c365395e 1168 break;
8e1c85e3 1169 }
276e6b5f
RH
1170
1171 if (inv) {
1172 cc.cond = tcg_invert_cond(cc.cond);
1173 }
1174 return cc;
8e1c85e3
FB
1175}
1176
cc8b6f5b
PB
1177static void gen_setcc1(DisasContext *s, int b, TCGv reg)
1178{
1179 CCPrepare cc = gen_prepare_cc(s, b, reg);
1180
1181 if (cc.no_setcond) {
1182 if (cc.cond == TCG_COND_EQ) {
1183 tcg_gen_xori_tl(reg, cc.reg, 1);
1184 } else {
1185 tcg_gen_mov_tl(reg, cc.reg);
1186 }
1187 return;
1188 }
1189
1190 if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
1191 cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
1192 tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
1193 tcg_gen_andi_tl(reg, reg, 1);
1194 return;
1195 }
1196 if (cc.mask != -1) {
1197 tcg_gen_andi_tl(reg, cc.reg, cc.mask);
1198 cc.reg = reg;
1199 }
1200 if (cc.use_reg2) {
1201 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1202 } else {
1203 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1204 }
1205}
1206
1207static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1208{
1209 gen_setcc1(s, JCC_B << 1, reg);
1210}
276e6b5f 1211
8e1c85e3 1212/* generate a conditional jump to label 'l1' according to jump opcode
bad5cfcd 1213 value 'b'. In the fast case, T0 is guaranteed not to be used. */
42a268c2 1214static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
dc259201 1215{
c66f9727 1216 CCPrepare cc = gen_prepare_cc(s, b, s->T0);
dc259201
RH
1217
1218 if (cc.mask != -1) {
c66f9727
EC
1219 tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
1220 cc.reg = s->T0;
dc259201
RH
1221 }
1222 if (cc.use_reg2) {
1223 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1224 } else {
1225 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1226 }
1227}
1228
1229/* Generate a conditional jump to label 'l1' according to jump opcode
bad5cfcd 1230 value 'b'. In the fast case, T0 is guaranteed not to be used.
dc259201 1231 A translation block must end soon. */
42a268c2 1232static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
8e1c85e3 1233{
c66f9727 1234 CCPrepare cc = gen_prepare_cc(s, b, s->T0);
8e1c85e3 1235
dc259201 1236 gen_update_cc_op(s);
943131ca 1237 if (cc.mask != -1) {
c66f9727
EC
1238 tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
1239 cc.reg = s->T0;
943131ca 1240 }
dc259201 1241 set_cc_op(s, CC_OP_DYNAMIC);
943131ca
PB
1242 if (cc.use_reg2) {
1243 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1244 } else {
1245 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
8e1c85e3
FB
1246 }
1247}
1248
14ce26e7
FB
1249/* XXX: does not work with gdbstub "ice" single step - not a
1250 serious problem */
122e6d7b 1251static TCGLabel *gen_jz_ecx_string(DisasContext *s)
2c0262af 1252{
42a268c2
RH
1253 TCGLabel *l1 = gen_new_label();
1254 TCGLabel *l2 = gen_new_label();
0ebacb5d 1255 gen_op_jnz_ecx(s, l1);
14ce26e7 1256 gen_set_label(l2);
2255da49 1257 gen_jmp_rel_csize(s, 0, 1);
14ce26e7
FB
1258 gen_set_label(l1);
1259 return l2;
2c0262af
FB
1260}
1261
122e6d7b 1262static void gen_stos(DisasContext *s, MemOp ot)
2c0262af 1263{
1dbe15ef 1264 gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
2c0262af 1265 gen_string_movl_A0_EDI(s);
c66f9727 1266 gen_op_st_v(s, ot, s->T0, s->A0);
c0099cd4 1267 gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
2c0262af
FB
1268}
1269
122e6d7b 1270static void gen_lods(DisasContext *s, MemOp ot)
2c0262af
FB
1271{
1272 gen_string_movl_A0_ESI(s);
c66f9727 1273 gen_op_ld_v(s, ot, s->T0, s->A0);
1dbe15ef 1274 gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
c0099cd4 1275 gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
2c0262af
FB
1276}
1277
122e6d7b 1278static void gen_scas(DisasContext *s, MemOp ot)
2c0262af 1279{
2c0262af 1280 gen_string_movl_A0_EDI(s);
b48597b0 1281 gen_op_ld_v(s, ot, s->T1, s->A0);
63633fe6 1282 gen_op(s, OP_CMPL, ot, R_EAX);
c0099cd4 1283 gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
2c0262af
FB
1284}
1285
122e6d7b 1286static void gen_cmps(DisasContext *s, MemOp ot)
2c0262af 1287{
c0099cd4
PB
1288 TCGv dshift;
1289
2c0262af 1290 gen_string_movl_A0_EDI(s);
b48597b0 1291 gen_op_ld_v(s, ot, s->T1, s->A0);
63633fe6
RH
1292 gen_string_movl_A0_ESI(s);
1293 gen_op(s, OP_CMPL, ot, OR_TMP0);
c0099cd4
PB
1294
1295 dshift = gen_compute_Dshift(s, ot);
1296 gen_op_add_reg(s, s->aflag, R_ESI, dshift);
1297 gen_op_add_reg(s, s->aflag, R_EDI, dshift);
2c0262af
FB
1298}
1299
5223a942
EH
1300static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
1301{
1302 if (s->flags & HF_IOBPT_MASK) {
6d8d1a03
CF
1303#ifdef CONFIG_USER_ONLY
1304 /* user-mode cpu should not be in IOBPT mode */
1305 g_assert_not_reached();
1306#else
9e599bf7
RH
1307 TCGv_i32 t_size = tcg_constant_i32(1 << ot);
1308 TCGv t_next = eip_next_tl(s);
ad75a51e 1309 gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
6d8d1a03 1310#endif /* CONFIG_USER_ONLY */
5223a942
EH
1311 }
1312}
1313
122e6d7b 1314static void gen_ins(DisasContext *s, MemOp ot)
2c0262af 1315{
2c0262af 1316 gen_string_movl_A0_EDI(s);
6e0d8677
FB
1317 /* Note: we must do this dummy write first to be restartable in
1318 case of page fault. */
c66f9727
EC
1319 tcg_gen_movi_tl(s->T0, 0);
1320 gen_op_st_v(s, ot, s->T0, s->A0);
6bd48f6f
EC
1321 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
1322 tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
1323 gen_helper_in_func(ot, s->T0, s->tmp2_i32);
c66f9727 1324 gen_op_st_v(s, ot, s->T0, s->A0);
c0099cd4 1325 gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
6bd48f6f 1326 gen_bpt_io(s, s->tmp2_i32, ot);
2c0262af
FB
1327}
1328
122e6d7b 1329static void gen_outs(DisasContext *s, MemOp ot)
2c0262af
FB
1330{
1331 gen_string_movl_A0_ESI(s);
c66f9727 1332 gen_op_ld_v(s, ot, s->T0, s->A0);
b8b6a50b 1333
6bd48f6f
EC
1334 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
1335 tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
4f82446d
EC
1336 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
1337 gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
c0099cd4 1338 gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
6bd48f6f 1339 gen_bpt_io(s, s->tmp2_i32, ot);
2c0262af
FB
1340}
1341
122e6d7b
RH
1342/* Generate jumps to current or next instruction */
1343static void gen_repz(DisasContext *s, MemOp ot,
1344 void (*fn)(DisasContext *s, MemOp ot))
1345{
1346 TCGLabel *l2;
1347 gen_update_cc_op(s);
1348 l2 = gen_jz_ecx_string(s);
1349 fn(s, ot);
1350 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
1351 /*
1352 * A loop would cause two single step exceptions if ECX = 1
1353 * before rep string_insn
1354 */
1355 if (s->repz_opt) {
0ebacb5d 1356 gen_op_jz_ecx(s, l2);
122e6d7b 1357 }
2255da49 1358 gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
2c0262af
FB
1359}
1360
122e6d7b
RH
1361#define GEN_REPZ(op) \
1362 static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \
1363 { gen_repz(s, ot, gen_##op); }
1364
1365static void gen_repz2(DisasContext *s, MemOp ot, int nz,
1366 void (*fn)(DisasContext *s, MemOp ot))
1367{
1368 TCGLabel *l2;
1369 gen_update_cc_op(s);
1370 l2 = gen_jz_ecx_string(s);
1371 fn(s, ot);
1372 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
1373 gen_update_cc_op(s);
1374 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
1375 if (s->repz_opt) {
0ebacb5d 1376 gen_op_jz_ecx(s, l2);
122e6d7b 1377 }
2255da49 1378 gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
122e6d7b
RH
1379}
1380
1381#define GEN_REPZ2(op) \
1382 static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
1383 { gen_repz2(s, ot, nz, gen_##op); }
1384
2c0262af
FB
1385GEN_REPZ(movs)
1386GEN_REPZ(stos)
1387GEN_REPZ(lods)
1388GEN_REPZ(ins)
1389GEN_REPZ(outs)
1390GEN_REPZ2(scas)
1391GEN_REPZ2(cmps)
1392
a7812ae4
PB
1393static void gen_helper_fp_arith_ST0_FT0(int op)
1394{
1395 switch (op) {
d3eb5eae 1396 case 0:
ad75a51e 1397 gen_helper_fadd_ST0_FT0(tcg_env);
d3eb5eae
BS
1398 break;
1399 case 1:
ad75a51e 1400 gen_helper_fmul_ST0_FT0(tcg_env);
d3eb5eae
BS
1401 break;
1402 case 2:
ad75a51e 1403 gen_helper_fcom_ST0_FT0(tcg_env);
d3eb5eae
BS
1404 break;
1405 case 3:
ad75a51e 1406 gen_helper_fcom_ST0_FT0(tcg_env);
d3eb5eae
BS
1407 break;
1408 case 4:
ad75a51e 1409 gen_helper_fsub_ST0_FT0(tcg_env);
d3eb5eae
BS
1410 break;
1411 case 5:
ad75a51e 1412 gen_helper_fsubr_ST0_FT0(tcg_env);
d3eb5eae
BS
1413 break;
1414 case 6:
ad75a51e 1415 gen_helper_fdiv_ST0_FT0(tcg_env);
d3eb5eae
BS
1416 break;
1417 case 7:
ad75a51e 1418 gen_helper_fdivr_ST0_FT0(tcg_env);
d3eb5eae 1419 break;
a7812ae4
PB
1420 }
1421}
2c0262af
FB
1422
1423/* NOTE the exception in "r" op ordering */
a7812ae4
PB
1424static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1425{
3df11bb1 1426 TCGv_i32 tmp = tcg_constant_i32(opreg);
a7812ae4 1427 switch (op) {
d3eb5eae 1428 case 0:
ad75a51e 1429 gen_helper_fadd_STN_ST0(tcg_env, tmp);
d3eb5eae
BS
1430 break;
1431 case 1:
ad75a51e 1432 gen_helper_fmul_STN_ST0(tcg_env, tmp);
d3eb5eae
BS
1433 break;
1434 case 4:
ad75a51e 1435 gen_helper_fsubr_STN_ST0(tcg_env, tmp);
d3eb5eae
BS
1436 break;
1437 case 5:
ad75a51e 1438 gen_helper_fsub_STN_ST0(tcg_env, tmp);
d3eb5eae
BS
1439 break;
1440 case 6:
ad75a51e 1441 gen_helper_fdivr_STN_ST0(tcg_env, tmp);
d3eb5eae
BS
1442 break;
1443 case 7:
ad75a51e 1444 gen_helper_fdiv_STN_ST0(tcg_env, tmp);
d3eb5eae 1445 break;
a7812ae4
PB
1446 }
1447}
2c0262af 1448
52236550 1449static void gen_exception(DisasContext *s, int trapno)
e84fcd7f
RH
1450{
1451 gen_update_cc_op(s);
65e4af23 1452 gen_update_eip_cur(s);
ad75a51e 1453 gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
e84fcd7f
RH
1454 s->base.is_jmp = DISAS_NORETURN;
1455}
1456
1457/* Generate #UD for the current instruction. The assumption here is that
1458 the instruction is known, but it isn't allowed in the current cpu mode. */
1459static void gen_illegal_opcode(DisasContext *s)
1460{
52236550 1461 gen_exception(s, EXCP06_ILLOP);
e84fcd7f
RH
1462}
1463
6bd99586
RH
1464/* Generate #GP for the current instruction. */
1465static void gen_exception_gpf(DisasContext *s)
1466{
52236550 1467 gen_exception(s, EXCP0D_GPF);
6bd99586
RH
1468}
1469
bc19f505
RH
1470/* Check for cpl == 0; if not, raise #GP and return false. */
1471static bool check_cpl0(DisasContext *s)
1472{
01b9d8c1 1473 if (CPL(s) == 0) {
bc19f505
RH
1474 return true;
1475 }
1476 gen_exception_gpf(s);
1477 return false;
1478}
1479
aa9f21b1
RH
1480/* If vm86, check for iopl == 3; if not, raise #GP and return false. */
1481static bool check_vm86_iopl(DisasContext *s)
1482{
f8a35846 1483 if (!VM86(s) || IOPL(s) == 3) {
aa9f21b1
RH
1484 return true;
1485 }
1486 gen_exception_gpf(s);
1487 return false;
1488}
1489
ca7874c2
RH
1490/* Check for iopl allowing access; if not, raise #GP and return false. */
1491static bool check_iopl(DisasContext *s)
1492{
f8a35846 1493 if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
ca7874c2
RH
1494 return true;
1495 }
1496 gen_exception_gpf(s);
1497 return false;
1498}
1499
2c0262af 1500/* if d == OR_TMP0, it means memory operand (address in A0) */
14776ab5 1501static void gen_op(DisasContext *s1, int op, MemOp ot, int d)
2c0262af 1502{
2c0262af 1503 if (d != OR_TMP0) {
e84fcd7f
RH
1504 if (s1->prefix & PREFIX_LOCK) {
1505 /* Lock prefix when destination is not memory. */
1506 gen_illegal_opcode(s1);
1507 return;
1508 }
1dbe15ef 1509 gen_op_mov_v_reg(s1, ot, s1->T0, d);
a7cee522 1510 } else if (!(s1->prefix & PREFIX_LOCK)) {
c66f9727 1511 gen_op_ld_v(s1, ot, s1->T0, s1->A0);
2c0262af
FB
1512 }
1513 switch(op) {
1514 case OP_ADCL:
5022f28f 1515 gen_compute_eflags_c(s1, s1->tmp4);
a7cee522 1516 if (s1->prefix & PREFIX_LOCK) {
5022f28f 1517 tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1);
c66f9727 1518 tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
a7cee522
EC
1519 s1->mem_index, ot | MO_LE);
1520 } else {
b48597b0 1521 tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
5022f28f 1522 tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4);
a7cee522
EC
1523 gen_op_st_rm_T0_A0(s1, ot, d);
1524 }
5022f28f 1525 gen_op_update3_cc(s1, s1->tmp4);
988c3eb0 1526 set_cc_op(s1, CC_OP_ADCB + ot);
cad3a37d 1527 break;
2c0262af 1528 case OP_SBBL:
5022f28f 1529 gen_compute_eflags_c(s1, s1->tmp4);
a7cee522 1530 if (s1->prefix & PREFIX_LOCK) {
5022f28f 1531 tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4);
c66f9727
EC
1532 tcg_gen_neg_tl(s1->T0, s1->T0);
1533 tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
a7cee522
EC
1534 s1->mem_index, ot | MO_LE);
1535 } else {
b48597b0 1536 tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
5022f28f 1537 tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4);
a7cee522
EC
1538 gen_op_st_rm_T0_A0(s1, ot, d);
1539 }
5022f28f 1540 gen_op_update3_cc(s1, s1->tmp4);
988c3eb0 1541 set_cc_op(s1, CC_OP_SBBB + ot);
cad3a37d 1542 break;
2c0262af 1543 case OP_ADDL:
a7cee522 1544 if (s1->prefix & PREFIX_LOCK) {
b48597b0 1545 tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1,
a7cee522
EC
1546 s1->mem_index, ot | MO_LE);
1547 } else {
b48597b0 1548 tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
a7cee522
EC
1549 gen_op_st_rm_T0_A0(s1, ot, d);
1550 }
c66f9727 1551 gen_op_update2_cc(s1);
3ca51d07 1552 set_cc_op(s1, CC_OP_ADDB + ot);
2c0262af
FB
1553 break;
1554 case OP_SUBL:
a7cee522 1555 if (s1->prefix & PREFIX_LOCK) {
b48597b0 1556 tcg_gen_neg_tl(s1->T0, s1->T1);
c66f9727 1557 tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0,
a7cee522 1558 s1->mem_index, ot | MO_LE);
b48597b0 1559 tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1);
a7cee522 1560 } else {
c66f9727 1561 tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
b48597b0 1562 tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
a7cee522
EC
1563 gen_op_st_rm_T0_A0(s1, ot, d);
1564 }
c66f9727 1565 gen_op_update2_cc(s1);
3ca51d07 1566 set_cc_op(s1, CC_OP_SUBB + ot);
2c0262af
FB
1567 break;
1568 default:
1569 case OP_ANDL:
a7cee522 1570 if (s1->prefix & PREFIX_LOCK) {
b48597b0 1571 tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1,
a7cee522
EC
1572 s1->mem_index, ot | MO_LE);
1573 } else {
b48597b0 1574 tcg_gen_and_tl(s1->T0, s1->T0, s1->T1);
a7cee522
EC
1575 gen_op_st_rm_T0_A0(s1, ot, d);
1576 }
c66f9727 1577 gen_op_update1_cc(s1);
3ca51d07 1578 set_cc_op(s1, CC_OP_LOGICB + ot);
57fec1fe 1579 break;
2c0262af 1580 case OP_ORL:
a7cee522 1581 if (s1->prefix & PREFIX_LOCK) {
b48597b0 1582 tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1,
a7cee522
EC
1583 s1->mem_index, ot | MO_LE);
1584 } else {
b48597b0 1585 tcg_gen_or_tl(s1->T0, s1->T0, s1->T1);
a7cee522
EC
1586 gen_op_st_rm_T0_A0(s1, ot, d);
1587 }
c66f9727 1588 gen_op_update1_cc(s1);
3ca51d07 1589 set_cc_op(s1, CC_OP_LOGICB + ot);
57fec1fe 1590 break;
2c0262af 1591 case OP_XORL:
a7cee522 1592 if (s1->prefix & PREFIX_LOCK) {
b48597b0 1593 tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1,
a7cee522
EC
1594 s1->mem_index, ot | MO_LE);
1595 } else {
b48597b0 1596 tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1);
a7cee522
EC
1597 gen_op_st_rm_T0_A0(s1, ot, d);
1598 }
c66f9727 1599 gen_op_update1_cc(s1);
3ca51d07 1600 set_cc_op(s1, CC_OP_LOGICB + ot);
2c0262af
FB
1601 break;
1602 case OP_CMPL:
b48597b0 1603 tcg_gen_mov_tl(cpu_cc_src, s1->T1);
c66f9727 1604 tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
b48597b0 1605 tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
3ca51d07 1606 set_cc_op(s1, CC_OP_SUBB + ot);
2c0262af
FB
1607 break;
1608 }
b6abf97d
FB
1609}
1610
2c0262af 1611/* if d == OR_TMP0, it means memory operand (address in A0) */
14776ab5 1612static void gen_inc(DisasContext *s1, MemOp ot, int d, int c)
2c0262af 1613{
60e57346 1614 if (s1->prefix & PREFIX_LOCK) {
8cb2ca3d
PM
1615 if (d != OR_TMP0) {
1616 /* Lock prefix when destination is not memory */
1617 gen_illegal_opcode(s1);
1618 return;
1619 }
c66f9727
EC
1620 tcg_gen_movi_tl(s1->T0, c > 0 ? 1 : -1);
1621 tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
60e57346 1622 s1->mem_index, ot | MO_LE);
909be183 1623 } else {
60e57346 1624 if (d != OR_TMP0) {
1dbe15ef 1625 gen_op_mov_v_reg(s1, ot, s1->T0, d);
60e57346 1626 } else {
c66f9727 1627 gen_op_ld_v(s1, ot, s1->T0, s1->A0);
60e57346 1628 }
c66f9727 1629 tcg_gen_addi_tl(s1->T0, s1->T0, (c > 0 ? 1 : -1));
60e57346 1630 gen_op_st_rm_T0_A0(s1, ot, d);
909be183 1631 }
60e57346 1632
cc8b6f5b 1633 gen_compute_eflags_c(s1, cpu_cc_src);
c66f9727 1634 tcg_gen_mov_tl(cpu_cc_dst, s1->T0);
60e57346 1635 set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
2c0262af
FB
1636}
1637
14776ab5 1638static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
d67dc9e6 1639 TCGv shm1, TCGv count, bool is_right)
f437d0a3
RH
1640{
1641 TCGv_i32 z32, s32, oldop;
1642 TCGv z_tl;
1643
1644 /* Store the results into the CC variables. If we know that the
1645 variable must be dead, store unconditionally. Otherwise we'll
1646 need to not disrupt the current contents. */
3df11bb1 1647 z_tl = tcg_constant_tl(0);
f437d0a3
RH
1648 if (cc_op_live[s->cc_op] & USES_CC_DST) {
1649 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
1650 result, cpu_cc_dst);
1651 } else {
1652 tcg_gen_mov_tl(cpu_cc_dst, result);
1653 }
1654 if (cc_op_live[s->cc_op] & USES_CC_SRC) {
1655 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
1656 shm1, cpu_cc_src);
1657 } else {
1658 tcg_gen_mov_tl(cpu_cc_src, shm1);
1659 }
f437d0a3
RH
1660
1661 /* Get the two potential CC_OP values into temporaries. */
6bd48f6f 1662 tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
f437d0a3
RH
1663 if (s->cc_op == CC_OP_DYNAMIC) {
1664 oldop = cpu_cc_op;
1665 } else {
4f82446d
EC
1666 tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
1667 oldop = s->tmp3_i32;
f437d0a3
RH
1668 }
1669
1670 /* Conditionally store the CC_OP value. */
3df11bb1 1671 z32 = tcg_constant_i32(0);
f437d0a3
RH
1672 s32 = tcg_temp_new_i32();
1673 tcg_gen_trunc_tl_i32(s32, count);
6bd48f6f 1674 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);
f437d0a3
RH
1675
1676 /* The CC_OP value is no longer predictable. */
1677 set_cc_op(s, CC_OP_DYNAMIC);
1678}
1679
14776ab5 1680static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1,
b6abf97d 1681 int is_right, int is_arith)
2c0262af 1682{
4ba9938c 1683 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
3b46e624 1684
b6abf97d 1685 /* load */
82786041 1686 if (op1 == OR_TMP0) {
c66f9727 1687 gen_op_ld_v(s, ot, s->T0, s->A0);
82786041 1688 } else {
1dbe15ef 1689 gen_op_mov_v_reg(s, ot, s->T0, op1);
82786041 1690 }
b6abf97d 1691
b48597b0 1692 tcg_gen_andi_tl(s->T1, s->T1, mask);
fbd80f02 1693 tcg_gen_subi_tl(s->tmp0, s->T1, 1);
b6abf97d
FB
1694
1695 if (is_right) {
1696 if (is_arith) {
c66f9727 1697 gen_exts(ot, s->T0);
fbd80f02 1698 tcg_gen_sar_tl(s->tmp0, s->T0, s->tmp0);
b48597b0 1699 tcg_gen_sar_tl(s->T0, s->T0, s->T1);
b6abf97d 1700 } else {
c66f9727 1701 gen_extu(ot, s->T0);
fbd80f02 1702 tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
b48597b0 1703 tcg_gen_shr_tl(s->T0, s->T0, s->T1);
b6abf97d
FB
1704 }
1705 } else {
fbd80f02 1706 tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
b48597b0 1707 tcg_gen_shl_tl(s->T0, s->T0, s->T1);
b6abf97d
FB
1708 }
1709
1710 /* store */
d4faa3e0 1711 gen_op_st_rm_T0_A0(s, ot, op1);
82786041 1712
fbd80f02 1713 gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right);
b6abf97d
FB
1714}
1715
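/*
 * Illustrative sketch, not part of translate.c: gen_shift_rm_T1() above also
 * computes the value shifted by (count - 1) so that the CC helpers can later
 * recover CF as the last bit shifted out.  A host-side model of that recovery
 * for a 32-bit operand, assuming the count has already been masked to 1..31
 * (a count of 0 leaves the flags untouched); the helper names are hypothetical.
 */
#include <stdint.h>
#include <stdbool.h>

static bool shl32_cf(uint32_t val, unsigned count)
{
    /* CF = MSB of the value shifted left by count - 1. */
    return (val << (count - 1)) >> 31;
}

static bool shr32_cf(uint32_t val, unsigned count)
{
    /* CF = LSB of the value shifted right by count - 1 (SHR and SAR alike). */
    return (val >> (count - 1)) & 1;
}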
14776ab5 1716static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
c1c37968
FB
1717 int is_right, int is_arith)
1718{
4ba9938c 1719 int mask = (ot == MO_64 ? 0x3f : 0x1f);
c1c37968
FB
1720
1721 /* load */
1722 if (op1 == OR_TMP0)
c66f9727 1723 gen_op_ld_v(s, ot, s->T0, s->A0);
c1c37968 1724 else
1dbe15ef 1725 gen_op_mov_v_reg(s, ot, s->T0, op1);
c1c37968
FB
1726
1727 op2 &= mask;
1728 if (op2 != 0) {
1729 if (is_right) {
1730 if (is_arith) {
c66f9727 1731 gen_exts(ot, s->T0);
5022f28f 1732 tcg_gen_sari_tl(s->tmp4, s->T0, op2 - 1);
c66f9727 1733 tcg_gen_sari_tl(s->T0, s->T0, op2);
c1c37968 1734 } else {
c66f9727 1735 gen_extu(ot, s->T0);
5022f28f 1736 tcg_gen_shri_tl(s->tmp4, s->T0, op2 - 1);
c66f9727 1737 tcg_gen_shri_tl(s->T0, s->T0, op2);
c1c37968
FB
1738 }
1739 } else {
5022f28f 1740 tcg_gen_shli_tl(s->tmp4, s->T0, op2 - 1);
c66f9727 1741 tcg_gen_shli_tl(s->T0, s->T0, op2);
c1c37968
FB
1742 }
1743 }
1744
1745 /* store */
d4faa3e0
RH
1746 gen_op_st_rm_T0_A0(s, ot, op1);
1747
c1c37968
FB
 1748 /* update eflags if non-zero shift */
1749 if (op2 != 0) {
5022f28f 1750 tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
c66f9727 1751 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3ca51d07 1752 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
c1c37968
FB
1753 }
1754}
1755
14776ab5 1756static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right)
b6abf97d 1757{
4ba9938c 1758 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
34d80a55 1759 TCGv_i32 t0, t1;
b6abf97d
FB
1760
1761 /* load */
1e4840bf 1762 if (op1 == OR_TMP0) {
c66f9727 1763 gen_op_ld_v(s, ot, s->T0, s->A0);
1e4840bf 1764 } else {
1dbe15ef 1765 gen_op_mov_v_reg(s, ot, s->T0, op1);
1e4840bf 1766 }
b6abf97d 1767
b48597b0 1768 tcg_gen_andi_tl(s->T1, s->T1, mask);
b6abf97d 1769
34d80a55 1770 switch (ot) {
4ba9938c 1771 case MO_8:
34d80a55 1772 /* Replicate the 8-bit input so that a 32-bit rotate works. */
c66f9727
EC
1773 tcg_gen_ext8u_tl(s->T0, s->T0);
1774 tcg_gen_muli_tl(s->T0, s->T0, 0x01010101);
34d80a55 1775 goto do_long;
4ba9938c 1776 case MO_16:
34d80a55 1777 /* Replicate the 16-bit input so that a 32-bit rotate works. */
c66f9727 1778 tcg_gen_deposit_tl(s->T0, s->T0, s->T0, 16, 16);
34d80a55
RH
1779 goto do_long;
1780 do_long:
1781#ifdef TARGET_X86_64
4ba9938c 1782 case MO_32:
6bd48f6f 1783 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
4f82446d 1784 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
34d80a55 1785 if (is_right) {
4f82446d 1786 tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
34d80a55 1787 } else {
4f82446d 1788 tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
34d80a55 1789 }
6bd48f6f 1790 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
34d80a55
RH
1791 break;
1792#endif
1793 default:
1794 if (is_right) {
b48597b0 1795 tcg_gen_rotr_tl(s->T0, s->T0, s->T1);
34d80a55 1796 } else {
b48597b0 1797 tcg_gen_rotl_tl(s->T0, s->T0, s->T1);
34d80a55
RH
1798 }
1799 break;
b6abf97d 1800 }
b6abf97d 1801
b6abf97d 1802 /* store */
d4faa3e0 1803 gen_op_st_rm_T0_A0(s, ot, op1);
b6abf97d 1804
34d80a55
RH
1805 /* We'll need the flags computed into CC_SRC. */
1806 gen_compute_eflags(s);
b6abf97d 1807
34d80a55
RH
1808 /* The value that was "rotated out" is now present at the other end
1809 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1810 since we've computed the flags into CC_SRC, these variables are
1811 currently dead. */
b6abf97d 1812 if (is_right) {
c66f9727
EC
1813 tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
1814 tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
089305ac 1815 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
34d80a55 1816 } else {
c66f9727
EC
1817 tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
1818 tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
b6abf97d 1819 }
34d80a55
RH
1820 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1821 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1822
1823 /* Now conditionally store the new CC_OP value. If the shift count
1824 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
 1825 Otherwise reuse CC_OP_ADCOX, which has the C and O flags split out
1826 exactly as we computed above. */
3df11bb1 1827 t0 = tcg_constant_i32(0);
34d80a55 1828 t1 = tcg_temp_new_i32();
b48597b0 1829 tcg_gen_trunc_tl_i32(t1, s->T1);
6bd48f6f 1830 tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX);
4f82446d 1831 tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS);
34d80a55 1832 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
4f82446d 1833 s->tmp2_i32, s->tmp3_i32);
34d80a55 1834
2e3afe8e 1835 /* The CC_OP value is no longer predictable. */
34d80a55 1836 set_cc_op(s, CC_OP_DYNAMIC);
b6abf97d
FB
1837}
1838
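/*
 * Illustrative sketch, not part of translate.c: the architectural CF/OF
 * results that gen_rot_rm_T1() above stores into CC_DST and CC_SRC2 for a
 * non-zero rotate count.  A 32-bit host-side model; rol32_flags and
 * ror32_flags are hypothetical names.
 */
#include <stdint.h>
#include <stdbool.h>

static void rol32_flags(uint32_t result, bool *cf, bool *of)
{
    *cf = result & 1;                   /* the rotated-out bit is now the LSB */
    *of = *cf ^ (result >> 31);         /* OF = CF xor new MSB */
}

static void ror32_flags(uint32_t result, bool *cf, bool *of)
{
    *cf = result >> 31;                 /* the rotated-out bit is now the MSB */
    *of = *cf ^ ((result >> 30) & 1);   /* OF = MSB xor next-to-MSB */
}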
14776ab5 1839static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
8cd6345d 1840 int is_right)
1841{
4ba9938c 1842 int mask = (ot == MO_64 ? 0x3f : 0x1f);
34d80a55 1843 int shift;
8cd6345d 1844
1845 /* load */
1846 if (op1 == OR_TMP0) {
c66f9727 1847 gen_op_ld_v(s, ot, s->T0, s->A0);
8cd6345d 1848 } else {
1dbe15ef 1849 gen_op_mov_v_reg(s, ot, s->T0, op1);
8cd6345d 1850 }
1851
8cd6345d 1852 op2 &= mask;
8cd6345d 1853 if (op2 != 0) {
34d80a55
RH
1854 switch (ot) {
1855#ifdef TARGET_X86_64
4ba9938c 1856 case MO_32:
6bd48f6f 1857 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
34d80a55 1858 if (is_right) {
6bd48f6f 1859 tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, op2);
34d80a55 1860 } else {
6bd48f6f 1861 tcg_gen_rotli_i32(s->tmp2_i32, s->tmp2_i32, op2);
34d80a55 1862 }
6bd48f6f 1863 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
34d80a55
RH
1864 break;
1865#endif
1866 default:
1867 if (is_right) {
c66f9727 1868 tcg_gen_rotri_tl(s->T0, s->T0, op2);
34d80a55 1869 } else {
c66f9727 1870 tcg_gen_rotli_tl(s->T0, s->T0, op2);
34d80a55
RH
1871 }
1872 break;
4ba9938c 1873 case MO_8:
34d80a55
RH
1874 mask = 7;
1875 goto do_shifts;
4ba9938c 1876 case MO_16:
34d80a55
RH
1877 mask = 15;
1878 do_shifts:
1879 shift = op2 & mask;
1880 if (is_right) {
1881 shift = mask + 1 - shift;
1882 }
c66f9727 1883 gen_extu(ot, s->T0);
fbd80f02 1884 tcg_gen_shli_tl(s->tmp0, s->T0, shift);
c66f9727 1885 tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift);
fbd80f02 1886 tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
34d80a55 1887 break;
8cd6345d 1888 }
8cd6345d 1889 }
1890
1891 /* store */
d4faa3e0 1892 gen_op_st_rm_T0_A0(s, ot, op1);
8cd6345d 1893
1894 if (op2 != 0) {
34d80a55 1895 /* Compute the flags into CC_SRC. */
d229edce 1896 gen_compute_eflags(s);
0ff6addd 1897
34d80a55
RH
1898 /* The value that was "rotated out" is now present at the other end
1899 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1900 since we've computed the flags into CC_SRC, these variables are
1901 currently dead. */
8cd6345d 1902 if (is_right) {
c66f9727
EC
1903 tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
1904 tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
38ebb396 1905 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
34d80a55 1906 } else {
c66f9727
EC
1907 tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
1908 tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
8cd6345d 1909 }
34d80a55
RH
1910 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1911 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1912 set_cc_op(s, CC_OP_ADCOX);
8cd6345d 1913 }
8cd6345d 1914}
1915
b6abf97d 1916/* XXX: add faster immediate = 1 case */
14776ab5 1917static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
b6abf97d
FB
1918 int is_right)
1919{
d229edce 1920 gen_compute_eflags(s);
c7b3c873 1921 assert(s->cc_op == CC_OP_EFLAGS);
b6abf97d
FB
1922
1923 /* load */
1924 if (op1 == OR_TMP0)
c66f9727 1925 gen_op_ld_v(s, ot, s->T0, s->A0);
b6abf97d 1926 else
1dbe15ef 1927 gen_op_mov_v_reg(s, ot, s->T0, op1);
2e3afe8e 1928
a7812ae4
PB
1929 if (is_right) {
1930 switch (ot) {
4ba9938c 1931 case MO_8:
ad75a51e 1932 gen_helper_rcrb(s->T0, tcg_env, s->T0, s->T1);
7923057b 1933 break;
4ba9938c 1934 case MO_16:
ad75a51e 1935 gen_helper_rcrw(s->T0, tcg_env, s->T0, s->T1);
7923057b 1936 break;
4ba9938c 1937 case MO_32:
ad75a51e 1938 gen_helper_rcrl(s->T0, tcg_env, s->T0, s->T1);
7923057b 1939 break;
a7812ae4 1940#ifdef TARGET_X86_64
4ba9938c 1941 case MO_64:
ad75a51e 1942 gen_helper_rcrq(s->T0, tcg_env, s->T0, s->T1);
7923057b 1943 break;
a7812ae4 1944#endif
d67dc9e6 1945 default:
732e89f4 1946 g_assert_not_reached();
a7812ae4
PB
1947 }
1948 } else {
1949 switch (ot) {
4ba9938c 1950 case MO_8:
ad75a51e 1951 gen_helper_rclb(s->T0, tcg_env, s->T0, s->T1);
7923057b 1952 break;
4ba9938c 1953 case MO_16:
ad75a51e 1954 gen_helper_rclw(s->T0, tcg_env, s->T0, s->T1);
7923057b 1955 break;
4ba9938c 1956 case MO_32:
ad75a51e 1957 gen_helper_rcll(s->T0, tcg_env, s->T0, s->T1);
7923057b 1958 break;
a7812ae4 1959#ifdef TARGET_X86_64
4ba9938c 1960 case MO_64:
ad75a51e 1961 gen_helper_rclq(s->T0, tcg_env, s->T0, s->T1);
7923057b 1962 break;
a7812ae4 1963#endif
d67dc9e6 1964 default:
732e89f4 1965 g_assert_not_reached();
a7812ae4
PB
1966 }
1967 }
b6abf97d 1968 /* store */
d4faa3e0 1969 gen_op_st_rm_T0_A0(s, ot, op1);
b6abf97d
FB
1970}
1971
1972/* XXX: add faster immediate case */
14776ab5 1973static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
f437d0a3 1974 bool is_right, TCGv count_in)
b6abf97d 1975{
4ba9938c 1976 target_ulong mask = (ot == MO_64 ? 63 : 31);
f437d0a3 1977 TCGv count;
b6abf97d
FB
1978
1979 /* load */
1e4840bf 1980 if (op1 == OR_TMP0) {
c66f9727 1981 gen_op_ld_v(s, ot, s->T0, s->A0);
1e4840bf 1982 } else {
1dbe15ef 1983 gen_op_mov_v_reg(s, ot, s->T0, op1);
1e4840bf 1984 }
b6abf97d 1985
f437d0a3
RH
1986 count = tcg_temp_new();
1987 tcg_gen_andi_tl(count, count_in, mask);
1e4840bf 1988
f437d0a3 1989 switch (ot) {
4ba9938c 1990 case MO_16:
f437d0a3
RH
1991 /* Note: we implement the Intel behaviour for shift count > 16.
1992 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1993 portion by constructing it as a 32-bit value. */
b6abf97d 1994 if (is_right) {
fbd80f02 1995 tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
b48597b0 1996 tcg_gen_mov_tl(s->T1, s->T0);
fbd80f02 1997 tcg_gen_mov_tl(s->T0, s->tmp0);
b6abf97d 1998 } else {
b48597b0 1999 tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
b6abf97d 2000 }
bdddc1c4
CQ
2001 /*
 2002 * If TARGET_X86_64 is defined, fall through into the MO_32 case;
 2003 * otherwise fall through to the default case.
2004 */
4ba9938c 2005 case MO_32:
bdddc1c4 2006#ifdef TARGET_X86_64
f437d0a3 2007 /* Concatenate the two 32-bit values and use a 64-bit shift. */
fbd80f02 2008 tcg_gen_subi_tl(s->tmp0, count, 1);
b6abf97d 2009 if (is_right) {
b48597b0 2010 tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
fbd80f02 2011 tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
c66f9727 2012 tcg_gen_shr_i64(s->T0, s->T0, count);
f437d0a3 2013 } else {
b48597b0 2014 tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
fbd80f02 2015 tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
c66f9727 2016 tcg_gen_shl_i64(s->T0, s->T0, count);
fbd80f02 2017 tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
c66f9727 2018 tcg_gen_shri_i64(s->T0, s->T0, 32);
f437d0a3
RH
2019 }
2020 break;
2021#endif
2022 default:
fbd80f02 2023 tcg_gen_subi_tl(s->tmp0, count, 1);
f437d0a3 2024 if (is_right) {
fbd80f02 2025 tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
b6abf97d 2026
5022f28f 2027 tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
c66f9727 2028 tcg_gen_shr_tl(s->T0, s->T0, count);
5022f28f 2029 tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
b6abf97d 2030 } else {
fbd80f02 2031 tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
4ba9938c 2032 if (ot == MO_16) {
f437d0a3 2033 /* Only needed if count > 16, for Intel behaviour. */
5022f28f
EC
2034 tcg_gen_subfi_tl(s->tmp4, 33, count);
2035 tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
2036 tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
f437d0a3
RH
2037 }
2038
5022f28f 2039 tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
c66f9727 2040 tcg_gen_shl_tl(s->T0, s->T0, count);
5022f28f 2041 tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
b6abf97d 2042 }
5022f28f
EC
2043 tcg_gen_movi_tl(s->tmp4, 0);
2044 tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
2045 s->tmp4, s->T1);
b48597b0 2046 tcg_gen_or_tl(s->T0, s->T0, s->T1);
f437d0a3 2047 break;
b6abf97d 2048 }
b6abf97d 2049
b6abf97d 2050 /* store */
d4faa3e0 2051 gen_op_st_rm_T0_A0(s, ot, op1);
1e4840bf 2052
fbd80f02 2053 gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
b6abf97d
FB
2054}
2055
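/*
 * Illustrative sketch, not part of translate.c: the Intel-style result of a
 * 16-bit SHRD when the masked count may exceed 16, modelled the same way as
 * gen_shiftd_rm_T1() above (replicate the destination around the source and
 * shift the combined A:B:A pattern).  shrd16_intel is a hypothetical name;
 * count is assumed already masked to 0..31.
 */
#include <stdint.h>

static uint16_t shrd16_intel(uint16_t a, uint16_t b, unsigned count)
{
    /* a = destination, b = source; build A:B:A and keep the low 16 bits. */
    uint64_t pat = ((uint64_t)a << 32) | ((uint32_t)b << 16) | a;

    return (uint16_t)(pat >> count);
}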
14776ab5 2056static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s)
b6abf97d
FB
2057{
2058 if (s != OR_TMP1)
1dbe15ef 2059 gen_op_mov_v_reg(s1, ot, s1->T1, s);
b6abf97d
FB
2060 switch(op) {
2061 case OP_ROL:
2062 gen_rot_rm_T1(s1, ot, d, 0);
2063 break;
2064 case OP_ROR:
2065 gen_rot_rm_T1(s1, ot, d, 1);
2066 break;
2067 case OP_SHL:
2068 case OP_SHL1:
2069 gen_shift_rm_T1(s1, ot, d, 0, 0);
2070 break;
2071 case OP_SHR:
2072 gen_shift_rm_T1(s1, ot, d, 1, 0);
2073 break;
2074 case OP_SAR:
2075 gen_shift_rm_T1(s1, ot, d, 1, 1);
2076 break;
2077 case OP_RCL:
2078 gen_rotc_rm_T1(s1, ot, d, 0);
2079 break;
2080 case OP_RCR:
2081 gen_rotc_rm_T1(s1, ot, d, 1);
2082 break;
2083 }
2c0262af
FB
2084}
2085
14776ab5 2086static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c)
2c0262af 2087{
c1c37968 2088 switch(op) {
8cd6345d 2089 case OP_ROL:
2090 gen_rot_rm_im(s1, ot, d, c, 0);
2091 break;
2092 case OP_ROR:
2093 gen_rot_rm_im(s1, ot, d, c, 1);
2094 break;
c1c37968
FB
2095 case OP_SHL:
2096 case OP_SHL1:
2097 gen_shift_rm_im(s1, ot, d, c, 0, 0);
2098 break;
2099 case OP_SHR:
2100 gen_shift_rm_im(s1, ot, d, c, 1, 0);
2101 break;
2102 case OP_SAR:
2103 gen_shift_rm_im(s1, ot, d, c, 1, 1);
2104 break;
2105 default:
2106 /* currently not optimized */
b48597b0 2107 tcg_gen_movi_tl(s1->T1, c);
c1c37968
FB
2108 gen_shift(s1, op, ot, d, OR_TMP1);
2109 break;
2110 }
2c0262af
FB
2111}
2112
b066c537
PB
2113#define X86_MAX_INSN_LENGTH 15
2114
e3af7c78
PB
2115static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
2116{
2117 uint64_t pc = s->pc;
2118
95093668
IL
2119 /* This is a subsequent insn that crosses a page boundary. */
2120 if (s->base.num_insns > 1 &&
2121 !is_same_page(&s->base, s->pc + num_bytes - 1)) {
2122 siglongjmp(s->jmpbuf, 2);
2123 }
2124
e3af7c78 2125 s->pc += num_bytes;
ad1d6f07 2126 if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
b066c537
PB
2127 /* If the instruction's 16th byte is on a different page than the 1st, a
2128 * page fault on the second page wins over the general protection fault
2129 * caused by the instruction being too long.
2130 * This can happen even if the operand is only one byte long!
2131 */
2132 if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
2133 volatile uint8_t unused =
2134 cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
2135 (void) unused;
2136 }
2137 siglongjmp(s->jmpbuf, 1);
2138 }
2139
e3af7c78
PB
2140 return pc;
2141}
2142
2143static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
2144{
4e116893 2145 return translator_ldub(env, &s->base, advance_pc(env, s, 1));
e3af7c78
PB
2146}
2147
2148static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
2149{
dac8d19b 2150 return translator_lduw(env, &s->base, advance_pc(env, s, 2));
e3af7c78
PB
2151}
2152
2153static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
2154{
4e116893 2155 return translator_lduw(env, &s->base, advance_pc(env, s, 2));
e3af7c78
PB
2156}
2157
2158static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
2159{
4e116893 2160 return translator_ldl(env, &s->base, advance_pc(env, s, 4));
e3af7c78
PB
2161}
2162
2163#ifdef TARGET_X86_64
2164static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
2165{
4e116893 2166 return translator_ldq(env, &s->base, advance_pc(env, s, 8));
e3af7c78
PB
2167}
2168#endif
2169
a074ce42
RH
2170/* Decompose an address. */
2171
2172typedef struct AddressParts {
2173 int def_seg;
2174 int base;
2175 int index;
2176 int scale;
2177 target_long disp;
2178} AddressParts;
2179
2180static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
2181 int modrm)
2c0262af 2182{
a074ce42 2183 int def_seg, base, index, scale, mod, rm;
14ce26e7 2184 target_long disp;
a074ce42 2185 bool havesib;
2c0262af 2186
d6a29149 2187 def_seg = R_DS;
a074ce42
RH
2188 index = -1;
2189 scale = 0;
2190 disp = 0;
2191
2c0262af
FB
2192 mod = (modrm >> 6) & 3;
2193 rm = modrm & 7;
a074ce42
RH
2194 base = rm | REX_B(s);
2195
2196 if (mod == 3) {
2197 /* Normally filtered out earlier, but including this path
2198 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
2199 goto done;
2200 }
2c0262af 2201
1d71ddb1
RH
2202 switch (s->aflag) {
2203 case MO_64:
2204 case MO_32:
2c0262af 2205 havesib = 0;
a074ce42 2206 if (rm == 4) {
e3af7c78 2207 int code = x86_ldub_code(env, s);
2c0262af 2208 scale = (code >> 6) & 3;
14ce26e7 2209 index = ((code >> 3) & 7) | REX_X(s);
7865eec4
RH
2210 if (index == 4) {
2211 index = -1; /* no index */
2212 }
a074ce42
RH
2213 base = (code & 7) | REX_B(s);
2214 havesib = 1;
2c0262af
FB
2215 }
2216
2217 switch (mod) {
2218 case 0:
14ce26e7 2219 if ((base & 7) == 5) {
2c0262af 2220 base = -1;
e3af7c78 2221 disp = (int32_t)x86_ldl_code(env, s);
14ce26e7 2222 if (CODE64(s) && !havesib) {
a074ce42 2223 base = -2;
14ce26e7
FB
2224 disp += s->pc + s->rip_offset;
2225 }
2c0262af
FB
2226 }
2227 break;
2228 case 1:
e3af7c78 2229 disp = (int8_t)x86_ldub_code(env, s);
2c0262af
FB
2230 break;
2231 default:
2232 case 2:
e3af7c78 2233 disp = (int32_t)x86_ldl_code(env, s);
2c0262af
FB
2234 break;
2235 }
3b46e624 2236
7865eec4
RH
2237 /* For correct popl handling with esp. */
2238 if (base == R_ESP && s->popl_esp_hack) {
2239 disp += s->popl_esp_hack;
2240 }
d6a29149
RH
2241 if (base == R_EBP || base == R_ESP) {
2242 def_seg = R_SS;
2c0262af 2243 }
1d71ddb1
RH
2244 break;
2245
2246 case MO_16:
d6a29149 2247 if (mod == 0) {
2c0262af 2248 if (rm == 6) {
a074ce42 2249 base = -1;
e3af7c78 2250 disp = x86_lduw_code(env, s);
d6a29149 2251 break;
2c0262af 2252 }
d6a29149 2253 } else if (mod == 1) {
e3af7c78 2254 disp = (int8_t)x86_ldub_code(env, s);
d6a29149 2255 } else {
e3af7c78 2256 disp = (int16_t)x86_lduw_code(env, s);
2c0262af 2257 }
7effd625 2258
7effd625 2259 switch (rm) {
2c0262af 2260 case 0:
a074ce42
RH
2261 base = R_EBX;
2262 index = R_ESI;
2c0262af
FB
2263 break;
2264 case 1:
a074ce42
RH
2265 base = R_EBX;
2266 index = R_EDI;
2c0262af
FB
2267 break;
2268 case 2:
a074ce42
RH
2269 base = R_EBP;
2270 index = R_ESI;
d6a29149 2271 def_seg = R_SS;
2c0262af
FB
2272 break;
2273 case 3:
a074ce42
RH
2274 base = R_EBP;
2275 index = R_EDI;
d6a29149 2276 def_seg = R_SS;
2c0262af
FB
2277 break;
2278 case 4:
a074ce42 2279 base = R_ESI;
2c0262af
FB
2280 break;
2281 case 5:
a074ce42 2282 base = R_EDI;
2c0262af
FB
2283 break;
2284 case 6:
a074ce42 2285 base = R_EBP;
d6a29149 2286 def_seg = R_SS;
2c0262af
FB
2287 break;
2288 default:
2289 case 7:
a074ce42 2290 base = R_EBX;
2c0262af
FB
2291 break;
2292 }
1d71ddb1
RH
2293 break;
2294
2295 default:
732e89f4 2296 g_assert_not_reached();
2c0262af 2297 }
d6a29149 2298
a074ce42
RH
2299 done:
2300 return (AddressParts){ def_seg, base, index, scale, disp };
2c0262af
FB
2301}
2302
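/*
 * Illustrative sketch, not part of translate.c: how one concrete 32-bit
 * ModRM/SIB encoding breaks down into the fields that gen_lea_modrm_0()
 * above collects into AddressParts.  The encoding below is only an example.
 */
#include <stdio.h>

int main(void)
{
    unsigned char modrm = 0x44;   /* mod=01 reg=000 rm=100 -> SIB byte + disp8 */
    unsigned char sib   = 0x48;   /* scale=01 index=001 (ECX) base=000 (EAX) */
    signed char   disp  = 0x10;

    unsigned mod   = (modrm >> 6) & 3;
    unsigned rm    = modrm & 7;
    unsigned scale = (sib >> 6) & 3;
    unsigned index = (sib >> 3) & 7;
    unsigned base  = sib & 7;

    /* Effective address: EAX + ECX * 2 + 0x10, default segment DS. */
    printf("mod=%u rm=%u base=%u index=%u scale=%u disp=%d\n",
           mod, rm, base, index, scale, (int)disp);
    return 0;
}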
a074ce42 2303/* Compute the address, with a minimum number of TCG ops. */
20581aad 2304static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
e17a36ce 2305{
f764718d 2306 TCGv ea = NULL;
3b46e624 2307
20581aad 2308 if (a.index >= 0 && !is_vsib) {
a074ce42
RH
2309 if (a.scale == 0) {
2310 ea = cpu_regs[a.index];
2311 } else {
6b672b5d
EC
2312 tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
2313 ea = s->A0;
e17a36ce 2314 }
a074ce42 2315 if (a.base >= 0) {
6b672b5d
EC
2316 tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
2317 ea = s->A0;
e17a36ce 2318 }
a074ce42
RH
2319 } else if (a.base >= 0) {
2320 ea = cpu_regs[a.base];
2321 }
f764718d 2322 if (!ea) {
2e3afe8e 2323 if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
e3a79e0e
RH
2324 /* With cpu_eip ~= pc_save, the expression is pc-relative. */
2325 tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
2326 } else {
2327 tcg_gen_movi_tl(s->A0, a.disp);
2328 }
6b672b5d 2329 ea = s->A0;
a074ce42 2330 } else if (a.disp != 0) {
6b672b5d
EC
2331 tcg_gen_addi_tl(s->A0, ea, a.disp);
2332 ea = s->A0;
a074ce42 2333 }
1d71ddb1 2334
a074ce42
RH
2335 return ea;
2336}
1d71ddb1 2337
a074ce42
RH
2338static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
2339{
2340 AddressParts a = gen_lea_modrm_0(env, s, modrm);
20581aad 2341 TCGv ea = gen_lea_modrm_1(s, a, false);
a074ce42
RH
2342 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
2343}
2344
2345static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2346{
2347 (void)gen_lea_modrm_0(env, s, modrm);
e17a36ce
FB
2348}
2349
523e28d7
RH
2350/* Used for BNDCL, BNDCU, BNDCN. */
2351static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
2352 TCGCond cond, TCGv_i64 bndv)
2353{
20581aad
PB
2354 AddressParts a = gen_lea_modrm_0(env, s, modrm);
2355 TCGv ea = gen_lea_modrm_1(s, a, false);
523e28d7 2356
776678b2 2357 tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
523e28d7 2358 if (!CODE64(s)) {
776678b2 2359 tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
523e28d7 2360 }
776678b2
EC
2361 tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
2362 tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
ad75a51e 2363 gen_helper_bndck(tcg_env, s->tmp2_i32);
523e28d7
RH
2364}
2365
664e0f19
FB
2366/* used for LEA and MOV AX, mem */
2367static void gen_add_A0_ds_seg(DisasContext *s)
2368{
6b672b5d 2369 gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override);
664e0f19
FB
2370}
2371
222a3336 2372/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2c0262af 2373 OR_TMP0 */
0af10c86 2374static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
14776ab5 2375 MemOp ot, int reg, int is_store)
2c0262af 2376{
4eeb3939 2377 int mod, rm;
2c0262af
FB
2378
2379 mod = (modrm >> 6) & 3;
14ce26e7 2380 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
2381 if (mod == 3) {
2382 if (is_store) {
2383 if (reg != OR_TMP0)
1dbe15ef
EC
2384 gen_op_mov_v_reg(s, ot, s->T0, reg);
2385 gen_op_mov_reg_v(s, ot, rm, s->T0);
2c0262af 2386 } else {
1dbe15ef 2387 gen_op_mov_v_reg(s, ot, s->T0, rm);
2c0262af 2388 if (reg != OR_TMP0)
1dbe15ef 2389 gen_op_mov_reg_v(s, ot, reg, s->T0);
2c0262af
FB
2390 }
2391 } else {
4eeb3939 2392 gen_lea_modrm(env, s, modrm);
2c0262af
FB
2393 if (is_store) {
2394 if (reg != OR_TMP0)
1dbe15ef 2395 gen_op_mov_v_reg(s, ot, s->T0, reg);
c66f9727 2396 gen_op_st_v(s, ot, s->T0, s->A0);
2c0262af 2397 } else {
c66f9727 2398 gen_op_ld_v(s, ot, s->T0, s->A0);
2c0262af 2399 if (reg != OR_TMP0)
1dbe15ef 2400 gen_op_mov_reg_v(s, ot, reg, s->T0);
2c0262af
FB
2401 }
2402 }
2403}
2404
efcca7ef
PB
2405static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
2406{
2407 target_ulong ret;
2408
2409 switch (ot) {
2410 case MO_8:
2411 ret = x86_ldub_code(env, s);
2412 break;
2413 case MO_16:
2414 ret = x86_lduw_code(env, s);
2415 break;
2416 case MO_32:
2417 ret = x86_ldl_code(env, s);
2418 break;
2419#ifdef TARGET_X86_64
2420 case MO_64:
2421 ret = x86_ldq_code(env, s);
2422 break;
2423#endif
2424 default:
2425 g_assert_not_reached();
2426 }
2427 return ret;
2428}
2429
14776ab5 2430static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
2c0262af
FB
2431{
2432 uint32_t ret;
2433
d67dc9e6 2434 switch (ot) {
4ba9938c 2435 case MO_8:
e3af7c78 2436 ret = x86_ldub_code(env, s);
2c0262af 2437 break;
4ba9938c 2438 case MO_16:
e3af7c78 2439 ret = x86_lduw_code(env, s);
2c0262af 2440 break;
4ba9938c 2441 case MO_32:
d67dc9e6
RH
2442#ifdef TARGET_X86_64
2443 case MO_64:
2444#endif
e3af7c78 2445 ret = x86_ldl_code(env, s);
2c0262af 2446 break;
d67dc9e6 2447 default:
732e89f4 2448 g_assert_not_reached();
2c0262af
FB
2449 }
2450 return ret;
2451}
2452
b3e22b23
PB
2453static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
2454{
2455 target_long ret;
2456
2457 switch (ot) {
2458 case MO_8:
2459 ret = (int8_t) x86_ldub_code(env, s);
2460 break;
2461 case MO_16:
2462 ret = (int16_t) x86_lduw_code(env, s);
2463 break;
2464 case MO_32:
2465 ret = (int32_t) x86_ldl_code(env, s);
2466 break;
2467#ifdef TARGET_X86_64
2468 case MO_64:
2469 ret = x86_ldq_code(env, s);
2470 break;
2471#endif
2472 default:
2473 g_assert_not_reached();
2474 }
2475 return ret;
2476}
2477
14776ab5 2478static inline int insn_const_size(MemOp ot)
14ce26e7 2479{
4ba9938c 2480 if (ot <= MO_32) {
14ce26e7 2481 return 1 << ot;
4ba9938c 2482 } else {
14ce26e7 2483 return 4;
4ba9938c 2484 }
14ce26e7
FB
2485}
2486
54b191de 2487static void gen_jcc(DisasContext *s, int b, int diff)
2c0262af 2488{
54b191de 2489 TCGLabel *l1 = gen_new_label();
8e1c85e3 2490
54b191de
RH
2491 gen_jcc1(s, b, l1);
2492 gen_jmp_rel_csize(s, 0, 1);
2493 gen_set_label(l1);
2494 gen_jmp_rel(s, s->dflag, diff, 0);
2c0262af
FB
2495}
2496
14776ab5 2497static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b,
f32d3781
PB
2498 int modrm, int reg)
2499{
57eb0cc8 2500 CCPrepare cc;
f32d3781 2501
57eb0cc8 2502 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
f32d3781 2503
b48597b0 2504 cc = gen_prepare_cc(s, b, s->T1);
57eb0cc8
RH
2505 if (cc.mask != -1) {
2506 TCGv t0 = tcg_temp_new();
2507 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2508 cc.reg = t0;
2509 }
2510 if (!cc.use_reg2) {
3df11bb1 2511 cc.reg2 = tcg_constant_tl(cc.imm);
f32d3781
PB
2512 }
2513
c66f9727
EC
2514 tcg_gen_movcond_tl(cc.cond, s->T0, cc.reg, cc.reg2,
2515 s->T0, cpu_regs[reg]);
1dbe15ef 2516 gen_op_mov_reg_v(s, ot, reg, s->T0);
f32d3781
PB
2517}
2518
c117e5b1 2519static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg)
3bd7da9e 2520{
ad75a51e 2521 tcg_gen_ld32u_tl(s->T0, tcg_env,
3bd7da9e
FB
2522 offsetof(CPUX86State,segs[seg_reg].selector));
2523}
2524
c117e5b1 2525static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg)
3bd7da9e 2526{
c66f9727 2527 tcg_gen_ext16u_tl(s->T0, s->T0);
ad75a51e 2528 tcg_gen_st32_tl(s->T0, tcg_env,
3bd7da9e 2529 offsetof(CPUX86State,segs[seg_reg].selector));
c66f9727 2530 tcg_gen_shli_tl(cpu_seg_base[seg_reg], s->T0, 4);
3bd7da9e
FB
2531}
2532
2c0262af
FB
2533/* move T0 to seg_reg and compute if the CPU state may change. Never
2534 call this function with seg_reg == R_CS */
c117e5b1 2535static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
2c0262af 2536{
f8a35846 2537 if (PE(s) && !VM86(s)) {
6bd48f6f 2538 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
ad75a51e 2539 gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
dc196a57
FB
2540 /* abort translation because the addseg value may change or
2541 because ss32 may change. For R_SS, translation must always
 2542 stop, as special handling must be done to disable hardware
2543 interrupts for the next instruction */
4da4523c
RH
2544 if (seg_reg == R_SS) {
2545 s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2546 } else if (CODE32(s) && seg_reg < R_FS) {
2547 s->base.is_jmp = DISAS_EOB_NEXT;
1e39d97a 2548 }
3415a4dd 2549 } else {
c66f9727 2550 gen_op_movl_seg_T0_vm(s, seg_reg);
1e39d97a 2551 if (seg_reg == R_SS) {
4da4523c 2552 s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
1e39d97a 2553 }
3415a4dd 2554 }
2c0262af
FB
2555}
2556
b53605db 2557static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
0573fbfc 2558{
872929aa 2559 /* no SVM activated; fast case */
b322b3af 2560 if (likely(!GUEST(s))) {
872929aa 2561 return;
b322b3af 2562 }
ad75a51e 2563 gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
0573fbfc
TS
2564}
2565
4f31916f
FB
2566static inline void gen_stack_update(DisasContext *s, int addend)
2567{
fbd80f02 2568 gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
4f31916f
FB
2569}
2570
432baffe
RH
2571/* Generate a push. It depends on ss32, addseg and dflag. */
2572static void gen_push_v(DisasContext *s, TCGv val)
2c0262af 2573{
14776ab5
TN
2574 MemOp d_ot = mo_pushpop(s, s->dflag);
2575 MemOp a_ot = mo_stacksize(s);
432baffe 2576 int size = 1 << d_ot;
6b672b5d 2577 TCGv new_esp = s->A0;
432baffe 2578
6b672b5d 2579 tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);
2c0262af 2580
77ebcad0 2581 if (!CODE64(s)) {
beedb93c 2582 if (ADDSEG(s)) {
1ec46bf2 2583 new_esp = tcg_temp_new();
6b672b5d 2584 tcg_gen_mov_tl(new_esp, s->A0);
2c0262af 2585 }
6b672b5d 2586 gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2c0262af 2587 }
432baffe 2588
6b672b5d 2589 gen_op_st_v(s, d_ot, val, s->A0);
1dbe15ef 2590 gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
2c0262af
FB
2591}
2592
4f31916f 2593 /* a two-step pop is necessary for precise exceptions */
14776ab5 2594static MemOp gen_pop_T0(DisasContext *s)
2c0262af 2595{
14776ab5 2596 MemOp d_ot = mo_pushpop(s, s->dflag);
8e31d234 2597
24c0573b
PB
2598 gen_lea_v_seg_dest(s, mo_stacksize(s), s->T0, cpu_regs[R_ESP], R_SS, -1);
2599 gen_op_ld_v(s, d_ot, s->T0, s->T0);
8e31d234 2600
8e31d234 2601 return d_ot;
2c0262af
FB
2602}
2603
14776ab5 2604static inline void gen_pop_update(DisasContext *s, MemOp ot)
2c0262af 2605{
8e31d234 2606 gen_stack_update(s, 1 << ot);
2c0262af
FB
2607}
2608
77ebcad0 2609static inline void gen_stack_A0(DisasContext *s)
2c0262af 2610{
b40a47a1 2611 gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
2c0262af
FB
2612}
2613
2c0262af
FB
2614static void gen_pusha(DisasContext *s)
2615{
b40a47a1 2616 MemOp s_ot = SS32(s) ? MO_32 : MO_16;
14776ab5 2617 MemOp d_ot = s->dflag;
d37ea0c0 2618 int size = 1 << d_ot;
2c0262af 2619 int i;
d37ea0c0
RH
2620
2621 for (i = 0; i < 8; i++) {
6b672b5d
EC
2622 tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size);
2623 gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
2624 gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
d37ea0c0
RH
2625 }
2626
2627 gen_stack_update(s, -8 * size);
2628}
2629
2c0262af
FB
2630static void gen_popa(DisasContext *s)
2631{
b40a47a1 2632 MemOp s_ot = SS32(s) ? MO_32 : MO_16;
14776ab5 2633 MemOp d_ot = s->dflag;
d37ea0c0 2634 int size = 1 << d_ot;
2c0262af 2635 int i;
d37ea0c0
RH
2636
2637 for (i = 0; i < 8; i++) {
2c0262af 2638 /* ESP is not reloaded */
d37ea0c0
RH
2639 if (7 - i == R_ESP) {
2640 continue;
2c0262af 2641 }
6b672b5d
EC
2642 tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size);
2643 gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
c66f9727 2644 gen_op_ld_v(s, d_ot, s->T0, s->A0);
1dbe15ef 2645 gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
2c0262af 2646 }
d37ea0c0
RH
2647
2648 gen_stack_update(s, 8 * size);
2c0262af
FB
2649}
2650
2c0262af
FB
2651static void gen_enter(DisasContext *s, int esp_addend, int level)
2652{
14776ab5 2653 MemOp d_ot = mo_pushpop(s, s->dflag);
b40a47a1 2654 MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
743e398e 2655 int size = 1 << d_ot;
2c0262af 2656
743e398e 2657 /* Push BP; compute FrameTemp into T1. */
b48597b0
EC
2658 tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2659 gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1);
6b672b5d 2660 gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
743e398e
RH
2661
2662 level &= 31;
2663 if (level != 0) {
2664 int i;
2665
2666 /* Copy level-1 pointers from the previous frame. */
2667 for (i = 1; i < level; ++i) {
6b672b5d
EC
2668 tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i);
2669 gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
fbd80f02 2670 gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
743e398e 2671
b48597b0 2672 tcg_gen_subi_tl(s->A0, s->T1, size * i);
6b672b5d 2673 gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
fbd80f02 2674 gen_op_st_v(s, d_ot, s->tmp0, s->A0);
8f091a59 2675 }
743e398e
RH
2676
2677 /* Push the current FrameTemp as the last level. */
b48597b0 2678 tcg_gen_subi_tl(s->A0, s->T1, size * level);
6b672b5d 2679 gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
b48597b0 2680 gen_op_st_v(s, d_ot, s->T1, s->A0);
2c0262af 2681 }
743e398e
RH
2682
2683 /* Copy the FrameTemp value to EBP. */
1dbe15ef 2684 gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1);
743e398e
RH
2685
2686 /* Compute the final value of ESP. */
b48597b0 2687 tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
1dbe15ef 2688 gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2c0262af
FB
2689}
2690
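/*
 * Illustrative sketch, not part of translate.c: a flat-memory model of the
 * 32-bit ENTER sequence that gen_enter() above emits (push EBP, copy level-1
 * frame pointers from the old frame, push the frame temporary, then allocate
 * 'size' bytes of locals).  enter32 and the load32/store32 helpers are
 * hypothetical; addresses simply index the local 'stack' array.
 */
#include <stdint.h>
#include <string.h>

static uint8_t stack[4096];

static uint32_t load32(uint32_t addr)
{
    uint32_t v;
    memcpy(&v, &stack[addr], 4);
    return v;
}

static void store32(uint32_t addr, uint32_t v)
{
    memcpy(&stack[addr], &v, 4);
}

static void enter32(uint32_t *esp, uint32_t *ebp, uint16_t size, unsigned level)
{
    uint32_t frame_temp = *esp - 4;

    store32(frame_temp, *ebp);                        /* push EBP */
    level &= 31;
    for (unsigned i = 1; i < level; i++) {
        /* copy the caller's level-1 frame pointers */
        store32(frame_temp - 4 * i, load32(*ebp - 4 * i));
    }
    if (level != 0) {
        store32(frame_temp - 4 * level, frame_temp);  /* push FrameTemp itself */
    }
    *ebp = frame_temp;
    *esp = frame_temp - size - 4 * level;
}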
2045f04c
RH
2691static void gen_leave(DisasContext *s)
2692{
14776ab5
TN
2693 MemOp d_ot = mo_pushpop(s, s->dflag);
2694 MemOp a_ot = mo_stacksize(s);
2045f04c
RH
2695
2696 gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
c66f9727 2697 gen_op_ld_v(s, d_ot, s->T0, s->A0);
2045f04c 2698
b48597b0 2699 tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2045f04c 2700
1dbe15ef
EC
2701 gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2702 gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2045f04c
RH
2703}
2704
b9f9c5b4
RH
2705/* Similarly, except that the assumption here is that we don't decode
2706 the instruction at all -- either a missing opcode, an unimplemented
2707 feature, or just a bogus instruction stream. */
2708static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2709{
2710 gen_illegal_opcode(s);
2711
2712 if (qemu_loglevel_mask(LOG_UNIMP)) {
c60f599b 2713 FILE *logfile = qemu_log_trylock();
78b54858 2714 if (logfile) {
ddf83b35 2715 target_ulong pc = s->base.pc_next, end = s->pc;
fc59d2d8 2716
78b54858
RH
2717 fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2718 for (; pc < end; ++pc) {
2719 fprintf(logfile, " %02x", cpu_ldub_code(env, pc));
2720 }
2721 fprintf(logfile, "\n");
2722 qemu_log_unlock(logfile);
b9f9c5b4 2723 }
b9f9c5b4
RH
2724 }
2725}
2726
2c0262af 2727/* an interrupt is different from an exception because of the
7f75ffd3 2728 privilege checks */
8ed6c985 2729static void gen_interrupt(DisasContext *s, int intno)
2c0262af 2730{
773cdfcc 2731 gen_update_cc_op(s);
65e4af23 2732 gen_update_eip_cur(s);
ad75a51e 2733 gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
ad1d6f07 2734 cur_insn_len_i32(s));
6cf147aa 2735 s->base.is_jmp = DISAS_NORETURN;
2c0262af
FB
2736}
2737
7f0b7141
RH
2738static void gen_set_hflag(DisasContext *s, uint32_t mask)
2739{
2740 if ((s->flags & mask) == 0) {
2741 TCGv_i32 t = tcg_temp_new_i32();
ad75a51e 2742 tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
7f0b7141 2743 tcg_gen_ori_i32(t, t, mask);
ad75a51e 2744 tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
7f0b7141
RH
2745 s->flags |= mask;
2746 }
2747}
2748
2749static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2750{
2751 if (s->flags & mask) {
2752 TCGv_i32 t = tcg_temp_new_i32();
ad75a51e 2753 tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
7f0b7141 2754 tcg_gen_andi_i32(t, t, ~mask);
ad75a51e 2755 tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
7f0b7141
RH
2756 s->flags &= ~mask;
2757 }
2758}
2759
63179330
RH
2760static void gen_set_eflags(DisasContext *s, target_ulong mask)
2761{
2762 TCGv t = tcg_temp_new();
2763
ad75a51e 2764 tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
63179330 2765 tcg_gen_ori_tl(t, t, mask);
ad75a51e 2766 tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
63179330
RH
2767}
2768
2769static void gen_reset_eflags(DisasContext *s, target_ulong mask)
2770{
2771 TCGv t = tcg_temp_new();
2772
ad75a51e 2773 tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
63179330 2774 tcg_gen_andi_tl(t, t, ~mask);
ad75a51e 2775 tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
63179330
RH
2776}
2777
7d117ce8
RH
2778/* Clear BND registers during legacy branches. */
2779static void gen_bnd_jmp(DisasContext *s)
2780{
8b33e82b
PB
2781 /* Clear the registers only if BND prefix is missing, MPX is enabled,
2782 and if the BNDREGs are known to be in use (non-zero) already.
2783 The helper itself will check BNDPRESERVE at runtime. */
7d117ce8 2784 if ((s->prefix & PREFIX_REPNZ) == 0
8b33e82b
PB
2785 && (s->flags & HF_MPX_EN_MASK) != 0
2786 && (s->flags & HF_MPX_IU_MASK) != 0) {
ad75a51e 2787 gen_helper_bnd_jmp(tcg_env);
7d117ce8
RH
2788 }
2789}
2790
f083d92c 2791/* Generate an end of block. Trace exception is also generated if needed.
c52ab08a
DE
2792 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
2793 If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
2794 S->TF. This is used by the syscall/sysret insns. */
1ebb1af1 2795static void
7f11636d 2796do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
2c0262af 2797{
773cdfcc 2798 gen_update_cc_op(s);
f083d92c
RH
2799
2800 /* If several instructions disable interrupts, only the first does it. */
2801 if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
2802 gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2803 } else {
2804 gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2805 }
2806
6cf147aa 2807 if (s->base.tb->flags & HF_RF_MASK) {
63179330 2808 gen_reset_eflags(s, RF_MASK);
a2397807 2809 }
4bc4c313 2810 if (recheck_tf) {
ad75a51e 2811 gen_helper_rechecking_single_step(tcg_env);
07ea28b4 2812 tcg_gen_exit_tb(NULL, 0);
c1de1a1a 2813 } else if (s->flags & HF_TF_MASK) {
ad75a51e 2814 gen_helper_single_step(tcg_env);
7f11636d
EC
2815 } else if (jr) {
2816 tcg_gen_lookup_and_goto_ptr();
2c0262af 2817 } else {
07ea28b4 2818 tcg_gen_exit_tb(NULL, 0);
2c0262af 2819 }
6cf147aa 2820 s->base.is_jmp = DISAS_NORETURN;
2c0262af
FB
2821}
2822
1ebb1af1
EC
2823static inline void
2824gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
2825{
7f11636d 2826 do_gen_eob_worker(s, inhibit, recheck_tf, false);
1ebb1af1
EC
2827}
2828
c52ab08a
DE
2829/* End of block.
2830 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. */
2831static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
2832{
2833 gen_eob_worker(s, inhibit, false);
2834}
2835
f083d92c
RH
2836/* End of block, resetting the inhibit irq flag. */
2837static void gen_eob(DisasContext *s)
2838{
c52ab08a 2839 gen_eob_worker(s, false, false);
f083d92c
RH
2840}
2841
1ebb1af1 2842/* Jump to register */
faf9ea5f 2843static void gen_jr(DisasContext *s)
1ebb1af1 2844{
7f11636d 2845 do_gen_eob_worker(s, false, false, true);
1ebb1af1
EC
2846}
2847
2255da49 2848/* Jump to eip+diff, truncating the result to OT. */
8760ded6
RH
2849static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2850{
e3a79e0e
RH
2851 bool use_goto_tb = s->jmp_opt;
2852 target_ulong mask = -1;
2853 target_ulong new_pc = s->pc + diff;
2854 target_ulong new_eip = new_pc - s->cs_base;
8760ded6
RH
2855
2856 /* In 64-bit mode, operand size is fixed at 64 bits. */
2857 if (!CODE64(s)) {
2858 if (ot == MO_16) {
e3a79e0e 2859 mask = 0xffff;
2e3afe8e 2860 if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
e3a79e0e
RH
2861 use_goto_tb = false;
2862 }
8760ded6 2863 } else {
e3a79e0e 2864 mask = 0xffffffff;
8760ded6
RH
2865 }
2866 }
e3a79e0e 2867 new_eip &= mask;
b5e0d5d2
RH
2868 new_pc = new_eip + s->cs_base;
2869 if (!CODE64(s)) {
2870 new_pc = (uint32_t)new_pc;
2871 }
900cc7e5
RH
2872
2873 gen_update_cc_op(s);
2874 set_cc_op(s, CC_OP_DYNAMIC);
e3a79e0e 2875
2e3afe8e 2876 if (tb_cflags(s->base.tb) & CF_PCREL) {
e3a79e0e
RH
2877 tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2878 /*
2879 * If we can prove the branch does not leave the page and we have
2880 * no extra masking to apply (data16 branch in code32, see above),
2881 * then we have also proven that the addition does not wrap.
2882 */
2883 if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
2884 tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2885 use_goto_tb = false;
2886 }
2887 }
2888
b5e0d5d2 2889 if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
900cc7e5
RH
2890 /* jump to same page: we can use a direct jump */
2891 tcg_gen_goto_tb(tb_num);
2e3afe8e 2892 if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
e3a79e0e
RH
2893 tcg_gen_movi_tl(cpu_eip, new_eip);
2894 }
900cc7e5
RH
2895 tcg_gen_exit_tb(s->base.tb, tb_num);
2896 s->base.is_jmp = DISAS_NORETURN;
2897 } else {
2e3afe8e 2898 if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
e3a79e0e
RH
2899 tcg_gen_movi_tl(cpu_eip, new_eip);
2900 }
2901 if (s->jmp_opt) {
2902 gen_jr(s); /* jump to another page */
2903 } else {
2904 gen_eob(s); /* exit to main loop */
2905 }
900cc7e5 2906 }
8760ded6
RH
2907}
2908
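/*
 * Illustrative sketch, not part of translate.c: the EIP truncation that
 * gen_jmp_rel() above applies outside 64-bit mode, where a data16 relative
 * jump wraps within 64 KiB and a data32 one within 4 GiB.  jmp_rel_eip is a
 * hypothetical helper taking the EIP of the next instruction.
 */
#include <stdint.h>

static uint32_t jmp_rel_eip(uint32_t next_eip, int32_t diff, int data16)
{
    uint32_t mask = data16 ? 0xffff : 0xffffffff;

    return (next_eip + diff) & mask;
}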
2255da49
RH
2909/* Jump to eip+diff, truncating to the current code size. */
2910static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
14ce26e7 2911{
2255da49
RH
2912 /* CODE64 ignores the OT argument, so we need not consider it. */
2913 gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
14ce26e7
FB
2914}
2915
323d1876 2916static inline void gen_ldq_env_A0(DisasContext *s, int offset)
8686c490 2917{
fc313c64 2918 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
ad75a51e 2919 tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
8686c490 2920}
664e0f19 2921
323d1876 2922static inline void gen_stq_env_A0(DisasContext *s, int offset)
8686c490 2923{
ad75a51e 2924 tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
fc313c64 2925 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
8686c490 2926}
664e0f19 2927
958e1dd1 2928static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
8686c490 2929{
46c684c8
RH
2930 MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2931 ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2932 MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
5c42a7cd 2933 int mem_index = s->mem_index;
46c684c8
RH
2934 TCGv_i128 t = tcg_temp_new_i128();
2935
2936 tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
2937 tcg_gen_st_i128(t, tcg_env, offset);
8686c490 2938}
14ce26e7 2939
958e1dd1 2940static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
8686c490 2941{
46c684c8
RH
2942 MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2943 ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2944 MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
5c42a7cd 2945 int mem_index = s->mem_index;
46c684c8
RH
2946 TCGv_i128 t = tcg_temp_new_i128();
2947
2948 tcg_gen_ld_i128(t, tcg_env, offset);
2949 tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
8686c490 2950}
14ce26e7 2951
6ba13999
PB
2952static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2953{
46c684c8 2954 MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
6ba13999 2955 int mem_index = s->mem_index;
46c684c8
RH
2956 TCGv_i128 t0 = tcg_temp_new_i128();
2957 TCGv_i128 t1 = tcg_temp_new_i128();
6ba13999 2958
46c684c8 2959 tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
6ba13999 2960 tcg_gen_addi_tl(s->tmp0, s->A0, 16);
46c684c8
RH
2961 tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);
2962
2963 tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2964 tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
6ba13999
PB
2965}
2966
92ec056a
PB
2967static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2968{
46c684c8 2969 MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
92ec056a 2970 int mem_index = s->mem_index;
46c684c8
RH
2971 TCGv_i128 t = tcg_temp_new_i128();
2972
2973 tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2974 tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
92ec056a 2975 tcg_gen_addi_tl(s->tmp0, s->A0, 16);
46c684c8
RH
2976 tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2977 tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
92ec056a
PB
2978}
2979
653fad24
PB
2980#include "decode-new.h"
2981#include "emit.c.inc"
2982#include "decode-new.c.inc"
664e0f19 2983
6218c177
RH
2984static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
2985{
326ad06c
RH
2986 TCGv_i64 cmp, val, old;
2987 TCGv Z;
2988
6218c177
RH
2989 gen_lea_modrm(env, s, modrm);
2990
326ad06c
RH
2991 cmp = tcg_temp_new_i64();
2992 val = tcg_temp_new_i64();
2993 old = tcg_temp_new_i64();
2994
2995 /* Construct the comparison values from the register pair. */
2996 tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
2997 tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
2998
2999 /* Only require atomic with LOCK; non-parallel handled in generator. */
3000 if (s->prefix & PREFIX_LOCK) {
3001 tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
6218c177 3002 } else {
326ad06c
RH
3003 tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
3004 s->mem_index, MO_TEUQ);
6218c177 3005 }
326ad06c
RH
3006
 3007 /* Compute the required value of Z into a new temporary. */
3008 tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
3009 Z = tcg_temp_new();
3010 tcg_gen_trunc_i64_tl(Z, cmp);
326ad06c
RH
3011
3012 /*
3013 * Extract the result values for the register pair.
3014 * For 32-bit, we may do this unconditionally, because on success (Z=1),
3015 * the old value matches the previous value in EDX:EAX. For x86_64,
3016 * the store must be conditional, because we must leave the source
3017 * registers unchanged on success, and zero-extend the writeback
3018 * on failure (Z=0).
3019 */
3020 if (TARGET_LONG_BITS == 32) {
3021 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
3022 } else {
3023 TCGv zero = tcg_constant_tl(0);
3024
3025 tcg_gen_extr_i64_tl(s->T0, s->T1, old);
3026 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
3027 s->T0, cpu_regs[R_EAX]);
3028 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
3029 s->T1, cpu_regs[R_EDX]);
3030 }
326ad06c
RH
3031
3032 /* Update Z. */
3033 gen_compute_eflags(s);
3034 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
6218c177
RH
3035}
3036
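/*
 * Illustrative sketch, not part of translate.c: the architectural behaviour
 * that gen_cmpxchg8b() above implements, ignoring atomicity.  On success the
 * register pair is left unchanged (it already equals the old memory value);
 * on failure the old value is loaded into EDX:EAX.  cmpxchg8b_model is a
 * hypothetical helper; the return value is the new ZF.
 */
#include <stdint.h>
#include <stdbool.h>

static bool cmpxchg8b_model(uint64_t *mem, uint32_t *eax, uint32_t *edx,
                            uint32_t ebx, uint32_t ecx)
{
    uint64_t cmp = ((uint64_t)*edx << 32) | *eax;
    uint64_t val = ((uint64_t)ecx << 32) | ebx;
    uint64_t old = *mem;

    if (old == cmp) {
        *mem = val;             /* store ECX:EBX, ZF = 1 */
        return true;
    }
    *eax = (uint32_t)old;       /* load the old value into EDX:EAX, ZF = 0 */
    *edx = (uint32_t)(old >> 32);
    return false;
}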
3037#ifdef TARGET_X86_64
3038static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
3039{
5f0dd8cd
RH
3040 MemOp mop = MO_TE | MO_128 | MO_ALIGN;
3041 TCGv_i64 t0, t1;
3042 TCGv_i128 cmp, val;
3043
6218c177
RH
3044 gen_lea_modrm(env, s, modrm);
3045
5f0dd8cd
RH
3046 cmp = tcg_temp_new_i128();
3047 val = tcg_temp_new_i128();
3048 tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
3049 tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
3050
3051 /* Only require atomic with LOCK; non-parallel handled in generator. */
3052 if (s->prefix & PREFIX_LOCK) {
3053 tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
6218c177 3054 } else {
5f0dd8cd 3055 tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
6218c177 3056 }
5f0dd8cd
RH
3057
3058 tcg_gen_extr_i128_i64(s->T0, s->T1, val);
5f0dd8cd
RH
3059
3060 /* Determine success after the fact. */
3061 t0 = tcg_temp_new_i64();
3062 t1 = tcg_temp_new_i64();
3063 tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
3064 tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
3065 tcg_gen_or_i64(t0, t0, t1);
5f0dd8cd
RH
3066
3067 /* Update Z. */
3068 gen_compute_eflags(s);
3069 tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
3070 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);
5f0dd8cd
RH
3071
3072 /*
3073 * Extract the result values for the register pair. We may do this
3074 * unconditionally, because on success (Z=1), the old value matches
3075 * the previous value in RDX:RAX.
3076 */
3077 tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
3078 tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
6218c177
RH
3079}
3080#endif
3081
6cf147aa 3082/* convert one instruction. s->base.is_jmp is set if the translation must
2c0262af 3083 be stopped. Return true if the instruction was translated, or false if it must be retried as the start of a new translation block. */
f66c8e8c 3084static bool disas_insn(DisasContext *s, CPUState *cpu)
2c0262af 3085{
b77af26e 3086 CPUX86State *env = cpu_env(cpu);
ab4e4aec 3087 int b, prefixes;
d67dc9e6 3088 int shift;
14776ab5 3089 MemOp ot, aflag, dflag;
4eeb3939 3090 int modrm, reg, rm, mod, op, opreg, val;
95093668
IL
3091 bool orig_cc_op_dirty = s->cc_op_dirty;
3092 CCOp orig_cc_op = s->cc_op;
913f0836 3093 target_ulong orig_pc_save = s->pc_save;
2c0262af 3094
ddf83b35 3095 s->pc = s->base.pc_next;
2c0262af 3096 s->override = -1;
14ce26e7 3097#ifdef TARGET_X86_64
bbdb4237 3098 s->rex_r = 0;
14ce26e7
FB
3099 s->rex_x = 0;
3100 s->rex_b = 0;
14ce26e7
FB
3101#endif
3102 s->rip_offset = 0; /* for relative ip address */
701ed211
RH
3103 s->vex_l = 0;
3104 s->vex_v = 0;
a61ef762 3105 s->vex_w = false;
95093668
IL
3106 switch (sigsetjmp(s->jmpbuf, 0)) {
3107 case 0:
3108 break;
3109 case 1:
6bd99586 3110 gen_exception_gpf(s);
f66c8e8c 3111 return true;
95093668
IL
3112 case 2:
3113 /* Restore state that may affect the next instruction. */
f66c8e8c 3114 s->pc = s->base.pc_next;
913f0836
RH
3115 /*
3116 * TODO: These save/restore can be removed after the table-based
3117 * decoder is complete; we will be decoding the insn completely
3118 * before any code generation that might affect these variables.
3119 */
95093668
IL
3120 s->cc_op_dirty = orig_cc_op_dirty;
3121 s->cc_op = orig_cc_op;
913f0836
RH
3122 s->pc_save = orig_pc_save;
3123 /* END TODO */
95093668
IL
3124 s->base.num_insns--;
3125 tcg_remove_ops_after(s->prev_insn_end);
3126 s->base.is_jmp = DISAS_TOO_MANY;
f66c8e8c 3127 return false;
95093668
IL
3128 default:
3129 g_assert_not_reached();
30663fd2 3130 }
b066c537 3131
a4926d99 3132 prefixes = 0;
a4926d99 3133
b066c537 3134 next_byte:
b3e22b23 3135 s->prefix = prefixes;
e3af7c78 3136 b = x86_ldub_code(env, s);
4a6fd938
RH
3137 /* Collect prefixes. */
3138 switch (b) {
b3e22b23 3139 default:
b3e22b23
PB
3140 break;
3141 case 0x0f:
3142 b = x86_ldub_code(env, s) + 0x100;
b3e22b23 3143 break;
4a6fd938
RH
3144 case 0xf3:
3145 prefixes |= PREFIX_REPZ;
5c2f60bd 3146 prefixes &= ~PREFIX_REPNZ;
4a6fd938
RH
3147 goto next_byte;
3148 case 0xf2:
3149 prefixes |= PREFIX_REPNZ;
5c2f60bd 3150 prefixes &= ~PREFIX_REPZ;
4a6fd938
RH
3151 goto next_byte;
3152 case 0xf0:
3153 prefixes |= PREFIX_LOCK;
3154 goto next_byte;
3155 case 0x2e:
3156 s->override = R_CS;
3157 goto next_byte;
3158 case 0x36:
3159 s->override = R_SS;
3160 goto next_byte;
3161 case 0x3e:
3162 s->override = R_DS;
3163 goto next_byte;
3164 case 0x26:
3165 s->override = R_ES;
3166 goto next_byte;
3167 case 0x64:
3168 s->override = R_FS;
3169 goto next_byte;
3170 case 0x65:
3171 s->override = R_GS;
3172 goto next_byte;
3173 case 0x66:
3174 prefixes |= PREFIX_DATA;
3175 goto next_byte;
3176 case 0x67:
3177 prefixes |= PREFIX_ADR;
3178 goto next_byte;
14ce26e7 3179#ifdef TARGET_X86_64
4a6fd938
RH
3180 case 0x40 ... 0x4f:
3181 if (CODE64(s)) {
14ce26e7 3182 /* REX prefix */
1e92b727 3183 prefixes |= PREFIX_REX;
a61ef762 3184 s->vex_w = (b >> 3) & 1;
bbdb4237 3185 s->rex_r = (b & 0x4) << 1;
14ce26e7 3186 s->rex_x = (b & 0x2) << 2;
915ffe89 3187 s->rex_b = (b & 0x1) << 3;
14ce26e7
FB
3188 goto next_byte;
3189 }
4a6fd938
RH
3190 break;
3191#endif
701ed211
RH
3192 case 0xc5: /* 2-byte VEX */
3193 case 0xc4: /* 3-byte VEX */
9996dcfd 3194 if (CODE32(s) && !VM86(s)) {
1d0b9261
PB
3195 int vex2 = x86_ldub_code(env, s);
3196 s->pc--; /* rewind the advance_pc() x86_ldub_code() did */
701ed211
RH
3197
3198 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
3199 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
3200 otherwise the instruction is LES or LDS. */
3201 break;
3202 }
1d0b9261
PB
3203 disas_insn_new(s, cpu, b);
 3204 return true;
701ed211
RH
3205 }
3206 break;
4a6fd938
RH
3207 }
3208
3209 /* Post-process prefixes. */
4a6fd938 3210 if (CODE64(s)) {
dec3fc96
RH
3211 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
3212 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
3213 over 0x66 if both are present. */
8ab1e486 3214 dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
dec3fc96 3215 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
1d71ddb1 3216 aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
dec3fc96
RH
3217 } else {
3218 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
9996dcfd 3219 if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) {
ab4e4aec
RH
3220 dflag = MO_32;
3221 } else {
3222 dflag = MO_16;
14ce26e7 3223 }
dec3fc96 3224 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
9996dcfd 3225 if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) {
1d71ddb1
RH
3226 aflag = MO_32;
3227 } else {
3228 aflag = MO_16;
14ce26e7 3229 }
2c0262af
FB
3230 }
3231
2c0262af
FB
3232 s->prefix = prefixes;
3233 s->aflag = aflag;
3234 s->dflag = dflag;
3235
2c0262af 3236 /* now check op code */
b3e22b23 3237 switch (b) {
2c0262af
FB
3238 /**************************/
3239 /* arith & logic */
3240 case 0x00 ... 0x05:
3241 case 0x08 ... 0x0d:
3242 case 0x10 ... 0x15:
3243 case 0x18 ... 0x1d:
3244 case 0x20 ... 0x25:
3245 case 0x28 ... 0x2d:
3246 case 0x30 ... 0x35:
3247 case 0x38 ... 0x3d:
3248 {
19729aff 3249 int f;
2c0262af
FB
3250 op = (b >> 3) & 7;
3251 f = (b >> 1) & 3;
3252
ab4e4aec 3253 ot = mo_b_d(b, dflag);
3b46e624 3254
2c0262af
FB
3255 switch(f) {
3256 case 0: /* OP Ev, Gv */
e3af7c78 3257 modrm = x86_ldub_code(env, s);
bbdb4237 3258 reg = ((modrm >> 3) & 7) | REX_R(s);
2c0262af 3259 mod = (modrm >> 6) & 3;
14ce26e7 3260 rm = (modrm & 7) | REX_B(s);
2c0262af 3261 if (mod != 3) {
4eeb3939 3262 gen_lea_modrm(env, s, modrm);
2c0262af
FB
3263 opreg = OR_TMP0;
3264 } else if (op == OP_XORL && rm == reg) {
3265 xor_zero:
3266 /* xor reg, reg optimisation */
436ff2d2 3267 set_cc_op(s, CC_OP_CLR);
c66f9727 3268 tcg_gen_movi_tl(s->T0, 0);
1dbe15ef 3269 gen_op_mov_reg_v(s, ot, reg, s->T0);
2c0262af
FB
3270 break;
3271 } else {
3272 opreg = rm;
3273 }
1dbe15ef 3274 gen_op_mov_v_reg(s, ot, s->T1, reg);
2c0262af
FB
3275 gen_op(s, op, ot, opreg);
3276 break;
3277 case 1: /* OP Gv, Ev */
e3af7c78 3278 modrm = x86_ldub_code(env, s);
2c0262af 3279 mod = (modrm >> 6) & 3;
bbdb4237 3280 reg = ((modrm >> 3) & 7) | REX_R(s);
14ce26e7 3281 rm = (modrm & 7) | REX_B(s);
2c0262af 3282 if (mod != 3) {
4eeb3939 3283 gen_lea_modrm(env, s, modrm);
b48597b0 3284 gen_op_ld_v(s, ot, s->T1, s->A0);
2c0262af
FB
3285 } else if (op == OP_XORL && rm == reg) {
3286 goto xor_zero;
3287 } else {
1dbe15ef 3288 gen_op_mov_v_reg(s, ot, s->T1, rm);
2c0262af
FB
3289 }
3290 gen_op(s, op, ot, reg);
3291 break;
3292 case 2: /* OP A, Iv */
0af10c86 3293 val = insn_get(env, s, ot);
b48597b0 3294 tcg_gen_movi_tl(s->T1, val);
2c0262af
FB
3295 gen_op(s, op, ot, OR_EAX);
3296 break;
3297 }
3298 }
3299 break;
3300
ec9d6075
FB
3301 case 0x82:
3302 if (CODE64(s))
3303 goto illegal_op;
edd7541b 3304 /* fall through */
2c0262af
FB
3305 case 0x80: /* GRP1 */
3306 case 0x81:
3307 case 0x83:
3308 {
ab4e4aec 3309 ot = mo_b_d(b, dflag);
3b46e624 3310
e3af7c78 3311 modrm = x86_ldub_code(env, s);
2c0262af 3312 mod = (modrm >> 6) & 3;
14ce26e7 3313 rm = (modrm & 7) | REX_B(s);
2c0262af 3314 op = (modrm >> 3) & 7;
3b46e624 3315
2c0262af 3316 if (mod != 3) {
14ce26e7
FB
3317 if (b == 0x83)
3318 s->rip_offset = 1;
3319 else
3320 s->rip_offset = insn_const_size(ot);
4eeb3939 3321 gen_lea_modrm(env, s, modrm);
2c0262af
FB
3322 opreg = OR_TMP0;
3323 } else {
14ce26e7 3324 opreg = rm;
2c0262af
FB
3325 }
3326
3327 switch(b) {
3328 default:
3329 case 0x80:
3330 case 0x81:
d64477af 3331 case 0x82:
0af10c86 3332 val = insn_get(env, s, ot);
2c0262af
FB
3333 break;
3334 case 0x83:
4ba9938c 3335 val = (int8_t)insn_get(env, s, MO_8);
2c0262af
FB
3336 break;
3337 }
b48597b0 3338 tcg_gen_movi_tl(s->T1, val);
2c0262af
FB
3339 gen_op(s, op, ot, opreg);
3340 }
3341 break;
3342
3343 /**************************/
3344 /* inc, dec, and other misc arith */
3345 case 0x40 ... 0x47: /* inc Gv */
ab4e4aec 3346 ot = dflag;
2c0262af
FB
3347 gen_inc(s, ot, OR_EAX + (b & 7), 1);
3348 break;
3349 case 0x48 ... 0x4f: /* dec Gv */
ab4e4aec 3350 ot = dflag;
2c0262af
FB
3351 gen_inc(s, ot, OR_EAX + (b & 7), -1);
3352 break;
3353 case 0xf6: /* GRP3 */
3354 case 0xf7:
ab4e4aec 3355 ot = mo_b_d(b, dflag);
2c0262af 3356
e3af7c78 3357 modrm = x86_ldub_code(env, s);
2c0262af 3358 mod = (modrm >> 6) & 3;
14ce26e7 3359 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
3360 op = (modrm >> 3) & 7;
3361 if (mod != 3) {
2a5fe8ae 3362 if (op == 0) {
14ce26e7 3363 s->rip_offset = insn_const_size(ot);
2a5fe8ae 3364 }
4eeb3939 3365 gen_lea_modrm(env, s, modrm);
2a5fe8ae
EC
3366 /* For those below that handle locked memory, don't load here. */
3367 if (!(s->prefix & PREFIX_LOCK)
3368 || op != 2) {
c66f9727 3369 gen_op_ld_v(s, ot, s->T0, s->A0);
2a5fe8ae 3370 }
2c0262af 3371 } else {
1dbe15ef 3372 gen_op_mov_v_reg(s, ot, s->T0, rm);
2c0262af
FB
3373 }
3374
3375 switch(op) {
3376 case 0: /* test */
0af10c86 3377 val = insn_get(env, s, ot);
b48597b0 3378 tcg_gen_movi_tl(s->T1, val);
c66f9727 3379 gen_op_testl_T0_T1_cc(s);
3ca51d07 3380 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af
FB
3381 break;
3382 case 2: /* not */
2a5fe8ae
EC
3383 if (s->prefix & PREFIX_LOCK) {
3384 if (mod == 3) {
3385 goto illegal_op;
3386 }
c66f9727
EC
3387 tcg_gen_movi_tl(s->T0, ~0);
3388 tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
2a5fe8ae 3389 s->mem_index, ot | MO_LE);
2c0262af 3390 } else {
c66f9727 3391 tcg_gen_not_tl(s->T0, s->T0);
2a5fe8ae 3392 if (mod != 3) {
c66f9727 3393 gen_op_st_v(s, ot, s->T0, s->A0);
2a5fe8ae 3394 } else {
1dbe15ef 3395 gen_op_mov_reg_v(s, ot, rm, s->T0);
2a5fe8ae 3396 }
2c0262af
FB
3397 }
3398 break;
3399 case 3: /* neg */
8eb8c738
EC
3400 if (s->prefix & PREFIX_LOCK) {
3401 TCGLabel *label1;
3402 TCGv a0, t0, t1, t2;
3403
3404 if (mod == 3) {
3405 goto illegal_op;
3406 }
3a5d1773
RH
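/*
 * There is no atomic negate operation, so locked NEG is built as a
 * compare-and-swap loop: take the value loaded above, compute its
 * negation, and attempt a cmpxchg against memory.  If another CPU
 * changed the location in the meantime, the cmpxchg returns the new
 * contents and the loop retries with that value.
 */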
3407 a0 = s->A0;
3408 t0 = s->T0;
8eb8c738
EC
3409 label1 = gen_new_label();
3410
8eb8c738
EC
3411 gen_set_label(label1);
3412 t1 = tcg_temp_new();
3413 t2 = tcg_temp_new();
3414 tcg_gen_mov_tl(t2, t0);
3415 tcg_gen_neg_tl(t1, t0);
3416 tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
3417 s->mem_index, ot | MO_LE);
8eb8c738
EC
3418 tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
3419
12153175 3420 tcg_gen_neg_tl(s->T0, t0);
2c0262af 3421 } else {
c66f9727 3422 tcg_gen_neg_tl(s->T0, s->T0);
8eb8c738 3423 if (mod != 3) {
c66f9727 3424 gen_op_st_v(s, ot, s->T0, s->A0);
8eb8c738 3425 } else {
1dbe15ef 3426 gen_op_mov_reg_v(s, ot, rm, s->T0);
8eb8c738 3427 }
2c0262af 3428 }
93a3e108 3429 gen_op_update_neg_cc(s);
3ca51d07 3430 set_cc_op(s, CC_OP_SUBB + ot);
2c0262af
FB
3431 break;
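/*
 * Unsigned MUL widens the accumulator: AX = AL * r/m8,
 * DX:AX = AX * r/m16, EDX:EAX = EAX * r/m32, RDX:RAX = RAX * r/m64.
 * CF and OF are set exactly when the upper half of the product is
 * non-zero, so the code below stashes that upper half in cc_src (the
 * byte form keeps it in place as bits 8..15) for the CC_OP_MUL* cases.
 */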
3432 case 4: /* mul */
3433 switch(ot) {
4ba9938c 3434 case MO_8:
1dbe15ef 3435 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
c66f9727 3436 tcg_gen_ext8u_tl(s->T0, s->T0);
b48597b0 3437 tcg_gen_ext8u_tl(s->T1, s->T1);
0211e5af 3438 /* XXX: use a 32-bit multiply, which could be faster */
b48597b0 3439 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
1dbe15ef 3440 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
c66f9727
EC
3441 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3442 tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00);
3ca51d07 3443 set_cc_op(s, CC_OP_MULB);
2c0262af 3444 break;
4ba9938c 3445 case MO_16:
1dbe15ef 3446 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
c66f9727 3447 tcg_gen_ext16u_tl(s->T0, s->T0);
b48597b0 3448 tcg_gen_ext16u_tl(s->T1, s->T1);
0211e5af 3449 /* XXX: use a 32-bit multiply, which could be faster */
b48597b0 3450 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
1dbe15ef 3451 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
c66f9727
EC
3452 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3453 tcg_gen_shri_tl(s->T0, s->T0, 16);
1dbe15ef 3454 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
c66f9727 3455 tcg_gen_mov_tl(cpu_cc_src, s->T0);
3ca51d07 3456 set_cc_op(s, CC_OP_MULW);
2c0262af
FB
3457 break;
3458 default:
4ba9938c 3459 case MO_32:
6bd48f6f 3460 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
4f82446d
EC
3461 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3462 tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
3463 s->tmp2_i32, s->tmp3_i32);
6bd48f6f 3464 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
4f82446d 3465 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
a4bcea3d
RH
3466 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3467 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 3468 set_cc_op(s, CC_OP_MULL);
2c0262af 3469 break;
14ce26e7 3470#ifdef TARGET_X86_64
4ba9938c 3471 case MO_64:
a4bcea3d 3472 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
c66f9727 3473 s->T0, cpu_regs[R_EAX]);
a4bcea3d
RH
3474 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3475 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 3476 set_cc_op(s, CC_OP_MULQ);
14ce26e7
FB
3477 break;
3478#endif
2c0262af 3479 }
2c0262af
FB
3480 break;
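/*
 * One-operand IMUL produces the same widened result, but CF/OF are set
 * when the signed product does not fit in the destination width.  The
 * code below computes cc_src as the difference between the result and
 * the sign extension of its low half (equivalently, between the sign
 * bits of the low half and the high half), so cc_src is zero exactly
 * when the result did fit.
 */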
3481 case 5: /* imul */
3482 switch(ot) {
4ba9938c 3483 case MO_8:
1dbe15ef 3484 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
c66f9727 3485 tcg_gen_ext8s_tl(s->T0, s->T0);
b48597b0 3486 tcg_gen_ext8s_tl(s->T1, s->T1);
0211e5af 3487 /* XXX: use a 32-bit multiply, which could be faster */
b48597b0 3488 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
1dbe15ef 3489 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
c66f9727 3490 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
fbd80f02
EC
3491 tcg_gen_ext8s_tl(s->tmp0, s->T0);
3492 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3ca51d07 3493 set_cc_op(s, CC_OP_MULB);
2c0262af 3494 break;
4ba9938c 3495 case MO_16:
1dbe15ef 3496 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
c66f9727 3497 tcg_gen_ext16s_tl(s->T0, s->T0);
b48597b0 3498 tcg_gen_ext16s_tl(s->T1, s->T1);
0211e5af 3499 /* XXX: use a 32-bit multiply, which could be faster */
b48597b0 3500 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
1dbe15ef 3501 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
c66f9727 3502 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
fbd80f02
EC
3503 tcg_gen_ext16s_tl(s->tmp0, s->T0);
3504 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
c66f9727 3505 tcg_gen_shri_tl(s->T0, s->T0, 16);
1dbe15ef 3506 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3ca51d07 3507 set_cc_op(s, CC_OP_MULW);
2c0262af
FB
3508 break;
3509 default:
4ba9938c 3510 case MO_32:
6bd48f6f 3511 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
4f82446d
EC
3512 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3513 tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3514 s->tmp2_i32, s->tmp3_i32);
6bd48f6f 3515 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
4f82446d 3516 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
6bd48f6f 3517 tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
a4bcea3d 3518 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4f82446d 3519 tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
6bd48f6f 3520 tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
3ca51d07 3521 set_cc_op(s, CC_OP_MULL);
2c0262af 3522 break;
14ce26e7 3523#ifdef TARGET_X86_64
4ba9938c 3524 case MO_64:
a4bcea3d 3525 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
c66f9727 3526 s->T0, cpu_regs[R_EAX]);
a4bcea3d
RH
3527 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3528 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
3529 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 3530 set_cc_op(s, CC_OP_MULQ);
14ce26e7
FB
3531 break;
3532#endif
2c0262af 3533 }
2c0262af
FB
3534 break;
3535 case 6: /* div */
3536 switch(ot) {
4ba9938c 3537 case MO_8:
ad75a51e 3538 gen_helper_divb_AL(tcg_env, s->T0);
2c0262af 3539 break;
4ba9938c 3540 case MO_16:
ad75a51e 3541 gen_helper_divw_AX(tcg_env, s->T0);
2c0262af
FB
3542 break;
3543 default:
4ba9938c 3544 case MO_32:
ad75a51e 3545 gen_helper_divl_EAX(tcg_env, s->T0);
14ce26e7
FB
3546 break;
3547#ifdef TARGET_X86_64
4ba9938c 3548 case MO_64:
ad75a51e 3549 gen_helper_divq_EAX(tcg_env, s->T0);
2c0262af 3550 break;
14ce26e7 3551#endif
2c0262af
FB
3552 }
3553 break;
3554 case 7: /* idiv */
3555 switch(ot) {
4ba9938c 3556 case MO_8:
ad75a51e 3557 gen_helper_idivb_AL(tcg_env, s->T0);
2c0262af 3558 break;
4ba9938c 3559 case MO_16:
ad75a51e 3560 gen_helper_idivw_AX(tcg_env, s->T0);
2c0262af
FB
3561 break;
3562 default:
4ba9938c 3563 case MO_32:
ad75a51e 3564 gen_helper_idivl_EAX(tcg_env, s->T0);
14ce26e7
FB
3565 break;
3566#ifdef TARGET_X86_64
4ba9938c 3567 case MO_64:
ad75a51e 3568 gen_helper_idivq_EAX(tcg_env, s->T0);
2c0262af 3569 break;
14ce26e7 3570#endif
2c0262af
FB
3571 }
3572 break;
3573 default:
b9f9c5b4 3574 goto unknown_op;
2c0262af
FB
3575 }
3576 break;
3577
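/*
 * GRP4 (0xfe) only defines INC/DEC on a byte operand; GRP5 (0xff) uses
 * the reg field for 0=INC, 1=DEC, 2=CALL, 3=LCALL, 4=JMP, 5=LJMP,
 * 6=PUSH.  In 64-bit mode near CALL/JMP targets are always 64 bits wide
 * and PUSH uses the 64-bit default stack width, which is why ot is
 * overridden below when CODE64(s).
 */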
3578 case 0xfe: /* GRP4 */
3579 case 0xff: /* GRP5 */
ab4e4aec 3580 ot = mo_b_d(b, dflag);
2c0262af 3581
e3af7c78 3582 modrm = x86_ldub_code(env, s);
2c0262af 3583 mod = (modrm >> 6) & 3;
14ce26e7 3584 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
3585 op = (modrm >> 3) & 7;
3586 if (op >= 2 && b == 0xfe) {
b9f9c5b4 3587 goto unknown_op;
2c0262af 3588 }
14ce26e7 3589 if (CODE64(s)) {
aba9d61e 3590 if (op == 2 || op == 4) {
14ce26e7 3591 /* operand size for jumps is 64 bit */
4ba9938c 3592 ot = MO_64;
aba9d61e 3593 } else if (op == 3 || op == 5) {
8ab1e486 3594 ot = dflag != MO_16 ? MO_32 + REX_W(s) : MO_16;
14ce26e7
FB
3595 } else if (op == 6) {
3596 /* default push size is 64 bit */
ab4e4aec 3597 ot = mo_pushpop(s, dflag);
14ce26e7
FB
3598 }
3599 }
2c0262af 3600 if (mod != 3) {
4eeb3939 3601 gen_lea_modrm(env, s, modrm);
2c0262af 3602 if (op >= 2 && op != 3 && op != 5)
c66f9727 3603 gen_op_ld_v(s, ot, s->T0, s->A0);
2c0262af 3604 } else {
1dbe15ef 3605 gen_op_mov_v_reg(s, ot, s->T0, rm);
2c0262af
FB
3606 }
3607
3608 switch(op) {
3609 case 0: /* inc Ev */
3610 if (mod != 3)
3611 opreg = OR_TMP0;
3612 else
3613 opreg = rm;
3614 gen_inc(s, ot, opreg, 1);
3615 break;
3616 case 1: /* dec Ev */
3617 if (mod != 3)
3618 opreg = OR_TMP0;
3619 else
3620 opreg = rm;
3621 gen_inc(s, ot, opreg, -1);
3622 break;
3623 case 2: /* call Ev */
4f31916f 3624 /* XXX: optimize if memory (no 'and' is necessary) */
ab4e4aec 3625 if (dflag == MO_16) {
c66f9727 3626 tcg_gen_ext16u_tl(s->T0, s->T0);
40b90233 3627 }
9e599bf7 3628 gen_push_v(s, eip_next_tl(s));
e3a79e0e 3629 gen_op_jmp_v(s, s->T0);
7d117ce8 3630 gen_bnd_jmp(s);
faf9ea5f 3631 s->base.is_jmp = DISAS_JUMP;
2c0262af 3632 break;
61382a50 3633 case 3: /* lcall Ev */
10b8eb94
RH
3634 if (mod == 3) {
3635 goto illegal_op;
3636 }
b48597b0 3637 gen_op_ld_v(s, ot, s->T1, s->A0);
830a19a4 3638 gen_add_A0_im(s, 1 << ot);
c66f9727 3639 gen_op_ld_v(s, MO_16, s->T0, s->A0);
2c0262af 3640 do_lcall:
f8a35846 3641 if (PE(s) && !VM86(s)) {
6bd48f6f 3642 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
ad75a51e 3643 gen_helper_lcall_protected(tcg_env, s->tmp2_i32, s->T1,
9e599bf7
RH
3644 tcg_constant_i32(dflag - 1),
3645 eip_next_tl(s));
2c0262af 3646 } else {
6bd48f6f 3647 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
8c03ab9f 3648 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
ad75a51e 3649 gen_helper_lcall_real(tcg_env, s->tmp2_i32, s->tmp3_i32,
9e599bf7
RH
3650 tcg_constant_i32(dflag - 1),
3651 eip_next_i32(s));
2c0262af 3652 }
faf9ea5f 3653 s->base.is_jmp = DISAS_JUMP;
2c0262af
FB
3654 break;
3655 case 4: /* jmp Ev */
ab4e4aec 3656 if (dflag == MO_16) {
c66f9727 3657 tcg_gen_ext16u_tl(s->T0, s->T0);
40b90233 3658 }
e3a79e0e 3659 gen_op_jmp_v(s, s->T0);
7d117ce8 3660 gen_bnd_jmp(s);
faf9ea5f 3661 s->base.is_jmp = DISAS_JUMP;
2c0262af
FB
3662 break;
3663 case 5: /* ljmp Ev */
10b8eb94
RH
3664 if (mod == 3) {
3665 goto illegal_op;
3666 }
b48597b0 3667 gen_op_ld_v(s, ot, s->T1, s->A0);
830a19a4 3668 gen_add_A0_im(s, 1 << ot);
c66f9727 3669 gen_op_ld_v(s, MO_16, s->T0, s->A0);
2c0262af 3670 do_ljmp:
f8a35846 3671 if (PE(s) && !VM86(s)) {
6bd48f6f 3672 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
ad75a51e 3673 gen_helper_ljmp_protected(tcg_env, s->tmp2_i32, s->T1,
9e599bf7 3674 eip_next_tl(s));
2c0262af 3675 } else {
c66f9727 3676 gen_op_movl_seg_T0_vm(s, R_CS);
e3a79e0e 3677 gen_op_jmp_v(s, s->T1);
2c0262af 3678 }
faf9ea5f 3679 s->base.is_jmp = DISAS_JUMP;
2c0262af
FB
3680 break;
3681 case 6: /* push Ev */
c66f9727 3682 gen_push_v(s, s->T0);
2c0262af
FB
3683 break;
3684 default:
b9f9c5b4 3685 goto unknown_op;
2c0262af
FB
3686 }
3687 break;
3688
3689 case 0x84: /* test Ev, Gv */
5fafdf24 3690 case 0x85:
ab4e4aec 3691 ot = mo_b_d(b, dflag);
2c0262af 3692
e3af7c78 3693 modrm = x86_ldub_code(env, s);
bbdb4237 3694 reg = ((modrm >> 3) & 7) | REX_R(s);
3b46e624 3695
0af10c86 3696 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
1dbe15ef 3697 gen_op_mov_v_reg(s, ot, s->T1, reg);
c66f9727 3698 gen_op_testl_T0_T1_cc(s);
3ca51d07 3699 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af 3700 break;
3b46e624 3701
2c0262af
FB
3702 case 0xa8: /* test eAX, Iv */
3703 case 0xa9:
ab4e4aec 3704 ot = mo_b_d(b, dflag);
0af10c86 3705 val = insn_get(env, s, ot);
2c0262af 3706
1dbe15ef 3707 gen_op_mov_v_reg(s, ot, s->T0, OR_EAX);
b48597b0 3708 tcg_gen_movi_tl(s->T1, val);
c66f9727 3709 gen_op_testl_T0_T1_cc(s);
3ca51d07 3710 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af 3711 break;
3b46e624 3712
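/*
 * 0x98 sign-extends the lower half of the accumulator in place
 * (CBW: AL->AX, CWDE: AX->EAX, CDQE: EAX->RAX), while 0x99 copies the
 * accumulator's sign bit into the whole of DX/EDX/RDX (CWD/CDQ/CQO),
 * which is what the arithmetic right shifts by 15/31/63 below implement.
 */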
2c0262af 3713 case 0x98: /* CWDE/CBW */
ab4e4aec 3714 switch (dflag) {
14ce26e7 3715#ifdef TARGET_X86_64
ab4e4aec 3716 case MO_64:
1dbe15ef 3717 gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
c66f9727 3718 tcg_gen_ext32s_tl(s->T0, s->T0);
1dbe15ef 3719 gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0);
ab4e4aec 3720 break;
14ce26e7 3721#endif
ab4e4aec 3722 case MO_32:
1dbe15ef 3723 gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
c66f9727 3724 tcg_gen_ext16s_tl(s->T0, s->T0);
1dbe15ef 3725 gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0);
ab4e4aec
RH
3726 break;
3727 case MO_16:
1dbe15ef 3728 gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX);
c66f9727 3729 tcg_gen_ext8s_tl(s->T0, s->T0);
1dbe15ef 3730 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
ab4e4aec
RH
3731 break;
3732 default:
732e89f4 3733 g_assert_not_reached();
e108dd01 3734 }
2c0262af
FB
3735 break;
3736 case 0x99: /* CDQ/CWD */
ab4e4aec 3737 switch (dflag) {
14ce26e7 3738#ifdef TARGET_X86_64
ab4e4aec 3739 case MO_64:
1dbe15ef 3740 gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX);
c66f9727 3741 tcg_gen_sari_tl(s->T0, s->T0, 63);
1dbe15ef 3742 gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0);
ab4e4aec 3743 break;
14ce26e7 3744#endif
ab4e4aec 3745 case MO_32:
1dbe15ef 3746 gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
c66f9727
EC
3747 tcg_gen_ext32s_tl(s->T0, s->T0);
3748 tcg_gen_sari_tl(s->T0, s->T0, 31);
1dbe15ef 3749 gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0);
ab4e4aec
RH
3750 break;
3751 case MO_16:
1dbe15ef 3752 gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
c66f9727
EC
3753 tcg_gen_ext16s_tl(s->T0, s->T0);
3754 tcg_gen_sari_tl(s->T0, s->T0, 15);
1dbe15ef 3755 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
ab4e4aec
RH
3756 break;
3757 default:
732e89f4 3758 g_assert_not_reached();
e108dd01 3759 }
2c0262af
FB
3760 break;
3761 case 0x1af: /* imul Gv, Ev */
3762 case 0x69: /* imul Gv, Ev, I */
3763 case 0x6b:
ab4e4aec 3764 ot = dflag;
e3af7c78 3765 modrm = x86_ldub_code(env, s);
bbdb4237 3766 reg = ((modrm >> 3) & 7) | REX_R(s);
14ce26e7
FB
3767 if (b == 0x69)
3768 s->rip_offset = insn_const_size(ot);
3769 else if (b == 0x6b)
3770 s->rip_offset = 1;
0af10c86 3771 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2c0262af 3772 if (b == 0x69) {
0af10c86 3773 val = insn_get(env, s, ot);
b48597b0 3774 tcg_gen_movi_tl(s->T1, val);
2c0262af 3775 } else if (b == 0x6b) {
4ba9938c 3776 val = (int8_t)insn_get(env, s, MO_8);
b48597b0 3777 tcg_gen_movi_tl(s->T1, val);
2c0262af 3778 } else {
1dbe15ef 3779 gen_op_mov_v_reg(s, ot, s->T1, reg);
2c0262af 3780 }
a4bcea3d 3781 switch (ot) {
0211e5af 3782#ifdef TARGET_X86_64
4ba9938c 3783 case MO_64:
b48597b0 3784 tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1);
a4bcea3d
RH
3785 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
3786 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
b48597b0 3787 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1);
a4bcea3d 3788 break;
0211e5af 3789#endif
4ba9938c 3790 case MO_32:
6bd48f6f 3791 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
4f82446d
EC
3792 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
3793 tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3794 s->tmp2_i32, s->tmp3_i32);
6bd48f6f
EC
3795 tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
3796 tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
a4bcea3d 3797 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
4f82446d 3798 tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
6bd48f6f 3799 tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
a4bcea3d
RH
3800 break;
3801 default:
c66f9727 3802 tcg_gen_ext16s_tl(s->T0, s->T0);
b48597b0 3803 tcg_gen_ext16s_tl(s->T1, s->T1);
0211e5af 3804 /* XXX: use a 32-bit multiply, which could be faster */
b48597b0 3805 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
c66f9727 3806 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
fbd80f02
EC
3807 tcg_gen_ext16s_tl(s->tmp0, s->T0);
3808 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
1dbe15ef 3809 gen_op_mov_reg_v(s, ot, reg, s->T0);
a4bcea3d 3810 break;
2c0262af 3811 }
3ca51d07 3812 set_cc_op(s, CC_OP_MULB + ot);
2c0262af
FB
3813 break;
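/*
 * XADD exchanges its operands and stores their sum in the destination:
 * the source register receives the old destination value and the
 * destination receives old + src.  For the locked memory form this maps
 * naturally onto an atomic fetch-and-add, which returns the old value
 * and performs the addition in a single access.
 */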
3814 case 0x1c0:
3815 case 0x1c1: /* xadd Ev, Gv */
ab4e4aec 3816 ot = mo_b_d(b, dflag);
e3af7c78 3817 modrm = x86_ldub_code(env, s);
bbdb4237 3818 reg = ((modrm >> 3) & 7) | REX_R(s);
2c0262af 3819 mod = (modrm >> 6) & 3;
1dbe15ef 3820 gen_op_mov_v_reg(s, ot, s->T0, reg);
2c0262af 3821 if (mod == 3) {
14ce26e7 3822 rm = (modrm & 7) | REX_B(s);
1dbe15ef 3823 gen_op_mov_v_reg(s, ot, s->T1, rm);
b48597b0 3824 tcg_gen_add_tl(s->T0, s->T0, s->T1);
1dbe15ef
EC
3825 gen_op_mov_reg_v(s, ot, reg, s->T1);
3826 gen_op_mov_reg_v(s, ot, rm, s->T0);
2c0262af 3827 } else {
4eeb3939 3828 gen_lea_modrm(env, s, modrm);
f53b0181 3829 if (s->prefix & PREFIX_LOCK) {
b48597b0 3830 tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
f53b0181 3831 s->mem_index, ot | MO_LE);
b48597b0 3832 tcg_gen_add_tl(s->T0, s->T0, s->T1);
f53b0181 3833 } else {
b48597b0
EC
3834 gen_op_ld_v(s, ot, s->T1, s->A0);
3835 tcg_gen_add_tl(s->T0, s->T0, s->T1);
c66f9727 3836 gen_op_st_v(s, ot, s->T0, s->A0);
f53b0181 3837 }
1dbe15ef 3838 gen_op_mov_reg_v(s, ot, reg, s->T1);
2c0262af 3839 }
c66f9727 3840 gen_op_update2_cc(s);
3ca51d07 3841 set_cc_op(s, CC_OP_ADDB + ot);
2c0262af
FB
3842 break;
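/*
 * CMPXCHG compares the accumulator with the destination: if they are
 * equal, the source register is stored into the destination (ZF=1);
 * otherwise the old destination value is loaded into the accumulator
 * (ZF=0).  Either way the flags are those of CMP between accumulator
 * and destination, hence the CC_OP_SUB setup at the end.
 */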
3843 case 0x1b0:
3844 case 0x1b1: /* cmpxchg Ev, Gv */
cad3a37d 3845 {
d1bb978b 3846 TCGv oldv, newv, cmpv, dest;
cad3a37d 3847
ab4e4aec 3848 ot = mo_b_d(b, dflag);
e3af7c78 3849 modrm = x86_ldub_code(env, s);
bbdb4237 3850 reg = ((modrm >> 3) & 7) | REX_R(s);
cad3a37d 3851 mod = (modrm >> 6) & 3;
ae03f8de
EC
3852 oldv = tcg_temp_new();
3853 newv = tcg_temp_new();
3854 cmpv = tcg_temp_new();
1dbe15ef 3855 gen_op_mov_v_reg(s, ot, newv, reg);
ae03f8de 3856 tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
d1bb978b 3857 gen_extu(ot, cmpv);
ae03f8de
EC
3858 if (s->prefix & PREFIX_LOCK) {
3859 if (mod == 3) {
3860 goto illegal_op;
3861 }
4eeb3939 3862 gen_lea_modrm(env, s, modrm);
6b672b5d 3863 tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
ae03f8de 3864 s->mem_index, ot | MO_LE);
cad3a37d 3865 } else {
ae03f8de
EC
3866 if (mod == 3) {
3867 rm = (modrm & 7) | REX_B(s);
1dbe15ef 3868 gen_op_mov_v_reg(s, ot, oldv, rm);
d1bb978b
PB
3869 gen_extu(ot, oldv);
3870
3871 /*
3872 * Unlike the memory case, where "the destination operand receives
3873 * a write cycle without regard to the result of the comparison",
3874 * rm must not be touched altogether if the write fails, including
3875 * not zero-extending it on 64-bit processors. So, precompute
3876 * the result of a successful writeback and perform the movcond
3877 * directly on cpu_regs. Also need to write accumulator first, in
3878 * case rm is part of RAX too.
3879 */
3880 dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
3881 tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
ae03f8de
EC
3882 } else {
3883 gen_lea_modrm(env, s, modrm);
6b672b5d 3884 gen_op_ld_v(s, ot, oldv, s->A0);
d1bb978b
PB
3885
3886 /*
3887 * Perform an unconditional store cycle like physical cpu;
3888 * must be before changing accumulator to ensure
3889 * idempotency if the store faults and the instruction
3890 * is restarted
3891 */
3892 tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
6b672b5d 3893 gen_op_st_v(s, ot, newv, s->A0);
ae03f8de 3894 }
cad3a37d 3895 }
d1bb978b
PB
3896 /*
3897 * Write EAX only if the cmpxchg fails; reuse newv as the destination,
3898 * since it's dead here.
3899 */
3900 dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
3901 tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
ae03f8de 3902 tcg_gen_mov_tl(cpu_cc_src, oldv);
93a3e108 3903 tcg_gen_mov_tl(s->cc_srcT, cmpv);
ae03f8de 3904 tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
3ca51d07 3905 set_cc_op(s, CC_OP_SUBB + ot);
2c0262af 3906 }
2c0262af
FB
3907 break;
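/*
 * 0F C7 is itself a group: /1 is CMPXCHG8B (CMPXCHG16B with REX.W),
 * gated on the CX8 and CX16 CPUID bits respectively, and requires a
 * memory operand; /6 is RDRAND and /7 is RDSEED (or RDPID when an F3
 * prefix is present), all of which require a register operand.
 */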
3908 case 0x1c7: /* cmpxchg8b */
e3af7c78 3909 modrm = x86_ldub_code(env, s);
2c0262af 3910 mod = (modrm >> 6) & 3;
369fd5ca
RH
3911 switch ((modrm >> 3) & 7) {
3912 case 1: /* CMPXCHG8, CMPXCHG16 */
3913 if (mod == 3) {
1b9d9ebb 3914 goto illegal_op;
ae03f8de 3915 }
369fd5ca
RH
3916#ifdef TARGET_X86_64
3917 if (dflag == MO_64) {
3918 if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
3919 goto illegal_op;
3920 }
6218c177 3921 gen_cmpxchg16b(s, env, modrm);
369fd5ca
RH
3922 break;
3923 }
6218c177 3924#endif
369fd5ca 3925 if (!(s->cpuid_features & CPUID_CX8)) {
1b9d9ebb 3926 goto illegal_op;
369fd5ca 3927 }
6218c177 3928 gen_cmpxchg8b(s, env, modrm);
369fd5ca
RH
3929 break;
3930
6750485b 3931 case 7: /* RDSEED, RDPID with f3 prefix */
f9e0dbae 3932 if (mod != 3 ||
6750485b 3933 (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
f9e0dbae
PB
3934 goto illegal_op;
3935 }
6750485b
PB
3936 if (s->prefix & PREFIX_REPZ) {
3937 if (!(s->cpuid_ext_features & CPUID_7_0_ECX_RDPID)) {
3938 goto illegal_op;
3939 }
ad75a51e 3940 gen_helper_rdpid(s->T0, tcg_env);
6750485b
PB
3941 rm = (modrm & 7) | REX_B(s);
3942 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3943 break;
3944 } else {
3945 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
3946 goto illegal_op;
3947 }
3948 goto do_rdrand;
3949 }
f9e0dbae 3950
369fd5ca
RH
3951 case 6: /* RDRAND */
3952 if (mod != 3 ||
3953 (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
3954 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
3955 goto illegal_op;
3956 }
f9e0dbae 3957 do_rdrand:
dfd1b812 3958 translator_io_start(&s->base);
ad75a51e 3959 gen_helper_rdrand(s->T0, tcg_env);
369fd5ca
RH
3960 rm = (modrm & 7) | REX_B(s);
3961 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3962 set_cc_op(s, CC_OP_EFLAGS);
369fd5ca
RH
3963 break;
3964
3965 default:
3966 goto illegal_op;
1b9d9ebb 3967 }
2c0262af 3968 break;
3b46e624 3969
2c0262af
FB
3970 /**************************/
3971 /* push/pop */
3972 case 0x50 ... 0x57: /* push */
1dbe15ef 3973 gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s));
c66f9727 3974 gen_push_v(s, s->T0);
2c0262af
FB
3975 break;
3976 case 0x58 ... 0x5f: /* pop */
8e31d234 3977 ot = gen_pop_T0(s);
77729c24 3978 /* NOTE: order is important for pop %sp */
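/* For "pop %sp" the value read from the stack, not the incremented
   pointer, must end up in SP, so the stack-pointer update is applied
   first and is then overwritten by the register write below. */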
8e31d234 3979 gen_pop_update(s, ot);
1dbe15ef 3980 gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0);
2c0262af
FB
3981 break;
3982 case 0x60: /* pusha */
14ce26e7
FB
3983 if (CODE64(s))
3984 goto illegal_op;
2c0262af
FB
3985 gen_pusha(s);
3986 break;
3987 case 0x61: /* popa */
14ce26e7
FB
3988 if (CODE64(s))
3989 goto illegal_op;
2c0262af
FB
3990 gen_popa(s);
3991 break;
3992 case 0x68: /* push Iv */
3993 case 0x6a:
ab4e4aec 3994 ot = mo_pushpop(s, dflag);
2c0262af 3995 if (b == 0x68)
0af10c86 3996 val = insn_get(env, s, ot);
2c0262af 3997 else
4ba9938c 3998 val = (int8_t)insn_get(env, s, MO_8);
c66f9727
EC
3999 tcg_gen_movi_tl(s->T0, val);
4000 gen_push_v(s, s->T0);
2c0262af
FB
4001 break;
4002 case 0x8f: /* pop Ev */
e3af7c78 4003 modrm = x86_ldub_code(env, s);
77729c24 4004 mod = (modrm >> 6) & 3;
8e31d234 4005 ot = gen_pop_T0(s);
77729c24
FB
4006 if (mod == 3) {
4007 /* NOTE: order is important for pop %sp */
8e31d234 4008 gen_pop_update(s, ot);
14ce26e7 4009 rm = (modrm & 7) | REX_B(s);
1dbe15ef 4010 gen_op_mov_reg_v(s, ot, rm, s->T0);
77729c24
FB
4011 } else {
4012 /* NOTE: order is important too for MMU exceptions */
14ce26e7 4013 s->popl_esp_hack = 1 << ot;
0af10c86 4014 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
77729c24 4015 s->popl_esp_hack = 0;
8e31d234 4016 gen_pop_update(s, ot);
77729c24 4017 }
2c0262af
FB
4018 break;
4019 case 0xc8: /* enter */
4020 {
4021 int level;
e3af7c78
PB
4022 val = x86_lduw_code(env, s);
4023 level = x86_ldub_code(env, s);
2c0262af
FB
4024 gen_enter(s, val, level);
4025 }
4026 break;
4027 case 0xc9: /* leave */
2045f04c 4028 gen_leave(s);
2c0262af
FB
4029 break;
4030 case 0x06: /* push es */
4031 case 0x0e: /* push cs */
4032 case 0x16: /* push ss */
4033 case 0x1e: /* push ds */
14ce26e7
FB
4034 if (CODE64(s))
4035 goto illegal_op;
c66f9727
EC
4036 gen_op_movl_T0_seg(s, b >> 3);
4037 gen_push_v(s, s->T0);
2c0262af
FB
4038 break;
4039 case 0x1a0: /* push fs */
4040 case 0x1a8: /* push gs */
c66f9727
EC
4041 gen_op_movl_T0_seg(s, (b >> 3) & 7);
4042 gen_push_v(s, s->T0);
2c0262af
FB
4043 break;
4044 case 0x07: /* pop es */
4045 case 0x17: /* pop ss */
4046 case 0x1f: /* pop ds */
14ce26e7
FB
4047 if (CODE64(s))
4048 goto illegal_op;
2c0262af 4049 reg = b >> 3;
8e31d234 4050 ot = gen_pop_T0(s);
100ec099 4051 gen_movl_seg_T0(s, reg);
8e31d234 4052 gen_pop_update(s, ot);
2c0262af
FB
4053 break;
4054 case 0x1a1: /* pop fs */
4055 case 0x1a9: /* pop gs */
8e31d234 4056 ot = gen_pop_T0(s);
100ec099 4057 gen_movl_seg_T0(s, (b >> 3) & 7);
8e31d234 4058 gen_pop_update(s, ot);
2c0262af
FB
4059 break;
4060
4061 /**************************/
4062 /* mov */
4063 case 0x88:
4064 case 0x89: /* mov Gv, Ev */
ab4e4aec 4065 ot = mo_b_d(b, dflag);
e3af7c78 4066 modrm = x86_ldub_code(env, s);
bbdb4237 4067 reg = ((modrm >> 3) & 7) | REX_R(s);
3b46e624 4068
2c0262af 4069 /* generate a generic store */
0af10c86 4070 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
2c0262af
FB
4071 break;
4072 case 0xc6:
4073 case 0xc7: /* mov Ev, Iv */
ab4e4aec 4074 ot = mo_b_d(b, dflag);
e3af7c78 4075 modrm = x86_ldub_code(env, s);
2c0262af 4076 mod = (modrm >> 6) & 3;
14ce26e7
FB
4077 if (mod != 3) {
4078 s->rip_offset = insn_const_size(ot);
4eeb3939 4079 gen_lea_modrm(env, s, modrm);
14ce26e7 4080 }
0af10c86 4081 val = insn_get(env, s, ot);
c66f9727 4082 tcg_gen_movi_tl(s->T0, val);
fd8ca9f6 4083 if (mod != 3) {
c66f9727 4084 gen_op_st_v(s, ot, s->T0, s->A0);
fd8ca9f6 4085 } else {
1dbe15ef 4086 gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0);
fd8ca9f6 4087 }
2c0262af
FB
4088 break;
4089 case 0x8a:
4090 case 0x8b: /* mov Ev, Gv */
ab4e4aec 4091 ot = mo_b_d(b, dflag);
e3af7c78 4092 modrm = x86_ldub_code(env, s);
bbdb4237 4093 reg = ((modrm >> 3) & 7) | REX_R(s);
3b46e624 4094
0af10c86 4095 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
1dbe15ef 4096 gen_op_mov_reg_v(s, ot, reg, s->T0);
2c0262af
FB
4097 break;
4098 case 0x8e: /* mov seg, Gv */
e3af7c78 4099 modrm = x86_ldub_code(env, s);
2c0262af
FB
4100 reg = (modrm >> 3) & 7;
4101 if (reg >= 6 || reg == R_CS)
4102 goto illegal_op;
4ba9938c 4103 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
100ec099 4104 gen_movl_seg_T0(s, reg);
2c0262af
FB
4105 break;
4106 case 0x8c: /* mov Gv, seg */
e3af7c78 4107 modrm = x86_ldub_code(env, s);
2c0262af
FB
4108 reg = (modrm >> 3) & 7;
4109 mod = (modrm >> 6) & 3;
4110 if (reg >= 6)
4111 goto illegal_op;
c66f9727 4112 gen_op_movl_T0_seg(s, reg);
ab4e4aec 4113 ot = mod == 3 ? dflag : MO_16;
0af10c86 4114 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
4115 break;
4116
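/*
 * 0F B6/B7/BE/BF are MOVZX/MOVSX: bit 0 of the opcode selects a byte or
 * word source and bit 3 selects zero versus sign extension.  Below,
 * d_ot is the destination (operand-size) width, ot the source width,
 * and s_ot combines the source width with the required signedness so a
 * single load or extension op can be picked.
 */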
4117 case 0x1b6: /* movzbS Gv, Eb */
4118 case 0x1b7: /* movzwS Gv, Eb */
4119 case 0x1be: /* movsbS Gv, Eb */
4120 case 0x1bf: /* movswS Gv, Eb */
4121 {
14776ab5
TN
4122 MemOp d_ot;
4123 MemOp s_ot;
c8fbc479 4124
2c0262af 4125 /* d_ot is the size of destination */
ab4e4aec 4126 d_ot = dflag;
2c0262af 4127 /* ot is the size of source */
4ba9938c 4128 ot = (b & 1) + MO_8;
c8fbc479
RH
4129 /* s_ot is the sign+size of source */
4130 s_ot = b & 8 ? MO_SIGN | ot : ot;
4131
e3af7c78 4132 modrm = x86_ldub_code(env, s);
bbdb4237 4133 reg = ((modrm >> 3) & 7) | REX_R(s);
2c0262af 4134 mod = (modrm >> 6) & 3;
14ce26e7 4135 rm = (modrm & 7) | REX_B(s);
3b46e624 4136
2c0262af 4137 if (mod == 3) {
1dbe15ef 4138 if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) {
c66f9727 4139 tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8);
04fc2f1c 4140 } else {
1dbe15ef 4141 gen_op_mov_v_reg(s, ot, s->T0, rm);
04fc2f1c
RH
4142 switch (s_ot) {
4143 case MO_UB:
c66f9727 4144 tcg_gen_ext8u_tl(s->T0, s->T0);
04fc2f1c
RH
4145 break;
4146 case MO_SB:
c66f9727 4147 tcg_gen_ext8s_tl(s->T0, s->T0);
04fc2f1c
RH
4148 break;
4149 case MO_UW:
c66f9727 4150 tcg_gen_ext16u_tl(s->T0, s->T0);
04fc2f1c
RH
4151 break;
4152 default:
4153 case MO_SW:
c66f9727 4154 tcg_gen_ext16s_tl(s->T0, s->T0);
04fc2f1c
RH
4155 break;
4156 }
2c0262af 4157 }
1dbe15ef 4158 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
2c0262af 4159 } else {
4eeb3939 4160 gen_lea_modrm(env, s, modrm);
c66f9727 4161 gen_op_ld_v(s, s_ot, s->T0, s->A0);
1dbe15ef 4162 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
2c0262af
FB
4163 }
4164 }
4165 break;
4166
4167 case 0x8d: /* lea */
e3af7c78 4168 modrm = x86_ldub_code(env, s);
3a1d9b8b
FB
4169 mod = (modrm >> 6) & 3;
4170 if (mod == 3)
4171 goto illegal_op;
bbdb4237 4172 reg = ((modrm >> 3) & 7) | REX_R(s);
a074ce42
RH
4173 {
4174 AddressParts a = gen_lea_modrm_0(env, s, modrm);
20581aad 4175 TCGv ea = gen_lea_modrm_1(s, a, false);
620abfb0 4176 gen_lea_v_seg(s, s->aflag, ea, -1, -1);
1dbe15ef 4177 gen_op_mov_reg_v(s, dflag, reg, s->A0);
a074ce42 4178 }
2c0262af 4179 break;
3b46e624 4180
2c0262af
FB
4181 case 0xa0: /* mov EAX, Ov */
4182 case 0xa1:
4183 case 0xa2: /* mov Ov, EAX */
4184 case 0xa3:
2c0262af 4185 {
14ce26e7
FB
4186 target_ulong offset_addr;
4187
ab4e4aec 4188 ot = mo_b_d(b, dflag);
efcca7ef 4189 offset_addr = insn_get_addr(env, s, s->aflag);
6b672b5d 4190 tcg_gen_movi_tl(s->A0, offset_addr);
664e0f19 4191 gen_add_A0_ds_seg(s);
14ce26e7 4192 if ((b & 2) == 0) {
c66f9727 4193 gen_op_ld_v(s, ot, s->T0, s->A0);
1dbe15ef 4194 gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
14ce26e7 4195 } else {
1dbe15ef 4196 gen_op_mov_v_reg(s, ot, s->T0, R_EAX);
c66f9727 4197 gen_op_st_v(s, ot, s->T0, s->A0);
2c0262af
FB
4198 }
4199 }
2c0262af
FB
4200 break;
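/* XLAT: AL = byte at DS:[(E/R)BX + zero-extended AL]; the DS base (or
   the active segment override) is applied by gen_add_A0_ds_seg(). */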
4201 case 0xd7: /* xlat */
6b672b5d 4202 tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]);
c66f9727
EC
4203 tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]);
4204 tcg_gen_add_tl(s->A0, s->A0, s->T0);
664e0f19 4205 gen_add_A0_ds_seg(s);
c66f9727 4206 gen_op_ld_v(s, MO_8, s->T0, s->A0);
1dbe15ef 4207 gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
2c0262af
FB
4208 break;
4209 case 0xb0 ... 0xb7: /* mov R, Ib */
4ba9938c 4210 val = insn_get(env, s, MO_8);
c66f9727 4211 tcg_gen_movi_tl(s->T0, val);
1dbe15ef 4212 gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0);
2c0262af
FB
4213 break;
4214 case 0xb8 ... 0xbf: /* mov R, Iv */
14ce26e7 4215#ifdef TARGET_X86_64
ab4e4aec 4216 if (dflag == MO_64) {
14ce26e7
FB
4217 uint64_t tmp;
4218 /* 64 bit case */
e3af7c78 4219 tmp = x86_ldq_code(env, s);
14ce26e7 4220 reg = (b & 7) | REX_B(s);
c66f9727 4221 tcg_gen_movi_tl(s->T0, tmp);
1dbe15ef 4222 gen_op_mov_reg_v(s, MO_64, reg, s->T0);
5fafdf24 4223 } else
14ce26e7
FB
4224#endif
4225 {
ab4e4aec 4226 ot = dflag;
0af10c86 4227 val = insn_get(env, s, ot);
14ce26e7 4228 reg = (b & 7) | REX_B(s);
c66f9727 4229 tcg_gen_movi_tl(s->T0, val);
1dbe15ef 4230 gen_op_mov_reg_v(s, ot, reg, s->T0);
14ce26e7 4231 }
2c0262af
FB
4232 break;
4233
4234 case 0x91 ... 0x97: /* xchg R, EAX */
7418027e 4235 do_xchg_reg_eax:
ab4e4aec 4236 ot = dflag;
14ce26e7 4237 reg = (b & 7) | REX_B(s);
2c0262af
FB
4238 rm = R_EAX;
4239 goto do_xchg_reg;
4240 case 0x86:
4241 case 0x87: /* xchg Ev, Gv */
ab4e4aec 4242 ot = mo_b_d(b, dflag);
e3af7c78 4243 modrm = x86_ldub_code(env, s);
bbdb4237 4244 reg = ((modrm >> 3) & 7) | REX_R(s);
2c0262af
FB
4245 mod = (modrm >> 6) & 3;
4246 if (mod == 3) {
14ce26e7 4247 rm = (modrm & 7) | REX_B(s);
2c0262af 4248 do_xchg_reg:
1dbe15ef
EC
4249 gen_op_mov_v_reg(s, ot, s->T0, reg);
4250 gen_op_mov_v_reg(s, ot, s->T1, rm);
4251 gen_op_mov_reg_v(s, ot, rm, s->T0);
4252 gen_op_mov_reg_v(s, ot, reg, s->T1);
2c0262af 4253 } else {
4eeb3939 4254 gen_lea_modrm(env, s, modrm);
1dbe15ef 4255 gen_op_mov_v_reg(s, ot, s->T0, reg);
2c0262af 4256 /* for xchg, lock is implicit */
b48597b0 4257 tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0,
ea97ebe8 4258 s->mem_index, ot | MO_LE);
1dbe15ef 4259 gen_op_mov_reg_v(s, ot, reg, s->T1);
2c0262af
FB
4260 }
4261 break;
4262 case 0xc4: /* les Gv */
701ed211 4263 /* In CODE64 this is VEX3; see above. */
2c0262af
FB
4264 op = R_ES;
4265 goto do_lxx;
4266 case 0xc5: /* lds Gv */
701ed211 4267 /* In CODE64 this is VEX2; see above. */
2c0262af
FB
4268 op = R_DS;
4269 goto do_lxx;
4270 case 0x1b2: /* lss Gv */
4271 op = R_SS;
4272 goto do_lxx;
4273 case 0x1b4: /* lfs Gv */
4274 op = R_FS;
4275 goto do_lxx;
4276 case 0x1b5: /* lgs Gv */
4277 op = R_GS;
4278 do_lxx:
ab4e4aec 4279 ot = dflag != MO_16 ? MO_32 : MO_16;
e3af7c78 4280 modrm = x86_ldub_code(env, s);
bbdb4237 4281 reg = ((modrm >> 3) & 7) | REX_R(s);
2c0262af
FB
4282 mod = (modrm >> 6) & 3;
4283 if (mod == 3)
4284 goto illegal_op;
4eeb3939 4285 gen_lea_modrm(env, s, modrm);
b48597b0 4286 gen_op_ld_v(s, ot, s->T1, s->A0);
830a19a4 4287 gen_add_A0_im(s, 1 << ot);
2c0262af 4288 /* load the segment first to handle exceptions properly */
c66f9727 4289 gen_op_ld_v(s, MO_16, s->T0, s->A0);
100ec099 4290 gen_movl_seg_T0(s, op);
2c0262af 4291 /* then put the data */
1dbe15ef 4292 gen_op_mov_reg_v(s, ot, reg, s->T1);
2c0262af 4293 break;
3b46e624 4294
2c0262af
FB
4295 /************************/
4296 /* shifts */
4297 case 0xc0:
4298 case 0xc1:
4299 /* shift Ev,Ib */
4300 shift = 2;
4301 grp2:
4302 {
ab4e4aec 4303 ot = mo_b_d(b, dflag);
e3af7c78 4304 modrm = x86_ldub_code(env, s);
2c0262af 4305 mod = (modrm >> 6) & 3;
2c0262af 4306 op = (modrm >> 3) & 7;
3b46e624 4307
2c0262af 4308 if (mod != 3) {
14ce26e7
FB
4309 if (shift == 2) {
4310 s->rip_offset = 1;
4311 }
4eeb3939 4312 gen_lea_modrm(env, s, modrm);
2c0262af
FB
4313 opreg = OR_TMP0;
4314 } else {
14ce26e7 4315 opreg = (modrm & 7) | REX_B(s);
2c0262af
FB
4316 }
4317
4318 /* simpler op */
4319 if (shift == 0) {
4320 gen_shift(s, op, ot, opreg, OR_ECX);
4321 } else {
4322 if (shift == 2) {
e3af7c78 4323 shift = x86_ldub_code(env, s);
2c0262af
FB
4324 }
4325 gen_shifti(s, op, ot, opreg, shift);
4326 }
4327 }
4328 break;
4329 case 0xd0:
4330 case 0xd1:
4331 /* shift Ev,1 */
4332 shift = 1;
4333 goto grp2;
4334 case 0xd2:
4335 case 0xd3:
4336 /* shift Ev,cl */
4337 shift = 0;
4338 goto grp2;
4339
4340 case 0x1a4: /* shld imm */
4341 op = 0;
4342 shift = 1;
4343 goto do_shiftd;
4344 case 0x1a5: /* shld cl */
4345 op = 0;
4346 shift = 0;
4347 goto do_shiftd;
4348 case 0x1ac: /* shrd imm */
4349 op = 1;
4350 shift = 1;
4351 goto do_shiftd;
4352 case 0x1ad: /* shrd cl */
4353 op = 1;
4354 shift = 0;
4355 do_shiftd:
ab4e4aec 4356 ot = dflag;
e3af7c78 4357 modrm = x86_ldub_code(env, s);
2c0262af 4358 mod = (modrm >> 6) & 3;
14ce26e7 4359 rm = (modrm & 7) | REX_B(s);
bbdb4237 4360 reg = ((modrm >> 3) & 7) | REX_R(s);
2c0262af 4361 if (mod != 3) {
4eeb3939 4362 gen_lea_modrm(env, s, modrm);
b6abf97d 4363 opreg = OR_TMP0;
2c0262af 4364 } else {
b6abf97d 4365 opreg = rm;
2c0262af 4366 }
1dbe15ef 4367 gen_op_mov_v_reg(s, ot, s->T1, reg);
3b46e624 4368
2c0262af 4369 if (shift) {
3df11bb1 4370 TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
3b9d3cf1 4371 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
2c0262af 4372 } else {
3b9d3cf1 4373 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
2c0262af
FB
4374 }
4375 break;
4376
4377 /************************/
4378 /* floats */
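/*
 * ESC opcodes 0xd8-0xdf: the low three opcode bits and the reg field of
 * the ModRM byte are combined into a 6-bit index,
 * op = ((b & 7) << 3) | ((modrm >> 3) & 7), used for both the memory
 * forms (mod != 3) and the register-stack forms.  For example D9 E8
 * (fld1) gives b & 7 = 1 and (0xe8 >> 3) & 7 = 5, so op = 0x0d with
 * rm = 0, the "grp d9/5" slot that pushes 1.0.  Most instructions also
 * record the x87 instruction and data pointers, tracked below by the
 * update_fip/update_fdp flags.
 */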
5fafdf24 4379 case 0xd8 ... 0xdf:
505910a6 4380 {
84abdd7d
ZK
4381 bool update_fip = true;
4382
505910a6
ZK
4383 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
4384 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
4385 /* XXX: what should happen if the opcode is also illegal? */
52236550 4386 gen_exception(s, EXCP07_PREX);
505910a6
ZK
4387 break;
4388 }
4389 modrm = x86_ldub_code(env, s);
4390 mod = (modrm >> 6) & 3;
4391 rm = modrm & 7;
4392 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
4393 if (mod != 3) {
4394 /* memory op */
84abdd7d 4395 AddressParts a = gen_lea_modrm_0(env, s, modrm);
20581aad 4396 TCGv ea = gen_lea_modrm_1(s, a, false);
84abdd7d
ZK
4397 TCGv last_addr = tcg_temp_new();
4398 bool update_fdp = true;
4399
4400 tcg_gen_mov_tl(last_addr, ea);
4401 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
4402
505910a6
ZK
4403 switch (op) {
4404 case 0x00 ... 0x07: /* fxxxs */
4405 case 0x10 ... 0x17: /* fixxxl */
4406 case 0x20 ... 0x27: /* fxxxl */
4407 case 0x30 ... 0x37: /* fixxx */
4408 {
4409 int op1;
4410 op1 = op & 7;
4411
4412 switch (op >> 4) {
4413 case 0:
4414 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4415 s->mem_index, MO_LEUL);
ad75a51e 4416 gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
505910a6
ZK
4417 break;
4418 case 1:
4419 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4420 s->mem_index, MO_LEUL);
ad75a51e 4421 gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
505910a6
ZK
4422 break;
4423 case 2:
4424 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
fc313c64 4425 s->mem_index, MO_LEUQ);
ad75a51e 4426 gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
505910a6
ZK
4427 break;
4428 case 3:
4429 default:
4430 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4431 s->mem_index, MO_LESW);
ad75a51e 4432 gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
505910a6
ZK
4433 break;
4434 }
2c0262af 4435
505910a6
ZK
4436 gen_helper_fp_arith_ST0_FT0(op1);
4437 if (op1 == 3) {
4438 /* fcomp needs pop */
ad75a51e 4439 gen_helper_fpop(tcg_env);
505910a6
ZK
4440 }
4441 }
4442 break;
4443 case 0x08: /* flds */
4444 case 0x0a: /* fsts */
4445 case 0x0b: /* fstps */
4446 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
4447 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
4448 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
4449 switch (op & 7) {
2c0262af 4450 case 0:
505910a6
ZK
4451 switch (op >> 4) {
4452 case 0:
4453 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4454 s->mem_index, MO_LEUL);
ad75a51e 4455 gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
505910a6
ZK
4456 break;
4457 case 1:
4458 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4459 s->mem_index, MO_LEUL);
ad75a51e 4460 gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
505910a6
ZK
4461 break;
4462 case 2:
4463 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
fc313c64 4464 s->mem_index, MO_LEUQ);
ad75a51e 4465 gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
505910a6
ZK
4466 break;
4467 case 3:
4468 default:
4469 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4470 s->mem_index, MO_LESW);
ad75a51e 4471 gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
505910a6
ZK
4472 break;
4473 }
2c0262af
FB
4474 break;
4475 case 1:
505910a6
ZK
4476 /* XXX: the corresponding CPUID bit (SSE3, for the FISTTP forms) must be tested! */
4477 switch (op >> 4) {
4478 case 1:
ad75a51e 4479 gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
505910a6
ZK
4480 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4481 s->mem_index, MO_LEUL);
4482 break;
4483 case 2:
ad75a51e 4484 gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
505910a6 4485 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
fc313c64 4486 s->mem_index, MO_LEUQ);
505910a6
ZK
4487 break;
4488 case 3:
4489 default:
ad75a51e 4490 gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
505910a6
ZK
4491 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4492 s->mem_index, MO_LEUW);
4493 break;
4494 }
ad75a51e 4495 gen_helper_fpop(tcg_env);
2c0262af 4496 break;
2c0262af 4497 default:
505910a6
ZK
4498 switch (op >> 4) {
4499 case 0:
ad75a51e 4500 gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
505910a6
ZK
4501 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4502 s->mem_index, MO_LEUL);
4503 break;
4504 case 1:
ad75a51e 4505 gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
505910a6
ZK
4506 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4507 s->mem_index, MO_LEUL);
4508 break;
4509 case 2:
ad75a51e 4510 gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
505910a6 4511 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
fc313c64 4512 s->mem_index, MO_LEUQ);
505910a6
ZK
4513 break;
4514 case 3:
4515 default:
ad75a51e 4516 gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
505910a6
ZK
4517 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4518 s->mem_index, MO_LEUW);
4519 break;
4520 }
4521 if ((op & 7) == 3) {
ad75a51e 4522 gen_helper_fpop(tcg_env);
505910a6 4523 }
2c0262af
FB
4524 break;
4525 }
505910a6
ZK
4526 break;
4527 case 0x0c: /* fldenv mem */
ad75a51e 4528 gen_helper_fldenv(tcg_env, s->A0,
3df11bb1 4529 tcg_constant_i32(dflag - 1));
84abdd7d 4530 update_fip = update_fdp = false;
505910a6
ZK
4531 break;
4532 case 0x0d: /* fldcw mem */
4533 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4534 s->mem_index, MO_LEUW);
ad75a51e 4535 gen_helper_fldcw(tcg_env, s->tmp2_i32);
84abdd7d 4536 update_fip = update_fdp = false;
505910a6
ZK
4537 break;
4538 case 0x0e: /* fnstenv mem */
ad75a51e 4539 gen_helper_fstenv(tcg_env, s->A0,
3df11bb1 4540 tcg_constant_i32(dflag - 1));
84abdd7d 4541 update_fip = update_fdp = false;
505910a6
ZK
4542 break;
4543 case 0x0f: /* fnstcw mem */
ad75a51e 4544 gen_helper_fnstcw(s->tmp2_i32, tcg_env);
505910a6
ZK
4545 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4546 s->mem_index, MO_LEUW);
84abdd7d 4547 update_fip = update_fdp = false;
505910a6
ZK
4548 break;
4549 case 0x1d: /* fldt mem */
ad75a51e 4550 gen_helper_fldt_ST0(tcg_env, s->A0);
505910a6
ZK
4551 break;
4552 case 0x1f: /* fstpt mem */
ad75a51e
RH
4553 gen_helper_fstt_ST0(tcg_env, s->A0);
4554 gen_helper_fpop(tcg_env);
505910a6
ZK
4555 break;
4556 case 0x2c: /* frstor mem */
ad75a51e 4557 gen_helper_frstor(tcg_env, s->A0,
3df11bb1 4558 tcg_constant_i32(dflag - 1));
84abdd7d 4559 update_fip = update_fdp = false;
505910a6
ZK
4560 break;
4561 case 0x2e: /* fnsave mem */
ad75a51e 4562 gen_helper_fsave(tcg_env, s->A0,
3df11bb1 4563 tcg_constant_i32(dflag - 1));
84abdd7d 4564 update_fip = update_fdp = false;
505910a6
ZK
4565 break;
4566 case 0x2f: /* fnstsw mem */
ad75a51e 4567 gen_helper_fnstsw(s->tmp2_i32, tcg_env);
505910a6
ZK
4568 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4569 s->mem_index, MO_LEUW);
84abdd7d 4570 update_fip = update_fdp = false;
505910a6
ZK
4571 break;
4572 case 0x3c: /* fbld */
ad75a51e 4573 gen_helper_fbld_ST0(tcg_env, s->A0);
505910a6
ZK
4574 break;
4575 case 0x3e: /* fbstp */
ad75a51e
RH
4576 gen_helper_fbst_ST0(tcg_env, s->A0);
4577 gen_helper_fpop(tcg_env);
505910a6
ZK
4578 break;
4579 case 0x3d: /* fildll */
4580 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
fc313c64 4581 s->mem_index, MO_LEUQ);
ad75a51e 4582 gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
505910a6
ZK
4583 break;
4584 case 0x3f: /* fistpll */
ad75a51e 4585 gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
505910a6 4586 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
fc313c64 4587 s->mem_index, MO_LEUQ);
ad75a51e 4588 gen_helper_fpop(tcg_env);
505910a6
ZK
4589 break;
4590 default:
4591 goto unknown_op;
4592 }
84abdd7d
ZK
4593
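/*
 * Record the last data operand: the x87 FDP/FDS fields hold the
 * effective address and segment selector of the most recent memory
 * operand and are later readable via FNSTENV/FSAVE/FXSAVE.  Control
 * operations such as fldcw/fnstenv/fnsave clear update_fdp above so
 * they leave the pointers from the last data-accessing instruction
 * intact.
 */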
4594 if (update_fdp) {
4595 int last_seg = s->override >= 0 ? s->override : a.def_seg;
4596
ad75a51e 4597 tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
84abdd7d
ZK
4598 offsetof(CPUX86State,
4599 segs[last_seg].selector));
ad75a51e 4600 tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
84abdd7d 4601 offsetof(CPUX86State, fpds));
ad75a51e 4602 tcg_gen_st_tl(last_addr, tcg_env,
84abdd7d
ZK
4603 offsetof(CPUX86State, fpdp));
4604 }
505910a6
ZK
4605 } else {
4606 /* register float ops */
4607 opreg = rm;
3b46e624 4608
505910a6
ZK
4609 switch (op) {
4610 case 0x08: /* fld sti */
ad75a51e
RH
4611 gen_helper_fpush(tcg_env);
4612 gen_helper_fmov_ST0_STN(tcg_env,
3df11bb1 4613 tcg_constant_i32((opreg + 1) & 7));
505910a6
ZK
4614 break;
4615 case 0x09: /* fxchg sti */
4616 case 0x29: /* fxchg4 sti, undocumented op */
4617 case 0x39: /* fxchg7 sti, undocumented op */
ad75a51e 4618 gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
505910a6
ZK
4619 break;
4620 case 0x0a: /* grp d9/2 */
4621 switch (rm) {
4622 case 0: /* fnop */
c1f27a0c
PB
4623 /*
4624 * check exceptions (FreeBSD FPU probe)
4625 * needs to be treated as I/O because of ferr_irq
4626 */
4627 translator_io_start(&s->base);
ad75a51e 4628 gen_helper_fwait(tcg_env);
84abdd7d 4629 update_fip = false;
505910a6
ZK
4630 break;
4631 default:
4632 goto unknown_op;
2c0262af 4633 }
505910a6
ZK
4634 break;
4635 case 0x0c: /* grp d9/4 */
4636 switch (rm) {
4637 case 0: /* fchs */
ad75a51e 4638 gen_helper_fchs_ST0(tcg_env);
2c0262af 4639 break;
505910a6 4640 case 1: /* fabs */
ad75a51e 4641 gen_helper_fabs_ST0(tcg_env);
2c0262af 4642 break;
505910a6 4643 case 4: /* ftst */
ad75a51e
RH
4644 gen_helper_fldz_FT0(tcg_env);
4645 gen_helper_fcom_ST0_FT0(tcg_env);
2c0262af 4646 break;
505910a6 4647 case 5: /* fxam */
ad75a51e 4648 gen_helper_fxam_ST0(tcg_env);
2c0262af 4649 break;
505910a6
ZK
4650 default:
4651 goto unknown_op;
2c0262af
FB
4652 }
4653 break;
505910a6
ZK
4654 case 0x0d: /* grp d9/5 */
4655 {
4656 switch (rm) {
4657 case 0:
ad75a51e
RH
4658 gen_helper_fpush(tcg_env);
4659 gen_helper_fld1_ST0(tcg_env);
505910a6
ZK
4660 break;
4661 case 1:
ad75a51e
RH
4662 gen_helper_fpush(tcg_env);
4663 gen_helper_fldl2t_ST0(tcg_env);
505910a6
ZK
4664 break;
4665 case 2:
ad75a51e
RH
4666 gen_helper_fpush(tcg_env);
4667 gen_helper_fldl2e_ST0(tcg_env);
505910a6
ZK
4668 break;
4669 case 3:
ad75a51e
RH
4670 gen_helper_fpush(tcg_env);
4671 gen_helper_fldpi_ST0(tcg_env);
505910a6
ZK
4672 break;
4673 case 4:
ad75a51e
RH
4674 gen_helper_fpush(tcg_env);
4675 gen_helper_fldlg2_ST0(tcg_env);
505910a6
ZK
4676 break;
4677 case 5:
ad75a51e
RH
4678 gen_helper_fpush(tcg_env);
4679 gen_helper_fldln2_ST0(tcg_env);
505910a6
ZK
4680 break;
4681 case 6:
ad75a51e
RH
4682 gen_helper_fpush(tcg_env);
4683 gen_helper_fldz_ST0(tcg_env);
505910a6
ZK
4684 break;
4685 default:
4686 goto unknown_op;
4687 }
4688 }
4689 break;
4690 case 0x0e: /* grp d9/6 */
4691 switch (rm) {
4692 case 0: /* f2xm1 */
ad75a51e 4693 gen_helper_f2xm1(tcg_env);
505910a6
ZK
4694 break;
4695 case 1: /* fyl2x */
ad75a51e 4696 gen_helper_fyl2x(tcg_env);
505910a6
ZK
4697 break;
4698 case 2: /* fptan */
ad75a51e 4699 gen_helper_fptan(tcg_env);
505910a6
ZK
4700 break;
4701 case 3: /* fpatan */
ad75a51e 4702 gen_helper_fpatan(tcg_env);
465e9838 4703 break;
505910a6 4704 case 4: /* fxtract */
ad75a51e 4705 gen_helper_fxtract(tcg_env);
505910a6
ZK
4706 break;
4707 case 5: /* fprem1 */
ad75a51e 4708 gen_helper_fprem1(tcg_env);
505910a6
ZK
4709 break;
4710 case 6: /* fdecstp */
ad75a51e 4711 gen_helper_fdecstp(tcg_env);
465e9838 4712 break;
465e9838 4713 default:
505910a6 4714 case 7: /* fincstp */
ad75a51e 4715 gen_helper_fincstp(tcg_env);
19e6c4b8 4716 break;
465e9838 4717 }
465e9838 4718 break;
505910a6
ZK
4719 case 0x0f: /* grp d9/7 */
4720 switch (rm) {
4721 case 0: /* fprem */
ad75a51e 4722 gen_helper_fprem(tcg_env);
2c0262af 4723 break;
505910a6 4724 case 1: /* fyl2xp1 */
ad75a51e 4725 gen_helper_fyl2xp1(tcg_env);
505910a6
ZK
4726 break;
4727 case 2: /* fsqrt */
ad75a51e 4728 gen_helper_fsqrt(tcg_env);
505910a6
ZK
4729 break;
4730 case 3: /* fsincos */
ad75a51e 4731 gen_helper_fsincos(tcg_env);
505910a6
ZK
4732 break;
4733 case 5: /* fscale */
ad75a51e 4734 gen_helper_fscale(tcg_env);
505910a6
ZK
4735 break;
4736 case 4: /* frndint */
ad75a51e 4737 gen_helper_frndint(tcg_env);
2c0262af 4738 break;
505910a6 4739 case 6: /* fsin */
ad75a51e 4740 gen_helper_fsin(tcg_env);
2c0262af 4741 break;
2c0262af 4742 default:
505910a6 4743 case 7: /* fcos */
ad75a51e 4744 gen_helper_fcos(tcg_env);
2c0262af
FB
4745 break;
4746 }
2c0262af 4747 break;
505910a6
ZK
4748 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
4749 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
4750 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
4751 {
4752 int op1;
4753
4754 op1 = op & 7;
4755 if (op >= 0x20) {
4756 gen_helper_fp_arith_STN_ST0(op1, opreg);
4757 if (op >= 0x30) {
ad75a51e 4758 gen_helper_fpop(tcg_env);
505910a6
ZK
4759 }
4760 } else {
ad75a51e 4761 gen_helper_fmov_FT0_STN(tcg_env,
3df11bb1 4762 tcg_constant_i32(opreg));
505910a6
ZK
4763 gen_helper_fp_arith_ST0_FT0(op1);
4764 }
4765 }
2c0262af 4766 break;
505910a6
ZK
4767 case 0x02: /* fcom */
4768 case 0x22: /* fcom2, undocumented op */
ad75a51e
RH
4769 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4770 gen_helper_fcom_ST0_FT0(tcg_env);
2c0262af 4771 break;
505910a6
ZK
4772 case 0x03: /* fcomp */
4773 case 0x23: /* fcomp3, undocumented op */
4774 case 0x32: /* fcomp5, undocumented op */
ad75a51e
RH
4775 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4776 gen_helper_fcom_ST0_FT0(tcg_env);
4777 gen_helper_fpop(tcg_env);
2c0262af 4778 break;
505910a6
ZK
4779 case 0x15: /* da/5 */
4780 switch (rm) {
4781 case 1: /* fucompp */
ad75a51e
RH
4782 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
4783 gen_helper_fucom_ST0_FT0(tcg_env);
4784 gen_helper_fpop(tcg_env);
4785 gen_helper_fpop(tcg_env);
2c0262af 4786 break;
505910a6
ZK
4787 default:
4788 goto unknown_op;
4789 }
4790 break;
4791 case 0x1c:
4792 switch (rm) {
4793 case 0: /* feni (287 only, just do nop here) */
2c0262af 4794 break;
505910a6 4795 case 1: /* fdisi (287 only, just do nop here) */
2c0262af 4796 break;
505910a6 4797 case 2: /* fclex */
ad75a51e 4798 gen_helper_fclex(tcg_env);
84abdd7d 4799 update_fip = false;
2c0262af 4800 break;
505910a6 4801 case 3: /* fninit */
ad75a51e 4802 gen_helper_fninit(tcg_env);
84abdd7d 4803 update_fip = false;
2c0262af 4804 break;
505910a6 4805 case 4: /* fsetpm (287 only, just do nop here) */
2c0262af
FB
4806 break;
4807 default:
b9f9c5b4 4808 goto unknown_op;
2c0262af 4809 }
2c0262af 4810 break;
505910a6
ZK
4811 case 0x1d: /* fucomi */
4812 if (!(s->cpuid_features & CPUID_CMOV)) {
4813 goto illegal_op;
4814 }
4815 gen_update_cc_op(s);
ad75a51e
RH
4816 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4817 gen_helper_fucomi_ST0_FT0(tcg_env);
505910a6 4818 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 4819 break;
505910a6
ZK
4820 case 0x1e: /* fcomi */
4821 if (!(s->cpuid_features & CPUID_CMOV)) {
4822 goto illegal_op;
4823 }
4824 gen_update_cc_op(s);
ad75a51e
RH
4825 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4826 gen_helper_fcomi_ST0_FT0(tcg_env);
505910a6 4827 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 4828 break;
505910a6 4829 case 0x28: /* ffree sti */
ad75a51e 4830 gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2c0262af 4831 break;
505910a6 4832 case 0x2a: /* fst sti */
ad75a51e 4833 gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2c0262af 4834 break;
505910a6
ZK
4835 case 0x2b: /* fstp sti */
4836 case 0x0b: /* fstp1 sti, undocumented op */
4837 case 0x3a: /* fstp8 sti, undocumented op */
4838 case 0x3b: /* fstp9 sti, undocumented op */
ad75a51e
RH
4839 gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
4840 gen_helper_fpop(tcg_env);
2c0262af 4841 break;
505910a6 4842 case 0x2c: /* fucom st(i) */
ad75a51e
RH
4843 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4844 gen_helper_fucom_ST0_FT0(tcg_env);
2c0262af 4845 break;
505910a6 4846 case 0x2d: /* fucomp st(i) */
ad75a51e
RH
4847 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4848 gen_helper_fucom_ST0_FT0(tcg_env);
4849 gen_helper_fpop(tcg_env);
2c0262af 4850 break;
505910a6
ZK
4851 case 0x33: /* de/3 */
4852 switch (rm) {
4853 case 1: /* fcompp */
ad75a51e
RH
4854 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
4855 gen_helper_fcom_ST0_FT0(tcg_env);
4856 gen_helper_fpop(tcg_env);
4857 gen_helper_fpop(tcg_env);
505910a6
ZK
4858 break;
4859 default:
4860 goto unknown_op;
4861 }
2c0262af 4862 break;
505910a6 4863 case 0x38: /* ffreep sti, undocumented op */
ad75a51e
RH
4864 gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
4865 gen_helper_fpop(tcg_env);
2c0262af 4866 break;
505910a6
ZK
4867 case 0x3c: /* df/4 */
4868 switch (rm) {
4869 case 0:
ad75a51e 4870 gen_helper_fnstsw(s->tmp2_i32, tcg_env);
505910a6
ZK
4871 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
4872 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
4873 break;
4874 default:
4875 goto unknown_op;
4876 }
2c0262af 4877 break;
505910a6
ZK
4878 case 0x3d: /* fucomip */
4879 if (!(s->cpuid_features & CPUID_CMOV)) {
4880 goto illegal_op;
4881 }
4882 gen_update_cc_op(s);
ad75a51e
RH
4883 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4884 gen_helper_fucomi_ST0_FT0(tcg_env);
4885 gen_helper_fpop(tcg_env);
505910a6
ZK
4886 set_cc_op(s, CC_OP_EFLAGS);
4887 break;
4888 case 0x3e: /* fcomip */
4889 if (!(s->cpuid_features & CPUID_CMOV)) {
4890 goto illegal_op;
4891 }
4892 gen_update_cc_op(s);
ad75a51e
RH
4893 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4894 gen_helper_fcomi_ST0_FT0(tcg_env);
4895 gen_helper_fpop(tcg_env);
505910a6 4896 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 4897 break;
505910a6
ZK
4898 case 0x10 ... 0x13: /* fcmovxx */
4899 case 0x18 ... 0x1b:
4900 {
4901 int op1;
4902 TCGLabel *l1;
4903 static const uint8_t fcmov_cc[8] = {
4904 (JCC_B << 1),
4905 (JCC_Z << 1),
4906 (JCC_BE << 1),
4907 (JCC_P << 1),
4908 };
4909
4910 if (!(s->cpuid_features & CPUID_CMOV)) {
4911 goto illegal_op;
4912 }
4913 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
4914 l1 = gen_new_label();
4915 gen_jcc1_noeob(s, op1, l1);
ad75a51e 4916 gen_helper_fmov_ST0_STN(tcg_env,
3df11bb1 4917 tcg_constant_i32(opreg));
505910a6
ZK
4918 gen_set_label(l1);
4919 }
2c0262af
FB
4920 break;
4921 default:
b9f9c5b4 4922 goto unknown_op;
2c0262af 4923 }
2c0262af 4924 }
84abdd7d
ZK
4925
4926 if (update_fip) {
ad75a51e 4927 tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
84abdd7d 4928 offsetof(CPUX86State, segs[R_CS].selector));
ad75a51e 4929 tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
84abdd7d 4930 offsetof(CPUX86State, fpcs));
75ec746a 4931 tcg_gen_st_tl(eip_cur_tl(s),
ad75a51e 4932 tcg_env, offsetof(CPUX86State, fpip));
84abdd7d 4933 }
2c0262af
FB
4934 }
4935 break;
4936 /************************/
4937 /* string ops */
4938
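/*
 * MOVS/STOS/LODS/SCAS/CMPS: with a REP/REPE/REPNE prefix the
 * gen_repz_* helpers emit a loop that decrements (E/R)CX on each
 * iteration; SCAS and CMPS additionally stop when ZF disagrees with the
 * REPE/REPNE condition, which is what their extra 0/1 argument selects.
 * Without a prefix a single iteration is generated.
 */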
4939 case 0xa4: /* movsS */
4940 case 0xa5:
ab4e4aec 4941 ot = mo_b_d(b, dflag);
2c0262af 4942 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
122e6d7b 4943 gen_repz_movs(s, ot);
2c0262af
FB
4944 } else {
4945 gen_movs(s, ot);
4946 }
4947 break;
3b46e624 4948
2c0262af
FB
4949 case 0xaa: /* stosS */
4950 case 0xab:
ab4e4aec 4951 ot = mo_b_d(b, dflag);
2c0262af 4952 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
122e6d7b 4953 gen_repz_stos(s, ot);
2c0262af
FB
4954 } else {
4955 gen_stos(s, ot);
4956 }
4957 break;
4958 case 0xac: /* lodsS */
4959 case 0xad:
ab4e4aec 4960 ot = mo_b_d(b, dflag);
2c0262af 4961 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
122e6d7b 4962 gen_repz_lods(s, ot);
2c0262af
FB
4963 } else {
4964 gen_lods(s, ot);
4965 }
4966 break;
4967 case 0xae: /* scasS */
4968 case 0xaf:
ab4e4aec 4969 ot = mo_b_d(b, dflag);
2c0262af 4970 if (prefixes & PREFIX_REPNZ) {
122e6d7b 4971 gen_repz_scas(s, ot, 1);
2c0262af 4972 } else if (prefixes & PREFIX_REPZ) {
122e6d7b 4973 gen_repz_scas(s, ot, 0);
2c0262af
FB
4974 } else {
4975 gen_scas(s, ot);
2c0262af
FB
4976 }
4977 break;
4978
4979 case 0xa6: /* cmpsS */
4980 case 0xa7:
ab4e4aec 4981 ot = mo_b_d(b, dflag);
2c0262af 4982 if (prefixes & PREFIX_REPNZ) {
122e6d7b 4983 gen_repz_cmps(s, ot, 1);
2c0262af 4984 } else if (prefixes & PREFIX_REPZ) {
122e6d7b 4985 gen_repz_cmps(s, ot, 0);
2c0262af
FB
4986 } else {
4987 gen_cmps(s, ot);
2c0262af
FB
4988 }
4989 break;
4990 case 0x6c: /* insS */
4991 case 0x6d:
ab4e4aec 4992 ot = mo_b_d32(b, dflag);
1bca40fe
RH
4993 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
4994 tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
4995 if (!gen_check_io(s, ot, s->tmp2_i32,
4996 SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
bc2e436d
RH
4997 break;
4998 }
dfd1b812 4999 translator_io_start(&s->base);
f115e911 5000 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
122e6d7b 5001 gen_repz_ins(s, ot);
2c0262af 5002 } else {
f115e911 5003 gen_ins(s, ot);
2c0262af
FB
5004 }
5005 break;
5006 case 0x6e: /* outsS */
5007 case 0x6f:
ab4e4aec 5008 ot = mo_b_d32(b, dflag);
1bca40fe
RH
5009 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5010 tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5011 if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) {
bc2e436d
RH
5012 break;
5013 }
dfd1b812 5014 translator_io_start(&s->base);
f115e911 5015 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
122e6d7b 5016 gen_repz_outs(s, ot);
2c0262af 5017 } else {
f115e911 5018 gen_outs(s, ot);
2c0262af
FB
5019 }
5020 break;
5021
5022 /************************/
5023 /* port I/O */
0573fbfc 5024
2c0262af
FB
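/*
 * IN/OUT: 0xe4-0xe7 take an immediate 8-bit port number, 0xec-0xef take
 * the port from DX.  gen_check_io() emits the TSS I/O permission bitmap
 * and SVM IOIO intercept checks; when it returns false the access is
 * simply skipped here, the check having already taken care of raising
 * the fault.  translator_io_start() marks the TB as performing I/O for
 * icount purposes.
 */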
5025 case 0xe4:
5026 case 0xe5:
ab4e4aec 5027 ot = mo_b_d32(b, dflag);
e3af7c78 5028 val = x86_ldub_code(env, s);
1bca40fe
RH
5029 tcg_gen_movi_i32(s->tmp2_i32, val);
5030 if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
bc2e436d
RH
5031 break;
5032 }
dfd1b812 5033 translator_io_start(&s->base);
6bd48f6f 5034 gen_helper_in_func(ot, s->T1, s->tmp2_i32);
1dbe15ef 5035 gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
6bd48f6f 5036 gen_bpt_io(s, s->tmp2_i32, ot);
2c0262af
FB
5037 break;
5038 case 0xe6:
5039 case 0xe7:
ab4e4aec 5040 ot = mo_b_d32(b, dflag);
e3af7c78 5041 val = x86_ldub_code(env, s);
1bca40fe
RH
5042 tcg_gen_movi_i32(s->tmp2_i32, val);
5043 if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
bc2e436d
RH
5044 break;
5045 }
dfd1b812 5046 translator_io_start(&s->base);
1bca40fe 5047 gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
4f82446d
EC
5048 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
5049 gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
6bd48f6f 5050 gen_bpt_io(s, s->tmp2_i32, ot);
2c0262af
FB
5051 break;
5052 case 0xec:
5053 case 0xed:
ab4e4aec 5054 ot = mo_b_d32(b, dflag);
1bca40fe
RH
5055 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5056 tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5057 if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
bc2e436d
RH
5058 break;
5059 }
dfd1b812 5060 translator_io_start(&s->base);
6bd48f6f 5061 gen_helper_in_func(ot, s->T1, s->tmp2_i32);
1dbe15ef 5062 gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
6bd48f6f 5063 gen_bpt_io(s, s->tmp2_i32, ot);
2c0262af
FB
5064 break;
5065 case 0xee:
5066 case 0xef:
ab4e4aec 5067 ot = mo_b_d32(b, dflag);
1bca40fe
RH
5068 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5069 tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5070 if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
bc2e436d
RH
5071 break;
5072 }
dfd1b812 5073 translator_io_start(&s->base);
1bca40fe 5074 gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
4f82446d
EC
5075 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
5076 gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
6bd48f6f 5077 gen_bpt_io(s, s->tmp2_i32, ot);
2c0262af
FB
5078 break;
5079
5080 /************************/
5081 /* control */
5082 case 0xc2: /* ret im */
e3af7c78 5083 val = x86_ldsw_code(env, s);
8e31d234
RH
5084 ot = gen_pop_T0(s);
5085 gen_stack_update(s, val + (1 << ot));
5086 /* Note that gen_pop_T0 uses a zero-extending load. */
e3a79e0e 5087 gen_op_jmp_v(s, s->T0);
7d117ce8 5088 gen_bnd_jmp(s);
faf9ea5f 5089 s->base.is_jmp = DISAS_JUMP;
2c0262af
FB
5090 break;
5091 case 0xc3: /* ret */
8e31d234
RH
5092 ot = gen_pop_T0(s);
5093 gen_pop_update(s, ot);
5094 /* Note that gen_pop_T0 uses a zero-extending load. */
e3a79e0e 5095 gen_op_jmp_v(s, s->T0);
7d117ce8 5096 gen_bnd_jmp(s);
faf9ea5f 5097 s->base.is_jmp = DISAS_JUMP;
2c0262af
FB
5098 break;
5099 case 0xca: /* lret im */
e3af7c78 5100 val = x86_ldsw_code(env, s);
2c0262af 5101 do_lret:
f8a35846 5102 if (PE(s) && !VM86(s)) {
773cdfcc 5103 gen_update_cc_op(s);
65e4af23 5104 gen_update_eip_cur(s);
ad75a51e 5105 gen_helper_lret_protected(tcg_env, tcg_constant_i32(dflag - 1),
3df11bb1 5106 tcg_constant_i32(val));
2c0262af
FB
5107 } else {
5108 gen_stack_A0(s);
5109 /* pop offset */
c66f9727 5110 gen_op_ld_v(s, dflag, s->T0, s->A0);
2c0262af
FB
5111 /* NOTE: keeping EIP updated is not a problem in case of
5112 exception */
e3a79e0e 5113 gen_op_jmp_v(s, s->T0);
2c0262af 5114 /* pop selector */
4e85057b 5115 gen_add_A0_im(s, 1 << dflag);
c66f9727
EC
5116 gen_op_ld_v(s, dflag, s->T0, s->A0);
5117 gen_op_movl_seg_T0_vm(s, R_CS);
2c0262af 5118 /* add stack offset */
ab4e4aec 5119 gen_stack_update(s, val + (2 << dflag));
2c0262af 5120 }
6424ac8e 5121 s->base.is_jmp = DISAS_EOB_ONLY;
2c0262af
FB
5122 break;
5123 case 0xcb: /* lret */
5124 val = 0;
5125 goto do_lret;
5126 case 0xcf: /* iret */
b53605db 5127 gen_svm_check_intercept(s, SVM_EXIT_IRET);
f8a35846 5128 if (!PE(s) || VM86(s)) {
e048f3d6 5129 /* real mode or vm86 mode */
aa9f21b1 5130 if (!check_vm86_iopl(s)) {
e048f3d6 5131 break;
f115e911 5132 }
ad75a51e 5133 gen_helper_iret_real(tcg_env, tcg_constant_i32(dflag - 1));
2c0262af 5134 } else {
ad75a51e 5135 gen_helper_iret_protected(tcg_env, tcg_constant_i32(dflag - 1),
9e599bf7 5136 eip_next_i32(s));
2c0262af 5137 }
e048f3d6 5138 set_cc_op(s, CC_OP_EFLAGS);
6424ac8e 5139 s->base.is_jmp = DISAS_EOB_ONLY;
2c0262af
FB
5140 break;
5141 case 0xe8: /* call im */
5142 {
8760ded6
RH
5143 int diff = (dflag != MO_16
5144 ? (int32_t)insn_get(env, s, MO_32)
5145 : (int16_t)insn_get(env, s, MO_16));
9e599bf7 5146 gen_push_v(s, eip_next_tl(s));
7d117ce8 5147 gen_bnd_jmp(s);
8760ded6 5148 gen_jmp_rel(s, dflag, diff, 0);
2c0262af
FB
5149 }
5150 break;
5151 case 0x9a: /* lcall im */
5152 {
5153 unsigned int selector, offset;
3b46e624 5154
14ce26e7
FB
5155 if (CODE64(s))
5156 goto illegal_op;
ab4e4aec 5157 ot = dflag;
0af10c86 5158 offset = insn_get(env, s, ot);
4ba9938c 5159 selector = insn_get(env, s, MO_16);
3b46e624 5160
c66f9727 5161 tcg_gen_movi_tl(s->T0, selector);
b48597b0 5162 tcg_gen_movi_tl(s->T1, offset);
2c0262af
FB
5163 }
5164 goto do_lcall;
ecada8a2 5165 case 0xe9: /* jmp im */
8760ded6
RH
5166 {
5167 int diff = (dflag != MO_16
5168 ? (int32_t)insn_get(env, s, MO_32)
5169 : (int16_t)insn_get(env, s, MO_16));
5170 gen_bnd_jmp(s);
5171 gen_jmp_rel(s, dflag, diff, 0);
ab4e4aec 5172 }
2c0262af
FB
5173 break;
5174 case 0xea: /* ljmp im */
5175 {
5176 unsigned int selector, offset;
5177
14ce26e7
FB
5178 if (CODE64(s))
5179 goto illegal_op;
ab4e4aec 5180 ot = dflag;
0af10c86 5181 offset = insn_get(env, s, ot);
4ba9938c 5182 selector = insn_get(env, s, MO_16);
3b46e624 5183
c66f9727 5184 tcg_gen_movi_tl(s->T0, selector);
b48597b0 5185 tcg_gen_movi_tl(s->T1, offset);
2c0262af
FB
5186 }
5187 goto do_ljmp;
5188 case 0xeb: /* jmp Jb */
8760ded6
RH
5189 {
5190 int diff = (int8_t)insn_get(env, s, MO_8);
5191 gen_jmp_rel(s, dflag, diff, 0);
ab4e4aec 5192 }
2c0262af
FB
5193 break;
5194 case 0x70 ... 0x7f: /* jcc Jb */
54b191de
RH
5195 {
5196 int diff = (int8_t)insn_get(env, s, MO_8);
5197 gen_bnd_jmp(s);
5198 gen_jcc(s, b, diff);
2c0262af 5199 }
54b191de
RH
5200 break;
5201 case 0x180 ... 0x18f: /* jcc Jv */
5202 {
5203 int diff = (dflag != MO_16
5204 ? (int32_t)insn_get(env, s, MO_32)
5205 : (int16_t)insn_get(env, s, MO_16));
5206 gen_bnd_jmp(s);
5207 gen_jcc(s, b, diff);
ab4e4aec 5208 }
2c0262af
FB
5209 break;
5210
5211 case 0x190 ... 0x19f: /* setcc Gv */
e3af7c78 5212 modrm = x86_ldub_code(env, s);
c66f9727 5213 gen_setcc1(s, b, s->T0);
4ba9938c 5214 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
2c0262af
FB
5215 break;
5216 case 0x140 ... 0x14f: /* cmov Gv, Ev */
bff93281
PM
5217 if (!(s->cpuid_features & CPUID_CMOV)) {
5218 goto illegal_op;
5219 }
ab4e4aec 5220 ot = dflag;
e3af7c78 5221 modrm = x86_ldub_code(env, s);
bbdb4237 5222 reg = ((modrm >> 3) & 7) | REX_R(s);
f32d3781 5223 gen_cmovcc1(env, s, ot, b, modrm, reg);
2c0262af 5224 break;
3b46e624 5225
2c0262af
FB
5226 /************************/
5227 /* flags */
5228 case 0x9c: /* pushf */
b53605db 5229 gen_svm_check_intercept(s, SVM_EXIT_PUSHF);
aa9f21b1 5230 if (check_vm86_iopl(s)) {
773cdfcc 5231 gen_update_cc_op(s);
ad75a51e 5232 gen_helper_read_eflags(s->T0, tcg_env);
c66f9727 5233 gen_push_v(s, s->T0);
2c0262af
FB
5234 }
5235 break;
5236 case 0x9d: /* popf */
b53605db 5237 gen_svm_check_intercept(s, SVM_EXIT_POPF);
aa9f21b1 5238 if (check_vm86_iopl(s)) {
3e7da311
RH
5239 int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK;
5240
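             /* Build the set of EFLAGS bits this POPF may modify: IF may
                only be changed when CPL <= IOPL, and IOPL only at CPL 0. */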
01b9d8c1 5241 if (CPL(s) == 0) {
3e7da311
RH
5242 mask |= IF_MASK | IOPL_MASK;
5243 } else if (CPL(s) <= IOPL(s)) {
5244 mask |= IF_MASK;
5245 }
5246 if (dflag == MO_16) {
5247 mask &= 0xffff;
2c0262af 5248 }
3e7da311
RH
5249
5250 ot = gen_pop_T0(s);
ad75a51e 5251 gen_helper_write_eflags(tcg_env, s->T0, tcg_constant_i32(mask));
8e31d234 5252 gen_pop_update(s, ot);
3ca51d07 5253 set_cc_op(s, CC_OP_EFLAGS);
a9321a4d 5254 /* abort translation because TF/AC flag may change */
634a4051 5255 s->base.is_jmp = DISAS_EOB_NEXT;
2c0262af
FB
5256 }
5257 break;
5258 case 0x9e: /* sahf */
12e26b75 5259 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
14ce26e7 5260 goto illegal_op;
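             /* SAHF: keep only O from the computed flags, then merge the
                S, Z, A, P and C bits taken from AH. */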
35d95e41 5261 tcg_gen_shri_tl(s->T0, cpu_regs[R_EAX], 8);
d229edce 5262 gen_compute_eflags(s);
bd7a7b33 5263 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
c66f9727
EC
5264 tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
5265 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0);
2c0262af
FB
5266 break;
5267 case 0x9f: /* lahf */
12e26b75 5268 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
14ce26e7 5269 goto illegal_op;
d229edce 5270 gen_compute_eflags(s);
bd7a7b33 5271 /* Note: gen_compute_eflags() only gives the condition codes */
c66f9727 5272 tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02);
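             /* AH = SF:ZF:0:AF:0:PF:1:CF; EFLAGS bit 1 is always one,
                hence the 0x02 above. */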
35d95e41 5273 tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], s->T0, 8, 8);
2c0262af
FB
5274 break;
5275 case 0xf5: /* cmc */
d229edce 5276 gen_compute_eflags(s);
bd7a7b33 5277 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
2c0262af
FB
5278 break;
5279 case 0xf8: /* clc */
d229edce 5280 gen_compute_eflags(s);
bd7a7b33 5281 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
2c0262af
FB
5282 break;
5283 case 0xf9: /* stc */
d229edce 5284 gen_compute_eflags(s);
bd7a7b33 5285 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
2c0262af
FB
5286 break;
5287 case 0xfc: /* cld */
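             /* df is kept as +1 (CLD) or -1 (STD) so that the string ops
                can add it to the index registers directly. */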
6bd48f6f 5288 tcg_gen_movi_i32(s->tmp2_i32, 1);
ad75a51e 5289 tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df));
2c0262af
FB
5290 break;
5291 case 0xfd: /* std */
6bd48f6f 5292 tcg_gen_movi_i32(s->tmp2_i32, -1);
ad75a51e 5293 tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df));
2c0262af
FB
5294 break;
5295
5296 /************************/
5297 /* bit operations */
5298 case 0x1ba: /* bt/bts/btr/btc Gv, im */
ab4e4aec 5299 ot = dflag;
e3af7c78 5300 modrm = x86_ldub_code(env, s);
33698e5f 5301 op = (modrm >> 3) & 7;
2c0262af 5302 mod = (modrm >> 6) & 3;
14ce26e7 5303 rm = (modrm & 7) | REX_B(s);
2c0262af 5304 if (mod != 3) {
14ce26e7 5305 s->rip_offset = 1;
4eeb3939 5306 gen_lea_modrm(env, s, modrm);
cfe819d3 5307 if (!(s->prefix & PREFIX_LOCK)) {
c66f9727 5308 gen_op_ld_v(s, ot, s->T0, s->A0);
cfe819d3 5309 }
2c0262af 5310 } else {
1dbe15ef 5311 gen_op_mov_v_reg(s, ot, s->T0, rm);
2c0262af
FB
5312 }
5313 /* load shift */
e3af7c78 5314 val = x86_ldub_code(env, s);
b48597b0 5315 tcg_gen_movi_tl(s->T1, val);
2c0262af 5316 if (op < 4)
b9f9c5b4 5317 goto unknown_op;
2c0262af 5318 op -= 4;
f484d386 5319 goto bt_op;
2c0262af
FB
5320 case 0x1a3: /* bt Gv, Ev */
5321 op = 0;
5322 goto do_btx;
5323 case 0x1ab: /* bts */
5324 op = 1;
5325 goto do_btx;
5326 case 0x1b3: /* btr */
5327 op = 2;
5328 goto do_btx;
5329 case 0x1bb: /* btc */
5330 op = 3;
5331 do_btx:
ab4e4aec 5332 ot = dflag;
e3af7c78 5333 modrm = x86_ldub_code(env, s);
bbdb4237 5334 reg = ((modrm >> 3) & 7) | REX_R(s);
2c0262af 5335 mod = (modrm >> 6) & 3;
14ce26e7 5336 rm = (modrm & 7) | REX_B(s);
1dbe15ef 5337 gen_op_mov_v_reg(s, MO_32, s->T1, reg);
2c0262af 5338 if (mod != 3) {
cfe819d3 5339 AddressParts a = gen_lea_modrm_0(env, s, modrm);
2c0262af 5340 /* specific case: we need to add a displacement */
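             /* The bit offset may address memory outside the modrm operand:
                add ((T1 >> (3 + ot)) << ot) bytes, i.e. the signed index of
                the containing word times the operand size. */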
b48597b0 5341 gen_exts(ot, s->T1);
fbd80f02
EC
5342 tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
5343 tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
20581aad 5344 tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
6b672b5d 5345 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
cfe819d3 5346 if (!(s->prefix & PREFIX_LOCK)) {
c66f9727 5347 gen_op_ld_v(s, ot, s->T0, s->A0);
cfe819d3 5348 }
2c0262af 5349 } else {
1dbe15ef 5350 gen_op_mov_v_reg(s, ot, s->T0, rm);
2c0262af 5351 }
f484d386 5352 bt_op:
b48597b0 5353 tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
fbd80f02
EC
5354 tcg_gen_movi_tl(s->tmp0, 1);
5355 tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
cfe819d3
EC
5356 if (s->prefix & PREFIX_LOCK) {
5357 switch (op) {
5358 case 0: /* bt */
bad5cfcd 5359 /* Needs no atomic ops; we suppressed the normal
cfe819d3 5360 memory load for LOCK above so do it now. */
c66f9727 5361 gen_op_ld_v(s, ot, s->T0, s->A0);
cfe819d3
EC
5362 break;
5363 case 1: /* bts */
fbd80f02 5364 tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
cfe819d3
EC
5365 s->mem_index, ot | MO_LE);
5366 break;
5367 case 2: /* btr */
fbd80f02
EC
5368 tcg_gen_not_tl(s->tmp0, s->tmp0);
5369 tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
cfe819d3
EC
5370 s->mem_index, ot | MO_LE);
5371 break;
5372 default:
5373 case 3: /* btc */
fbd80f02 5374 tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
cfe819d3
EC
5375 s->mem_index, ot | MO_LE);
5376 break;
5377 }
5022f28f 5378 tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
cfe819d3 5379 } else {
5022f28f 5380 tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
cfe819d3
EC
5381 switch (op) {
5382 case 0: /* bt */
5383 /* Data already loaded; nothing to do. */
5384 break;
5385 case 1: /* bts */
fbd80f02 5386 tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
cfe819d3
EC
5387 break;
5388 case 2: /* btr */
fbd80f02 5389 tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
cfe819d3
EC
5390 break;
5391 default:
5392 case 3: /* btc */
fbd80f02 5393 tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
cfe819d3
EC
5394 break;
5395 }
5396 if (op != 0) {
5397 if (mod != 3) {
c66f9727 5398 gen_op_st_v(s, ot, s->T0, s->A0);
cfe819d3 5399 } else {
1dbe15ef 5400 gen_op_mov_reg_v(s, ot, rm, s->T0);
cfe819d3 5401 }
fd8ca9f6 5402 }
dc1823ce
RH
5403 }
5404
5405 /* Delay all CC updates until after the store above. Note that
5406 C is the result of the test, Z is unchanged, and the others
5407 are all undefined. */
5408 switch (s->cc_op) {
5409 case CC_OP_MULB ... CC_OP_MULQ:
5410 case CC_OP_ADDB ... CC_OP_ADDQ:
5411 case CC_OP_ADCB ... CC_OP_ADCQ:
5412 case CC_OP_SUBB ... CC_OP_SUBQ:
5413 case CC_OP_SBBB ... CC_OP_SBBQ:
5414 case CC_OP_LOGICB ... CC_OP_LOGICQ:
5415 case CC_OP_INCB ... CC_OP_INCQ:
5416 case CC_OP_DECB ... CC_OP_DECQ:
5417 case CC_OP_SHLB ... CC_OP_SHLQ:
5418 case CC_OP_SARB ... CC_OP_SARQ:
5419 case CC_OP_BMILGB ... CC_OP_BMILGQ:
5420 /* Z was going to be computed from the non-zero status of CC_DST.
5421 We can get that same Z value (and the new C value) by leaving
5422 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
5423 same width. */
5022f28f 5424 tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
dc1823ce
RH
5425 set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
5426 break;
5427 default:
5428 /* Otherwise, generate EFLAGS and replace the C bit. */
5429 gen_compute_eflags(s);
5022f28f 5430 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
dc1823ce
RH
5431 ctz32(CC_C), 1);
5432 break;
2c0262af
FB
5433 }
5434 break;
321c5351
RH
5435 case 0x1bc: /* bsf / tzcnt */
5436 case 0x1bd: /* bsr / lzcnt */
ab4e4aec 5437 ot = dflag;
e3af7c78 5438 modrm = x86_ldub_code(env, s);
bbdb4237 5439 reg = ((modrm >> 3) & 7) | REX_R(s);
321c5351 5440 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
c66f9727 5441 gen_extu(ot, s->T0);
321c5351
RH
5442
5443 /* Note that lzcnt and tzcnt are in different extensions. */
5444 if ((prefixes & PREFIX_REPZ)
5445 && (b & 1
5446 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
5447 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
5448 int size = 8 << ot;
e5143c90 5449 /* For lzcnt/tzcnt, C bit is defined related to the input. */
c66f9727 5450 tcg_gen_mov_tl(cpu_cc_src, s->T0);
321c5351
RH
5451 if (b & 1) {
5452 /* For lzcnt, reduce the target_ulong result by the
5453 number of zeros that we expect to find at the top. */
c66f9727
EC
5454 tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
5455 tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
6191b059 5456 } else {
e5143c90 5457 /* For tzcnt, a zero input must return the operand size. */
c66f9727 5458 tcg_gen_ctzi_tl(s->T0, s->T0, size);
6191b059 5459 }
e5143c90 5460 /* For lzcnt/tzcnt, Z bit is defined related to the result. */
c66f9727 5461 gen_op_update1_cc(s);
321c5351
RH
5462 set_cc_op(s, CC_OP_BMILGB + ot);
5463 } else {
5464 /* For bsr/bsf, only the Z bit is defined and it is related
5465 to the input and not the result. */
c66f9727 5466 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
321c5351 5467 set_cc_op(s, CC_OP_LOGICB + ot);
e5143c90
RH
5468
5469 /* ??? The manual says that the output is undefined when the
5470 input is zero, but real hardware leaves it unchanged, and
5471 real programs appear to depend on that. Accomplish this
5472 by passing the output as the value to return upon zero. */
321c5351
RH
5473 if (b & 1) {
5474 /* For bsr, return the bit index of the first 1 bit,
5475 not the count of leading zeros. */
b48597b0
EC
5476 tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
5477 tcg_gen_clz_tl(s->T0, s->T0, s->T1);
c66f9727 5478 tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
321c5351 5479 } else {
c66f9727 5480 tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
321c5351 5481 }
6191b059 5482 }
1dbe15ef 5483 gen_op_mov_reg_v(s, ot, reg, s->T0);
2c0262af
FB
5484 break;
5485 /************************/
5486 /* bcd */
5487 case 0x27: /* daa */
14ce26e7
FB
5488 if (CODE64(s))
5489 goto illegal_op;
773cdfcc 5490 gen_update_cc_op(s);
ad75a51e 5491 gen_helper_daa(tcg_env);
3ca51d07 5492 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
5493 break;
5494 case 0x2f: /* das */
14ce26e7
FB
5495 if (CODE64(s))
5496 goto illegal_op;
773cdfcc 5497 gen_update_cc_op(s);
ad75a51e 5498 gen_helper_das(tcg_env);
3ca51d07 5499 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
5500 break;
5501 case 0x37: /* aaa */
14ce26e7
FB
5502 if (CODE64(s))
5503 goto illegal_op;
773cdfcc 5504 gen_update_cc_op(s);
ad75a51e 5505 gen_helper_aaa(tcg_env);
3ca51d07 5506 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
5507 break;
5508 case 0x3f: /* aas */
14ce26e7
FB
5509 if (CODE64(s))
5510 goto illegal_op;
773cdfcc 5511 gen_update_cc_op(s);
ad75a51e 5512 gen_helper_aas(tcg_env);
3ca51d07 5513 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
5514 break;
5515 case 0xd4: /* aam */
14ce26e7
FB
5516 if (CODE64(s))
5517 goto illegal_op;
e3af7c78 5518 val = x86_ldub_code(env, s);
b6d7c3db 5519 if (val == 0) {
52236550 5520 gen_exception(s, EXCP00_DIVZ);
b6d7c3db 5521 } else {
ad75a51e 5522 gen_helper_aam(tcg_env, tcg_constant_i32(val));
3ca51d07 5523 set_cc_op(s, CC_OP_LOGICB);
b6d7c3db 5524 }
2c0262af
FB
5525 break;
5526 case 0xd5: /* aad */
14ce26e7
FB
5527 if (CODE64(s))
5528 goto illegal_op;
e3af7c78 5529 val = x86_ldub_code(env, s);
ad75a51e 5530 gen_helper_aad(tcg_env, tcg_constant_i32(val));
3ca51d07 5531 set_cc_op(s, CC_OP_LOGICB);
2c0262af
FB
5532 break;
5533 /************************/
5534 /* misc */
5535 case 0x90: /* nop */
ab1f142b 5536 /* XXX: correct lock test for all insn */
7418027e 5537 if (prefixes & PREFIX_LOCK) {
ab1f142b 5538 goto illegal_op;
7418027e
RH
5539 }
5540 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
5541 if (REX_B(s)) {
5542 goto do_xchg_reg_eax;
5543 }
0573fbfc 5544 if (prefixes & PREFIX_REPZ) {
81f3053b 5545 gen_update_cc_op(s);
65e4af23 5546 gen_update_eip_cur(s);
ad75a51e 5547 gen_helper_pause(tcg_env, cur_insn_len_i32(s));
6cf147aa 5548 s->base.is_jmp = DISAS_NORETURN;
0573fbfc 5549 }
2c0262af
FB
5550 break;
5551 case 0x9b: /* fwait */
5fafdf24 5552 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
7eee2a50 5553 (HF_MP_MASK | HF_TS_MASK)) {
52236550 5554 gen_exception(s, EXCP07_PREX);
2ee73ac3 5555 } else {
c1f27a0c
PB
5556 /* needs to be treated as I/O because of ferr_irq */
5557 translator_io_start(&s->base);
ad75a51e 5558 gen_helper_fwait(tcg_env);
7eee2a50 5559 }
2c0262af
FB
5560 break;
5561 case 0xcc: /* int3 */
8ed6c985 5562 gen_interrupt(s, EXCP03_INT3);
2c0262af
FB
5563 break;
5564 case 0xcd: /* int N */
e3af7c78 5565 val = x86_ldub_code(env, s);
aa9f21b1 5566 if (check_vm86_iopl(s)) {
8ed6c985 5567 gen_interrupt(s, val);
f115e911 5568 }
2c0262af
FB
5569 break;
5570 case 0xce: /* into */
14ce26e7
FB
5571 if (CODE64(s))
5572 goto illegal_op;
773cdfcc 5573 gen_update_cc_op(s);
65e4af23 5574 gen_update_eip_cur(s);
ad75a51e 5575 gen_helper_into(tcg_env, cur_insn_len_i32(s));
2c0262af 5576 break;
0b97134b 5577#ifdef WANT_ICEBP
2c0262af 5578 case 0xf1: /* icebp (undocumented, exits to external debugger) */
b53605db 5579 gen_svm_check_intercept(s, SVM_EXIT_ICEBP);
ed3c4739 5580 gen_debug(s);
2c0262af 5581 break;
0b97134b 5582#endif
2c0262af 5583 case 0xfa: /* cli */
ca7874c2 5584 if (check_iopl(s)) {
63179330 5585 gen_reset_eflags(s, IF_MASK);
2c0262af
FB
5586 }
5587 break;
5588 case 0xfb: /* sti */
ca7874c2 5589 if (check_iopl(s)) {
63179330 5590 gen_set_eflags(s, IF_MASK);
f083d92c 5591 /* interrupts are enabled only for the first insn after sti */
09e99df4 5592 gen_update_eip_next(s);
f083d92c 5593 gen_eob_inhibit_irq(s, true);
2c0262af
FB
5594 }
5595 break;
5596 case 0x62: /* bound */
14ce26e7
FB
5597 if (CODE64(s))
5598 goto illegal_op;
ab4e4aec 5599 ot = dflag;
e3af7c78 5600 modrm = x86_ldub_code(env, s);
2c0262af
FB
5601 reg = (modrm >> 3) & 7;
5602 mod = (modrm >> 6) & 3;
5603 if (mod == 3)
5604 goto illegal_op;
1dbe15ef 5605 gen_op_mov_v_reg(s, ot, s->T0, reg);
4eeb3939 5606 gen_lea_modrm(env, s, modrm);
6bd48f6f 5607 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
4ba9938c 5608 if (ot == MO_16) {
ad75a51e 5609 gen_helper_boundw(tcg_env, s->A0, s->tmp2_i32);
92fc4b58 5610 } else {
ad75a51e 5611 gen_helper_boundl(tcg_env, s->A0, s->tmp2_i32);
92fc4b58 5612 }
2c0262af
FB
5613 break;
5614 case 0x1c8 ... 0x1cf: /* bswap reg */
14ce26e7
FB
5615 reg = (b & 7) | REX_B(s);
5616#ifdef TARGET_X86_64
ab4e4aec 5617 if (dflag == MO_64) {
94fdf987
RH
5618 tcg_gen_bswap64_i64(cpu_regs[reg], cpu_regs[reg]);
5619 break;
14ce26e7 5620 }
94fdf987
RH
5621#endif
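             /* TCG_BSWAP_OZ zero-extends the 32-bit result, clearing any
                upper bits of the register as a 32-bit destination would. */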
5622 tcg_gen_bswap32_tl(cpu_regs[reg], cpu_regs[reg], TCG_BSWAP_OZ);
2c0262af
FB
5623 break;
5624 case 0xd6: /* salc */
14ce26e7
FB
5625 if (CODE64(s))
5626 goto illegal_op;
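             /* SALC: AL = CF ? 0xff : 0x00. Negating the 0/1 carry value
                yields 0 or -1, whose low byte is stored into AL. */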
c66f9727
EC
5627 gen_compute_eflags_c(s, s->T0);
5628 tcg_gen_neg_tl(s->T0, s->T0);
1dbe15ef 5629 gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
2c0262af
FB
5630 break;
5631 case 0xe0: /* loopnz */
5632 case 0xe1: /* loopz */
2c0262af
FB
5633 case 0xe2: /* loop */
5634 case 0xe3: /* jecxz */
14ce26e7 5635 {
2255da49
RH
5636 TCGLabel *l1, *l2;
5637 int diff = (int8_t)insn_get(env, s, MO_8);
3b46e624 5638
14ce26e7
FB
5639 l1 = gen_new_label();
5640 l2 = gen_new_label();
3cb3a772 5641 gen_update_cc_op(s);
14ce26e7 5642 b &= 3;
6e0d8677
FB
5643 switch(b) {
5644 case 0: /* loopnz */
5645 case 1: /* loopz */
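             /* Decrement ECX; exit the loop (fall through to l2) when it
                reaches zero, otherwise take the branch to l1 only if ZF
                matches the condition: clear for loopnz, set for loopz. */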
fbd80f02 5646 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
0ebacb5d 5647 gen_op_jz_ecx(s, l2);
5bdb91b0 5648 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
6e0d8677
FB
5649 break;
5650 case 2: /* loop */
fbd80f02 5651 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
0ebacb5d 5652 gen_op_jnz_ecx(s, l1);
6e0d8677
FB
5653 break;
5654 default:
5655 case 3: /* jcxz */
0ebacb5d 5656 gen_op_jz_ecx(s, l1);
6e0d8677 5657 break;
14ce26e7
FB
5658 }
5659
2255da49
RH
5660 gen_set_label(l2);
5661 gen_jmp_rel_csize(s, 0, 1);
6e0d8677 5662
14ce26e7 5663 gen_set_label(l1);
2255da49 5664 gen_jmp_rel(s, dflag, diff, 0);
14ce26e7 5665 }
2c0262af
FB
5666 break;
5667 case 0x130: /* wrmsr */
5668 case 0x132: /* rdmsr */
bc19f505 5669 if (check_cpl0(s)) {
773cdfcc 5670 gen_update_cc_op(s);
65e4af23 5671 gen_update_eip_cur(s);
0573fbfc 5672 if (b & 2) {
ad75a51e 5673 gen_helper_rdmsr(tcg_env);
0573fbfc 5674 } else {
ad75a51e 5675 gen_helper_wrmsr(tcg_env);
634a4051 5676 s->base.is_jmp = DISAS_EOB_NEXT;
0573fbfc 5677 }
2c0262af
FB
5678 }
5679 break;
5680 case 0x131: /* rdtsc */
773cdfcc 5681 gen_update_cc_op(s);
65e4af23 5682 gen_update_eip_cur(s);
dfd1b812 5683 translator_io_start(&s->base);
ad75a51e 5684 gen_helper_rdtsc(tcg_env);
2c0262af 5685 break;
df01e0fc 5686 case 0x133: /* rdpmc */
773cdfcc 5687 gen_update_cc_op(s);
65e4af23 5688 gen_update_eip_cur(s);
ad75a51e 5689 gen_helper_rdpmc(tcg_env);
b82055ae 5690 s->base.is_jmp = DISAS_NORETURN;
df01e0fc 5691 break;
023fe10d 5692 case 0x134: /* sysenter */
75a02adf
PB
5693 /* For AMD SYSENTER is not valid in long mode */
5694 if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
14ce26e7 5695 goto illegal_op;
75a02adf 5696 }
d75f9129 5697 if (!PE(s)) {
6bd99586 5698 gen_exception_gpf(s);
023fe10d 5699 } else {
ad75a51e 5700 gen_helper_sysenter(tcg_env);
6424ac8e 5701 s->base.is_jmp = DISAS_EOB_ONLY;
023fe10d
FB
5702 }
5703 break;
5704 case 0x135: /* sysexit */
75a02adf
PB
5705 /* For AMD SYSEXIT is not valid in long mode */
5706 if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
14ce26e7 5707 goto illegal_op;
75a02adf 5708 }
53b9b4cc 5709 if (!PE(s) || CPL(s) != 0) {
6bd99586 5710 gen_exception_gpf(s);
023fe10d 5711 } else {
ad75a51e 5712 gen_helper_sysexit(tcg_env, tcg_constant_i32(dflag - 1));
6424ac8e 5713 s->base.is_jmp = DISAS_EOB_ONLY;
023fe10d
FB
5714 }
5715 break;
14ce26e7 5716 case 0x105: /* syscall */
fd5dcb1c
PB
5717 /* For Intel SYSCALL is only valid in long mode */
5718 if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
5719 goto illegal_op;
5720 }
728d803b 5721 gen_update_cc_op(s);
65e4af23 5722 gen_update_eip_cur(s);
ad75a51e 5723 gen_helper_syscall(tcg_env, cur_insn_len_i32(s));
410e9814
DE
5724 /* TF handling for the syscall insn is different. The TF bit is checked
5725 after the syscall insn completes. This allows #DB to not be
5726 generated after one has entered CPL0 if TF is set in FMASK. */
5727 gen_eob_worker(s, false, true);
14ce26e7
FB
5728 break;
5729 case 0x107: /* sysret */
fd5dcb1c
PB
5730 /* For Intel SYSRET is only valid in long mode */
5731 if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
5732 goto illegal_op;
5733 }
53b9b4cc 5734 if (!PE(s) || CPL(s) != 0) {
6bd99586 5735 gen_exception_gpf(s);
14ce26e7 5736 } else {
ad75a51e 5737 gen_helper_sysret(tcg_env, tcg_constant_i32(dflag - 1));
aba9d61e 5738 /* condition codes are modified only in long mode */
73e90dc4 5739 if (LMA(s)) {
3ca51d07
RH
5740 set_cc_op(s, CC_OP_EFLAGS);
5741 }
c52ab08a
DE
5742 /* TF handling for the sysret insn is different. The TF bit is
5743 checked after the sysret insn completes. This allows #DB to be
5744 generated "as if" the syscall insn in userspace has just
5745 completed. */
5746 gen_eob_worker(s, false, true);
14ce26e7
FB
5747 }
5748 break;
2c0262af 5749 case 0x1a2: /* cpuid */
773cdfcc 5750 gen_update_cc_op(s);
65e4af23 5751 gen_update_eip_cur(s);
ad75a51e 5752 gen_helper_cpuid(tcg_env);
2c0262af
FB
5753 break;
5754 case 0xf4: /* hlt */
bc19f505 5755 if (check_cpl0(s)) {
773cdfcc 5756 gen_update_cc_op(s);
65e4af23 5757 gen_update_eip_cur(s);
ad75a51e 5758 gen_helper_hlt(tcg_env, cur_insn_len_i32(s));
6cf147aa 5759 s->base.is_jmp = DISAS_NORETURN;
2c0262af
FB
5760 }
5761 break;
5762 case 0x100:
e3af7c78 5763 modrm = x86_ldub_code(env, s);
2c0262af
FB
5764 mod = (modrm >> 6) & 3;
5765 op = (modrm >> 3) & 7;
5766 switch(op) {
5767 case 0: /* sldt */
f8a35846 5768 if (!PE(s) || VM86(s))
f115e911 5769 goto illegal_op;
637f1ee3
GW
5770 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5771 break;
5772 }
b53605db 5773 gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
ad75a51e 5774 tcg_gen_ld32u_tl(s->T0, tcg_env,
1d1cc4d0 5775 offsetof(CPUX86State, ldt.selector));
ab4e4aec 5776 ot = mod == 3 ? dflag : MO_16;
0af10c86 5777 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
5778 break;
5779 case 2: /* lldt */
f8a35846 5780 if (!PE(s) || VM86(s))
f115e911 5781 goto illegal_op;
bc19f505 5782 if (check_cpl0(s)) {
b53605db 5783 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
4ba9938c 5784 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6bd48f6f 5785 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
ad75a51e 5786 gen_helper_lldt(tcg_env, s->tmp2_i32);
2c0262af
FB
5787 }
5788 break;
5789 case 1: /* str */
f8a35846 5790 if (!PE(s) || VM86(s))
f115e911 5791 goto illegal_op;
637f1ee3
GW
5792 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5793 break;
5794 }
b53605db 5795 gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
ad75a51e 5796 tcg_gen_ld32u_tl(s->T0, tcg_env,
1d1cc4d0 5797 offsetof(CPUX86State, tr.selector));
ab4e4aec 5798 ot = mod == 3 ? dflag : MO_16;
0af10c86 5799 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
5800 break;
5801 case 3: /* ltr */
f8a35846 5802 if (!PE(s) || VM86(s))
f115e911 5803 goto illegal_op;
bc19f505 5804 if (check_cpl0(s)) {
b53605db 5805 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
4ba9938c 5806 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6bd48f6f 5807 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
ad75a51e 5808 gen_helper_ltr(tcg_env, s->tmp2_i32);
2c0262af
FB
5809 }
5810 break;
5811 case 4: /* verr */
5812 case 5: /* verw */
f8a35846 5813 if (!PE(s) || VM86(s))
f115e911 5814 goto illegal_op;
4ba9938c 5815 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
773cdfcc 5816 gen_update_cc_op(s);
2999a0b2 5817 if (op == 4) {
ad75a51e 5818 gen_helper_verr(tcg_env, s->T0);
2999a0b2 5819 } else {
ad75a51e 5820 gen_helper_verw(tcg_env, s->T0);
2999a0b2 5821 }
3ca51d07 5822 set_cc_op(s, CC_OP_EFLAGS);
f115e911 5823 break;
2c0262af 5824 default:
b9f9c5b4 5825 goto unknown_op;
2c0262af
FB
5826 }
5827 break;
1906b2af 5828
2c0262af 5829 case 0x101:
e3af7c78 5830 modrm = x86_ldub_code(env, s);
1906b2af 5831 switch (modrm) {
880f8486 5832 CASE_MODRM_MEM_OP(0): /* sgdt */
637f1ee3
GW
5833 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5834 break;
5835 }
b53605db 5836 gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
4eeb3939 5837 gen_lea_modrm(env, s, modrm);
c66f9727 5838 tcg_gen_ld32u_tl(s->T0,
ad75a51e 5839 tcg_env, offsetof(CPUX86State, gdt.limit));
c66f9727 5840 gen_op_st_v(s, MO_16, s->T0, s->A0);
aba9d61e 5841 gen_add_A0_im(s, 2);
ad75a51e 5842 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
ab4e4aec 5843 if (dflag == MO_16) {
c66f9727 5844 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
f0706f0c 5845 }
c66f9727 5846 gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
2c0262af 5847 break;
1906b2af
RH
5848
5849 case 0xc8: /* monitor */
01b9d8c1 5850 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
1906b2af 5851 goto illegal_op;
3d7374c5 5852 }
1906b2af 5853 gen_update_cc_op(s);
65e4af23 5854 gen_update_eip_cur(s);
6b672b5d 5855 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
1906b2af 5856 gen_add_A0_ds_seg(s);
ad75a51e 5857 gen_helper_monitor(tcg_env, s->A0);
3d7374c5 5858 break;
1906b2af
RH
5859
5860 case 0xc9: /* mwait */
01b9d8c1 5861 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
1906b2af
RH
5862 goto illegal_op;
5863 }
5864 gen_update_cc_op(s);
65e4af23 5865 gen_update_eip_cur(s);
ad75a51e 5866 gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
b82055ae 5867 s->base.is_jmp = DISAS_NORETURN;
1906b2af
RH
5868 break;
5869
5870 case 0xca: /* clac */
5871 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
01b9d8c1 5872 || CPL(s) != 0) {
1906b2af
RH
5873 goto illegal_op;
5874 }
63179330 5875 gen_reset_eflags(s, AC_MASK);
634a4051 5876 s->base.is_jmp = DISAS_EOB_NEXT;
1906b2af
RH
5877 break;
5878
5879 case 0xcb: /* stac */
5880 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
01b9d8c1 5881 || CPL(s) != 0) {
1906b2af
RH
5882 goto illegal_op;
5883 }
63179330 5884 gen_set_eflags(s, AC_MASK);
634a4051 5885 s->base.is_jmp = DISAS_EOB_NEXT;
1906b2af
RH
5886 break;
5887
880f8486 5888 CASE_MODRM_MEM_OP(1): /* sidt */
637f1ee3
GW
5889 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5890 break;
5891 }
b53605db 5892 gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
1906b2af 5893 gen_lea_modrm(env, s, modrm);
ad75a51e 5894 tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
c66f9727 5895 gen_op_st_v(s, MO_16, s->T0, s->A0);
1906b2af 5896 gen_add_A0_im(s, 2);
ad75a51e 5897 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
1906b2af 5898 if (dflag == MO_16) {
c66f9727 5899 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
1906b2af 5900 }
c66f9727 5901 gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
1906b2af
RH
5902 break;
5903
19dc85db
RH
5904 case 0xd0: /* xgetbv */
5905 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5906 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5907 | PREFIX_REPZ | PREFIX_REPNZ))) {
5908 goto illegal_op;
5909 }
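             /* XGETBV: return the 64-bit value of XCR[ECX] in EDX:EAX. */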
6bd48f6f 5910 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
ad75a51e 5911 gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
776678b2 5912 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
19dc85db
RH
5913 break;
5914
5915 case 0xd1: /* xsetbv */
5916 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5917 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5918 | PREFIX_REPZ | PREFIX_REPNZ))) {
5919 goto illegal_op;
5920 }
24b34590 5921 gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
bc19f505 5922 if (!check_cpl0(s)) {
19dc85db
RH
5923 break;
5924 }
776678b2 5925 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
19dc85db 5926 cpu_regs[R_EDX]);
6bd48f6f 5927 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
ad75a51e 5928 gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
19dc85db 5929 /* End TB because translation flags may change. */
634a4051 5930 s->base.is_jmp = DISAS_EOB_NEXT;
19dc85db
RH
5931 break;
5932
1906b2af 5933 case 0xd8: /* VMRUN */
5d223889 5934 if (!SVME(s) || !PE(s)) {
1906b2af
RH
5935 goto illegal_op;
5936 }
bc19f505 5937 if (!check_cpl0(s)) {
1906b2af 5938 break;
2c0262af 5939 }
1906b2af 5940 gen_update_cc_op(s);
65e4af23 5941 gen_update_eip_cur(s);
ad75a51e 5942 gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
ad1d6f07 5943 cur_insn_len_i32(s));
07ea28b4 5944 tcg_gen_exit_tb(NULL, 0);
6cf147aa 5945 s->base.is_jmp = DISAS_NORETURN;
2c0262af 5946 break;
1906b2af
RH
5947
5948 case 0xd9: /* VMMCALL */
5d223889 5949 if (!SVME(s)) {
1906b2af
RH
5950 goto illegal_op;
5951 }
5952 gen_update_cc_op(s);
65e4af23 5953 gen_update_eip_cur(s);
ad75a51e 5954 gen_helper_vmmcall(tcg_env);
1906b2af
RH
5955 break;
5956
5957 case 0xda: /* VMLOAD */
5d223889 5958 if (!SVME(s) || !PE(s)) {
1906b2af
RH
5959 goto illegal_op;
5960 }
bc19f505 5961 if (!check_cpl0(s)) {
1906b2af
RH
5962 break;
5963 }
5964 gen_update_cc_op(s);
65e4af23 5965 gen_update_eip_cur(s);
ad75a51e 5966 gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
1906b2af
RH
5967 break;
5968
5969 case 0xdb: /* VMSAVE */
5d223889 5970 if (!SVME(s) || !PE(s)) {
1906b2af
RH
5971 goto illegal_op;
5972 }
bc19f505 5973 if (!check_cpl0(s)) {
1906b2af
RH
5974 break;
5975 }
5976 gen_update_cc_op(s);
65e4af23 5977 gen_update_eip_cur(s);
ad75a51e 5978 gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
1906b2af
RH
5979 break;
5980
5981 case 0xdc: /* STGI */
5d223889 5982 if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
d75f9129 5983 || !PE(s)) {
1906b2af
RH
5984 goto illegal_op;
5985 }
bc19f505 5986 if (!check_cpl0(s)) {
1906b2af
RH
5987 break;
5988 }
5989 gen_update_cc_op(s);
ad75a51e 5990 gen_helper_stgi(tcg_env);
634a4051 5991 s->base.is_jmp = DISAS_EOB_NEXT;
1906b2af
RH
5992 break;
5993
5994 case 0xdd: /* CLGI */
5d223889 5995 if (!SVME(s) || !PE(s)) {
1906b2af
RH
5996 goto illegal_op;
5997 }
bc19f505 5998 if (!check_cpl0(s)) {
1906b2af
RH
5999 break;
6000 }
6001 gen_update_cc_op(s);
65e4af23 6002 gen_update_eip_cur(s);
ad75a51e 6003 gen_helper_clgi(tcg_env);
1906b2af
RH
6004 break;
6005
6006 case 0xde: /* SKINIT */
5d223889 6007 if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
d75f9129 6008 || !PE(s)) {
1906b2af
RH
6009 goto illegal_op;
6010 }
b53605db 6011 gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
e6aeb948
RH
6012 /* If not intercepted, not implemented -- raise #UD. */
6013 goto illegal_op;
1906b2af
RH
6014
6015 case 0xdf: /* INVLPGA */
5d223889 6016 if (!SVME(s) || !PE(s)) {
1906b2af
RH
6017 goto illegal_op;
6018 }
bc19f505 6019 if (!check_cpl0(s)) {
1906b2af
RH
6020 break;
6021 }
35e5a5d5
RH
6022 gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
6023 if (s->aflag == MO_64) {
6024 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
6025 } else {
6026 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
6027 }
ad75a51e 6028 gen_helper_flush_page(tcg_env, s->A0);
634a4051 6029 s->base.is_jmp = DISAS_EOB_NEXT;
1906b2af
RH
6030 break;
6031
880f8486 6032 CASE_MODRM_MEM_OP(2): /* lgdt */
bc19f505 6033 if (!check_cpl0(s)) {
1906b2af
RH
6034 break;
6035 }
b53605db 6036 gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
1906b2af 6037 gen_lea_modrm(env, s, modrm);
b48597b0 6038 gen_op_ld_v(s, MO_16, s->T1, s->A0);
1906b2af 6039 gen_add_A0_im(s, 2);
c66f9727 6040 gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
1906b2af 6041 if (dflag == MO_16) {
c66f9727 6042 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
1906b2af 6043 }
ad75a51e
RH
6044 tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
6045 tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
1906b2af
RH
6046 break;
6047
880f8486 6048 CASE_MODRM_MEM_OP(3): /* lidt */
bc19f505 6049 if (!check_cpl0(s)) {
1906b2af
RH
6050 break;
6051 }
b53605db 6052 gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
1906b2af 6053 gen_lea_modrm(env, s, modrm);
b48597b0 6054 gen_op_ld_v(s, MO_16, s->T1, s->A0);
1906b2af 6055 gen_add_A0_im(s, 2);
c66f9727 6056 gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
1906b2af 6057 if (dflag == MO_16) {
c66f9727 6058 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
1906b2af 6059 }
ad75a51e
RH
6060 tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
6061 tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
1906b2af
RH
6062 break;
6063
880f8486 6064 CASE_MODRM_OP(4): /* smsw */
637f1ee3
GW
6065 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
6066 break;
6067 }
b53605db 6068 gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
ad75a51e 6069 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
c0c84452
PB
6070 /*
6071 * In 32-bit mode, the higher 16 bits of the destination
6072 * register are undefined. In practice CR0[31:0] is stored
6073 * just like in 64-bit mode.
6074 */
6075 mod = (modrm >> 6) & 3;
6076 ot = (mod != 3 ? MO_16 : s->dflag);
a657f79e 6077 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af 6078 break;
0f70ed47
PB
6079 case 0xee: /* rdpkru */
6080 if (prefixes & PREFIX_LOCK) {
6081 goto illegal_op;
6082 }
6bd48f6f 6083 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
ad75a51e 6084 gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
776678b2 6085 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
0f70ed47
PB
6086 break;
6087 case 0xef: /* wrpkru */
6088 if (prefixes & PREFIX_LOCK) {
6089 goto illegal_op;
6090 }
776678b2 6091 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
0f70ed47 6092 cpu_regs[R_EDX]);
6bd48f6f 6093 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
ad75a51e 6094 gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
0f70ed47 6095 break;
7eff2e7c 6096
880f8486 6097 CASE_MODRM_OP(6): /* lmsw */
bc19f505 6098 if (!check_cpl0(s)) {
1906b2af 6099 break;
2c0262af 6100 }
b53605db 6101 gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
1906b2af 6102 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7eff2e7c
RH
6103 /*
6104 * Only the 4 lower bits of CR0 are modified.
6105 * PE cannot be set to zero if already set to one.
6106 */
ad75a51e 6107 tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
7eff2e7c
RH
6108 tcg_gen_andi_tl(s->T0, s->T0, 0xf);
6109 tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
6110 tcg_gen_or_tl(s->T0, s->T0, s->T1);
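             /* Masking the old CR0 with ~0xe keeps PE (bit 0) while dropping
                MP/EM/TS, so the OR lets LMSW set PE but never clear it. */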
ad75a51e 6111 gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
634a4051 6112 s->base.is_jmp = DISAS_EOB_NEXT;
2c0262af 6113 break;
1906b2af 6114
880f8486 6115 CASE_MODRM_MEM_OP(7): /* invlpg */
bc19f505 6116 if (!check_cpl0(s)) {
1906b2af
RH
6117 break;
6118 }
35e5a5d5 6119 gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
1906b2af 6120 gen_lea_modrm(env, s, modrm);
ad75a51e 6121 gen_helper_flush_page(tcg_env, s->A0);
634a4051 6122 s->base.is_jmp = DISAS_EOB_NEXT;
1906b2af
RH
6123 break;
6124
6125 case 0xf8: /* swapgs */
6126#ifdef TARGET_X86_64
6127 if (CODE64(s)) {
bc19f505 6128 if (check_cpl0(s)) {
c66f9727 6129 tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
ad75a51e 6130 tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
1906b2af 6131 offsetof(CPUX86State, kernelgsbase));
ad75a51e 6132 tcg_gen_st_tl(s->T0, tcg_env,
1906b2af 6133 offsetof(CPUX86State, kernelgsbase));
1b050077 6134 }
1906b2af
RH
6135 break;
6136 }
3558f805 6137#endif
1906b2af
RH
6138 goto illegal_op;
6139
6140 case 0xf9: /* rdtscp */
6141 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
6142 goto illegal_op;
6143 }
6144 gen_update_cc_op(s);
65e4af23 6145 gen_update_eip_cur(s);
dfd1b812 6146 translator_io_start(&s->base);
ad75a51e
RH
6147 gen_helper_rdtsc(tcg_env);
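             /* RDTSCP additionally returns IA32_TSC_AUX in ECX. */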
6148 gen_helper_rdpid(s->T0, tcg_env);
6750485b 6149 gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
2c0262af 6150 break;
1906b2af 6151
2c0262af 6152 default:
b9f9c5b4 6153 goto unknown_op;
2c0262af
FB
6154 }
6155 break;
1906b2af 6156
3415a4dd 6157 case 0x108: /* invd */
431c51e9 6158 case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */
bc19f505 6159 if (check_cpl0(s)) {
4d714d1a 6160 gen_svm_check_intercept(s, (b & 1) ? SVM_EXIT_WBINVD : SVM_EXIT_INVD);
3415a4dd
FB
6161 /* nothing to do */
6162 }
6163 break;
14ce26e7
FB
6164 case 0x63: /* arpl or movslS (x86_64) */
6165#ifdef TARGET_X86_64
6166 if (CODE64(s)) {
6167 int d_ot;
 6168 /* d_ot is the size of the destination */
ab4e4aec 6169 d_ot = dflag;
14ce26e7 6170
e3af7c78 6171 modrm = x86_ldub_code(env, s);
bbdb4237 6172 reg = ((modrm >> 3) & 7) | REX_R(s);
14ce26e7
FB
6173 mod = (modrm >> 6) & 3;
6174 rm = (modrm & 7) | REX_B(s);
3b46e624 6175
14ce26e7 6176 if (mod == 3) {
1dbe15ef 6177 gen_op_mov_v_reg(s, MO_32, s->T0, rm);
14ce26e7 6178 /* sign extend */
4ba9938c 6179 if (d_ot == MO_64) {
c66f9727 6180 tcg_gen_ext32s_tl(s->T0, s->T0);
4ba9938c 6181 }
1dbe15ef 6182 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
14ce26e7 6183 } else {
4eeb3939 6184 gen_lea_modrm(env, s, modrm);
c66f9727 6185 gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0);
1dbe15ef 6186 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
14ce26e7 6187 }
5fafdf24 6188 } else
14ce26e7
FB
6189#endif
6190 {
42a268c2 6191 TCGLabel *label1;
3a5d1773 6192 TCGv t0, t1, t2;
1e4840bf 6193
f8a35846 6194 if (!PE(s) || VM86(s))
14ce26e7 6195 goto illegal_op;
3a5d1773
RH
6196 t0 = tcg_temp_new();
6197 t1 = tcg_temp_new();
6198 t2 = tcg_temp_new();
4ba9938c 6199 ot = MO_16;
e3af7c78 6200 modrm = x86_ldub_code(env, s);
14ce26e7
FB
6201 reg = (modrm >> 3) & 7;
6202 mod = (modrm >> 6) & 3;
6203 rm = modrm & 7;
6204 if (mod != 3) {
4eeb3939 6205 gen_lea_modrm(env, s, modrm);
6b672b5d 6206 gen_op_ld_v(s, ot, t0, s->A0);
14ce26e7 6207 } else {
1dbe15ef 6208 gen_op_mov_v_reg(s, ot, t0, rm);
14ce26e7 6209 }
1dbe15ef 6210 gen_op_mov_v_reg(s, ot, t1, reg);
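             /* ARPL: if the destination RPL (low two bits of t0) is below
                the source RPL, raise it to the source RPL and set ZF;
                otherwise clear ZF. */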
fbd80f02 6211 tcg_gen_andi_tl(s->tmp0, t0, 3);
1e4840bf
FB
6212 tcg_gen_andi_tl(t1, t1, 3);
6213 tcg_gen_movi_tl(t2, 0);
3bd7da9e 6214 label1 = gen_new_label();
fbd80f02 6215 tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1);
1e4840bf
FB
6216 tcg_gen_andi_tl(t0, t0, ~3);
6217 tcg_gen_or_tl(t0, t0, t1);
6218 tcg_gen_movi_tl(t2, CC_Z);
3bd7da9e 6219 gen_set_label(label1);
14ce26e7 6220 if (mod != 3) {
3a5d1773 6221 gen_op_st_v(s, ot, t0, s->A0);
49d9fdcc 6222 } else {
1dbe15ef 6223 gen_op_mov_reg_v(s, ot, rm, t0);
14ce26e7 6224 }
d229edce 6225 gen_compute_eflags(s);
3bd7da9e 6226 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
1e4840bf 6227 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
f115e911 6228 }
f115e911 6229 break;
2c0262af
FB
6230 case 0x102: /* lar */
6231 case 0x103: /* lsl */
cec6843e 6232 {
42a268c2 6233 TCGLabel *label1;
1e4840bf 6234 TCGv t0;
f8a35846 6235 if (!PE(s) || VM86(s))
cec6843e 6236 goto illegal_op;
ab4e4aec 6237 ot = dflag != MO_16 ? MO_32 : MO_16;
e3af7c78 6238 modrm = x86_ldub_code(env, s);
bbdb4237 6239 reg = ((modrm >> 3) & 7) | REX_R(s);
4ba9938c 6240 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
3a5d1773 6241 t0 = tcg_temp_new();
773cdfcc 6242 gen_update_cc_op(s);
2999a0b2 6243 if (b == 0x102) {
ad75a51e 6244 gen_helper_lar(t0, tcg_env, s->T0);
2999a0b2 6245 } else {
ad75a51e 6246 gen_helper_lsl(t0, tcg_env, s->T0);
2999a0b2 6247 }
fbd80f02 6248 tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
cec6843e 6249 label1 = gen_new_label();
fbd80f02 6250 tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
1dbe15ef 6251 gen_op_mov_reg_v(s, ot, reg, t0);
cec6843e 6252 gen_set_label(label1);
3ca51d07 6253 set_cc_op(s, CC_OP_EFLAGS);
cec6843e 6254 }
2c0262af
FB
6255 break;
6256 case 0x118:
e3af7c78 6257 modrm = x86_ldub_code(env, s);
2c0262af
FB
6258 mod = (modrm >> 6) & 3;
6259 op = (modrm >> 3) & 7;
6260 switch(op) {
6261 case 0: /* prefetchnta */
 6262 case 1: /* prefetcht0 */
 6263 case 2: /* prefetcht1 */
 6264 case 3: /* prefetcht2 */
6265 if (mod == 3)
6266 goto illegal_op;
26317698 6267 gen_nop_modrm(env, s, modrm);
2c0262af
FB
6268 /* nothing more to do */
6269 break;
e17a36ce 6270 default: /* nop (multi byte) */
0af10c86 6271 gen_nop_modrm(env, s, modrm);
e17a36ce 6272 break;
2c0262af
FB
6273 }
6274 break;
62b58ba5 6275 case 0x11a:
e3af7c78 6276 modrm = x86_ldub_code(env, s);
62b58ba5
RH
6277 if (s->flags & HF_MPX_EN_MASK) {
6278 mod = (modrm >> 6) & 3;
bbdb4237 6279 reg = ((modrm >> 3) & 7) | REX_R(s);
523e28d7
RH
6280 if (prefixes & PREFIX_REPZ) {
6281 /* bndcl */
6282 if (reg >= 4
6283 || (prefixes & PREFIX_LOCK)
6284 || s->aflag == MO_16) {
6285 goto illegal_op;
6286 }
6287 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
6288 } else if (prefixes & PREFIX_REPNZ) {
6289 /* bndcu */
6290 if (reg >= 4
6291 || (prefixes & PREFIX_LOCK)
6292 || s->aflag == MO_16) {
6293 goto illegal_op;
6294 }
6295 TCGv_i64 notu = tcg_temp_new_i64();
6296 tcg_gen_not_i64(notu, cpu_bndu[reg]);
6297 gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
523e28d7 6298 } else if (prefixes & PREFIX_DATA) {
62b58ba5
RH
6299 /* bndmov -- from reg/mem */
6300 if (reg >= 4 || s->aflag == MO_16) {
6301 goto illegal_op;
6302 }
6303 if (mod == 3) {
6304 int reg2 = (modrm & 7) | REX_B(s);
6305 if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
6306 goto illegal_op;
6307 }
6308 if (s->flags & HF_MPX_IU_MASK) {
6309 tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
6310 tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
6311 }
6312 } else {
6313 gen_lea_modrm(env, s, modrm);
6314 if (CODE64(s)) {
6b672b5d 6315 tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
fc313c64 6316 s->mem_index, MO_LEUQ);
6b672b5d
EC
6317 tcg_gen_addi_tl(s->A0, s->A0, 8);
6318 tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
fc313c64 6319 s->mem_index, MO_LEUQ);
62b58ba5 6320 } else {
6b672b5d 6321 tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
62b58ba5 6322 s->mem_index, MO_LEUL);
6b672b5d
EC
6323 tcg_gen_addi_tl(s->A0, s->A0, 4);
6324 tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
62b58ba5
RH
6325 s->mem_index, MO_LEUL);
6326 }
6327 /* bnd registers are now in-use */
6328 gen_set_hflag(s, HF_MPX_IU_MASK);
6329 }
bdd87b3b
RH
6330 } else if (mod != 3) {
6331 /* bndldx */
6332 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6333 if (reg >= 4
6334 || (prefixes & PREFIX_LOCK)
6335 || s->aflag == MO_16
6336 || a.base < -1) {
6337 goto illegal_op;
6338 }
6339 if (a.base >= 0) {
6b672b5d 6340 tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
bdd87b3b 6341 } else {
6b672b5d 6342 tcg_gen_movi_tl(s->A0, 0);
bdd87b3b 6343 }
6b672b5d 6344 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
bdd87b3b 6345 if (a.index >= 0) {
c66f9727 6346 tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
bdd87b3b 6347 } else {
c66f9727 6348 tcg_gen_movi_tl(s->T0, 0);
bdd87b3b
RH
6349 }
6350 if (CODE64(s)) {
ad75a51e
RH
6351 gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
6352 tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
bdd87b3b
RH
6353 offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
6354 } else {
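             /* The 32-bit helper packs both bounds into one 64-bit value:
                lower bound in the low half, upper bound in the high half. */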
ad75a51e 6355 gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
bdd87b3b
RH
6356 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
6357 tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
6358 }
6359 gen_set_hflag(s, HF_MPX_IU_MASK);
62b58ba5
RH
6360 }
6361 }
6362 gen_nop_modrm(env, s, modrm);
6363 break;
149b427b 6364 case 0x11b:
e3af7c78 6365 modrm = x86_ldub_code(env, s);
149b427b
RH
6366 if (s->flags & HF_MPX_EN_MASK) {
6367 mod = (modrm >> 6) & 3;
bbdb4237 6368 reg = ((modrm >> 3) & 7) | REX_R(s);
149b427b
RH
6369 if (mod != 3 && (prefixes & PREFIX_REPZ)) {
6370 /* bndmk */
6371 if (reg >= 4
6372 || (prefixes & PREFIX_LOCK)
6373 || s->aflag == MO_16) {
6374 goto illegal_op;
6375 }
6376 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6377 if (a.base >= 0) {
6378 tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
6379 if (!CODE64(s)) {
6380 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
6381 }
6382 } else if (a.base == -1) {
 6383 /* no base register: the lower bound is 0 */
6384 tcg_gen_movi_i64(cpu_bndl[reg], 0);
6385 } else {
6386 /* rip-relative generates #ud */
6387 goto illegal_op;
6388 }
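             /* Bound registers hold the upper bound in one's complement
                form, hence the inversion of the effective address. */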
20581aad 6389 tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
149b427b 6390 if (!CODE64(s)) {
6b672b5d 6391 tcg_gen_ext32u_tl(s->A0, s->A0);
149b427b 6392 }
6b672b5d 6393 tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
149b427b
RH
6394 /* bnd registers are now in-use */
6395 gen_set_hflag(s, HF_MPX_IU_MASK);
6396 break;
523e28d7
RH
6397 } else if (prefixes & PREFIX_REPNZ) {
6398 /* bndcn */
6399 if (reg >= 4
6400 || (prefixes & PREFIX_LOCK)
6401 || s->aflag == MO_16) {
6402 goto illegal_op;
6403 }
6404 gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
62b58ba5
RH
6405 } else if (prefixes & PREFIX_DATA) {
6406 /* bndmov -- to reg/mem */
6407 if (reg >= 4 || s->aflag == MO_16) {
6408 goto illegal_op;
6409 }
6410 if (mod == 3) {
6411 int reg2 = (modrm & 7) | REX_B(s);
6412 if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
6413 goto illegal_op;
6414 }
6415 if (s->flags & HF_MPX_IU_MASK) {
6416 tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
6417 tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
6418 }
6419 } else {
6420 gen_lea_modrm(env, s, modrm);
6421 if (CODE64(s)) {
6b672b5d 6422 tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
fc313c64 6423 s->mem_index, MO_LEUQ);
6b672b5d
EC
6424 tcg_gen_addi_tl(s->A0, s->A0, 8);
6425 tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
fc313c64 6426 s->mem_index, MO_LEUQ);
62b58ba5 6427 } else {
6b672b5d 6428 tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
62b58ba5 6429 s->mem_index, MO_LEUL);
6b672b5d
EC
6430 tcg_gen_addi_tl(s->A0, s->A0, 4);
6431 tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
62b58ba5
RH
6432 s->mem_index, MO_LEUL);
6433 }
6434 }
bdd87b3b
RH
6435 } else if (mod != 3) {
6436 /* bndstx */
6437 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6438 if (reg >= 4
6439 || (prefixes & PREFIX_LOCK)
6440 || s->aflag == MO_16
6441 || a.base < -1) {
6442 goto illegal_op;
6443 }
6444 if (a.base >= 0) {
6b672b5d 6445 tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
bdd87b3b 6446 } else {
6b672b5d 6447 tcg_gen_movi_tl(s->A0, 0);
bdd87b3b 6448 }
6b672b5d 6449 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
bdd87b3b 6450 if (a.index >= 0) {
c66f9727 6451 tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
bdd87b3b 6452 } else {
c66f9727 6453 tcg_gen_movi_tl(s->T0, 0);
bdd87b3b
RH
6454 }
6455 if (CODE64(s)) {
ad75a51e 6456 gen_helper_bndstx64(tcg_env, s->A0, s->T0,
bdd87b3b
RH
6457 cpu_bndl[reg], cpu_bndu[reg]);
6458 } else {
ad75a51e 6459 gen_helper_bndstx32(tcg_env, s->A0, s->T0,
bdd87b3b
RH
6460 cpu_bndl[reg], cpu_bndu[reg]);
6461 }
149b427b
RH
6462 }
6463 }
6464 gen_nop_modrm(env, s, modrm);
6465 break;
62b58ba5 6466 case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
e3af7c78 6467 modrm = x86_ldub_code(env, s);
0af10c86 6468 gen_nop_modrm(env, s, modrm);
e17a36ce 6469 break;
7eff2e7c 6470
2c0262af
FB
6471 case 0x120: /* mov reg, crN */
6472 case 0x122: /* mov crN, reg */
7eff2e7c
RH
6473 if (!check_cpl0(s)) {
6474 break;
6475 }
6476 modrm = x86_ldub_code(env, s);
6477 /*
6478 * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6479 * AMD documentation (24594.pdf) and testing of Intel 386 and 486
6480 * processors all show that the mod bits are assumed to be 1's,
6481 * regardless of actual values.
6482 */
6483 rm = (modrm & 7) | REX_B(s);
6484 reg = ((modrm >> 3) & 7) | REX_R(s);
6485 switch (reg) {
6486 case 0:
6487 if ((prefixes & PREFIX_LOCK) &&
ccd59d09
AP
6488 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
6489 reg = 8;
6490 }
7eff2e7c
RH
6491 break;
6492 case 2:
6493 case 3:
6494 case 4:
e18a6ec8 6495 case 8:
7eff2e7c
RH
6496 break;
6497 default:
6498 goto unknown_op;
6499 }
6500 ot = (CODE64(s) ? MO_64 : MO_32);
6501
dfd1b812 6502 translator_io_start(&s->base);
7eff2e7c
RH
6503 if (b & 2) {
6504 gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
6505 gen_op_mov_v_reg(s, ot, s->T0, rm);
ad75a51e 6506 gen_helper_write_crN(tcg_env, tcg_constant_i32(reg), s->T0);
634a4051 6507 s->base.is_jmp = DISAS_EOB_NEXT;
7eff2e7c
RH
6508 } else {
6509 gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
ad75a51e 6510 gen_helper_read_crN(s->T0, tcg_env, tcg_constant_i32(reg));
7eff2e7c 6511 gen_op_mov_reg_v(s, ot, rm, s->T0);
2c0262af
FB
6512 }
6513 break;
7eff2e7c 6514
2c0262af
FB
6515 case 0x121: /* mov reg, drN */
6516 case 0x123: /* mov drN, reg */
bc19f505 6517 if (check_cpl0(s)) {
e3af7c78 6518 modrm = x86_ldub_code(env, s);
5c73b757
MO
6519 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6520 * AMD documentation (24594.pdf) and testing of
 6521 * Intel 386 and 486 processors all show that the mod bits
6522 * are assumed to be 1's, regardless of actual values.
6523 */
14ce26e7 6524 rm = (modrm & 7) | REX_B(s);
bbdb4237 6525 reg = ((modrm >> 3) & 7) | REX_R(s);
14ce26e7 6526 if (CODE64(s))
4ba9938c 6527 ot = MO_64;
14ce26e7 6528 else
4ba9938c 6529 ot = MO_32;
d0052339 6530 if (reg >= 8) {
2c0262af 6531 goto illegal_op;
d0052339 6532 }
2c0262af 6533 if (b & 2) {
b53605db 6534 gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
1dbe15ef 6535 gen_op_mov_v_reg(s, ot, s->T0, rm);
6bd48f6f 6536 tcg_gen_movi_i32(s->tmp2_i32, reg);
ad75a51e 6537 gen_helper_set_dr(tcg_env, s->tmp2_i32, s->T0);
634a4051 6538 s->base.is_jmp = DISAS_EOB_NEXT;
2c0262af 6539 } else {
b53605db 6540 gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
6bd48f6f 6541 tcg_gen_movi_i32(s->tmp2_i32, reg);
ad75a51e 6542 gen_helper_get_dr(s->T0, tcg_env, s->tmp2_i32);
1dbe15ef 6543 gen_op_mov_reg_v(s, ot, rm, s->T0);
2c0262af
FB
6544 }
6545 }
6546 break;
6547 case 0x106: /* clts */
bc19f505 6548 if (check_cpl0(s)) {
b53605db 6549 gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
ad75a51e 6550 gen_helper_clts(tcg_env);
7eee2a50 6551 /* abort block because static cpu state changed */
634a4051 6552 s->base.is_jmp = DISAS_EOB_NEXT;
2c0262af
FB
6553 }
6554 break;
222a3336 6555 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
664e0f19
FB
6556 case 0x1c3: /* MOVNTI reg, mem */
6557 if (!(s->cpuid_features & CPUID_SSE2))
14ce26e7 6558 goto illegal_op;
ab4e4aec 6559 ot = mo_64_32(dflag);
e3af7c78 6560 modrm = x86_ldub_code(env, s);
664e0f19
FB
6561 mod = (modrm >> 6) & 3;
6562 if (mod == 3)
6563 goto illegal_op;
bbdb4237 6564 reg = ((modrm >> 3) & 7) | REX_R(s);
664e0f19 6565 /* generate a generic store */
0af10c86 6566 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
14ce26e7 6567 break;
664e0f19 6568 case 0x1ae:
e3af7c78 6569 modrm = x86_ldub_code(env, s);
121f3157 6570 switch (modrm) {
880f8486 6571 CASE_MODRM_MEM_OP(0): /* fxsave */
121f3157
RH
6572 if (!(s->cpuid_features & CPUID_FXSR)
6573 || (prefixes & PREFIX_LOCK)) {
14ce26e7 6574 goto illegal_op;
121f3157 6575 }
09d85fb8 6576 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
52236550 6577 gen_exception(s, EXCP07_PREX);
0fd14b72
FB
6578 break;
6579 }
4eeb3939 6580 gen_lea_modrm(env, s, modrm);
ad75a51e 6581 gen_helper_fxsave(tcg_env, s->A0);
664e0f19 6582 break;
121f3157 6583
880f8486 6584 CASE_MODRM_MEM_OP(1): /* fxrstor */
121f3157
RH
6585 if (!(s->cpuid_features & CPUID_FXSR)
6586 || (prefixes & PREFIX_LOCK)) {
14ce26e7 6587 goto illegal_op;
121f3157 6588 }
09d85fb8 6589 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
52236550 6590 gen_exception(s, EXCP07_PREX);
0fd14b72
FB
6591 break;
6592 }
4eeb3939 6593 gen_lea_modrm(env, s, modrm);
ad75a51e 6594 gen_helper_fxrstor(tcg_env, s->A0);
664e0f19 6595 break;
121f3157 6596
880f8486 6597 CASE_MODRM_MEM_OP(2): /* ldmxcsr */
121f3157
RH
6598 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
6599 goto illegal_op;
6600 }
664e0f19 6601 if (s->flags & HF_TS_MASK) {
52236550 6602 gen_exception(s, EXCP07_PREX);
664e0f19 6603 break;
14ce26e7 6604 }
4eeb3939 6605 gen_lea_modrm(env, s, modrm);
6bd48f6f 6606 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
ad75a51e 6607 gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
664e0f19 6608 break;
121f3157 6609
880f8486 6610 CASE_MODRM_MEM_OP(3): /* stmxcsr */
121f3157 6611 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
664e0f19 6612 goto illegal_op;
121f3157
RH
6613 }
6614 if (s->flags & HF_TS_MASK) {
52236550 6615 gen_exception(s, EXCP07_PREX);
121f3157
RH
6616 break;
6617 }
ad75a51e 6618 gen_helper_update_mxcsr(tcg_env);
121f3157 6619 gen_lea_modrm(env, s, modrm);
ad75a51e 6620 tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
c66f9727 6621 gen_op_st_v(s, MO_32, s->T0, s->A0);
664e0f19 6622 break;
121f3157 6623
880f8486 6624 CASE_MODRM_MEM_OP(4): /* xsave */
19dc85db
RH
6625 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6626 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
6627 | PREFIX_REPZ | PREFIX_REPNZ))) {
6628 goto illegal_op;
6629 }
6630 gen_lea_modrm(env, s, modrm);
776678b2 6631 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
19dc85db 6632 cpu_regs[R_EDX]);
ad75a51e 6633 gen_helper_xsave(tcg_env, s->A0, s->tmp1_i64);
19dc85db
RH
6634 break;
6635
880f8486 6636 CASE_MODRM_MEM_OP(5): /* xrstor */
19dc85db
RH
6637 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6638 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
6639 | PREFIX_REPZ | PREFIX_REPNZ))) {
6640 goto illegal_op;
6641 }
6642 gen_lea_modrm(env, s, modrm);
776678b2 6643 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
19dc85db 6644 cpu_regs[R_EDX]);
ad75a51e 6645 gen_helper_xrstor(tcg_env, s->A0, s->tmp1_i64);
f4f1110e
RH
6646 /* XRSTOR is how MPX is enabled, which changes how
6647 we translate. Thus we need to end the TB. */
634a4051 6648 s->base.is_jmp = DISAS_EOB_NEXT;
19dc85db
RH
6649 break;
6650
880f8486 6651 CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
121f3157
RH
6652 if (prefixes & PREFIX_LOCK) {
6653 goto illegal_op;
6654 }
6655 if (prefixes & PREFIX_DATA) {
5e1fac2d 6656 /* clwb */
121f3157 6657 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
5e1fac2d 6658 goto illegal_op;
121f3157 6659 }
5e1fac2d 6660 gen_nop_modrm(env, s, modrm);
c9cfe8f9
RH
6661 } else {
6662 /* xsaveopt */
6663 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6664 || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
6665 || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
6666 goto illegal_op;
6667 }
6668 gen_lea_modrm(env, s, modrm);
776678b2 6669 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
c9cfe8f9 6670 cpu_regs[R_EDX]);
ad75a51e 6671 gen_helper_xsaveopt(tcg_env, s->A0, s->tmp1_i64);
121f3157 6672 }
c9cfe8f9 6673 break;
121f3157 6674
880f8486 6675 CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
6676 if (prefixes & PREFIX_LOCK) {
6677 goto illegal_op;
6678 }
6679 if (prefixes & PREFIX_DATA) {
6680 /* clflushopt */
6681 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
6682 goto illegal_op;
6683 }
5e1fac2d 6684 } else {
6685 /* clflush */
6686 if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
6687 || !(s->cpuid_features & CPUID_CLFLUSH)) {
5e1fac2d 6688 goto illegal_op;
121f3157 6689 }
5e1fac2d 6690 }
121f3157 6691 gen_nop_modrm(env, s, modrm);
5e1fac2d 6692 break;
121f3157 6693
07929f2a 6694 case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
e0dd5fd4 6695 case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
07929f2a 6696 case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
e0dd5fd4 6697 case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
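            /*
             * RD/WRFSBASE and RD/WRGSBASE: 64-bit mode only, mandatory F3
             * prefix, CPUID.FSGSBASE.  Bit 3 of the modrm byte selects FS
             * vs. GS and bit 4 selects read vs. write; e.g. "f3 48 0f ae c0"
             * is rdfsbase %rax and "f3 48 0f ae d0" is wrfsbase %rax.
             */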
6698 if (CODE64(s)
6699 && (prefixes & PREFIX_REPZ)
6700 && !(prefixes & PREFIX_LOCK)
6701 && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
6702 TCGv base, treg, src, dst;
6703
6704 /* Preserve hflags bits by testing CR4 at runtime. */
6bd48f6f 6705 tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
ad75a51e 6706 gen_helper_cr4_testbit(tcg_env, s->tmp2_i32);
6707
6708 base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
6709 treg = cpu_regs[(modrm & 7) | REX_B(s)];
6710
6711 if (modrm & 0x10) {
6712 /* wr*base */
6713 dst = base, src = treg;
6714 } else {
6715 /* rd*base */
6716 dst = treg, src = base;
6717 }
6718
6719 if (s->dflag == MO_32) {
6720 tcg_gen_ext32u_tl(dst, src);
6721 } else {
6722 tcg_gen_mov_tl(dst, src);
6723 }
6724 break;
6725 }
b9f9c5b4 6726 goto unknown_op;
07929f2a 6727
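        /*
         * With mod == 3, /5../7 encode the SSE fences: LFENCE, MFENCE and
         * SFENCE map onto TCG barriers ordering loads, all accesses and
         * stores respectively.  PCOMMIT (66 0F AE F8) is accepted when
         * advertised by CPUID but generates no code.
         */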
6728 case 0xf8: /* sfence / pcommit */
6729 if (prefixes & PREFIX_DATA) {
6730 /* pcommit */
6731 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
6732 || (prefixes & PREFIX_LOCK)) {
6733 goto illegal_op;
891bc821 6734 }
6735 break;
6736 }
6737 /* fallthru */
6738 case 0xf9 ... 0xff: /* sfence */
6739 if (!(s->cpuid_features & CPUID_SSE)
6740 || (prefixes & PREFIX_LOCK)) {
6741 goto illegal_op;
6742 }
cc19e497 6743 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
14cb949a 6744 break;
121f3157 6745 case 0xe8 ... 0xef: /* lfence */
6746 if (!(s->cpuid_features & CPUID_SSE)
6747 || (prefixes & PREFIX_LOCK)) {
6748 goto illegal_op;
6749 }
6750 tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
6751 break;
6752 case 0xf0 ... 0xf7: /* mfence */
6753 if (!(s->cpuid_features & CPUID_SSE2)
6754 || (prefixes & PREFIX_LOCK)) {
6755 goto illegal_op;
8f091a59 6756 }
cc19e497 6757 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
8f091a59 6758 break;
121f3157 6759
664e0f19 6760 default:
b9f9c5b4 6761 goto unknown_op;
6762 }
6763 break;
121f3157 6764
a35f3ec7 6765 case 0x10d: /* 3DNow! prefetch(w) */
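        /*
         * Prefetch hints only need their memory operand decoded; no load
         * is generated, so this is treated like a multi-byte NOP.
         */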
e3af7c78 6766 modrm = x86_ldub_code(env, s);
6767 mod = (modrm >> 6) & 3;
6768 if (mod == 3)
6769 goto illegal_op;
26317698 6770 gen_nop_modrm(env, s, modrm);
8f091a59 6771 break;
3b21e03e 6772 case 0x1aa: /* rsm */
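        /*
         * RSM is only valid while in SMM.  The helper restores the saved
         * state, so the TB must end here because the mode we return to
         * may differ from the current translation flags.
         */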
b53605db 6773 gen_svm_check_intercept(s, SVM_EXIT_RSM);
6774 if (!(s->flags & HF_SMM_MASK))
6775 goto illegal_op;
6776#ifdef CONFIG_USER_ONLY
6777 /* we should not be in SMM mode */
6778 g_assert_not_reached();
6779#else
728d803b 6780 gen_update_cc_op(s);
09e99df4 6781 gen_update_eip_next(s);
ad75a51e 6782 gen_helper_rsm(tcg_env);
a93b55ec 6783#endif /* CONFIG_USER_ONLY */
6424ac8e 6784 s->base.is_jmp = DISAS_EOB_ONLY;
3b21e03e 6785 break;
6786 case 0x1b8: /* SSE4.2 popcnt */
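        /*
         * POPCNT (f3 0f b8 /r): the F3 prefix is mandatory and LOCK/F2
         * are rejected.  The source is zero-extended to the operand size,
         * counted with a ctpop op, and CC_OP_POPCNT lets ZF be derived
         * lazily from the saved source value.
         */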
6787 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
6788 PREFIX_REPZ)
6789 goto illegal_op;
6790 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
6791 goto illegal_op;
6792
e3af7c78 6793 modrm = x86_ldub_code(env, s);
bbdb4237 6794 reg = ((modrm >> 3) & 7) | REX_R(s);
222a3336 6795
ab4e4aec 6796 if (s->prefix & PREFIX_DATA) {
4ba9938c 6797 ot = MO_16;
6798 } else {
6799 ot = mo_64_32(dflag);
6800 }
222a3336 6801
0af10c86 6802 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6803 gen_extu(ot, s->T0);
6804 tcg_gen_mov_tl(cpu_cc_src, s->T0);
6805 tcg_gen_ctpop_tl(s->T0, s->T0);
1dbe15ef 6806 gen_op_mov_reg_v(s, ot, reg, s->T0);
fdb0d09d 6807
4885c3c4 6808 set_cc_op(s, CC_OP_POPCNT);
222a3336 6809 break;
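    /*
     * The following opcode ranges are handled by the table-driven
     * decoder; this switch only forwards them to disas_insn_new().
     */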
653fad24 6810 case 0x10e ... 0x117:
664e0f19 6811 case 0x128 ... 0x12f:
4242b1bd 6812 case 0x138 ... 0x13a:
d9f4bb27 6813 case 0x150 ... 0x179:
6814 case 0x17c ... 0x17f:
6815 case 0x1c2:
6816 case 0x1c4 ... 0x1c6:
6817 case 0x1d0 ... 0x1fe:
653fad24 6818 disas_insn_new(s, cpu, b);
664e0f19 6819 break;
2c0262af 6820 default:
b9f9c5b4 6821 goto unknown_op;
2c0262af 6822 }
f66c8e8c 6823 return true;
2c0262af 6824 illegal_op:
b9f9c5b4 6825 gen_illegal_opcode(s);
f66c8e8c 6826 return true;
b9f9c5b4 6827 unknown_op:
b9f9c5b4 6828 gen_unknown_opcode(env, s);
f66c8e8c 6829 return true;
6830}
6831
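/*
 * Allocate the TCG globals that shadow guest CPU state: the general
 * purpose registers, EIP/RIP, the lazy condition-code fields, the
 * cached segment bases and the MPX bound registers.  They are shared
 * by all translations.
 */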
63618b4e 6832void tcg_x86_init(void)
2c0262af 6833{
6834 static const char reg_names[CPU_NB_REGS][4] = {
6835#ifdef TARGET_X86_64
6836 [R_EAX] = "rax",
6837 [R_EBX] = "rbx",
6838 [R_ECX] = "rcx",
6839 [R_EDX] = "rdx",
6840 [R_ESI] = "rsi",
6841 [R_EDI] = "rdi",
6842 [R_EBP] = "rbp",
6843 [R_ESP] = "rsp",
6844 [8] = "r8",
6845 [9] = "r9",
6846 [10] = "r10",
6847 [11] = "r11",
6848 [12] = "r12",
6849 [13] = "r13",
6850 [14] = "r14",
6851 [15] = "r15",
6852#else
6853 [R_EAX] = "eax",
6854 [R_EBX] = "ebx",
6855 [R_ECX] = "ecx",
6856 [R_EDX] = "edx",
6857 [R_ESI] = "esi",
6858 [R_EDI] = "edi",
6859 [R_EBP] = "ebp",
6860 [R_ESP] = "esp",
6861#endif
6862 };
6863 static const char eip_name[] = {
6864#ifdef TARGET_X86_64
6865 "rip"
6866#else
6867 "eip"
6868#endif
6869 };
6870 static const char seg_base_names[6][8] = {
6871 [R_CS] = "cs_base",
6872 [R_DS] = "ds_base",
6873 [R_ES] = "es_base",
6874 [R_FS] = "fs_base",
6875 [R_GS] = "gs_base",
6876 [R_SS] = "ss_base",
6877 };
6878 static const char bnd_regl_names[4][8] = {
6879 "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
6880 };
6881 static const char bnd_regu_names[4][8] = {
6882 "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
6883 };
6884 int i;
6885
ad75a51e 6886 cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
317ac620 6887 offsetof(CPUX86State, cc_op), "cc_op");
ad75a51e 6888 cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
a7812ae4 6889 "cc_dst");
ad75a51e 6890 cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
a3251186 6891 "cc_src");
ad75a51e 6892 cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
988c3eb0 6893 "cc_src2");
ad75a51e 6894 cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);
437a88a5 6895
fac0aff9 6896 for (i = 0; i < CPU_NB_REGS; ++i) {
ad75a51e 6897 cpu_regs[i] = tcg_global_mem_new(tcg_env,
6898 offsetof(CPUX86State, regs[i]),
6899 reg_names[i]);
6900 }
677ef623 6901
6902 for (i = 0; i < 6; ++i) {
6903 cpu_seg_base[i]
ad75a51e 6904 = tcg_global_mem_new(tcg_env,
6905 offsetof(CPUX86State, segs[i].base),
6906 seg_base_names[i]);
6907 }
6908
6909 for (i = 0; i < 4; ++i) {
6910 cpu_bndl[i]
ad75a51e 6911 = tcg_global_mem_new_i64(tcg_env,
6912 offsetof(CPUX86State, bnd_regs[i].lb),
6913 bnd_regl_names[i]);
6914 cpu_bndu[i]
ad75a51e 6915 = tcg_global_mem_new_i64(tcg_env,
6916 offsetof(CPUX86State, bnd_regs[i].ub),
6917 bnd_regu_names[i]);
6918 }
6919}
6920
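/*
 * Per-TB setup: copy the hflags-derived mode bits and the CPUID feature
 * words into the DisasContext so the decoder does not reload them from
 * CPUX86State, and allocate the temporaries used while translating.
 */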
b542683d 6921static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
2c0262af 6922{
9761d39b 6923 DisasContext *dc = container_of(dcbase, DisasContext, base);
b77af26e 6924 CPUX86State *env = cpu_env(cpu);
9761d39b 6925 uint32_t flags = dc->base.tb->flags;
9ef6c6ec 6926 uint32_t cflags = tb_cflags(dc->base.tb);
01b9d8c1 6927 int cpl = (flags >> HF_CPL_SHIFT) & 3;
0ab011cc 6928 int iopl = (flags >> IOPL_SHIFT) & 3;
3a1d9b8b 6929
d75f9129 6930 dc->cs_base = dc->base.tb->cs_base;
e3a79e0e 6931 dc->pc_save = dc->base.pc_next;
d75f9129 6932 dc->flags = flags;
6933#ifndef CONFIG_USER_ONLY
6934 dc->cpl = cpl;
0ab011cc 6935 dc->iopl = iopl;
01b9d8c1 6936#endif
6937
6938 /* We make some simplifying assumptions; validate they're correct. */
6939 g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
01b9d8c1 6940 g_assert(CPL(dc) == cpl);
0ab011cc 6941 g_assert(IOPL(dc) == iopl);
f8a35846 6942 g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
9996dcfd 6943 g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
eec7d0f8 6944 g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
b40a47a1 6945 g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
73e90dc4 6946 g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
beedb93c 6947 g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
5d223889 6948 g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
b322b3af 6949 g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));
d75f9129 6950
2c0262af 6951 dc->cc_op = CC_OP_DYNAMIC;
e207582f 6952 dc->cc_op_dirty = false;
6953 dc->popl_esp_hack = 0;
6954 /* select memory access functions */
da6d48e3 6955 dc->mem_index = cpu_mmu_index(env, false);
6956 dc->cpuid_features = env->features[FEAT_1_EDX];
6957 dc->cpuid_ext_features = env->features[FEAT_1_ECX];
6958 dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
6959 dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
6960 dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
268dc464 6961 dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
c9cfe8f9 6962 dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
9ef6c6ec 6963 dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
c1de1a1a 6964 (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
6965 /*
6966 * If jmp_opt, we want to handle each string instruction individually.
6967 * For icount also disable repz optimization so that each iteration
6968 * is accounted separately.
c4d4525c 6969 */
9ef6c6ec 6970 dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
4f31916f 6971
c66f9727 6972 dc->T0 = tcg_temp_new();
b48597b0 6973 dc->T1 = tcg_temp_new();
6b672b5d 6974 dc->A0 = tcg_temp_new();
a7812ae4 6975
fbd80f02 6976 dc->tmp0 = tcg_temp_new();
776678b2 6977 dc->tmp1_i64 = tcg_temp_new_i64();
6bd48f6f 6978 dc->tmp2_i32 = tcg_temp_new_i32();
4f82446d 6979 dc->tmp3_i32 = tcg_temp_new_i32();
5022f28f 6980 dc->tmp4 = tcg_temp_new();
3a5d1773 6981 dc->cc_srcT = tcg_temp_new();
6982}
6983
6984static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
6985{
6986}
6987
6988static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
6989{
6990 DisasContext *dc = container_of(dcbase, DisasContext, base);
e3a79e0e 6991 target_ulong pc_arg = dc->base.pc_next;
9d75f52b 6992
95093668 6993 dc->prev_insn_end = tcg_last_op();
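    /*
     * With CF_PCREL the TB may execute at any virtual address, so
     * insn_start records only the page offset of EIP rather than the
     * absolute program counter.
     */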
2e3afe8e 6994 if (tb_cflags(dcbase->tb) & CF_PCREL) {
6995 pc_arg -= dc->cs_base;
6996 pc_arg &= ~TARGET_PAGE_MASK;
6997 }
6998 tcg_gen_insn_start(pc_arg, dc->cc_op);
6999}
7000
7001static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
7002{
7003 DisasContext *dc = container_of(dcbase, DisasContext, base);
7004
7005#ifdef TARGET_VSYSCALL_PAGE
7006 /*
7007 * Detect entry into the vsyscall page and invoke the syscall.
7008 */
7009 if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
52236550 7010 gen_exception(dc, EXCP_VSYSCALL);
9b21049e 7011 dc->base.pc_next = dc->pc + 1;
7012 return;
7013 }
7014#endif
7015
7016 if (disas_insn(dc, cpu)) {
7017 target_ulong pc_next = dc->pc;
7018 dc->base.pc_next = pc_next;
7019
7020 if (dc->base.is_jmp == DISAS_NEXT) {
7021 if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
7022 /*
 7023 * In single-step mode, we generate only one instruction and
 7024 * then generate an exception.
 7025 * If IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
 7026 * the flag and abort the translation to give the IRQs a
 7027 * chance to happen.
7028 */
200ef603 7029 dc->base.is_jmp = DISAS_EOB_NEXT;
7030 } else if (!is_same_page(&dc->base, pc_next)) {
7031 dc->base.is_jmp = DISAS_TOO_MANY;
7032 }
95093668 7033 }
2c2f8cac 7034 }
7035}
7036
7037static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
7038{
7039 DisasContext *dc = container_of(dcbase, DisasContext, base);
7040
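    /*
     * Emit the block epilogue according to how translation stopped:
     * DISAS_TOO_MANY chains directly to the next TB, the DISAS_EOB_*
     * variants flush cc_op and EIP as needed before gen_eob(), and
     * DISAS_JUMP ends with an indirect jump through gen_jr().
     */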
7041 switch (dc->base.is_jmp) {
7042 case DISAS_NORETURN:
7043 break;
7044 case DISAS_TOO_MANY:
7045 gen_update_cc_op(dc);
7046 gen_jmp_rel_csize(dc, 0, 0);
7047 break;
7048 case DISAS_EOB_NEXT:
7049 gen_update_cc_op(dc);
65e4af23 7050 gen_update_eip_cur(dc);
7051 /* fall through */
7052 case DISAS_EOB_ONLY:
47e981b4 7053 gen_eob(dc);
7054 break;
7055 case DISAS_EOB_INHIBIT_IRQ:
7056 gen_update_cc_op(dc);
7057 gen_update_eip_cur(dc);
7058 gen_eob_inhibit_irq(dc, true);
7059 break;
7060 case DISAS_JUMP:
7061 gen_jr(dc);
7062 break;
7063 default:
7064 g_assert_not_reached();
7065 }
7066}
7067
e0d110d9 7068static void i386_tr_disas_log(const DisasContextBase *dcbase,
8eb806a7 7069 CPUState *cpu, FILE *logfile)
7070{
7071 DisasContext *dc = container_of(dcbase, DisasContext, base);
e0d110d9 7072
7073 fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
7074 target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
7075}
7076
7077static const TranslatorOps i386_tr_ops = {
7078 .init_disas_context = i386_tr_init_disas_context,
7079 .tb_start = i386_tr_tb_start,
7080 .insn_start = i386_tr_insn_start,
7081 .translate_insn = i386_tr_translate_insn,
7082 .tb_stop = i386_tr_tb_stop,
7083 .disas_log = i386_tr_disas_log,
7084};
0a7df5da 7085
d2e6eedf 7086/* generate intermediate code for basic block 'tb'. */
597f9b2d 7087void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
306c8721 7088 target_ulong pc, void *host_pc)
7089{
7090 DisasContext dc;
e0d110d9 7091
306c8721 7092 translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
2c0262af 7093}