/*
 * i386 translation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"
#include "fpu/softfloat.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "helper-tcg.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H


#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20
#define PREFIX_REX    0x40

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

/* For a switch indexed by MODRM, match all memory operands for a given OP. */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7

//#define MACRO_TEST   1

/* global register indexes */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
static TCGv cpu_eip;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];

typedef struct DisasContext {
    DisasContextBase base;

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */
    target_ulong pc_save;

    MemOp aflag;
    MemOp dflag;

    int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
    uint8_t prefix;

    bool has_modrm;
    uint8_t modrm;

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement. */
    uint8_t popl_esp_hack; /* for correct popl with esp base handling */
    uint8_t rip_offset; /* only used in x86_64, but left for simplicity */

#ifdef TARGET_X86_64
    uint8_t rex_r;
    uint8_t rex_x;
    uint8_t rex_b;
#endif
    bool vex_w; /* used by AVX even on 32-bit processors */
    bool jmp_opt; /* use direct block chaining for direct jumps */
    bool repz_opt; /* optimize jumps within repz instructions */

    bool cc_op_dirty;

    CCOp cc_op;  /* current CC operation */
    int mem_index; /* select memory access functions */
    uint32_t flags; /* all execution flags */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_7_0_ecx_features;
    int cpuid_7_1_eax_features;
    int cpuid_xsave_features;

    /* TCG local temps */
    TCGv cc_srcT;
    TCGv A0;
    TCGv T0;
    TCGv T1;

    /* TCG local register indexes (only used inside old micro ops) */
    TCGv tmp0;
    TCGv tmp4;
    TCGv_i32 tmp2_i32;
    TCGv_i32 tmp3_i32;
    TCGv_i64 tmp1_i64;

    sigjmp_buf jmpbuf;
    TCGOp *prev_insn_end;
} DisasContext;

#define DISAS_EOB_ONLY         DISAS_TARGET_0
#define DISAS_EOB_NEXT         DISAS_TARGET_1
#define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_2
#define DISAS_JUMP             DISAS_TARGET_3

/* The environment in which user-only runs is constrained. */
#ifdef CONFIG_USER_ONLY
#define PE(S)     true
#define CPL(S)    3
#define IOPL(S)   0
#define SVME(S)   false
#define GUEST(S)  false
#else
#define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
#define CPL(S)    ((S)->cpl)
#define IOPL(S)   ((S)->iopl)
#define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
#define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
#define VM86(S)   false
#define CODE32(S) true
#define SS32(S)   true
#define ADDSEG(S) false
#else
#define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
#define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
#endif
#if !defined(TARGET_X86_64)
#define CODE64(S) false
#elif defined(CONFIG_USER_ONLY)
#define CODE64(S) true
#else
#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
#define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
#else
#define LMA(S)    false
#endif

#ifdef TARGET_X86_64
#define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
#define REX_W(S)       ((S)->vex_w)
#define REX_R(S)       ((S)->rex_r + 0)
#define REX_X(S)       ((S)->rex_x + 0)
#define REX_B(S)       ((S)->rex_b + 0)
#else
#define REX_PREFIX(S)  false
#define REX_W(S)       false
#define REX_R(S)       0
#define REX_X(S)       0
#define REX_B(S)       0
#endif

/*
 * Many sysemu-only helpers are not reachable for user-only.
 * Define stub generators here, so that we need not either sprinkle
 * ifdefs through the translator, nor provide the helper function.
 */
#define STUB_HELPER(NAME, ...) \
    static inline void gen_helper_##NAME(__VA_ARGS__) \
    { qemu_build_not_reached(); }

#ifdef CONFIG_USER_ONLY
STUB_HELPER(clgi, TCGv_env env)
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(rdmsr, TCGv_env env)
STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(wrmsr, TCGv_env env)
#endif

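/*
 * For illustration, STUB_HELPER(clgi, TCGv_env env) above expands to
 *
 *     static inline void gen_helper_clgi(TCGv_env env)
 *     { qemu_build_not_reached(); }
 *
 * so user-only builds still compile code paths that reference the
 * sysemu-only helpers, while qemu_build_not_reached() is intended to
 * fail the build if a call to a stub survives optimization.
 */
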
static void gen_eob(DisasContext *s);
static void gen_jr(DisasContext *s);
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
static void gen_op(DisasContext *s1, int op, MemOp ot, int d);
static void gen_exception_gpf(DisasContext *s);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X. */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
    [CC_OP_POPCNT] = USES_CC_SRC,
};

static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used. */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored. Thus we always consider it clean. */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts). */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

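/*
 * Example of the discard logic in set_cc_op() above: moving from
 * CC_OP_SUBB (which uses CC_DST, CC_SRC and CC_SRCT per cc_op_live[])
 * to CC_OP_LOGICB (which uses only CC_DST) leaves CC_SRC and CC_SRCT
 * dead, so tcg_gen_discard_tl() is emitted for cpu_cc_src and
 * s->cc_srcT and TCG may drop any pending computation of their values.
 */
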
static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if HOST_BIG_ENDIAN
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
{
    /* Any time the REX prefix is present, byte registers are uniform */
    if (reg < 4 || REX_PREFIX(s)) {
        return false;
    }
    return true;
}

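/*
 * Concretely: without a REX prefix, byte-register numbers 4..7 select
 * AH, CH, DH and BH (bits 15..8 of registers 0..3), so byte_reg_is_xH()
 * returns true for them.  With any REX prefix present, the same
 * encodings select SPL, BPL, SIL and DIL and are treated uniformly as
 * "low 8 bits of register N".
 */
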
/* Select the size of a push/pop operation. */
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select the size of the stack pointer. */
static inline MemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}

/* Select only size 64 else 32. Used for SSE operand sizes. */
static inline MemOp mo_64_32(MemOp ot)
{
#ifdef TARGET_X86_64
    return ot == MO_64 ? MO_64 : MO_32;
#else
    return MO_32;
#endif
}

/* Select size 8 if lsb of B is clear, else OT. Used for decoding
   byte vs word opcodes. */
static inline MemOp mo_b_d(int b, MemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes. */
static inline MemOp mo_b_d32(int b, MemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}

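/*
 * For example, the ADD Eb,Gb / ADD Ev,Gv opcode pair 0x00/0x01 differs
 * only in bit 0: mo_b_d(0x00, ot) yields MO_8, while mo_b_d(0x01, ot)
 * yields the full operand size.  mo_b_d32() applies the same rule to
 * IN/OUT, whose "word" forms never exceed 32 bits.
 */
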
/* Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register and return the
 * register's TCGv.
 *
 * If DEST is not NULL, store the result into DEST and return the
 * register's TCGv.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
{
    switch(ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}

static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}

static inline
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(s->A0, s->A0);
    }
}

static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
    s->pc_save = -1;
}

static inline
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, s->T0, s->A0);
    } else {
        gen_op_mov_reg_v(s, idx, d, s->T0);
    }
}

static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
    }
    s->pc_save = s->base.pc_next;
}

static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->pc);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
    }
    s->pc_save = s->pc;
}

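/*
 * Note that with CF_PCREL the two functions above never materialize an
 * absolute EIP: cpu_eip is only adjusted by the delta from s->pc_save
 * (the last point where EIP was known to be up to date), keeping the
 * generated code position independent.  Without CF_PCREL the new EIP is
 * written as an immediate: the absolute pc in 64-bit mode, otherwise
 * the 32-bit offset from the CS base.
 */
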
static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}

static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}

static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode). IRET only uses the value
     * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
     * why passing a 32-bit value isn't broken. To avoid using this where
     * we shouldn't, return -1 in 64-bit mode so that execution goes into
     * the weeds quickly.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}

static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->pc);
    } else {
        return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
    }
}

static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->base.pc_next);
    } else {
        return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
    }
}

/* Compute SEG:REG into DEST. SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to
   indicate no override. */
static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
                               int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(dest, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(dest, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        tcg_gen_ext16u_tl(dest, a0);
        a0 = dest;
        if (ovr_seg < 0) {
            if (ADDSEG(s)) {
                ovr_seg = def_seg;
            } else {
                return;
            }
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(dest, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(dest, a0);
            tcg_gen_add_tl(dest, dest, seg);
        } else {
            tcg_gen_add_tl(dest, a0, seg);
            tcg_gen_ext32u_tl(dest, dest);
        }
    }
}

static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
{
    gen_lea_v_seg_dest(s, aflag, s->A0, a0, def_seg, ovr_seg);
}

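/*
 * As an illustration of the segment handling above: a call such as
 * gen_lea_v_seg(s, MO_16, cpu_regs[R_EBX], R_DS, R_CS), i.e. a 16-bit
 * address through BX with a CS override, masks the register to its low
 * 16 bits, adds cpu_seg_base[R_CS], and (outside 64-bit mode) truncates
 * the resulting linear address to 32 bits in s->A0.
 */
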
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}

static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot)
{
    TCGv dshift = tcg_temp_new();
    tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(dshift, dshift, ot);
    return dshift;
};

static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    if (size == MO_TL) {
        return src;
    }
    if (!dst) {
        dst = tcg_temp_new();
    }
    tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
    return dst;
}

static void gen_extu(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);

    tcg_gen_brcondi_tl(cond, tmp, 0, label1);
}

static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}

static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}

static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, tcg_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, tcg_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, tcg_env, n);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(tcg_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(tcg_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(tcg_env, v, n);
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Validate that access to [port, port + 1<<ot) is allowed.
 * Raise #GP, or VMM exit if not.
 */
static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
                         uint32_t svm_flags)
{
#ifdef CONFIG_USER_ONLY
    /*
     * We do not implement the ioperm(2) syscall, so the TSS check
     * will always fail.
     */
    gen_exception_gpf(s);
    return false;
#else
    if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
        gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
    }
    if (GUEST(s)) {
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
            svm_flags |= SVM_IOIO_REP_MASK;
        }
        svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
        gen_helper_svm_check_io(tcg_env, port,
                                tcg_constant_i32(svm_flags),
                                cur_insn_len_i32(s));
    }
    return true;
#endif
}

static void gen_movs(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}

static void gen_op_update1_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update2_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update3_cc(DisasContext *s, TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
{
    tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);
}

static void gen_op_update_neg_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
    tcg_gen_neg_tl(cpu_cc_src, s->T0);
    tcg_gen_movi_tl(s->cc_srcT, 0);
}

/* compute all eflags to reg */
static void gen_mov_eflags(DisasContext *s, TCGv reg)
{
    TCGv dst, src1, src2;
    TCGv_i32 cc_op;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        tcg_gen_mov_tl(reg, cpu_cc_src);
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(reg, CC_Z | CC_P);
        return;
    }

    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live. */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        TCGv zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    if (s->cc_op != CC_OP_DYNAMIC) {
        cc_op = tcg_constant_i32(s->cc_op);
    } else {
        cc_op = cpu_cc_op;
    }
    gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    gen_mov_eflags(s, cpu_cc_src);
    set_cc_op(s, CC_OP_EFLAGS);
}

typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;

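/*
 * A CCPrepare describes how to test one condition without materializing
 * the whole of EFLAGS.  For instance, the carry out of a shift
 * (CC_OP_SHLB ... CC_OP_SHLQ below) is described as
 *   { .cond = TCG_COND_NE, .reg = cpu_cc_src, .mask = 1 << (bits - 1) }
 * i.e. "the top bit of CC_SRC is set", which gen_setcc1() and gen_jcc1()
 * can turn into a single setcond or brcond.
 */
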
/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0. */
        t0 = t1 == cpu_cc_src ? s->tmp0 : reg;
        tcg_gen_mov_tl(t0, s->cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB. */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}

/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
                             .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}

/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case. */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_extu(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_exts(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS. */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_O };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_O | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}

static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon. */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static TCGLabel *gen_jz_ecx_string(DisasContext *s)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s, l1);
    gen_set_label(l2);
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    return l2;
}

static void gen_stos(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}

static void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
}

static void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}

static void gen_cmps(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}

static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}

static void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}

static void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}

/* Generate jumps to current or next instruction */
static void gen_repz(DisasContext *s, MemOp ot,
                     void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    /*
     * A loop would cause two single step exceptions if ECX = 1
     * before rep string_insn
     */
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \
    { gen_repz(s, ot, gen_##op); }

static void gen_repz2(DisasContext *s, MemOp ot, int nz,
                      void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    gen_update_cc_op(s);
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ2(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
    { gen_repz2(s, ot, nz, gen_##op); }

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)

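/*
 * Each GEN_REPZ(op) above expands to a thin wrapper, e.g.
 *
 *     static inline void gen_repz_movs(DisasContext *s, MemOp ot)
 *     { gen_repz(s, ot, gen_movs); }
 *
 * and GEN_REPZ2 likewise forwards the nz argument, so all REP/REPZ/REPNZ
 * string instructions share the ECX test and loop-back logic of
 * gen_repz() and gen_repz2().
 */
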
a7812ae4
PB
1400static void gen_helper_fp_arith_ST0_FT0(int op)
1401{
1402 switch (op) {
d3eb5eae 1403 case 0:
ad75a51e 1404 gen_helper_fadd_ST0_FT0(tcg_env);
d3eb5eae
BS
1405 break;
1406 case 1:
ad75a51e 1407 gen_helper_fmul_ST0_FT0(tcg_env);
d3eb5eae
BS
1408 break;
1409 case 2:
ad75a51e 1410 gen_helper_fcom_ST0_FT0(tcg_env);
d3eb5eae
BS
1411 break;
1412 case 3:
ad75a51e 1413 gen_helper_fcom_ST0_FT0(tcg_env);
d3eb5eae
BS
1414 break;
1415 case 4:
ad75a51e 1416 gen_helper_fsub_ST0_FT0(tcg_env);
d3eb5eae
BS
1417 break;
1418 case 5:
ad75a51e 1419 gen_helper_fsubr_ST0_FT0(tcg_env);
d3eb5eae
BS
1420 break;
1421 case 6:
ad75a51e 1422 gen_helper_fdiv_ST0_FT0(tcg_env);
d3eb5eae
BS
1423 break;
1424 case 7:
ad75a51e 1425 gen_helper_fdivr_ST0_FT0(tcg_env);
d3eb5eae 1426 break;
a7812ae4
PB
1427 }
1428}
2c0262af
FB
1429
1430/* NOTE the exception in "r" op ordering */
a7812ae4
PB
1431static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1432{
3df11bb1 1433 TCGv_i32 tmp = tcg_constant_i32(opreg);
a7812ae4 1434 switch (op) {
d3eb5eae 1435 case 0:
ad75a51e 1436 gen_helper_fadd_STN_ST0(tcg_env, tmp);
d3eb5eae
BS
1437 break;
1438 case 1:
ad75a51e 1439 gen_helper_fmul_STN_ST0(tcg_env, tmp);
d3eb5eae
BS
1440 break;
1441 case 4:
ad75a51e 1442 gen_helper_fsubr_STN_ST0(tcg_env, tmp);
d3eb5eae
BS
1443 break;
1444 case 5:
ad75a51e 1445 gen_helper_fsub_STN_ST0(tcg_env, tmp);
d3eb5eae
BS
1446 break;
1447 case 6:
ad75a51e 1448 gen_helper_fdivr_STN_ST0(tcg_env, tmp);
d3eb5eae
BS
1449 break;
1450 case 7:
ad75a51e 1451 gen_helper_fdiv_STN_ST0(tcg_env, tmp);
d3eb5eae 1452 break;
a7812ae4
PB
1453 }
1454}
2c0262af 1455
52236550 1456static void gen_exception(DisasContext *s, int trapno)
e84fcd7f
RH
1457{
1458 gen_update_cc_op(s);
65e4af23 1459 gen_update_eip_cur(s);
ad75a51e 1460 gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
e84fcd7f
RH
1461 s->base.is_jmp = DISAS_NORETURN;
1462}
1463
1464/* Generate #UD for the current instruction. The assumption here is that
1465 the instruction is known, but it isn't allowed in the current cpu mode. */
1466static void gen_illegal_opcode(DisasContext *s)
1467{
52236550 1468 gen_exception(s, EXCP06_ILLOP);
e84fcd7f
RH
1469}
1470
6bd99586
RH
1471/* Generate #GP for the current instruction. */
1472static void gen_exception_gpf(DisasContext *s)
1473{
52236550 1474 gen_exception(s, EXCP0D_GPF);
6bd99586
RH
1475}
1476
bc19f505
RH
1477/* Check for cpl == 0; if not, raise #GP and return false. */
1478static bool check_cpl0(DisasContext *s)
1479{
01b9d8c1 1480 if (CPL(s) == 0) {
bc19f505
RH
1481 return true;
1482 }
1483 gen_exception_gpf(s);
1484 return false;
1485}
1486
aa9f21b1
RH
1487/* If vm86, check for iopl == 3; if not, raise #GP and return false. */
1488static bool check_vm86_iopl(DisasContext *s)
1489{
f8a35846 1490 if (!VM86(s) || IOPL(s) == 3) {
aa9f21b1
RH
1491 return true;
1492 }
1493 gen_exception_gpf(s);
1494 return false;
1495}
1496
ca7874c2
RH
1497/* Check for iopl allowing access; if not, raise #GP and return false. */
1498static bool check_iopl(DisasContext *s)
1499{
f8a35846 1500 if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
ca7874c2
RH
1501 return true;
1502 }
1503 gen_exception_gpf(s);
1504 return false;
1505}
1506
2c0262af 1507/* if d == OR_TMP0, it means memory operand (address in A0) */
14776ab5 1508static void gen_op(DisasContext *s1, int op, MemOp ot, int d)
2c0262af 1509{
2c0262af 1510 if (d != OR_TMP0) {
e84fcd7f
RH
1511 if (s1->prefix & PREFIX_LOCK) {
1512 /* Lock prefix when destination is not memory. */
1513 gen_illegal_opcode(s1);
1514 return;
1515 }
1dbe15ef 1516 gen_op_mov_v_reg(s1, ot, s1->T0, d);
a7cee522 1517 } else if (!(s1->prefix & PREFIX_LOCK)) {
c66f9727 1518 gen_op_ld_v(s1, ot, s1->T0, s1->A0);
2c0262af
FB
1519 }
1520 switch(op) {
1521 case OP_ADCL:
5022f28f 1522 gen_compute_eflags_c(s1, s1->tmp4);
a7cee522 1523 if (s1->prefix & PREFIX_LOCK) {
5022f28f 1524 tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1);
c66f9727 1525 tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
a7cee522
EC
1526 s1->mem_index, ot | MO_LE);
1527 } else {
b48597b0 1528 tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
5022f28f 1529 tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4);
a7cee522
EC
1530 gen_op_st_rm_T0_A0(s1, ot, d);
1531 }
5022f28f 1532 gen_op_update3_cc(s1, s1->tmp4);
988c3eb0 1533 set_cc_op(s1, CC_OP_ADCB + ot);
cad3a37d 1534 break;
2c0262af 1535 case OP_SBBL:
5022f28f 1536 gen_compute_eflags_c(s1, s1->tmp4);
a7cee522 1537 if (s1->prefix & PREFIX_LOCK) {
5022f28f 1538 tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4);
c66f9727
EC
1539 tcg_gen_neg_tl(s1->T0, s1->T0);
1540 tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
a7cee522
EC
1541 s1->mem_index, ot | MO_LE);
1542 } else {
b48597b0 1543 tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
5022f28f 1544 tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4);
a7cee522
EC
1545 gen_op_st_rm_T0_A0(s1, ot, d);
1546 }
5022f28f 1547 gen_op_update3_cc(s1, s1->tmp4);
988c3eb0 1548 set_cc_op(s1, CC_OP_SBBB + ot);
cad3a37d 1549 break;
2c0262af 1550 case OP_ADDL:
a7cee522 1551 if (s1->prefix & PREFIX_LOCK) {
b48597b0 1552 tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1,
a7cee522
EC
1553 s1->mem_index, ot | MO_LE);
1554 } else {
b48597b0 1555 tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
a7cee522
EC
1556 gen_op_st_rm_T0_A0(s1, ot, d);
1557 }
c66f9727 1558 gen_op_update2_cc(s1);
3ca51d07 1559 set_cc_op(s1, CC_OP_ADDB + ot);
2c0262af
FB
1560 break;
1561 case OP_SUBL:
a7cee522 1562 if (s1->prefix & PREFIX_LOCK) {
b48597b0 1563 tcg_gen_neg_tl(s1->T0, s1->T1);
c66f9727 1564 tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0,
a7cee522 1565 s1->mem_index, ot | MO_LE);
b48597b0 1566 tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1);
a7cee522 1567 } else {
c66f9727 1568 tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
b48597b0 1569 tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
a7cee522
EC
1570 gen_op_st_rm_T0_A0(s1, ot, d);
1571 }
c66f9727 1572 gen_op_update2_cc(s1);
3ca51d07 1573 set_cc_op(s1, CC_OP_SUBB + ot);
2c0262af
FB
1574 break;
1575 default:
1576 case OP_ANDL:
a7cee522 1577 if (s1->prefix & PREFIX_LOCK) {
b48597b0 1578 tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1,
a7cee522
EC
1579 s1->mem_index, ot | MO_LE);
1580 } else {
b48597b0 1581 tcg_gen_and_tl(s1->T0, s1->T0, s1->T1);
a7cee522
EC
1582 gen_op_st_rm_T0_A0(s1, ot, d);
1583 }
c66f9727 1584 gen_op_update1_cc(s1);
3ca51d07 1585 set_cc_op(s1, CC_OP_LOGICB + ot);
57fec1fe 1586 break;
2c0262af 1587 case OP_ORL:
a7cee522 1588 if (s1->prefix & PREFIX_LOCK) {
b48597b0 1589 tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1,
a7cee522
EC
1590 s1->mem_index, ot | MO_LE);
1591 } else {
b48597b0 1592 tcg_gen_or_tl(s1->T0, s1->T0, s1->T1);
a7cee522
EC
1593 gen_op_st_rm_T0_A0(s1, ot, d);
1594 }
c66f9727 1595 gen_op_update1_cc(s1);
3ca51d07 1596 set_cc_op(s1, CC_OP_LOGICB + ot);
57fec1fe 1597 break;
2c0262af 1598 case OP_XORL:
a7cee522 1599 if (s1->prefix & PREFIX_LOCK) {
b48597b0 1600 tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1,
a7cee522
EC
1601 s1->mem_index, ot | MO_LE);
1602 } else {
b48597b0 1603 tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1);
a7cee522
EC
1604 gen_op_st_rm_T0_A0(s1, ot, d);
1605 }
c66f9727 1606 gen_op_update1_cc(s1);
3ca51d07 1607 set_cc_op(s1, CC_OP_LOGICB + ot);
2c0262af
FB
1608 break;
1609 case OP_CMPL:
b48597b0 1610 tcg_gen_mov_tl(cpu_cc_src, s1->T1);
c66f9727 1611 tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
b48597b0 1612 tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
3ca51d07 1613 set_cc_op(s1, CC_OP_SUBB + ot);
2c0262af
FB
1614 break;
1615 }
b6abf97d
FB
1616}
1617
2c0262af 1618/* if d == OR_TMP0, it means memory operand (address in A0) */
14776ab5 1619static void gen_inc(DisasContext *s1, MemOp ot, int d, int c)
2c0262af 1620{
60e57346 1621 if (s1->prefix & PREFIX_LOCK) {
8cb2ca3d
PM
1622 if (d != OR_TMP0) {
1623 /* Lock prefix when destination is not memory */
1624 gen_illegal_opcode(s1);
1625 return;
1626 }
c66f9727
EC
1627 tcg_gen_movi_tl(s1->T0, c > 0 ? 1 : -1);
1628 tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
60e57346 1629 s1->mem_index, ot | MO_LE);
909be183 1630 } else {
60e57346 1631 if (d != OR_TMP0) {
1dbe15ef 1632 gen_op_mov_v_reg(s1, ot, s1->T0, d);
60e57346 1633 } else {
c66f9727 1634 gen_op_ld_v(s1, ot, s1->T0, s1->A0);
60e57346 1635 }
c66f9727 1636 tcg_gen_addi_tl(s1->T0, s1->T0, (c > 0 ? 1 : -1));
60e57346 1637 gen_op_st_rm_T0_A0(s1, ot, d);
909be183 1638 }
60e57346 1639
cc8b6f5b 1640 gen_compute_eflags_c(s1, cpu_cc_src);
c66f9727 1641 tcg_gen_mov_tl(cpu_cc_dst, s1->T0);
60e57346 1642 set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
2c0262af
FB
1643}
1644
14776ab5 1645static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
d67dc9e6 1646 TCGv shm1, TCGv count, bool is_right)
f437d0a3
RH
1647{
1648 TCGv_i32 z32, s32, oldop;
1649 TCGv z_tl;
1650
1651 /* Store the results into the CC variables. If we know that the
1652 variable must be dead, store unconditionally. Otherwise we'll
1653 need to not disrupt the current contents. */
3df11bb1 1654 z_tl = tcg_constant_tl(0);
f437d0a3
RH
1655 if (cc_op_live[s->cc_op] & USES_CC_DST) {
1656 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
1657 result, cpu_cc_dst);
1658 } else {
1659 tcg_gen_mov_tl(cpu_cc_dst, result);
1660 }
1661 if (cc_op_live[s->cc_op] & USES_CC_SRC) {
1662 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
1663 shm1, cpu_cc_src);
1664 } else {
1665 tcg_gen_mov_tl(cpu_cc_src, shm1);
1666 }
f437d0a3
RH
1667
1668 /* Get the two potential CC_OP values into temporaries. */
6bd48f6f 1669 tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
f437d0a3
RH
1670 if (s->cc_op == CC_OP_DYNAMIC) {
1671 oldop = cpu_cc_op;
1672 } else {
4f82446d
EC
1673 tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
1674 oldop = s->tmp3_i32;
f437d0a3
RH
1675 }
1676
1677 /* Conditionally store the CC_OP value. */
3df11bb1 1678 z32 = tcg_constant_i32(0);
f437d0a3
RH
1679 s32 = tcg_temp_new_i32();
1680 tcg_gen_trunc_tl_i32(s32, count);
6bd48f6f 1681 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);
f437d0a3
RH
1682
1683 /* The CC_OP value is no longer predictable. */
1684 set_cc_op(s, CC_OP_DYNAMIC);
1685}
1686
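/*
 * SHL/SHR/SAR with a count taken from T1 (CL).  The count is masked to
 * 5 bits (6 for 64-bit operand size); tmp0 receives the value shifted
 * by count-1 so that gen_shift_flags() can reconstruct the flags.
 */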
14776ab5 1687static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1,
b6abf97d 1688 int is_right, int is_arith)
2c0262af 1689{
4ba9938c 1690 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
3b46e624 1691
b6abf97d 1692 /* load */
82786041 1693 if (op1 == OR_TMP0) {
c66f9727 1694 gen_op_ld_v(s, ot, s->T0, s->A0);
82786041 1695 } else {
1dbe15ef 1696 gen_op_mov_v_reg(s, ot, s->T0, op1);
82786041 1697 }
b6abf97d 1698
b48597b0 1699 tcg_gen_andi_tl(s->T1, s->T1, mask);
fbd80f02 1700 tcg_gen_subi_tl(s->tmp0, s->T1, 1);
b6abf97d
FB
1701
1702 if (is_right) {
1703 if (is_arith) {
c66f9727 1704 gen_exts(ot, s->T0);
fbd80f02 1705 tcg_gen_sar_tl(s->tmp0, s->T0, s->tmp0);
b48597b0 1706 tcg_gen_sar_tl(s->T0, s->T0, s->T1);
b6abf97d 1707 } else {
c66f9727 1708 gen_extu(ot, s->T0);
fbd80f02 1709 tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
b48597b0 1710 tcg_gen_shr_tl(s->T0, s->T0, s->T1);
b6abf97d
FB
1711 }
1712 } else {
fbd80f02 1713 tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
b48597b0 1714 tcg_gen_shl_tl(s->T0, s->T0, s->T1);
b6abf97d
FB
1715 }
1716
1717 /* store */
d4faa3e0 1718 gen_op_st_rm_T0_A0(s, ot, op1);
82786041 1719
fbd80f02 1720 gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right);
b6abf97d
FB
1721}
1722
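/*
 * Immediate-count variant: the masked count is known at translation
 * time, so the flags are computed directly, and only when the count is
 * non-zero (a zero count leaves EFLAGS unchanged).
 */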
14776ab5 1723static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
c1c37968
FB
1724 int is_right, int is_arith)
1725{
4ba9938c 1726 int mask = (ot == MO_64 ? 0x3f : 0x1f);
c1c37968
FB
1727
1728 /* load */
1729 if (op1 == OR_TMP0)
c66f9727 1730 gen_op_ld_v(s, ot, s->T0, s->A0);
c1c37968 1731 else
1dbe15ef 1732 gen_op_mov_v_reg(s, ot, s->T0, op1);
c1c37968
FB
1733
1734 op2 &= mask;
1735 if (op2 != 0) {
1736 if (is_right) {
1737 if (is_arith) {
c66f9727 1738 gen_exts(ot, s->T0);
5022f28f 1739 tcg_gen_sari_tl(s->tmp4, s->T0, op2 - 1);
c66f9727 1740 tcg_gen_sari_tl(s->T0, s->T0, op2);
c1c37968 1741 } else {
c66f9727 1742 gen_extu(ot, s->T0);
5022f28f 1743 tcg_gen_shri_tl(s->tmp4, s->T0, op2 - 1);
c66f9727 1744 tcg_gen_shri_tl(s->T0, s->T0, op2);
c1c37968
FB
1745 }
1746 } else {
5022f28f 1747 tcg_gen_shli_tl(s->tmp4, s->T0, op2 - 1);
c66f9727 1748 tcg_gen_shli_tl(s->T0, s->T0, op2);
c1c37968
FB
1749 }
1750 }
1751
1752 /* store */
d4faa3e0
RH
1753 gen_op_st_rm_T0_A0(s, ot, op1);
1754
c1c37968
FB
 1755 /* update eflags if non-zero shift */
1756 if (op2 != 0) {
5022f28f 1757 tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
c66f9727 1758 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3ca51d07 1759 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
c1c37968
FB
1760 }
1761}
1762
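/*
 * ROL/ROR with a count taken from T1.  8- and 16-bit operands are
 * widened so that a 32-bit rotate gives the right result; CF and OF are
 * rebuilt from the rotated value, and CC_OP becomes dynamic because a
 * zero count changes nothing.
 */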
14776ab5 1763static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right)
b6abf97d 1764{
4ba9938c 1765 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
34d80a55 1766 TCGv_i32 t0, t1;
b6abf97d
FB
1767
1768 /* load */
1e4840bf 1769 if (op1 == OR_TMP0) {
c66f9727 1770 gen_op_ld_v(s, ot, s->T0, s->A0);
1e4840bf 1771 } else {
1dbe15ef 1772 gen_op_mov_v_reg(s, ot, s->T0, op1);
1e4840bf 1773 }
b6abf97d 1774
b48597b0 1775 tcg_gen_andi_tl(s->T1, s->T1, mask);
b6abf97d 1776
34d80a55 1777 switch (ot) {
4ba9938c 1778 case MO_8:
34d80a55 1779 /* Replicate the 8-bit input so that a 32-bit rotate works. */
c66f9727
EC
1780 tcg_gen_ext8u_tl(s->T0, s->T0);
1781 tcg_gen_muli_tl(s->T0, s->T0, 0x01010101);
34d80a55 1782 goto do_long;
4ba9938c 1783 case MO_16:
34d80a55 1784 /* Replicate the 16-bit input so that a 32-bit rotate works. */
c66f9727 1785 tcg_gen_deposit_tl(s->T0, s->T0, s->T0, 16, 16);
34d80a55
RH
1786 goto do_long;
1787 do_long:
1788#ifdef TARGET_X86_64
4ba9938c 1789 case MO_32:
6bd48f6f 1790 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
4f82446d 1791 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
34d80a55 1792 if (is_right) {
4f82446d 1793 tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
34d80a55 1794 } else {
4f82446d 1795 tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
34d80a55 1796 }
6bd48f6f 1797 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
34d80a55
RH
1798 break;
1799#endif
1800 default:
1801 if (is_right) {
b48597b0 1802 tcg_gen_rotr_tl(s->T0, s->T0, s->T1);
34d80a55 1803 } else {
b48597b0 1804 tcg_gen_rotl_tl(s->T0, s->T0, s->T1);
34d80a55
RH
1805 }
1806 break;
b6abf97d 1807 }
b6abf97d 1808
b6abf97d 1809 /* store */
d4faa3e0 1810 gen_op_st_rm_T0_A0(s, ot, op1);
b6abf97d 1811
34d80a55
RH
1812 /* We'll need the flags computed into CC_SRC. */
1813 gen_compute_eflags(s);
b6abf97d 1814
34d80a55
RH
1815 /* The value that was "rotated out" is now present at the other end
1816 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1817 since we've computed the flags into CC_SRC, these variables are
1818 currently dead. */
b6abf97d 1819 if (is_right) {
c66f9727
EC
1820 tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
1821 tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
089305ac 1822 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
34d80a55 1823 } else {
c66f9727
EC
1824 tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
1825 tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
b6abf97d 1826 }
34d80a55
RH
1827 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1828 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1829
1830 /* Now conditionally store the new CC_OP value. If the shift count
1831 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
 1832 Otherwise reuse CC_OP_ADCOX, which has the C and O flags split out
1833 exactly as we computed above. */
3df11bb1 1834 t0 = tcg_constant_i32(0);
34d80a55 1835 t1 = tcg_temp_new_i32();
b48597b0 1836 tcg_gen_trunc_tl_i32(t1, s->T1);
6bd48f6f 1837 tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX);
4f82446d 1838 tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS);
34d80a55 1839 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
4f82446d 1840 s->tmp2_i32, s->tmp3_i32);
34d80a55 1841
2e3afe8e 1842 /* The CC_OP value is no longer predictable. */
34d80a55 1843 set_cc_op(s, CC_OP_DYNAMIC);
b6abf97d
FB
1844}
1845
14776ab5 1846static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
8cd6345d 1847 int is_right)
1848{
4ba9938c 1849 int mask = (ot == MO_64 ? 0x3f : 0x1f);
34d80a55 1850 int shift;
8cd6345d 1851
1852 /* load */
1853 if (op1 == OR_TMP0) {
c66f9727 1854 gen_op_ld_v(s, ot, s->T0, s->A0);
8cd6345d 1855 } else {
1dbe15ef 1856 gen_op_mov_v_reg(s, ot, s->T0, op1);
8cd6345d 1857 }
1858
8cd6345d 1859 op2 &= mask;
8cd6345d 1860 if (op2 != 0) {
34d80a55
RH
1861 switch (ot) {
1862#ifdef TARGET_X86_64
4ba9938c 1863 case MO_32:
6bd48f6f 1864 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
34d80a55 1865 if (is_right) {
6bd48f6f 1866 tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, op2);
34d80a55 1867 } else {
6bd48f6f 1868 tcg_gen_rotli_i32(s->tmp2_i32, s->tmp2_i32, op2);
34d80a55 1869 }
6bd48f6f 1870 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
34d80a55
RH
1871 break;
1872#endif
1873 default:
1874 if (is_right) {
c66f9727 1875 tcg_gen_rotri_tl(s->T0, s->T0, op2);
34d80a55 1876 } else {
c66f9727 1877 tcg_gen_rotli_tl(s->T0, s->T0, op2);
34d80a55
RH
1878 }
1879 break;
4ba9938c 1880 case MO_8:
34d80a55
RH
1881 mask = 7;
1882 goto do_shifts;
4ba9938c 1883 case MO_16:
34d80a55
RH
1884 mask = 15;
1885 do_shifts:
1886 shift = op2 & mask;
1887 if (is_right) {
1888 shift = mask + 1 - shift;
1889 }
c66f9727 1890 gen_extu(ot, s->T0);
fbd80f02 1891 tcg_gen_shli_tl(s->tmp0, s->T0, shift);
c66f9727 1892 tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift);
fbd80f02 1893 tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
34d80a55 1894 break;
8cd6345d 1895 }
8cd6345d 1896 }
1897
1898 /* store */
d4faa3e0 1899 gen_op_st_rm_T0_A0(s, ot, op1);
8cd6345d 1900
1901 if (op2 != 0) {
34d80a55 1902 /* Compute the flags into CC_SRC. */
d229edce 1903 gen_compute_eflags(s);
0ff6addd 1904
34d80a55
RH
1905 /* The value that was "rotated out" is now present at the other end
1906 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1907 since we've computed the flags into CC_SRC, these variables are
1908 currently dead. */
8cd6345d 1909 if (is_right) {
c66f9727
EC
1910 tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
1911 tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
38ebb396 1912 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
34d80a55 1913 } else {
c66f9727
EC
1914 tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
1915 tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
8cd6345d 1916 }
34d80a55
RH
1917 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1918 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1919 set_cc_op(s, CC_OP_ADCOX);
8cd6345d 1920 }
8cd6345d 1921}
1922
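/*
 * RCL/RCR (rotate through carry) go through out-of-line helpers; the
 * flags are first materialized into CC_OP_EFLAGS form so the helpers
 * see the current CF and can update it.
 */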
b6abf97d 1923/* XXX: add faster immediate = 1 case */
14776ab5 1924static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
b6abf97d
FB
1925 int is_right)
1926{
d229edce 1927 gen_compute_eflags(s);
c7b3c873 1928 assert(s->cc_op == CC_OP_EFLAGS);
b6abf97d
FB
1929
1930 /* load */
1931 if (op1 == OR_TMP0)
c66f9727 1932 gen_op_ld_v(s, ot, s->T0, s->A0);
b6abf97d 1933 else
1dbe15ef 1934 gen_op_mov_v_reg(s, ot, s->T0, op1);
2e3afe8e 1935
a7812ae4
PB
1936 if (is_right) {
1937 switch (ot) {
4ba9938c 1938 case MO_8:
ad75a51e 1939 gen_helper_rcrb(s->T0, tcg_env, s->T0, s->T1);
7923057b 1940 break;
4ba9938c 1941 case MO_16:
ad75a51e 1942 gen_helper_rcrw(s->T0, tcg_env, s->T0, s->T1);
7923057b 1943 break;
4ba9938c 1944 case MO_32:
ad75a51e 1945 gen_helper_rcrl(s->T0, tcg_env, s->T0, s->T1);
7923057b 1946 break;
a7812ae4 1947#ifdef TARGET_X86_64
4ba9938c 1948 case MO_64:
ad75a51e 1949 gen_helper_rcrq(s->T0, tcg_env, s->T0, s->T1);
7923057b 1950 break;
a7812ae4 1951#endif
d67dc9e6 1952 default:
732e89f4 1953 g_assert_not_reached();
a7812ae4
PB
1954 }
1955 } else {
1956 switch (ot) {
4ba9938c 1957 case MO_8:
ad75a51e 1958 gen_helper_rclb(s->T0, tcg_env, s->T0, s->T1);
7923057b 1959 break;
4ba9938c 1960 case MO_16:
ad75a51e 1961 gen_helper_rclw(s->T0, tcg_env, s->T0, s->T1);
7923057b 1962 break;
4ba9938c 1963 case MO_32:
ad75a51e 1964 gen_helper_rcll(s->T0, tcg_env, s->T0, s->T1);
7923057b 1965 break;
a7812ae4 1966#ifdef TARGET_X86_64
4ba9938c 1967 case MO_64:
ad75a51e 1968 gen_helper_rclq(s->T0, tcg_env, s->T0, s->T1);
7923057b 1969 break;
a7812ae4 1970#endif
d67dc9e6 1971 default:
732e89f4 1972 g_assert_not_reached();
a7812ae4
PB
1973 }
1974 }
b6abf97d 1975 /* store */
d4faa3e0 1976 gen_op_st_rm_T0_A0(s, ot, op1);
b6abf97d
FB
1977}
1978
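/*
 * SHLD/SHRD: shift T0 left/right by COUNT_IN, filling with bits from T1.
 * 16-bit operands follow the Intel behaviour for counts above 16 (see
 * the note below); 32-bit operands are handled with a single 64-bit
 * shift when TARGET_X86_64 is available.
 */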
1979/* XXX: add faster immediate case */
14776ab5 1980static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
f437d0a3 1981 bool is_right, TCGv count_in)
b6abf97d 1982{
4ba9938c 1983 target_ulong mask = (ot == MO_64 ? 63 : 31);
f437d0a3 1984 TCGv count;
b6abf97d
FB
1985
1986 /* load */
1e4840bf 1987 if (op1 == OR_TMP0) {
c66f9727 1988 gen_op_ld_v(s, ot, s->T0, s->A0);
1e4840bf 1989 } else {
1dbe15ef 1990 gen_op_mov_v_reg(s, ot, s->T0, op1);
1e4840bf 1991 }
b6abf97d 1992
f437d0a3
RH
1993 count = tcg_temp_new();
1994 tcg_gen_andi_tl(count, count_in, mask);
1e4840bf 1995
f437d0a3 1996 switch (ot) {
4ba9938c 1997 case MO_16:
f437d0a3
RH
1998 /* Note: we implement the Intel behaviour for shift count > 16.
1999 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
2000 portion by constructing it as a 32-bit value. */
b6abf97d 2001 if (is_right) {
fbd80f02 2002 tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
b48597b0 2003 tcg_gen_mov_tl(s->T1, s->T0);
fbd80f02 2004 tcg_gen_mov_tl(s->T0, s->tmp0);
b6abf97d 2005 } else {
b48597b0 2006 tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
b6abf97d 2007 }
bdddc1c4
CQ
2008 /*
 2009 * If TARGET_X86_64 is defined, fall through into the MO_32 case;
 2010 * otherwise fall through to the default case.
2011 */
4ba9938c 2012 case MO_32:
bdddc1c4 2013#ifdef TARGET_X86_64
f437d0a3 2014 /* Concatenate the two 32-bit values and use a 64-bit shift. */
fbd80f02 2015 tcg_gen_subi_tl(s->tmp0, count, 1);
b6abf97d 2016 if (is_right) {
b48597b0 2017 tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
fbd80f02 2018 tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
c66f9727 2019 tcg_gen_shr_i64(s->T0, s->T0, count);
f437d0a3 2020 } else {
b48597b0 2021 tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
fbd80f02 2022 tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
c66f9727 2023 tcg_gen_shl_i64(s->T0, s->T0, count);
fbd80f02 2024 tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
c66f9727 2025 tcg_gen_shri_i64(s->T0, s->T0, 32);
f437d0a3
RH
2026 }
2027 break;
2028#endif
2029 default:
fbd80f02 2030 tcg_gen_subi_tl(s->tmp0, count, 1);
f437d0a3 2031 if (is_right) {
fbd80f02 2032 tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
b6abf97d 2033
5022f28f 2034 tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
c66f9727 2035 tcg_gen_shr_tl(s->T0, s->T0, count);
5022f28f 2036 tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
b6abf97d 2037 } else {
fbd80f02 2038 tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
4ba9938c 2039 if (ot == MO_16) {
f437d0a3 2040 /* Only needed if count > 16, for Intel behaviour. */
5022f28f
EC
2041 tcg_gen_subfi_tl(s->tmp4, 33, count);
2042 tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
2043 tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
f437d0a3
RH
2044 }
2045
5022f28f 2046 tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
c66f9727 2047 tcg_gen_shl_tl(s->T0, s->T0, count);
5022f28f 2048 tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
b6abf97d 2049 }
5022f28f
EC
2050 tcg_gen_movi_tl(s->tmp4, 0);
2051 tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
2052 s->tmp4, s->T1);
b48597b0 2053 tcg_gen_or_tl(s->T0, s->T0, s->T1);
f437d0a3 2054 break;
b6abf97d 2055 }
b6abf97d 2056
b6abf97d 2057 /* store */
d4faa3e0 2058 gen_op_st_rm_T0_A0(s, ot, op1);
1e4840bf 2059
fbd80f02 2060 gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
b6abf97d
FB
2061}
2062
14776ab5 2063static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s)
b6abf97d
FB
2064{
2065 if (s != OR_TMP1)
1dbe15ef 2066 gen_op_mov_v_reg(s1, ot, s1->T1, s);
b6abf97d
FB
2067 switch(op) {
2068 case OP_ROL:
2069 gen_rot_rm_T1(s1, ot, d, 0);
2070 break;
2071 case OP_ROR:
2072 gen_rot_rm_T1(s1, ot, d, 1);
2073 break;
2074 case OP_SHL:
2075 case OP_SHL1:
2076 gen_shift_rm_T1(s1, ot, d, 0, 0);
2077 break;
2078 case OP_SHR:
2079 gen_shift_rm_T1(s1, ot, d, 1, 0);
2080 break;
2081 case OP_SAR:
2082 gen_shift_rm_T1(s1, ot, d, 1, 1);
2083 break;
2084 case OP_RCL:
2085 gen_rotc_rm_T1(s1, ot, d, 0);
2086 break;
2087 case OP_RCR:
2088 gen_rotc_rm_T1(s1, ot, d, 1);
2089 break;
2090 }
2c0262af
FB
2091}
2092
14776ab5 2093static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c)
2c0262af 2094{
c1c37968 2095 switch(op) {
8cd6345d 2096 case OP_ROL:
2097 gen_rot_rm_im(s1, ot, d, c, 0);
2098 break;
2099 case OP_ROR:
2100 gen_rot_rm_im(s1, ot, d, c, 1);
2101 break;
c1c37968
FB
2102 case OP_SHL:
2103 case OP_SHL1:
2104 gen_shift_rm_im(s1, ot, d, c, 0, 0);
2105 break;
2106 case OP_SHR:
2107 gen_shift_rm_im(s1, ot, d, c, 1, 0);
2108 break;
2109 case OP_SAR:
2110 gen_shift_rm_im(s1, ot, d, c, 1, 1);
2111 break;
2112 default:
2113 /* currently not optimized */
b48597b0 2114 tcg_gen_movi_tl(s1->T1, c);
c1c37968
FB
2115 gen_shift(s1, op, ot, d, OR_TMP1);
2116 break;
2117 }
2c0262af
FB
2118}
2119
b066c537
PB
2120#define X86_MAX_INSN_LENGTH 15
2121
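/*
 * Advance s->pc by NUM_BYTES, checking the instruction-length limits.
 * A subsequent instruction that crosses a page boundary is restarted in
 * a new TB (siglongjmp code 2); exceeding 15 bytes raises #GP
 * (siglongjmp code 1), unless a fault on the second page takes priority.
 */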
e3af7c78
PB
2122static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
2123{
2124 uint64_t pc = s->pc;
2125
95093668
IL
2126 /* This is a subsequent insn that crosses a page boundary. */
2127 if (s->base.num_insns > 1 &&
2128 !is_same_page(&s->base, s->pc + num_bytes - 1)) {
2129 siglongjmp(s->jmpbuf, 2);
2130 }
2131
e3af7c78 2132 s->pc += num_bytes;
ad1d6f07 2133 if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
b066c537
PB
2134 /* If the instruction's 16th byte is on a different page than the 1st, a
2135 * page fault on the second page wins over the general protection fault
2136 * caused by the instruction being too long.
2137 * This can happen even if the operand is only one byte long!
2138 */
2139 if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
2140 volatile uint8_t unused =
2141 cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
2142 (void) unused;
2143 }
2144 siglongjmp(s->jmpbuf, 1);
2145 }
2146
e3af7c78
PB
2147 return pc;
2148}
2149
2150static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
2151{
4e116893 2152 return translator_ldub(env, &s->base, advance_pc(env, s, 1));
e3af7c78
PB
2153}
2154
2155static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
2156{
dac8d19b 2157 return translator_lduw(env, &s->base, advance_pc(env, s, 2));
e3af7c78
PB
2158}
2159
2160static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
2161{
4e116893 2162 return translator_lduw(env, &s->base, advance_pc(env, s, 2));
e3af7c78
PB
2163}
2164
2165static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
2166{
4e116893 2167 return translator_ldl(env, &s->base, advance_pc(env, s, 4));
e3af7c78
PB
2168}
2169
2170#ifdef TARGET_X86_64
2171static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
2172{
4e116893 2173 return translator_ldq(env, &s->base, advance_pc(env, s, 8));
e3af7c78
PB
2174}
2175#endif
2176
a074ce42
RH
2177/* Decompose an address. */
2178
2179typedef struct AddressParts {
2180 int def_seg;
2181 int base;
2182 int index;
2183 int scale;
2184 target_long disp;
2185} AddressParts;
2186
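/*
 * Example (32-bit address size): for "mov eax, [ebx + ecx*4 + 4]" the
 * modrm/SIB/disp bytes 44 8b 04 decode to base=R_EBX, index=R_ECX,
 * scale=2, disp=4, def_seg=R_DS.
 */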
2187static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
2188 int modrm)
2c0262af 2189{
a074ce42 2190 int def_seg, base, index, scale, mod, rm;
14ce26e7 2191 target_long disp;
a074ce42 2192 bool havesib;
2c0262af 2193
d6a29149 2194 def_seg = R_DS;
a074ce42
RH
2195 index = -1;
2196 scale = 0;
2197 disp = 0;
2198
2c0262af
FB
2199 mod = (modrm >> 6) & 3;
2200 rm = modrm & 7;
a074ce42
RH
2201 base = rm | REX_B(s);
2202
2203 if (mod == 3) {
2204 /* Normally filtered out earlier, but including this path
2205 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
2206 goto done;
2207 }
2c0262af 2208
1d71ddb1
RH
2209 switch (s->aflag) {
2210 case MO_64:
2211 case MO_32:
2c0262af 2212 havesib = 0;
a074ce42 2213 if (rm == 4) {
e3af7c78 2214 int code = x86_ldub_code(env, s);
2c0262af 2215 scale = (code >> 6) & 3;
14ce26e7 2216 index = ((code >> 3) & 7) | REX_X(s);
7865eec4
RH
2217 if (index == 4) {
2218 index = -1; /* no index */
2219 }
a074ce42
RH
2220 base = (code & 7) | REX_B(s);
2221 havesib = 1;
2c0262af
FB
2222 }
2223
2224 switch (mod) {
2225 case 0:
14ce26e7 2226 if ((base & 7) == 5) {
2c0262af 2227 base = -1;
e3af7c78 2228 disp = (int32_t)x86_ldl_code(env, s);
14ce26e7 2229 if (CODE64(s) && !havesib) {
a074ce42 2230 base = -2;
14ce26e7
FB
2231 disp += s->pc + s->rip_offset;
2232 }
2c0262af
FB
2233 }
2234 break;
2235 case 1:
e3af7c78 2236 disp = (int8_t)x86_ldub_code(env, s);
2c0262af
FB
2237 break;
2238 default:
2239 case 2:
e3af7c78 2240 disp = (int32_t)x86_ldl_code(env, s);
2c0262af
FB
2241 break;
2242 }
3b46e624 2243
7865eec4
RH
2244 /* For correct popl handling with esp. */
2245 if (base == R_ESP && s->popl_esp_hack) {
2246 disp += s->popl_esp_hack;
2247 }
d6a29149
RH
2248 if (base == R_EBP || base == R_ESP) {
2249 def_seg = R_SS;
2c0262af 2250 }
1d71ddb1
RH
2251 break;
2252
2253 case MO_16:
d6a29149 2254 if (mod == 0) {
2c0262af 2255 if (rm == 6) {
a074ce42 2256 base = -1;
e3af7c78 2257 disp = x86_lduw_code(env, s);
d6a29149 2258 break;
2c0262af 2259 }
d6a29149 2260 } else if (mod == 1) {
e3af7c78 2261 disp = (int8_t)x86_ldub_code(env, s);
d6a29149 2262 } else {
e3af7c78 2263 disp = (int16_t)x86_lduw_code(env, s);
2c0262af 2264 }
7effd625 2265
7effd625 2266 switch (rm) {
2c0262af 2267 case 0:
a074ce42
RH
2268 base = R_EBX;
2269 index = R_ESI;
2c0262af
FB
2270 break;
2271 case 1:
a074ce42
RH
2272 base = R_EBX;
2273 index = R_EDI;
2c0262af
FB
2274 break;
2275 case 2:
a074ce42
RH
2276 base = R_EBP;
2277 index = R_ESI;
d6a29149 2278 def_seg = R_SS;
2c0262af
FB
2279 break;
2280 case 3:
a074ce42
RH
2281 base = R_EBP;
2282 index = R_EDI;
d6a29149 2283 def_seg = R_SS;
2c0262af
FB
2284 break;
2285 case 4:
a074ce42 2286 base = R_ESI;
2c0262af
FB
2287 break;
2288 case 5:
a074ce42 2289 base = R_EDI;
2c0262af
FB
2290 break;
2291 case 6:
a074ce42 2292 base = R_EBP;
d6a29149 2293 def_seg = R_SS;
2c0262af
FB
2294 break;
2295 default:
2296 case 7:
a074ce42 2297 base = R_EBX;
2c0262af
FB
2298 break;
2299 }
1d71ddb1
RH
2300 break;
2301
2302 default:
732e89f4 2303 g_assert_not_reached();
2c0262af 2304 }
d6a29149 2305
a074ce42
RH
2306 done:
2307 return (AddressParts){ def_seg, base, index, scale, disp };
2c0262af
FB
2308}
2309
a074ce42 2310/* Compute the address, with a minimum number of TCG ops. */
20581aad 2311static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
e17a36ce 2312{
f764718d 2313 TCGv ea = NULL;
3b46e624 2314
20581aad 2315 if (a.index >= 0 && !is_vsib) {
a074ce42
RH
2316 if (a.scale == 0) {
2317 ea = cpu_regs[a.index];
2318 } else {
6b672b5d
EC
2319 tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
2320 ea = s->A0;
e17a36ce 2321 }
a074ce42 2322 if (a.base >= 0) {
6b672b5d
EC
2323 tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
2324 ea = s->A0;
e17a36ce 2325 }
a074ce42
RH
2326 } else if (a.base >= 0) {
2327 ea = cpu_regs[a.base];
2328 }
f764718d 2329 if (!ea) {
2e3afe8e 2330 if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
e3a79e0e
RH
2331 /* With cpu_eip ~= pc_save, the expression is pc-relative. */
2332 tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
2333 } else {
2334 tcg_gen_movi_tl(s->A0, a.disp);
2335 }
6b672b5d 2336 ea = s->A0;
a074ce42 2337 } else if (a.disp != 0) {
6b672b5d
EC
2338 tcg_gen_addi_tl(s->A0, ea, a.disp);
2339 ea = s->A0;
a074ce42 2340 }
1d71ddb1 2341
a074ce42
RH
2342 return ea;
2343}
1d71ddb1 2344
a074ce42
RH
2345static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
2346{
2347 AddressParts a = gen_lea_modrm_0(env, s, modrm);
20581aad 2348 TCGv ea = gen_lea_modrm_1(s, a, false);
a074ce42
RH
2349 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
2350}
2351
2352static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2353{
2354 (void)gen_lea_modrm_0(env, s, modrm);
e17a36ce
FB
2355}
2356
523e28d7
RH
2357/* Used for BNDCL, BNDCU, BNDCN. */
2358static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
2359 TCGCond cond, TCGv_i64 bndv)
2360{
20581aad
PB
2361 AddressParts a = gen_lea_modrm_0(env, s, modrm);
2362 TCGv ea = gen_lea_modrm_1(s, a, false);
523e28d7 2363
776678b2 2364 tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
523e28d7 2365 if (!CODE64(s)) {
776678b2 2366 tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
523e28d7 2367 }
776678b2
EC
2368 tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
2369 tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
ad75a51e 2370 gen_helper_bndck(tcg_env, s->tmp2_i32);
523e28d7
RH
2371}
2372
664e0f19
FB
2373/* used for LEA and MOV AX, mem */
2374static void gen_add_A0_ds_seg(DisasContext *s)
2375{
6b672b5d 2376 gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override);
664e0f19
FB
2377}
2378
222a3336 2379/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2c0262af 2380 OR_TMP0 */
0af10c86 2381static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
14776ab5 2382 MemOp ot, int reg, int is_store)
2c0262af 2383{
4eeb3939 2384 int mod, rm;
2c0262af
FB
2385
2386 mod = (modrm >> 6) & 3;
14ce26e7 2387 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
2388 if (mod == 3) {
2389 if (is_store) {
2390 if (reg != OR_TMP0)
1dbe15ef
EC
2391 gen_op_mov_v_reg(s, ot, s->T0, reg);
2392 gen_op_mov_reg_v(s, ot, rm, s->T0);
2c0262af 2393 } else {
1dbe15ef 2394 gen_op_mov_v_reg(s, ot, s->T0, rm);
2c0262af 2395 if (reg != OR_TMP0)
1dbe15ef 2396 gen_op_mov_reg_v(s, ot, reg, s->T0);
2c0262af
FB
2397 }
2398 } else {
4eeb3939 2399 gen_lea_modrm(env, s, modrm);
2c0262af
FB
2400 if (is_store) {
2401 if (reg != OR_TMP0)
1dbe15ef 2402 gen_op_mov_v_reg(s, ot, s->T0, reg);
c66f9727 2403 gen_op_st_v(s, ot, s->T0, s->A0);
2c0262af 2404 } else {
c66f9727 2405 gen_op_ld_v(s, ot, s->T0, s->A0);
2c0262af 2406 if (reg != OR_TMP0)
1dbe15ef 2407 gen_op_mov_reg_v(s, ot, reg, s->T0);
2c0262af
FB
2408 }
2409 }
2410}
2411
efcca7ef
PB
2412static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
2413{
2414 target_ulong ret;
2415
2416 switch (ot) {
2417 case MO_8:
2418 ret = x86_ldub_code(env, s);
2419 break;
2420 case MO_16:
2421 ret = x86_lduw_code(env, s);
2422 break;
2423 case MO_32:
2424 ret = x86_ldl_code(env, s);
2425 break;
2426#ifdef TARGET_X86_64
2427 case MO_64:
2428 ret = x86_ldq_code(env, s);
2429 break;
2430#endif
2431 default:
2432 g_assert_not_reached();
2433 }
2434 return ret;
2435}
2436
14776ab5 2437static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
2c0262af
FB
2438{
2439 uint32_t ret;
2440
d67dc9e6 2441 switch (ot) {
4ba9938c 2442 case MO_8:
e3af7c78 2443 ret = x86_ldub_code(env, s);
2c0262af 2444 break;
4ba9938c 2445 case MO_16:
e3af7c78 2446 ret = x86_lduw_code(env, s);
2c0262af 2447 break;
4ba9938c 2448 case MO_32:
d67dc9e6
RH
2449#ifdef TARGET_X86_64
2450 case MO_64:
2451#endif
e3af7c78 2452 ret = x86_ldl_code(env, s);
2c0262af 2453 break;
d67dc9e6 2454 default:
732e89f4 2455 g_assert_not_reached();
2c0262af
FB
2456 }
2457 return ret;
2458}
2459
b3e22b23
PB
2460static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
2461{
2462 target_long ret;
2463
2464 switch (ot) {
2465 case MO_8:
2466 ret = (int8_t) x86_ldub_code(env, s);
2467 break;
2468 case MO_16:
2469 ret = (int16_t) x86_lduw_code(env, s);
2470 break;
2471 case MO_32:
2472 ret = (int32_t) x86_ldl_code(env, s);
2473 break;
2474#ifdef TARGET_X86_64
2475 case MO_64:
2476 ret = x86_ldq_code(env, s);
2477 break;
2478#endif
2479 default:
2480 g_assert_not_reached();
2481 }
2482 return ret;
2483}
2484
14776ab5 2485static inline int insn_const_size(MemOp ot)
14ce26e7 2486{
4ba9938c 2487 if (ot <= MO_32) {
14ce26e7 2488 return 1 << ot;
4ba9938c 2489 } else {
14ce26e7 2490 return 4;
4ba9938c 2491 }
14ce26e7
FB
2492}
2493
54b191de 2494static void gen_jcc(DisasContext *s, int b, int diff)
2c0262af 2495{
54b191de 2496 TCGLabel *l1 = gen_new_label();
8e1c85e3 2497
54b191de
RH
2498 gen_jcc1(s, b, l1);
2499 gen_jmp_rel_csize(s, 0, 1);
2500 gen_set_label(l1);
2501 gen_jmp_rel(s, s->dflag, diff, 0);
2c0262af
FB
2502}
2503
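/* CMOVcc: copy SRC into DEST only when condition B holds, via movcond. */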
d4f61171 2504static void gen_cmovcc1(DisasContext *s, int b, TCGv dest, TCGv src)
f32d3781 2505{
d4f61171 2506 CCPrepare cc = gen_prepare_cc(s, b, s->T1);
f32d3781 2507
57eb0cc8
RH
2508 if (cc.mask != -1) {
2509 TCGv t0 = tcg_temp_new();
2510 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2511 cc.reg = t0;
2512 }
2513 if (!cc.use_reg2) {
3df11bb1 2514 cc.reg2 = tcg_constant_tl(cc.imm);
f32d3781
PB
2515 }
2516
d4f61171 2517 tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
f32d3781
PB
2518}
2519
c117e5b1 2520static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg)
3bd7da9e 2521{
ad75a51e 2522 tcg_gen_ld32u_tl(s->T0, tcg_env,
3bd7da9e
FB
2523 offsetof(CPUX86State,segs[seg_reg].selector));
2524}
2525
c117e5b1 2526static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg)
3bd7da9e 2527{
c66f9727 2528 tcg_gen_ext16u_tl(s->T0, s->T0);
ad75a51e 2529 tcg_gen_st32_tl(s->T0, tcg_env,
3bd7da9e 2530 offsetof(CPUX86State,segs[seg_reg].selector));
c66f9727 2531 tcg_gen_shli_tl(cpu_seg_base[seg_reg], s->T0, 4);
3bd7da9e
FB
2532}
2533
2c0262af
FB
2534/* move T0 to seg_reg and compute if the CPU state may change. Never
2535 call this function with seg_reg == R_CS */
c117e5b1 2536static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
2c0262af 2537{
f8a35846 2538 if (PE(s) && !VM86(s)) {
6bd48f6f 2539 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
ad75a51e 2540 gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
dc196a57
FB
2541 /* abort translation because the addseg value may change or
2542 because ss32 may change. For R_SS, translation must always
 2543 stop, as special handling is needed to disable hardware
 2544 interrupts for the next instruction. */
4da4523c
RH
2545 if (seg_reg == R_SS) {
2546 s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2547 } else if (CODE32(s) && seg_reg < R_FS) {
2548 s->base.is_jmp = DISAS_EOB_NEXT;
1e39d97a 2549 }
3415a4dd 2550 } else {
c66f9727 2551 gen_op_movl_seg_T0_vm(s, seg_reg);
1e39d97a 2552 if (seg_reg == R_SS) {
4da4523c 2553 s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
1e39d97a 2554 }
3415a4dd 2555 }
2c0262af
FB
2556}
2557
b53605db 2558static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
0573fbfc 2559{
872929aa 2560 /* no SVM activated; fast case */
b322b3af 2561 if (likely(!GUEST(s))) {
872929aa 2562 return;
b322b3af 2563 }
ad75a51e 2564 gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
0573fbfc
TS
2565}
2566
4f31916f
FB
2567static inline void gen_stack_update(DisasContext *s, int addend)
2568{
fbd80f02 2569 gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
4f31916f
FB
2570}
2571
432baffe
RH
2572/* Generate a push. It depends on ss32, addseg and dflag. */
2573static void gen_push_v(DisasContext *s, TCGv val)
2c0262af 2574{
14776ab5
TN
2575 MemOp d_ot = mo_pushpop(s, s->dflag);
2576 MemOp a_ot = mo_stacksize(s);
432baffe 2577 int size = 1 << d_ot;
6b672b5d 2578 TCGv new_esp = s->A0;
432baffe 2579
6b672b5d 2580 tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);
2c0262af 2581
77ebcad0 2582 if (!CODE64(s)) {
beedb93c 2583 if (ADDSEG(s)) {
1ec46bf2 2584 new_esp = tcg_temp_new();
6b672b5d 2585 tcg_gen_mov_tl(new_esp, s->A0);
2c0262af 2586 }
6b672b5d 2587 gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2c0262af 2588 }
432baffe 2589
6b672b5d 2590 gen_op_st_v(s, d_ot, val, s->A0);
1dbe15ef 2591 gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
2c0262af
FB
2592}
2593
4f31916f 2594/* A two-step pop is necessary for precise exceptions. */
14776ab5 2595static MemOp gen_pop_T0(DisasContext *s)
2c0262af 2596{
14776ab5 2597 MemOp d_ot = mo_pushpop(s, s->dflag);
8e31d234 2598
24c0573b
PB
2599 gen_lea_v_seg_dest(s, mo_stacksize(s), s->T0, cpu_regs[R_ESP], R_SS, -1);
2600 gen_op_ld_v(s, d_ot, s->T0, s->T0);
8e31d234 2601
8e31d234 2602 return d_ot;
2c0262af
FB
2603}
2604
14776ab5 2605static inline void gen_pop_update(DisasContext *s, MemOp ot)
2c0262af 2606{
8e31d234 2607 gen_stack_update(s, 1 << ot);
2c0262af
FB
2608}
2609
77ebcad0 2610static inline void gen_stack_A0(DisasContext *s)
2c0262af 2611{
b40a47a1 2612 gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
2c0262af
FB
2613}
2614
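/*
 * PUSHA: store all eight general registers below the current stack
 * pointer, EAX at the highest address and EDI at the lowest; the stored
 * ESP is the value it had before this instruction.  ESP itself is
 * updated once, at the end.
 */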
2c0262af
FB
2615static void gen_pusha(DisasContext *s)
2616{
b40a47a1 2617 MemOp s_ot = SS32(s) ? MO_32 : MO_16;
14776ab5 2618 MemOp d_ot = s->dflag;
d37ea0c0 2619 int size = 1 << d_ot;
2c0262af 2620 int i;
d37ea0c0
RH
2621
2622 for (i = 0; i < 8; i++) {
6b672b5d
EC
2623 tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size);
2624 gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
2625 gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
d37ea0c0
RH
2626 }
2627
2628 gen_stack_update(s, -8 * size);
2629}
2630
2c0262af
FB
2631static void gen_popa(DisasContext *s)
2632{
b40a47a1 2633 MemOp s_ot = SS32(s) ? MO_32 : MO_16;
14776ab5 2634 MemOp d_ot = s->dflag;
d37ea0c0 2635 int size = 1 << d_ot;
2c0262af 2636 int i;
d37ea0c0
RH
2637
2638 for (i = 0; i < 8; i++) {
2c0262af 2639 /* ESP is not reloaded */
d37ea0c0
RH
2640 if (7 - i == R_ESP) {
2641 continue;
2c0262af 2642 }
6b672b5d
EC
2643 tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size);
2644 gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
c66f9727 2645 gen_op_ld_v(s, d_ot, s->T0, s->A0);
1dbe15ef 2646 gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
2c0262af 2647 }
d37ea0c0
RH
2648
2649 gen_stack_update(s, 8 * size);
2c0262af
FB
2650}
2651
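/*
 * ENTER: push EBP, optionally copy LEVEL-1 saved frame pointers from
 * the old frame, make EBP point at the new frame, and reserve
 * ESP_ADDEND bytes of locals below the copied pointers.  E.g. ENTER
 * 16,0 behaves like "push %ebp; mov %esp,%ebp; sub $16,%esp".
 */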
2c0262af
FB
2652static void gen_enter(DisasContext *s, int esp_addend, int level)
2653{
14776ab5 2654 MemOp d_ot = mo_pushpop(s, s->dflag);
b40a47a1 2655 MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
743e398e 2656 int size = 1 << d_ot;
2c0262af 2657
743e398e 2658 /* Push BP; compute FrameTemp into T1. */
b48597b0
EC
2659 tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2660 gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1);
6b672b5d 2661 gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
743e398e
RH
2662
2663 level &= 31;
2664 if (level != 0) {
2665 int i;
2666
2667 /* Copy level-1 pointers from the previous frame. */
2668 for (i = 1; i < level; ++i) {
6b672b5d
EC
2669 tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i);
2670 gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
fbd80f02 2671 gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
743e398e 2672
b48597b0 2673 tcg_gen_subi_tl(s->A0, s->T1, size * i);
6b672b5d 2674 gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
fbd80f02 2675 gen_op_st_v(s, d_ot, s->tmp0, s->A0);
8f091a59 2676 }
743e398e
RH
2677
2678 /* Push the current FrameTemp as the last level. */
b48597b0 2679 tcg_gen_subi_tl(s->A0, s->T1, size * level);
6b672b5d 2680 gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
b48597b0 2681 gen_op_st_v(s, d_ot, s->T1, s->A0);
2c0262af 2682 }
743e398e
RH
2683
2684 /* Copy the FrameTemp value to EBP. */
1dbe15ef 2685 gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1);
743e398e
RH
2686
2687 /* Compute the final value of ESP. */
b48597b0 2688 tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
1dbe15ef 2689 gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2c0262af
FB
2690}
2691
2045f04c
RH
2692static void gen_leave(DisasContext *s)
2693{
14776ab5
TN
2694 MemOp d_ot = mo_pushpop(s, s->dflag);
2695 MemOp a_ot = mo_stacksize(s);
2045f04c
RH
2696
2697 gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
c66f9727 2698 gen_op_ld_v(s, d_ot, s->T0, s->A0);
2045f04c 2699
b48597b0 2700 tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2045f04c 2701
1dbe15ef
EC
2702 gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2703 gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2045f04c
RH
2704}
2705
b9f9c5b4
RH
2706/* Similarly, except that the assumption here is that we don't decode
2707 the instruction at all -- either a missing opcode, an unimplemented
2708 feature, or just a bogus instruction stream. */
2709static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2710{
2711 gen_illegal_opcode(s);
2712
2713 if (qemu_loglevel_mask(LOG_UNIMP)) {
c60f599b 2714 FILE *logfile = qemu_log_trylock();
78b54858 2715 if (logfile) {
ddf83b35 2716 target_ulong pc = s->base.pc_next, end = s->pc;
fc59d2d8 2717
78b54858
RH
2718 fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2719 for (; pc < end; ++pc) {
2720 fprintf(logfile, " %02x", cpu_ldub_code(env, pc));
2721 }
2722 fprintf(logfile, "\n");
2723 qemu_log_unlock(logfile);
b9f9c5b4 2724 }
b9f9c5b4
RH
2725 }
2726}
2727
2c0262af 2728/* an interrupt is different from an exception because of the
7f75ffd3 2729 privilege checks */
8ed6c985 2730static void gen_interrupt(DisasContext *s, int intno)
2c0262af 2731{
773cdfcc 2732 gen_update_cc_op(s);
65e4af23 2733 gen_update_eip_cur(s);
ad75a51e 2734 gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
ad1d6f07 2735 cur_insn_len_i32(s));
6cf147aa 2736 s->base.is_jmp = DISAS_NORETURN;
2c0262af
FB
2737}
2738
7f0b7141
RH
2739static void gen_set_hflag(DisasContext *s, uint32_t mask)
2740{
2741 if ((s->flags & mask) == 0) {
2742 TCGv_i32 t = tcg_temp_new_i32();
ad75a51e 2743 tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
7f0b7141 2744 tcg_gen_ori_i32(t, t, mask);
ad75a51e 2745 tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
7f0b7141
RH
2746 s->flags |= mask;
2747 }
2748}
2749
2750static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2751{
2752 if (s->flags & mask) {
2753 TCGv_i32 t = tcg_temp_new_i32();
ad75a51e 2754 tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
7f0b7141 2755 tcg_gen_andi_i32(t, t, ~mask);
ad75a51e 2756 tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
7f0b7141
RH
2757 s->flags &= ~mask;
2758 }
2759}
2760
63179330
RH
2761static void gen_set_eflags(DisasContext *s, target_ulong mask)
2762{
2763 TCGv t = tcg_temp_new();
2764
ad75a51e 2765 tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
63179330 2766 tcg_gen_ori_tl(t, t, mask);
ad75a51e 2767 tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
63179330
RH
2768}
2769
2770static void gen_reset_eflags(DisasContext *s, target_ulong mask)
2771{
2772 TCGv t = tcg_temp_new();
2773
ad75a51e 2774 tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
63179330 2775 tcg_gen_andi_tl(t, t, ~mask);
ad75a51e 2776 tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
63179330
RH
2777}
2778
7d117ce8
RH
2779/* Clear BND registers during legacy branches. */
2780static void gen_bnd_jmp(DisasContext *s)
2781{
8b33e82b
PB
2782 /* Clear the registers only if BND prefix is missing, MPX is enabled,
2783 and if the BNDREGs are known to be in use (non-zero) already.
2784 The helper itself will check BNDPRESERVE at runtime. */
7d117ce8 2785 if ((s->prefix & PREFIX_REPNZ) == 0
8b33e82b
PB
2786 && (s->flags & HF_MPX_EN_MASK) != 0
2787 && (s->flags & HF_MPX_IU_MASK) != 0) {
ad75a51e 2788 gen_helper_bnd_jmp(tcg_env);
7d117ce8
RH
2789 }
2790}
2791
f083d92c 2792/* Generate an end of block. Trace exception is also generated if needed.
c52ab08a
DE
2793 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
2794 If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
2795 S->TF. This is used by the syscall/sysret insns. */
1ebb1af1 2796static void
7f11636d 2797do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
2c0262af 2798{
773cdfcc 2799 gen_update_cc_op(s);
f083d92c
RH
2800
2801 /* If several instructions disable interrupts, only the first does it. */
2802 if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
2803 gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2804 } else {
2805 gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2806 }
2807
6cf147aa 2808 if (s->base.tb->flags & HF_RF_MASK) {
63179330 2809 gen_reset_eflags(s, RF_MASK);
a2397807 2810 }
4bc4c313 2811 if (recheck_tf) {
ad75a51e 2812 gen_helper_rechecking_single_step(tcg_env);
07ea28b4 2813 tcg_gen_exit_tb(NULL, 0);
c1de1a1a 2814 } else if (s->flags & HF_TF_MASK) {
ad75a51e 2815 gen_helper_single_step(tcg_env);
7f11636d
EC
2816 } else if (jr) {
2817 tcg_gen_lookup_and_goto_ptr();
2c0262af 2818 } else {
07ea28b4 2819 tcg_gen_exit_tb(NULL, 0);
2c0262af 2820 }
6cf147aa 2821 s->base.is_jmp = DISAS_NORETURN;
2c0262af
FB
2822}
2823
1ebb1af1
EC
2824static inline void
2825gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
2826{
7f11636d 2827 do_gen_eob_worker(s, inhibit, recheck_tf, false);
1ebb1af1
EC
2828}
2829
c52ab08a
DE
2830/* End of block.
2831 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. */
2832static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
2833{
2834 gen_eob_worker(s, inhibit, false);
2835}
2836
f083d92c
RH
2837/* End of block, resetting the inhibit irq flag. */
2838static void gen_eob(DisasContext *s)
2839{
c52ab08a 2840 gen_eob_worker(s, false, false);
f083d92c
RH
2841}
2842
1ebb1af1 2843/* Jump to register */
faf9ea5f 2844static void gen_jr(DisasContext *s)
1ebb1af1 2845{
7f11636d 2846 do_gen_eob_worker(s, false, false, true);
1ebb1af1
EC
2847}
2848
2255da49 2849/* Jump to eip+diff, truncating the result to OT. */
8760ded6
RH
2850static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2851{
e3a79e0e
RH
2852 bool use_goto_tb = s->jmp_opt;
2853 target_ulong mask = -1;
2854 target_ulong new_pc = s->pc + diff;
2855 target_ulong new_eip = new_pc - s->cs_base;
8760ded6
RH
2856
2857 /* In 64-bit mode, operand size is fixed at 64 bits. */
2858 if (!CODE64(s)) {
2859 if (ot == MO_16) {
e3a79e0e 2860 mask = 0xffff;
2e3afe8e 2861 if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
e3a79e0e
RH
2862 use_goto_tb = false;
2863 }
8760ded6 2864 } else {
e3a79e0e 2865 mask = 0xffffffff;
8760ded6
RH
2866 }
2867 }
e3a79e0e 2868 new_eip &= mask;
900cc7e5
RH
2869
2870 gen_update_cc_op(s);
2871 set_cc_op(s, CC_OP_DYNAMIC);
e3a79e0e 2872
2e3afe8e 2873 if (tb_cflags(s->base.tb) & CF_PCREL) {
e3a79e0e
RH
2874 tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2875 /*
2876 * If we can prove the branch does not leave the page and we have
2877 * no extra masking to apply (data16 branch in code32, see above),
2878 * then we have also proven that the addition does not wrap.
2879 */
2880 if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
2881 tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2882 use_goto_tb = false;
2883 }
a58506b7
RH
2884 } else if (!CODE64(s)) {
2885 new_pc = (uint32_t)(new_eip + s->cs_base);
e3a79e0e
RH
2886 }
2887
b5e0d5d2 2888 if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
900cc7e5
RH
2889 /* jump to same page: we can use a direct jump */
2890 tcg_gen_goto_tb(tb_num);
2e3afe8e 2891 if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
e3a79e0e
RH
2892 tcg_gen_movi_tl(cpu_eip, new_eip);
2893 }
900cc7e5
RH
2894 tcg_gen_exit_tb(s->base.tb, tb_num);
2895 s->base.is_jmp = DISAS_NORETURN;
2896 } else {
2e3afe8e 2897 if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
e3a79e0e
RH
2898 tcg_gen_movi_tl(cpu_eip, new_eip);
2899 }
2900 if (s->jmp_opt) {
2901 gen_jr(s); /* jump to another page */
2902 } else {
2903 gen_eob(s); /* exit to main loop */
2904 }
900cc7e5 2905 }
8760ded6
RH
2906}
2907
2255da49
RH
2908/* Jump to eip+diff, truncating to the current code size. */
2909static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
14ce26e7 2910{
2255da49
RH
2911 /* CODE64 ignores the OT argument, so we need not consider it. */
2912 gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
14ce26e7
FB
2913}
2914
323d1876 2915static inline void gen_ldq_env_A0(DisasContext *s, int offset)
8686c490 2916{
fc313c64 2917 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
ad75a51e 2918 tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
8686c490 2919}
664e0f19 2920
323d1876 2921static inline void gen_stq_env_A0(DisasContext *s, int offset)
8686c490 2922{
ad75a51e 2923 tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
fc313c64 2924 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
8686c490 2925}
664e0f19 2926
958e1dd1 2927static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
8686c490 2928{
46c684c8
RH
2929 MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2930 ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2931 MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
5c42a7cd 2932 int mem_index = s->mem_index;
46c684c8
RH
2933 TCGv_i128 t = tcg_temp_new_i128();
2934
2935 tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
2936 tcg_gen_st_i128(t, tcg_env, offset);
8686c490 2937}
14ce26e7 2938
958e1dd1 2939static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
8686c490 2940{
46c684c8
RH
2941 MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2942 ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2943 MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
5c42a7cd 2944 int mem_index = s->mem_index;
46c684c8
RH
2945 TCGv_i128 t = tcg_temp_new_i128();
2946
2947 tcg_gen_ld_i128(t, tcg_env, offset);
2948 tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
8686c490 2949}
14ce26e7 2950
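/*
 * 256-bit (YMM) loads and stores are split into two 128-bit accesses;
 * only the first access carries the 32-byte alignment check when ALIGN
 * is requested.
 */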
6ba13999
PB
2951static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2952{
46c684c8 2953 MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
6ba13999 2954 int mem_index = s->mem_index;
46c684c8
RH
2955 TCGv_i128 t0 = tcg_temp_new_i128();
2956 TCGv_i128 t1 = tcg_temp_new_i128();
6ba13999 2957
46c684c8 2958 tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
6ba13999 2959 tcg_gen_addi_tl(s->tmp0, s->A0, 16);
46c684c8
RH
2960 tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);
2961
2962 tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2963 tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
6ba13999
PB
2964}
2965
92ec056a
PB
2966static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2967{
46c684c8 2968 MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
92ec056a 2969 int mem_index = s->mem_index;
46c684c8
RH
2970 TCGv_i128 t = tcg_temp_new_i128();
2971
2972 tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2973 tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
92ec056a 2974 tcg_gen_addi_tl(s->tmp0, s->A0, 16);
46c684c8
RH
2975 tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2976 tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
92ec056a
PB
2977}
2978
653fad24
PB
2979#include "decode-new.h"
2980#include "emit.c.inc"
2981#include "decode-new.c.inc"
664e0f19 2982
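/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; if equal,
 * store ECX:EBX, otherwise load the operand into EDX:EAX.  ZF is set
 * from the comparison result.
 */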
6218c177
RH
2983static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
2984{
326ad06c
RH
2985 TCGv_i64 cmp, val, old;
2986 TCGv Z;
2987
6218c177
RH
2988 gen_lea_modrm(env, s, modrm);
2989
326ad06c
RH
2990 cmp = tcg_temp_new_i64();
2991 val = tcg_temp_new_i64();
2992 old = tcg_temp_new_i64();
2993
2994 /* Construct the comparison values from the register pair. */
2995 tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
2996 tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
2997
2998 /* Only require atomic with LOCK; non-parallel handled in generator. */
2999 if (s->prefix & PREFIX_LOCK) {
3000 tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
6218c177 3001 } else {
326ad06c
RH
3002 tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
3003 s->mem_index, MO_TEUQ);
6218c177 3004 }
326ad06c
RH
3005
3006 /* Set tmp0 to match the required value of Z. */
3007 tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
3008 Z = tcg_temp_new();
3009 tcg_gen_trunc_i64_tl(Z, cmp);
326ad06c
RH
3010
3011 /*
3012 * Extract the result values for the register pair.
3013 * For 32-bit, we may do this unconditionally, because on success (Z=1),
3014 * the old value matches the previous value in EDX:EAX. For x86_64,
3015 * the store must be conditional, because we must leave the source
3016 * registers unchanged on success, and zero-extend the writeback
3017 * on failure (Z=0).
3018 */
3019 if (TARGET_LONG_BITS == 32) {
3020 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
3021 } else {
3022 TCGv zero = tcg_constant_tl(0);
3023
3024 tcg_gen_extr_i64_tl(s->T0, s->T1, old);
3025 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
3026 s->T0, cpu_regs[R_EAX]);
3027 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
3028 s->T1, cpu_regs[R_EDX]);
3029 }
326ad06c
RH
3030
3031 /* Update Z. */
3032 gen_compute_eflags(s);
3033 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
6218c177
RH
3034}
3035
3036#ifdef TARGET_X86_64
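/*
 * CMPXCHG16B: as above with RDX:RAX / RCX:RBX and a 128-bit operand,
 * which must be 16-byte aligned (MO_ALIGN in the memop).
 */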
3037static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
3038{
5f0dd8cd
RH
3039 MemOp mop = MO_TE | MO_128 | MO_ALIGN;
3040 TCGv_i64 t0, t1;
3041 TCGv_i128 cmp, val;
3042
6218c177
RH
3043 gen_lea_modrm(env, s, modrm);
3044
5f0dd8cd
RH
3045 cmp = tcg_temp_new_i128();
3046 val = tcg_temp_new_i128();
3047 tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
3048 tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
3049
3050 /* Only require atomic with LOCK; non-parallel handled in generator. */
3051 if (s->prefix & PREFIX_LOCK) {
3052 tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
6218c177 3053 } else {
5f0dd8cd 3054 tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
6218c177 3055 }
5f0dd8cd
RH
3056
3057 tcg_gen_extr_i128_i64(s->T0, s->T1, val);
5f0dd8cd
RH
3058
3059 /* Determine success after the fact. */
3060 t0 = tcg_temp_new_i64();
3061 t1 = tcg_temp_new_i64();
3062 tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
3063 tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
3064 tcg_gen_or_i64(t0, t0, t1);
5f0dd8cd
RH
3065
3066 /* Update Z. */
3067 gen_compute_eflags(s);
3068 tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
3069 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);
5f0dd8cd
RH
3070
3071 /*
3072 * Extract the result values for the register pair. We may do this
3073 * unconditionally, because on success (Z=1), the old value matches
3074 * the previous value in RDX:RAX.
3075 */
3076 tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
3077 tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
6218c177
RH
3078}
3079#endif
3080
6cf147aa 3081/* convert one instruction. s->base.is_jmp is set if the translation must
2c0262af 3082 be stopped. Return false if the insn must be retried in a new TB,
 true otherwise. */
f66c8e8c 3083static bool disas_insn(DisasContext *s, CPUState *cpu)
2c0262af 3084{
b77af26e 3085 CPUX86State *env = cpu_env(cpu);
ab4e4aec 3086 int b, prefixes;
d67dc9e6 3087 int shift;
14776ab5 3088 MemOp ot, aflag, dflag;
4eeb3939 3089 int modrm, reg, rm, mod, op, opreg, val;
95093668
IL
3090 bool orig_cc_op_dirty = s->cc_op_dirty;
3091 CCOp orig_cc_op = s->cc_op;
913f0836 3092 target_ulong orig_pc_save = s->pc_save;
2c0262af 3093
ddf83b35 3094 s->pc = s->base.pc_next;
2c0262af 3095 s->override = -1;
14ce26e7 3096#ifdef TARGET_X86_64
bbdb4237 3097 s->rex_r = 0;
14ce26e7
FB
3098 s->rex_x = 0;
3099 s->rex_b = 0;
14ce26e7
FB
3100#endif
3101 s->rip_offset = 0; /* for relative ip address */
701ed211
RH
3102 s->vex_l = 0;
3103 s->vex_v = 0;
a61ef762 3104 s->vex_w = false;
95093668
IL
3105 switch (sigsetjmp(s->jmpbuf, 0)) {
3106 case 0:
3107 break;
3108 case 1:
6bd99586 3109 gen_exception_gpf(s);
f66c8e8c 3110 return true;
95093668
IL
3111 case 2:
3112 /* Restore state that may affect the next instruction. */
f66c8e8c 3113 s->pc = s->base.pc_next;
913f0836
RH
3114 /*
3115 * TODO: These save/restore can be removed after the table-based
3116 * decoder is complete; we will be decoding the insn completely
3117 * before any code generation that might affect these variables.
3118 */
95093668
IL
3119 s->cc_op_dirty = orig_cc_op_dirty;
3120 s->cc_op = orig_cc_op;
913f0836
RH
3121 s->pc_save = orig_pc_save;
3122 /* END TODO */
95093668
IL
3123 s->base.num_insns--;
3124 tcg_remove_ops_after(s->prev_insn_end);
3125 s->base.is_jmp = DISAS_TOO_MANY;
f66c8e8c 3126 return false;
95093668
IL
3127 default:
3128 g_assert_not_reached();
30663fd2 3129 }
b066c537 3130
a4926d99 3131 prefixes = 0;
a4926d99 3132
b066c537 3133 next_byte:
b3e22b23 3134 s->prefix = prefixes;
e3af7c78 3135 b = x86_ldub_code(env, s);
4a6fd938
RH
3136 /* Collect prefixes. */
3137 switch (b) {
b3e22b23 3138 default:
b3e22b23
PB
3139 break;
3140 case 0x0f:
3141 b = x86_ldub_code(env, s) + 0x100;
b3e22b23 3142 break;
4a6fd938
RH
3143 case 0xf3:
3144 prefixes |= PREFIX_REPZ;
5c2f60bd 3145 prefixes &= ~PREFIX_REPNZ;
4a6fd938
RH
3146 goto next_byte;
3147 case 0xf2:
3148 prefixes |= PREFIX_REPNZ;
5c2f60bd 3149 prefixes &= ~PREFIX_REPZ;
4a6fd938
RH
3150 goto next_byte;
3151 case 0xf0:
3152 prefixes |= PREFIX_LOCK;
3153 goto next_byte;
3154 case 0x2e:
3155 s->override = R_CS;
3156 goto next_byte;
3157 case 0x36:
3158 s->override = R_SS;
3159 goto next_byte;
3160 case 0x3e:
3161 s->override = R_DS;
3162 goto next_byte;
3163 case 0x26:
3164 s->override = R_ES;
3165 goto next_byte;
3166 case 0x64:
3167 s->override = R_FS;
3168 goto next_byte;
3169 case 0x65:
3170 s->override = R_GS;
3171 goto next_byte;
3172 case 0x66:
3173 prefixes |= PREFIX_DATA;
3174 goto next_byte;
3175 case 0x67:
3176 prefixes |= PREFIX_ADR;
3177 goto next_byte;
14ce26e7 3178#ifdef TARGET_X86_64
4a6fd938
RH
3179 case 0x40 ... 0x4f:
3180 if (CODE64(s)) {
14ce26e7 3181 /* REX prefix */
1e92b727 3182 prefixes |= PREFIX_REX;
a61ef762 3183 s->vex_w = (b >> 3) & 1;
bbdb4237 3184 s->rex_r = (b & 0x4) << 1;
14ce26e7 3185 s->rex_x = (b & 0x2) << 2;
915ffe89 3186 s->rex_b = (b & 0x1) << 3;
14ce26e7
FB
3187 goto next_byte;
3188 }
4a6fd938
RH
3189 break;
3190#endif
701ed211
RH
3191 case 0xc5: /* 2-byte VEX */
3192 case 0xc4: /* 3-byte VEX */
9996dcfd 3193 if (CODE32(s) && !VM86(s)) {
1d0b9261
PB
3194 int vex2 = x86_ldub_code(env, s);
3195 s->pc--; /* rewind the advance_pc() x86_ldub_code() did */
701ed211
RH
3196
3197 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
3198 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
3199 otherwise the instruction is LES or LDS. */
3200 break;
3201 }
1d0b9261
PB
3202 disas_insn_new(s, cpu, b);
3203 return s->pc;
701ed211
RH
3204 }
3205 break;
4a6fd938
RH
3206 }
3207
3208 /* Post-process prefixes. */
4a6fd938 3209 if (CODE64(s)) {
dec3fc96
RH
3210 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
3211 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
3212 over 0x66 if both are present. */
8ab1e486 3213 dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
dec3fc96 3214 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
1d71ddb1 3215 aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
dec3fc96
RH
3216 } else {
3217 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
9996dcfd 3218 if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) {
ab4e4aec
RH
3219 dflag = MO_32;
3220 } else {
3221 dflag = MO_16;
14ce26e7 3222 }
dec3fc96 3223 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
9996dcfd 3224 if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) {
1d71ddb1
RH
3225 aflag = MO_32;
3226 } else {
3227 aflag = MO_16;
14ce26e7 3228 }
2c0262af
FB
3229 }
3230
2c0262af
FB
3231 s->prefix = prefixes;
3232 s->aflag = aflag;
3233 s->dflag = dflag;
3234
2c0262af 3235 /* now check op code */
b3e22b23 3236 switch (b) {
2c0262af
FB
3237 /**************************/
3238 /* arith & logic */
3239 case 0x00 ... 0x05:
3240 case 0x08 ... 0x0d:
3241 case 0x10 ... 0x15:
3242 case 0x18 ... 0x1d:
3243 case 0x20 ... 0x25:
3244 case 0x28 ... 0x2d:
3245 case 0x30 ... 0x35:
3246 case 0x38 ... 0x3d:
3247 {
19729aff 3248 int f;
2c0262af
FB
3249 op = (b >> 3) & 7;
3250 f = (b >> 1) & 3;
3251
ab4e4aec 3252 ot = mo_b_d(b, dflag);
3b46e624 3253
2c0262af
FB
3254 switch(f) {
3255 case 0: /* OP Ev, Gv */
e3af7c78 3256 modrm = x86_ldub_code(env, s);
bbdb4237 3257 reg = ((modrm >> 3) & 7) | REX_R(s);
2c0262af 3258 mod = (modrm >> 6) & 3;
14ce26e7 3259 rm = (modrm & 7) | REX_B(s);
2c0262af 3260 if (mod != 3) {
4eeb3939 3261 gen_lea_modrm(env, s, modrm);
2c0262af
FB
3262 opreg = OR_TMP0;
3263 } else if (op == OP_XORL && rm == reg) {
3264 xor_zero:
3265 /* xor reg, reg optimisation */
436ff2d2 3266 set_cc_op(s, CC_OP_CLR);
c66f9727 3267 tcg_gen_movi_tl(s->T0, 0);
1dbe15ef 3268 gen_op_mov_reg_v(s, ot, reg, s->T0);
2c0262af
FB
3269 break;
3270 } else {
3271 opreg = rm;
3272 }
1dbe15ef 3273 gen_op_mov_v_reg(s, ot, s->T1, reg);
2c0262af
FB
3274 gen_op(s, op, ot, opreg);
3275 break;
3276 case 1: /* OP Gv, Ev */
e3af7c78 3277 modrm = x86_ldub_code(env, s);
2c0262af 3278 mod = (modrm >> 6) & 3;
bbdb4237 3279 reg = ((modrm >> 3) & 7) | REX_R(s);
14ce26e7 3280 rm = (modrm & 7) | REX_B(s);
2c0262af 3281 if (mod != 3) {
4eeb3939 3282 gen_lea_modrm(env, s, modrm);
b48597b0 3283 gen_op_ld_v(s, ot, s->T1, s->A0);
2c0262af
FB
3284 } else if (op == OP_XORL && rm == reg) {
3285 goto xor_zero;
3286 } else {
1dbe15ef 3287 gen_op_mov_v_reg(s, ot, s->T1, rm);
2c0262af
FB
3288 }
3289 gen_op(s, op, ot, reg);
3290 break;
3291 case 2: /* OP A, Iv */
0af10c86 3292 val = insn_get(env, s, ot);
b48597b0 3293 tcg_gen_movi_tl(s->T1, val);
2c0262af
FB
3294 gen_op(s, op, ot, OR_EAX);
3295 break;
3296 }
3297 }
3298 break;
3299
ec9d6075
FB
3300 case 0x82:
3301 if (CODE64(s))
3302 goto illegal_op;
edd7541b 3303 /* fall through */
2c0262af
FB
3304 case 0x80: /* GRP1 */
3305 case 0x81:
3306 case 0x83:
3307 {
ab4e4aec 3308 ot = mo_b_d(b, dflag);
3b46e624 3309
e3af7c78 3310 modrm = x86_ldub_code(env, s);
2c0262af 3311 mod = (modrm >> 6) & 3;
14ce26e7 3312 rm = (modrm & 7) | REX_B(s);
2c0262af 3313 op = (modrm >> 3) & 7;
3b46e624 3314
2c0262af 3315 if (mod != 3) {
14ce26e7
FB
3316 if (b == 0x83)
3317 s->rip_offset = 1;
3318 else
3319 s->rip_offset = insn_const_size(ot);
4eeb3939 3320 gen_lea_modrm(env, s, modrm);
2c0262af
FB
3321 opreg = OR_TMP0;
3322 } else {
14ce26e7 3323 opreg = rm;
2c0262af
FB
3324 }
3325
3326 switch(b) {
3327 default:
3328 case 0x80:
3329 case 0x81:
d64477af 3330 case 0x82:
0af10c86 3331 val = insn_get(env, s, ot);
2c0262af
FB
3332 break;
3333 case 0x83:
4ba9938c 3334 val = (int8_t)insn_get(env, s, MO_8);
2c0262af
FB
3335 break;
3336 }
b48597b0 3337 tcg_gen_movi_tl(s->T1, val);
2c0262af
FB
3338 gen_op(s, op, ot, opreg);
3339 }
3340 break;
3341
3342 /**************************/
3343 /* inc, dec, and other misc arith */
3344 case 0x40 ... 0x47: /* inc Gv */
ab4e4aec 3345 ot = dflag;
2c0262af
FB
3346 gen_inc(s, ot, OR_EAX + (b & 7), 1);
3347 break;
3348 case 0x48 ... 0x4f: /* dec Gv */
ab4e4aec 3349 ot = dflag;
2c0262af
FB
3350 gen_inc(s, ot, OR_EAX + (b & 7), -1);
3351 break;
3352 case 0xf6: /* GRP3 */
3353 case 0xf7:
ab4e4aec 3354 ot = mo_b_d(b, dflag);
2c0262af 3355
e3af7c78 3356 modrm = x86_ldub_code(env, s);
2c0262af 3357 mod = (modrm >> 6) & 3;
14ce26e7 3358 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
3359 op = (modrm >> 3) & 7;
3360 if (mod != 3) {
2a5fe8ae 3361 if (op == 0) {
14ce26e7 3362 s->rip_offset = insn_const_size(ot);
2a5fe8ae 3363 }
4eeb3939 3364 gen_lea_modrm(env, s, modrm);
2a5fe8ae
EC
3365 /* For those below that handle locked memory, don't load here. */
3366 if (!(s->prefix & PREFIX_LOCK)
3367 || op != 2) {
c66f9727 3368 gen_op_ld_v(s, ot, s->T0, s->A0);
2a5fe8ae 3369 }
2c0262af 3370 } else {
1dbe15ef 3371 gen_op_mov_v_reg(s, ot, s->T0, rm);
2c0262af
FB
3372 }
3373
3374 switch(op) {
3375 case 0: /* test */
0af10c86 3376 val = insn_get(env, s, ot);
b48597b0 3377 tcg_gen_movi_tl(s->T1, val);
c66f9727 3378 gen_op_testl_T0_T1_cc(s);
3ca51d07 3379 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af
FB
3380 break;
3381 case 2: /* not */
2a5fe8ae
EC
3382 if (s->prefix & PREFIX_LOCK) {
3383 if (mod == 3) {
3384 goto illegal_op;
3385 }
c66f9727
EC
3386 tcg_gen_movi_tl(s->T0, ~0);
3387 tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
2a5fe8ae 3388 s->mem_index, ot | MO_LE);
2c0262af 3389 } else {
c66f9727 3390 tcg_gen_not_tl(s->T0, s->T0);
2a5fe8ae 3391 if (mod != 3) {
c66f9727 3392 gen_op_st_v(s, ot, s->T0, s->A0);
2a5fe8ae 3393 } else {
1dbe15ef 3394 gen_op_mov_reg_v(s, ot, rm, s->T0);
2a5fe8ae 3395 }
2c0262af
FB
3396 }
3397 break;
3398 case 3: /* neg */
8eb8c738
EC
3399 if (s->prefix & PREFIX_LOCK) {
3400 TCGLabel *label1;
3401 TCGv a0, t0, t1, t2;
3402
3403 if (mod == 3) {
3404 goto illegal_op;
3405 }
3a5d1773
RH
3406 a0 = s->A0;
3407 t0 = s->T0;
8eb8c738
EC
3408 label1 = gen_new_label();
3409
8eb8c738
EC
3410 gen_set_label(label1);
3411 t1 = tcg_temp_new();
3412 t2 = tcg_temp_new();
3413 tcg_gen_mov_tl(t2, t0);
3414 tcg_gen_neg_tl(t1, t0);
3415 tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
3416 s->mem_index, ot | MO_LE);
8eb8c738
EC
3417 tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
3418
12153175 3419 tcg_gen_neg_tl(s->T0, t0);
2c0262af 3420 } else {
c66f9727 3421 tcg_gen_neg_tl(s->T0, s->T0);
8eb8c738 3422 if (mod != 3) {
c66f9727 3423 gen_op_st_v(s, ot, s->T0, s->A0);
8eb8c738 3424 } else {
1dbe15ef 3425 gen_op_mov_reg_v(s, ot, rm, s->T0);
8eb8c738 3426 }
2c0262af 3427 }
93a3e108 3428 gen_op_update_neg_cc(s);
3ca51d07 3429 set_cc_op(s, CC_OP_SUBB + ot);
2c0262af
FB
3430 break;
3431 case 4: /* mul */
3432 switch(ot) {
4ba9938c 3433 case MO_8:
1dbe15ef 3434 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
c66f9727 3435 tcg_gen_ext8u_tl(s->T0, s->T0);
b48597b0 3436 tcg_gen_ext8u_tl(s->T1, s->T1);
0211e5af 3437 /* XXX: use 32 bit mul which could be faster */
b48597b0 3438 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
1dbe15ef 3439 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
c66f9727
EC
3440 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3441 tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00);
3ca51d07 3442 set_cc_op(s, CC_OP_MULB);
2c0262af 3443 break;
4ba9938c 3444 case MO_16:
1dbe15ef 3445 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
c66f9727 3446 tcg_gen_ext16u_tl(s->T0, s->T0);
b48597b0 3447 tcg_gen_ext16u_tl(s->T1, s->T1);
0211e5af 3448 /* XXX: use 32 bit mul which could be faster */
b48597b0 3449 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
1dbe15ef 3450 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
c66f9727
EC
3451 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3452 tcg_gen_shri_tl(s->T0, s->T0, 16);
1dbe15ef 3453 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
c66f9727 3454 tcg_gen_mov_tl(cpu_cc_src, s->T0);
3ca51d07 3455 set_cc_op(s, CC_OP_MULW);
2c0262af
FB
3456 break;
3457 default:
4ba9938c 3458 case MO_32:
6bd48f6f 3459 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
4f82446d
EC
3460 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3461 tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
3462 s->tmp2_i32, s->tmp3_i32);
6bd48f6f 3463 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
4f82446d 3464 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
a4bcea3d
RH
3465 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3466 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 3467 set_cc_op(s, CC_OP_MULL);
2c0262af 3468 break;
14ce26e7 3469#ifdef TARGET_X86_64
4ba9938c 3470 case MO_64:
a4bcea3d 3471 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
c66f9727 3472 s->T0, cpu_regs[R_EAX]);
a4bcea3d
RH
3473 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3474 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 3475 set_cc_op(s, CC_OP_MULQ);
14ce26e7
FB
3476 break;
3477#endif
2c0262af 3478 }
2c0262af
FB
3479 break;
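/*
 * For the unsigned multiplies above, CF and OF are set when the
 * upper half of the product is non-zero, which is exactly what
 * cpu_cc_src ends up holding.  E.g. MUL r/m8 with AL=0x80 and a
 * source of 2 gives AX=0x0100: AH is non-zero, so CF=OF=1.
 */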
3480 case 5: /* imul */
3481 switch(ot) {
4ba9938c 3482 case MO_8:
1dbe15ef 3483 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
c66f9727 3484 tcg_gen_ext8s_tl(s->T0, s->T0);
b48597b0 3485 tcg_gen_ext8s_tl(s->T1, s->T1);
0211e5af 3486 /* XXX: use 32 bit mul which could be faster */
b48597b0 3487 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
1dbe15ef 3488 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
c66f9727 3489 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
fbd80f02
EC
3490 tcg_gen_ext8s_tl(s->tmp0, s->T0);
3491 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3ca51d07 3492 set_cc_op(s, CC_OP_MULB);
2c0262af 3493 break;
4ba9938c 3494 case MO_16:
1dbe15ef 3495 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
c66f9727 3496 tcg_gen_ext16s_tl(s->T0, s->T0);
b48597b0 3497 tcg_gen_ext16s_tl(s->T1, s->T1);
0211e5af 3498 /* XXX: use 32 bit mul which could be faster */
b48597b0 3499 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
1dbe15ef 3500 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
c66f9727 3501 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
fbd80f02
EC
3502 tcg_gen_ext16s_tl(s->tmp0, s->T0);
3503 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
c66f9727 3504 tcg_gen_shri_tl(s->T0, s->T0, 16);
1dbe15ef 3505 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3ca51d07 3506 set_cc_op(s, CC_OP_MULW);
2c0262af
FB
3507 break;
3508 default:
4ba9938c 3509 case MO_32:
6bd48f6f 3510 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
4f82446d
EC
3511 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3512 tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3513 s->tmp2_i32, s->tmp3_i32);
6bd48f6f 3514 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
4f82446d 3515 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
6bd48f6f 3516 tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
a4bcea3d 3517 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4f82446d 3518 tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
6bd48f6f 3519 tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
3ca51d07 3520 set_cc_op(s, CC_OP_MULL);
2c0262af 3521 break;
14ce26e7 3522#ifdef TARGET_X86_64
4ba9938c 3523 case MO_64:
a4bcea3d 3524 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
c66f9727 3525 s->T0, cpu_regs[R_EAX]);
a4bcea3d
RH
3526 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3527 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
3528 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 3529 set_cc_op(s, CC_OP_MULQ);
14ce26e7
FB
3530 break;
3531#endif
2c0262af 3532 }
2c0262af
FB
3533 break;
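/*
 * For the signed multiplies above, overflow is detected by checking
 * whether the full product equals the sign-extension of its low
 * half: cpu_cc_src is set to the difference, so CF=OF=1 exactly
 * when the result does not fit in the destination size.
 */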
3534 case 6: /* div */
3535 switch(ot) {
4ba9938c 3536 case MO_8:
ad75a51e 3537 gen_helper_divb_AL(tcg_env, s->T0);
2c0262af 3538 break;
4ba9938c 3539 case MO_16:
ad75a51e 3540 gen_helper_divw_AX(tcg_env, s->T0);
2c0262af
FB
3541 break;
3542 default:
4ba9938c 3543 case MO_32:
ad75a51e 3544 gen_helper_divl_EAX(tcg_env, s->T0);
14ce26e7
FB
3545 break;
3546#ifdef TARGET_X86_64
4ba9938c 3547 case MO_64:
ad75a51e 3548 gen_helper_divq_EAX(tcg_env, s->T0);
2c0262af 3549 break;
14ce26e7 3550#endif
2c0262af
FB
3551 }
3552 break;
3553 case 7: /* idiv */
3554 switch(ot) {
4ba9938c 3555 case MO_8:
ad75a51e 3556 gen_helper_idivb_AL(tcg_env, s->T0);
2c0262af 3557 break;
4ba9938c 3558 case MO_16:
ad75a51e 3559 gen_helper_idivw_AX(tcg_env, s->T0);
2c0262af
FB
3560 break;
3561 default:
4ba9938c 3562 case MO_32:
ad75a51e 3563 gen_helper_idivl_EAX(tcg_env, s->T0);
14ce26e7
FB
3564 break;
3565#ifdef TARGET_X86_64
4ba9938c 3566 case MO_64:
ad75a51e 3567 gen_helper_idivq_EAX(tcg_env, s->T0);
2c0262af 3568 break;
14ce26e7 3569#endif
2c0262af
FB
3570 }
3571 break;
3572 default:
b9f9c5b4 3573 goto unknown_op;
2c0262af
FB
3574 }
3575 break;
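/*
 * div and idiv are left entirely to helpers: they must raise #DE on
 * a zero divisor or an out-of-range quotient, and the helper itself
 * writes the quotient and remainder to the fixed AX, DX:AX,
 * EDX:EAX or RDX:RAX register pair.
 */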
3576
3577 case 0xfe: /* GRP4 */
3578 case 0xff: /* GRP5 */
ab4e4aec 3579 ot = mo_b_d(b, dflag);
2c0262af 3580
e3af7c78 3581 modrm = x86_ldub_code(env, s);
2c0262af 3582 mod = (modrm >> 6) & 3;
14ce26e7 3583 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
3584 op = (modrm >> 3) & 7;
3585 if (op >= 2 && b == 0xfe) {
b9f9c5b4 3586 goto unknown_op;
2c0262af 3587 }
14ce26e7 3588 if (CODE64(s)) {
aba9d61e 3589 if (op == 2 || op == 4) {
14ce26e7 3590 /* operand size for jumps is 64 bit */
4ba9938c 3591 ot = MO_64;
aba9d61e 3592 } else if (op == 3 || op == 5) {
8ab1e486 3593 ot = dflag != MO_16 ? MO_32 + REX_W(s) : MO_16;
14ce26e7
FB
3594 } else if (op == 6) {
3595 /* default push size is 64 bit */
ab4e4aec 3596 ot = mo_pushpop(s, dflag);
14ce26e7
FB
3597 }
3598 }
2c0262af 3599 if (mod != 3) {
4eeb3939 3600 gen_lea_modrm(env, s, modrm);
2c0262af 3601 if (op >= 2 && op != 3 && op != 5)
c66f9727 3602 gen_op_ld_v(s, ot, s->T0, s->A0);
2c0262af 3603 } else {
1dbe15ef 3604 gen_op_mov_v_reg(s, ot, s->T0, rm);
2c0262af
FB
3605 }
3606
3607 switch(op) {
3608 case 0: /* inc Ev */
3609 if (mod != 3)
3610 opreg = OR_TMP0;
3611 else
3612 opreg = rm;
3613 gen_inc(s, ot, opreg, 1);
3614 break;
3615 case 1: /* dec Ev */
3616 if (mod != 3)
3617 opreg = OR_TMP0;
3618 else
3619 opreg = rm;
3620 gen_inc(s, ot, opreg, -1);
3621 break;
3622 case 2: /* call Ev */
4f31916f 3623 /* XXX: optimize if memory (no 'and' is necessary) */
ab4e4aec 3624 if (dflag == MO_16) {
c66f9727 3625 tcg_gen_ext16u_tl(s->T0, s->T0);
40b90233 3626 }
9e599bf7 3627 gen_push_v(s, eip_next_tl(s));
e3a79e0e 3628 gen_op_jmp_v(s, s->T0);
7d117ce8 3629 gen_bnd_jmp(s);
faf9ea5f 3630 s->base.is_jmp = DISAS_JUMP;
2c0262af 3631 break;
61382a50 3632 case 3: /* lcall Ev */
10b8eb94
RH
3633 if (mod == 3) {
3634 goto illegal_op;
3635 }
b48597b0 3636 gen_op_ld_v(s, ot, s->T1, s->A0);
830a19a4 3637 gen_add_A0_im(s, 1 << ot);
c66f9727 3638 gen_op_ld_v(s, MO_16, s->T0, s->A0);
2c0262af 3639 do_lcall:
f8a35846 3640 if (PE(s) && !VM86(s)) {
6bd48f6f 3641 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
ad75a51e 3642 gen_helper_lcall_protected(tcg_env, s->tmp2_i32, s->T1,
9e599bf7
RH
3643 tcg_constant_i32(dflag - 1),
3644 eip_next_tl(s));
2c0262af 3645 } else {
6bd48f6f 3646 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
8c03ab9f 3647 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
ad75a51e 3648 gen_helper_lcall_real(tcg_env, s->tmp2_i32, s->tmp3_i32,
9e599bf7
RH
3649 tcg_constant_i32(dflag - 1),
3650 eip_next_i32(s));
2c0262af 3651 }
faf9ea5f 3652 s->base.is_jmp = DISAS_JUMP;
2c0262af
FB
3653 break;
3654 case 4: /* jmp Ev */
ab4e4aec 3655 if (dflag == MO_16) {
c66f9727 3656 tcg_gen_ext16u_tl(s->T0, s->T0);
40b90233 3657 }
e3a79e0e 3658 gen_op_jmp_v(s, s->T0);
7d117ce8 3659 gen_bnd_jmp(s);
faf9ea5f 3660 s->base.is_jmp = DISAS_JUMP;
2c0262af
FB
3661 break;
3662 case 5: /* ljmp Ev */
10b8eb94
RH
3663 if (mod == 3) {
3664 goto illegal_op;
3665 }
b48597b0 3666 gen_op_ld_v(s, ot, s->T1, s->A0);
830a19a4 3667 gen_add_A0_im(s, 1 << ot);
c66f9727 3668 gen_op_ld_v(s, MO_16, s->T0, s->A0);
2c0262af 3669 do_ljmp:
f8a35846 3670 if (PE(s) && !VM86(s)) {
6bd48f6f 3671 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
ad75a51e 3672 gen_helper_ljmp_protected(tcg_env, s->tmp2_i32, s->T1,
9e599bf7 3673 eip_next_tl(s));
2c0262af 3674 } else {
c66f9727 3675 gen_op_movl_seg_T0_vm(s, R_CS);
e3a79e0e 3676 gen_op_jmp_v(s, s->T1);
2c0262af 3677 }
faf9ea5f 3678 s->base.is_jmp = DISAS_JUMP;
2c0262af
FB
3679 break;
3680 case 6: /* push Ev */
c66f9727 3681 gen_push_v(s, s->T0);
2c0262af
FB
3682 break;
3683 default:
b9f9c5b4 3684 goto unknown_op;
2c0262af
FB
3685 }
3686 break;
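/*
 * The indirect jumps and calls above end the block with DISAS_JUMP:
 * the new EIP is only known at run time, so execution continues via
 * the indirect TB lookup path rather than a direct goto_tb link.
 */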
3687
3688 case 0x84: /* test Ev, Gv */
5fafdf24 3689 case 0x85:
ab4e4aec 3690 ot = mo_b_d(b, dflag);
2c0262af 3691
e3af7c78 3692 modrm = x86_ldub_code(env, s);
bbdb4237 3693 reg = ((modrm >> 3) & 7) | REX_R(s);
3b46e624 3694
0af10c86 3695 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
1dbe15ef 3696 gen_op_mov_v_reg(s, ot, s->T1, reg);
c66f9727 3697 gen_op_testl_T0_T1_cc(s);
3ca51d07 3698 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af 3699 break;
3b46e624 3700
2c0262af
FB
3701 case 0xa8: /* test eAX, Iv */
3702 case 0xa9:
ab4e4aec 3703 ot = mo_b_d(b, dflag);
0af10c86 3704 val = insn_get(env, s, ot);
2c0262af 3705
1dbe15ef 3706 gen_op_mov_v_reg(s, ot, s->T0, OR_EAX);
b48597b0 3707 tcg_gen_movi_tl(s->T1, val);
c66f9727 3708 gen_op_testl_T0_T1_cc(s);
3ca51d07 3709 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af 3710 break;
3b46e624 3711
2c0262af 3712 case 0x98: /* CWDE/CBW */
ab4e4aec 3713 switch (dflag) {
14ce26e7 3714#ifdef TARGET_X86_64
ab4e4aec 3715 case MO_64:
1dbe15ef 3716 gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
c66f9727 3717 tcg_gen_ext32s_tl(s->T0, s->T0);
1dbe15ef 3718 gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0);
ab4e4aec 3719 break;
14ce26e7 3720#endif
ab4e4aec 3721 case MO_32:
1dbe15ef 3722 gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
c66f9727 3723 tcg_gen_ext16s_tl(s->T0, s->T0);
1dbe15ef 3724 gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0);
ab4e4aec
RH
3725 break;
3726 case MO_16:
1dbe15ef 3727 gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX);
c66f9727 3728 tcg_gen_ext8s_tl(s->T0, s->T0);
1dbe15ef 3729 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
ab4e4aec
RH
3730 break;
3731 default:
732e89f4 3732 g_assert_not_reached();
e108dd01 3733 }
2c0262af
FB
3734 break;
3735 case 0x99: /* CDQ/CWD */
ab4e4aec 3736 switch (dflag) {
14ce26e7 3737#ifdef TARGET_X86_64
ab4e4aec 3738 case MO_64:
1dbe15ef 3739 gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX);
c66f9727 3740 tcg_gen_sari_tl(s->T0, s->T0, 63);
1dbe15ef 3741 gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0);
ab4e4aec 3742 break;
14ce26e7 3743#endif
ab4e4aec 3744 case MO_32:
1dbe15ef 3745 gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
c66f9727
EC
3746 tcg_gen_ext32s_tl(s->T0, s->T0);
3747 tcg_gen_sari_tl(s->T0, s->T0, 31);
1dbe15ef 3748 gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0);
ab4e4aec
RH
3749 break;
3750 case MO_16:
1dbe15ef 3751 gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
c66f9727
EC
3752 tcg_gen_ext16s_tl(s->T0, s->T0);
3753 tcg_gen_sari_tl(s->T0, s->T0, 15);
1dbe15ef 3754 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
ab4e4aec
RH
3755 break;
3756 default:
732e89f4 3757 g_assert_not_reached();
e108dd01 3758 }
2c0262af
FB
3759 break;
3760 case 0x1af: /* imul Gv, Ev */
3761 case 0x69: /* imul Gv, Ev, I */
3762 case 0x6b:
ab4e4aec 3763 ot = dflag;
e3af7c78 3764 modrm = x86_ldub_code(env, s);
bbdb4237 3765 reg = ((modrm >> 3) & 7) | REX_R(s);
14ce26e7
FB
3766 if (b == 0x69)
3767 s->rip_offset = insn_const_size(ot);
3768 else if (b == 0x6b)
3769 s->rip_offset = 1;
0af10c86 3770 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2c0262af 3771 if (b == 0x69) {
0af10c86 3772 val = insn_get(env, s, ot);
b48597b0 3773 tcg_gen_movi_tl(s->T1, val);
2c0262af 3774 } else if (b == 0x6b) {
4ba9938c 3775 val = (int8_t)insn_get(env, s, MO_8);
b48597b0 3776 tcg_gen_movi_tl(s->T1, val);
2c0262af 3777 } else {
1dbe15ef 3778 gen_op_mov_v_reg(s, ot, s->T1, reg);
2c0262af 3779 }
a4bcea3d 3780 switch (ot) {
0211e5af 3781#ifdef TARGET_X86_64
4ba9938c 3782 case MO_64:
b48597b0 3783 tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1);
a4bcea3d
RH
3784 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
3785 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
b48597b0 3786 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1);
a4bcea3d 3787 break;
0211e5af 3788#endif
4ba9938c 3789 case MO_32:
6bd48f6f 3790 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
4f82446d
EC
3791 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
3792 tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3793 s->tmp2_i32, s->tmp3_i32);
6bd48f6f
EC
3794 tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
3795 tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
a4bcea3d 3796 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
4f82446d 3797 tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
6bd48f6f 3798 tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
a4bcea3d
RH
3799 break;
3800 default:
c66f9727 3801 tcg_gen_ext16s_tl(s->T0, s->T0);
b48597b0 3802 tcg_gen_ext16s_tl(s->T1, s->T1);
0211e5af 3803 /* XXX: use 32 bit mul which could be faster */
b48597b0 3804 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
c66f9727 3805 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
fbd80f02
EC
3806 tcg_gen_ext16s_tl(s->tmp0, s->T0);
3807 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
1dbe15ef 3808 gen_op_mov_reg_v(s, ot, reg, s->T0);
a4bcea3d 3809 break;
2c0262af 3810 }
3ca51d07 3811 set_cc_op(s, CC_OP_MULB + ot);
2c0262af
FB
3812 break;
3813 case 0x1c0:
3814 case 0x1c1: /* xadd Ev, Gv */
ab4e4aec 3815 ot = mo_b_d(b, dflag);
e3af7c78 3816 modrm = x86_ldub_code(env, s);
bbdb4237 3817 reg = ((modrm >> 3) & 7) | REX_R(s);
2c0262af 3818 mod = (modrm >> 6) & 3;
1dbe15ef 3819 gen_op_mov_v_reg(s, ot, s->T0, reg);
2c0262af 3820 if (mod == 3) {
14ce26e7 3821 rm = (modrm & 7) | REX_B(s);
1dbe15ef 3822 gen_op_mov_v_reg(s, ot, s->T1, rm);
b48597b0 3823 tcg_gen_add_tl(s->T0, s->T0, s->T1);
1dbe15ef
EC
3824 gen_op_mov_reg_v(s, ot, reg, s->T1);
3825 gen_op_mov_reg_v(s, ot, rm, s->T0);
2c0262af 3826 } else {
4eeb3939 3827 gen_lea_modrm(env, s, modrm);
f53b0181 3828 if (s->prefix & PREFIX_LOCK) {
b48597b0 3829 tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
f53b0181 3830 s->mem_index, ot | MO_LE);
b48597b0 3831 tcg_gen_add_tl(s->T0, s->T0, s->T1);
f53b0181 3832 } else {
b48597b0
EC
3833 gen_op_ld_v(s, ot, s->T1, s->A0);
3834 tcg_gen_add_tl(s->T0, s->T0, s->T1);
c66f9727 3835 gen_op_st_v(s, ot, s->T0, s->A0);
f53b0181 3836 }
1dbe15ef 3837 gen_op_mov_reg_v(s, ot, reg, s->T1);
2c0262af 3838 }
c66f9727 3839 gen_op_update2_cc(s);
3ca51d07 3840 set_cc_op(s, CC_OP_ADDB + ot);
2c0262af
FB
3841 break;
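/*
 * xadd above: the destination receives the sum while the source
 * register receives the destination's old value.  With a memory
 * destination and a LOCK prefix this maps directly onto an atomic
 * fetch-and-add.
 */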
3842 case 0x1b0:
3843 case 0x1b1: /* cmpxchg Ev, Gv */
cad3a37d 3844 {
d1bb978b 3845 TCGv oldv, newv, cmpv, dest;
cad3a37d 3846
ab4e4aec 3847 ot = mo_b_d(b, dflag);
e3af7c78 3848 modrm = x86_ldub_code(env, s);
bbdb4237 3849 reg = ((modrm >> 3) & 7) | REX_R(s);
cad3a37d 3850 mod = (modrm >> 6) & 3;
ae03f8de
EC
3851 oldv = tcg_temp_new();
3852 newv = tcg_temp_new();
3853 cmpv = tcg_temp_new();
1dbe15ef 3854 gen_op_mov_v_reg(s, ot, newv, reg);
ae03f8de 3855 tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
d1bb978b 3856 gen_extu(ot, cmpv);
ae03f8de
EC
3857 if (s->prefix & PREFIX_LOCK) {
3858 if (mod == 3) {
3859 goto illegal_op;
3860 }
4eeb3939 3861 gen_lea_modrm(env, s, modrm);
6b672b5d 3862 tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
ae03f8de 3863 s->mem_index, ot | MO_LE);
cad3a37d 3864 } else {
ae03f8de
EC
3865 if (mod == 3) {
3866 rm = (modrm & 7) | REX_B(s);
1dbe15ef 3867 gen_op_mov_v_reg(s, ot, oldv, rm);
d1bb978b
PB
3868 gen_extu(ot, oldv);
3869
3870 /*
3871 * Unlike the memory case, where "the destination operand receives
3872 * a write cycle without regard to the result of the comparison",
3873 * rm must not be touched altogether if the write fails, including
3874 * not zero-extending it on 64-bit processors. So, precompute
3875 * the result of a successful writeback and perform the movcond
3876 * directly on cpu_regs. Also need to write accumulator first, in
3877 * case rm is part of RAX too.
3878 */
3879 dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
3880 tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
ae03f8de
EC
3881 } else {
3882 gen_lea_modrm(env, s, modrm);
6b672b5d 3883 gen_op_ld_v(s, ot, oldv, s->A0);
d1bb978b
PB
3884
3885 /*
 3886 * Perform an unconditional store cycle, as a physical CPU does;
 3887 * it must happen before the accumulator is changed, so that the
 3888 * instruction stays idempotent if the store faults and the
 3889 * instruction is restarted
3890 */
3891 tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
6b672b5d 3892 gen_op_st_v(s, ot, newv, s->A0);
ae03f8de 3893 }
cad3a37d 3894 }
d1bb978b
PB
3895 /*
3896 * Write EAX only if the cmpxchg fails; reuse newv as the destination,
3897 * since it's dead here.
3898 */
3899 dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
3900 tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
ae03f8de 3901 tcg_gen_mov_tl(cpu_cc_src, oldv);
93a3e108 3902 tcg_gen_mov_tl(s->cc_srcT, cmpv);
ae03f8de 3903 tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
3ca51d07 3904 set_cc_op(s, CC_OP_SUBB + ot);
2c0262af 3905 }
2c0262af
FB
3906 break;
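/*
 * In both paths above, the flags end up as for a CMP of the
 * accumulator against the old destination value (accumulator minus
 * dest), which is what loading cc_src/cc_srcT/cc_dst and selecting
 * CC_OP_SUB achieves.
 */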
3907 case 0x1c7: /* cmpxchg8b */
e3af7c78 3908 modrm = x86_ldub_code(env, s);
2c0262af 3909 mod = (modrm >> 6) & 3;
369fd5ca
RH
3910 switch ((modrm >> 3) & 7) {
3911 case 1: /* CMPXCHG8, CMPXCHG16 */
3912 if (mod == 3) {
1b9d9ebb 3913 goto illegal_op;
ae03f8de 3914 }
369fd5ca
RH
3915#ifdef TARGET_X86_64
3916 if (dflag == MO_64) {
3917 if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
3918 goto illegal_op;
3919 }
6218c177 3920 gen_cmpxchg16b(s, env, modrm);
369fd5ca
RH
3921 break;
3922 }
6218c177 3923#endif
369fd5ca 3924 if (!(s->cpuid_features & CPUID_CX8)) {
1b9d9ebb 3925 goto illegal_op;
369fd5ca 3926 }
6218c177 3927 gen_cmpxchg8b(s, env, modrm);
369fd5ca
RH
3928 break;
3929
6750485b 3930 case 7: /* RDSEED, RDPID with f3 prefix */
f9e0dbae 3931 if (mod != 3 ||
6750485b 3932 (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
f9e0dbae
PB
3933 goto illegal_op;
3934 }
6750485b
PB
3935 if (s->prefix & PREFIX_REPZ) {
3936 if (!(s->cpuid_ext_features & CPUID_7_0_ECX_RDPID)) {
3937 goto illegal_op;
3938 }
ad75a51e 3939 gen_helper_rdpid(s->T0, tcg_env);
6750485b
PB
3940 rm = (modrm & 7) | REX_B(s);
3941 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3942 break;
3943 } else {
3944 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
3945 goto illegal_op;
3946 }
3947 goto do_rdrand;
3948 }
f9e0dbae 3949
369fd5ca
RH
3950 case 6: /* RDRAND */
3951 if (mod != 3 ||
3952 (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
3953 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
3954 goto illegal_op;
3955 }
f9e0dbae 3956 do_rdrand:
dfd1b812 3957 translator_io_start(&s->base);
ad75a51e 3958 gen_helper_rdrand(s->T0, tcg_env);
369fd5ca
RH
3959 rm = (modrm & 7) | REX_B(s);
3960 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3961 set_cc_op(s, CC_OP_EFLAGS);
369fd5ca
RH
3962 break;
3963
3964 default:
3965 goto illegal_op;
1b9d9ebb 3966 }
2c0262af 3967 break;
3b46e624 3968
2c0262af
FB
3969 /**************************/
3970 /* push/pop */
3971 case 0x50 ... 0x57: /* push */
1dbe15ef 3972 gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s));
c66f9727 3973 gen_push_v(s, s->T0);
2c0262af
FB
3974 break;
3975 case 0x58 ... 0x5f: /* pop */
8e31d234 3976 ot = gen_pop_T0(s);
77729c24 3977 /* NOTE: order is important for pop %sp */
8e31d234 3978 gen_pop_update(s, ot);
1dbe15ef 3979 gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0);
2c0262af
FB
3980 break;
3981 case 0x60: /* pusha */
14ce26e7
FB
3982 if (CODE64(s))
3983 goto illegal_op;
2c0262af
FB
3984 gen_pusha(s);
3985 break;
3986 case 0x61: /* popa */
14ce26e7
FB
3987 if (CODE64(s))
3988 goto illegal_op;
2c0262af
FB
3989 gen_popa(s);
3990 break;
3991 case 0x68: /* push Iv */
3992 case 0x6a:
ab4e4aec 3993 ot = mo_pushpop(s, dflag);
2c0262af 3994 if (b == 0x68)
0af10c86 3995 val = insn_get(env, s, ot);
2c0262af 3996 else
4ba9938c 3997 val = (int8_t)insn_get(env, s, MO_8);
c66f9727
EC
3998 tcg_gen_movi_tl(s->T0, val);
3999 gen_push_v(s, s->T0);
2c0262af
FB
4000 break;
4001 case 0x8f: /* pop Ev */
e3af7c78 4002 modrm = x86_ldub_code(env, s);
77729c24 4003 mod = (modrm >> 6) & 3;
8e31d234 4004 ot = gen_pop_T0(s);
77729c24
FB
4005 if (mod == 3) {
4006 /* NOTE: order is important for pop %sp */
8e31d234 4007 gen_pop_update(s, ot);
14ce26e7 4008 rm = (modrm & 7) | REX_B(s);
1dbe15ef 4009 gen_op_mov_reg_v(s, ot, rm, s->T0);
77729c24
FB
4010 } else {
4011 /* NOTE: order is important too for MMU exceptions */
14ce26e7 4012 s->popl_esp_hack = 1 << ot;
0af10c86 4013 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
77729c24 4014 s->popl_esp_hack = 0;
8e31d234 4015 gen_pop_update(s, ot);
77729c24 4016 }
2c0262af
FB
4017 break;
4018 case 0xc8: /* enter */
4019 {
4020 int level;
e3af7c78
PB
4021 val = x86_lduw_code(env, s);
4022 level = x86_ldub_code(env, s);
2c0262af
FB
4023 gen_enter(s, val, level);
4024 }
4025 break;
4026 case 0xc9: /* leave */
2045f04c 4027 gen_leave(s);
2c0262af
FB
4028 break;
4029 case 0x06: /* push es */
4030 case 0x0e: /* push cs */
4031 case 0x16: /* push ss */
4032 case 0x1e: /* push ds */
14ce26e7
FB
4033 if (CODE64(s))
4034 goto illegal_op;
c66f9727
EC
4035 gen_op_movl_T0_seg(s, b >> 3);
4036 gen_push_v(s, s->T0);
2c0262af
FB
4037 break;
4038 case 0x1a0: /* push fs */
4039 case 0x1a8: /* push gs */
c66f9727
EC
4040 gen_op_movl_T0_seg(s, (b >> 3) & 7);
4041 gen_push_v(s, s->T0);
2c0262af
FB
4042 break;
4043 case 0x07: /* pop es */
4044 case 0x17: /* pop ss */
4045 case 0x1f: /* pop ds */
14ce26e7
FB
4046 if (CODE64(s))
4047 goto illegal_op;
2c0262af 4048 reg = b >> 3;
8e31d234 4049 ot = gen_pop_T0(s);
100ec099 4050 gen_movl_seg_T0(s, reg);
8e31d234 4051 gen_pop_update(s, ot);
2c0262af
FB
4052 break;
4053 case 0x1a1: /* pop fs */
4054 case 0x1a9: /* pop gs */
8e31d234 4055 ot = gen_pop_T0(s);
100ec099 4056 gen_movl_seg_T0(s, (b >> 3) & 7);
8e31d234 4057 gen_pop_update(s, ot);
2c0262af
FB
4058 break;
4059
4060 /**************************/
4061 /* mov */
4062 case 0x88:
4063 case 0x89: /* mov Gv, Ev */
ab4e4aec 4064 ot = mo_b_d(b, dflag);
e3af7c78 4065 modrm = x86_ldub_code(env, s);
bbdb4237 4066 reg = ((modrm >> 3) & 7) | REX_R(s);
3b46e624 4067
2c0262af 4068 /* generate a generic store */
0af10c86 4069 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
2c0262af
FB
4070 break;
4071 case 0xc6:
4072 case 0xc7: /* mov Ev, Iv */
ab4e4aec 4073 ot = mo_b_d(b, dflag);
e3af7c78 4074 modrm = x86_ldub_code(env, s);
2c0262af 4075 mod = (modrm >> 6) & 3;
14ce26e7
FB
4076 if (mod != 3) {
4077 s->rip_offset = insn_const_size(ot);
4eeb3939 4078 gen_lea_modrm(env, s, modrm);
14ce26e7 4079 }
0af10c86 4080 val = insn_get(env, s, ot);
c66f9727 4081 tcg_gen_movi_tl(s->T0, val);
fd8ca9f6 4082 if (mod != 3) {
c66f9727 4083 gen_op_st_v(s, ot, s->T0, s->A0);
fd8ca9f6 4084 } else {
1dbe15ef 4085 gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0);
fd8ca9f6 4086 }
2c0262af
FB
4087 break;
4088 case 0x8a:
4089 case 0x8b: /* mov Ev, Gv */
ab4e4aec 4090 ot = mo_b_d(b, dflag);
e3af7c78 4091 modrm = x86_ldub_code(env, s);
bbdb4237 4092 reg = ((modrm >> 3) & 7) | REX_R(s);
3b46e624 4093
0af10c86 4094 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
1dbe15ef 4095 gen_op_mov_reg_v(s, ot, reg, s->T0);
2c0262af
FB
4096 break;
4097 case 0x8e: /* mov seg, Gv */
e3af7c78 4098 modrm = x86_ldub_code(env, s);
2c0262af
FB
4099 reg = (modrm >> 3) & 7;
4100 if (reg >= 6 || reg == R_CS)
4101 goto illegal_op;
4ba9938c 4102 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
100ec099 4103 gen_movl_seg_T0(s, reg);
2c0262af
FB
4104 break;
4105 case 0x8c: /* mov Gv, seg */
e3af7c78 4106 modrm = x86_ldub_code(env, s);
2c0262af
FB
4107 reg = (modrm >> 3) & 7;
4108 mod = (modrm >> 6) & 3;
4109 if (reg >= 6)
4110 goto illegal_op;
c66f9727 4111 gen_op_movl_T0_seg(s, reg);
ab4e4aec 4112 ot = mod == 3 ? dflag : MO_16;
0af10c86 4113 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
4114 break;
4115
4116 case 0x1b6: /* movzbS Gv, Eb */
4117 case 0x1b7: /* movzwS Gv, Eb */
4118 case 0x1be: /* movsbS Gv, Eb */
4119 case 0x1bf: /* movswS Gv, Eb */
4120 {
14776ab5
TN
4121 MemOp d_ot;
4122 MemOp s_ot;
c8fbc479 4123
2c0262af 4124 /* d_ot is the size of destination */
ab4e4aec 4125 d_ot = dflag;
2c0262af 4126 /* ot is the size of source */
4ba9938c 4127 ot = (b & 1) + MO_8;
c8fbc479
RH
4128 /* s_ot is the sign+size of source */
4129 s_ot = b & 8 ? MO_SIGN | ot : ot;
4130
e3af7c78 4131 modrm = x86_ldub_code(env, s);
bbdb4237 4132 reg = ((modrm >> 3) & 7) | REX_R(s);
2c0262af 4133 mod = (modrm >> 6) & 3;
14ce26e7 4134 rm = (modrm & 7) | REX_B(s);
3b46e624 4135
2c0262af 4136 if (mod == 3) {
1dbe15ef 4137 if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) {
c66f9727 4138 tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8);
04fc2f1c 4139 } else {
1dbe15ef 4140 gen_op_mov_v_reg(s, ot, s->T0, rm);
04fc2f1c
RH
4141 switch (s_ot) {
4142 case MO_UB:
c66f9727 4143 tcg_gen_ext8u_tl(s->T0, s->T0);
04fc2f1c
RH
4144 break;
4145 case MO_SB:
c66f9727 4146 tcg_gen_ext8s_tl(s->T0, s->T0);
04fc2f1c
RH
4147 break;
4148 case MO_UW:
c66f9727 4149 tcg_gen_ext16u_tl(s->T0, s->T0);
04fc2f1c
RH
4150 break;
4151 default:
4152 case MO_SW:
c66f9727 4153 tcg_gen_ext16s_tl(s->T0, s->T0);
04fc2f1c
RH
4154 break;
4155 }
2c0262af 4156 }
1dbe15ef 4157 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
2c0262af 4158 } else {
4eeb3939 4159 gen_lea_modrm(env, s, modrm);
c66f9727 4160 gen_op_ld_v(s, s_ot, s->T0, s->A0);
1dbe15ef 4161 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
2c0262af
FB
4162 }
4163 }
4164 break;
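/*
 * For movzx/movsx above, d_ot is the destination width (the current
 * operand size) and s_ot encodes both the source width and its
 * signedness.  When the source is a legacy high-byte register
 * (AH..BH) and a signed byte is wanted, the value is sign-extracted
 * directly from bits 8..15 of the parent register.
 */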
4165
4166 case 0x8d: /* lea */
e3af7c78 4167 modrm = x86_ldub_code(env, s);
3a1d9b8b
FB
4168 mod = (modrm >> 6) & 3;
4169 if (mod == 3)
4170 goto illegal_op;
bbdb4237 4171 reg = ((modrm >> 3) & 7) | REX_R(s);
a074ce42
RH
4172 {
4173 AddressParts a = gen_lea_modrm_0(env, s, modrm);
20581aad 4174 TCGv ea = gen_lea_modrm_1(s, a, false);
620abfb0 4175 gen_lea_v_seg(s, s->aflag, ea, -1, -1);
1dbe15ef 4176 gen_op_mov_reg_v(s, dflag, reg, s->A0);
a074ce42 4177 }
2c0262af 4178 break;
3b46e624 4179
2c0262af
FB
4180 case 0xa0: /* mov EAX, Ov */
4181 case 0xa1:
4182 case 0xa2: /* mov Ov, EAX */
4183 case 0xa3:
2c0262af 4184 {
14ce26e7
FB
4185 target_ulong offset_addr;
4186
ab4e4aec 4187 ot = mo_b_d(b, dflag);
efcca7ef 4188 offset_addr = insn_get_addr(env, s, s->aflag);
6b672b5d 4189 tcg_gen_movi_tl(s->A0, offset_addr);
664e0f19 4190 gen_add_A0_ds_seg(s);
14ce26e7 4191 if ((b & 2) == 0) {
c66f9727 4192 gen_op_ld_v(s, ot, s->T0, s->A0);
1dbe15ef 4193 gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
14ce26e7 4194 } else {
1dbe15ef 4195 gen_op_mov_v_reg(s, ot, s->T0, R_EAX);
c66f9727 4196 gen_op_st_v(s, ot, s->T0, s->A0);
2c0262af
FB
4197 }
4198 }
2c0262af
FB
4199 break;
4200 case 0xd7: /* xlat */
6b672b5d 4201 tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]);
c66f9727
EC
4202 tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]);
4203 tcg_gen_add_tl(s->A0, s->A0, s->T0);
664e0f19 4204 gen_add_A0_ds_seg(s);
c66f9727 4205 gen_op_ld_v(s, MO_8, s->T0, s->A0);
1dbe15ef 4206 gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
2c0262af
FB
4207 break;
4208 case 0xb0 ... 0xb7: /* mov R, Ib */
4ba9938c 4209 val = insn_get(env, s, MO_8);
c66f9727 4210 tcg_gen_movi_tl(s->T0, val);
1dbe15ef 4211 gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0);
2c0262af
FB
4212 break;
4213 case 0xb8 ... 0xbf: /* mov R, Iv */
14ce26e7 4214#ifdef TARGET_X86_64
ab4e4aec 4215 if (dflag == MO_64) {
14ce26e7
FB
4216 uint64_t tmp;
4217 /* 64 bit case */
e3af7c78 4218 tmp = x86_ldq_code(env, s);
14ce26e7 4219 reg = (b & 7) | REX_B(s);
c66f9727 4220 tcg_gen_movi_tl(s->T0, tmp);
1dbe15ef 4221 gen_op_mov_reg_v(s, MO_64, reg, s->T0);
5fafdf24 4222 } else
14ce26e7
FB
4223#endif
4224 {
ab4e4aec 4225 ot = dflag;
0af10c86 4226 val = insn_get(env, s, ot);
14ce26e7 4227 reg = (b & 7) | REX_B(s);
c66f9727 4228 tcg_gen_movi_tl(s->T0, val);
1dbe15ef 4229 gen_op_mov_reg_v(s, ot, reg, s->T0);
14ce26e7 4230 }
2c0262af
FB
4231 break;
4232
4233 case 0x91 ... 0x97: /* xchg R, EAX */
7418027e 4234 do_xchg_reg_eax:
ab4e4aec 4235 ot = dflag;
14ce26e7 4236 reg = (b & 7) | REX_B(s);
2c0262af
FB
4237 rm = R_EAX;
4238 goto do_xchg_reg;
4239 case 0x86:
4240 case 0x87: /* xchg Ev, Gv */
ab4e4aec 4241 ot = mo_b_d(b, dflag);
e3af7c78 4242 modrm = x86_ldub_code(env, s);
bbdb4237 4243 reg = ((modrm >> 3) & 7) | REX_R(s);
2c0262af
FB
4244 mod = (modrm >> 6) & 3;
4245 if (mod == 3) {
14ce26e7 4246 rm = (modrm & 7) | REX_B(s);
2c0262af 4247 do_xchg_reg:
1dbe15ef
EC
4248 gen_op_mov_v_reg(s, ot, s->T0, reg);
4249 gen_op_mov_v_reg(s, ot, s->T1, rm);
4250 gen_op_mov_reg_v(s, ot, rm, s->T0);
4251 gen_op_mov_reg_v(s, ot, reg, s->T1);
2c0262af 4252 } else {
4eeb3939 4253 gen_lea_modrm(env, s, modrm);
1dbe15ef 4254 gen_op_mov_v_reg(s, ot, s->T0, reg);
2c0262af 4255 /* for xchg, lock is implicit */
b48597b0 4256 tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0,
ea97ebe8 4257 s->mem_index, ot | MO_LE);
1dbe15ef 4258 gen_op_mov_reg_v(s, ot, reg, s->T1);
2c0262af
FB
4259 }
4260 break;
4261 case 0xc4: /* les Gv */
701ed211 4262 /* In CODE64 this is VEX3; see above. */
2c0262af
FB
4263 op = R_ES;
4264 goto do_lxx;
4265 case 0xc5: /* lds Gv */
701ed211 4266 /* In CODE64 this is VEX2; see above. */
2c0262af
FB
4267 op = R_DS;
4268 goto do_lxx;
4269 case 0x1b2: /* lss Gv */
4270 op = R_SS;
4271 goto do_lxx;
4272 case 0x1b4: /* lfs Gv */
4273 op = R_FS;
4274 goto do_lxx;
4275 case 0x1b5: /* lgs Gv */
4276 op = R_GS;
4277 do_lxx:
ab4e4aec 4278 ot = dflag != MO_16 ? MO_32 : MO_16;
e3af7c78 4279 modrm = x86_ldub_code(env, s);
bbdb4237 4280 reg = ((modrm >> 3) & 7) | REX_R(s);
2c0262af
FB
4281 mod = (modrm >> 6) & 3;
4282 if (mod == 3)
4283 goto illegal_op;
4eeb3939 4284 gen_lea_modrm(env, s, modrm);
b48597b0 4285 gen_op_ld_v(s, ot, s->T1, s->A0);
830a19a4 4286 gen_add_A0_im(s, 1 << ot);
2c0262af 4287 /* load the segment first to handle exceptions properly */
c66f9727 4288 gen_op_ld_v(s, MO_16, s->T0, s->A0);
100ec099 4289 gen_movl_seg_T0(s, op);
2c0262af 4290 /* then put the data */
1dbe15ef 4291 gen_op_mov_reg_v(s, ot, reg, s->T1);
2c0262af 4292 break;
3b46e624 4293
2c0262af
FB
4294 /************************/
4295 /* shifts */
4296 case 0xc0:
4297 case 0xc1:
4298 /* shift Ev,Ib */
4299 shift = 2;
4300 grp2:
4301 {
ab4e4aec 4302 ot = mo_b_d(b, dflag);
e3af7c78 4303 modrm = x86_ldub_code(env, s);
2c0262af 4304 mod = (modrm >> 6) & 3;
2c0262af 4305 op = (modrm >> 3) & 7;
3b46e624 4306
2c0262af 4307 if (mod != 3) {
14ce26e7
FB
4308 if (shift == 2) {
4309 s->rip_offset = 1;
4310 }
4eeb3939 4311 gen_lea_modrm(env, s, modrm);
2c0262af
FB
4312 opreg = OR_TMP0;
4313 } else {
14ce26e7 4314 opreg = (modrm & 7) | REX_B(s);
2c0262af
FB
4315 }
4316
4317 /* simpler op */
4318 if (shift == 0) {
4319 gen_shift(s, op, ot, opreg, OR_ECX);
4320 } else {
4321 if (shift == 2) {
e3af7c78 4322 shift = x86_ldub_code(env, s);
2c0262af
FB
4323 }
4324 gen_shifti(s, op, ot, opreg, shift);
4325 }
4326 }
4327 break;
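/*
 * The grp2 shifts share one decode path: "shift" distinguishes the
 * immediate (0xc0/0xc1), shift-by-one (0xd0/0xd1) and shift-by-CL
 * (0xd2/0xd3) forms, while op (the modrm reg field) selects
 * rol/ror/rcl/rcr/shl/shr/sal/sar.
 */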
4328 case 0xd0:
4329 case 0xd1:
4330 /* shift Ev,1 */
4331 shift = 1;
4332 goto grp2;
4333 case 0xd2:
4334 case 0xd3:
4335 /* shift Ev,cl */
4336 shift = 0;
4337 goto grp2;
4338
4339 case 0x1a4: /* shld imm */
4340 op = 0;
4341 shift = 1;
4342 goto do_shiftd;
4343 case 0x1a5: /* shld cl */
4344 op = 0;
4345 shift = 0;
4346 goto do_shiftd;
4347 case 0x1ac: /* shrd imm */
4348 op = 1;
4349 shift = 1;
4350 goto do_shiftd;
4351 case 0x1ad: /* shrd cl */
4352 op = 1;
4353 shift = 0;
4354 do_shiftd:
ab4e4aec 4355 ot = dflag;
e3af7c78 4356 modrm = x86_ldub_code(env, s);
2c0262af 4357 mod = (modrm >> 6) & 3;
14ce26e7 4358 rm = (modrm & 7) | REX_B(s);
bbdb4237 4359 reg = ((modrm >> 3) & 7) | REX_R(s);
2c0262af 4360 if (mod != 3) {
4eeb3939 4361 gen_lea_modrm(env, s, modrm);
b6abf97d 4362 opreg = OR_TMP0;
2c0262af 4363 } else {
b6abf97d 4364 opreg = rm;
2c0262af 4365 }
1dbe15ef 4366 gen_op_mov_v_reg(s, ot, s->T1, reg);
3b46e624 4367
2c0262af 4368 if (shift) {
3df11bb1 4369 TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
3b9d3cf1 4370 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
2c0262af 4371 } else {
3b9d3cf1 4372 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
2c0262af
FB
4373 }
4374 break;
4375
4376 /************************/
4377 /* floats */
5fafdf24 4378 case 0xd8 ... 0xdf:
505910a6 4379 {
84abdd7d
ZK
4380 bool update_fip = true;
4381
505910a6
ZK
4382 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
4383 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
 4384 /* XXX: unclear what should happen if the opcode is also illegal */
52236550 4385 gen_exception(s, EXCP07_PREX);
505910a6
ZK
4386 break;
4387 }
4388 modrm = x86_ldub_code(env, s);
4389 mod = (modrm >> 6) & 3;
4390 rm = modrm & 7;
4391 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
4392 if (mod != 3) {
4393 /* memory op */
84abdd7d 4394 AddressParts a = gen_lea_modrm_0(env, s, modrm);
20581aad 4395 TCGv ea = gen_lea_modrm_1(s, a, false);
84abdd7d
ZK
4396 TCGv last_addr = tcg_temp_new();
4397 bool update_fdp = true;
4398
4399 tcg_gen_mov_tl(last_addr, ea);
4400 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
4401
505910a6
ZK
4402 switch (op) {
4403 case 0x00 ... 0x07: /* fxxxs */
4404 case 0x10 ... 0x17: /* fixxxl */
4405 case 0x20 ... 0x27: /* fxxxl */
4406 case 0x30 ... 0x37: /* fixxx */
4407 {
4408 int op1;
4409 op1 = op & 7;
4410
4411 switch (op >> 4) {
4412 case 0:
4413 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4414 s->mem_index, MO_LEUL);
ad75a51e 4415 gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
505910a6
ZK
4416 break;
4417 case 1:
4418 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4419 s->mem_index, MO_LEUL);
ad75a51e 4420 gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
505910a6
ZK
4421 break;
4422 case 2:
4423 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
fc313c64 4424 s->mem_index, MO_LEUQ);
ad75a51e 4425 gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
505910a6
ZK
4426 break;
4427 case 3:
4428 default:
4429 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4430 s->mem_index, MO_LESW);
ad75a51e 4431 gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
505910a6
ZK
4432 break;
4433 }
2c0262af 4434
505910a6
ZK
4435 gen_helper_fp_arith_ST0_FT0(op1);
4436 if (op1 == 3) {
4437 /* fcomp needs pop */
ad75a51e 4438 gen_helper_fpop(tcg_env);
505910a6
ZK
4439 }
4440 }
4441 break;
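/*
 * The memory arithmetic forms above decode as follows: bits 5:4 of
 * op select the operand type (float32, int32, float64, int16) and
 * the low three bits select the arithmetic op; op 3 (fcomp) also
 * pops the stack.
 */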
4442 case 0x08: /* flds */
4443 case 0x0a: /* fsts */
4444 case 0x0b: /* fstps */
4445 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
4446 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
4447 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
4448 switch (op & 7) {
2c0262af 4449 case 0:
505910a6
ZK
4450 switch (op >> 4) {
4451 case 0:
4452 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4453 s->mem_index, MO_LEUL);
ad75a51e 4454 gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
505910a6
ZK
4455 break;
4456 case 1:
4457 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4458 s->mem_index, MO_LEUL);
ad75a51e 4459 gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
505910a6
ZK
4460 break;
4461 case 2:
4462 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
fc313c64 4463 s->mem_index, MO_LEUQ);
ad75a51e 4464 gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
505910a6
ZK
4465 break;
4466 case 3:
4467 default:
4468 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4469 s->mem_index, MO_LESW);
ad75a51e 4470 gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
505910a6
ZK
4471 break;
4472 }
2c0262af
FB
4473 break;
4474 case 1:
505910a6
ZK
 4475 /* XXX: the corresponding CPUID bit (SSE3, for the fisttp forms) must be tested! */
4476 switch (op >> 4) {
4477 case 1:
ad75a51e 4478 gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
505910a6
ZK
4479 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4480 s->mem_index, MO_LEUL);
4481 break;
4482 case 2:
ad75a51e 4483 gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
505910a6 4484 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
fc313c64 4485 s->mem_index, MO_LEUQ);
505910a6
ZK
4486 break;
4487 case 3:
4488 default:
ad75a51e 4489 gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
505910a6
ZK
4490 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4491 s->mem_index, MO_LEUW);
4492 break;
4493 }
ad75a51e 4494 gen_helper_fpop(tcg_env);
2c0262af 4495 break;
2c0262af 4496 default:
505910a6
ZK
4497 switch (op >> 4) {
4498 case 0:
ad75a51e 4499 gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
505910a6
ZK
4500 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4501 s->mem_index, MO_LEUL);
4502 break;
4503 case 1:
ad75a51e 4504 gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
505910a6
ZK
4505 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4506 s->mem_index, MO_LEUL);
4507 break;
4508 case 2:
ad75a51e 4509 gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
505910a6 4510 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
fc313c64 4511 s->mem_index, MO_LEUQ);
505910a6
ZK
4512 break;
4513 case 3:
4514 default:
ad75a51e 4515 gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
505910a6
ZK
4516 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4517 s->mem_index, MO_LEUW);
4518 break;
4519 }
4520 if ((op & 7) == 3) {
ad75a51e 4521 gen_helper_fpop(tcg_env);
505910a6 4522 }
2c0262af
FB
4523 break;
4524 }
505910a6
ZK
4525 break;
4526 case 0x0c: /* fldenv mem */
ad75a51e 4527 gen_helper_fldenv(tcg_env, s->A0,
3df11bb1 4528 tcg_constant_i32(dflag - 1));
84abdd7d 4529 update_fip = update_fdp = false;
505910a6
ZK
4530 break;
4531 case 0x0d: /* fldcw mem */
4532 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4533 s->mem_index, MO_LEUW);
ad75a51e 4534 gen_helper_fldcw(tcg_env, s->tmp2_i32);
84abdd7d 4535 update_fip = update_fdp = false;
505910a6
ZK
4536 break;
4537 case 0x0e: /* fnstenv mem */
ad75a51e 4538 gen_helper_fstenv(tcg_env, s->A0,
3df11bb1 4539 tcg_constant_i32(dflag - 1));
84abdd7d 4540 update_fip = update_fdp = false;
505910a6
ZK
4541 break;
4542 case 0x0f: /* fnstcw mem */
ad75a51e 4543 gen_helper_fnstcw(s->tmp2_i32, tcg_env);
505910a6
ZK
4544 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4545 s->mem_index, MO_LEUW);
84abdd7d 4546 update_fip = update_fdp = false;
505910a6
ZK
4547 break;
4548 case 0x1d: /* fldt mem */
ad75a51e 4549 gen_helper_fldt_ST0(tcg_env, s->A0);
505910a6
ZK
4550 break;
4551 case 0x1f: /* fstpt mem */
ad75a51e
RH
4552 gen_helper_fstt_ST0(tcg_env, s->A0);
4553 gen_helper_fpop(tcg_env);
505910a6
ZK
4554 break;
4555 case 0x2c: /* frstor mem */
ad75a51e 4556 gen_helper_frstor(tcg_env, s->A0,
3df11bb1 4557 tcg_constant_i32(dflag - 1));
84abdd7d 4558 update_fip = update_fdp = false;
505910a6
ZK
4559 break;
4560 case 0x2e: /* fnsave mem */
ad75a51e 4561 gen_helper_fsave(tcg_env, s->A0,
3df11bb1 4562 tcg_constant_i32(dflag - 1));
84abdd7d 4563 update_fip = update_fdp = false;
505910a6
ZK
4564 break;
4565 case 0x2f: /* fnstsw mem */
ad75a51e 4566 gen_helper_fnstsw(s->tmp2_i32, tcg_env);
505910a6
ZK
4567 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4568 s->mem_index, MO_LEUW);
84abdd7d 4569 update_fip = update_fdp = false;
505910a6
ZK
4570 break;
4571 case 0x3c: /* fbld */
ad75a51e 4572 gen_helper_fbld_ST0(tcg_env, s->A0);
505910a6
ZK
4573 break;
4574 case 0x3e: /* fbstp */
ad75a51e
RH
4575 gen_helper_fbst_ST0(tcg_env, s->A0);
4576 gen_helper_fpop(tcg_env);
505910a6
ZK
4577 break;
4578 case 0x3d: /* fildll */
4579 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
fc313c64 4580 s->mem_index, MO_LEUQ);
ad75a51e 4581 gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
505910a6
ZK
4582 break;
4583 case 0x3f: /* fistpll */
ad75a51e 4584 gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
505910a6 4585 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
fc313c64 4586 s->mem_index, MO_LEUQ);
ad75a51e 4587 gen_helper_fpop(tcg_env);
505910a6
ZK
4588 break;
4589 default:
4590 goto unknown_op;
4591 }
84abdd7d
ZK
4592
4593 if (update_fdp) {
4594 int last_seg = s->override >= 0 ? s->override : a.def_seg;
4595
ad75a51e 4596 tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
84abdd7d
ZK
4597 offsetof(CPUX86State,
4598 segs[last_seg].selector));
ad75a51e 4599 tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
84abdd7d 4600 offsetof(CPUX86State, fpds));
ad75a51e 4601 tcg_gen_st_tl(last_addr, tcg_env,
84abdd7d
ZK
4602 offsetof(CPUX86State, fpdp));
4603 }
505910a6
ZK
4604 } else {
4605 /* register float ops */
4606 opreg = rm;
3b46e624 4607
505910a6
ZK
4608 switch (op) {
4609 case 0x08: /* fld sti */
ad75a51e
RH
4610 gen_helper_fpush(tcg_env);
4611 gen_helper_fmov_ST0_STN(tcg_env,
3df11bb1 4612 tcg_constant_i32((opreg + 1) & 7));
505910a6
ZK
4613 break;
4614 case 0x09: /* fxchg sti */
4615 case 0x29: /* fxchg4 sti, undocumented op */
4616 case 0x39: /* fxchg7 sti, undocumented op */
ad75a51e 4617 gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
505910a6
ZK
4618 break;
4619 case 0x0a: /* grp d9/2 */
4620 switch (rm) {
4621 case 0: /* fnop */
c1f27a0c
PB
4622 /*
4623 * check exceptions (FreeBSD FPU probe)
4624 * needs to be treated as I/O because of ferr_irq
4625 */
4626 translator_io_start(&s->base);
ad75a51e 4627 gen_helper_fwait(tcg_env);
84abdd7d 4628 update_fip = false;
505910a6
ZK
4629 break;
4630 default:
4631 goto unknown_op;
2c0262af 4632 }
505910a6
ZK
4633 break;
4634 case 0x0c: /* grp d9/4 */
4635 switch (rm) {
4636 case 0: /* fchs */
ad75a51e 4637 gen_helper_fchs_ST0(tcg_env);
2c0262af 4638 break;
505910a6 4639 case 1: /* fabs */
ad75a51e 4640 gen_helper_fabs_ST0(tcg_env);
2c0262af 4641 break;
505910a6 4642 case 4: /* ftst */
ad75a51e
RH
4643 gen_helper_fldz_FT0(tcg_env);
4644 gen_helper_fcom_ST0_FT0(tcg_env);
2c0262af 4645 break;
505910a6 4646 case 5: /* fxam */
ad75a51e 4647 gen_helper_fxam_ST0(tcg_env);
2c0262af 4648 break;
505910a6
ZK
4649 default:
4650 goto unknown_op;
2c0262af
FB
4651 }
4652 break;
505910a6
ZK
4653 case 0x0d: /* grp d9/5 */
4654 {
4655 switch (rm) {
4656 case 0:
ad75a51e
RH
4657 gen_helper_fpush(tcg_env);
4658 gen_helper_fld1_ST0(tcg_env);
505910a6
ZK
4659 break;
4660 case 1:
ad75a51e
RH
4661 gen_helper_fpush(tcg_env);
4662 gen_helper_fldl2t_ST0(tcg_env);
505910a6
ZK
4663 break;
4664 case 2:
ad75a51e
RH
4665 gen_helper_fpush(tcg_env);
4666 gen_helper_fldl2e_ST0(tcg_env);
505910a6
ZK
4667 break;
4668 case 3:
ad75a51e
RH
4669 gen_helper_fpush(tcg_env);
4670 gen_helper_fldpi_ST0(tcg_env);
505910a6
ZK
4671 break;
4672 case 4:
ad75a51e
RH
4673 gen_helper_fpush(tcg_env);
4674 gen_helper_fldlg2_ST0(tcg_env);
505910a6
ZK
4675 break;
4676 case 5:
ad75a51e
RH
4677 gen_helper_fpush(tcg_env);
4678 gen_helper_fldln2_ST0(tcg_env);
505910a6
ZK
4679 break;
4680 case 6:
ad75a51e
RH
4681 gen_helper_fpush(tcg_env);
4682 gen_helper_fldz_ST0(tcg_env);
505910a6
ZK
4683 break;
4684 default:
4685 goto unknown_op;
4686 }
4687 }
4688 break;
4689 case 0x0e: /* grp d9/6 */
4690 switch (rm) {
4691 case 0: /* f2xm1 */
ad75a51e 4692 gen_helper_f2xm1(tcg_env);
505910a6
ZK
4693 break;
4694 case 1: /* fyl2x */
ad75a51e 4695 gen_helper_fyl2x(tcg_env);
505910a6
ZK
4696 break;
4697 case 2: /* fptan */
ad75a51e 4698 gen_helper_fptan(tcg_env);
505910a6
ZK
4699 break;
4700 case 3: /* fpatan */
ad75a51e 4701 gen_helper_fpatan(tcg_env);
465e9838 4702 break;
505910a6 4703 case 4: /* fxtract */
ad75a51e 4704 gen_helper_fxtract(tcg_env);
505910a6
ZK
4705 break;
4706 case 5: /* fprem1 */
ad75a51e 4707 gen_helper_fprem1(tcg_env);
505910a6
ZK
4708 break;
4709 case 6: /* fdecstp */
ad75a51e 4710 gen_helper_fdecstp(tcg_env);
465e9838 4711 break;
465e9838 4712 default:
505910a6 4713 case 7: /* fincstp */
ad75a51e 4714 gen_helper_fincstp(tcg_env);
19e6c4b8 4715 break;
465e9838 4716 }
465e9838 4717 break;
505910a6
ZK
4718 case 0x0f: /* grp d9/7 */
4719 switch (rm) {
4720 case 0: /* fprem */
ad75a51e 4721 gen_helper_fprem(tcg_env);
2c0262af 4722 break;
505910a6 4723 case 1: /* fyl2xp1 */
ad75a51e 4724 gen_helper_fyl2xp1(tcg_env);
505910a6
ZK
4725 break;
4726 case 2: /* fsqrt */
ad75a51e 4727 gen_helper_fsqrt(tcg_env);
505910a6
ZK
4728 break;
4729 case 3: /* fsincos */
ad75a51e 4730 gen_helper_fsincos(tcg_env);
505910a6
ZK
4731 break;
4732 case 5: /* fscale */
ad75a51e 4733 gen_helper_fscale(tcg_env);
505910a6
ZK
4734 break;
4735 case 4: /* frndint */
ad75a51e 4736 gen_helper_frndint(tcg_env);
2c0262af 4737 break;
505910a6 4738 case 6: /* fsin */
ad75a51e 4739 gen_helper_fsin(tcg_env);
2c0262af 4740 break;
2c0262af 4741 default:
505910a6 4742 case 7: /* fcos */
ad75a51e 4743 gen_helper_fcos(tcg_env);
2c0262af
FB
4744 break;
4745 }
2c0262af 4746 break;
505910a6
ZK
4747 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
4748 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
4749 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
4750 {
4751 int op1;
4752
4753 op1 = op & 7;
4754 if (op >= 0x20) {
4755 gen_helper_fp_arith_STN_ST0(op1, opreg);
4756 if (op >= 0x30) {
ad75a51e 4757 gen_helper_fpop(tcg_env);
505910a6
ZK
4758 }
4759 } else {
ad75a51e 4760 gen_helper_fmov_FT0_STN(tcg_env,
3df11bb1 4761 tcg_constant_i32(opreg));
505910a6
ZK
4762 gen_helper_fp_arith_ST0_FT0(op1);
4763 }
4764 }
2c0262af 4765 break;
505910a6
ZK
4766 case 0x02: /* fcom */
4767 case 0x22: /* fcom2, undocumented op */
ad75a51e
RH
4768 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4769 gen_helper_fcom_ST0_FT0(tcg_env);
2c0262af 4770 break;
505910a6
ZK
4771 case 0x03: /* fcomp */
4772 case 0x23: /* fcomp3, undocumented op */
4773 case 0x32: /* fcomp5, undocumented op */
ad75a51e
RH
4774 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4775 gen_helper_fcom_ST0_FT0(tcg_env);
4776 gen_helper_fpop(tcg_env);
2c0262af 4777 break;
505910a6
ZK
4778 case 0x15: /* da/5 */
4779 switch (rm) {
4780 case 1: /* fucompp */
ad75a51e
RH
4781 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
4782 gen_helper_fucom_ST0_FT0(tcg_env);
4783 gen_helper_fpop(tcg_env);
4784 gen_helper_fpop(tcg_env);
2c0262af 4785 break;
505910a6
ZK
4786 default:
4787 goto unknown_op;
4788 }
4789 break;
4790 case 0x1c:
4791 switch (rm) {
4792 case 0: /* feni (287 only, just do nop here) */
2c0262af 4793 break;
505910a6 4794 case 1: /* fdisi (287 only, just do nop here) */
2c0262af 4795 break;
505910a6 4796 case 2: /* fclex */
ad75a51e 4797 gen_helper_fclex(tcg_env);
84abdd7d 4798 update_fip = false;
2c0262af 4799 break;
505910a6 4800 case 3: /* fninit */
ad75a51e 4801 gen_helper_fninit(tcg_env);
84abdd7d 4802 update_fip = false;
2c0262af 4803 break;
505910a6 4804 case 4: /* fsetpm (287 only, just do nop here) */
2c0262af
FB
4805 break;
4806 default:
b9f9c5b4 4807 goto unknown_op;
2c0262af 4808 }
2c0262af 4809 break;
505910a6
ZK
4810 case 0x1d: /* fucomi */
4811 if (!(s->cpuid_features & CPUID_CMOV)) {
4812 goto illegal_op;
4813 }
4814 gen_update_cc_op(s);
ad75a51e
RH
4815 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4816 gen_helper_fucomi_ST0_FT0(tcg_env);
505910a6 4817 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 4818 break;
505910a6
ZK
4819 case 0x1e: /* fcomi */
4820 if (!(s->cpuid_features & CPUID_CMOV)) {
4821 goto illegal_op;
4822 }
4823 gen_update_cc_op(s);
ad75a51e
RH
4824 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4825 gen_helper_fcomi_ST0_FT0(tcg_env);
505910a6 4826 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 4827 break;
505910a6 4828 case 0x28: /* ffree sti */
ad75a51e 4829 gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2c0262af 4830 break;
505910a6 4831 case 0x2a: /* fst sti */
ad75a51e 4832 gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2c0262af 4833 break;
505910a6
ZK
4834 case 0x2b: /* fstp sti */
4835 case 0x0b: /* fstp1 sti, undocumented op */
4836 case 0x3a: /* fstp8 sti, undocumented op */
4837 case 0x3b: /* fstp9 sti, undocumented op */
ad75a51e
RH
4838 gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
4839 gen_helper_fpop(tcg_env);
2c0262af 4840 break;
505910a6 4841 case 0x2c: /* fucom st(i) */
ad75a51e
RH
4842 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4843 gen_helper_fucom_ST0_FT0(tcg_env);
2c0262af 4844 break;
505910a6 4845 case 0x2d: /* fucomp st(i) */
ad75a51e
RH
4846 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4847 gen_helper_fucom_ST0_FT0(tcg_env);
4848 gen_helper_fpop(tcg_env);
2c0262af 4849 break;
505910a6
ZK
4850 case 0x33: /* de/3 */
4851 switch (rm) {
4852 case 1: /* fcompp */
ad75a51e
RH
4853 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
4854 gen_helper_fcom_ST0_FT0(tcg_env);
4855 gen_helper_fpop(tcg_env);
4856 gen_helper_fpop(tcg_env);
505910a6
ZK
4857 break;
4858 default:
4859 goto unknown_op;
4860 }
2c0262af 4861 break;
505910a6 4862 case 0x38: /* ffreep sti, undocumented op */
ad75a51e
RH
4863 gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
4864 gen_helper_fpop(tcg_env);
2c0262af 4865 break;
505910a6
ZK
4866 case 0x3c: /* df/4 */
4867 switch (rm) {
4868 case 0:
ad75a51e 4869 gen_helper_fnstsw(s->tmp2_i32, tcg_env);
505910a6
ZK
4870 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
4871 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
4872 break;
4873 default:
4874 goto unknown_op;
4875 }
2c0262af 4876 break;
505910a6
ZK
4877 case 0x3d: /* fucomip */
4878 if (!(s->cpuid_features & CPUID_CMOV)) {
4879 goto illegal_op;
4880 }
4881 gen_update_cc_op(s);
ad75a51e
RH
4882 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4883 gen_helper_fucomi_ST0_FT0(tcg_env);
4884 gen_helper_fpop(tcg_env);
505910a6
ZK
4885 set_cc_op(s, CC_OP_EFLAGS);
4886 break;
4887 case 0x3e: /* fcomip */
4888 if (!(s->cpuid_features & CPUID_CMOV)) {
4889 goto illegal_op;
4890 }
4891 gen_update_cc_op(s);
ad75a51e
RH
4892 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4893 gen_helper_fcomi_ST0_FT0(tcg_env);
4894 gen_helper_fpop(tcg_env);
505910a6 4895 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 4896 break;
505910a6
ZK
4897 case 0x10 ... 0x13: /* fcmovxx */
4898 case 0x18 ... 0x1b:
4899 {
4900 int op1;
4901 TCGLabel *l1;
4902 static const uint8_t fcmov_cc[8] = {
4903 (JCC_B << 1),
4904 (JCC_Z << 1),
4905 (JCC_BE << 1),
4906 (JCC_P << 1),
4907 };
4908
4909 if (!(s->cpuid_features & CPUID_CMOV)) {
4910 goto illegal_op;
4911 }
4912 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
4913 l1 = gen_new_label();
4914 gen_jcc1_noeob(s, op1, l1);
ad75a51e 4915 gen_helper_fmov_ST0_STN(tcg_env,
3df11bb1 4916 tcg_constant_i32(opreg));
505910a6
ZK
4917 gen_set_label(l1);
4918 }
2c0262af
FB
4919 break;
4920 default:
b9f9c5b4 4921 goto unknown_op;
2c0262af 4922 }
2c0262af 4923 }
84abdd7d
ZK
4924
4925 if (update_fip) {
ad75a51e 4926 tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
84abdd7d 4927 offsetof(CPUX86State, segs[R_CS].selector));
ad75a51e 4928 tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
84abdd7d 4929 offsetof(CPUX86State, fpcs));
75ec746a 4930 tcg_gen_st_tl(eip_cur_tl(s),
ad75a51e 4931 tcg_env, offsetof(CPUX86State, fpip));
84abdd7d 4932 }
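/*
 * The stores above implement FIP/FDP bookkeeping: x87 instructions
 * record the CS:offset of the instruction (and, for memory operands,
 * the data segment and effective address) so that fnsave/fnstenv
 * can report where the last FP instruction lived.  Pure control
 * instructions clear update_fip/update_fdp and skip this.
 */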
2c0262af
FB
4933 }
4934 break;
4935 /************************/
4936 /* string ops */
4937
4938 case 0xa4: /* movsS */
4939 case 0xa5:
ab4e4aec 4940 ot = mo_b_d(b, dflag);
2c0262af 4941 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
122e6d7b 4942 gen_repz_movs(s, ot);
2c0262af
FB
4943 } else {
4944 gen_movs(s, ot);
4945 }
4946 break;
3b46e624 4947
2c0262af
FB
4948 case 0xaa: /* stosS */
4949 case 0xab:
ab4e4aec 4950 ot = mo_b_d(b, dflag);
3497f164 4951 gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
2c0262af 4952 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
122e6d7b 4953 gen_repz_stos(s, ot);
2c0262af
FB
4954 } else {
4955 gen_stos(s, ot);
4956 }
4957 break;
4958 case 0xac: /* lodsS */
4959 case 0xad:
ab4e4aec 4960 ot = mo_b_d(b, dflag);
2c0262af 4961 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
122e6d7b 4962 gen_repz_lods(s, ot);
2c0262af
FB
4963 } else {
4964 gen_lods(s, ot);
4965 }
4966 break;
4967 case 0xae: /* scasS */
4968 case 0xaf:
ab4e4aec 4969 ot = mo_b_d(b, dflag);
3497f164 4970 gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
2c0262af 4971 if (prefixes & PREFIX_REPNZ) {
122e6d7b 4972 gen_repz_scas(s, ot, 1);
2c0262af 4973 } else if (prefixes & PREFIX_REPZ) {
122e6d7b 4974 gen_repz_scas(s, ot, 0);
2c0262af
FB
4975 } else {
4976 gen_scas(s, ot);
2c0262af
FB
4977 }
4978 break;
4979
4980 case 0xa6: /* cmpsS */
4981 case 0xa7:
ab4e4aec 4982 ot = mo_b_d(b, dflag);
2c0262af 4983 if (prefixes & PREFIX_REPNZ) {
122e6d7b 4984 gen_repz_cmps(s, ot, 1);
2c0262af 4985 } else if (prefixes & PREFIX_REPZ) {
122e6d7b 4986 gen_repz_cmps(s, ot, 0);
2c0262af
FB
4987 } else {
4988 gen_cmps(s, ot);
2c0262af
FB
4989 }
4990 break;
4991 case 0x6c: /* insS */
4992 case 0x6d:
ab4e4aec 4993 ot = mo_b_d32(b, dflag);
1bca40fe
RH
4994 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
4995 tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
4996 if (!gen_check_io(s, ot, s->tmp2_i32,
4997 SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
bc2e436d
RH
4998 break;
4999 }
dfd1b812 5000 translator_io_start(&s->base);
f115e911 5001 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
122e6d7b 5002 gen_repz_ins(s, ot);
2c0262af 5003 } else {
f115e911 5004 gen_ins(s, ot);
2c0262af
FB
5005 }
5006 break;
5007 case 0x6e: /* outsS */
5008 case 0x6f:
ab4e4aec 5009 ot = mo_b_d32(b, dflag);
1bca40fe
RH
5010 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5011 tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5012 if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) {
bc2e436d
RH
5013 break;
5014 }
dfd1b812 5015 translator_io_start(&s->base);
f115e911 5016 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
122e6d7b 5017 gen_repz_outs(s, ot);
2c0262af 5018 } else {
f115e911 5019 gen_outs(s, ot);
2c0262af
FB
5020 }
5021 break;
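/*
 * ins/outs above, like the in/out cases below, are gated by
 * gen_check_io, which checks I/O permissions (IOPL or the TSS I/O
 * bitmap) and the SVM I/O intercept, and by translator_io_start,
 * since they can touch device state.
 */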
5022
5023 /************************/
5024 /* port I/O */
0573fbfc 5025
2c0262af
FB
5026 case 0xe4:
5027 case 0xe5:
ab4e4aec 5028 ot = mo_b_d32(b, dflag);
e3af7c78 5029 val = x86_ldub_code(env, s);
1bca40fe
RH
5030 tcg_gen_movi_i32(s->tmp2_i32, val);
5031 if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
bc2e436d
RH
5032 break;
5033 }
dfd1b812 5034 translator_io_start(&s->base);
6bd48f6f 5035 gen_helper_in_func(ot, s->T1, s->tmp2_i32);
1dbe15ef 5036 gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
6bd48f6f 5037 gen_bpt_io(s, s->tmp2_i32, ot);
2c0262af
FB
5038 break;
5039 case 0xe6:
5040 case 0xe7:
ab4e4aec 5041 ot = mo_b_d32(b, dflag);
e3af7c78 5042 val = x86_ldub_code(env, s);
1bca40fe
RH
5043 tcg_gen_movi_i32(s->tmp2_i32, val);
5044 if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
bc2e436d
RH
5045 break;
5046 }
dfd1b812 5047 translator_io_start(&s->base);
1bca40fe 5048 gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
4f82446d
EC
5049 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
5050 gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
6bd48f6f 5051 gen_bpt_io(s, s->tmp2_i32, ot);
2c0262af
FB
5052 break;
5053 case 0xec:
5054 case 0xed:
ab4e4aec 5055 ot = mo_b_d32(b, dflag);
1bca40fe
RH
5056 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5057 tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5058 if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
bc2e436d
RH
5059 break;
5060 }
dfd1b812 5061 translator_io_start(&s->base);
6bd48f6f 5062 gen_helper_in_func(ot, s->T1, s->tmp2_i32);
1dbe15ef 5063 gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
6bd48f6f 5064 gen_bpt_io(s, s->tmp2_i32, ot);
2c0262af
FB
5065 break;
5066 case 0xee:
5067 case 0xef:
ab4e4aec 5068 ot = mo_b_d32(b, dflag);
1bca40fe
RH
5069 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5070 tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5071 if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
bc2e436d
RH
5072 break;
5073 }
dfd1b812 5074 translator_io_start(&s->base);
1bca40fe 5075 gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
4f82446d
EC
5076 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
5077 gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
6bd48f6f 5078 gen_bpt_io(s, s->tmp2_i32, ot);
2c0262af
FB
5079 break;
5080
5081 /************************/
5082 /* control */
5083 case 0xc2: /* ret im */
e3af7c78 5084 val = x86_ldsw_code(env, s);
8e31d234
RH
5085 ot = gen_pop_T0(s);
5086 gen_stack_update(s, val + (1 << ot));
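/*
 * Worked example of the stack update above: "ret 8" with a 32-bit
 * stack pops the return address (1 << ot == 4 bytes) and then
 * releases the extra 8 immediate bytes of callee arguments, so ESP
 * advances by 12 in total.
 */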
5087 /* Note that gen_pop_T0 uses a zero-extending load. */
e3a79e0e 5088 gen_op_jmp_v(s, s->T0);
7d117ce8 5089 gen_bnd_jmp(s);
faf9ea5f 5090 s->base.is_jmp = DISAS_JUMP;
2c0262af
FB
5091 break;
5092 case 0xc3: /* ret */
8e31d234
RH
5093 ot = gen_pop_T0(s);
5094 gen_pop_update(s, ot);
5095 /* Note that gen_pop_T0 uses a zero-extending load. */
e3a79e0e 5096 gen_op_jmp_v(s, s->T0);
7d117ce8 5097 gen_bnd_jmp(s);
faf9ea5f 5098 s->base.is_jmp = DISAS_JUMP;
2c0262af
FB
5099 break;
5100 case 0xca: /* lret im */
e3af7c78 5101 val = x86_ldsw_code(env, s);
2c0262af 5102 do_lret:
f8a35846 5103 if (PE(s) && !VM86(s)) {
773cdfcc 5104 gen_update_cc_op(s);
65e4af23 5105 gen_update_eip_cur(s);
ad75a51e 5106 gen_helper_lret_protected(tcg_env, tcg_constant_i32(dflag - 1),
3df11bb1 5107 tcg_constant_i32(val));
2c0262af
FB
5108 } else {
5109 gen_stack_A0(s);
5110 /* pop offset */
c66f9727 5111 gen_op_ld_v(s, dflag, s->T0, s->A0);
2c0262af
FB
5112 /* NOTE: keeping EIP updated is not a problem in case of
5113 exception */
e3a79e0e 5114 gen_op_jmp_v(s, s->T0);
2c0262af 5115 /* pop selector */
4e85057b 5116 gen_add_A0_im(s, 1 << dflag);
c66f9727
EC
5117 gen_op_ld_v(s, dflag, s->T0, s->A0);
5118 gen_op_movl_seg_T0_vm(s, R_CS);
2c0262af 5119 /* add stack offset */
ab4e4aec 5120 gen_stack_update(s, val + (2 << dflag));
2c0262af 5121 }
6424ac8e 5122 s->base.is_jmp = DISAS_EOB_ONLY;
2c0262af
FB
5123 break;
5124 case 0xcb: /* lret */
5125 val = 0;
5126 goto do_lret;
5127 case 0xcf: /* iret */
b53605db 5128 gen_svm_check_intercept(s, SVM_EXIT_IRET);
f8a35846 5129 if (!PE(s) || VM86(s)) {
e048f3d6 5130 /* real mode or vm86 mode */
aa9f21b1 5131 if (!check_vm86_iopl(s)) {
e048f3d6 5132 break;
f115e911 5133 }
ad75a51e 5134 gen_helper_iret_real(tcg_env, tcg_constant_i32(dflag - 1));
2c0262af 5135 } else {
ad75a51e 5136 gen_helper_iret_protected(tcg_env, tcg_constant_i32(dflag - 1),
9e599bf7 5137 eip_next_i32(s));
2c0262af 5138 }
e048f3d6 5139 set_cc_op(s, CC_OP_EFLAGS);
6424ac8e 5140 s->base.is_jmp = DISAS_EOB_ONLY;
2c0262af
FB
5141 break;
5142 case 0xe8: /* call im */
5143 {
8760ded6
RH
5144 int diff = (dflag != MO_16
5145 ? (int32_t)insn_get(env, s, MO_32)
5146 : (int16_t)insn_get(env, s, MO_16));
9e599bf7 5147 gen_push_v(s, eip_next_tl(s));
7d117ce8 5148 gen_bnd_jmp(s);
8760ded6 5149 gen_jmp_rel(s, dflag, diff, 0);
2c0262af
FB
5150 }
5151 break;
5152 case 0x9a: /* lcall im */
5153 {
5154 unsigned int selector, offset;
3b46e624 5155
14ce26e7
FB
5156 if (CODE64(s))
5157 goto illegal_op;
ab4e4aec 5158 ot = dflag;
0af10c86 5159 offset = insn_get(env, s, ot);
4ba9938c 5160 selector = insn_get(env, s, MO_16);
3b46e624 5161
c66f9727 5162 tcg_gen_movi_tl(s->T0, selector);
b48597b0 5163 tcg_gen_movi_tl(s->T1, offset);
2c0262af
FB
5164 }
5165 goto do_lcall;
ecada8a2 5166 case 0xe9: /* jmp im */
8760ded6
RH
5167 {
5168 int diff = (dflag != MO_16
5169 ? (int32_t)insn_get(env, s, MO_32)
5170 : (int16_t)insn_get(env, s, MO_16));
5171 gen_bnd_jmp(s);
5172 gen_jmp_rel(s, dflag, diff, 0);
ab4e4aec 5173 }
2c0262af
FB
5174 break;
5175 case 0xea: /* ljmp im */
5176 {
5177 unsigned int selector, offset;
5178
14ce26e7
FB
5179 if (CODE64(s))
5180 goto illegal_op;
ab4e4aec 5181 ot = dflag;
0af10c86 5182 offset = insn_get(env, s, ot);
4ba9938c 5183 selector = insn_get(env, s, MO_16);
3b46e624 5184
c66f9727 5185 tcg_gen_movi_tl(s->T0, selector);
b48597b0 5186 tcg_gen_movi_tl(s->T1, offset);
2c0262af
FB
5187 }
5188 goto do_ljmp;
5189 case 0xeb: /* jmp Jb */
8760ded6
RH
5190 {
5191 int diff = (int8_t)insn_get(env, s, MO_8);
5192 gen_jmp_rel(s, dflag, diff, 0);
ab4e4aec 5193 }
2c0262af
FB
5194 break;
5195 case 0x70 ... 0x7f: /* jcc Jb */
54b191de
RH
5196 {
5197 int diff = (int8_t)insn_get(env, s, MO_8);
5198 gen_bnd_jmp(s);
5199 gen_jcc(s, b, diff);
2c0262af 5200 }
54b191de
RH
5201 break;
5202 case 0x180 ... 0x18f: /* jcc Jv */
5203 {
5204 int diff = (dflag != MO_16
5205 ? (int32_t)insn_get(env, s, MO_32)
5206 : (int16_t)insn_get(env, s, MO_16));
5207 gen_bnd_jmp(s);
5208 gen_jcc(s, b, diff);
ab4e4aec 5209 }
2c0262af
FB
5210 break;
5211
5212 case 0x190 ... 0x19f: /* setcc Gv */
e3af7c78 5213 modrm = x86_ldub_code(env, s);
c66f9727 5214 gen_setcc1(s, b, s->T0);
4ba9938c 5215 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
2c0262af
FB
5216 break;
5217 case 0x140 ... 0x14f: /* cmov Gv, Ev */
bff93281
PM
5218 if (!(s->cpuid_features & CPUID_CMOV)) {
5219 goto illegal_op;
5220 }
ab4e4aec 5221 ot = dflag;
e3af7c78 5222 modrm = x86_ldub_code(env, s);
bbdb4237 5223 reg = ((modrm >> 3) & 7) | REX_R(s);
d4f61171
PB
5224 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5225 gen_cmovcc1(s, b ^ 1, s->T0, cpu_regs[reg]);
5226 gen_op_mov_reg_v(s, ot, reg, s->T0);
2c0262af 5227 break;
3b46e624 5228
2c0262af
FB
5229 /************************/
5230 /* flags */
5231 case 0x9c: /* pushf */
b53605db 5232 gen_svm_check_intercept(s, SVM_EXIT_PUSHF);
aa9f21b1 5233 if (check_vm86_iopl(s)) {
773cdfcc 5234 gen_update_cc_op(s);
ad75a51e 5235 gen_helper_read_eflags(s->T0, tcg_env);
c66f9727 5236 gen_push_v(s, s->T0);
2c0262af
FB
5237 }
5238 break;
5239 case 0x9d: /* popf */
b53605db 5240 gen_svm_check_intercept(s, SVM_EXIT_POPF);
aa9f21b1 5241 if (check_vm86_iopl(s)) {
3e7da311
RH
5242 int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK;
5243
01b9d8c1 5244 if (CPL(s) == 0) {
3e7da311
RH
5245 mask |= IF_MASK | IOPL_MASK;
5246 } else if (CPL(s) <= IOPL(s)) {
5247 mask |= IF_MASK;
5248 }
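/*
 * Writable-flags rule encoded above: CPL 0 may change IF and IOPL,
 * CPL <= IOPL may change IF only, any other CPL changes neither;
 * TF, AC, ID and NT are always writable on this path.
 */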
5249 if (dflag == MO_16) {
5250 mask &= 0xffff;
2c0262af 5251 }
3e7da311
RH
5252
5253 ot = gen_pop_T0(s);
ad75a51e 5254 gen_helper_write_eflags(tcg_env, s->T0, tcg_constant_i32(mask));
8e31d234 5255 gen_pop_update(s, ot);
3ca51d07 5256 set_cc_op(s, CC_OP_EFLAGS);
a9321a4d 5257 /* abort translation because TF/AC flag may change */
634a4051 5258 s->base.is_jmp = DISAS_EOB_NEXT;
2c0262af
FB
5259 }
5260 break;
5261 case 0x9e: /* sahf */
12e26b75 5262 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
14ce26e7 5263 goto illegal_op;
35d95e41 5264 tcg_gen_shri_tl(s->T0, cpu_regs[R_EAX], 8);
d229edce 5265 gen_compute_eflags(s);
bd7a7b33 5266 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
c66f9727
EC
5267 tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
5268 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0);
2c0262af
FB
5269 break;
5270 case 0x9f: /* lahf */
12e26b75 5271 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
14ce26e7 5272 goto illegal_op;
d229edce 5273 gen_compute_eflags(s);
bd7a7b33 5274 /* Note: gen_compute_eflags() only gives the condition codes */
c66f9727 5275 tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02);
35d95e41 5276 tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], s->T0, 8, 8);
2c0262af
FB
5277 break;
5278 case 0xf5: /* cmc */
d229edce 5279 gen_compute_eflags(s);
bd7a7b33 5280 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
2c0262af
FB
5281 break;
5282 case 0xf8: /* clc */
d229edce 5283 gen_compute_eflags(s);
bd7a7b33 5284 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
2c0262af
FB
5285 break;
5286 case 0xf9: /* stc */
d229edce 5287 gen_compute_eflags(s);
bd7a7b33 5288 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
2c0262af
FB
5289 break;
5290 case 0xfc: /* cld */
6bd48f6f 5291 tcg_gen_movi_i32(s->tmp2_i32, 1);
ad75a51e 5292 tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df));
2c0262af
FB
5293 break;
5294 case 0xfd: /* std */
6bd48f6f 5295 tcg_gen_movi_i32(s->tmp2_i32, -1);
ad75a51e 5296 tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df));
2c0262af
FB
5297 break;
5298
5299 /************************/
5300 /* bit operations */
5301 case 0x1ba: /* bt/bts/btr/btc Gv, im */
ab4e4aec 5302 ot = dflag;
e3af7c78 5303 modrm = x86_ldub_code(env, s);
33698e5f 5304 op = (modrm >> 3) & 7;
2c0262af 5305 mod = (modrm >> 6) & 3;
14ce26e7 5306 rm = (modrm & 7) | REX_B(s);
2c0262af 5307 if (mod != 3) {
14ce26e7 5308 s->rip_offset = 1;
4eeb3939 5309 gen_lea_modrm(env, s, modrm);
cfe819d3 5310 if (!(s->prefix & PREFIX_LOCK)) {
c66f9727 5311 gen_op_ld_v(s, ot, s->T0, s->A0);
cfe819d3 5312 }
2c0262af 5313 } else {
1dbe15ef 5314 gen_op_mov_v_reg(s, ot, s->T0, rm);
2c0262af
FB
5315 }
5316 /* load shift */
e3af7c78 5317 val = x86_ldub_code(env, s);
b48597b0 5318 tcg_gen_movi_tl(s->T1, val);
2c0262af 5319 if (op < 4)
b9f9c5b4 5320 goto unknown_op;
2c0262af 5321 op -= 4;
f484d386 5322 goto bt_op;
2c0262af
FB
5323 case 0x1a3: /* bt Gv, Ev */
5324 op = 0;
5325 goto do_btx;
5326 case 0x1ab: /* bts */
5327 op = 1;
5328 goto do_btx;
5329 case 0x1b3: /* btr */
5330 op = 2;
5331 goto do_btx;
5332 case 0x1bb: /* btc */
5333 op = 3;
5334 do_btx:
ab4e4aec 5335 ot = dflag;
e3af7c78 5336 modrm = x86_ldub_code(env, s);
bbdb4237 5337 reg = ((modrm >> 3) & 7) | REX_R(s);
2c0262af 5338 mod = (modrm >> 6) & 3;
14ce26e7 5339 rm = (modrm & 7) | REX_B(s);
1dbe15ef 5340 gen_op_mov_v_reg(s, MO_32, s->T1, reg);
2c0262af 5341 if (mod != 3) {
cfe819d3 5342 AddressParts a = gen_lea_modrm_0(env, s, modrm);
2c0262af 5343 /* specific case: we need to add a displacement */
b48597b0 5344 gen_exts(ot, s->T1);
fbd80f02
EC
5345 tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
5346 tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
20581aad 5347 tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
6b672b5d 5348 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
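/*
 * Example for ot == MO_32: a bit offset of 100 in T1 adds
 * (100 >> 5) * 4 = 12 bytes to the effective address, and the
 * remaining low 5 bits (100 & 31 = 4) select the bit within that
 * word once masked at bt_op below.
 */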
cfe819d3 5349 if (!(s->prefix & PREFIX_LOCK)) {
c66f9727 5350 gen_op_ld_v(s, ot, s->T0, s->A0);
cfe819d3 5351 }
2c0262af 5352 } else {
1dbe15ef 5353 gen_op_mov_v_reg(s, ot, s->T0, rm);
2c0262af 5354 }
f484d386 5355 bt_op:
b48597b0 5356 tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
fbd80f02
EC
5357 tcg_gen_movi_tl(s->tmp0, 1);
5358 tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
cfe819d3
EC
5359 if (s->prefix & PREFIX_LOCK) {
5360 switch (op) {
5361 case 0: /* bt */
bad5cfcd 5362 /* Needs no atomic ops; we suppressed the normal
cfe819d3 5363 memory load for LOCK above so do it now. */
c66f9727 5364 gen_op_ld_v(s, ot, s->T0, s->A0);
cfe819d3
EC
5365 break;
5366 case 1: /* bts */
fbd80f02 5367 tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
cfe819d3
EC
5368 s->mem_index, ot | MO_LE);
5369 break;
5370 case 2: /* btr */
fbd80f02
EC
5371 tcg_gen_not_tl(s->tmp0, s->tmp0);
5372 tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
cfe819d3
EC
5373 s->mem_index, ot | MO_LE);
5374 break;
5375 default:
5376 case 3: /* btc */
fbd80f02 5377 tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
cfe819d3
EC
5378 s->mem_index, ot | MO_LE);
5379 break;
5380 }
5022f28f 5381 tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
cfe819d3 5382 } else {
5022f28f 5383 tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
cfe819d3
EC
5384 switch (op) {
5385 case 0: /* bt */
5386 /* Data already loaded; nothing to do. */
5387 break;
5388 case 1: /* bts */
fbd80f02 5389 tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
cfe819d3
EC
5390 break;
5391 case 2: /* btr */
fbd80f02 5392 tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
cfe819d3
EC
5393 break;
5394 default:
5395 case 3: /* btc */
fbd80f02 5396 tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
cfe819d3
EC
5397 break;
5398 }
5399 if (op != 0) {
5400 if (mod != 3) {
c66f9727 5401 gen_op_st_v(s, ot, s->T0, s->A0);
cfe819d3 5402 } else {
1dbe15ef 5403 gen_op_mov_reg_v(s, ot, rm, s->T0);
cfe819d3 5404 }
fd8ca9f6 5405 }
dc1823ce
RH
5406 }
5407
5408 /* Delay all CC updates until after the store above. Note that
5409 C is the result of the test, Z is unchanged, and the others
5410 are all undefined. */
5411 switch (s->cc_op) {
5412 case CC_OP_MULB ... CC_OP_MULQ:
5413 case CC_OP_ADDB ... CC_OP_ADDQ:
5414 case CC_OP_ADCB ... CC_OP_ADCQ:
5415 case CC_OP_SUBB ... CC_OP_SUBQ:
5416 case CC_OP_SBBB ... CC_OP_SBBQ:
5417 case CC_OP_LOGICB ... CC_OP_LOGICQ:
5418 case CC_OP_INCB ... CC_OP_INCQ:
5419 case CC_OP_DECB ... CC_OP_DECQ:
5420 case CC_OP_SHLB ... CC_OP_SHLQ:
5421 case CC_OP_SARB ... CC_OP_SARQ:
5422 case CC_OP_BMILGB ... CC_OP_BMILGQ:
5423 /* Z was going to be computed from the non-zero status of CC_DST.
5424 We can get that same Z value (and the new C value) by leaving
5425 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
5426 same width. */
5022f28f 5427 tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
dc1823ce
RH
5428 set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
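/*
 * For instance, an earlier CC_OP_ADDL is replaced by CC_OP_SARL here:
 * Z is still derived from the untouched CC_DST, while C is taken from
 * the low bit of CC_SRC, i.e. the tested bit shifted into tmp4 above.
 */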
5429 break;
5430 default:
5431 /* Otherwise, generate EFLAGS and replace the C bit. */
5432 gen_compute_eflags(s);
5022f28f 5433 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
dc1823ce
RH
5434 ctz32(CC_C), 1);
5435 break;
2c0262af
FB
5436 }
5437 break;
321c5351
RH
5438 case 0x1bc: /* bsf / tzcnt */
5439 case 0x1bd: /* bsr / lzcnt */
ab4e4aec 5440 ot = dflag;
e3af7c78 5441 modrm = x86_ldub_code(env, s);
bbdb4237 5442 reg = ((modrm >> 3) & 7) | REX_R(s);
321c5351 5443 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
c66f9727 5444 gen_extu(ot, s->T0);
321c5351
RH
5445
5446 /* Note that lzcnt and tzcnt are in different extensions. */
5447 if ((prefixes & PREFIX_REPZ)
5448 && (b & 1
5449 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
5450 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
5451 int size = 8 << ot;
e5143c90 5452 /* For lzcnt/tzcnt, C bit is defined related to the input. */
c66f9727 5453 tcg_gen_mov_tl(cpu_cc_src, s->T0);
321c5351
RH
5454 if (b & 1) {
5455 /* For lzcnt, reduce the target_ulong result by the
5456 number of zeros that we expect to find at the top. */
c66f9727
EC
5457 tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
5458 tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
6191b059 5459 } else {
e5143c90 5460 /* For tzcnt, a zero input must return the operand size. */
c66f9727 5461 tcg_gen_ctzi_tl(s->T0, s->T0, size);
6191b059 5462 }
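/*
 * Worked example: a 16-bit lzcnt of 0x0001 on a 64-bit target_ulong
 * computes clz = 63, then subtracts 64 - 16 = 48, giving the
 * architectural result of 15.
 */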
e5143c90 5463 /* For lzcnt/tzcnt, Z bit is defined related to the result. */
c66f9727 5464 gen_op_update1_cc(s);
321c5351
RH
5465 set_cc_op(s, CC_OP_BMILGB + ot);
5466 } else {
5467 /* For bsr/bsf, only the Z bit is defined and it is related
5468 to the input and not the result. */
c66f9727 5469 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
321c5351 5470 set_cc_op(s, CC_OP_LOGICB + ot);
e5143c90
RH
5471
5472 /* ??? The manual says that the output is undefined when the
5473 input is zero, but real hardware leaves it unchanged, and
5474 real programs appear to depend on that. Accomplish this
5475 by passing the output as the value to return upon zero. */
321c5351
RH
5476 if (b & 1) {
5477 /* For bsr, return the bit index of the first 1 bit,
5478 not the count of leading zeros. */
b48597b0
EC
5479 tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
5480 tcg_gen_clz_tl(s->T0, s->T0, s->T1);
c66f9727 5481 tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
321c5351 5482 } else {
c66f9727 5483 tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
321c5351 5484 }
6191b059 5485 }
1dbe15ef 5486 gen_op_mov_reg_v(s, ot, reg, s->T0);
2c0262af
FB
5487 break;
5488 /************************/
5489 /* bcd */
5490 case 0x27: /* daa */
14ce26e7
FB
5491 if (CODE64(s))
5492 goto illegal_op;
773cdfcc 5493 gen_update_cc_op(s);
ad75a51e 5494 gen_helper_daa(tcg_env);
3ca51d07 5495 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
5496 break;
5497 case 0x2f: /* das */
14ce26e7
FB
5498 if (CODE64(s))
5499 goto illegal_op;
773cdfcc 5500 gen_update_cc_op(s);
ad75a51e 5501 gen_helper_das(tcg_env);
3ca51d07 5502 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
5503 break;
5504 case 0x37: /* aaa */
14ce26e7
FB
5505 if (CODE64(s))
5506 goto illegal_op;
773cdfcc 5507 gen_update_cc_op(s);
ad75a51e 5508 gen_helper_aaa(tcg_env);
3ca51d07 5509 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
5510 break;
5511 case 0x3f: /* aas */
14ce26e7
FB
5512 if (CODE64(s))
5513 goto illegal_op;
773cdfcc 5514 gen_update_cc_op(s);
ad75a51e 5515 gen_helper_aas(tcg_env);
3ca51d07 5516 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
5517 break;
5518 case 0xd4: /* aam */
14ce26e7
FB
5519 if (CODE64(s))
5520 goto illegal_op;
e3af7c78 5521 val = x86_ldub_code(env, s);
b6d7c3db 5522 if (val == 0) {
52236550 5523 gen_exception(s, EXCP00_DIVZ);
b6d7c3db 5524 } else {
ad75a51e 5525 gen_helper_aam(tcg_env, tcg_constant_i32(val));
3ca51d07 5526 set_cc_op(s, CC_OP_LOGICB);
b6d7c3db 5527 }
2c0262af
FB
5528 break;
5529 case 0xd5: /* aad */
14ce26e7
FB
5530 if (CODE64(s))
5531 goto illegal_op;
e3af7c78 5532 val = x86_ldub_code(env, s);
ad75a51e 5533 gen_helper_aad(tcg_env, tcg_constant_i32(val));
3ca51d07 5534 set_cc_op(s, CC_OP_LOGICB);
2c0262af
FB
5535 break;
5536 /************************/
5537 /* misc */
5538 case 0x90: /* nop */
ab1f142b 5539 /* XXX: correct lock test for all insn */
7418027e 5540 if (prefixes & PREFIX_LOCK) {
ab1f142b 5541 goto illegal_op;
7418027e
RH
5542 }
5543 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
5544 if (REX_B(s)) {
5545 goto do_xchg_reg_eax;
5546 }
0573fbfc 5547 if (prefixes & PREFIX_REPZ) {
81f3053b 5548 gen_update_cc_op(s);
65e4af23 5549 gen_update_eip_cur(s);
ad75a51e 5550 gen_helper_pause(tcg_env, cur_insn_len_i32(s));
6cf147aa 5551 s->base.is_jmp = DISAS_NORETURN;
0573fbfc 5552 }
2c0262af
FB
5553 break;
5554 case 0x9b: /* fwait */
5fafdf24 5555 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
7eee2a50 5556 (HF_MP_MASK | HF_TS_MASK)) {
52236550 5557 gen_exception(s, EXCP07_PREX);
2ee73ac3 5558 } else {
c1f27a0c
PB
5559 /* needs to be treated as I/O because of ferr_irq */
5560 translator_io_start(&s->base);
ad75a51e 5561 gen_helper_fwait(tcg_env);
7eee2a50 5562 }
2c0262af
FB
5563 break;
5564 case 0xcc: /* int3 */
8ed6c985 5565 gen_interrupt(s, EXCP03_INT3);
2c0262af
FB
5566 break;
5567 case 0xcd: /* int N */
e3af7c78 5568 val = x86_ldub_code(env, s);
aa9f21b1 5569 if (check_vm86_iopl(s)) {
8ed6c985 5570 gen_interrupt(s, val);
f115e911 5571 }
2c0262af
FB
5572 break;
5573 case 0xce: /* into */
14ce26e7
FB
5574 if (CODE64(s))
5575 goto illegal_op;
773cdfcc 5576 gen_update_cc_op(s);
65e4af23 5577 gen_update_eip_cur(s);
ad75a51e 5578 gen_helper_into(tcg_env, cur_insn_len_i32(s));
2c0262af 5579 break;
0b97134b 5580#ifdef WANT_ICEBP
2c0262af 5581 case 0xf1: /* icebp (undocumented, exits to external debugger) */
b53605db 5582 gen_svm_check_intercept(s, SVM_EXIT_ICEBP);
ed3c4739 5583 gen_debug(s);
2c0262af 5584 break;
0b97134b 5585#endif
2c0262af 5586 case 0xfa: /* cli */
ca7874c2 5587 if (check_iopl(s)) {
63179330 5588 gen_reset_eflags(s, IF_MASK);
2c0262af
FB
5589 }
5590 break;
5591 case 0xfb: /* sti */
ca7874c2 5592 if (check_iopl(s)) {
63179330 5593 gen_set_eflags(s, IF_MASK);
f083d92c 5594 /* interrupts are enabled only after the first insn following sti */
09e99df4 5595 gen_update_eip_next(s);
f083d92c 5596 gen_eob_inhibit_irq(s, true);
2c0262af
FB
5597 }
5598 break;
5599 case 0x62: /* bound */
14ce26e7
FB
5600 if (CODE64(s))
5601 goto illegal_op;
ab4e4aec 5602 ot = dflag;
e3af7c78 5603 modrm = x86_ldub_code(env, s);
2c0262af
FB
5604 reg = (modrm >> 3) & 7;
5605 mod = (modrm >> 6) & 3;
5606 if (mod == 3)
5607 goto illegal_op;
1dbe15ef 5608 gen_op_mov_v_reg(s, ot, s->T0, reg);
4eeb3939 5609 gen_lea_modrm(env, s, modrm);
6bd48f6f 5610 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
4ba9938c 5611 if (ot == MO_16) {
ad75a51e 5612 gen_helper_boundw(tcg_env, s->A0, s->tmp2_i32);
92fc4b58 5613 } else {
ad75a51e 5614 gen_helper_boundl(tcg_env, s->A0, s->tmp2_i32);
92fc4b58 5615 }
2c0262af
FB
5616 break;
5617 case 0x1c8 ... 0x1cf: /* bswap reg */
14ce26e7
FB
5618 reg = (b & 7) | REX_B(s);
5619#ifdef TARGET_X86_64
ab4e4aec 5620 if (dflag == MO_64) {
94fdf987
RH
5621 tcg_gen_bswap64_i64(cpu_regs[reg], cpu_regs[reg]);
5622 break;
14ce26e7 5623 }
94fdf987
RH
5624#endif
5625 tcg_gen_bswap32_tl(cpu_regs[reg], cpu_regs[reg], TCG_BSWAP_OZ);
2c0262af
FB
5626 break;
5627 case 0xd6: /* salc */
14ce26e7
FB
5628 if (CODE64(s))
5629 goto illegal_op;
c66f9727
EC
5630 gen_compute_eflags_c(s, s->T0);
5631 tcg_gen_neg_tl(s->T0, s->T0);
1dbe15ef 5632 gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
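/* i.e. AL = CF ? 0xff : 0x00: negating the 0/1 carry value yields
   the all-ones or all-zeros byte. */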
2c0262af
FB
5633 break;
5634 case 0xe0: /* loopnz */
5635 case 0xe1: /* loopz */
2c0262af
FB
5636 case 0xe2: /* loop */
5637 case 0xe3: /* jecxz */
14ce26e7 5638 {
2255da49
RH
5639 TCGLabel *l1, *l2;
5640 int diff = (int8_t)insn_get(env, s, MO_8);
3b46e624 5641
14ce26e7
FB
5642 l1 = gen_new_label();
5643 l2 = gen_new_label();
3cb3a772 5644 gen_update_cc_op(s);
14ce26e7 5645 b &= 3;
6e0d8677
FB
5646 switch(b) {
5647 case 0: /* loopnz */
5648 case 1: /* loopz */
fbd80f02 5649 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
0ebacb5d 5650 gen_op_jz_ecx(s, l2);
5bdb91b0 5651 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
6e0d8677
FB
5652 break;
5653 case 2: /* loop */
fbd80f02 5654 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
0ebacb5d 5655 gen_op_jnz_ecx(s, l1);
6e0d8677
FB
5656 break;
5657 default:
5658 case 3: /* jcxz */
0ebacb5d 5659 gen_op_jz_ecx(s, l1);
6e0d8677 5660 break;
14ce26e7
FB
5661 }
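/*
 * Two exits are generated: l1 is the taken branch (relative jump by
 * diff), l2 falls through to the next instruction.  ECX is tested at
 * the current address size, and loopz/loopnz additionally consult ZF
 * via gen_jcc1 above.
 */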
5662
2255da49
RH
5663 gen_set_label(l2);
5664 gen_jmp_rel_csize(s, 0, 1);
6e0d8677 5665
14ce26e7 5666 gen_set_label(l1);
2255da49 5667 gen_jmp_rel(s, dflag, diff, 0);
14ce26e7 5668 }
2c0262af
FB
5669 break;
5670 case 0x130: /* wrmsr */
5671 case 0x132: /* rdmsr */
bc19f505 5672 if (check_cpl0(s)) {
773cdfcc 5673 gen_update_cc_op(s);
65e4af23 5674 gen_update_eip_cur(s);
0573fbfc 5675 if (b & 2) {
ad75a51e 5676 gen_helper_rdmsr(tcg_env);
0573fbfc 5677 } else {
ad75a51e 5678 gen_helper_wrmsr(tcg_env);
634a4051 5679 s->base.is_jmp = DISAS_EOB_NEXT;
0573fbfc 5680 }
2c0262af
FB
5681 }
5682 break;
5683 case 0x131: /* rdtsc */
773cdfcc 5684 gen_update_cc_op(s);
65e4af23 5685 gen_update_eip_cur(s);
dfd1b812 5686 translator_io_start(&s->base);
ad75a51e 5687 gen_helper_rdtsc(tcg_env);
2c0262af 5688 break;
df01e0fc 5689 case 0x133: /* rdpmc */
773cdfcc 5690 gen_update_cc_op(s);
65e4af23 5691 gen_update_eip_cur(s);
ad75a51e 5692 gen_helper_rdpmc(tcg_env);
b82055ae 5693 s->base.is_jmp = DISAS_NORETURN;
df01e0fc 5694 break;
023fe10d 5695 case 0x134: /* sysenter */
75a02adf
PB
5696 /* For AMD SYSENTER is not valid in long mode */
5697 if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
14ce26e7 5698 goto illegal_op;
75a02adf 5699 }
d75f9129 5700 if (!PE(s)) {
6bd99586 5701 gen_exception_gpf(s);
023fe10d 5702 } else {
ad75a51e 5703 gen_helper_sysenter(tcg_env);
6424ac8e 5704 s->base.is_jmp = DISAS_EOB_ONLY;
023fe10d
FB
5705 }
5706 break;
5707 case 0x135: /* sysexit */
75a02adf
PB
5708 /* For AMD SYSEXIT is not valid in long mode */
5709 if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
14ce26e7 5710 goto illegal_op;
75a02adf 5711 }
53b9b4cc 5712 if (!PE(s) || CPL(s) != 0) {
6bd99586 5713 gen_exception_gpf(s);
023fe10d 5714 } else {
ad75a51e 5715 gen_helper_sysexit(tcg_env, tcg_constant_i32(dflag - 1));
6424ac8e 5716 s->base.is_jmp = DISAS_EOB_ONLY;
023fe10d
FB
5717 }
5718 break;
14ce26e7 5719 case 0x105: /* syscall */
fd5dcb1c
PB
5720 /* For Intel SYSCALL is only valid in long mode */
5721 if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
5722 goto illegal_op;
5723 }
728d803b 5724 gen_update_cc_op(s);
65e4af23 5725 gen_update_eip_cur(s);
ad75a51e 5726 gen_helper_syscall(tcg_env, cur_insn_len_i32(s));
410e9814
DE
5727 /* TF handling for the syscall insn is different. The TF bit is checked
5728 after the syscall insn completes. This allows #DB to not be
5729 generated after one has entered CPL0 if TF is set in FMASK. */
5730 gen_eob_worker(s, false, true);
14ce26e7
FB
5731 break;
5732 case 0x107: /* sysret */
fd5dcb1c
PB
5733 /* For Intel SYSRET is only valid in long mode */
5734 if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
5735 goto illegal_op;
5736 }
53b9b4cc 5737 if (!PE(s) || CPL(s) != 0) {
6bd99586 5738 gen_exception_gpf(s);
14ce26e7 5739 } else {
ad75a51e 5740 gen_helper_sysret(tcg_env, tcg_constant_i32(dflag - 1));
aba9d61e 5741 /* condition codes are modified only in long mode */
73e90dc4 5742 if (LMA(s)) {
3ca51d07
RH
5743 set_cc_op(s, CC_OP_EFLAGS);
5744 }
c52ab08a
DE
5745 /* TF handling for the sysret insn is different. The TF bit is
5746 checked after the sysret insn completes. This allows #DB to be
5747 generated "as if" the syscall insn in userspace has just
5748 completed. */
5749 gen_eob_worker(s, false, true);
14ce26e7
FB
5750 }
5751 break;
2c0262af 5752 case 0x1a2: /* cpuid */
773cdfcc 5753 gen_update_cc_op(s);
65e4af23 5754 gen_update_eip_cur(s);
ad75a51e 5755 gen_helper_cpuid(tcg_env);
2c0262af
FB
5756 break;
5757 case 0xf4: /* hlt */
bc19f505 5758 if (check_cpl0(s)) {
773cdfcc 5759 gen_update_cc_op(s);
65e4af23 5760 gen_update_eip_cur(s);
ad75a51e 5761 gen_helper_hlt(tcg_env, cur_insn_len_i32(s));
6cf147aa 5762 s->base.is_jmp = DISAS_NORETURN;
2c0262af
FB
5763 }
5764 break;
5765 case 0x100:
e3af7c78 5766 modrm = x86_ldub_code(env, s);
2c0262af
FB
5767 mod = (modrm >> 6) & 3;
5768 op = (modrm >> 3) & 7;
5769 switch(op) {
5770 case 0: /* sldt */
f8a35846 5771 if (!PE(s) || VM86(s))
f115e911 5772 goto illegal_op;
637f1ee3
GW
5773 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5774 break;
5775 }
b53605db 5776 gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
ad75a51e 5777 tcg_gen_ld32u_tl(s->T0, tcg_env,
1d1cc4d0 5778 offsetof(CPUX86State, ldt.selector));
ab4e4aec 5779 ot = mod == 3 ? dflag : MO_16;
0af10c86 5780 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
5781 break;
5782 case 2: /* lldt */
f8a35846 5783 if (!PE(s) || VM86(s))
f115e911 5784 goto illegal_op;
bc19f505 5785 if (check_cpl0(s)) {
b53605db 5786 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
4ba9938c 5787 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6bd48f6f 5788 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
ad75a51e 5789 gen_helper_lldt(tcg_env, s->tmp2_i32);
2c0262af
FB
5790 }
5791 break;
5792 case 1: /* str */
f8a35846 5793 if (!PE(s) || VM86(s))
f115e911 5794 goto illegal_op;
637f1ee3
GW
5795 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5796 break;
5797 }
b53605db 5798 gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
ad75a51e 5799 tcg_gen_ld32u_tl(s->T0, tcg_env,
1d1cc4d0 5800 offsetof(CPUX86State, tr.selector));
ab4e4aec 5801 ot = mod == 3 ? dflag : MO_16;
0af10c86 5802 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
5803 break;
5804 case 3: /* ltr */
f8a35846 5805 if (!PE(s) || VM86(s))
f115e911 5806 goto illegal_op;
bc19f505 5807 if (check_cpl0(s)) {
b53605db 5808 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
4ba9938c 5809 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6bd48f6f 5810 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
ad75a51e 5811 gen_helper_ltr(tcg_env, s->tmp2_i32);
2c0262af
FB
5812 }
5813 break;
5814 case 4: /* verr */
5815 case 5: /* verw */
f8a35846 5816 if (!PE(s) || VM86(s))
f115e911 5817 goto illegal_op;
4ba9938c 5818 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
773cdfcc 5819 gen_update_cc_op(s);
2999a0b2 5820 if (op == 4) {
ad75a51e 5821 gen_helper_verr(tcg_env, s->T0);
2999a0b2 5822 } else {
ad75a51e 5823 gen_helper_verw(tcg_env, s->T0);
2999a0b2 5824 }
3ca51d07 5825 set_cc_op(s, CC_OP_EFLAGS);
f115e911 5826 break;
2c0262af 5827 default:
b9f9c5b4 5828 goto unknown_op;
2c0262af
FB
5829 }
5830 break;
1906b2af 5831
2c0262af 5832 case 0x101:
e3af7c78 5833 modrm = x86_ldub_code(env, s);
1906b2af 5834 switch (modrm) {
880f8486 5835 CASE_MODRM_MEM_OP(0): /* sgdt */
637f1ee3
GW
5836 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5837 break;
5838 }
b53605db 5839 gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
4eeb3939 5840 gen_lea_modrm(env, s, modrm);
c66f9727 5841 tcg_gen_ld32u_tl(s->T0,
ad75a51e 5842 tcg_env, offsetof(CPUX86State, gdt.limit));
c66f9727 5843 gen_op_st_v(s, MO_16, s->T0, s->A0);
aba9d61e 5844 gen_add_A0_im(s, 2);
ad75a51e 5845 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
ab4e4aec 5846 if (dflag == MO_16) {
c66f9727 5847 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
f0706f0c 5848 }
c66f9727 5849 gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
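/*
 * Resulting memory image: a 16-bit limit followed by the base, stored
 * as 32 bits here or 64 bits in long mode (CODE64(s) + MO_32 == MO_64);
 * with a 16-bit operand size only the low 24 bits of the base are kept.
 */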
2c0262af 5850 break;
1906b2af
RH
5851
5852 case 0xc8: /* monitor */
01b9d8c1 5853 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
1906b2af 5854 goto illegal_op;
3d7374c5 5855 }
1906b2af 5856 gen_update_cc_op(s);
65e4af23 5857 gen_update_eip_cur(s);
6b672b5d 5858 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
1906b2af 5859 gen_add_A0_ds_seg(s);
ad75a51e 5860 gen_helper_monitor(tcg_env, s->A0);
3d7374c5 5861 break;
1906b2af
RH
5862
5863 case 0xc9: /* mwait */
01b9d8c1 5864 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
1906b2af
RH
5865 goto illegal_op;
5866 }
5867 gen_update_cc_op(s);
65e4af23 5868 gen_update_eip_cur(s);
ad75a51e 5869 gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
b82055ae 5870 s->base.is_jmp = DISAS_NORETURN;
1906b2af
RH
5871 break;
5872
5873 case 0xca: /* clac */
5874 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
01b9d8c1 5875 || CPL(s) != 0) {
1906b2af
RH
5876 goto illegal_op;
5877 }
63179330 5878 gen_reset_eflags(s, AC_MASK);
634a4051 5879 s->base.is_jmp = DISAS_EOB_NEXT;
1906b2af
RH
5880 break;
5881
5882 case 0xcb: /* stac */
5883 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
01b9d8c1 5884 || CPL(s) != 0) {
1906b2af
RH
5885 goto illegal_op;
5886 }
63179330 5887 gen_set_eflags(s, AC_MASK);
634a4051 5888 s->base.is_jmp = DISAS_EOB_NEXT;
1906b2af
RH
5889 break;
5890
880f8486 5891 CASE_MODRM_MEM_OP(1): /* sidt */
637f1ee3
GW
5892 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5893 break;
5894 }
b53605db 5895 gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
1906b2af 5896 gen_lea_modrm(env, s, modrm);
ad75a51e 5897 tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
c66f9727 5898 gen_op_st_v(s, MO_16, s->T0, s->A0);
1906b2af 5899 gen_add_A0_im(s, 2);
ad75a51e 5900 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
1906b2af 5901 if (dflag == MO_16) {
c66f9727 5902 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
1906b2af 5903 }
c66f9727 5904 gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
1906b2af
RH
5905 break;
5906
19dc85db
RH
5907 case 0xd0: /* xgetbv */
5908 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5909 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5910 | PREFIX_REPZ | PREFIX_REPNZ))) {
5911 goto illegal_op;
5912 }
6bd48f6f 5913 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
ad75a51e 5914 gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
776678b2 5915 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
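/*
 * ECX selects the extended control register, and the 64-bit result is
 * split back into EDX:EAX (low half to EAX, high half to EDX); xsetbv
 * below performs the reverse concatenation.
 */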
19dc85db
RH
5916 break;
5917
5918 case 0xd1: /* xsetbv */
5919 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5920 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5921 | PREFIX_REPZ | PREFIX_REPNZ))) {
5922 goto illegal_op;
5923 }
24b34590 5924 gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
bc19f505 5925 if (!check_cpl0(s)) {
19dc85db
RH
5926 break;
5927 }
776678b2 5928 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
19dc85db 5929 cpu_regs[R_EDX]);
6bd48f6f 5930 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
ad75a51e 5931 gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
19dc85db 5932 /* End TB because translation flags may change. */
634a4051 5933 s->base.is_jmp = DISAS_EOB_NEXT;
19dc85db
RH
5934 break;
5935
1906b2af 5936 case 0xd8: /* VMRUN */
5d223889 5937 if (!SVME(s) || !PE(s)) {
1906b2af
RH
5938 goto illegal_op;
5939 }
bc19f505 5940 if (!check_cpl0(s)) {
1906b2af 5941 break;
2c0262af 5942 }
1906b2af 5943 gen_update_cc_op(s);
65e4af23 5944 gen_update_eip_cur(s);
ad75a51e 5945 gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
ad1d6f07 5946 cur_insn_len_i32(s));
07ea28b4 5947 tcg_gen_exit_tb(NULL, 0);
6cf147aa 5948 s->base.is_jmp = DISAS_NORETURN;
2c0262af 5949 break;
1906b2af
RH
5950
5951 case 0xd9: /* VMMCALL */
5d223889 5952 if (!SVME(s)) {
1906b2af
RH
5953 goto illegal_op;
5954 }
5955 gen_update_cc_op(s);
65e4af23 5956 gen_update_eip_cur(s);
ad75a51e 5957 gen_helper_vmmcall(tcg_env);
1906b2af
RH
5958 break;
5959
5960 case 0xda: /* VMLOAD */
5d223889 5961 if (!SVME(s) || !PE(s)) {
1906b2af
RH
5962 goto illegal_op;
5963 }
bc19f505 5964 if (!check_cpl0(s)) {
1906b2af
RH
5965 break;
5966 }
5967 gen_update_cc_op(s);
65e4af23 5968 gen_update_eip_cur(s);
ad75a51e 5969 gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
1906b2af
RH
5970 break;
5971
5972 case 0xdb: /* VMSAVE */
5d223889 5973 if (!SVME(s) || !PE(s)) {
1906b2af
RH
5974 goto illegal_op;
5975 }
bc19f505 5976 if (!check_cpl0(s)) {
1906b2af
RH
5977 break;
5978 }
5979 gen_update_cc_op(s);
65e4af23 5980 gen_update_eip_cur(s);
ad75a51e 5981 gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
1906b2af
RH
5982 break;
5983
5984 case 0xdc: /* STGI */
5d223889 5985 if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
d75f9129 5986 || !PE(s)) {
1906b2af
RH
5987 goto illegal_op;
5988 }
bc19f505 5989 if (!check_cpl0(s)) {
1906b2af
RH
5990 break;
5991 }
5992 gen_update_cc_op(s);
ad75a51e 5993 gen_helper_stgi(tcg_env);
634a4051 5994 s->base.is_jmp = DISAS_EOB_NEXT;
1906b2af
RH
5995 break;
5996
5997 case 0xdd: /* CLGI */
5d223889 5998 if (!SVME(s) || !PE(s)) {
1906b2af
RH
5999 goto illegal_op;
6000 }
bc19f505 6001 if (!check_cpl0(s)) {
1906b2af
RH
6002 break;
6003 }
6004 gen_update_cc_op(s);
65e4af23 6005 gen_update_eip_cur(s);
ad75a51e 6006 gen_helper_clgi(tcg_env);
1906b2af
RH
6007 break;
6008
6009 case 0xde: /* SKINIT */
5d223889 6010 if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
d75f9129 6011 || !PE(s)) {
1906b2af
RH
6012 goto illegal_op;
6013 }
b53605db 6014 gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
e6aeb948
RH
6015 /* If not intercepted, not implemented -- raise #UD. */
6016 goto illegal_op;
1906b2af
RH
6017
6018 case 0xdf: /* INVLPGA */
5d223889 6019 if (!SVME(s) || !PE(s)) {
1906b2af
RH
6020 goto illegal_op;
6021 }
bc19f505 6022 if (!check_cpl0(s)) {
1906b2af
RH
6023 break;
6024 }
35e5a5d5
RH
6025 gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
6026 if (s->aflag == MO_64) {
6027 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
6028 } else {
6029 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
6030 }
ad75a51e 6031 gen_helper_flush_page(tcg_env, s->A0);
634a4051 6032 s->base.is_jmp = DISAS_EOB_NEXT;
1906b2af
RH
6033 break;
6034
880f8486 6035 CASE_MODRM_MEM_OP(2): /* lgdt */
bc19f505 6036 if (!check_cpl0(s)) {
1906b2af
RH
6037 break;
6038 }
b53605db 6039 gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
1906b2af 6040 gen_lea_modrm(env, s, modrm);
b48597b0 6041 gen_op_ld_v(s, MO_16, s->T1, s->A0);
1906b2af 6042 gen_add_A0_im(s, 2);
c66f9727 6043 gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
1906b2af 6044 if (dflag == MO_16) {
c66f9727 6045 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
1906b2af 6046 }
ad75a51e
RH
6047 tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
6048 tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
1906b2af
RH
6049 break;
6050
880f8486 6051 CASE_MODRM_MEM_OP(3): /* lidt */
bc19f505 6052 if (!check_cpl0(s)) {
1906b2af
RH
6053 break;
6054 }
b53605db 6055 gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
1906b2af 6056 gen_lea_modrm(env, s, modrm);
b48597b0 6057 gen_op_ld_v(s, MO_16, s->T1, s->A0);
1906b2af 6058 gen_add_A0_im(s, 2);
c66f9727 6059 gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
1906b2af 6060 if (dflag == MO_16) {
c66f9727 6061 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
1906b2af 6062 }
ad75a51e
RH
6063 tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
6064 tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
1906b2af
RH
6065 break;
6066
880f8486 6067 CASE_MODRM_OP(4): /* smsw */
637f1ee3
GW
6068 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
6069 break;
6070 }
b53605db 6071 gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
ad75a51e 6072 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
c0c84452
PB
6073 /*
6074 * In 32-bit mode, the higher 16 bits of the destination
6075 * register are undefined. In practice CR0[31:0] is stored
6076 * just like in 64-bit mode.
6077 */
6078 mod = (modrm >> 6) & 3;
6079 ot = (mod != 3 ? MO_16 : s->dflag);
a657f79e 6080 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af 6081 break;
0f70ed47
PB
6082 case 0xee: /* rdpkru */
6083 if (prefixes & PREFIX_LOCK) {
6084 goto illegal_op;
6085 }
6bd48f6f 6086 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
ad75a51e 6087 gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
776678b2 6088 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
0f70ed47
PB
6089 break;
6090 case 0xef: /* wrpkru */
6091 if (prefixes & PREFIX_LOCK) {
6092 goto illegal_op;
6093 }
776678b2 6094 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
0f70ed47 6095 cpu_regs[R_EDX]);
6bd48f6f 6096 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
ad75a51e 6097 gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
0f70ed47 6098 break;
7eff2e7c 6099
880f8486 6100 CASE_MODRM_OP(6): /* lmsw */
bc19f505 6101 if (!check_cpl0(s)) {
1906b2af 6102 break;
2c0262af 6103 }
b53605db 6104 gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
1906b2af 6105 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7eff2e7c
RH
6106 /*
6107 * Only the 4 lower bits of CR0 are modified.
6108 * PE cannot be set to zero if already set to one.
6109 */
ad75a51e 6110 tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
7eff2e7c
RH
6111 tcg_gen_andi_tl(s->T0, s->T0, 0xf);
6112 tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
6113 tcg_gen_or_tl(s->T0, s->T0, s->T1);
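/*
 * Net effect of the masking above: MP/EM/TS (bits 1-3) come from the
 * operand, the old PE bit is OR'ed back in so it can be set but never
 * cleared, and CR0 bits 4 and up are preserved unchanged.
 */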
ad75a51e 6114 gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
634a4051 6115 s->base.is_jmp = DISAS_EOB_NEXT;
2c0262af 6116 break;
1906b2af 6117
880f8486 6118 CASE_MODRM_MEM_OP(7): /* invlpg */
bc19f505 6119 if (!check_cpl0(s)) {
1906b2af
RH
6120 break;
6121 }
35e5a5d5 6122 gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
1906b2af 6123 gen_lea_modrm(env, s, modrm);
ad75a51e 6124 gen_helper_flush_page(tcg_env, s->A0);
634a4051 6125 s->base.is_jmp = DISAS_EOB_NEXT;
1906b2af
RH
6126 break;
6127
6128 case 0xf8: /* swapgs */
6129#ifdef TARGET_X86_64
6130 if (CODE64(s)) {
bc19f505 6131 if (check_cpl0(s)) {
c66f9727 6132 tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
ad75a51e 6133 tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
1906b2af 6134 offsetof(CPUX86State, kernelgsbase));
ad75a51e 6135 tcg_gen_st_tl(s->T0, tcg_env,
1906b2af 6136 offsetof(CPUX86State, kernelgsbase));
1b050077 6137 }
1906b2af
RH
6138 break;
6139 }
3558f805 6140#endif
1906b2af
RH
6141 goto illegal_op;
6142
6143 case 0xf9: /* rdtscp */
6144 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
6145 goto illegal_op;
6146 }
6147 gen_update_cc_op(s);
65e4af23 6148 gen_update_eip_cur(s);
dfd1b812 6149 translator_io_start(&s->base);
ad75a51e
RH
6150 gen_helper_rdtsc(tcg_env);
6151 gen_helper_rdpid(s->T0, tcg_env);
6750485b 6152 gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
2c0262af 6153 break;
1906b2af 6154
2c0262af 6155 default:
b9f9c5b4 6156 goto unknown_op;
2c0262af
FB
6157 }
6158 break;
1906b2af 6159
3415a4dd 6160 case 0x108: /* invd */
431c51e9 6161 case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */
bc19f505 6162 if (check_cpl0(s)) {
4d714d1a 6163 gen_svm_check_intercept(s, (b & 1) ? SVM_EXIT_WBINVD : SVM_EXIT_INVD);
3415a4dd
FB
6164 /* nothing to do */
6165 }
6166 break;
14ce26e7
FB
6167 case 0x63: /* arpl or movslS (x86_64) */
6168#ifdef TARGET_X86_64
6169 if (CODE64(s)) {
6170 int d_ot;
6171 /* d_ot is the size of destination */
ab4e4aec 6172 d_ot = dflag;
14ce26e7 6173
e3af7c78 6174 modrm = x86_ldub_code(env, s);
bbdb4237 6175 reg = ((modrm >> 3) & 7) | REX_R(s);
14ce26e7
FB
6176 mod = (modrm >> 6) & 3;
6177 rm = (modrm & 7) | REX_B(s);
3b46e624 6178
14ce26e7 6179 if (mod == 3) {
1dbe15ef 6180 gen_op_mov_v_reg(s, MO_32, s->T0, rm);
14ce26e7 6181 /* sign extend */
4ba9938c 6182 if (d_ot == MO_64) {
c66f9727 6183 tcg_gen_ext32s_tl(s->T0, s->T0);
4ba9938c 6184 }
1dbe15ef 6185 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
14ce26e7 6186 } else {
4eeb3939 6187 gen_lea_modrm(env, s, modrm);
c66f9727 6188 gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0);
1dbe15ef 6189 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
14ce26e7 6190 }
5fafdf24 6191 } else
14ce26e7
FB
6192#endif
6193 {
42a268c2 6194 TCGLabel *label1;
3a5d1773 6195 TCGv t0, t1, t2;
1e4840bf 6196
f8a35846 6197 if (!PE(s) || VM86(s))
14ce26e7 6198 goto illegal_op;
3a5d1773
RH
6199 t0 = tcg_temp_new();
6200 t1 = tcg_temp_new();
6201 t2 = tcg_temp_new();
4ba9938c 6202 ot = MO_16;
e3af7c78 6203 modrm = x86_ldub_code(env, s);
14ce26e7
FB
6204 reg = (modrm >> 3) & 7;
6205 mod = (modrm >> 6) & 3;
6206 rm = modrm & 7;
6207 if (mod != 3) {
4eeb3939 6208 gen_lea_modrm(env, s, modrm);
6b672b5d 6209 gen_op_ld_v(s, ot, t0, s->A0);
14ce26e7 6210 } else {
1dbe15ef 6211 gen_op_mov_v_reg(s, ot, t0, rm);
14ce26e7 6212 }
1dbe15ef 6213 gen_op_mov_v_reg(s, ot, t1, reg);
fbd80f02 6214 tcg_gen_andi_tl(s->tmp0, t0, 3);
1e4840bf
FB
6215 tcg_gen_andi_tl(t1, t1, 3);
6216 tcg_gen_movi_tl(t2, 0);
3bd7da9e 6217 label1 = gen_new_label();
fbd80f02 6218 tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1);
1e4840bf
FB
6219 tcg_gen_andi_tl(t0, t0, ~3);
6220 tcg_gen_or_tl(t0, t0, t1);
6221 tcg_gen_movi_tl(t2, CC_Z);
3bd7da9e 6222 gen_set_label(label1);
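/*
 * Here t0 holds the (possibly RPL-adjusted) selector and t2 the new
 * ZF value: CC_Z when dest.RPL was raised to src.RPL, 0 otherwise;
 * t2 is merged into cpu_cc_src after the store below.
 */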
14ce26e7 6223 if (mod != 3) {
3a5d1773 6224 gen_op_st_v(s, ot, t0, s->A0);
49d9fdcc 6225 } else {
1dbe15ef 6226 gen_op_mov_reg_v(s, ot, rm, t0);
14ce26e7 6227 }
d229edce 6228 gen_compute_eflags(s);
3bd7da9e 6229 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
1e4840bf 6230 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
f115e911 6231 }
f115e911 6232 break;
2c0262af
FB
6233 case 0x102: /* lar */
6234 case 0x103: /* lsl */
cec6843e 6235 {
42a268c2 6236 TCGLabel *label1;
1e4840bf 6237 TCGv t0;
f8a35846 6238 if (!PE(s) || VM86(s))
cec6843e 6239 goto illegal_op;
ab4e4aec 6240 ot = dflag != MO_16 ? MO_32 : MO_16;
e3af7c78 6241 modrm = x86_ldub_code(env, s);
bbdb4237 6242 reg = ((modrm >> 3) & 7) | REX_R(s);
4ba9938c 6243 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
3a5d1773 6244 t0 = tcg_temp_new();
773cdfcc 6245 gen_update_cc_op(s);
2999a0b2 6246 if (b == 0x102) {
ad75a51e 6247 gen_helper_lar(t0, tcg_env, s->T0);
2999a0b2 6248 } else {
ad75a51e 6249 gen_helper_lsl(t0, tcg_env, s->T0);
2999a0b2 6250 }
fbd80f02 6251 tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
cec6843e 6252 label1 = gen_new_label();
fbd80f02 6253 tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
1dbe15ef 6254 gen_op_mov_reg_v(s, ot, reg, t0);
cec6843e 6255 gen_set_label(label1);
3ca51d07 6256 set_cc_op(s, CC_OP_EFLAGS);
cec6843e 6257 }
2c0262af
FB
6258 break;
6259 case 0x118:
e3af7c78 6260 modrm = x86_ldub_code(env, s);
2c0262af
FB
6261 mod = (modrm >> 6) & 3;
6262 op = (modrm >> 3) & 7;
6263 switch(op) {
6264 case 0: /* prefetchnta */
 6265 case 1: /* prefetcht0 */
 6266 case 2: /* prefetcht1 */
 6267 case 3: /* prefetcht2 */
6268 if (mod == 3)
6269 goto illegal_op;
26317698 6270 gen_nop_modrm(env, s, modrm);
2c0262af
FB
6271 /* nothing more to do */
6272 break;
e17a36ce 6273 default: /* nop (multi byte) */
0af10c86 6274 gen_nop_modrm(env, s, modrm);
e17a36ce 6275 break;
2c0262af
FB
6276 }
6277 break;
62b58ba5 6278 case 0x11a:
e3af7c78 6279 modrm = x86_ldub_code(env, s);
62b58ba5
RH
6280 if (s->flags & HF_MPX_EN_MASK) {
6281 mod = (modrm >> 6) & 3;
bbdb4237 6282 reg = ((modrm >> 3) & 7) | REX_R(s);
523e28d7
RH
6283 if (prefixes & PREFIX_REPZ) {
6284 /* bndcl */
6285 if (reg >= 4
6286 || (prefixes & PREFIX_LOCK)
6287 || s->aflag == MO_16) {
6288 goto illegal_op;
6289 }
6290 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
6291 } else if (prefixes & PREFIX_REPNZ) {
6292 /* bndcu */
6293 if (reg >= 4
6294 || (prefixes & PREFIX_LOCK)
6295 || s->aflag == MO_16) {
6296 goto illegal_op;
6297 }
6298 TCGv_i64 notu = tcg_temp_new_i64();
6299 tcg_gen_not_i64(notu, cpu_bndu[reg]);
6300 gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
523e28d7 6301 } else if (prefixes & PREFIX_DATA) {
62b58ba5
RH
6302 /* bndmov -- from reg/mem */
6303 if (reg >= 4 || s->aflag == MO_16) {
6304 goto illegal_op;
6305 }
6306 if (mod == 3) {
6307 int reg2 = (modrm & 7) | REX_B(s);
6308 if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
6309 goto illegal_op;
6310 }
6311 if (s->flags & HF_MPX_IU_MASK) {
6312 tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
6313 tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
6314 }
6315 } else {
6316 gen_lea_modrm(env, s, modrm);
6317 if (CODE64(s)) {
6b672b5d 6318 tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
fc313c64 6319 s->mem_index, MO_LEUQ);
6b672b5d
EC
6320 tcg_gen_addi_tl(s->A0, s->A0, 8);
6321 tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
fc313c64 6322 s->mem_index, MO_LEUQ);
62b58ba5 6323 } else {
6b672b5d 6324 tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
62b58ba5 6325 s->mem_index, MO_LEUL);
6b672b5d
EC
6326 tcg_gen_addi_tl(s->A0, s->A0, 4);
6327 tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
62b58ba5
RH
6328 s->mem_index, MO_LEUL);
6329 }
6330 /* bnd registers are now in-use */
6331 gen_set_hflag(s, HF_MPX_IU_MASK);
6332 }
bdd87b3b
RH
6333 } else if (mod != 3) {
6334 /* bndldx */
6335 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6336 if (reg >= 4
6337 || (prefixes & PREFIX_LOCK)
6338 || s->aflag == MO_16
6339 || a.base < -1) {
6340 goto illegal_op;
6341 }
6342 if (a.base >= 0) {
6b672b5d 6343 tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
bdd87b3b 6344 } else {
6b672b5d 6345 tcg_gen_movi_tl(s->A0, 0);
bdd87b3b 6346 }
6b672b5d 6347 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
bdd87b3b 6348 if (a.index >= 0) {
c66f9727 6349 tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
bdd87b3b 6350 } else {
c66f9727 6351 tcg_gen_movi_tl(s->T0, 0);
bdd87b3b
RH
6352 }
6353 if (CODE64(s)) {
ad75a51e
RH
6354 gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
6355 tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
bdd87b3b
RH
6356 offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
6357 } else {
ad75a51e 6358 gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
bdd87b3b
RH
6359 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
6360 tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
6361 }
6362 gen_set_hflag(s, HF_MPX_IU_MASK);
62b58ba5
RH
6363 }
6364 }
6365 gen_nop_modrm(env, s, modrm);
6366 break;
149b427b 6367 case 0x11b:
e3af7c78 6368 modrm = x86_ldub_code(env, s);
149b427b
RH
6369 if (s->flags & HF_MPX_EN_MASK) {
6370 mod = (modrm >> 6) & 3;
bbdb4237 6371 reg = ((modrm >> 3) & 7) | REX_R(s);
149b427b
RH
6372 if (mod != 3 && (prefixes & PREFIX_REPZ)) {
6373 /* bndmk */
6374 if (reg >= 4
6375 || (prefixes & PREFIX_LOCK)
6376 || s->aflag == MO_16) {
6377 goto illegal_op;
6378 }
6379 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6380 if (a.base >= 0) {
6381 tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
6382 if (!CODE64(s)) {
6383 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
6384 }
6385 } else if (a.base == -1) {
 6386 /* no base register: the lower bound is 0 */
6387 tcg_gen_movi_i64(cpu_bndl[reg], 0);
6388 } else {
6389 /* rip-relative generates #ud */
6390 goto illegal_op;
6391 }
20581aad 6392 tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
149b427b 6393 if (!CODE64(s)) {
6b672b5d 6394 tcg_gen_ext32u_tl(s->A0, s->A0);
149b427b 6395 }
6b672b5d 6396 tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
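/*
 * As the tcg_gen_not_tl above suggests, the upper bound is kept in
 * one's-complement form; the bndcu handling under 0f 1a
 * un-complements it (tcg_gen_not_i64) before comparing, while bndcn
 * below compares the stored value directly.
 */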
149b427b
RH
6397 /* bnd registers are now in-use */
6398 gen_set_hflag(s, HF_MPX_IU_MASK);
6399 break;
523e28d7
RH
6400 } else if (prefixes & PREFIX_REPNZ) {
6401 /* bndcn */
6402 if (reg >= 4
6403 || (prefixes & PREFIX_LOCK)
6404 || s->aflag == MO_16) {
6405 goto illegal_op;
6406 }
6407 gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
62b58ba5
RH
6408 } else if (prefixes & PREFIX_DATA) {
6409 /* bndmov -- to reg/mem */
6410 if (reg >= 4 || s->aflag == MO_16) {
6411 goto illegal_op;
6412 }
6413 if (mod == 3) {
6414 int reg2 = (modrm & 7) | REX_B(s);
6415 if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
6416 goto illegal_op;
6417 }
6418 if (s->flags & HF_MPX_IU_MASK) {
6419 tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
6420 tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
6421 }
6422 } else {
6423 gen_lea_modrm(env, s, modrm);
6424 if (CODE64(s)) {
6b672b5d 6425 tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
fc313c64 6426 s->mem_index, MO_LEUQ);
6b672b5d
EC
6427 tcg_gen_addi_tl(s->A0, s->A0, 8);
6428 tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
fc313c64 6429 s->mem_index, MO_LEUQ);
62b58ba5 6430 } else {
6b672b5d 6431 tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
62b58ba5 6432 s->mem_index, MO_LEUL);
6b672b5d
EC
6433 tcg_gen_addi_tl(s->A0, s->A0, 4);
6434 tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
62b58ba5
RH
6435 s->mem_index, MO_LEUL);
6436 }
6437 }
bdd87b3b
RH
6438 } else if (mod != 3) {
6439 /* bndstx */
6440 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6441 if (reg >= 4
6442 || (prefixes & PREFIX_LOCK)
6443 || s->aflag == MO_16
6444 || a.base < -1) {
6445 goto illegal_op;
6446 }
6447 if (a.base >= 0) {
6b672b5d 6448 tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
bdd87b3b 6449 } else {
6b672b5d 6450 tcg_gen_movi_tl(s->A0, 0);
bdd87b3b 6451 }
6b672b5d 6452 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
bdd87b3b 6453 if (a.index >= 0) {
c66f9727 6454 tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
bdd87b3b 6455 } else {
c66f9727 6456 tcg_gen_movi_tl(s->T0, 0);
bdd87b3b
RH
6457 }
6458 if (CODE64(s)) {
ad75a51e 6459 gen_helper_bndstx64(tcg_env, s->A0, s->T0,
bdd87b3b
RH
6460 cpu_bndl[reg], cpu_bndu[reg]);
6461 } else {
ad75a51e 6462 gen_helper_bndstx32(tcg_env, s->A0, s->T0,
bdd87b3b
RH
6463 cpu_bndl[reg], cpu_bndu[reg]);
6464 }
149b427b
RH
6465 }
6466 }
6467 gen_nop_modrm(env, s, modrm);
6468 break;
62b58ba5 6469 case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
e3af7c78 6470 modrm = x86_ldub_code(env, s);
0af10c86 6471 gen_nop_modrm(env, s, modrm);
e17a36ce 6472 break;
7eff2e7c 6473
2c0262af
FB
6474 case 0x120: /* mov reg, crN */
6475 case 0x122: /* mov crN, reg */
7eff2e7c
RH
6476 if (!check_cpl0(s)) {
6477 break;
6478 }
6479 modrm = x86_ldub_code(env, s);
6480 /*
6481 * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6482 * AMD documentation (24594.pdf) and testing of Intel 386 and 486
6483 * processors all show that the mod bits are assumed to be 1's,
6484 * regardless of actual values.
6485 */
6486 rm = (modrm & 7) | REX_B(s);
6487 reg = ((modrm >> 3) & 7) | REX_R(s);
6488 switch (reg) {
6489 case 0:
6490 if ((prefixes & PREFIX_LOCK) &&
ccd59d09
AP
6491 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
6492 reg = 8;
6493 }
7eff2e7c
RH
6494 break;
6495 case 2:
6496 case 3:
6497 case 4:
e18a6ec8 6498 case 8:
7eff2e7c
RH
6499 break;
6500 default:
6501 goto unknown_op;
6502 }
6503 ot = (CODE64(s) ? MO_64 : MO_32);
6504
dfd1b812 6505 translator_io_start(&s->base);
7eff2e7c
RH
6506 if (b & 2) {
6507 gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
6508 gen_op_mov_v_reg(s, ot, s->T0, rm);
ad75a51e 6509 gen_helper_write_crN(tcg_env, tcg_constant_i32(reg), s->T0);
634a4051 6510 s->base.is_jmp = DISAS_EOB_NEXT;
7eff2e7c
RH
6511 } else {
6512 gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
ad75a51e 6513 gen_helper_read_crN(s->T0, tcg_env, tcg_constant_i32(reg));
7eff2e7c 6514 gen_op_mov_reg_v(s, ot, rm, s->T0);
2c0262af
FB
6515 }
6516 break;
7eff2e7c 6517
2c0262af
FB
6518 case 0x121: /* mov reg, drN */
6519 case 0x123: /* mov drN, reg */
bc19f505 6520 if (check_cpl0(s)) {
e3af7c78 6521 modrm = x86_ldub_code(env, s);
5c73b757
MO
6522 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6523 * AMD documentation (24594.pdf) and testing of
 6524 * Intel 386 and 486 processors all show that the mod bits
6525 * are assumed to be 1's, regardless of actual values.
6526 */
14ce26e7 6527 rm = (modrm & 7) | REX_B(s);
bbdb4237 6528 reg = ((modrm >> 3) & 7) | REX_R(s);
14ce26e7 6529 if (CODE64(s))
4ba9938c 6530 ot = MO_64;
14ce26e7 6531 else
4ba9938c 6532 ot = MO_32;
d0052339 6533 if (reg >= 8) {
2c0262af 6534 goto illegal_op;
d0052339 6535 }
2c0262af 6536 if (b & 2) {
b53605db 6537 gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
1dbe15ef 6538 gen_op_mov_v_reg(s, ot, s->T0, rm);
6bd48f6f 6539 tcg_gen_movi_i32(s->tmp2_i32, reg);
ad75a51e 6540 gen_helper_set_dr(tcg_env, s->tmp2_i32, s->T0);
634a4051 6541 s->base.is_jmp = DISAS_EOB_NEXT;
2c0262af 6542 } else {
b53605db 6543 gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
6bd48f6f 6544 tcg_gen_movi_i32(s->tmp2_i32, reg);
ad75a51e 6545 gen_helper_get_dr(s->T0, tcg_env, s->tmp2_i32);
1dbe15ef 6546 gen_op_mov_reg_v(s, ot, rm, s->T0);
2c0262af
FB
6547 }
6548 }
6549 break;
6550 case 0x106: /* clts */
bc19f505 6551 if (check_cpl0(s)) {
b53605db 6552 gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
ad75a51e 6553 gen_helper_clts(tcg_env);
7eee2a50 6554 /* abort block because static cpu state changed */
634a4051 6555 s->base.is_jmp = DISAS_EOB_NEXT;
2c0262af
FB
6556 }
6557 break;
222a3336 6558 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
664e0f19
FB
6559 case 0x1c3: /* MOVNTI reg, mem */
6560 if (!(s->cpuid_features & CPUID_SSE2))
14ce26e7 6561 goto illegal_op;
ab4e4aec 6562 ot = mo_64_32(dflag);
e3af7c78 6563 modrm = x86_ldub_code(env, s);
664e0f19
FB
6564 mod = (modrm >> 6) & 3;
6565 if (mod == 3)
6566 goto illegal_op;
bbdb4237 6567 reg = ((modrm >> 3) & 7) | REX_R(s);
664e0f19 6568 /* generate a generic store */
0af10c86 6569 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
14ce26e7 6570 break;
664e0f19 6571 case 0x1ae:
e3af7c78 6572 modrm = x86_ldub_code(env, s);
121f3157 6573 switch (modrm) {
880f8486 6574 CASE_MODRM_MEM_OP(0): /* fxsave */
121f3157
RH
6575 if (!(s->cpuid_features & CPUID_FXSR)
6576 || (prefixes & PREFIX_LOCK)) {
14ce26e7 6577 goto illegal_op;
121f3157 6578 }
09d85fb8 6579 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
52236550 6580 gen_exception(s, EXCP07_PREX);
0fd14b72
FB
6581 break;
6582 }
4eeb3939 6583 gen_lea_modrm(env, s, modrm);
ad75a51e 6584 gen_helper_fxsave(tcg_env, s->A0);
664e0f19 6585 break;
121f3157 6586
880f8486 6587 CASE_MODRM_MEM_OP(1): /* fxrstor */
121f3157
RH
6588 if (!(s->cpuid_features & CPUID_FXSR)
6589 || (prefixes & PREFIX_LOCK)) {
14ce26e7 6590 goto illegal_op;
121f3157 6591 }
09d85fb8 6592 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
52236550 6593 gen_exception(s, EXCP07_PREX);
0fd14b72
FB
6594 break;
6595 }
4eeb3939 6596 gen_lea_modrm(env, s, modrm);
ad75a51e 6597 gen_helper_fxrstor(tcg_env, s->A0);
664e0f19 6598 break;
121f3157 6599
880f8486 6600 CASE_MODRM_MEM_OP(2): /* ldmxcsr */
121f3157
RH
6601 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
6602 goto illegal_op;
6603 }
664e0f19 6604 if (s->flags & HF_TS_MASK) {
52236550 6605 gen_exception(s, EXCP07_PREX);
664e0f19 6606 break;
14ce26e7 6607 }
4eeb3939 6608 gen_lea_modrm(env, s, modrm);
6bd48f6f 6609 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
ad75a51e 6610 gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
664e0f19 6611 break;
121f3157 6612
880f8486 6613 CASE_MODRM_MEM_OP(3): /* stmxcsr */
121f3157 6614 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
664e0f19 6615 goto illegal_op;
121f3157
RH
6616 }
6617 if (s->flags & HF_TS_MASK) {
52236550 6618 gen_exception(s, EXCP07_PREX);
121f3157
RH
6619 break;
6620 }
ad75a51e 6621 gen_helper_update_mxcsr(tcg_env);
121f3157 6622 gen_lea_modrm(env, s, modrm);
ad75a51e 6623 tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
c66f9727 6624 gen_op_st_v(s, MO_32, s->T0, s->A0);
664e0f19 6625 break;
121f3157 6626
880f8486 6627 CASE_MODRM_MEM_OP(4): /* xsave */
19dc85db
RH
6628 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6629 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
6630 | PREFIX_REPZ | PREFIX_REPNZ))) {
6631 goto illegal_op;
6632 }
6633 gen_lea_modrm(env, s, modrm);
776678b2 6634 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
19dc85db 6635 cpu_regs[R_EDX]);
ad75a51e 6636 gen_helper_xsave(tcg_env, s->A0, s->tmp1_i64);
19dc85db
RH
6637 break;
6638
880f8486 6639 CASE_MODRM_MEM_OP(5): /* xrstor */
19dc85db
RH
6640 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6641 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
6642 | PREFIX_REPZ | PREFIX_REPNZ))) {
6643 goto illegal_op;
6644 }
6645 gen_lea_modrm(env, s, modrm);
776678b2 6646 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
19dc85db 6647 cpu_regs[R_EDX]);
ad75a51e 6648 gen_helper_xrstor(tcg_env, s->A0, s->tmp1_i64);
f4f1110e
RH
6649 /* XRSTOR is how MPX is enabled, which changes how
6650 we translate. Thus we need to end the TB. */
634a4051 6651 s->base.is_jmp = DISAS_EOB_NEXT;
19dc85db
RH
6652 break;
6653
880f8486 6654 CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
121f3157
RH
6655 if (prefixes & PREFIX_LOCK) {
6656 goto illegal_op;
6657 }
6658 if (prefixes & PREFIX_DATA) {
5e1fac2d 6659 /* clwb */
121f3157 6660 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
5e1fac2d 6661 goto illegal_op;
121f3157 6662 }
5e1fac2d 6663 gen_nop_modrm(env, s, modrm);
c9cfe8f9
RH
6664 } else {
6665 /* xsaveopt */
6666 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6667 || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
6668 || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
6669 goto illegal_op;
6670 }
6671 gen_lea_modrm(env, s, modrm);
776678b2 6672 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
c9cfe8f9 6673 cpu_regs[R_EDX]);
ad75a51e 6674 gen_helper_xsaveopt(tcg_env, s->A0, s->tmp1_i64);
121f3157 6675 }
c9cfe8f9 6676 break;
121f3157 6677
880f8486 6678 CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
121f3157
RH
6679 if (prefixes & PREFIX_LOCK) {
6680 goto illegal_op;
6681 }
6682 if (prefixes & PREFIX_DATA) {
6683 /* clflushopt */
6684 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
6685 goto illegal_op;
6686 }
5e1fac2d 6687 } else {
121f3157
RH
6688 /* clflush */
6689 if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
6690 || !(s->cpuid_features & CPUID_CLFLUSH)) {
5e1fac2d 6691 goto illegal_op;
121f3157 6692 }
5e1fac2d 6693 }
121f3157 6694 gen_nop_modrm(env, s, modrm);
5e1fac2d 6695 break;
121f3157 6696
07929f2a 6697 case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
e0dd5fd4 6698 case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
07929f2a 6699 case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
e0dd5fd4 6700 case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
07929f2a
RH
6701 if (CODE64(s)
6702 && (prefixes & PREFIX_REPZ)
6703 && !(prefixes & PREFIX_LOCK)
6704 && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
6705 TCGv base, treg, src, dst;
6706
6707 /* Preserve hflags bits by testing CR4 at runtime. */
6bd48f6f 6708 tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
ad75a51e 6709 gen_helper_cr4_testbit(tcg_env, s->tmp2_i32);
07929f2a
RH
6710
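             /* Bit 3 of modrm selects FS vs GS, bit 4 selects write vs read
                (rd*base are /0 and /1, wr*base are /2 and /3 above). */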
6711 base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
6712 treg = cpu_regs[(modrm & 7) | REX_B(s)];
6713
6714 if (modrm & 0x10) {
6715 /* wr*base */
6716 dst = base, src = treg;
6717 } else {
6718 /* rd*base */
6719 dst = treg, src = base;
6720 }
6721
6722 if (s->dflag == MO_32) {
6723 tcg_gen_ext32u_tl(dst, src);
6724 } else {
6725 tcg_gen_mov_tl(dst, src);
6726 }
6727 break;
6728 }
b9f9c5b4 6729 goto unknown_op;
07929f2a 6730
121f3157
RH
6731 case 0xf8: /* sfence / pcommit */
6732 if (prefixes & PREFIX_DATA) {
6733 /* pcommit */
6734 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
6735 || (prefixes & PREFIX_LOCK)) {
6736 goto illegal_op;
891bc821 6737 }
121f3157
RH
6738 break;
6739 }
6740 /* fallthru */
6741 case 0xf9 ... 0xff: /* sfence */
14cb949a
PB
6742 if (!(s->cpuid_features & CPUID_SSE)
6743 || (prefixes & PREFIX_LOCK)) {
6744 goto illegal_op;
6745 }
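         /* SFENCE only orders stores, so a store-store barrier suffices;
            LFENCE and MFENCE below map to load-load and full barriers. */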
cc19e497 6746 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
14cb949a 6747 break;
121f3157 6748 case 0xe8 ... 0xef: /* lfence */
cc19e497
PK
6749 if (!(s->cpuid_features & CPUID_SSE)
6750 || (prefixes & PREFIX_LOCK)) {
6751 goto illegal_op;
6752 }
6753 tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
6754 break;
121f3157
RH
6755 case 0xf0 ... 0xf7: /* mfence */
6756 if (!(s->cpuid_features & CPUID_SSE2)
6757 || (prefixes & PREFIX_LOCK)) {
6758 goto illegal_op;
8f091a59 6759 }
cc19e497 6760 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
8f091a59 6761 break;
121f3157 6762
664e0f19 6763 default:
b9f9c5b4 6764 goto unknown_op;
14ce26e7
FB
6765 }
6766 break;
121f3157 6767
a35f3ec7 6768 case 0x10d: /* 3DNow! prefetch(w) */
e3af7c78 6769 modrm = x86_ldub_code(env, s);
a35f3ec7
AJ
6770 mod = (modrm >> 6) & 3;
 6771 if (mod == 3) {
 6772     goto illegal_op;
      }
26317698 6773 gen_nop_modrm(env, s, modrm);
8f091a59 6774 break;
3b21e03e 6775 case 0x1aa: /* rsm */
b53605db 6776 gen_svm_check_intercept(s, SVM_EXIT_RSM);
3b21e03e
FB
 6777 if (!(s->flags & HF_SMM_MASK)) {
 6778     goto illegal_op;
      }
a93b55ec
CF
6779#ifdef CONFIG_USER_ONLY
6780 /* we should not be in SMM mode */
6781 g_assert_not_reached();
6782#else
728d803b 6783 gen_update_cc_op(s);
09e99df4 6784 gen_update_eip_next(s);
ad75a51e 6785 gen_helper_rsm(tcg_env);
a93b55ec 6786#endif /* CONFIG_USER_ONLY */
6424ac8e 6787 s->base.is_jmp = DISAS_EOB_ONLY;
3b21e03e 6788 break;
222a3336
AZ
6789 case 0x1b8: /* SSE4.2 popcnt */
 6790 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
 6791     PREFIX_REPZ) {
 6792     goto illegal_op;
      }
 6793 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT)) {
 6794     goto illegal_op;
      }
6795
e3af7c78 6796 modrm = x86_ldub_code(env, s);
bbdb4237 6797 reg = ((modrm >> 3) & 7) | REX_R(s);
222a3336 6798
ab4e4aec 6799 if (s->prefix & PREFIX_DATA) {
4ba9938c 6800 ot = MO_16;
ab4e4aec
RH
6801 } else {
6802 ot = mo_64_32(dflag);
6803 }
222a3336 6804
0af10c86 6805 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
c66f9727
EC
6806 gen_extu(ot, s->T0);
6807 tcg_gen_mov_tl(cpu_cc_src, s->T0);
6808 tcg_gen_ctpop_tl(s->T0, s->T0);
1dbe15ef 6809 gen_op_mov_reg_v(s, ot, reg, s->T0);
fdb0d09d 6810
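         /* cc_src keeps the original operand so CC_OP_POPCNT can set ZF when
            it was zero; POPCNT clears the other arithmetic flags. */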
4885c3c4 6811 set_cc_op(s, CC_OP_POPCNT);
222a3336 6812 break;
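     /* The following opcode ranges are handled by the new table-based
        decoder via disas_insn_new(). */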
653fad24 6813 case 0x10e ... 0x117:
664e0f19 6814 case 0x128 ... 0x12f:
4242b1bd 6815 case 0x138 ... 0x13a:
d9f4bb27 6816 case 0x150 ... 0x179:
664e0f19
FB
6817 case 0x17c ... 0x17f:
6818 case 0x1c2:
6819 case 0x1c4 ... 0x1c6:
6820 case 0x1d0 ... 0x1fe:
653fad24 6821 disas_insn_new(s, cpu, b);
664e0f19 6822 break;
2c0262af 6823 default:
b9f9c5b4 6824 goto unknown_op;
2c0262af 6825 }
f66c8e8c 6826 return true;
2c0262af 6827 illegal_op:
b9f9c5b4 6828 gen_illegal_opcode(s);
f66c8e8c 6829 return true;
b9f9c5b4 6830 unknown_op:
b9f9c5b4 6831 gen_unknown_opcode(env, s);
f66c8e8c 6832 return true;
2c0262af
FB
6833}
6834
63618b4e 6835void tcg_x86_init(void)
2c0262af 6836{
fac0aff9
RH
6837 static const char reg_names[CPU_NB_REGS][4] = {
6838#ifdef TARGET_X86_64
6839 [R_EAX] = "rax",
6840 [R_EBX] = "rbx",
6841 [R_ECX] = "rcx",
6842 [R_EDX] = "rdx",
6843 [R_ESI] = "rsi",
6844 [R_EDI] = "rdi",
6845 [R_EBP] = "rbp",
6846 [R_ESP] = "rsp",
6847 [8] = "r8",
6848 [9] = "r9",
6849 [10] = "r10",
6850 [11] = "r11",
6851 [12] = "r12",
6852 [13] = "r13",
6853 [14] = "r14",
6854 [15] = "r15",
6855#else
6856 [R_EAX] = "eax",
6857 [R_EBX] = "ebx",
6858 [R_ECX] = "ecx",
6859 [R_EDX] = "edx",
6860 [R_ESI] = "esi",
6861 [R_EDI] = "edi",
6862 [R_EBP] = "ebp",
6863 [R_ESP] = "esp",
f771ca6a
RH
6864#endif
6865 };
6866 static const char eip_name[] = {
6867#ifdef TARGET_X86_64
6868 "rip"
6869#else
6870 "eip"
fac0aff9
RH
6871#endif
6872 };
3558f805
RH
6873 static const char seg_base_names[6][8] = {
6874 [R_CS] = "cs_base",
6875 [R_DS] = "ds_base",
6876 [R_ES] = "es_base",
6877 [R_FS] = "fs_base",
6878 [R_GS] = "gs_base",
6879 [R_SS] = "ss_base",
6880 };
149b427b
RH
6881 static const char bnd_regl_names[4][8] = {
6882 "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
6883 };
6884 static const char bnd_regu_names[4][8] = {
6885 "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
6886 };
fac0aff9
RH
6887 int i;
6888
ad75a51e 6889 cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
317ac620 6890 offsetof(CPUX86State, cc_op), "cc_op");
ad75a51e 6891 cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
a7812ae4 6892 "cc_dst");
ad75a51e 6893 cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
a3251186 6894 "cc_src");
ad75a51e 6895 cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
988c3eb0 6896 "cc_src2");
ad75a51e 6897 cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);
437a88a5 6898
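     /* Expose each architectural register as a named TCG global backed by
        the corresponding CPUX86State field. */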
fac0aff9 6899 for (i = 0; i < CPU_NB_REGS; ++i) {
ad75a51e 6900 cpu_regs[i] = tcg_global_mem_new(tcg_env,
fac0aff9
RH
6901 offsetof(CPUX86State, regs[i]),
6902 reg_names[i]);
6903 }
677ef623 6904
3558f805
RH
6905 for (i = 0; i < 6; ++i) {
6906 cpu_seg_base[i]
ad75a51e 6907 = tcg_global_mem_new(tcg_env,
3558f805
RH
6908 offsetof(CPUX86State, segs[i].base),
6909 seg_base_names[i]);
6910 }
6911
149b427b
RH
6912 for (i = 0; i < 4; ++i) {
6913 cpu_bndl[i]
ad75a51e 6914 = tcg_global_mem_new_i64(tcg_env,
149b427b
RH
6915 offsetof(CPUX86State, bnd_regs[i].lb),
6916 bnd_regl_names[i]);
6917 cpu_bndu[i]
ad75a51e 6918 = tcg_global_mem_new_i64(tcg_env,
149b427b
RH
6919 offsetof(CPUX86State, bnd_regs[i].ub),
6920 bnd_regu_names[i]);
6921 }
2c0262af
FB
6922}
6923
b542683d 6924static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
2c0262af 6925{
9761d39b 6926 DisasContext *dc = container_of(dcbase, DisasContext, base);
b77af26e 6927 CPUX86State *env = cpu_env(cpu);
9761d39b 6928 uint32_t flags = dc->base.tb->flags;
9ef6c6ec 6929 uint32_t cflags = tb_cflags(dc->base.tb);
01b9d8c1 6930 int cpl = (flags >> HF_CPL_SHIFT) & 3;
0ab011cc 6931 int iopl = (flags >> IOPL_SHIFT) & 3;
3a1d9b8b 6932
d75f9129 6933 dc->cs_base = dc->base.tb->cs_base;
e3a79e0e 6934 dc->pc_save = dc->base.pc_next;
d75f9129 6935 dc->flags = flags;
01b9d8c1
RH
6936#ifndef CONFIG_USER_ONLY
6937 dc->cpl = cpl;
0ab011cc 6938 dc->iopl = iopl;
01b9d8c1 6939#endif
d75f9129
RH
6940
6941 /* We make some simplifying assumptions; validate they're correct. */
6942 g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
01b9d8c1 6943 g_assert(CPL(dc) == cpl);
0ab011cc 6944 g_assert(IOPL(dc) == iopl);
f8a35846 6945 g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
9996dcfd 6946 g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
eec7d0f8 6947 g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
b40a47a1 6948 g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
73e90dc4 6949 g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
beedb93c 6950 g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
5d223889 6951 g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
b322b3af 6952 g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));
d75f9129 6953
2c0262af 6954 dc->cc_op = CC_OP_DYNAMIC;
e207582f 6955 dc->cc_op_dirty = false;
2c0262af
FB
6956 dc->popl_esp_hack = 0;
 6957 /* Record the MMU index used for this translation's memory accesses. */
da6d48e3 6958 dc->mem_index = cpu_mmu_index(env, false);
0514ef2f
EH
6959 dc->cpuid_features = env->features[FEAT_1_EDX];
6960 dc->cpuid_ext_features = env->features[FEAT_1_ECX];
6961 dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
6962 dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
6963 dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
268dc464 6964 dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
405c7c07 6965 dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
c9cfe8f9 6966 dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
9ef6c6ec 6967 dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
c1de1a1a 6968 (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
3236c2ad
RH
6969 /*
6970 * If jmp_opt, we want to handle each string instruction individually.
6971 * For icount also disable repz optimization so that each iteration
6972 * is accounted separately.
c4d4525c 6973 */
9ef6c6ec 6974 dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
4f31916f 6975
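     /* Temporaries shared across the translator, allocated once per TB. */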
c66f9727 6976 dc->T0 = tcg_temp_new();
b48597b0 6977 dc->T1 = tcg_temp_new();
6b672b5d 6978 dc->A0 = tcg_temp_new();
a7812ae4 6979
fbd80f02 6980 dc->tmp0 = tcg_temp_new();
776678b2 6981 dc->tmp1_i64 = tcg_temp_new_i64();
6bd48f6f 6982 dc->tmp2_i32 = tcg_temp_new_i32();
4f82446d 6983 dc->tmp3_i32 = tcg_temp_new_i32();
5022f28f 6984 dc->tmp4 = tcg_temp_new();
3a5d1773 6985 dc->cc_srcT = tcg_temp_new();
9761d39b
LV
6986}
6987
d2e6eedf
LV
6988static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
6989{
6990}
6991
9d75f52b
LV
6992static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
6993{
6994 DisasContext *dc = container_of(dcbase, DisasContext, base);
e3a79e0e 6995 target_ulong pc_arg = dc->base.pc_next;
9d75f52b 6996
95093668 6997 dc->prev_insn_end = tcg_last_op();
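     /* With CF_PCREL the TB may run at any virtual address, so record only
        the page offset of EIP rather than the full pc. */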
2e3afe8e 6998 if (tb_cflags(dcbase->tb) & CF_PCREL) {
e3a79e0e
RH
6999 pc_arg -= dc->cs_base;
7000 pc_arg &= ~TARGET_PAGE_MASK;
7001 }
7002 tcg_gen_insn_start(pc_arg, dc->cc_op);
9d75f52b
LV
7003}
7004
2c2f8cac
LV
7005static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
7006{
7007 DisasContext *dc = container_of(dcbase, DisasContext, base);
b26491b4
RH
7008
7009#ifdef TARGET_VSYSCALL_PAGE
7010 /*
7011 * Detect entry into the vsyscall page and invoke the syscall.
7012 */
7013 if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
52236550 7014 gen_exception(dc, EXCP_VSYSCALL);
9b21049e 7015 dc->base.pc_next = dc->pc + 1;
b26491b4
RH
7016 return;
7017 }
7018#endif
7019
f66c8e8c
RH
7020 if (disas_insn(dc, cpu)) {
7021 target_ulong pc_next = dc->pc;
7022 dc->base.pc_next = pc_next;
7023
7024 if (dc->base.is_jmp == DISAS_NEXT) {
7025 if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
7026 /*
 7027 * In single-step mode we generate only one instruction and
 7028 * then raise an exception.
 7029 * If IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
 7030 * the flag and end the translation block so that pending
 7031 * interrupts get a chance to be delivered.
7032 */
200ef603 7033 dc->base.is_jmp = DISAS_EOB_NEXT;
f66c8e8c
RH
7034 } else if (!is_same_page(&dc->base, pc_next)) {
7035 dc->base.is_jmp = DISAS_TOO_MANY;
7036 }
95093668 7037 }
2c2f8cac 7038 }
2c2f8cac
LV
7039}
7040
47e981b4
LV
7041static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
7042{
7043 DisasContext *dc = container_of(dcbase, DisasContext, base);
7044
200ef603
RH
7045 switch (dc->base.is_jmp) {
7046 case DISAS_NORETURN:
7047 break;
7048 case DISAS_TOO_MANY:
5f7ec6ef
RH
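         /* The translator loop stopped the TB early (e.g. instruction budget
            or a page boundary); chain to the first untranslated insn. */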
7049 gen_update_cc_op(dc);
7050 gen_jmp_rel_csize(dc, 0, 0);
7051 break;
200ef603
RH
7052 case DISAS_EOB_NEXT:
7053 gen_update_cc_op(dc);
65e4af23 7054 gen_update_eip_cur(dc);
200ef603
RH
7055 /* fall through */
7056 case DISAS_EOB_ONLY:
47e981b4 7057 gen_eob(dc);
200ef603
RH
7058 break;
7059 case DISAS_EOB_INHIBIT_IRQ:
7060 gen_update_cc_op(dc);
7061 gen_update_eip_cur(dc);
7062 gen_eob_inhibit_irq(dc, true);
7063 break;
faf9ea5f
RH
7064 case DISAS_JUMP:
7065 gen_jr(dc);
7066 break;
200ef603
RH
7067 default:
7068 g_assert_not_reached();
47e981b4
LV
7069 }
7070}
7071
e0d110d9 7072static void i386_tr_disas_log(const DisasContextBase *dcbase,
8eb806a7 7073 CPUState *cpu, FILE *logfile)
e0d110d9
LV
7074{
7075 DisasContext *dc = container_of(dcbase, DisasContext, base);
e0d110d9 7076
8eb806a7
RH
7077 fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
7078 target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
e0d110d9
LV
7079}
7080
d2e6eedf
LV
7081static const TranslatorOps i386_tr_ops = {
7082 .init_disas_context = i386_tr_init_disas_context,
7083 .tb_start = i386_tr_tb_start,
7084 .insn_start = i386_tr_insn_start,
d2e6eedf
LV
7085 .translate_insn = i386_tr_translate_insn,
7086 .tb_stop = i386_tr_tb_stop,
7087 .disas_log = i386_tr_disas_log,
7088};
0a7df5da 7089
d2e6eedf 7090/* generate intermediate code for basic block 'tb'. */
597f9b2d 7091void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
306c8721 7092 target_ulong pc, void *host_pc)
d2e6eedf
LV
7093{
7094 DisasContext dc;
e0d110d9 7095
306c8721 7096 translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
2c0262af 7097}