target-i386: Remove gen_op_mov*_A0_im

/*
 * i386 translation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20

#ifdef TARGET_X86_64
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
#else
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

//#define MACRO_TEST   1

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
/* local temps */
static TCGv cpu_T[2];
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

#include "exec/gen-icount.h"

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif

typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    int aflag, dflag;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
#endif
    int vex_l;  /* vex vector length */
    int vex_v;  /* vex vvvv register, without 1's complement. */
    int ss32;   /* 32 bit stack segment */
    CCOp cc_op; /* current CC operation */
    bool cc_op_dirty;
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
} DisasContext;

static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, int ot, int d);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX, /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16, /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X. */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
};

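/* The i386 translator evaluates EFLAGS lazily: instead of computing the
   flags after every instruction, it records the last flag-setting
   operation in cc_op and its operands in cc_dst/cc_src/cc_src2, and only
   materializes the flags when they are actually consumed (see
   gen_compute_eflags and gen_prepare_cc below). */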
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used. */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(cpu_cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean. */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts). */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

static inline void gen_movtl_T0_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_movtl_T1_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_andl_T0_ffff(void)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
}

static inline void gen_op_andl_T0_im(uint32_t val)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
}

static inline void gen_op_movl_T0_T1(void)
{
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
}

static inline void gen_op_andl_A0_ffff(void)
{
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N in 4..7 indicates
 * [AH, CH, DH, BH], i.e. "bits 15..8 of register N-4".  Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
{
    if (reg < 4) {
        return false;
    }
#ifdef TARGET_X86_64
    if (reg >= 8 || x86_64_hregs) {
        return false;
    }
#endif
    return true;
}

static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
{
    switch(ot) {
    case MO_8:
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
        } else {
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        }
        break;
    case MO_16:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        break;
    default: /* XXX this shouldn't be reached; abort? */
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
#endif
    }
}

static inline void gen_op_mov_reg_T0(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);
}

static inline void gen_op_mov_reg_T1(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[1]);
}

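/* Note: the "size" argument here is biased: callers pass a value one MO_*
   step below the actual operand width, so the MO_8 arm really performs a
   16-bit move (it deposits 16 bits).  Compare gen_op_jnz_ecx below, which
   compensates with gen_extu(size + 1, ...). */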
static inline void gen_op_mov_reg_A0(int size, int reg)
{
    switch(size) {
    case MO_8:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
        break;
    default: /* XXX this shouldn't be reached; abort? */
    case MO_16:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
        break;
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
        break;
#endif
    }
}

static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
        tcg_gen_ext8u_tl(t0, t0);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
{
    gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
}

static inline void gen_op_movl_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addl_A0_im(int32_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_addq_A0_im(int64_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif

static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
#endif
        gen_op_addl_A0_im(val);
}

static inline void gen_op_addl_T0_T1(void)
{
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
}

static inline void gen_op_jmp_T0(void)
{
    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
}

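/* The same biased "size" convention applies to the two helpers below:
   callers pass s->aflag (0 = 16-bit, 1 = 32-bit, 2 = 64-bit), so the
   MO_8 arm performs the 16-bit add. */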
static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
{
    switch(size) {
    case MO_8:
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
        break;
    case MO_16:
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        break;
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
        break;
#endif
    }
}

static inline void gen_op_add_reg_T0(int size, int reg)
{
    switch(size) {
    case MO_8:
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
        break;
    case MO_16:
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        break;
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
        break;
#endif
    }
}

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}

static inline void gen_op_movl_A0_seg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    } else {
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
    }
#else
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
}

static inline void gen_op_addq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}

static inline void gen_op_movq_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
#endif

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_reg_T0(idx, d);
    }
}

static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
}

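/* Compute A0 = segment base + [E/R]SI for string operations, honoring
   the current address size and any segment override (DS by default;
   16-bit addressing always adds a segment base). */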
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    int override;

    override = s->override;
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        if (override >= 0) {
            gen_op_movq_A0_seg(override);
            gen_op_addq_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movq_A0_reg(R_ESI);
        }
    } else
#endif
    if (s->aflag) {
        /* 32 bit address */
        if (s->addseg && override < 0)
            override = R_DS;
        if (override >= 0) {
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movl_A0_reg(R_ESI);
        }
    } else {
        /* 16-bit address, always override */
        if (override < 0)
            override = R_DS;
        gen_op_movl_A0_reg(R_ESI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(s, override);
    }
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        gen_op_movq_A0_reg(R_EDI);
    } else
#endif
    if (s->aflag) {
        if (s->addseg) {
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
        } else {
            gen_op_movl_A0_reg(R_EDI);
        }
    } else {
        gen_op_movl_A0_reg(R_EDI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(s, R_ES);
    }
}

static inline void gen_op_movl_T0_Dshift(int ot)
{
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
}

static TCGv gen_ext_tl(TCGv dst, TCGv src, int size, bool sign)
{
    switch (size) {
    case MO_8:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case MO_16:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case MO_32:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
        return src;
    }
}

static void gen_extu(int ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(int ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static inline void gen_op_jnz_ecx(int size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(int size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}

static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, n);
        break;
    case MO_16:
        gen_helper_inw(v, n);
        break;
    case MO_32:
        gen_helper_inl(v, n);
        break;
    }
}

static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(v, n);
        break;
    case MO_16:
        gen_helper_outw(v, n);
        break;
    case MO_32:
        gen_helper_outl(v, n);
        break;
    }
}

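/* Check I/O permission for the port in T[0].  In protected mode, when
   CPL > IOPL or in vm86 mode, the TSS I/O permission bitmap must allow
   the access, so a check_io* helper is called; under SVM, the access is
   additionally reported for a possible IOIO intercept. */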
static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    int state_saved;
    target_ulong next_eip;

    state_saved = 0;
    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        gen_update_cc_op(s);
        gen_jmp_im(cur_eip);
        state_saved = 1;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        switch (ot) {
        case MO_8:
            gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
            break;
        case MO_16:
            gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
            break;
        case MO_32:
            gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
            break;
        }
    }
    if (s->flags & HF_SVMI_MASK) {
        if (!state_saved) {
            gen_update_cc_op(s);
            gen_jmp_im(cur_eip);
        }
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
                                tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}

static inline void gen_movs(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static void gen_op_update1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update3_cc(TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_movi_tl(cpu_cc_srcT, 0);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    TCGV_UNUSED(zero);
    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live. */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        zero = tcg_const_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_update_cc_op(s);
    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);

    if (dead) {
        tcg_temp_free(zero);
    }
}

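/* A CCPrepare describes how to test a condition code without actually
   computing EFLAGS: apply "cond" to "reg" (against "reg2" if use_reg2,
   else against the immediate "imm"), after masking "reg" with "mask"
   when mask != -1.  If no_setcond is set, "reg" already holds the 0/1
   flag value itself (TCG_COND_EQ then means "invert it").  See
   gen_setcc1 for the consumer side. */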
typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;

/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0. */
        t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
        tcg_gen_mov_tl(t0, cpu_cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB. */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}

/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            int size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    default:
        {
            int size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}

/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, size, cond;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case. */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_extu(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_exts(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS. */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}

static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, int l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
        cc.reg = cpu_T[0];
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon. */
static inline void gen_jcc1(DisasContext *s, int b, int l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
        cc.reg = cpu_T[0];
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}

static inline void gen_stos(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(MO_32, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_lods(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_mov_reg_T0(ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

static inline void gen_scas(DisasContext *s, int ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_cmps(DisasContext *s, int ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_ins(DisasContext *s, int ot)
{
    if (use_icount)
        gen_io_start();
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(cpu_T[0], 0);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
    if (use_icount)
        gen_io_end();
}

static inline void gen_outs(DisasContext *s, int ot)
{
    if (use_icount)
        gen_io_start();
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);

    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);

    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    if (use_icount)
        gen_io_end();
}

/* Same method as Valgrind: we generate jumps to the current or next
   instruction. */
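/* Each REP iteration executes as its own translation block: the string
   op body runs once, ECX is decremented, and we jump back to cur_eip so
   the REP instruction itself is re-entered; gen_jz_ecx_string emits the
   test that exits to next_eip once ECX reaches zero. */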
#define GEN_REPZ(op) \
static inline void gen_repz_ ## op(DisasContext *s, int ot, \
                                 target_ulong cur_eip, target_ulong next_eip) \
{ \
    int l2; \
    gen_update_cc_op(s); \
    l2 = gen_jz_ecx_string(s, next_eip); \
    gen_ ## op(s, ot); \
    gen_op_add_reg_im(s->aflag, R_ECX, -1); \
    /* a loop would cause two single step exceptions if ECX = 1 \
       before rep string_insn */ \
    if (!s->jmp_opt) \
        gen_op_jz_ecx(s->aflag, l2); \
    gen_jmp(s, cur_eip); \
}

#define GEN_REPZ2(op) \
static inline void gen_repz_ ## op(DisasContext *s, int ot, \
                                   target_ulong cur_eip, \
                                   target_ulong next_eip, \
                                   int nz) \
{ \
    int l2; \
    gen_update_cc_op(s); \
    l2 = gen_jz_ecx_string(s, next_eip); \
    gen_ ## op(s, ot); \
    gen_op_add_reg_im(s->aflag, R_ECX, -1); \
    gen_update_cc_op(s); \
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
    if (!s->jmp_opt) \
        gen_op_jz_ecx(s->aflag, l2); \
    gen_jmp(s, cur_eip); \
}

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)

static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(cpu_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(cpu_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(cpu_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(cpu_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(cpu_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(cpu_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_const_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(cpu_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(cpu_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(cpu_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(cpu_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(cpu_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(cpu_env, tmp);
        break;
    }
}

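/* Example: for "add %ebx, %eax", disas_insn loads T1 from EBX and calls
   gen_op(s, OP_ADDL, MO_32, R_EAX); the sum is written back to EAX and
   the flags state becomes CC_OP_ADDL (CC_OP_ADDB + MO_32). */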
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, int ot, int d)
{
    if (d != OR_TMP0) {
        gen_op_mov_TN_reg(ot, 0, d);
    } else {
        gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
    }
    switch(op) {
    case OP_ADCL:
        gen_compute_eflags_c(s1, cpu_tmp4);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        gen_compute_eflags_c(s1, cpu_tmp4);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        gen_op_addl_T0_T1();
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    default:
    case OP_ANDL:
        tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_ORL:
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_XORL:
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_CMPL:
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
        tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    }
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, int ot, int d, int c)
{
    if (d != OR_TMP0) {
        gen_op_mov_TN_reg(ot, 0, d);
    } else {
        gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
    }
    gen_compute_eflags_c(s1, cpu_cc_src);
    if (c > 0) {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
        set_cc_op(s1, CC_OP_INCB + ot);
    } else {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
        set_cc_op(s1, CC_OP_DECB + ot);
    }
    gen_op_st_rm_T0_A0(s1, ot, d);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

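/* Update the flags after a variable-count shift.  "result" is the value
   shifted by the full count and "shm1" the value shifted by count - 1,
   from which the carry (the last bit shifted out) is recovered lazily.
   A count of zero must leave both the flags and CC_OP untouched, hence
   the movcond sequences below. */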
static void gen_shift_flags(DisasContext *s, int ot, TCGv result, TCGv shm1,
                            TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents. */
    z_tl = tcg_const_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }
    tcg_temp_free(z_tl);

    /* Get the two potential CC_OP values into temporaries. */
    tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
        oldop = cpu_tmp3_i32;
    }

    /* Conditionally store the CC_OP value. */
    z32 = tcg_const_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
    tcg_temp_free_i32(z32);
    tcg_temp_free_i32(s32);

    /* The CC_OP value is no longer predictable. */
    set_cc_op(s, CC_OP_DYNAMIC);
}

static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_TN_reg(ot, 0, op1);
    }

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
    tcg_gen_subi_tl(cpu_tmp0, cpu_T[1], 1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, cpu_T[0]);
            tcg_gen_sar_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else {
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        }
    } else {
        tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
        tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, cpu_T[1], is_right);
}

static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, cpu_T[0]);
                tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                gen_extu(ot, cpu_T[0]);
                tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
            }
        } else {
            tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
            tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    }
}

static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
{
    if (arg2 >= 0)
        tcg_gen_shli_tl(ret, arg1, arg2);
    else
        tcg_gen_shri_tl(ret, arg1, -arg2);
}

static void gen_rot_rm_T1(DisasContext *s, int ot, int op1, int is_right)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
    TCGv_i32 t0, t1;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_TN_reg(ot, 0, op1);
    }

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);

    switch (ot) {
    case MO_8:
        /* Replicate the 8-bit input so that a 32-bit rotate works. */
        tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
        tcg_gen_muli_tl(cpu_T[0], cpu_T[0], 0x01010101);
        goto do_long;
    case MO_16:
        /* Replicate the 16-bit input so that a 32-bit rotate works. */
        tcg_gen_deposit_tl(cpu_T[0], cpu_T[0], cpu_T[0], 16, 16);
        goto do_long;
    do_long:
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
        if (is_right) {
            tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        } else {
            tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        }
        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
        break;
#endif
    default:
        if (is_right) {
            tcg_gen_rotr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else {
            tcg_gen_rotl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        }
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* We'll need the flags computed into CC_SRC. */
    gen_compute_eflags(s);

    /* The value that was "rotated out" is now present at the other end
       of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
       since we've computed the flags into CC_SRC, these variables are
       currently dead. */
    if (is_right) {
        tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
        tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
    } else {
        tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
    }
    tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
    tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);

    /* Now conditionally store the new CC_OP value.  If the shift count
       is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
       Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
       exactly as we computed above. */
    t0 = tcg_const_i32(0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, cpu_T[1]);
    tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
    tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
                        cpu_tmp2_i32, cpu_tmp3_i32);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);

    /* The CC_OP value is no longer predictable. */
    set_cc_op(s, CC_OP_DYNAMIC);
}

static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
                          int is_right)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);
    int shift;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_TN_reg(ot, 0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_32:
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            if (is_right) {
                tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            } else {
                tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            }
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            break;
#endif
        default:
            if (is_right) {
                tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                tcg_gen_rotli_tl(cpu_T[0], cpu_T[0], op2);
            }
            break;
        case MO_8:
            mask = 7;
            goto do_shifts;
        case MO_16:
            mask = 15;
        do_shifts:
            shift = op2 & mask;
            if (is_right) {
                shift = mask + 1 - shift;
            }
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], shift);
            tcg_gen_shri_tl(cpu_T[0], cpu_T[0], mask + 1 - shift);
            tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    if (op2 != 0) {
        /* Compute the flags into CC_SRC. */
        gen_compute_eflags(s);

        /* The value that was "rotated out" is now present at the other end
           of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
           since we've computed the flags into CC_SRC, these variables are
           currently dead. */
        if (is_right) {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
            tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
        } else {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
        }
        tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
        tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
        set_cc_op(s, CC_OP_ADCOX);
    }
}

/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
                           int is_right)
{
    gen_compute_eflags(s);
    assert(s->cc_op == CC_OP_EFLAGS);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    if (is_right) {
        switch (ot) {
        case MO_8:
            gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case MO_16:
            gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case MO_32:
            gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#endif
        }
    } else {
        switch (ot) {
        case MO_8:
            gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case MO_16:
            gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case MO_32:
            gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#endif
        }
    }
    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);
}

/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, int ot, int op1,
                             bool is_right, TCGv count_in)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);
    TCGv count;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_TN_reg(ot, 0, op1);
    }

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value. */
        if (is_right) {
            tcg_gen_deposit_tl(cpu_tmp0, cpu_T[0], cpu_T[1], 16, 16);
            tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
            tcg_gen_mov_tl(cpu_T[0], cpu_tmp0);
        } else {
            tcg_gen_deposit_tl(cpu_T[1], cpu_T[0], cpu_T[1], 16, 16);
        }
        /* FALLTHRU */
#ifdef TARGET_X86_64
    case MO_32:
        /* Concatenate the two 32-bit values and use a 64-bit shift. */
        tcg_gen_subi_tl(cpu_tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
            tcg_gen_shr_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shr_i64(cpu_T[0], cpu_T[0], count);
        } else {
            tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[1], cpu_T[0]);
            tcg_gen_shl_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shl_i64(cpu_T[0], cpu_T[0], count);
            tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
            tcg_gen_shri_i64(cpu_T[0], cpu_T[0], 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(cpu_tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);

            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], count);
            tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
        } else {
            tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour. */
                tcg_gen_subfi_tl(cpu_tmp4, 33, count);
                tcg_gen_shr_tl(cpu_tmp4, cpu_T[1], cpu_tmp4);
                tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
            }

            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
            tcg_gen_shl_tl(cpu_T[0], cpu_T[0], count);
            tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
        }
        tcg_gen_movi_tl(cpu_tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[1], count, cpu_tmp4,
                           cpu_tmp4, cpu_T[1]);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, count, is_right);
    tcg_temp_free(count);
}

static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
{
    if (s != OR_TMP1)
        gen_op_mov_TN_reg(ot, 1, s);
    switch(op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}

static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
{
    switch(op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        tcg_gen_movi_tl(cpu_T[1], c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}

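/* Decode a modrm memory operand into A0.  The modrm byte is laid out as
   mod(7:6) reg(5:3) rm(2:0); with 32/64-bit addressing, rm == 4 selects
   an extra SIB byte, scale(7:6) index(5:3) base(2:0).  mod chooses the
   displacement size (0, 8 or 32 bits; 16 bits for 16-bit addressing),
   and mod == 0 with base == 5 means displacement only (RIP-relative in
   64-bit mode when no SIB byte is present). */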
1921 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
1922 {
1923 target_long disp;
1924 int havesib;
1925 int base;
1926 int index;
1927 int scale;
1928 int mod, rm, code, override, must_add_seg;
1929 TCGv sum;
1930
1931 override = s->override;
1932 must_add_seg = s->addseg;
1933 if (override >= 0)
1934 must_add_seg = 1;
1935 mod = (modrm >> 6) & 3;
1936 rm = modrm & 7;
1937
1938 if (s->aflag) {
1939 havesib = 0;
1940 base = rm;
1941 index = -1;
1942 scale = 0;
1943
1944 if (base == 4) {
1945 havesib = 1;
1946 code = cpu_ldub_code(env, s->pc++);
1947 scale = (code >> 6) & 3;
1948 index = ((code >> 3) & 7) | REX_X(s);
1949 if (index == 4) {
1950 index = -1; /* no index */
1951 }
1952 base = (code & 7);
1953 }
1954 base |= REX_B(s);
1955
1956 switch (mod) {
1957 case 0:
1958 if ((base & 7) == 5) {
1959 base = -1;
1960 disp = (int32_t)cpu_ldl_code(env, s->pc);
1961 s->pc += 4;
1962 if (CODE64(s) && !havesib) {
1963 disp += s->pc + s->rip_offset;
1964 }
1965 } else {
1966 disp = 0;
1967 }
1968 break;
1969 case 1:
1970 disp = (int8_t)cpu_ldub_code(env, s->pc++);
1971 break;
1972 default:
1973 case 2:
1974 disp = (int32_t)cpu_ldl_code(env, s->pc);
1975 s->pc += 4;
1976 break;
1977 }
1978
1979 /* For correct popl handling with esp. */
1980 if (base == R_ESP && s->popl_esp_hack) {
1981 disp += s->popl_esp_hack;
1982 }
1983
1984 /* Compute the address, with a minimum number of TCG ops. */
1985 TCGV_UNUSED(sum);
1986 if (index >= 0) {
1987 if (scale == 0) {
1988 sum = cpu_regs[index];
1989 } else {
1990 tcg_gen_shli_tl(cpu_A0, cpu_regs[index], scale);
1991 sum = cpu_A0;
1992 }
1993 if (base >= 0) {
1994 tcg_gen_add_tl(cpu_A0, sum, cpu_regs[base]);
1995 sum = cpu_A0;
1996 }
1997 } else if (base >= 0) {
1998 sum = cpu_regs[base];
1999 }
2000 if (TCGV_IS_UNUSED(sum)) {
2001 tcg_gen_movi_tl(cpu_A0, disp);
2002 } else {
2003 tcg_gen_addi_tl(cpu_A0, sum, disp);
2004 }
2005
2006 if (must_add_seg) {
2007 if (override < 0) {
2008 if (base == R_EBP || base == R_ESP) {
2009 override = R_SS;
2010 } else {
2011 override = R_DS;
2012 }
2013 }
2014
2015 tcg_gen_ld_tl(cpu_tmp0, cpu_env,
2016 offsetof(CPUX86State, segs[override].base));
2017 if (CODE64(s)) {
2018 if (s->aflag != 2) {
2019 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
2020 }
2021 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
2022 return;
2023 }
2024
2025 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
2026 }
2027
2028 if (s->aflag != 2) {
2029 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
2030 }
2031 } else {
2032 switch (mod) {
2033 case 0:
2034 if (rm == 6) {
2035 disp = cpu_lduw_code(env, s->pc);
2036 s->pc += 2;
2037 tcg_gen_movi_tl(cpu_A0, disp);
2038 rm = 0; /* avoid SS override */
2039 goto no_rm;
2040 } else {
2041 disp = 0;
2042 }
2043 break;
2044 case 1:
2045 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2046 break;
2047 default:
2048 case 2:
2049 disp = cpu_lduw_code(env, s->pc);
2050 s->pc += 2;
2051 break;
2052 }
2053 switch(rm) {
2054 case 0:
2055 gen_op_movl_A0_reg(R_EBX);
2056 gen_op_addl_A0_reg_sN(0, R_ESI);
2057 break;
2058 case 1:
2059 gen_op_movl_A0_reg(R_EBX);
2060 gen_op_addl_A0_reg_sN(0, R_EDI);
2061 break;
2062 case 2:
2063 gen_op_movl_A0_reg(R_EBP);
2064 gen_op_addl_A0_reg_sN(0, R_ESI);
2065 break;
2066 case 3:
2067 gen_op_movl_A0_reg(R_EBP);
2068 gen_op_addl_A0_reg_sN(0, R_EDI);
2069 break;
2070 case 4:
2071 gen_op_movl_A0_reg(R_ESI);
2072 break;
2073 case 5:
2074 gen_op_movl_A0_reg(R_EDI);
2075 break;
2076 case 6:
2077 gen_op_movl_A0_reg(R_EBP);
2078 break;
2079 default:
2080 case 7:
2081 gen_op_movl_A0_reg(R_EBX);
2082 break;
2083 }
2084 if (disp != 0)
2085 gen_op_addl_A0_im(disp);
2086 gen_op_andl_A0_ffff();
2087 no_rm:
2088 if (must_add_seg) {
2089 if (override < 0) {
2090 if (rm == 2 || rm == 3 || rm == 6)
2091 override = R_SS;
2092 else
2093 override = R_DS;
2094 }
2095 gen_op_addl_A0_seg(s, override);
2096 }
2097 }
2098 }
2099
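/* Parse the ModRM addressing bytes of a NOP-like instruction without
   generating any code; only s->pc is advanced past them. */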
2100 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2101 {
2102 int mod, rm, base, code;
2103
2104 mod = (modrm >> 6) & 3;
2105 if (mod == 3)
2106 return;
2107 rm = modrm & 7;
2108
2109 if (s->aflag) {
2110
2111 base = rm;
2112
2113 if (base == 4) {
2114 code = cpu_ldub_code(env, s->pc++);
2115 base = (code & 7);
2116 }
2117
2118 switch (mod) {
2119 case 0:
2120 if (base == 5) {
2121 s->pc += 4;
2122 }
2123 break;
2124 case 1:
2125 s->pc++;
2126 break;
2127 default:
2128 case 2:
2129 s->pc += 4;
2130 break;
2131 }
2132 } else {
2133 switch (mod) {
2134 case 0:
2135 if (rm == 6) {
2136 s->pc += 2;
2137 }
2138 break;
2139 case 1:
2140 s->pc++;
2141 break;
2142 default:
2143 case 2:
2144 s->pc += 2;
2145 break;
2146 }
2147 }
2148 }
2149
2150 /* used for LEA and MOV AX, mem */
2151 static void gen_add_A0_ds_seg(DisasContext *s)
2152 {
2153 int override, must_add_seg;
2154 must_add_seg = s->addseg;
2155 override = R_DS;
2156 if (s->override >= 0) {
2157 override = s->override;
2158 must_add_seg = 1;
2159 }
2160 if (must_add_seg) {
2161 #ifdef TARGET_X86_64
2162 if (CODE64(s)) {
2163 gen_op_addq_A0_seg(override);
2164 } else
2165 #endif
2166 {
2167 gen_op_addl_A0_seg(s, override);
2168 }
2169 }
2170 }
2171
2172 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2173 OR_TMP0 */
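/* A typical caller passes is_store = 0 to load the r/m operand into
   T0, or is_store = 1 to store T0 (or 'reg') back to the r/m operand. */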
2174 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2175 int ot, int reg, int is_store)
2176 {
2177 int mod, rm;
2178
2179 mod = (modrm >> 6) & 3;
2180 rm = (modrm & 7) | REX_B(s);
2181 if (mod == 3) {
2182 if (is_store) {
2183 if (reg != OR_TMP0)
2184 gen_op_mov_TN_reg(ot, 0, reg);
2185 gen_op_mov_reg_T0(ot, rm);
2186 } else {
2187 gen_op_mov_TN_reg(ot, 0, rm);
2188 if (reg != OR_TMP0)
2189 gen_op_mov_reg_T0(ot, reg);
2190 }
2191 } else {
2192 gen_lea_modrm(env, s, modrm);
2193 if (is_store) {
2194 if (reg != OR_TMP0)
2195 gen_op_mov_TN_reg(ot, 0, reg);
2196 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2197 } else {
2198 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2199 if (reg != OR_TMP0)
2200 gen_op_mov_reg_T0(ot, reg);
2201 }
2202 }
2203 }
2204
2205 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, int ot)
2206 {
2207 uint32_t ret;
2208
2209 switch(ot) {
2210 case MO_8:
2211 ret = cpu_ldub_code(env, s->pc);
2212 s->pc++;
2213 break;
2214 case MO_16:
2215 ret = cpu_lduw_code(env, s->pc);
2216 s->pc += 2;
2217 break;
2218 default:
2219 case MO_32:
2220 ret = cpu_ldl_code(env, s->pc);
2221 s->pc += 4;
2222 break;
2223 }
2224 return ret;
2225 }
2226
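/* Size in bytes of the immediate operand for type 'ot'. 64-bit operand
   sizes still fetch a 4-byte immediate, which the instruction then
   sign-extends, hence the cap below. */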
2227 static inline int insn_const_size(unsigned int ot)
2228 {
2229 if (ot <= MO_32) {
2230 return 1 << ot;
2231 } else {
2232 return 4;
2233 }
2234 }
2235
2236 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2237 {
2238 TranslationBlock *tb;
2239 target_ulong pc;
2240
2241 pc = s->cs_base + eip;
2242 tb = s->tb;
2243 /* NOTE: we handle the case where the TB spans two pages here */
2244 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2245 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2246 /* jump to same page: we can use a direct jump */
2247 tcg_gen_goto_tb(tb_num);
2248 gen_jmp_im(eip);
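/* the low bits of the value passed to tcg_gen_exit_tb() tell the
   execution loop which of the TB's two direct-jump slots to patch */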
2249 tcg_gen_exit_tb((uintptr_t)tb + tb_num);
2250 } else {
2251 /* jump to another page: currently not optimized */
2252 gen_jmp_im(eip);
2253 gen_eob(s);
2254 }
2255 }
2256
2257 static inline void gen_jcc(DisasContext *s, int b,
2258 target_ulong val, target_ulong next_eip)
2259 {
2260 int l1, l2;
2261
2262 if (s->jmp_opt) {
2263 l1 = gen_new_label();
2264 gen_jcc1(s, b, l1);
2265
2266 gen_goto_tb(s, 0, next_eip);
2267
2268 gen_set_label(l1);
2269 gen_goto_tb(s, 1, val);
2270 s->is_jmp = DISAS_TB_JUMP;
2271 } else {
2272 l1 = gen_new_label();
2273 l2 = gen_new_label();
2274 gen_jcc1(s, b, l1);
2275
2276 gen_jmp_im(next_eip);
2277 tcg_gen_br(l2);
2278
2279 gen_set_label(l1);
2280 gen_jmp_im(val);
2281 gen_set_label(l2);
2282 gen_eob(s);
2283 }
2284 }
2285
2286 static void gen_cmovcc1(CPUX86State *env, DisasContext *s, int ot, int b,
2287 int modrm, int reg)
2288 {
2289 CCPrepare cc;
2290
2291 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2292
2293 cc = gen_prepare_cc(s, b, cpu_T[1]);
2294 if (cc.mask != -1) {
2295 TCGv t0 = tcg_temp_new();
2296 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2297 cc.reg = t0;
2298 }
2299 if (!cc.use_reg2) {
2300 cc.reg2 = tcg_const_tl(cc.imm);
2301 }
2302
2303 tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
2304 cpu_T[0], cpu_regs[reg]);
2305 gen_op_mov_reg_T0(ot, reg);
2306
2307 if (cc.mask != -1) {
2308 tcg_temp_free(cc.reg);
2309 }
2310 if (!cc.use_reg2) {
2311 tcg_temp_free(cc.reg2);
2312 }
2313 }
2314
2315 static inline void gen_op_movl_T0_seg(int seg_reg)
2316 {
2317 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2318 offsetof(CPUX86State,segs[seg_reg].selector));
2319 }
2320
2321 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2322 {
2323 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2324 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2325 offsetof(CPUX86State,segs[seg_reg].selector));
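/* in real/vm86 mode the segment base is simply selector << 4,
   e.g. a selector of 0x1234 yields a base of 0x12340 */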
2326 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2327 tcg_gen_st_tl(cpu_T[0], cpu_env,
2328 offsetof(CPUX86State,segs[seg_reg].base));
2329 }
2330
2331 /* move T0 to seg_reg and compute whether the CPU state may change.
2332 Never call this function with seg_reg == R_CS */
2333 static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2334 {
2335 if (s->pe && !s->vm86) {
2336 /* XXX: optimize by finding processor state dynamically */
2337 gen_update_cc_op(s);
2338 gen_jmp_im(cur_eip);
2339 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2340 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
2341 /* abort translation because the addseg value may change or
2342 because ss32 may change. For R_SS, translation must always
2343 stop, as special handling is needed to inhibit hardware
2344 interrupts for the next instruction */
2345 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2346 s->is_jmp = DISAS_TB_JUMP;
2347 } else {
2348 gen_op_movl_seg_T0_vm(seg_reg);
2349 if (seg_reg == R_SS)
2350 s->is_jmp = DISAS_TB_JUMP;
2351 }
2352 }
2353
2354 static inline int svm_is_rep(int prefixes)
2355 {
2356 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2357 }
2358
2359 static inline void
2360 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2361 uint32_t type, uint64_t param)
2362 {
2363 /* no SVM activated; fast case */
2364 if (likely(!(s->flags & HF_SVMI_MASK)))
2365 return;
2366 gen_update_cc_op(s);
2367 gen_jmp_im(pc_start - s->cs_base);
2368 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
2369 tcg_const_i64(param));
2370 }
2371
2372 static inline void
2373 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2374 {
2375 gen_svm_check_intercept_param(s, pc_start, type, 0);
2376 }
2377
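/* Adjust ESP/RSP by 'addend'; the first argument of gen_op_add_reg_im
   is the size index (0 = 16-bit, 1 = 32-bit, 2 = 64-bit update). */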
2378 static inline void gen_stack_update(DisasContext *s, int addend)
2379 {
2380 #ifdef TARGET_X86_64
2381 if (CODE64(s)) {
2382 gen_op_add_reg_im(2, R_ESP, addend);
2383 } else
2384 #endif
2385 if (s->ss32) {
2386 gen_op_add_reg_im(1, R_ESP, addend);
2387 } else {
2388 gen_op_add_reg_im(0, R_ESP, addend);
2389 }
2390 }
2391
2392 /* generate a push. It depends on ss32, addseg and dflag */
2393 static void gen_push_T0(DisasContext *s)
2394 {
2395 #ifdef TARGET_X86_64
2396 if (CODE64(s)) {
2397 gen_op_movq_A0_reg(R_ESP);
2398 if (s->dflag) {
2399 gen_op_addq_A0_im(-8);
2400 gen_op_st_v(s, MO_64, cpu_T[0], cpu_A0);
2401 } else {
2402 gen_op_addq_A0_im(-2);
2403 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
2404 }
2405 gen_op_mov_reg_A0(2, R_ESP);
2406 } else
2407 #endif
2408 {
2409 gen_op_movl_A0_reg(R_ESP);
2410 if (!s->dflag)
2411 gen_op_addl_A0_im(-2);
2412 else
2413 gen_op_addl_A0_im(-4);
2414 if (s->ss32) {
2415 if (s->addseg) {
2416 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2417 gen_op_addl_A0_seg(s, R_SS);
2418 }
2419 } else {
2420 gen_op_andl_A0_ffff();
2421 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2422 gen_op_addl_A0_seg(s, R_SS);
2423 }
2424 gen_op_st_v(s, s->dflag + 1, cpu_T[0], cpu_A0);
2425 if (s->ss32 && !s->addseg)
2426 gen_op_mov_reg_A0(1, R_ESP);
2427 else
2428 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
2429 }
2430 }
2431
2432 /* generate a push of T1; it depends on ss32, addseg and dflag.
2433 Slower version, only used for call Ev */
2434 static void gen_push_T1(DisasContext *s)
2435 {
2436 #ifdef TARGET_X86_64
2437 if (CODE64(s)) {
2438 gen_op_movq_A0_reg(R_ESP);
2439 if (s->dflag) {
2440 gen_op_addq_A0_im(-8);
2441 gen_op_st_v(s, MO_64, cpu_T[1], cpu_A0);
2442 } else {
2443 gen_op_addq_A0_im(-2);
2444 gen_op_st_v(s, MO_16, cpu_T[1], cpu_A0);
2445 }
2446 gen_op_mov_reg_A0(2, R_ESP);
2447 } else
2448 #endif
2449 {
2450 gen_op_movl_A0_reg(R_ESP);
2451 if (!s->dflag)
2452 gen_op_addl_A0_im(-2);
2453 else
2454 gen_op_addl_A0_im(-4);
2455 if (s->ss32) {
2456 if (s->addseg) {
2457 gen_op_addl_A0_seg(s, R_SS);
2458 }
2459 } else {
2460 gen_op_andl_A0_ffff();
2461 gen_op_addl_A0_seg(s, R_SS);
2462 }
2463 gen_op_st_v(s, s->dflag + 1, cpu_T[1], cpu_A0);
2464
2465 if (s->ss32 && !s->addseg)
2466 gen_op_mov_reg_A0(1, R_ESP);
2467 else
2468 gen_stack_update(s, (-2) << s->dflag);
2469 }
2470 }
2471
2472 /* a two-step pop is necessary for precise exceptions */
2473 static void gen_pop_T0(DisasContext *s)
2474 {
2475 #ifdef TARGET_X86_64
2476 if (CODE64(s)) {
2477 gen_op_movq_A0_reg(R_ESP);
2478 gen_op_ld_v(s, s->dflag ? MO_64 : MO_16, cpu_T[0], cpu_A0);
2479 } else
2480 #endif
2481 {
2482 gen_op_movl_A0_reg(R_ESP);
2483 if (s->ss32) {
2484 if (s->addseg)
2485 gen_op_addl_A0_seg(s, R_SS);
2486 } else {
2487 gen_op_andl_A0_ffff();
2488 gen_op_addl_A0_seg(s, R_SS);
2489 }
2490 gen_op_ld_v(s, s->dflag + 1, cpu_T[0], cpu_A0);
2491 }
2492 }
2493
2494 static void gen_pop_update(DisasContext *s)
2495 {
2496 #ifdef TARGET_X86_64
2497 if (CODE64(s) && s->dflag) {
2498 gen_stack_update(s, 8);
2499 } else
2500 #endif
2501 {
2502 gen_stack_update(s, 2 << s->dflag);
2503 }
2504 }
2505
2506 static void gen_stack_A0(DisasContext *s)
2507 {
2508 gen_op_movl_A0_reg(R_ESP);
2509 if (!s->ss32)
2510 gen_op_andl_A0_ffff();
2511 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2512 if (s->addseg)
2513 gen_op_addl_A0_seg(s, R_SS);
2514 }
2515
2516 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2517 static void gen_pusha(DisasContext *s)
2518 {
2519 int i;
2520 gen_op_movl_A0_reg(R_ESP);
2521 gen_op_addl_A0_im(-16 << s->dflag);
2522 if (!s->ss32)
2523 gen_op_andl_A0_ffff();
2524 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2525 if (s->addseg)
2526 gen_op_addl_A0_seg(s, R_SS);
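/* the loop below stores EDI first at the lowest address and EAX last,
   matching the architectural PUSHA push order EAX..EDI */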
2527 for(i = 0;i < 8; i++) {
2528 gen_op_mov_TN_reg(MO_32, 0, 7 - i);
2529 gen_op_st_v(s, MO_16 + s->dflag, cpu_T[0], cpu_A0);
2530 gen_op_addl_A0_im(2 << s->dflag);
2531 }
2532 gen_op_mov_reg_T1(MO_16 + s->ss32, R_ESP);
2533 }
2534
2535 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2536 static void gen_popa(DisasContext *s)
2537 {
2538 int i;
2539 gen_op_movl_A0_reg(R_ESP);
2540 if (!s->ss32)
2541 gen_op_andl_A0_ffff();
2542 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2543 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2544 if (s->addseg)
2545 gen_op_addl_A0_seg(s, R_SS);
2546 for(i = 0;i < 8; i++) {
2547 /* ESP is not reloaded */
2548 if (i != 3) {
2549 gen_op_ld_v(s, MO_16 + s->dflag, cpu_T[0], cpu_A0);
2550 gen_op_mov_reg_T0(MO_16 + s->dflag, 7 - i);
2551 }
2552 gen_op_addl_A0_im(2 << s->dflag);
2553 }
2554 gen_op_mov_reg_T1(MO_16 + s->ss32, R_ESP);
2555 }
2556
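/* ENTER: push EBP, let the helper copy 'level' nested frame pointers,
   then reserve esp_addend bytes of locals below the new frame. */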
2557 static void gen_enter(DisasContext *s, int esp_addend, int level)
2558 {
2559 int ot, opsize;
2560
2561 level &= 0x1f;
2562 #ifdef TARGET_X86_64
2563 if (CODE64(s)) {
2564 ot = s->dflag ? MO_64 : MO_16;
2565 opsize = 1 << ot;
2566
2567 gen_op_movl_A0_reg(R_ESP);
2568 gen_op_addq_A0_im(-opsize);
2569 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2570
2571 /* push bp */
2572 gen_op_mov_TN_reg(MO_32, 0, R_EBP);
2573 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2574 if (level) {
2575 /* XXX: must save state */
2576 gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
2577 tcg_const_i32((ot == MO_64)),
2578 cpu_T[1]);
2579 }
2580 gen_op_mov_reg_T1(ot, R_EBP);
2581 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2582 gen_op_mov_reg_T1(MO_64, R_ESP);
2583 } else
2584 #endif
2585 {
2586 ot = s->dflag + MO_16;
2587 opsize = 2 << s->dflag;
2588
2589 gen_op_movl_A0_reg(R_ESP);
2590 gen_op_addl_A0_im(-opsize);
2591 if (!s->ss32)
2592 gen_op_andl_A0_ffff();
2593 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2594 if (s->addseg)
2595 gen_op_addl_A0_seg(s, R_SS);
2596 /* push bp */
2597 gen_op_mov_TN_reg(MO_32, 0, R_EBP);
2598 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2599 if (level) {
2600 /* XXX: must save state */
2601 gen_helper_enter_level(cpu_env, tcg_const_i32(level),
2602 tcg_const_i32(s->dflag),
2603 cpu_T[1]);
2604 }
2605 gen_op_mov_reg_T1(ot, R_EBP);
2606 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2607 gen_op_mov_reg_T1(MO_16 + s->ss32, R_ESP);
2608 }
2609 }
2610
2611 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2612 {
2613 gen_update_cc_op(s);
2614 gen_jmp_im(cur_eip);
2615 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
2616 s->is_jmp = DISAS_TB_JUMP;
2617 }
2618
2619 /* an interrupt is different from an exception because of the
2620 privilege checks */
2621 static void gen_interrupt(DisasContext *s, int intno,
2622 target_ulong cur_eip, target_ulong next_eip)
2623 {
2624 gen_update_cc_op(s);
2625 gen_jmp_im(cur_eip);
2626 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
2627 tcg_const_i32(next_eip - cur_eip));
2628 s->is_jmp = DISAS_TB_JUMP;
2629 }
2630
2631 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2632 {
2633 gen_update_cc_op(s);
2634 gen_jmp_im(cur_eip);
2635 gen_helper_debug(cpu_env);
2636 s->is_jmp = DISAS_TB_JUMP;
2637 }
2638
2639 /* generate a generic end of block. A trace exception is also
2640 generated if needed */
2641 static void gen_eob(DisasContext *s)
2642 {
2643 gen_update_cc_op(s);
2644 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2645 gen_helper_reset_inhibit_irq(cpu_env);
2646 }
2647 if (s->tb->flags & HF_RF_MASK) {
2648 gen_helper_reset_rf(cpu_env);
2649 }
2650 if (s->singlestep_enabled) {
2651 gen_helper_debug(cpu_env);
2652 } else if (s->tf) {
2653 gen_helper_single_step(cpu_env);
2654 } else {
2655 tcg_gen_exit_tb(0);
2656 }
2657 s->is_jmp = DISAS_TB_JUMP;
2658 }
2659
2660 /* generate a jump to eip. No segment change may happen before this,
2661 as a direct jump to the next block may occur */
2662 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2663 {
2664 gen_update_cc_op(s);
2665 set_cc_op(s, CC_OP_DYNAMIC);
2666 if (s->jmp_opt) {
2667 gen_goto_tb(s, tb_num, eip);
2668 s->is_jmp = DISAS_TB_JUMP;
2669 } else {
2670 gen_jmp_im(eip);
2671 gen_eob(s);
2672 }
2673 }
2674
2675 static void gen_jmp(DisasContext *s, target_ulong eip)
2676 {
2677 gen_jmp_tb(s, eip, 0);
2678 }
2679
2680 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2681 {
2682 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
2683 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2684 }
2685
2686 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2687 {
2688 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2689 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
2690 }
2691
2692 static inline void gen_ldo_env_A0(DisasContext *s, int offset)
2693 {
2694 int mem_index = s->mem_index;
2695 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
2696 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2697 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2698 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
2699 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2700 }
2701
2702 static inline void gen_sto_env_A0(DisasContext *s, int offset)
2703 {
2704 int mem_index = s->mem_index;
2705 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2706 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
2707 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2708 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2709 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
2710 }
2711
2712 static inline void gen_op_movo(int d_offset, int s_offset)
2713 {
2714 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2715 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2716 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2717 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
2718 }
2719
2720 static inline void gen_op_movq(int d_offset, int s_offset)
2721 {
2722 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2723 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2724 }
2725
2726 static inline void gen_op_movl(int d_offset, int s_offset)
2727 {
2728 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2729 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2730 }
2731
2732 static inline void gen_op_movq_env_0(int d_offset)
2733 {
2734 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2735 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2736 }
2737
2738 typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2739 typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2740 typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2741 typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2742 typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2743 typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2744 TCGv_i32 val);
2745 typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
2746 typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2747 TCGv val);
2748
2749 #define SSE_SPECIAL ((void *)1)
2750 #define SSE_DUMMY ((void *)2)
2751
2752 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2753 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2754 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
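/* Each sse_op_table1 row has four slots selected by the mandatory
   prefix: [0] none (ps), [1] 0x66 (pd), [2] 0xf3 (ss), [3] 0xf2 (sd). */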
2755
2756 static const SSEFunc_0_epp sse_op_table1[256][4] = {
2757 /* 3DNow! extensions */
2758 [0x0e] = { SSE_DUMMY }, /* femms */
2759 [0x0f] = { SSE_DUMMY }, /* pf... */
2760 /* pure SSE operations */
2761 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2762 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2763 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2764 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2765 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2766 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2767 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2768 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2769
2770 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2771 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2772 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2773 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2774 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2775 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
2776 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2777 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2778 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2779 [0x51] = SSE_FOP(sqrt),
2780 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2781 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2782 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2783 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2784 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2785 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2786 [0x58] = SSE_FOP(add),
2787 [0x59] = SSE_FOP(mul),
2788 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2789 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2790 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2791 [0x5c] = SSE_FOP(sub),
2792 [0x5d] = SSE_FOP(min),
2793 [0x5e] = SSE_FOP(div),
2794 [0x5f] = SSE_FOP(max),
2795
2796 [0xc2] = SSE_FOP(cmpeq),
2797 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2798 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
2799
2800 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2801 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2802 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2803
2804 /* MMX ops and their SSE extensions */
2805 [0x60] = MMX_OP2(punpcklbw),
2806 [0x61] = MMX_OP2(punpcklwd),
2807 [0x62] = MMX_OP2(punpckldq),
2808 [0x63] = MMX_OP2(packsswb),
2809 [0x64] = MMX_OP2(pcmpgtb),
2810 [0x65] = MMX_OP2(pcmpgtw),
2811 [0x66] = MMX_OP2(pcmpgtl),
2812 [0x67] = MMX_OP2(packuswb),
2813 [0x68] = MMX_OP2(punpckhbw),
2814 [0x69] = MMX_OP2(punpckhwd),
2815 [0x6a] = MMX_OP2(punpckhdq),
2816 [0x6b] = MMX_OP2(packssdw),
2817 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2818 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2819 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2820 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2821 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2822 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2823 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2824 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2825 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2826 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2827 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2828 [0x74] = MMX_OP2(pcmpeqb),
2829 [0x75] = MMX_OP2(pcmpeqw),
2830 [0x76] = MMX_OP2(pcmpeql),
2831 [0x77] = { SSE_DUMMY }, /* emms */
2832 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2833 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
2834 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2835 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2836 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2837 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2838 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2839 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2840 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2841 [0xd1] = MMX_OP2(psrlw),
2842 [0xd2] = MMX_OP2(psrld),
2843 [0xd3] = MMX_OP2(psrlq),
2844 [0xd4] = MMX_OP2(paddq),
2845 [0xd5] = MMX_OP2(pmullw),
2846 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movq2dq, movdq2q */
2847 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2848 [0xd8] = MMX_OP2(psubusb),
2849 [0xd9] = MMX_OP2(psubusw),
2850 [0xda] = MMX_OP2(pminub),
2851 [0xdb] = MMX_OP2(pand),
2852 [0xdc] = MMX_OP2(paddusb),
2853 [0xdd] = MMX_OP2(paddusw),
2854 [0xde] = MMX_OP2(pmaxub),
2855 [0xdf] = MMX_OP2(pandn),
2856 [0xe0] = MMX_OP2(pavgb),
2857 [0xe1] = MMX_OP2(psraw),
2858 [0xe2] = MMX_OP2(psrad),
2859 [0xe3] = MMX_OP2(pavgw),
2860 [0xe4] = MMX_OP2(pmulhuw),
2861 [0xe5] = MMX_OP2(pmulhw),
2862 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
2863 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2864 [0xe8] = MMX_OP2(psubsb),
2865 [0xe9] = MMX_OP2(psubsw),
2866 [0xea] = MMX_OP2(pminsw),
2867 [0xeb] = MMX_OP2(por),
2868 [0xec] = MMX_OP2(paddsb),
2869 [0xed] = MMX_OP2(paddsw),
2870 [0xee] = MMX_OP2(pmaxsw),
2871 [0xef] = MMX_OP2(pxor),
2872 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2873 [0xf1] = MMX_OP2(psllw),
2874 [0xf2] = MMX_OP2(pslld),
2875 [0xf3] = MMX_OP2(psllq),
2876 [0xf4] = MMX_OP2(pmuludq),
2877 [0xf5] = MMX_OP2(pmaddwd),
2878 [0xf6] = MMX_OP2(psadbw),
2879 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
2880 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
2881 [0xf8] = MMX_OP2(psubb),
2882 [0xf9] = MMX_OP2(psubw),
2883 [0xfa] = MMX_OP2(psubl),
2884 [0xfb] = MMX_OP2(psubq),
2885 [0xfc] = MMX_OP2(paddb),
2886 [0xfd] = MMX_OP2(paddw),
2887 [0xfe] = MMX_OP2(paddl),
2888 };
2889
2890 static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
2891 [0 + 2] = MMX_OP2(psrlw),
2892 [0 + 4] = MMX_OP2(psraw),
2893 [0 + 6] = MMX_OP2(psllw),
2894 [8 + 2] = MMX_OP2(psrld),
2895 [8 + 4] = MMX_OP2(psrad),
2896 [8 + 6] = MMX_OP2(pslld),
2897 [16 + 2] = MMX_OP2(psrlq),
2898 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
2899 [16 + 6] = MMX_OP2(psllq),
2900 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
2901 };
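/* sse_op_table2 is indexed by group * 8 + the ModRM reg field, where
   groups 0/1/2 correspond to opcodes 0x71/0x72/0x73 (word/dword/qword
   shifts) and /2, /4, /6 select srl, sra, sll respectively. */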
2902
2903 static const SSEFunc_0_epi sse_op_table3ai[] = {
2904 gen_helper_cvtsi2ss,
2905 gen_helper_cvtsi2sd
2906 };
2907
2908 #ifdef TARGET_X86_64
2909 static const SSEFunc_0_epl sse_op_table3aq[] = {
2910 gen_helper_cvtsq2ss,
2911 gen_helper_cvtsq2sd
2912 };
2913 #endif
2914
2915 static const SSEFunc_i_ep sse_op_table3bi[] = {
2916 gen_helper_cvttss2si,
2917 gen_helper_cvtss2si,
2918 gen_helper_cvttsd2si,
2919 gen_helper_cvtsd2si
2920 };
2921
2922 #ifdef TARGET_X86_64
2923 static const SSEFunc_l_ep sse_op_table3bq[] = {
2924 gen_helper_cvttss2sq,
2925 gen_helper_cvtss2sq,
2926 gen_helper_cvttsd2sq,
2927 gen_helper_cvtsd2sq
2928 };
2929 #endif
2930
2931 static const SSEFunc_0_epp sse_op_table4[8][4] = {
2932 SSE_FOP(cmpeq),
2933 SSE_FOP(cmplt),
2934 SSE_FOP(cmple),
2935 SSE_FOP(cmpunord),
2936 SSE_FOP(cmpneq),
2937 SSE_FOP(cmpnlt),
2938 SSE_FOP(cmpnle),
2939 SSE_FOP(cmpord),
2940 };
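/* sse_op_table4 rows are selected by the CMPccPS/PD/SS/SD imm8
   predicate: 0..7 = eq, lt, le, unord, neq, nlt, nle, ord. */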
2941
2942 static const SSEFunc_0_epp sse_op_table5[256] = {
2943 [0x0c] = gen_helper_pi2fw,
2944 [0x0d] = gen_helper_pi2fd,
2945 [0x1c] = gen_helper_pf2iw,
2946 [0x1d] = gen_helper_pf2id,
2947 [0x8a] = gen_helper_pfnacc,
2948 [0x8e] = gen_helper_pfpnacc,
2949 [0x90] = gen_helper_pfcmpge,
2950 [0x94] = gen_helper_pfmin,
2951 [0x96] = gen_helper_pfrcp,
2952 [0x97] = gen_helper_pfrsqrt,
2953 [0x9a] = gen_helper_pfsub,
2954 [0x9e] = gen_helper_pfadd,
2955 [0xa0] = gen_helper_pfcmpgt,
2956 [0xa4] = gen_helper_pfmax,
2957 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2958 [0xa7] = gen_helper_movq, /* pfrsqit1 */
2959 [0xaa] = gen_helper_pfsubr,
2960 [0xae] = gen_helper_pfacc,
2961 [0xb0] = gen_helper_pfcmpeq,
2962 [0xb4] = gen_helper_pfmul,
2963 [0xb6] = gen_helper_movq, /* pfrcpit2 */
2964 [0xb7] = gen_helper_pmulhrw_mmx,
2965 [0xbb] = gen_helper_pswapd,
2966 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
2967 };
2968
2969 struct SSEOpHelper_epp {
2970 SSEFunc_0_epp op[2];
2971 uint32_t ext_mask;
2972 };
2973
2974 struct SSEOpHelper_eppi {
2975 SSEFunc_0_eppi op[2];
2976 uint32_t ext_mask;
2977 };
2978
2979 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
2980 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
2981 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
2982 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
2983 #define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
2984 CPUID_EXT_PCLMULQDQ }
2985 #define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
2986
2987 static const struct SSEOpHelper_epp sse_op_table6[256] = {
2988 [0x00] = SSSE3_OP(pshufb),
2989 [0x01] = SSSE3_OP(phaddw),
2990 [0x02] = SSSE3_OP(phaddd),
2991 [0x03] = SSSE3_OP(phaddsw),
2992 [0x04] = SSSE3_OP(pmaddubsw),
2993 [0x05] = SSSE3_OP(phsubw),
2994 [0x06] = SSSE3_OP(phsubd),
2995 [0x07] = SSSE3_OP(phsubsw),
2996 [0x08] = SSSE3_OP(psignb),
2997 [0x09] = SSSE3_OP(psignw),
2998 [0x0a] = SSSE3_OP(psignd),
2999 [0x0b] = SSSE3_OP(pmulhrsw),
3000 [0x10] = SSE41_OP(pblendvb),
3001 [0x14] = SSE41_OP(blendvps),
3002 [0x15] = SSE41_OP(blendvpd),
3003 [0x17] = SSE41_OP(ptest),
3004 [0x1c] = SSSE3_OP(pabsb),
3005 [0x1d] = SSSE3_OP(pabsw),
3006 [0x1e] = SSSE3_OP(pabsd),
3007 [0x20] = SSE41_OP(pmovsxbw),
3008 [0x21] = SSE41_OP(pmovsxbd),
3009 [0x22] = SSE41_OP(pmovsxbq),
3010 [0x23] = SSE41_OP(pmovsxwd),
3011 [0x24] = SSE41_OP(pmovsxwq),
3012 [0x25] = SSE41_OP(pmovsxdq),
3013 [0x28] = SSE41_OP(pmuldq),
3014 [0x29] = SSE41_OP(pcmpeqq),
3015 [0x2a] = SSE41_SPECIAL, /* movntdqa */
3016 [0x2b] = SSE41_OP(packusdw),
3017 [0x30] = SSE41_OP(pmovzxbw),
3018 [0x31] = SSE41_OP(pmovzxbd),
3019 [0x32] = SSE41_OP(pmovzxbq),
3020 [0x33] = SSE41_OP(pmovzxwd),
3021 [0x34] = SSE41_OP(pmovzxwq),
3022 [0x35] = SSE41_OP(pmovzxdq),
3023 [0x37] = SSE42_OP(pcmpgtq),
3024 [0x38] = SSE41_OP(pminsb),
3025 [0x39] = SSE41_OP(pminsd),
3026 [0x3a] = SSE41_OP(pminuw),
3027 [0x3b] = SSE41_OP(pminud),
3028 [0x3c] = SSE41_OP(pmaxsb),
3029 [0x3d] = SSE41_OP(pmaxsd),
3030 [0x3e] = SSE41_OP(pmaxuw),
3031 [0x3f] = SSE41_OP(pmaxud),
3032 [0x40] = SSE41_OP(pmulld),
3033 [0x41] = SSE41_OP(phminposuw),
3034 [0xdb] = AESNI_OP(aesimc),
3035 [0xdc] = AESNI_OP(aesenc),
3036 [0xdd] = AESNI_OP(aesenclast),
3037 [0xde] = AESNI_OP(aesdec),
3038 [0xdf] = AESNI_OP(aesdeclast),
3039 };
3040
3041 static const struct SSEOpHelper_eppi sse_op_table7[256] = {
3042 [0x08] = SSE41_OP(roundps),
3043 [0x09] = SSE41_OP(roundpd),
3044 [0x0a] = SSE41_OP(roundss),
3045 [0x0b] = SSE41_OP(roundsd),
3046 [0x0c] = SSE41_OP(blendps),
3047 [0x0d] = SSE41_OP(blendpd),
3048 [0x0e] = SSE41_OP(pblendw),
3049 [0x0f] = SSSE3_OP(palignr),
3050 [0x14] = SSE41_SPECIAL, /* pextrb */
3051 [0x15] = SSE41_SPECIAL, /* pextrw */
3052 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
3053 [0x17] = SSE41_SPECIAL, /* extractps */
3054 [0x20] = SSE41_SPECIAL, /* pinsrb */
3055 [0x21] = SSE41_SPECIAL, /* insertps */
3056 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
3057 [0x40] = SSE41_OP(dpps),
3058 [0x41] = SSE41_OP(dppd),
3059 [0x42] = SSE41_OP(mpsadbw),
3060 [0x44] = PCLMULQDQ_OP(pclmulqdq),
3061 [0x60] = SSE42_OP(pcmpestrm),
3062 [0x61] = SSE42_OP(pcmpestri),
3063 [0x62] = SSE42_OP(pcmpistrm),
3064 [0x63] = SSE42_OP(pcmpistri),
3065 [0xdf] = AESNI_OP(aeskeygenassist),
3066 };
3067
3068 static void gen_sse(CPUX86State *env, DisasContext *s, int b,
3069 target_ulong pc_start, int rex_r)
3070 {
3071 int b1, op1_offset, op2_offset, is_xmm, val, ot;
3072 int modrm, mod, rm, reg;
3073 SSEFunc_0_epp sse_fn_epp;
3074 SSEFunc_0_eppi sse_fn_eppi;
3075 SSEFunc_0_ppi sse_fn_ppi;
3076 SSEFunc_0_eppt sse_fn_eppt;
3077
3078 b &= 0xff;
3079 if (s->prefix & PREFIX_DATA)
3080 b1 = 1;
3081 else if (s->prefix & PREFIX_REPZ)
3082 b1 = 2;
3083 else if (s->prefix & PREFIX_REPNZ)
3084 b1 = 3;
3085 else
3086 b1 = 0;
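/* b1 selects the sse_op_table1 column by mandatory prefix:
   0 = none, 1 = 0x66, 2 = 0xf3, 3 = 0xf2 */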
3087 sse_fn_epp = sse_op_table1[b][b1];
3088 if (!sse_fn_epp) {
3089 goto illegal_op;
3090 }
3091 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3092 is_xmm = 1;
3093 } else {
3094 if (b1 == 0) {
3095 /* MMX case */
3096 is_xmm = 0;
3097 } else {
3098 is_xmm = 1;
3099 }
3100 }
3101 /* simple MMX/SSE operation */
3102 if (s->flags & HF_TS_MASK) {
3103 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3104 return;
3105 }
3106 if (s->flags & HF_EM_MASK) {
3107 illegal_op:
3108 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3109 return;
3110 }
3111 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3112 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3113 goto illegal_op;
3114 if (b == 0x0e) {
3115 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3116 goto illegal_op;
3117 /* femms */
3118 gen_helper_emms(cpu_env);
3119 return;
3120 }
3121 if (b == 0x77) {
3122 /* emms */
3123 gen_helper_emms(cpu_env);
3124 return;
3125 }
3126 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3127 the static cpu state) */
3128 if (!is_xmm) {
3129 gen_helper_enter_mmx(cpu_env);
3130 }
3131
3132 modrm = cpu_ldub_code(env, s->pc++);
3133 reg = ((modrm >> 3) & 7);
3134 if (is_xmm)
3135 reg |= rex_r;
3136 mod = (modrm >> 6) & 3;
3137 if (sse_fn_epp == SSE_SPECIAL) {
3138 b |= (b1 << 8);
3139 switch(b) {
3140 case 0x0e7: /* movntq */
3141 if (mod == 3)
3142 goto illegal_op;
3143 gen_lea_modrm(env, s, modrm);
3144 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3145 break;
3146 case 0x1e7: /* movntdq */
3147 case 0x02b: /* movntps */
3148 case 0x12b: /* movntpd */
3149 if (mod == 3)
3150 goto illegal_op;
3151 gen_lea_modrm(env, s, modrm);
3152 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3153 break;
3154 case 0x3f0: /* lddqu */
3155 if (mod == 3)
3156 goto illegal_op;
3157 gen_lea_modrm(env, s, modrm);
3158 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3159 break;
3160 case 0x22b: /* movntss */
3161 case 0x32b: /* movntsd */
3162 if (mod == 3)
3163 goto illegal_op;
3164 gen_lea_modrm(env, s, modrm);
3165 if (b1 & 1) {
3166 gen_stq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3167 } else {
3168 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3169 xmm_regs[reg].XMM_L(0)));
3170 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
3171 }
3172 break;
3173 case 0x6e: /* movd mm, ea */
3174 #ifdef TARGET_X86_64
3175 if (s->dflag == 2) {
3176 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
3177 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3178 } else
3179 #endif
3180 {
3181 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
3182 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3183 offsetof(CPUX86State,fpregs[reg].mmx));
3184 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3185 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3186 }
3187 break;
3188 case 0x16e: /* movd xmm, ea */
3189 #ifdef TARGET_X86_64
3190 if (s->dflag == 2) {
3191 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
3192 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3193 offsetof(CPUX86State,xmm_regs[reg]));
3194 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3195 } else
3196 #endif
3197 {
3198 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
3199 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3200 offsetof(CPUX86State,xmm_regs[reg]));
3201 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3202 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3203 }
3204 break;
3205 case 0x6f: /* movq mm, ea */
3206 if (mod != 3) {
3207 gen_lea_modrm(env, s, modrm);
3208 gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3209 } else {
3210 rm = (modrm & 7);
3211 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3212 offsetof(CPUX86State,fpregs[rm].mmx));
3213 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3214 offsetof(CPUX86State,fpregs[reg].mmx));
3215 }
3216 break;
3217 case 0x010: /* movups */
3218 case 0x110: /* movupd */
3219 case 0x028: /* movaps */
3220 case 0x128: /* movapd */
3221 case 0x16f: /* movdqa xmm, ea */
3222 case 0x26f: /* movdqu xmm, ea */
3223 if (mod != 3) {
3224 gen_lea_modrm(env, s, modrm);
3225 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3226 } else {
3227 rm = (modrm & 7) | REX_B(s);
3228 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3229 offsetof(CPUX86State,xmm_regs[rm]));
3230 }
3231 break;
3232 case 0x210: /* movss xmm, ea */
3233 if (mod != 3) {
3234 gen_lea_modrm(env, s, modrm);
3235 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
3236 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3237 tcg_gen_movi_tl(cpu_T[0], 0);
3238 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3239 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3240 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3241 } else {
3242 rm = (modrm & 7) | REX_B(s);
3243 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3244 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3245 }
3246 break;
3247 case 0x310: /* movsd xmm, ea */
3248 if (mod != 3) {
3249 gen_lea_modrm(env, s, modrm);
3250 gen_ldq_env_A0(s, offsetof(CPUX86State,
3251 xmm_regs[reg].XMM_Q(0)));
3252 tcg_gen_movi_tl(cpu_T[0], 0);
3253 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3254 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3255 } else {
3256 rm = (modrm & 7) | REX_B(s);
3257 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3258 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3259 }
3260 break;
3261 case 0x012: /* movlps */
3262 case 0x112: /* movlpd */
3263 if (mod != 3) {
3264 gen_lea_modrm(env, s, modrm);
3265 gen_ldq_env_A0(s, offsetof(CPUX86State,
3266 xmm_regs[reg].XMM_Q(0)));
3267 } else {
3268 /* movhlps */
3269 rm = (modrm & 7) | REX_B(s);
3270 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3271 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3272 }
3273 break;
3274 case 0x212: /* movsldup */
3275 if (mod != 3) {
3276 gen_lea_modrm(env, s, modrm);
3277 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3278 } else {
3279 rm = (modrm & 7) | REX_B(s);
3280 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3281 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3282 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3283 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3284 }
3285 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3286 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3287 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3288 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3289 break;
3290 case 0x312: /* movddup */
3291 if (mod != 3) {
3292 gen_lea_modrm(env, s, modrm);
3293 gen_ldq_env_A0(s, offsetof(CPUX86State,
3294 xmm_regs[reg].XMM_Q(0)));
3295 } else {
3296 rm = (modrm & 7) | REX_B(s);
3297 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3298 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3299 }
3300 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3301 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3302 break;
3303 case 0x016: /* movhps */
3304 case 0x116: /* movhpd */
3305 if (mod != 3) {
3306 gen_lea_modrm(env, s, modrm);
3307 gen_ldq_env_A0(s, offsetof(CPUX86State,
3308 xmm_regs[reg].XMM_Q(1)));
3309 } else {
3310 /* movlhps */
3311 rm = (modrm & 7) | REX_B(s);
3312 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3313 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3314 }
3315 break;
3316 case 0x216: /* movshdup */
3317 if (mod != 3) {
3318 gen_lea_modrm(env, s, modrm);
3319 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3320 } else {
3321 rm = (modrm & 7) | REX_B(s);
3322 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3323 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3324 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3325 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3326 }
3327 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3328 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3329 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3330 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3331 break;
3332 case 0x178:
3333 case 0x378:
3334 {
3335 int bit_index, field_length;
3336
3337 if (b1 == 1 && reg != 0)
3338 goto illegal_op;
3339 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3340 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
3341 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3342 offsetof(CPUX86State,xmm_regs[reg]));
3343 if (b1 == 1)
3344 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3345 tcg_const_i32(bit_index),
3346 tcg_const_i32(field_length));
3347 else
3348 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3349 tcg_const_i32(bit_index),
3350 tcg_const_i32(field_length));
3351 }
3352 break;
3353 case 0x7e: /* movd ea, mm */
3354 #ifdef TARGET_X86_64
3355 if (s->dflag == 2) {
3356 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3357 offsetof(CPUX86State,fpregs[reg].mmx));
3358 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3359 } else
3360 #endif
3361 {
3362 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3363 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3364 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3365 }
3366 break;
3367 case 0x17e: /* movd ea, xmm */
3368 #ifdef TARGET_X86_64
3369 if (s->dflag == 2) {
3370 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3371 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3372 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3373 } else
3374 #endif
3375 {
3376 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3377 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3378 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3379 }
3380 break;
3381 case 0x27e: /* movq xmm, ea */
3382 if (mod != 3) {
3383 gen_lea_modrm(env, s, modrm);
3384 gen_ldq_env_A0(s, offsetof(CPUX86State,
3385 xmm_regs[reg].XMM_Q(0)));
3386 } else {
3387 rm = (modrm & 7) | REX_B(s);
3388 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3389 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3390 }
3391 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3392 break;
3393 case 0x7f: /* movq ea, mm */
3394 if (mod != 3) {
3395 gen_lea_modrm(env, s, modrm);
3396 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3397 } else {
3398 rm = (modrm & 7);
3399 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3400 offsetof(CPUX86State,fpregs[reg].mmx));
3401 }
3402 break;
3403 case 0x011: /* movups */
3404 case 0x111: /* movupd */
3405 case 0x029: /* movaps */
3406 case 0x129: /* movapd */
3407 case 0x17f: /* movdqa ea, xmm */
3408 case 0x27f: /* movdqu ea, xmm */
3409 if (mod != 3) {
3410 gen_lea_modrm(env, s, modrm);
3411 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3412 } else {
3413 rm = (modrm & 7) | REX_B(s);
3414 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3415 offsetof(CPUX86State,xmm_regs[reg]));
3416 }
3417 break;
3418 case 0x211: /* movss ea, xmm */
3419 if (mod != 3) {
3420 gen_lea_modrm(env, s, modrm);
3421 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3422 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
3423 } else {
3424 rm = (modrm & 7) | REX_B(s);
3425 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3426 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3427 }
3428 break;
3429 case 0x311: /* movsd ea, xmm */
3430 if (mod != 3) {
3431 gen_lea_modrm(env, s, modrm);
3432 gen_stq_env_A0(s, offsetof(CPUX86State,
3433 xmm_regs[reg].XMM_Q(0)));
3434 } else {
3435 rm = (modrm & 7) | REX_B(s);
3436 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3437 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3438 }
3439 break;
3440 case 0x013: /* movlps */
3441 case 0x113: /* movlpd */
3442 if (mod != 3) {
3443 gen_lea_modrm(env, s, modrm);
3444 gen_stq_env_A0(s, offsetof(CPUX86State,
3445 xmm_regs[reg].XMM_Q(0)));
3446 } else {
3447 goto illegal_op;
3448 }
3449 break;
3450 case 0x017: /* movhps */
3451 case 0x117: /* movhpd */
3452 if (mod != 3) {
3453 gen_lea_modrm(env, s, modrm);
3454 gen_stq_env_A0(s, offsetof(CPUX86State,
3455 xmm_regs[reg].XMM_Q(1)));
3456 } else {
3457 goto illegal_op;
3458 }
3459 break;
3460 case 0x71: /* shift mm, im */
3461 case 0x72:
3462 case 0x73:
3463 case 0x171: /* shift xmm, im */
3464 case 0x172:
3465 case 0x173:
3466 if (b1 >= 2) {
3467 goto illegal_op;
3468 }
3469 val = cpu_ldub_code(env, s->pc++);
3470 if (is_xmm) {
3471 tcg_gen_movi_tl(cpu_T[0], val);
3472 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3473 tcg_gen_movi_tl(cpu_T[0], 0);
3474 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3475 op1_offset = offsetof(CPUX86State,xmm_t0);
3476 } else {
3477 tcg_gen_movi_tl(cpu_T[0], val);
3478 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3479 tcg_gen_movi_tl(cpu_T[0], 0);
3480 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3481 op1_offset = offsetof(CPUX86State,mmx_t0);
3482 }
3483 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3484 (((modrm >> 3)) & 7)][b1];
3485 if (!sse_fn_epp) {
3486 goto illegal_op;
3487 }
3488 if (is_xmm) {
3489 rm = (modrm & 7) | REX_B(s);
3490 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3491 } else {
3492 rm = (modrm & 7);
3493 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3494 }
3495 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3496 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3497 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3498 break;
3499 case 0x050: /* movmskps */
3500 rm = (modrm & 7) | REX_B(s);
3501 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3502 offsetof(CPUX86State,xmm_regs[rm]));
3503 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3504 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3505 break;
3506 case 0x150: /* movmskpd */
3507 rm = (modrm & 7) | REX_B(s);
3508 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3509 offsetof(CPUX86State,xmm_regs[rm]));
3510 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3511 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3512 break;
3513 case 0x02a: /* cvtpi2ps */
3514 case 0x12a: /* cvtpi2pd */
3515 gen_helper_enter_mmx(cpu_env);
3516 if (mod != 3) {
3517 gen_lea_modrm(env, s, modrm);
3518 op2_offset = offsetof(CPUX86State,mmx_t0);
3519 gen_ldq_env_A0(s, op2_offset);
3520 } else {
3521 rm = (modrm & 7);
3522 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3523 }
3524 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3525 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3526 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3527 switch(b >> 8) {
3528 case 0x0:
3529 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
3530 break;
3531 default:
3532 case 0x1:
3533 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
3534 break;
3535 }
3536 break;
3537 case 0x22a: /* cvtsi2ss */
3538 case 0x32a: /* cvtsi2sd */
3539 ot = (s->dflag == 2) ? MO_64 : MO_32;
3540 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3541 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3542 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3543 if (ot == MO_32) {
3544 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
3545 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3546 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
3547 } else {
3548 #ifdef TARGET_X86_64
3549 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3550 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
3551 #else
3552 goto illegal_op;
3553 #endif
3554 }
3555 break;
3556 case 0x02c: /* cvttps2pi */
3557 case 0x12c: /* cvttpd2pi */
3558 case 0x02d: /* cvtps2pi */
3559 case 0x12d: /* cvtpd2pi */
3560 gen_helper_enter_mmx(cpu_env);
3561 if (mod != 3) {
3562 gen_lea_modrm(env, s, modrm);
3563 op2_offset = offsetof(CPUX86State,xmm_t0);
3564 gen_ldo_env_A0(s, op2_offset);
3565 } else {
3566 rm = (modrm & 7) | REX_B(s);
3567 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3568 }
3569 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3570 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3571 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3572 switch(b) {
3573 case 0x02c:
3574 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3575 break;
3576 case 0x12c:
3577 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3578 break;
3579 case 0x02d:
3580 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3581 break;
3582 case 0x12d:
3583 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3584 break;
3585 }
3586 break;
3587 case 0x22c: /* cvttss2si */
3588 case 0x32c: /* cvttsd2si */
3589 case 0x22d: /* cvtss2si */
3590 case 0x32d: /* cvtsd2si */
3591 ot = (s->dflag == 2) ? MO_64 : MO_32;
3592 if (mod != 3) {
3593 gen_lea_modrm(env, s, modrm);
3594 if ((b >> 8) & 1) {
3595 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_Q(0)));
3596 } else {
3597 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
3598 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3599 }
3600 op2_offset = offsetof(CPUX86State,xmm_t0);
3601 } else {
3602 rm = (modrm & 7) | REX_B(s);
3603 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3604 }
3605 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3606 if (ot == MO_32) {
3607 SSEFunc_i_ep sse_fn_i_ep =
3608 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
3609 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3610 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3611 } else {
3612 #ifdef TARGET_X86_64
3613 SSEFunc_l_ep sse_fn_l_ep =
3614 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
3615 sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
3616 #else
3617 goto illegal_op;
3618 #endif
3619 }
3620 gen_op_mov_reg_T0(ot, reg);
3621 break;
3622 case 0xc4: /* pinsrw */
3623 case 0x1c4:
3624 s->rip_offset = 1;
3625 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
3626 val = cpu_ldub_code(env, s->pc++);
3627 if (b1) {
3628 val &= 7;
3629 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3630 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3631 } else {
3632 val &= 3;
3633 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3634 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3635 }
3636 break;
3637 case 0xc5: /* pextrw */
3638 case 0x1c5:
3639 if (mod != 3)
3640 goto illegal_op;
3641 ot = (s->dflag == 2) ? MO_64 : MO_32;
3642 val = cpu_ldub_code(env, s->pc++);
3643 if (b1) {
3644 val &= 7;
3645 rm = (modrm & 7) | REX_B(s);
3646 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3647 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3648 } else {
3649 val &= 3;
3650 rm = (modrm & 7);
3651 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3652 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3653 }
3654 reg = ((modrm >> 3) & 7) | rex_r;
3655 gen_op_mov_reg_T0(ot, reg);
3656 break;
3657 case 0x1d6: /* movq ea, xmm */
3658 if (mod != 3) {
3659 gen_lea_modrm(env, s, modrm);
3660 gen_stq_env_A0(s, offsetof(CPUX86State,
3661 xmm_regs[reg].XMM_Q(0)));
3662 } else {
3663 rm = (modrm & 7) | REX_B(s);
3664 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3665 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3666 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3667 }
3668 break;
3669 case 0x2d6: /* movq2dq */
3670 gen_helper_enter_mmx(cpu_env);
3671 rm = (modrm & 7);
3672 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3673 offsetof(CPUX86State,fpregs[rm].mmx));
3674 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3675 break;
3676 case 0x3d6: /* movdq2q */
3677 gen_helper_enter_mmx(cpu_env);
3678 rm = (modrm & 7) | REX_B(s);
3679 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3680 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3681 break;
3682 case 0xd7: /* pmovmskb */
3683 case 0x1d7:
3684 if (mod != 3)
3685 goto illegal_op;
3686 if (b1) {
3687 rm = (modrm & 7) | REX_B(s);
3688 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3689 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3690 } else {
3691 rm = (modrm & 7);
3692 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3693 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3694 }
3695 reg = ((modrm >> 3) & 7) | rex_r;
3696 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3697 break;
3698
3699 case 0x138:
3700 case 0x038:
3701 b = modrm;
3702 if ((b & 0xf0) == 0xf0) {
3703 goto do_0f_38_fx;
3704 }
3705 modrm = cpu_ldub_code(env, s->pc++);
3706 rm = modrm & 7;
3707 reg = ((modrm >> 3) & 7) | rex_r;
3708 mod = (modrm >> 6) & 3;
3709 if (b1 >= 2) {
3710 goto illegal_op;
3711 }
3712
3713 sse_fn_epp = sse_op_table6[b].op[b1];
3714 if (!sse_fn_epp) {
3715 goto illegal_op;
3716 }
3717 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3718 goto illegal_op;
3719
3720 if (b1) {
3721 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3722 if (mod == 3) {
3723 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3724 } else {
3725 op2_offset = offsetof(CPUX86State,xmm_t0);
3726 gen_lea_modrm(env, s, modrm);
3727 switch (b) {
3728 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3729 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3730 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3731 gen_ldq_env_A0(s, op2_offset +
3732 offsetof(XMMReg, XMM_Q(0)));
3733 break;
3734 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3735 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3736 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
3737 s->mem_index, MO_LEUL);
3738 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3739 offsetof(XMMReg, XMM_L(0)));
3740 break;
3741 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3742 tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
3743 s->mem_index, MO_LEUW);
3744 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3745 offsetof(XMMReg, XMM_W(0)));
3746 break;
3747 case 0x2a: /* movntdqa */
3748 gen_ldo_env_A0(s, op1_offset);
3749 return;
3750 default:
3751 gen_ldo_env_A0(s, op2_offset);
3752 }
3753 }
3754 } else {
3755 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3756 if (mod == 3) {
3757 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3758 } else {
3759 op2_offset = offsetof(CPUX86State,mmx_t0);
3760 gen_lea_modrm(env, s, modrm);
3761 gen_ldq_env_A0(s, op2_offset);
3762 }
3763 }
3764 if (sse_fn_epp == SSE_SPECIAL) {
3765 goto illegal_op;
3766 }
3767
3768 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3769 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3770 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3771
3772 if (b == 0x17) {
3773 set_cc_op(s, CC_OP_EFLAGS);
3774 }
3775 break;
3776
3777 case 0x238:
3778 case 0x338:
3779 do_0f_38_fx:
3780 /* Various integer extensions at 0f 38 f[0-f]. */
3781 b = modrm | (b1 << 8);
3782 modrm = cpu_ldub_code(env, s->pc++);
3783 reg = ((modrm >> 3) & 7) | rex_r;
3784
3785 switch (b) {
3786 case 0x3f0: /* crc32 Gd,Eb */
3787 case 0x3f1: /* crc32 Gd,Ey */
3788 do_crc32:
3789 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3790 goto illegal_op;
3791 }
3792 if ((b & 0xff) == 0xf0) {
3793 ot = MO_8;
3794 } else if (s->dflag != 2) {
3795 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3796 } else {
3797 ot = MO_64;
3798 }
3799
3800 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
3801 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3802 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3803 cpu_T[0], tcg_const_i32(8 << ot));
3804
3805 ot = (s->dflag == 2) ? MO_64 : MO_32;
3806 gen_op_mov_reg_T0(ot, reg);
3807 break;
3808
3809 case 0x1f0: /* crc32 or movbe */
3810 case 0x1f1:
3811 /* For these insns, the f3 prefix is supposed to have priority
3812 over the 66 prefix, but that is not what the b1 computation
3813 above implements. */
3814 if (s->prefix & PREFIX_REPNZ) {
3815 goto do_crc32;
3816 }
3817 /* FALLTHRU */
3818 case 0x0f0: /* movbe Gy,My */
3819 case 0x0f1: /* movbe My,Gy */
3820 if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3821 goto illegal_op;
3822 }
3823 if (s->dflag != 2) {
3824 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3825 } else {
3826 ot = MO_64;
3827 }
3828
3829 gen_lea_modrm(env, s, modrm);
3830 if ((b & 1) == 0) {
3831 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
3832 s->mem_index, ot | MO_BE);
3833 gen_op_mov_reg_T0(ot, reg);
3834 } else {
3835 tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
3836 s->mem_index, ot | MO_BE);
3837 }
3838 break;
3839
3840 case 0x0f2: /* andn Gy, By, Ey */
3841 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3842 || !(s->prefix & PREFIX_VEX)
3843 || s->vex_l != 0) {
3844 goto illegal_op;
3845 }
3846 ot = s->dflag == 2 ? MO_64 : MO_32;
3847 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3848 tcg_gen_andc_tl(cpu_T[0], cpu_regs[s->vex_v], cpu_T[0]);
3849 gen_op_mov_reg_T0(ot, reg);
3850 gen_op_update1_cc();
3851 set_cc_op(s, CC_OP_LOGICB + ot);
3852 break;
3853
3854 case 0x0f7: /* bextr Gy, Ey, By */
3855 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3856 || !(s->prefix & PREFIX_VEX)
3857 || s->vex_l != 0) {
3858 goto illegal_op;
3859 }
3860 ot = s->dflag == 2 ? MO_64 : MO_32;
3861 {
3862 TCGv bound, zero;
3863
3864 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3865 /* Extract START, and shift the operand.
3866 Shifts larger than operand size get zeros. */
3867 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
3868 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_A0);
3869
3870 bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3871 zero = tcg_const_tl(0);
3872 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T[0], cpu_A0, bound,
3873 cpu_T[0], zero);
3874 tcg_temp_free(zero);
3875
3876 /* Extract the LEN into a mask. Lengths larger than
3877 operand size get all ones. */
3878 tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
3879 tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
3880 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
3881 cpu_A0, bound);
3882 tcg_temp_free(bound);
3883 tcg_gen_movi_tl(cpu_T[1], 1);
3884 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_A0);
3885 tcg_gen_subi_tl(cpu_T[1], cpu_T[1], 1);
3886 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3887
3888 gen_op_mov_reg_T0(ot, reg);
3889 gen_op_update1_cc();
3890 set_cc_op(s, CC_OP_LOGICB + ot);
3891 }
3892 break;
3893
3894 case 0x0f5: /* bzhi Gy, Ey, By */
3895 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3896 || !(s->prefix & PREFIX_VEX)
3897 || s->vex_l != 0) {
3898 goto illegal_op;
3899 }
3900 ot = s->dflag == 2 ? MO_64 : MO_32;
3901 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3902 tcg_gen_ext8u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3903 {
3904 TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3905 /* Note that since we're using BMILG (in order to get O
3906 cleared) we need to store the inverse into C. */
3907 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
3908 cpu_T[1], bound);
3909 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T[1], cpu_T[1],
3910 bound, bound, cpu_T[1]);
3911 tcg_temp_free(bound);
3912 }
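/* dst = src & ~(-1 << start): clear all bits from position start up. */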
3913 tcg_gen_movi_tl(cpu_A0, -1);
3914 tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T[1]);
3915 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_A0);
3916 gen_op_mov_reg_T0(ot, reg);
3917 gen_op_update1_cc();
3918 set_cc_op(s, CC_OP_BMILGB + ot);
3919 break;
3920
3921 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3922 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3923 || !(s->prefix & PREFIX_VEX)
3924 || s->vex_l != 0) {
3925 goto illegal_op;
3926 }
3927 ot = s->dflag == 2 ? MO_64 : MO_32;
3928 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
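/* MULX: EDX is the implicit second source; the low half of the
product goes to the vvvv register, the high half to the ModRM reg,
and the flags are untouched. */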
3929 switch (ot) {
3930 default:
3931 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3932 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
3933 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
3934 cpu_tmp2_i32, cpu_tmp3_i32);
3935 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
3936 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
3937 break;
3938 #ifdef TARGET_X86_64
3939 case MO_64:
3940 tcg_gen_mulu2_i64(cpu_regs[s->vex_v], cpu_regs[reg],
3941 cpu_T[0], cpu_regs[R_EDX]);
3942 break;
3943 #endif
3944 }
3945 break;
3946
3947 case 0x3f5: /* pdep Gy, By, Ey */
3948 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3949 || !(s->prefix & PREFIX_VEX)
3950 || s->vex_l != 0) {
3951 goto illegal_op;
3952 }
3953 ot = s->dflag == 2 ? MO_64 : MO_32;
3954 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3955 /* Per the SDM the mask is Ey and the deposited bits come from By;
3956 zero-extending the mask also zero-extends the result. */
3957 if (s->dflag == 2) {
3958 tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
3959 } else {
3960 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[0]);
3961 }
3962 gen_helper_pdep(cpu_regs[reg], cpu_regs[s->vex_v], cpu_T[1]);
3963 break;
3964
3965 case 0x2f5: /* pext Gy, By, Ey */
3966 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3967 || !(s->prefix & PREFIX_VEX)
3968 || s->vex_l != 0) {
3969 goto illegal_op;
3970 }
3971 ot = s->dflag == 2 ? MO_64 : MO_32;
3972 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3973 /* Per the SDM the mask is Ey and the extracted bits come from By;
3974 zero-extending the mask also zero-extends the result. */
3975 if (s->dflag == 2) {
3976 tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
3977 } else {
3978 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[0]);
3979 }
3980 gen_helper_pext(cpu_regs[reg], cpu_regs[s->vex_v], cpu_T[1]);
3981 break;
3982
3983 case 0x1f6: /* adcx Gy, Ey */
3984 case 0x2f6: /* adox Gy, Ey */
3985 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
3986 goto illegal_op;
3987 } else {
3988 TCGv carry_in, carry_out, zero;
3989 int end_op;
3990
3991 ot = (s->dflag == 2 ? MO_64 : MO_32);
3992 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3993
3994 /* Re-use the carry-out from a previous round. */
3995 TCGV_UNUSED(carry_in);
3996 carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
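/* Under CC_OP_ADCX the CF result lives in cc_dst; under CC_OP_ADOX
the OF result lives in cc_src2; CC_OP_ADCOX uses both. */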
3997 switch (s->cc_op) {
3998 case CC_OP_ADCX:
3999 if (b == 0x1f6) {
4000 carry_in = cpu_cc_dst;
4001 end_op = CC_OP_ADCX;
4002 } else {
4003 end_op = CC_OP_ADCOX;
4004 }
4005 break;
4006 case CC_OP_ADOX:
4007 if (b == 0x1f6) {
4008 end_op = CC_OP_ADCOX;
4009 } else {
4010 carry_in = cpu_cc_src2;
4011 end_op = CC_OP_ADOX;
4012 }
4013 break;
4014 case CC_OP_ADCOX:
4015 end_op = CC_OP_ADCOX;
4016 carry_in = carry_out;
4017 break;
4018 default:
4019 end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
4020 break;
4021 }
4022 /* If we can't reuse carry-out, get it out of EFLAGS. */
4023 if (TCGV_IS_UNUSED(carry_in)) {
4024 if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
4025 gen_compute_eflags(s);
4026 }
4027 carry_in = cpu_tmp0;
4028 tcg_gen_shri_tl(carry_in, cpu_cc_src,
4029 ctz32(b == 0x1f6 ? CC_C : CC_O));
4030 tcg_gen_andi_tl(carry_in, carry_in, 1);
4031 }
4032
4033 switch (ot) {
4034 #ifdef TARGET_X86_64
4035 case MO_32:
4036 /* If we know TL is 64-bit, and we want a 32-bit
4037 result, just do everything in 64-bit arithmetic. */
4038 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
4039 tcg_gen_ext32u_i64(cpu_T[0], cpu_T[0]);
4040 tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_regs[reg]);
4041 tcg_gen_add_i64(cpu_T[0], cpu_T[0], carry_in);
4042 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T[0]);
4043 tcg_gen_shri_i64(carry_out, cpu_T[0], 32);
4044 break;
4045 #endif
4046 default:
4047 /* Otherwise compute the carry-out in two steps. */
4048 zero = tcg_const_tl(0);
4049 tcg_gen_add2_tl(cpu_T[0], carry_out,
4050 cpu_T[0], zero,
4051 carry_in, zero);
4052 tcg_gen_add2_tl(cpu_regs[reg], carry_out,
4053 cpu_regs[reg], carry_out,
4054 cpu_T[0], zero);
4055 tcg_temp_free(zero);
4056 break;
4057 }
4058 set_cc_op(s, end_op);
4059 }
4060 break;
4061
4062 case 0x1f7: /* shlx Gy, Ey, By */
4063 case 0x2f7: /* sarx Gy, Ey, By */
4064 case 0x3f7: /* shrx Gy, Ey, By */
4065 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4066 || !(s->prefix & PREFIX_VEX)
4067 || s->vex_l != 0) {
4068 goto illegal_op;
4069 }
4070 ot = (s->dflag == 2 ? MO_64 : MO_32);
4071 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4072 if (ot == MO_64) {
4073 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 63);
4074 } else {
4075 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 31);
4076 }
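/* For the 32-bit forms, extend the value first so that the TL-wide
shift below yields the correct 32-bit result: sarx sign-extends,
shrx zero-extends. */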
4077 if (b == 0x1f7) {
4078 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4079 } else if (b == 0x2f7) {
4080 if (ot != MO_64) {
4081 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4082 }
4083 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4084 } else {
4085 if (ot != MO_64) {
4086 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4087 }
4088 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4089 }
4090 gen_op_mov_reg_T0(ot, reg);
4091 break;
4092
4093 case 0x0f3:
4094 case 0x1f3:
4095 case 0x2f3:
4096 case 0x3f3: /* Group 17 */
4097 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4098 || !(s->prefix & PREFIX_VEX)
4099 || s->vex_l != 0) {
4100 goto illegal_op;
4101 }
4102 ot = s->dflag == 2 ? MO_64 : MO_32;
4103 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4104
4105 switch (reg & 7) {
4106 case 1: /* blsr By,Ey: dst = src & (src - 1), CF = (src == 0) */
4107 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4108 tcg_gen_subi_tl(cpu_T[1], cpu_T[0], 1);
4109 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4110 gen_op_mov_reg_T0(ot, s->vex_v);
4111 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4112 set_cc_op(s, CC_OP_BMILGB + ot);
4113 break;

4114 case 2: /* blsmsk By,Ey: dst = src ^ (src - 1), CF = (src == 0) */
4115 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4116 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4117 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4118 gen_op_mov_reg_T0(ot, s->vex_v);
4119 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4120 set_cc_op(s, CC_OP_BMILGB + ot);
4121 break;

4122 case 3: /* blsi By, Ey: dst = src & -src, CF = (src != 0) */
4123 /* CC_OP_BMILG derives CF from (cc_src == 0), so store the inverse. */
4124 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_cc_src, cpu_T[0], 0);
4125 tcg_gen_neg_tl(cpu_T[1], cpu_T[0]);
4126 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4127 gen_op_mov_reg_T0(ot, s->vex_v);
4128 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
set_cc_op(s, CC_OP_BMILGB + ot);
break;
4129
4130 default:
4131 goto illegal_op;
4132 }
4133 break;
4134
4135 default:
4136 goto illegal_op;
4137 }
4138 break;
4139
4140 case 0x03a:
4141 case 0x13a:
4142 b = modrm;
4143 modrm = cpu_ldub_code(env, s->pc++);
4144 rm = modrm & 7;
4145 reg = ((modrm >> 3) & 7) | rex_r;
4146 mod = (modrm >> 6) & 3;
4147 if (b1 >= 2) {
4148 goto illegal_op;
4149 }
4150
4151 sse_fn_eppi = sse_op_table7[b].op[b1];
4152 if (!sse_fn_eppi) {
4153 goto illegal_op;
4154 }
4155 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
4156 goto illegal_op;
4157
4158 if (sse_fn_eppi == SSE_SPECIAL) {
4159 ot = (s->dflag == 2) ? MO_64 : MO_32;
4160 rm = (modrm & 7) | REX_B(s);
4161 if (mod != 3)
4162 gen_lea_modrm(env, s, modrm);
4163 reg = ((modrm >> 3) & 7) | rex_r;
4164 val = cpu_ldub_code(env, s->pc++);
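/* The immediate byte selects the element; it is masked below
according to the element size. */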
4165 switch (b) {
4166 case 0x14: /* pextrb */
4167 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4168 xmm_regs[reg].XMM_B(val & 15)));
4169 if (mod == 3) {
4170 gen_op_mov_reg_T0(ot, rm);
4171 } else {
4172 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4173 s->mem_index, MO_UB);
4174 }
4175 break;
4176 case 0x15: /* pextrw */
4177 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4178 xmm_regs[reg].XMM_W(val & 7)));
4179 if (mod == 3) {
4180 gen_op_mov_reg_T0(ot, rm);
4181 } else {
4182 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4183 s->mem_index, MO_LEUW);
4184 }
4185 break;
4186 case 0x16:
4187 if (ot == MO_32) { /* pextrd */
4188 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4189 offsetof(CPUX86State,
4190 xmm_regs[reg].XMM_L(val & 3)));
4191 if (mod == 3) {
4192 tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
4193 } else {
4194 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
4195 s->mem_index, MO_LEUL);
4196 }
4197 } else { /* pextrq */
4198 #ifdef TARGET_X86_64
4199 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4200 offsetof(CPUX86State,
4201 xmm_regs[reg].XMM_Q(val & 1)));
4202 if (mod == 3) {
4203 tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
4204 } else {
4205 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
4206 s->mem_index, MO_LEQ);
4207 }
4208 #else
4209 goto illegal_op;
4210 #endif
4211 }
4212 break;
4213 case 0x17: /* extractps */
4214 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4215 xmm_regs[reg].XMM_L(val & 3)));
4216 if (mod == 3) {
4217 gen_op_mov_reg_T0(ot, rm);
4218 } else {
4219 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4220 s->mem_index, MO_LEUL);
4221 }
4222 break;
4223 case 0x20: /* pinsrb */
4224 if (mod == 3) {
4225 gen_op_mov_TN_reg(MO_32, 0, rm);
4226 } else {
4227 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
4228 s->mem_index, MO_UB);
4229 }
4230 tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4231 xmm_regs[reg].XMM_B(val & 15)));
4232 break;
4233 case 0x21: /* insertps */
4234 if (mod == 3) {
4235 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4236 offsetof(CPUX86State,xmm_regs[rm]
4237 .XMM_L((val >> 6) & 3)));
4238 } else {
4239 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4240 s->mem_index, MO_LEUL);
4241 }
4242 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4243 offsetof(CPUX86State,xmm_regs[reg]
4244 .XMM_L((val >> 4) & 3)));
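/* imm8 bits 0-3 are the zmask: zero each selected dword. */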
4245 if ((val >> 0) & 1)
4246 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4247 cpu_env, offsetof(CPUX86State,
4248 xmm_regs[reg].XMM_L(0)));
4249 if ((val >> 1) & 1)
4250 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4251 cpu_env, offsetof(CPUX86State,
4252 xmm_regs[reg].XMM_L(1)));
4253 if ((val >> 2) & 1)
4254 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4255 cpu_env, offsetof(CPUX86State,
4256 xmm_regs[reg].XMM_L(2)));
4257 if ((val >> 3) & 1)
4258 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4259 cpu_env, offsetof(CPUX86State,
4260 xmm_regs[reg].XMM_L(3)));
4261 break;
4262 case 0x22:
4263 if (ot == MO_32) { /* pinsrd */
4264 if (mod == 3) {
4265 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
4266 } else {
4267 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4268 s->mem_index, MO_LEUL);
4269 }
4270 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4271 offsetof(CPUX86State,
4272 xmm_regs[reg].XMM_L(val & 3)));
4273 } else { /* pinsrq */
4274 #ifdef TARGET_X86_64
4275 if (mod == 3) {
4276 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4277 } else {
4278 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
4279 s->mem_index, MO_LEQ);
4280 }
4281 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4282 offsetof(CPUX86State,
4283 xmm_regs[reg].XMM_Q(val & 1)));
4284 #else
4285 goto illegal_op;
4286 #endif
4287 }
4288 break;
4289 }
4290 return;
4291 }
4292
4293 if (b1) {
4294 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4295 if (mod == 3) {
4296 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4297 } else {
4298 op2_offset = offsetof(CPUX86State,xmm_t0);
4299 gen_lea_modrm(env, s, modrm);
4300 gen_ldo_env_A0(s, op2_offset);
4301 }
4302 } else {
4303 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4304 if (mod == 3) {
4305 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4306 } else {
4307 op2_offset = offsetof(CPUX86State,mmx_t0);
4308 gen_lea_modrm(env, s, modrm);
4309 gen_ldq_env_A0(s, op2_offset);
4310 }
4311 }
4312 val = cpu_ldub_code(env, s->pc++);
4313
4314 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
4315 set_cc_op(s, CC_OP_EFLAGS);
4316
4317 if (s->dflag == 2)
4318 /* The helper must use entire 64-bit gp registers */
4319 val |= 1 << 8;
4320 }
4321
4322 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4323 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4324 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4325 break;
4326
4327 case 0x33a:
4328 /* Various integer extensions at 0f 3a f[0-f]. */
4329 b = modrm | (b1 << 8);
4330 modrm = cpu_ldub_code(env, s->pc++);
4331 reg = ((modrm >> 3) & 7) | rex_r;
4332
4333 switch (b) {
4334 case 0x3f0: /* rorx Gy,Ey, Ib */
4335 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4336 || !(s->prefix & PREFIX_VEX)
4337 || s->vex_l != 0) {
4338 goto illegal_op;
4339 }
4340 ot = s->dflag == 2 ? MO_64 : MO_32;
4341 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4342 b = cpu_ldub_code(env, s->pc++);
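/* rorx rotates by an immediate and, unlike ror, leaves the flags
untouched. */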
4343 if (ot == MO_64) {
4344 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], b & 63);
4345 } else {
4346 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4347 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
4348 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4349 }
4350 gen_op_mov_reg_T0(ot, reg);
4351 break;
4352
4353 default:
4354 goto illegal_op;
4355 }
4356 break;
4357
4358 default:
4359 goto illegal_op;
4360 }
4361 } else {
4362 /* generic MMX or SSE operation */
4363 switch(b) {
4364 case 0x70: /* pshufx insn */
4365 case 0xc6: /* pshufx insn */
4366 case 0xc2: /* compare insns */
4367 s->rip_offset = 1;
4368 break;
4369 default:
4370 break;
4371 }
4372 if (is_xmm) {
4373 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4374 if (mod != 3) {
4375 gen_lea_modrm(env, s, modrm);
4376 op2_offset = offsetof(CPUX86State,xmm_t0);
4377 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
4378 b == 0xc2)) {
4379 /* specific case for SSE single instructions */
4380 if (b1 == 2) {
4381 /* 32 bit access */
4382 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
4383 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
4384 } else {
4385 /* 64 bit access */
4386 gen_ldq_env_A0(s, offsetof(CPUX86State,
4387 xmm_t0.XMM_D(0)));
4388 }
4389 } else {
4390 gen_ldo_env_A0(s, op2_offset);
4391 }
4392 } else {
4393 rm = (modrm & 7) | REX_B(s);
4394 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4395 }
4396 } else {
4397 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4398 if (mod != 3) {
4399 gen_lea_modrm(env, s, modrm);
4400 op2_offset = offsetof(CPUX86State,mmx_t0);
4401 gen_ldq_env_A0(s, op2_offset);
4402 } else {
4403 rm = (modrm & 7);
4404 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4405 }
4406 }
4407 switch(b) {
4408 case 0x0f: /* 3DNow! data insns */
4409 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4410 goto illegal_op;
4411 val = cpu_ldub_code(env, s->pc++);
4412 sse_fn_epp = sse_op_table5[val];
4413 if (!sse_fn_epp) {
4414 goto illegal_op;
4415 }
4416 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4417 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4418 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4419 break;
4420 case 0x70: /* pshufx insn */
4421 case 0xc6: /* pshufx insn */
4422 val = cpu_ldub_code(env, s->pc++);
4423 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4424 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4425 /* XXX: introduce a new table? */
4426 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
4427 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4428 break;
4429 case 0xc2:
4430 /* compare insns */
4431 val = cpu_ldub_code(env, s->pc++);
4432 if (val >= 8)
4433 goto illegal_op;
4434 sse_fn_epp = sse_op_table4[val][b1];
4435
4436 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4437 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4438 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4439 break;
4440 case 0xf7:
4441 /* maskmov: we must prepare A0 to point at DS:rDI for the helper */
4442 if (mod != 3)
4443 goto illegal_op;
4444 #ifdef TARGET_X86_64
4445 if (s->aflag == 2) {
4446 gen_op_movq_A0_reg(R_EDI);
4447 } else
4448 #endif
4449 {
4450 gen_op_movl_A0_reg(R_EDI);
4451 if (s->aflag == 0)
4452 gen_op_andl_A0_ffff();
4453 }
4454 gen_add_A0_ds_seg(s);
4455
4456 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4457 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4458 /* XXX: introduce a new table? */
4459 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4460 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
4461 break;
4462 default:
4463 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4464 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4465 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4466 break;
4467 }
4468 if (b == 0x2e || b == 0x2f) {
4469 set_cc_op(s, CC_OP_EFLAGS);
4470 }
4471 }
4472 }
4473
4474 /* Convert one instruction. s->is_jmp is set if the translation must
4475 be stopped. Returns the next pc value. */
4476 static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4477 target_ulong pc_start)
4478 {
4479 int b, prefixes, aflag, dflag;
4480 int shift, ot;
4481 int modrm, reg, rm, mod, op, opreg, val;
4482 target_ulong next_eip, tval;
4483 int rex_w, rex_r;
4484
4485 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4486 tcg_gen_debug_insn_start(pc_start);
4487 }
4488 s->pc = pc_start;
4489 prefixes = 0;
4490 s->override = -1;
4491 rex_w = -1;
4492 rex_r = 0;
4493 #ifdef TARGET_X86_64
4494 s->rex_x = 0;
4495 s->rex_b = 0;
4496 x86_64_hregs = 0;
4497 #endif
4498 s->rip_offset = 0; /* for relative ip address */
4499 s->vex_l = 0;
4500 s->vex_v = 0;
4501 next_byte:
4502 b = cpu_ldub_code(env, s->pc);
4503 s->pc++;
4504 /* Collect prefixes. */
4505 switch (b) {
4506 case 0xf3:
4507 prefixes |= PREFIX_REPZ;
4508 goto next_byte;
4509 case 0xf2:
4510 prefixes |= PREFIX_REPNZ;
4511 goto next_byte;
4512 case 0xf0:
4513 prefixes |= PREFIX_LOCK;
4514 goto next_byte;
4515 case 0x2e:
4516 s->override = R_CS;
4517 goto next_byte;
4518 case 0x36:
4519 s->override = R_SS;
4520 goto next_byte;
4521 case 0x3e:
4522 s->override = R_DS;
4523 goto next_byte;
4524 case 0x26:
4525 s->override = R_ES;
4526 goto next_byte;
4527 case 0x64:
4528 s->override = R_FS;
4529 goto next_byte;
4530 case 0x65:
4531 s->override = R_GS;
4532 goto next_byte;
4533 case 0x66:
4534 prefixes |= PREFIX_DATA;
4535 goto next_byte;
4536 case 0x67:
4537 prefixes |= PREFIX_ADR;
4538 goto next_byte;
4539 #ifdef TARGET_X86_64
4540 case 0x40 ... 0x4f:
4541 if (CODE64(s)) {
4542 /* REX prefix */
4543 rex_w = (b >> 3) & 1;
4544 rex_r = (b & 0x4) << 1;
4545 s->rex_x = (b & 0x2) << 2;
4546 REX_B(s) = (b & 0x1) << 3;
4547 x86_64_hregs = 1; /* select uniform byte register addressing */
4548 goto next_byte;
4549 }
4550 break;
4551 #endif
4552 case 0xc5: /* 2-byte VEX */
4553 case 0xc4: /* 3-byte VEX */
4554 /* VEX prefixes are valid only in 32-bit and 64-bit code segments,
4555 outside vm86 mode; otherwise these bytes decode as LES or LDS. */
4556 if (s->code32 && !s->vm86) {
4557 static const int pp_prefix[4] = {
4558 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4559 };
4560 int vex3, vex2 = cpu_ldub_code(env, s->pc);
4561
4562 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4563 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4564 otherwise the instruction is LES or LDS. */
4565 break;
4566 }
4567 s->pc++;
4568
4569 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4570 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4571 | PREFIX_LOCK | PREFIX_DATA)) {
4572 goto illegal_op;
4573 }
4574 #ifdef TARGET_X86_64
4575 if (x86_64_hregs) {
4576 goto illegal_op;
4577 }
4578 #endif
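/* The VEX R, X and B register-extension bits are stored inverted. */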
4579 rex_r = (~vex2 >> 4) & 8;
4580 if (b == 0xc5) {
4581 vex3 = vex2;
4582 b = cpu_ldub_code(env, s->pc++);
4583 } else {
4584 #ifdef TARGET_X86_64
4585 s->rex_x = (~vex2 >> 3) & 8;
4586 s->rex_b = (~vex2 >> 2) & 8;
4587 #endif
4588 vex3 = cpu_ldub_code(env, s->pc++);
4589 rex_w = (vex3 >> 7) & 1;
4590 switch (vex2 & 0x1f) {
4591 case 0x01: /* Implied 0f leading opcode bytes. */
4592 b = cpu_ldub_code(env, s->pc++) | 0x100;
4593 break;
4594 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4595 b = 0x138;
4596 break;
4597 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4598 b = 0x13a;
4599 break;
4600 default: /* Reserved for future use. */
4601 goto illegal_op;
4602 }
4603 }
4604 s->vex_v = (~vex3 >> 3) & 0xf;
4605 s->vex_l = (vex3 >> 2) & 1;
4606 prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4607 }
4608 break;
4609 }
4610
4611 /* Post-process prefixes. */
4612 if (CODE64(s)) {
4613 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4614 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4615 over 0x66 if both are present. */
4616 dflag = (rex_w > 0 ? 2 : prefixes & PREFIX_DATA ? 0 : 1);
4617 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4618 aflag = (prefixes & PREFIX_ADR ? 1 : 2);
4619 } else {
4620 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4621 dflag = s->code32;
4622 if (prefixes & PREFIX_DATA) {
4623 dflag ^= 1;
4624 }
4625 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4626 aflag = s->code32;
4627 if (prefixes & PREFIX_ADR) {
4628 aflag ^= 1;
4629 }
4630 }
4631
4632 s->prefix = prefixes;
4633 s->aflag = aflag;
4634 s->dflag = dflag;
4635
4636 /* lock generation */
4637 if (prefixes & PREFIX_LOCK)
4638 gen_helper_lock();
4639
4640 /* now check op code */
4641 reswitch:
4642 switch(b) {
4643 case 0x0f:
4644 /**************************/
4645 /* extended op code */
4646 b = cpu_ldub_code(env, s->pc++) | 0x100;
4647 goto reswitch;
4648
4649 /**************************/
4650 /* arith & logic */
4651 case 0x00 ... 0x05:
4652 case 0x08 ... 0x0d:
4653 case 0x10 ... 0x15:
4654 case 0x18 ... 0x1d:
4655 case 0x20 ... 0x25:
4656 case 0x28 ... 0x2d:
4657 case 0x30 ... 0x35:
4658 case 0x38 ... 0x3d:
4659 {
4660 int op, f, val;
4661 op = (b >> 3) & 7;
4662 f = (b >> 1) & 3;
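/* op selects add/or/adc/sbb/and/sub/xor/cmp; f selects the form:
0 = Ev,Gv, 1 = Gv,Ev, 2 = A,Iv. */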
4663
4664 if ((b & 1) == 0)
4665 ot = MO_8;
4666 else
4667 ot = dflag + MO_16;
4668
4669 switch(f) {
4670 case 0: /* OP Ev, Gv */
4671 modrm = cpu_ldub_code(env, s->pc++);
4672 reg = ((modrm >> 3) & 7) | rex_r;
4673 mod = (modrm >> 6) & 3;
4674 rm = (modrm & 7) | REX_B(s);
4675 if (mod != 3) {
4676 gen_lea_modrm(env, s, modrm);
4677 opreg = OR_TMP0;
4678 } else if (op == OP_XORL && rm == reg) {
4679 xor_zero:
4680 /* xor reg, reg optimisation */
4681 set_cc_op(s, CC_OP_CLR);
4682 tcg_gen_movi_tl(cpu_T[0], 0);
4683 gen_op_mov_reg_T0(ot, reg);
4684 break;
4685 } else {
4686 opreg = rm;
4687 }
4688 gen_op_mov_TN_reg(ot, 1, reg);
4689 gen_op(s, op, ot, opreg);
4690 break;
4691 case 1: /* OP Gv, Ev */
4692 modrm = cpu_ldub_code(env, s->pc++);
4693 mod = (modrm >> 6) & 3;
4694 reg = ((modrm >> 3) & 7) | rex_r;
4695 rm = (modrm & 7) | REX_B(s);
4696 if (mod != 3) {
4697 gen_lea_modrm(env, s, modrm);
4698 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4699 } else if (op == OP_XORL && rm == reg) {
4700 goto xor_zero;
4701 } else {
4702 gen_op_mov_TN_reg(ot, 1, rm);
4703 }
4704 gen_op(s, op, ot, reg);
4705 break;
4706 case 2: /* OP A, Iv */
4707 val = insn_get(env, s, ot);
4708 tcg_gen_movi_tl(cpu_T[1], val);
4709 gen_op(s, op, ot, OR_EAX);
4710 break;
4711 }
4712 }
4713 break;
4714
4715 case 0x82:
4716 if (CODE64(s))
4717 goto illegal_op;
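/* fall through: outside 64-bit mode, 0x82 is an alias of 0x80 */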
4718 case 0x80: /* GRP1 */
4719 case 0x81:
4720 case 0x83:
4721 {
4722 int val;
4723
4724 if ((b & 1) == 0)
4725 ot = MO_8;
4726 else
4727 ot = dflag + MO_16;
4728
4729 modrm = cpu_ldub_code(env, s->pc++);
4730 mod = (modrm >> 6) & 3;
4731 rm = (modrm & 7) | REX_B(s);
4732 op = (modrm >> 3) & 7;
4733
4734 if (mod != 3) {
4735 if (b == 0x83)
4736 s->rip_offset = 1;
4737 else
4738 s->rip_offset = insn_const_size(ot);
4739 gen_lea_modrm(env, s, modrm);
4740 opreg = OR_TMP0;
4741 } else {
4742 opreg = rm;
4743 }
4744
4745 switch(b) {
4746 default:
4747 case 0x80:
4748 case 0x81:
4749 case 0x82:
4750 val = insn_get(env, s, ot);
4751 break;
4752 case 0x83:
4753 val = (int8_t)insn_get(env, s, MO_8);
4754 break;
4755 }
4756 tcg_gen_movi_tl(cpu_T[1], val);
4757 gen_op(s, op, ot, opreg);
4758 }
4759 break;
4760
4761 /**************************/
4762 /* inc, dec, and other misc arith */
4763 case 0x40 ... 0x47: /* inc Gv */
4764 ot = dflag ? MO_32 : MO_16;
4765 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4766 break;
4767 case 0x48 ... 0x4f: /* dec Gv */
4768 ot = dflag ? MO_32 : MO_16;
4769 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4770 break;
4771 case 0xf6: /* GRP3 */
4772 case 0xf7:
4773 if ((b & 1) == 0)
4774 ot = MO_8;
4775 else
4776 ot = dflag + MO_16;
4777
4778 modrm = cpu_ldub_code(env, s->pc++);
4779 mod = (modrm >> 6) & 3;
4780 rm = (modrm & 7) | REX_B(s);
4781 op = (modrm >> 3) & 7;
4782 if (mod != 3) {
4783 if (op == 0)
4784 s->rip_offset = insn_const_size(ot);
4785 gen_lea_modrm(env, s, modrm);
4786 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
4787 } else {
4788 gen_op_mov_TN_reg(ot, 0, rm);
4789 }
4790
4791 switch(op) {
4792 case 0: /* test */
4793 val = insn_get(env, s, ot);
4794 tcg_gen_movi_tl(cpu_T[1], val);
4795 gen_op_testl_T0_T1_cc();
4796 set_cc_op(s, CC_OP_LOGICB + ot);
4797 break;
4798 case 2: /* not */
4799 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4800 if (mod != 3) {
4801 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
4802 } else {
4803 gen_op_mov_reg_T0(ot, rm);
4804 }
4805 break;
4806 case 3: /* neg */
4807 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4808 if (mod != 3) {
4809 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
4810 } else {
4811 gen_op_mov_reg_T0(ot, rm);
4812 }
4813 gen_op_update_neg_cc();
4814 set_cc_op(s, CC_OP_SUBB + ot);
4815 break;
4816 case 4: /* mul */
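/* For mul, the high half of the product goes to CC_SRC; CF and OF
are set iff it is non-zero. */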
4817 switch(ot) {
4818 case MO_8:
4819 gen_op_mov_TN_reg(MO_8, 1, R_EAX);
4820 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4821 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4822 /* XXX: use 32 bit mul which could be faster */
4823 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4824 gen_op_mov_reg_T0(MO_16, R_EAX);
4825 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4826 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4827 set_cc_op(s, CC_OP_MULB);
4828 break;
4829 case MO_16:
4830 gen_op_mov_TN_reg(MO_16, 1, R_EAX);
4831 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4832 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4833 /* XXX: use 32 bit mul which could be faster */
4834 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4835 gen_op_mov_reg_T0(MO_16, R_EAX);
4836 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4837 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4838 gen_op_mov_reg_T0(MO_16, R_EDX);
4839 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4840 set_cc_op(s, CC_OP_MULW);
4841 break;
4842 default:
4843 case MO_32:
4844 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4845 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4846 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4847 cpu_tmp2_i32, cpu_tmp3_i32);
4848 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4849 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4850 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4851 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4852 set_cc_op(s, CC_OP_MULL);
4853 break;
4854 #ifdef TARGET_X86_64
4855 case MO_64:
4856 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4857 cpu_T[0], cpu_regs[R_EAX]);
4858 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4859 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4860 set_cc_op(s, CC_OP_MULQ);
4861 break;
4862 #endif
4863 }
4864 break;
4865 case 5: /* imul */
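/* For imul, CC_SRC = result - sign_extend(low half); non-zero means
the product overflowed the operand size, setting CF and OF. */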
4866 switch(ot) {
4867 case MO_8:
4868 gen_op_mov_TN_reg(MO_8, 1, R_EAX);
4869 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4870 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4871 /* XXX: use 32 bit mul which could be faster */
4872 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4873 gen_op_mov_reg_T0(MO_16, R_EAX);
4874 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4875 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4876 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4877 set_cc_op(s, CC_OP_MULB);
4878 break;
4879 case MO_16:
4880 gen_op_mov_TN_reg(MO_16, 1, R_EAX);
4881 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4882 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4883 /* XXX: use 32 bit mul which could be faster */
4884 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4885 gen_op_mov_reg_T0(MO_16, R_EAX);
4886 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4887 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4888 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4889 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4890 gen_op_mov_reg_T0(MO_16, R_EDX);
4891 set_cc_op(s, CC_OP_MULW);
4892 break;
4893 default:
4894 case MO_32:
4895 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4896 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4897 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4898 cpu_tmp2_i32, cpu_tmp3_i32);
4899 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4900 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4901 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
4902 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4903 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
4904 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
4905 set_cc_op(s, CC_OP_MULL);
4906 break;
4907 #ifdef TARGET_X86_64
4908 case MO_64:
4909 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4910 cpu_T[0], cpu_regs[R_EAX]);
4911 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4912 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
4913 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
4914 set_cc_op(s, CC_OP_MULQ);
4915 break;
4916 #endif
4917 }
4918 break;
4919 case 6: /* div */
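/* Division is done in a helper so that it can raise #DE; update EIP
first so the exception is precise. */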
4920 switch(ot) {
4921 case MO_8:
4922 gen_jmp_im(pc_start - s->cs_base);
4923 gen_helper_divb_AL(cpu_env, cpu_T[0]);
4924 break;
4925 case MO_16:
4926 gen_jmp_im(pc_start - s->cs_base);
4927 gen_helper_divw_AX(cpu_env, cpu_T[0]);
4928 break;
4929 default:
4930 case MO_32:
4931 gen_jmp_im(pc_start - s->cs_base);
4932 gen_helper_divl_EAX(cpu_env, cpu_T[0]);
4933 break;
4934 #ifdef TARGET_X86_64
4935 case MO_64:
4936 gen_jmp_im(pc_start - s->cs_base);
4937 gen_helper_divq_EAX(cpu_env, cpu_T[0]);
4938 break;
4939 #endif
4940 }
4941 break;
4942 case 7: /* idiv */
4943 switch(ot) {
4944 case MO_8:
4945 gen_jmp_im(pc_start - s->cs_base);
4946 gen_helper_idivb_AL(cpu_env, cpu_T[0]);
4947 break;
4948 case MO_16:
4949 gen_jmp_im(pc_start - s->cs_base);
4950 gen_helper_idivw_AX(cpu_env, cpu_T[0]);
4951 break;
4952 default:
4953 case MO_32:
4954 gen_jmp_im(pc_start - s->cs_base);
4955 gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
4956 break;
4957 #ifdef TARGET_X86_64
4958 case MO_64:
4959 gen_jmp_im(pc_start - s->cs_base);
4960 gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
4961 break;
4962 #endif
4963 }
4964 break;
4965 default:
4966 goto illegal_op;
4967 }
4968 break;
4969
4970 case 0xfe: /* GRP4 */
4971 case 0xff: /* GRP5 */
4972 if ((b & 1) == 0)
4973 ot = MO_8;
4974 else
4975 ot = dflag + MO_16;
4976
4977 modrm = cpu_ldub_code(env, s->pc++);
4978 mod = (modrm >> 6) & 3;
4979 rm = (modrm & 7) | REX_B(s);
4980 op = (modrm >> 3) & 7;
4981 if (op >= 2 && b == 0xfe) {
4982 goto illegal_op;
4983 }
4984 if (CODE64(s)) {
4985 if (op == 2 || op == 4) {
4986 /* operand size for jumps is 64 bit */
4987 ot = MO_64;
4988 } else if (op == 3 || op == 5) {
4989 ot = dflag ? MO_32 + (rex_w == 1) : MO_16;
4990 } else if (op == 6) {
4991 /* default push size is 64 bit */
4992 ot = dflag ? MO_64 : MO_16;
4993 }
4994 }
4995 if (mod != 3) {
4996 gen_lea_modrm(env, s, modrm);
4997 if (op >= 2 && op != 3 && op != 5)
4998 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
4999 } else {
5000 gen_op_mov_TN_reg(ot, 0, rm);
5001 }
5002
5003 switch(op) {
5004 case 0: /* inc Ev */
5005 if (mod != 3)
5006 opreg = OR_TMP0;
5007 else
5008 opreg = rm;
5009 gen_inc(s, ot, opreg, 1);
5010 break;
5011 case 1: /* dec Ev */
5012 if (mod != 3)
5013 opreg = OR_TMP0;
5014 else
5015 opreg = rm;
5016 gen_inc(s, ot, opreg, -1);
5017 break;
5018 case 2: /* call Ev */
5019 /* XXX: optimize if memory (no 'and' is necessary) */
5020 if (s->dflag == 0)
5021 gen_op_andl_T0_ffff();
5022 next_eip = s->pc - s->cs_base;
5023 gen_movtl_T1_im(next_eip);
5024 gen_push_T1(s);
5025 gen_op_jmp_T0();
5026 gen_eob(s);
5027 break;
5028 case 3: /* lcall Ev */
5029 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5030 gen_add_A0_im(s, 1 << (ot - MO_16 + 1));
5031 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
5032 do_lcall:
5033 if (s->pe && !s->vm86) {
5034 gen_update_cc_op(s);
5035 gen_jmp_im(pc_start - s->cs_base);
5036 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5037 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
5038 tcg_const_i32(dflag),
5039 tcg_const_i32(s->pc - pc_start));
5040 } else {
5041 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5042 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
5043 tcg_const_i32(dflag),
5044 tcg_const_i32(s->pc - s->cs_base));
5045 }
5046 gen_eob(s);
5047 break;
5048 case 4: /* jmp Ev */
5049 if (s->dflag == 0)
5050 gen_op_andl_T0_ffff();
5051 gen_op_jmp_T0();
5052 gen_eob(s);
5053 break;
5054 case 5: /* ljmp Ev */
5055 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5056 gen_add_A0_im(s, 1 << (ot - MO_16 + 1));
5057 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
5058 do_ljmp:
5059 if (s->pe && !s->vm86) {
5060 gen_update_cc_op(s);
5061 gen_jmp_im(pc_start - s->cs_base);
5062 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5063 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
5064 tcg_const_i32(s->pc - pc_start));
5065 } else {
5066 gen_op_movl_seg_T0_vm(R_CS);
5067 gen_op_movl_T0_T1();
5068 gen_op_jmp_T0();
5069 }
5070 gen_eob(s);
5071 break;
5072 case 6: /* push Ev */
5073 gen_push_T0(s);
5074 break;
5075 default:
5076 goto illegal_op;
5077 }
5078 break;
5079
5080 case 0x84: /* test Ev, Gv */
5081 case 0x85:
5082 if ((b & 1) == 0)
5083 ot = MO_8;
5084 else
5085 ot = dflag + MO_16;
5086
5087 modrm = cpu_ldub_code(env, s->pc++);
5088 reg = ((modrm >> 3) & 7) | rex_r;
5089
5090 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5091 gen_op_mov_TN_reg(ot, 1, reg);
5092 gen_op_testl_T0_T1_cc();
5093 set_cc_op(s, CC_OP_LOGICB + ot);
5094 break;
5095
5096 case 0xa8: /* test eAX, Iv */
5097 case 0xa9:
5098 if ((b & 1) == 0)
5099 ot = MO_8;
5100 else
5101 ot = dflag + MO_16;
5102 val = insn_get(env, s, ot);
5103
5104 gen_op_mov_TN_reg(ot, 0, OR_EAX);
5105 tcg_gen_movi_tl(cpu_T[1], val);
5106 gen_op_testl_T0_T1_cc();
5107 set_cc_op(s, CC_OP_LOGICB + ot);
5108 break;
5109
5110 case 0x98: /* CWDE/CBW */
5111 #ifdef TARGET_X86_64
5112 if (dflag == 2) {
5113 gen_op_mov_TN_reg(MO_32, 0, R_EAX);
5114 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5115 gen_op_mov_reg_T0(MO_64, R_EAX);
5116 } else
5117 #endif
5118 if (dflag == 1) {
5119 gen_op_mov_TN_reg(MO_16, 0, R_EAX);
5120 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5121 gen_op_mov_reg_T0(MO_32, R_EAX);
5122 } else {
5123 gen_op_mov_TN_reg(MO_8, 0, R_EAX);
5124 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5125 gen_op_mov_reg_T0(MO_16, R_EAX);
5126 }
5127 break;
5128 case 0x99: /* CDQ/CWD */
5129 #ifdef TARGET_X86_64
5130 if (dflag == 2) {
5131 gen_op_mov_TN_reg(MO_64, 0, R_EAX);
5132 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
5133 gen_op_mov_reg_T0(MO_64, R_EDX);
5134 } else
5135 #endif
5136 if (dflag == 1) {
5137 gen_op_mov_TN_reg(MO_32, 0, R_EAX);
5138 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5139 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
5140 gen_op_mov_reg_T0(MO_32, R_EDX);
5141 } else {
5142 gen_op_mov_TN_reg(MO_16, 0, R_EAX);
5143 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5144 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
5145 gen_op_mov_reg_T0(MO_16, R_EDX);
5146 }
5147 break;
5148 case 0x1af: /* imul Gv, Ev */
5149 case 0x69: /* imul Gv, Ev, I */
5150 case 0x6b:
5151 ot = dflag + MO_16;
5152 modrm = cpu_ldub_code(env, s->pc++);
5153 reg = ((modrm >> 3) & 7) | rex_r;
5154 if (b == 0x69)
5155 s->rip_offset = insn_const_size(ot);
5156 else if (b == 0x6b)
5157 s->rip_offset = 1;
5158 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5159 if (b == 0x69) {
5160 val = insn_get(env, s, ot);
5161 tcg_gen_movi_tl(cpu_T[1], val);
5162 } else if (b == 0x6b) {
5163 val = (int8_t)insn_get(env, s, MO_8);
5164 tcg_gen_movi_tl(cpu_T[1], val);
5165 } else {
5166 gen_op_mov_TN_reg(ot, 1, reg);
5167 }
5168 switch (ot) {
5169 #ifdef TARGET_X86_64
5170 case MO_64:
5171 tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]);
5172 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5173 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
5174 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
5175 break;
5176 #endif
5177 case MO_32:
5178 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5179 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5180 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5181 cpu_tmp2_i32, cpu_tmp3_i32);
5182 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
5183 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5184 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5185 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5186 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
5187 break;
5188 default:
5189 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5190 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5191 /* XXX: use 32 bit mul which could be faster */
5192 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5193 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5194 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5195 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
5196 gen_op_mov_reg_T0(ot, reg);
5197 break;
5198 }
5199 set_cc_op(s, CC_OP_MULB + ot);
5200 break;
5201 case 0x1c0:
5202 case 0x1c1: /* xadd Ev, Gv */
5203 if ((b & 1) == 0)
5204 ot = MO_8;
5205 else
5206 ot = dflag + MO_16;
5207 modrm = cpu_ldub_code(env, s->pc++);
5208 reg = ((modrm >> 3) & 7) | rex_r;
5209 mod = (modrm >> 6) & 3;
5210 if (mod == 3) {
5211 rm = (modrm & 7) | REX_B(s);
5212 gen_op_mov_TN_reg(ot, 0, reg);
5213 gen_op_mov_TN_reg(ot, 1, rm);
5214 gen_op_addl_T0_T1();
5215 gen_op_mov_reg_T1(ot, reg);
5216 gen_op_mov_reg_T0(ot, rm);
5217 } else {
5218 gen_lea_modrm(env, s, modrm);
5219 gen_op_mov_TN_reg(ot, 0, reg);
5220 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5221 gen_op_addl_T0_T1();
5222 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5223 gen_op_mov_reg_T1(ot, reg);
5224 }
5225 gen_op_update2_cc();
5226 set_cc_op(s, CC_OP_ADDB + ot);
5227 break;
5228 case 0x1b0:
5229 case 0x1b1: /* cmpxchg Ev, Gv */
5230 {
5231 int label1, label2;
5232 TCGv t0, t1, t2, a0;
5233
5234 if ((b & 1) == 0)
5235 ot = MO_8;
5236 else
5237 ot = dflag + MO_16;
5238 modrm = cpu_ldub_code(env, s->pc++);
5239 reg = ((modrm >> 3) & 7) | rex_r;
5240 mod = (modrm >> 6) & 3;
5241 t0 = tcg_temp_local_new();
5242 t1 = tcg_temp_local_new();
5243 t2 = tcg_temp_local_new();
5244 a0 = tcg_temp_local_new();
5245 gen_op_mov_v_reg(ot, t1, reg);
5246 if (mod == 3) {
5247 rm = (modrm & 7) | REX_B(s);
5248 gen_op_mov_v_reg(ot, t0, rm);
5249 } else {
5250 gen_lea_modrm(env, s, modrm);
5251 tcg_gen_mov_tl(a0, cpu_A0);
5252 gen_op_ld_v(s, ot, t0, a0);
5253 rm = 0; /* avoid warning */
5254 }
5255 label1 = gen_new_label();
5256 tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5257 gen_extu(ot, t0);
5258 gen_extu(ot, t2);
5259 tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
5260 label2 = gen_new_label();
5261 if (mod == 3) {
5262 gen_op_mov_reg_v(ot, R_EAX, t0);
5263 tcg_gen_br(label2);
5264 gen_set_label(label1);
5265 gen_op_mov_reg_v(ot, rm, t1);
5266 } else {
5267 /* perform no-op store cycle like physical cpu; must be
5268 before changing accumulator to ensure idempotency if
5269 the store faults and the instruction is restarted */
5270 gen_op_st_v(s, ot, t0, a0);
5271 gen_op_mov_reg_v(ot, R_EAX, t0);
5272 tcg_gen_br(label2);
5273 gen_set_label(label1);
5274 gen_op_st_v(s, ot, t1, a0);
5275 }
5276 gen_set_label(label2);
5277 tcg_gen_mov_tl(cpu_cc_src, t0);
5278 tcg_gen_mov_tl(cpu_cc_srcT, t2);
5279 tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
5280 set_cc_op(s, CC_OP_SUBB + ot);
5281 tcg_temp_free(t0);
5282 tcg_temp_free(t1);
5283 tcg_temp_free(t2);
5284 tcg_temp_free(a0);
5285 }
5286 break;
5287 case 0x1c7: /* cmpxchg8b */
5288 modrm = cpu_ldub_code(env, s->pc++);
5289 mod = (modrm >> 6) & 3;
5290 if ((mod == 3) || ((modrm & 0x38) != 0x8))
5291 goto illegal_op;
5292 #ifdef TARGET_X86_64
5293 if (dflag == 2) {
5294 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5295 goto illegal_op;
5296 gen_jmp_im(pc_start - s->cs_base);
5297 gen_update_cc_op(s);
5298 gen_lea_modrm(env, s, modrm);
5299 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
5300 } else
5301 #endif
5302 {
5303 if (!(s->cpuid_features & CPUID_CX8))
5304 goto illegal_op;
5305 gen_jmp_im(pc_start - s->cs_base);
5306 gen_update_cc_op(s);
5307 gen_lea_modrm(env, s, modrm);
5308 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
5309 }
5310 set_cc_op(s, CC_OP_EFLAGS);
5311 break;
5312
5313 /**************************/
5314 /* push/pop */
5315 case 0x50 ... 0x57: /* push */
5316 gen_op_mov_TN_reg(MO_32, 0, (b & 7) | REX_B(s));
5317 gen_push_T0(s);
5318 break;
5319 case 0x58 ... 0x5f: /* pop */
5320 if (CODE64(s)) {
5321 ot = dflag ? MO_64 : MO_16;
5322 } else {
5323 ot = dflag + MO_16;
5324 }
5325 gen_pop_T0(s);
5326 /* NOTE: order is important for pop %sp */
5327 gen_pop_update(s);
5328 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
5329 break;
5330 case 0x60: /* pusha */
5331 if (CODE64(s))
5332 goto illegal_op;
5333 gen_pusha(s);
5334 break;
5335 case 0x61: /* popa */
5336 if (CODE64(s))
5337 goto illegal_op;
5338 gen_popa(s);
5339 break;
5340 case 0x68: /* push Iv */
5341 case 0x6a:
5342 if (CODE64(s)) {
5343 ot = dflag ? MO_64 : MO_16;
5344 } else {
5345 ot = dflag + MO_16;
5346 }
5347 if (b == 0x68)
5348 val = insn_get(env, s, ot);
5349 else
5350 val = (int8_t)insn_get(env, s, MO_8);
5351 tcg_gen_movi_tl(cpu_T[0], val);
5352 gen_push_T0(s);
5353 break;
5354 case 0x8f: /* pop Ev */
5355 if (CODE64(s)) {
5356 ot = dflag ? MO_64 : MO_16;
5357 } else {
5358 ot = dflag + MO_16;
5359 }
5360 modrm = cpu_ldub_code(env, s->pc++);
5361 mod = (modrm >> 6) & 3;
5362 gen_pop_T0(s);
5363 if (mod == 3) {
5364 /* NOTE: order is important for pop %sp */
5365 gen_pop_update(s);
5366 rm = (modrm & 7) | REX_B(s);
5367 gen_op_mov_reg_T0(ot, rm);
5368 } else {
5369 /* NOTE: order is important too for MMU exceptions */
5370 s->popl_esp_hack = 1 << ot;
5371 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5372 s->popl_esp_hack = 0;
5373 gen_pop_update(s);
5374 }
5375 break;
5376 case 0xc8: /* enter */
5377 {
5378 int level;
5379 val = cpu_lduw_code(env, s->pc);
5380 s->pc += 2;
5381 level = cpu_ldub_code(env, s->pc++);
5382 gen_enter(s, val, level);
5383 }
5384 break;
5385 case 0xc9: /* leave */
5386 /* XXX: exception not precise (ESP is updated before potential exception) */
5387 if (CODE64(s)) {
5388 gen_op_mov_TN_reg(MO_64, 0, R_EBP);
5389 gen_op_mov_reg_T0(MO_64, R_ESP);
5390 } else if (s->ss32) {
5391 gen_op_mov_TN_reg(MO_32, 0, R_EBP);
5392 gen_op_mov_reg_T0(MO_32, R_ESP);
5393 } else {
5394 gen_op_mov_TN_reg(MO_16, 0, R_EBP);
5395 gen_op_mov_reg_T0(MO_16, R_ESP);
5396 }
5397 gen_pop_T0(s);
5398 if (CODE64(s)) {
5399 ot = dflag ? MO_64 : MO_16;
5400 } else {
5401 ot = dflag + MO_16;
5402 }
5403 gen_op_mov_reg_T0(ot, R_EBP);
5404 gen_pop_update(s);
5405 break;
5406 case 0x06: /* push es */
5407 case 0x0e: /* push cs */
5408 case 0x16: /* push ss */
5409 case 0x1e: /* push ds */
5410 if (CODE64(s))
5411 goto illegal_op;
5412 gen_op_movl_T0_seg(b >> 3);
5413 gen_push_T0(s);
5414 break;
5415 case 0x1a0: /* push fs */
5416 case 0x1a8: /* push gs */
5417 gen_op_movl_T0_seg((b >> 3) & 7);
5418 gen_push_T0(s);
5419 break;
5420 case 0x07: /* pop es */
5421 case 0x17: /* pop ss */
5422 case 0x1f: /* pop ds */
5423 if (CODE64(s))
5424 goto illegal_op;
5425 reg = b >> 3;
5426 gen_pop_T0(s);
5427 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5428 gen_pop_update(s);
5429 if (reg == R_SS) {
5430 /* if reg == SS, inhibit interrupts/trace. */
5431 /* If several instructions disable interrupts, only the
5432 _first_ does it */
5433 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5434 gen_helper_set_inhibit_irq(cpu_env);
5435 s->tf = 0;
5436 }
5437 if (s->is_jmp) {
5438 gen_jmp_im(s->pc - s->cs_base);
5439 gen_eob(s);
5440 }
5441 break;
5442 case 0x1a1: /* pop fs */
5443 case 0x1a9: /* pop gs */
5444 gen_pop_T0(s);
5445 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5446 gen_pop_update(s);
5447 if (s->is_jmp) {
5448 gen_jmp_im(s->pc - s->cs_base);
5449 gen_eob(s);
5450 }
5451 break;
5452
5453 /**************************/
5454 /* mov */
5455 case 0x88:
5456 case 0x89: /* mov Gv, Ev */
5457 if ((b & 1) == 0)
5458 ot = MO_8;
5459 else
5460 ot = dflag + MO_16;
5461 modrm = cpu_ldub_code(env, s->pc++);
5462 reg = ((modrm >> 3) & 7) | rex_r;
5463
5464 /* generate a generic store */
5465 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
5466 break;
5467 case 0xc6:
5468 case 0xc7: /* mov Ev, Iv */
5469 if ((b & 1) == 0)
5470 ot = MO_8;
5471 else
5472 ot = dflag + MO_16;
5473 modrm = cpu_ldub_code(env, s->pc++);
5474 mod = (modrm >> 6) & 3;
5475 if (mod != 3) {
5476 s->rip_offset = insn_const_size(ot);
5477 gen_lea_modrm(env, s, modrm);
5478 }
5479 val = insn_get(env, s, ot);
5480 tcg_gen_movi_tl(cpu_T[0], val);
5481 if (mod != 3) {
5482 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5483 } else {
5484 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
5485 }
5486 break;
5487 case 0x8a:
5488 case 0x8b: /* mov Ev, Gv */
5489 if ((b & 1) == 0)
5490 ot = MO_8;
5491 else
5492 ot = MO_16 + dflag;
5493 modrm = cpu_ldub_code(env, s->pc++);
5494 reg = ((modrm >> 3) & 7) | rex_r;
5495
5496 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5497 gen_op_mov_reg_T0(ot, reg);
5498 break;
5499 case 0x8e: /* mov seg, Gv */
5500 modrm = cpu_ldub_code(env, s->pc++);
5501 reg = (modrm >> 3) & 7;
5502 if (reg >= 6 || reg == R_CS)
5503 goto illegal_op;
5504 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5505 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5506 if (reg == R_SS) {
5507 /* if reg == SS, inhibit interrupts/trace */
5508 /* If several instructions disable interrupts, only the
5509 _first_ does it */
5510 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5511 gen_helper_set_inhibit_irq(cpu_env);
5512 s->tf = 0;
5513 }
5514 if (s->is_jmp) {
5515 gen_jmp_im(s->pc - s->cs_base);
5516 gen_eob(s);
5517 }
5518 break;
5519 case 0x8c: /* mov Gv, seg */
5520 modrm = cpu_ldub_code(env, s->pc++);
5521 reg = (modrm >> 3) & 7;
5522 mod = (modrm >> 6) & 3;
5523 if (reg >= 6)
5524 goto illegal_op;
5525 gen_op_movl_T0_seg(reg);
5526 if (mod == 3)
5527 ot = MO_16 + dflag;
5528 else
5529 ot = MO_16;
5530 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5531 break;
5532
5533 case 0x1b6: /* movzbS Gv, Eb */
5534 case 0x1b7: /* movzwS Gv, Eb */
5535 case 0x1be: /* movsbS Gv, Eb */
5536 case 0x1bf: /* movswS Gv, Eb */
5537 {
5538 TCGMemOp d_ot;
5539 TCGMemOp s_ot;
5540
5541 /* d_ot is the size of destination */
5542 d_ot = dflag + MO_16;
5543 /* ot is the size of source */
5544 ot = (b & 1) + MO_8;
5545 /* s_ot is the sign+size of source */
5546 s_ot = b & 8 ? MO_SIGN | ot : ot;
5547
5548 modrm = cpu_ldub_code(env, s->pc++);
5549 reg = ((modrm >> 3) & 7) | rex_r;
5550 mod = (modrm >> 6) & 3;
5551 rm = (modrm & 7) | REX_B(s);
5552
5553 if (mod == 3) {
5554 gen_op_mov_TN_reg(ot, 0, rm);
5555 switch (s_ot) {
5556 case MO_UB:
5557 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5558 break;
5559 case MO_SB:
5560 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5561 break;
5562 case MO_UW:
5563 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5564 break;
5565 default:
5566 case MO_SW:
5567 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5568 break;
5569 }
5570 gen_op_mov_reg_T0(d_ot, reg);
5571 } else {
5572 gen_lea_modrm(env, s, modrm);
5573 gen_op_ld_v(s, s_ot, cpu_T[0], cpu_A0);
5574 gen_op_mov_reg_T0(d_ot, reg);
5575 }
5576 }
5577 break;
5578
5579 case 0x8d: /* lea */
5580 ot = dflag + MO_16;
5581 modrm = cpu_ldub_code(env, s->pc++);
5582 mod = (modrm >> 6) & 3;
5583 if (mod == 3)
5584 goto illegal_op;
5585 reg = ((modrm >> 3) & 7) | rex_r;
5586 /* we must ensure that no segment is added */
5587 s->override = -1;
5588 val = s->addseg;
5589 s->addseg = 0;
5590 gen_lea_modrm(env, s, modrm);
5591 s->addseg = val;
5592 gen_op_mov_reg_A0(ot - MO_16, reg);
5593 break;
5594
5595 case 0xa0: /* mov EAX, Ov */
5596 case 0xa1:
5597 case 0xa2: /* mov Ov, EAX */
5598 case 0xa3:
5599 {
5600 target_ulong offset_addr;
5601
5602 if ((b & 1) == 0)
5603 ot = MO_8;
5604 else
5605 ot = dflag + MO_16;
5606 #ifdef TARGET_X86_64
5607 if (s->aflag == 2) {
5608 offset_addr = cpu_ldq_code(env, s->pc);
5609 s->pc += 8;
5610 } else
5611 #endif
5612 {
5613 if (s->aflag) {
5614 offset_addr = insn_get(env, s, MO_32);
5615 } else {
5616 offset_addr = insn_get(env, s, MO_16);
5617 }
5618 }
5619 tcg_gen_movi_tl(cpu_A0, offset_addr);
5620 gen_add_A0_ds_seg(s);
5621 if ((b & 2) == 0) {
5622 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
5623 gen_op_mov_reg_T0(ot, R_EAX);
5624 } else {
5625 gen_op_mov_TN_reg(ot, 0, R_EAX);
5626 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5627 }
5628 }
5629 break;
5630 case 0xd7: /* xlat */
5631 #ifdef TARGET_X86_64
5632 if (s->aflag == 2) {
5633 gen_op_movq_A0_reg(R_EBX);
5634 gen_op_mov_TN_reg(MO_64, 0, R_EAX);
5635 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5636 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5637 } else
5638 #endif
5639 {
5640 gen_op_movl_A0_reg(R_EBX);
5641 gen_op_mov_TN_reg(MO_32, 0, R_EAX);
5642 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5643 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5644 if (s->aflag == 0)
5645 gen_op_andl_A0_ffff();
5646 else
5647 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
5648 }
5649 gen_add_A0_ds_seg(s);
5650 gen_op_ld_v(s, MO_8, cpu_T[0], cpu_A0);
5651 gen_op_mov_reg_T0(MO_8, R_EAX);
5652 break;
5653 case 0xb0 ... 0xb7: /* mov R, Ib */
5654 val = insn_get(env, s, MO_8);
5655 tcg_gen_movi_tl(cpu_T[0], val);
5656 gen_op_mov_reg_T0(MO_8, (b & 7) | REX_B(s));
5657 break;
5658 case 0xb8 ... 0xbf: /* mov R, Iv */
5659 #ifdef TARGET_X86_64
5660 if (dflag == 2) {
5661 uint64_t tmp;
5662 /* 64 bit case */
5663 tmp = cpu_ldq_code(env, s->pc);
5664 s->pc += 8;
5665 reg = (b & 7) | REX_B(s);
5666 gen_movtl_T0_im(tmp);
5667 gen_op_mov_reg_T0(MO_64, reg);
5668 } else
5669 #endif
5670 {
5671 ot = dflag ? MO_32 : MO_16;
5672 val = insn_get(env, s, ot);
5673 reg = (b & 7) | REX_B(s);
5674 tcg_gen_movi_tl(cpu_T[0], val);
5675 gen_op_mov_reg_T0(ot, reg);
5676 }
5677 break;
5678
5679 case 0x91 ... 0x97: /* xchg R, EAX */
5680 do_xchg_reg_eax:
5681 ot = dflag + MO_16;
5682 reg = (b & 7) | REX_B(s);
5683 rm = R_EAX;
5684 goto do_xchg_reg;
5685 case 0x86:
5686 case 0x87: /* xchg Ev, Gv */
5687 if ((b & 1) == 0)
5688 ot = MO_8;
5689 else
5690 ot = dflag + MO_16;
5691 modrm = cpu_ldub_code(env, s->pc++);
5692 reg = ((modrm >> 3) & 7) | rex_r;
5693 mod = (modrm >> 6) & 3;
5694 if (mod == 3) {
5695 rm = (modrm & 7) | REX_B(s);
5696 do_xchg_reg:
5697 gen_op_mov_TN_reg(ot, 0, reg);
5698 gen_op_mov_TN_reg(ot, 1, rm);
5699 gen_op_mov_reg_T0(ot, rm);
5700 gen_op_mov_reg_T1(ot, reg);
5701 } else {
5702 gen_lea_modrm(env, s, modrm);
5703 gen_op_mov_TN_reg(ot, 0, reg);
5704 /* for xchg, lock is implicit */
5705 if (!(prefixes & PREFIX_LOCK))
5706 gen_helper_lock();
5707 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5708 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5709 if (!(prefixes & PREFIX_LOCK))
5710 gen_helper_unlock();
5711 gen_op_mov_reg_T1(ot, reg);
5712 }
5713 break;
5714 case 0xc4: /* les Gv */
5715 /* In CODE64 this is VEX3; see above. */
5716 op = R_ES;
5717 goto do_lxx;
5718 case 0xc5: /* lds Gv */
5719 /* In CODE64 this is VEX2; see above. */
5720 op = R_DS;
5721 goto do_lxx;
5722 case 0x1b2: /* lss Gv */
5723 op = R_SS;
5724 goto do_lxx;
5725 case 0x1b4: /* lfs Gv */
5726 op = R_FS;
5727 goto do_lxx;
5728 case 0x1b5: /* lgs Gv */
5729 op = R_GS;
5730 do_lxx:
5731 ot = dflag ? MO_32 : MO_16;
5732 modrm = cpu_ldub_code(env, s->pc++);
5733 reg = ((modrm >> 3) & 7) | rex_r;
5734 mod = (modrm >> 6) & 3;
5735 if (mod == 3)
5736 goto illegal_op;
5737 gen_lea_modrm(env, s, modrm);
5738 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5739 gen_add_A0_im(s, 1 << (ot - MO_16 + 1));
5740 /* load the segment first to handle exceptions properly */
5741 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
5742 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5743 /* then put the data */
5744 gen_op_mov_reg_T1(ot, reg);
5745 if (s->is_jmp) {
5746 gen_jmp_im(s->pc - s->cs_base);
5747 gen_eob(s);
5748 }
5749 break;
5750
5751 /************************/
5752 /* shifts */
5753 case 0xc0:
5754 case 0xc1:
5755 /* shift Ev,Ib */
5756 shift = 2;
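/* shift: 2 = count is an immediate byte, 1 = count is 1,
0 = count is in CL */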
5757 grp2:
5758 {
5759 if ((b & 1) == 0)
5760 ot = MO_8;
5761 else
5762 ot = dflag + MO_16;
5763
5764 modrm = cpu_ldub_code(env, s->pc++);
5765 mod = (modrm >> 6) & 3;
5766 op = (modrm >> 3) & 7;
5767
5768 if (mod != 3) {
5769 if (shift == 2) {
5770 s->rip_offset = 1;
5771 }
5772 gen_lea_modrm(env, s, modrm);
5773 opreg = OR_TMP0;
5774 } else {
5775 opreg = (modrm & 7) | REX_B(s);
5776 }
5777
5778 /* simpler op */
5779 if (shift == 0) {
5780 gen_shift(s, op, ot, opreg, OR_ECX);
5781 } else {
5782 if (shift == 2) {
5783 shift = cpu_ldub_code(env, s->pc++);
5784 }
5785 gen_shifti(s, op, ot, opreg, shift);
5786 }
5787 }
5788 break;
5789 case 0xd0:
5790 case 0xd1:
5791 /* shift Ev,1 */
5792 shift = 1;
5793 goto grp2;
5794 case 0xd2:
5795 case 0xd3:
5796 /* shift Ev,cl */
5797 shift = 0;
5798 goto grp2;
5799
5800 case 0x1a4: /* shld imm */
5801 op = 0;
5802 shift = 1;
5803 goto do_shiftd;
5804 case 0x1a5: /* shld cl */
5805 op = 0;
5806 shift = 0;
5807 goto do_shiftd;
5808 case 0x1ac: /* shrd imm */
5809 op = 1;
5810 shift = 1;
5811 goto do_shiftd;
5812 case 0x1ad: /* shrd cl */
5813 op = 1;
5814 shift = 0;
5815 do_shiftd:
5816 ot = dflag + MO_16;
5817 modrm = cpu_ldub_code(env, s->pc++);
5818 mod = (modrm >> 6) & 3;
5819 rm = (modrm & 7) | REX_B(s);
5820 reg = ((modrm >> 3) & 7) | rex_r;
5821 if (mod != 3) {
5822 gen_lea_modrm(env, s, modrm);
5823 opreg = OR_TMP0;
5824 } else {
5825 opreg = rm;
5826 }
5827 gen_op_mov_TN_reg(ot, 1, reg);
5828
5829 if (shift) {
5830 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5831 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5832 tcg_temp_free(imm);
5833 } else {
5834 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
5835 }
5836 break;
5837
5838 /************************/
5839 /* floats */
5840 case 0xd8 ... 0xdf:
5841 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5842 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5843 /* XXX: what to do if illegal op? */
5844 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5845 break;
5846 }
5847 modrm = cpu_ldub_code(env, s->pc++);
5848 mod = (modrm >> 6) & 3;
5849 rm = modrm & 7;
5850 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
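/* Combine the low opcode digit (d8..df) with ModRM.reg into a single
FPU op index. */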
5851 if (mod != 3) {
5852 /* memory op */
5853 gen_lea_modrm(env, s, modrm);
5854 switch(op) {
5855 case 0x00 ... 0x07: /* fxxxs */
5856 case 0x10 ... 0x17: /* fixxxl */
5857 case 0x20 ... 0x27: /* fxxxl */
5858 case 0x30 ... 0x37: /* fixxx */
5859 {
5860 int op1;
5861 op1 = op & 7;
5862
5863 switch(op >> 4) {
5864 case 0:
5865 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5866 s->mem_index, MO_LEUL);
5867 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
5868 break;
5869 case 1:
5870 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5871 s->mem_index, MO_LEUL);
5872 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5873 break;
5874 case 2:
5875 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5876 s->mem_index, MO_LEQ);
5877 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
5878 break;
5879 case 3:
5880 default:
5881 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5882 s->mem_index, MO_LESW);
5883 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5884 break;
5885 }
5886
5887 gen_helper_fp_arith_ST0_FT0(op1);
5888 if (op1 == 3) {
5889 /* fcomp needs pop */
5890 gen_helper_fpop(cpu_env);
5891 }
5892 }
5893 break;
5894 case 0x08: /* flds */
5895 case 0x0a: /* fsts */
5896 case 0x0b: /* fstps */
5897 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5898 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5899 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5900 switch(op & 7) {
5901 case 0:
5902 switch(op >> 4) {
5903 case 0:
5904 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5905 s->mem_index, MO_LEUL);
5906 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
5907 break;
5908 case 1:
5909 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5910 s->mem_index, MO_LEUL);
5911 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5912 break;
5913 case 2:
5914 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5915 s->mem_index, MO_LEQ);
5916 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
5917 break;
5918 case 3:
5919 default:
5920 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5921 s->mem_index, MO_LESW);
5922 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5923 break;
5924 }
5925 break;
5926 case 1:
5927 /* XXX: the corresponding CPUID bit must be tested! */
5928 switch(op >> 4) {
5929 case 1:
5930 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
5931 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5932 s->mem_index, MO_LEUL);
5933 break;
5934 case 2:
5935 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
5936 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5937 s->mem_index, MO_LEQ);
5938 break;
5939 case 3:
5940 default:
5941 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
5942 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5943 s->mem_index, MO_LEUW);
5944 break;
5945 }
5946 gen_helper_fpop(cpu_env);
5947 break;
5948 default:
5949 switch(op >> 4) {
5950 case 0:
5951 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
5952 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5953 s->mem_index, MO_LEUL);
5954 break;
5955 case 1:
5956 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
5957 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5958 s->mem_index, MO_LEUL);
5959 break;
5960 case 2:
5961 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
5962 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5963 s->mem_index, MO_LEQ);
5964 break;
5965 case 3:
5966 default:
5967 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
5968 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5969 s->mem_index, MO_LEUW);
5970 break;
5971 }
5972 if ((op & 7) == 3)
5973 gen_helper_fpop(cpu_env);
5974 break;
5975 }
5976 break;
5977 case 0x0c: /* fldenv mem */
5978 gen_update_cc_op(s);
5979 gen_jmp_im(pc_start - s->cs_base);
5980 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5981 break;
5982 case 0x0d: /* fldcw mem */
5983 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5984 s->mem_index, MO_LEUW);
5985 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
5986 break;
5987 case 0x0e: /* fnstenv mem */
5988 gen_update_cc_op(s);
5989 gen_jmp_im(pc_start - s->cs_base);
5990 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5991 break;
5992 case 0x0f: /* fnstcw mem */
5993 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
5994 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5995 s->mem_index, MO_LEUW);
5996 break;
5997 case 0x1d: /* fldt mem */
5998 gen_update_cc_op(s);
5999 gen_jmp_im(pc_start - s->cs_base);
6000 gen_helper_fldt_ST0(cpu_env, cpu_A0);
6001 break;
6002 case 0x1f: /* fstpt mem */
6003 gen_update_cc_op(s);
6004 gen_jmp_im(pc_start - s->cs_base);
6005 gen_helper_fstt_ST0(cpu_env, cpu_A0);
6006 gen_helper_fpop(cpu_env);
6007 break;
6008 case 0x2c: /* frstor mem */
6009 gen_update_cc_op(s);
6010 gen_jmp_im(pc_start - s->cs_base);
6011 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
6012 break;
6013 case 0x2e: /* fnsave mem */
6014 gen_update_cc_op(s);
6015 gen_jmp_im(pc_start - s->cs_base);
6016 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
6017 break;
6018 case 0x2f: /* fnstsw mem */
6019 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
6020 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
6021 s->mem_index, MO_LEUW);
6022 break;
6023 case 0x3c: /* fbld */
6024 gen_update_cc_op(s);
6025 gen_jmp_im(pc_start - s->cs_base);
6026 gen_helper_fbld_ST0(cpu_env, cpu_A0);
6027 break;
6028 case 0x3e: /* fbstp */
6029 gen_update_cc_op(s);
6030 gen_jmp_im(pc_start - s->cs_base);
6031 gen_helper_fbst_ST0(cpu_env, cpu_A0);
6032 gen_helper_fpop(cpu_env);
6033 break;
6034 case 0x3d: /* fildll */
6035 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
6036 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
6037 break;
6038 case 0x3f: /* fistpll */
6039 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
6040 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
6041 gen_helper_fpop(cpu_env);
6042 break;
6043 default:
6044 goto illegal_op;
6045 }
6046 } else {
6047 /* register float ops */
6048 opreg = rm;
6049
6050 switch(op) {
6051 case 0x08: /* fld sti */
6052 gen_helper_fpush(cpu_env);
6053 gen_helper_fmov_ST0_STN(cpu_env,
6054 tcg_const_i32((opreg + 1) & 7));
6055 break;
6056 case 0x09: /* fxchg sti */
6057 case 0x29: /* fxchg4 sti, undocumented op */
6058 case 0x39: /* fxchg7 sti, undocumented op */
6059 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
6060 break;
6061 case 0x0a: /* grp d9/2 */
6062 switch(rm) {
6063 case 0: /* fnop */
6064 /* check exceptions (FreeBSD FPU probe) */
6065 gen_update_cc_op(s);
6066 gen_jmp_im(pc_start - s->cs_base);
6067 gen_helper_fwait(cpu_env);
6068 break;
6069 default:
6070 goto illegal_op;
6071 }
6072 break;
6073 case 0x0c: /* grp d9/4 */
6074 switch(rm) {
6075 case 0: /* fchs */
6076 gen_helper_fchs_ST0(cpu_env);
6077 break;
6078 case 1: /* fabs */
6079 gen_helper_fabs_ST0(cpu_env);
6080 break;
6081 case 4: /* ftst */
6082 gen_helper_fldz_FT0(cpu_env);
6083 gen_helper_fcom_ST0_FT0(cpu_env);
6084 break;
6085 case 5: /* fxam */
6086 gen_helper_fxam_ST0(cpu_env);
6087 break;
6088 default:
6089 goto illegal_op;
6090 }
6091 break;
6092 case 0x0d: /* grp d9/5 */
6093 {
6094 switch(rm) {
6095 case 0: /* fld1 */
6096 gen_helper_fpush(cpu_env);
6097 gen_helper_fld1_ST0(cpu_env);
6098 break;
6099 case 1: /* fldl2t */
6100 gen_helper_fpush(cpu_env);
6101 gen_helper_fldl2t_ST0(cpu_env);
6102 break;
6103 case 2: /* fldl2e */
6104 gen_helper_fpush(cpu_env);
6105 gen_helper_fldl2e_ST0(cpu_env);
6106 break;
6107 case 3: /* fldpi */
6108 gen_helper_fpush(cpu_env);
6109 gen_helper_fldpi_ST0(cpu_env);
6110 break;
6111 case 4: /* fldlg2 */
6112 gen_helper_fpush(cpu_env);
6113 gen_helper_fldlg2_ST0(cpu_env);
6114 break;
6115 case 5: /* fldln2 */
6116 gen_helper_fpush(cpu_env);
6117 gen_helper_fldln2_ST0(cpu_env);
6118 break;
6119 case 6: /* fldz */
6120 gen_helper_fpush(cpu_env);
6121 gen_helper_fldz_ST0(cpu_env);
6122 break;
6123 default:
6124 goto illegal_op;
6125 }
6126 }
6127 break;
6128 case 0x0e: /* grp d9/6 */
6129 switch(rm) {
6130 case 0: /* f2xm1 */
6131 gen_helper_f2xm1(cpu_env);
6132 break;
6133 case 1: /* fyl2x */
6134 gen_helper_fyl2x(cpu_env);
6135 break;
6136 case 2: /* fptan */
6137 gen_helper_fptan(cpu_env);
6138 break;
6139 case 3: /* fpatan */
6140 gen_helper_fpatan(cpu_env);
6141 break;
6142 case 4: /* fxtract */
6143 gen_helper_fxtract(cpu_env);
6144 break;
6145 case 5: /* fprem1 */
6146 gen_helper_fprem1(cpu_env);
6147 break;
6148 case 6: /* fdecstp */
6149 gen_helper_fdecstp(cpu_env);
6150 break;
6151 default:
6152 case 7: /* fincstp */
6153 gen_helper_fincstp(cpu_env);
6154 break;
6155 }
6156 break;
6157 case 0x0f: /* grp d9/7 */
6158 switch(rm) {
6159 case 0: /* fprem */
6160 gen_helper_fprem(cpu_env);
6161 break;
6162 case 1: /* fyl2xp1 */
6163 gen_helper_fyl2xp1(cpu_env);
6164 break;
6165 case 2: /* fsqrt */
6166 gen_helper_fsqrt(cpu_env);
6167 break;
6168 case 3: /* fsincos */
6169 gen_helper_fsincos(cpu_env);
6170 break;
6171 case 5: /* fscale */
6172 gen_helper_fscale(cpu_env);
6173 break;
6174 case 4: /* frndint */
6175 gen_helper_frndint(cpu_env);
6176 break;
6177 case 6: /* fsin */
6178 gen_helper_fsin(cpu_env);
6179 break;
6180 default:
6181 case 7: /* fcos */
6182 gen_helper_fcos(cpu_env);
6183 break;
6184 }
6185 break;
6186 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6187 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6188 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6189 {
6190 int op1;
6191
6192 op1 = op & 7;
6193 if (op >= 0x20) {
6194 gen_helper_fp_arith_STN_ST0(op1, opreg);
6195 if (op >= 0x30)
6196 gen_helper_fpop(cpu_env);
6197 } else {
6198 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6199 gen_helper_fp_arith_ST0_FT0(op1);
6200 }
6201 }
6202 break;
6203 case 0x02: /* fcom */
6204 case 0x22: /* fcom2, undocumented op */
6205 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6206 gen_helper_fcom_ST0_FT0(cpu_env);
6207 break;
6208 case 0x03: /* fcomp */
6209 case 0x23: /* fcomp3, undocumented op */
6210 case 0x32: /* fcomp5, undocumented op */
6211 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6212 gen_helper_fcom_ST0_FT0(cpu_env);
6213 gen_helper_fpop(cpu_env);
6214 break;
6215 case 0x15: /* da/5 */
6216 switch(rm) {
6217 case 1: /* fucompp */
6218 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6219 gen_helper_fucom_ST0_FT0(cpu_env);
6220 gen_helper_fpop(cpu_env);
6221 gen_helper_fpop(cpu_env);
6222 break;
6223 default:
6224 goto illegal_op;
6225 }
6226 break;
6227 case 0x1c:
6228 switch(rm) {
6229 case 0: /* feni (287 only, just do nop here) */
6230 break;
6231 case 1: /* fdisi (287 only, just do nop here) */
6232 break;
6233 case 2: /* fclex */
6234 gen_helper_fclex(cpu_env);
6235 break;
6236 case 3: /* fninit */
6237 gen_helper_fninit(cpu_env);
6238 break;
6239 case 4: /* fsetpm (287 only, just do nop here) */
6240 break;
6241 default:
6242 goto illegal_op;
6243 }
6244 break;
6245 case 0x1d: /* fucomi */
6246 if (!(s->cpuid_features & CPUID_CMOV)) {
6247 goto illegal_op;
6248 }
6249 gen_update_cc_op(s);
6250 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6251 gen_helper_fucomi_ST0_FT0(cpu_env);
6252 set_cc_op(s, CC_OP_EFLAGS);
6253 break;
6254 case 0x1e: /* fcomi */
6255 if (!(s->cpuid_features & CPUID_CMOV)) {
6256 goto illegal_op;
6257 }
6258 gen_update_cc_op(s);
6259 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6260 gen_helper_fcomi_ST0_FT0(cpu_env);
6261 set_cc_op(s, CC_OP_EFLAGS);
6262 break;
6263 case 0x28: /* ffree sti */
6264 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6265 break;
6266 case 0x2a: /* fst sti */
6267 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6268 break;
6269 case 0x2b: /* fstp sti */
6270 case 0x0b: /* fstp1 sti, undocumented op */
6271 case 0x3a: /* fstp8 sti, undocumented op */
6272 case 0x3b: /* fstp9 sti, undocumented op */
6273 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6274 gen_helper_fpop(cpu_env);
6275 break;
6276 case 0x2c: /* fucom st(i) */
6277 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6278 gen_helper_fucom_ST0_FT0(cpu_env);
6279 break;
6280 case 0x2d: /* fucomp st(i) */
6281 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6282 gen_helper_fucom_ST0_FT0(cpu_env);
6283 gen_helper_fpop(cpu_env);
6284 break;
6285 case 0x33: /* de/3 */
6286 switch(rm) {
6287 case 1: /* fcompp */
6288 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6289 gen_helper_fcom_ST0_FT0(cpu_env);
6290 gen_helper_fpop(cpu_env);
6291 gen_helper_fpop(cpu_env);
6292 break;
6293 default:
6294 goto illegal_op;
6295 }
6296 break;
6297 case 0x38: /* ffreep sti, undocumented op */
6298 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6299 gen_helper_fpop(cpu_env);
6300 break;
6301 case 0x3c: /* df/4 */
6302 switch(rm) {
6303 case 0:
6304 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
6305 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6306 gen_op_mov_reg_T0(MO_16, R_EAX);
6307 break;
6308 default:
6309 goto illegal_op;
6310 }
6311 break;
6312 case 0x3d: /* fucomip */
6313 if (!(s->cpuid_features & CPUID_CMOV)) {
6314 goto illegal_op;
6315 }
6316 gen_update_cc_op(s);
6317 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6318 gen_helper_fucomi_ST0_FT0(cpu_env);
6319 gen_helper_fpop(cpu_env);
6320 set_cc_op(s, CC_OP_EFLAGS);
6321 break;
6322 case 0x3e: /* fcomip */
6323 if (!(s->cpuid_features & CPUID_CMOV)) {
6324 goto illegal_op;
6325 }
6326 gen_update_cc_op(s);
6327 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6328 gen_helper_fcomi_ST0_FT0(cpu_env);
6329 gen_helper_fpop(cpu_env);
6330 set_cc_op(s, CC_OP_EFLAGS);
6331 break;
6332 case 0x10 ... 0x13: /* fcmovxx */
6333 case 0x18 ... 0x1b:
6334 {
6335 int op1, l1;
6336 static const uint8_t fcmov_cc[8] = {
6337 (JCC_B << 1),
6338 (JCC_Z << 1),
6339 (JCC_BE << 1),
6340 (JCC_P << 1),
6341 };
6342
6343 if (!(s->cpuid_features & CPUID_CMOV)) {
6344 goto illegal_op;
6345 }
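/* fcmov_cc maps the low two opcode bits to the B/Z/BE/P conditions
   (fcmovb/fcmove/fcmovbe/fcmovu); bit 3 of op selects the negated
   db-opcode forms. The XOR inverts the sense so that the branch
   below skips the fmov exactly when the move condition fails. */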
6346 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
6347 l1 = gen_new_label();
6348 gen_jcc1_noeob(s, op1, l1);
6349 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
6350 gen_set_label(l1);
6351 }
6352 break;
6353 default:
6354 goto illegal_op;
6355 }
6356 }
6357 break;
6358 /************************/
6359 /* string ops */
6360
6361 case 0xa4: /* movsS */
6362 case 0xa5:
6363 if ((b & 1) == 0)
6364 ot = MO_8;
6365 else
6366 ot = dflag + MO_16;
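/* dflag is 0/1/2 for 16/32/64-bit operand size, and MO_16..MO_64
   are consecutive, so dflag + MO_16 maps it straight to the
   memory-op size. */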
6367
6368 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6369 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6370 } else {
6371 gen_movs(s, ot);
6372 }
6373 break;
6374
6375 case 0xaa: /* stosS */
6376 case 0xab:
6377 if ((b & 1) == 0)
6378 ot = MO_8;
6379 else
6380 ot = dflag + MO_16;
6381
6382 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6383 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6384 } else {
6385 gen_stos(s, ot);
6386 }
6387 break;
6388 case 0xac: /* lodsS */
6389 case 0xad:
6390 if ((b & 1) == 0)
6391 ot = MO_8;
6392 else
6393 ot = dflag + MO_16;
6394 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6395 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6396 } else {
6397 gen_lods(s, ot);
6398 }
6399 break;
6400 case 0xae: /* scasS */
6401 case 0xaf:
6402 if ((b & 1) == 0)
6403 ot = MO_8;
6404 else
6405 ot = dflag + MO_16;
6406 if (prefixes & PREFIX_REPNZ) {
6407 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6408 } else if (prefixes & PREFIX_REPZ) {
6409 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6410 } else {
6411 gen_scas(s, ot);
6412 }
6413 break;
6414
6415 case 0xa6: /* cmpsS */
6416 case 0xa7:
6417 if ((b & 1) == 0)
6418 ot = MO_8;
6419 else
6420 ot = dflag + MO_16;
6421 if (prefixes & PREFIX_REPNZ) {
6422 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6423 } else if (prefixes & PREFIX_REPZ) {
6424 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6425 } else {
6426 gen_cmps(s, ot);
6427 }
6428 break;
6429 case 0x6c: /* insS */
6430 case 0x6d:
6431 if ((b & 1) == 0)
6432 ot = MO_8;
6433 else
6434 ot = dflag ? MO_32 : MO_16;
6435 gen_op_mov_TN_reg(MO_16, 0, R_EDX);
6436 gen_op_andl_T0_ffff();
6437 gen_check_io(s, ot, pc_start - s->cs_base,
6438 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
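/* The flags build the SVM IOIO exit information: SVM_IOIO_TYPE_MASK
   marks an input operation, and the literal 4 is presumably the
   string-instruction bit of that encoding. */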
6439 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6440 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6441 } else {
6442 gen_ins(s, ot);
6443 if (use_icount) {
6444 gen_jmp(s, s->pc - s->cs_base);
6445 }
6446 }
6447 break;
6448 case 0x6e: /* outsS */
6449 case 0x6f:
6450 if ((b & 1) == 0)
6451 ot = MO_8;
6452 else
6453 ot = dflag ? MO_32 : MO_16;
6454 gen_op_mov_TN_reg(MO_16, 0, R_EDX);
6455 gen_op_andl_T0_ffff();
6456 gen_check_io(s, ot, pc_start - s->cs_base,
6457 svm_is_rep(prefixes) | 4);
6458 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6459 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6460 } else {
6461 gen_outs(s, ot);
6462 if (use_icount) {
6463 gen_jmp(s, s->pc - s->cs_base);
6464 }
6465 }
6466 break;
6467
6468 /************************/
6469 /* port I/O */
6470
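/* When icount is enabled, each I/O access is bracketed by
   gen_io_start()/gen_io_end() and the TB ends right after it, so
   the instruction count at the access point stays exact. */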
6471 case 0xe4:
6472 case 0xe5:
6473 if ((b & 1) == 0)
6474 ot = MO_8;
6475 else
6476 ot = dflag ? MO_32 : MO_16;
6477 val = cpu_ldub_code(env, s->pc++);
6478 gen_check_io(s, ot, pc_start - s->cs_base,
6479 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6480 if (use_icount)
6481 gen_io_start();
6482 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6483 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6484 gen_op_mov_reg_T1(ot, R_EAX);
6485 if (use_icount) {
6486 gen_io_end();
6487 gen_jmp(s, s->pc - s->cs_base);
6488 }
6489 break;
6490 case 0xe6:
6491 case 0xe7:
6492 if ((b & 1) == 0)
6493 ot = MO_8;
6494 else
6495 ot = dflag ? MO_32 : MO_16;
6496 val = cpu_ldub_code(env, s->pc++);
6497 gen_check_io(s, ot, pc_start - s->cs_base,
6498 svm_is_rep(prefixes));
6499 gen_op_mov_TN_reg(ot, 1, R_EAX);
6500
6501 if (use_icount)
6502 gen_io_start();
6503 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6504 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6505 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6506 if (use_icount) {
6507 gen_io_end();
6508 gen_jmp(s, s->pc - s->cs_base);
6509 }
6510 break;
6511 case 0xec:
6512 case 0xed:
6513 if ((b & 1) == 0)
6514 ot = MO_8;
6515 else
6516 ot = dflag ? MO_32 : MO_16;
6517 gen_op_mov_TN_reg(MO_16, 0, R_EDX);
6518 gen_op_andl_T0_ffff();
6519 gen_check_io(s, ot, pc_start - s->cs_base,
6520 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6521 if (use_icount)
6522 gen_io_start();
6523 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6524 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6525 gen_op_mov_reg_T1(ot, R_EAX);
6526 if (use_icount) {
6527 gen_io_end();
6528 gen_jmp(s, s->pc - s->cs_base);
6529 }
6530 break;
6531 case 0xee:
6532 case 0xef:
6533 if ((b & 1) == 0)
6534 ot = MO_8;
6535 else
6536 ot = dflag ? MO_32 : MO_16;
6537 gen_op_mov_TN_reg(MO_16, 0, R_EDX);
6538 gen_op_andl_T0_ffff();
6539 gen_check_io(s, ot, pc_start - s->cs_base,
6540 svm_is_rep(prefixes));
6541 gen_op_mov_TN_reg(ot, 1, R_EAX);
6542
6543 if (use_icount)
6544 gen_io_start();
6545 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6546 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6547 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6548 if (use_icount) {
6549 gen_io_end();
6550 gen_jmp(s, s->pc - s->cs_base);
6551 }
6552 break;
6553
6554 /************************/
6555 /* control */
6556 case 0xc2: /* ret im */
6557 val = cpu_ldsw_code(env, s->pc);
6558 s->pc += 2;
6559 gen_pop_T0(s);
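/* in 64-bit mode the RET operand size defaults to 64 bits and
   32 bits is not encodable, so a 32-bit dflag is promoted to 64;
   a 16-bit override still applies */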
6560 if (CODE64(s) && s->dflag)
6561 s->dflag = 2;
6562 gen_stack_update(s, val + (2 << s->dflag));
6563 if (s->dflag == 0)
6564 gen_op_andl_T0_ffff();
6565 gen_op_jmp_T0();
6566 gen_eob(s);
6567 break;
6568 case 0xc3: /* ret */
6569 gen_pop_T0(s);
6570 gen_pop_update(s);
6571 if (s->dflag == 0)
6572 gen_op_andl_T0_ffff();
6573 gen_op_jmp_T0();
6574 gen_eob(s);
6575 break;
6576 case 0xca: /* lret im */
6577 val = cpu_ldsw_code(env, s->pc);
6578 s->pc += 2;
6579 do_lret:
6580 if (s->pe && !s->vm86) {
6581 gen_update_cc_op(s);
6582 gen_jmp_im(pc_start - s->cs_base);
6583 gen_helper_lret_protected(cpu_env, tcg_const_i32(s->dflag),
6584 tcg_const_i32(val));
6585 } else {
6586 gen_stack_A0(s);
6587 /* pop offset */
6588 gen_op_ld_v(s, 1 + s->dflag, cpu_T[0], cpu_A0);
6589 if (s->dflag == 0)
6590 gen_op_andl_T0_ffff();
6591 /* NOTE: keeping EIP updated is not a problem even if an
6592 exception is raised here */
6593 gen_op_jmp_T0();
6594 /* pop selector */
6595 gen_op_addl_A0_im(2 << s->dflag);
6596 gen_op_ld_v(s, 1 + s->dflag, cpu_T[0], cpu_A0);
6597 gen_op_movl_seg_T0_vm(R_CS);
6598 /* add stack offset */
6599 gen_stack_update(s, val + (4 << s->dflag));
6600 }
6601 gen_eob(s);
6602 break;
6603 case 0xcb: /* lret */
6604 val = 0;
6605 goto do_lret;
6606 case 0xcf: /* iret */
6607 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6608 if (!s->pe) {
6609 /* real mode */
6610 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
6611 set_cc_op(s, CC_OP_EFLAGS);
6612 } else if (s->vm86) {
6613 if (s->iopl != 3) {
6614 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6615 } else {
6616 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
6617 set_cc_op(s, CC_OP_EFLAGS);
6618 }
6619 } else {
6620 gen_update_cc_op(s);
6621 gen_jmp_im(pc_start - s->cs_base);
6622 gen_helper_iret_protected(cpu_env, tcg_const_i32(s->dflag),
6623 tcg_const_i32(s->pc - s->cs_base));
6624 set_cc_op(s, CC_OP_EFLAGS);
6625 }
6626 gen_eob(s);
6627 break;
6628 case 0xe8: /* call im */
6629 {
6630 if (dflag)
6631 tval = (int32_t)insn_get(env, s, MO_32);
6632 else
6633 tval = (int16_t)insn_get(env, s, MO_16);
6634 next_eip = s->pc - s->cs_base;
6635 tval += next_eip;
6636 if (s->dflag == 0)
6637 tval &= 0xffff;
6638 else if(!CODE64(s))
6639 tval &= 0xffffffff;
6640 gen_movtl_T0_im(next_eip);
6641 gen_push_T0(s);
6642 gen_jmp(s, tval);
6643 }
6644 break;
6645 case 0x9a: /* lcall im */
6646 {
6647 unsigned int selector, offset;
6648
6649 if (CODE64(s))
6650 goto illegal_op;
6651 ot = dflag ? MO_32 : MO_16;
6652 offset = insn_get(env, s, ot);
6653 selector = insn_get(env, s, MO_16);
6654
6655 tcg_gen_movi_tl(cpu_T[0], selector);
6656 tcg_gen_movi_tl(cpu_T[1], offset);
6657 }
6658 goto do_lcall;
6659 case 0xe9: /* jmp im */
6660 if (dflag)
6661 tval = (int32_t)insn_get(env, s, MO_32);
6662 else
6663 tval = (int16_t)insn_get(env, s, MO_16);
6664 tval += s->pc - s->cs_base;
6665 if (s->dflag == 0)
6666 tval &= 0xffff;
6667 else if(!CODE64(s))
6668 tval &= 0xffffffff;
6669 gen_jmp(s, tval);
6670 break;
6671 case 0xea: /* ljmp im */
6672 {
6673 unsigned int selector, offset;
6674
6675 if (CODE64(s))
6676 goto illegal_op;
6677 ot = dflag ? MO_32 : MO_16;
6678 offset = insn_get(env, s, ot);
6679 selector = insn_get(env, s, MO_16);
6680
6681 tcg_gen_movi_tl(cpu_T[0], selector);
6682 tcg_gen_movi_tl(cpu_T[1], offset);
6683 }
6684 goto do_ljmp;
6685 case 0xeb: /* jmp Jb */
6686 tval = (int8_t)insn_get(env, s, MO_8);
6687 tval += s->pc - s->cs_base;
6688 if (s->dflag == 0)
6689 tval &= 0xffff;
6690 gen_jmp(s, tval);
6691 break;
6692 case 0x70 ... 0x7f: /* jcc Jb */
6693 tval = (int8_t)insn_get(env, s, MO_8);
6694 goto do_jcc;
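/* case values of 0x100 and above denote two-byte 0x0f-prefixed
   opcodes: the prefix decoder ORs 0x100 into b before dispatching
   here. */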
6695 case 0x180 ... 0x18f: /* jcc Jv */
6696 if (dflag) {
6697 tval = (int32_t)insn_get(env, s, MO_32);
6698 } else {
6699 tval = (int16_t)insn_get(env, s, MO_16);
6700 }
6701 do_jcc:
6702 next_eip = s->pc - s->cs_base;
6703 tval += next_eip;
6704 if (s->dflag == 0)
6705 tval &= 0xffff;
6706 gen_jcc(s, b, tval, next_eip);
6707 break;
6708
6709 case 0x190 ... 0x19f: /* setcc Gv */
6710 modrm = cpu_ldub_code(env, s->pc++);
6711 gen_setcc1(s, b, cpu_T[0]);
6712 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
6713 break;
6714 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6715 if (!(s->cpuid_features & CPUID_CMOV)) {
6716 goto illegal_op;
6717 }
6718 ot = dflag + MO_16;
6719 modrm = cpu_ldub_code(env, s->pc++);
6720 reg = ((modrm >> 3) & 7) | rex_r;
6721 gen_cmovcc1(env, s, ot, b, modrm, reg);
6722 break;
6723
6724 /************************/
6725 /* flags */
6726 case 0x9c: /* pushf */
6727 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6728 if (s->vm86 && s->iopl != 3) {
6729 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6730 } else {
6731 gen_update_cc_op(s);
6732 gen_helper_read_eflags(cpu_T[0], cpu_env);
6733 gen_push_T0(s);
6734 }
6735 break;
6736 case 0x9d: /* popf */
6737 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6738 if (s->vm86 && s->iopl != 3) {
6739 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6740 } else {
6741 gen_pop_T0(s);
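/* The writable flag mask depends on privilege: CPL 0 may change IF
   and IOPL, a CPL at or below IOPL may change IF but not IOPL, and
   any other CPL may change neither; with a 16-bit operand the mask
   is truncated to the low word. */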
6742 if (s->cpl == 0) {
6743 if (s->dflag) {
6744 gen_helper_write_eflags(cpu_env, cpu_T[0],
6745 tcg_const_i32((TF_MASK | AC_MASK |
6746 ID_MASK | NT_MASK |
6747 IF_MASK |
6748 IOPL_MASK)));
6749 } else {
6750 gen_helper_write_eflags(cpu_env, cpu_T[0],
6751 tcg_const_i32((TF_MASK | AC_MASK |
6752 ID_MASK | NT_MASK |
6753 IF_MASK | IOPL_MASK)
6754 & 0xffff));
6755 }
6756 } else {
6757 if (s->cpl <= s->iopl) {
6758 if (s->dflag) {
6759 gen_helper_write_eflags(cpu_env, cpu_T[0],
6760 tcg_const_i32((TF_MASK |
6761 AC_MASK |
6762 ID_MASK |
6763 NT_MASK |
6764 IF_MASK)));
6765 } else {
6766 gen_helper_write_eflags(cpu_env, cpu_T[0],
6767 tcg_const_i32((TF_MASK |
6768 AC_MASK |
6769 ID_MASK |
6770 NT_MASK |
6771 IF_MASK)
6772 & 0xffff));
6773 }
6774 } else {
6775 if (s->dflag) {
6776 gen_helper_write_eflags(cpu_env, cpu_T[0],
6777 tcg_const_i32((TF_MASK | AC_MASK |
6778 ID_MASK | NT_MASK)));
6779 } else {
6780 gen_helper_write_eflags(cpu_env, cpu_T[0],
6781 tcg_const_i32((TF_MASK | AC_MASK |
6782 ID_MASK | NT_MASK)
6783 & 0xffff));
6784 }
6785 }
6786 }
6787 gen_pop_update(s);
6788 set_cc_op(s, CC_OP_EFLAGS);
6789 /* abort translation because TF/AC flag may change */
6790 gen_jmp_im(s->pc - s->cs_base);
6791 gen_eob(s);
6792 }
6793 break;
6794 case 0x9e: /* sahf */
6795 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6796 goto illegal_op;
6797 gen_op_mov_TN_reg(MO_8, 0, R_AH);
6798 gen_compute_eflags(s);
6799 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6800 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6801 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6802 break;
6803 case 0x9f: /* lahf */
6804 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6805 goto illegal_op;
6806 gen_compute_eflags(s);
6807 /* Note: gen_compute_eflags() only gives the condition codes */
6808 tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
6809 gen_op_mov_reg_T0(MO_8, R_AH);
6810 break;
6811 case 0xf5: /* cmc */
6812 gen_compute_eflags(s);
6813 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6814 break;
6815 case 0xf8: /* clc */
6816 gen_compute_eflags(s);
6817 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6818 break;
6819 case 0xf9: /* stc */
6820 gen_compute_eflags(s);
6821 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6822 break;
6823 case 0xfc: /* cld */
6824 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6825 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6826 break;
6827 case 0xfd: /* std */
6828 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6829 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6830 break;
6831
6832 /************************/
6833 /* bit operations */
6834 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6835 ot = dflag + MO_16;
6836 modrm = cpu_ldub_code(env, s->pc++);
6837 op = (modrm >> 3) & 7;
6838 mod = (modrm >> 6) & 3;
6839 rm = (modrm & 7) | REX_B(s);
6840 if (mod != 3) {
6841 s->rip_offset = 1;
6842 gen_lea_modrm(env, s, modrm);
6843 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
6844 } else {
6845 gen_op_mov_TN_reg(ot, 0, rm);
6846 }
6847 /* load shift */
6848 val = cpu_ldub_code(env, s->pc++);
6849 tcg_gen_movi_tl(cpu_T[1], val);
6850 if (op < 4)
6851 goto illegal_op;
6852 op -= 4;
6853 goto bt_op;
6854 case 0x1a3: /* bt Gv, Ev */
6855 op = 0;
6856 goto do_btx;
6857 case 0x1ab: /* bts */
6858 op = 1;
6859 goto do_btx;
6860 case 0x1b3: /* btr */
6861 op = 2;
6862 goto do_btx;
6863 case 0x1bb: /* btc */
6864 op = 3;
6865 do_btx:
6866 ot = dflag + MO_16;
6867 modrm = cpu_ldub_code(env, s->pc++);
6868 reg = ((modrm >> 3) & 7) | rex_r;
6869 mod = (modrm >> 6) & 3;
6870 rm = (modrm & 7) | REX_B(s);
6871 gen_op_mov_TN_reg(MO_32, 1, reg);
6872 if (mod != 3) {
6873 gen_lea_modrm(env, s, modrm);
6874 /* memory op: add the displacement implied by the (signed) bit offset */
6875 gen_exts(ot, cpu_T[1]);
6876 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6877 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6878 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6879 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
6880 } else {
6881 gen_op_mov_TN_reg(ot, 0, rm);
6882 }
6883 bt_op:
6884 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
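/* The bit offset is now reduced modulo the operand width. op
   selects bt (0, CF only), bts (1, OR), btr (2, AND NOT) or
   btc (3, XOR); cpu_tmp4 keeps the pre-update value shifted down
   so the tested bit can land in CF afterwards. */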
6885 switch(op) {
6886 case 0:
6887 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
6888 tcg_gen_movi_tl(cpu_cc_dst, 0);
6889 break;
6890 case 1:
6891 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6892 tcg_gen_movi_tl(cpu_tmp0, 1);
6893 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6894 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6895 break;
6896 case 2:
6897 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6898 tcg_gen_movi_tl(cpu_tmp0, 1);
6899 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6900 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6901 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6902 break;
6903 default:
6904 case 3:
6905 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6906 tcg_gen_movi_tl(cpu_tmp0, 1);
6907 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6908 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6909 break;
6910 }
6911 set_cc_op(s, CC_OP_SARB + ot);
6912 if (op != 0) {
6913 if (mod != 3) {
6914 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
6915 } else {
6916 gen_op_mov_reg_T0(ot, rm);
6917 }
6918 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6919 tcg_gen_movi_tl(cpu_cc_dst, 0);
6920 }
6921 break;
6922 case 0x1bc: /* bsf / tzcnt */
6923 case 0x1bd: /* bsr / lzcnt */
6924 ot = dflag + MO_16;
6925 modrm = cpu_ldub_code(env, s->pc++);
6926 reg = ((modrm >> 3) & 7) | rex_r;
6927 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6928 gen_extu(ot, cpu_T[0]);
6929
6930 /* Note that lzcnt and tzcnt are in different extensions. */
6931 if ((prefixes & PREFIX_REPZ)
6932 && (b & 1
6933 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
6934 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
6935 int size = 8 << ot;
6936 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
6937 if (b & 1) {
6938 /* For lzcnt, reduce the target_ulong result by the
6939 number of zeros that we expect to find at the top. */
6940 gen_helper_clz(cpu_T[0], cpu_T[0]);
6941 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
6942 } else {
6943 /* For tzcnt, a zero input must return the operand size:
6944 force all bits outside the operand size to 1. */
6945 target_ulong mask = (target_ulong)-2 << (size - 1);
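/* e.g. for a 16-bit operand the mask sets bits 16 and up, so a
   zero input still has bit 16 set and ctz returns the operand
   size */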
6946 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
6947 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6948 }
6949 /* For lzcnt/tzcnt, C and Z bits are defined and are
6950 related to the result. */
6951 gen_op_update1_cc();
6952 set_cc_op(s, CC_OP_BMILGB + ot);
6953 } else {
6954 /* For bsr/bsf, only the Z bit is defined and it is related
6955 to the input and not the result. */
6956 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
6957 set_cc_op(s, CC_OP_LOGICB + ot);
6958 if (b & 1) {
6959 /* For bsr, return the bit index of the first 1 bit,
6960 not the count of leading zeros. */
6961 gen_helper_clz(cpu_T[0], cpu_T[0]);
6962 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
6963 } else {
6964 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6965 }
6966 /* ??? The manual says that the output is undefined when the
6967 input is zero, but real hardware leaves it unchanged, and
6968 real programs appear to depend on that. */
6969 tcg_gen_movi_tl(cpu_tmp0, 0);
6970 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
6971 cpu_regs[reg], cpu_T[0]);
6972 }
6973 gen_op_mov_reg_T0(ot, reg);
6974 break;
6975 /************************/
6976 /* bcd */
6977 case 0x27: /* daa */
6978 if (CODE64(s))
6979 goto illegal_op;
6980 gen_update_cc_op(s);
6981 gen_helper_daa(cpu_env);
6982 set_cc_op(s, CC_OP_EFLAGS);
6983 break;
6984 case 0x2f: /* das */
6985 if (CODE64(s))
6986 goto illegal_op;
6987 gen_update_cc_op(s);
6988 gen_helper_das(cpu_env);
6989 set_cc_op(s, CC_OP_EFLAGS);
6990 break;
6991 case 0x37: /* aaa */
6992 if (CODE64(s))
6993 goto illegal_op;
6994 gen_update_cc_op(s);
6995 gen_helper_aaa(cpu_env);
6996 set_cc_op(s, CC_OP_EFLAGS);
6997 break;
6998 case 0x3f: /* aas */
6999 if (CODE64(s))
7000 goto illegal_op;
7001 gen_update_cc_op(s);
7002 gen_helper_aas(cpu_env);
7003 set_cc_op(s, CC_OP_EFLAGS);
7004 break;
7005 case 0xd4: /* aam */
7006 if (CODE64(s))
7007 goto illegal_op;
7008 val = cpu_ldub_code(env, s->pc++);
7009 if (val == 0) {
7010 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
7011 } else {
7012 gen_helper_aam(cpu_env, tcg_const_i32(val));
7013 set_cc_op(s, CC_OP_LOGICB);
7014 }
7015 break;
7016 case 0xd5: /* aad */
7017 if (CODE64(s))
7018 goto illegal_op;
7019 val = cpu_ldub_code(env, s->pc++);
7020 gen_helper_aad(cpu_env, tcg_const_i32(val));
7021 set_cc_op(s, CC_OP_LOGICB);
7022 break;
7023 /************************/
7024 /* misc */
7025 case 0x90: /* nop */
7026 /* XXX: correct lock test for all insns */
7027 if (prefixes & PREFIX_LOCK) {
7028 goto illegal_op;
7029 }
7030 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
7031 if (REX_B(s)) {
7032 goto do_xchg_reg_eax;
7033 }
7034 if (prefixes & PREFIX_REPZ) {
7035 gen_update_cc_op(s);
7036 gen_jmp_im(pc_start - s->cs_base);
7037 gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
7038 s->is_jmp = DISAS_TB_JUMP;
7039 }
7040 break;
7041 case 0x9b: /* fwait */
7042 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
7043 (HF_MP_MASK | HF_TS_MASK)) {
7044 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7045 } else {
7046 gen_update_cc_op(s);
7047 gen_jmp_im(pc_start - s->cs_base);
7048 gen_helper_fwait(cpu_env);
7049 }
7050 break;
7051 case 0xcc: /* int3 */
7052 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
7053 break;
7054 case 0xcd: /* int N */
7055 val = cpu_ldub_code(env, s->pc++);
7056 if (s->vm86 && s->iopl != 3) {
7057 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7058 } else {
7059 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
7060 }
7061 break;
7062 case 0xce: /* into */
7063 if (CODE64(s))
7064 goto illegal_op;
7065 gen_update_cc_op(s);
7066 gen_jmp_im(pc_start - s->cs_base);
7067 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
7068 break;
7069 #ifdef WANT_ICEBP
7070 case 0xf1: /* icebp (undocumented, exits to external debugger) */
7071 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
7072 #if 1
7073 gen_debug(s, pc_start - s->cs_base);
7074 #else
7075 /* start debug */
7076 tb_flush(env);
7077 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
7078 #endif
7079 break;
7080 #endif
7081 case 0xfa: /* cli */
7082 if (!s->vm86) {
7083 if (s->cpl <= s->iopl) {
7084 gen_helper_cli(cpu_env);
7085 } else {
7086 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7087 }
7088 } else {
7089 if (s->iopl == 3) {
7090 gen_helper_cli(cpu_env);
7091 } else {
7092 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7093 }
7094 }
7095 break;
7096 case 0xfb: /* sti */
7097 if (!s->vm86) {
7098 if (s->cpl <= s->iopl) {
7099 gen_sti:
7100 gen_helper_sti(cpu_env);
7101 /* interrupts are recognized only after the insn following STI */
7102 /* if the previous insn already inhibited interrupts, do not set
7103 the inhibit flag again, so only the _first_ one counts */
7104 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
7105 gen_helper_set_inhibit_irq(cpu_env);
7106 /* give a chance to handle pending irqs */
7107 gen_jmp_im(s->pc - s->cs_base);
7108 gen_eob(s);
7109 } else {
7110 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7111 }
7112 } else {
7113 if (s->iopl == 3) {
7114 goto gen_sti;
7115 } else {
7116 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7117 }
7118 }
7119 break;
7120 case 0x62: /* bound */
7121 if (CODE64(s))
7122 goto illegal_op;
7123 ot = dflag ? MO_32 : MO_16;
7124 modrm = cpu_ldub_code(env, s->pc++);
7125 reg = (modrm >> 3) & 7;
7126 mod = (modrm >> 6) & 3;
7127 if (mod == 3)
7128 goto illegal_op;
7129 gen_op_mov_TN_reg(ot, 0, reg);
7130 gen_lea_modrm(env, s, modrm);
7131 gen_jmp_im(pc_start - s->cs_base);
7132 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7133 if (ot == MO_16) {
7134 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
7135 } else {
7136 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
7137 }
7138 break;
7139 case 0x1c8 ... 0x1cf: /* bswap reg */
7140 reg = (b & 7) | REX_B(s);
7141 #ifdef TARGET_X86_64
7142 if (dflag == 2) {
7143 gen_op_mov_TN_reg(MO_64, 0, reg);
7144 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
7145 gen_op_mov_reg_T0(MO_64, reg);
7146 } else
7147 #endif
7148 {
7149 gen_op_mov_TN_reg(MO_32, 0, reg);
7150 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
7151 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
7152 gen_op_mov_reg_T0(MO_32, reg);
7153 }
7154 break;
7155 case 0xd6: /* salc */
7156 if (CODE64(s))
7157 goto illegal_op;
7158 gen_compute_eflags_c(s, cpu_T[0]);
7159 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
7160 gen_op_mov_reg_T0(MO_8, R_EAX);
7161 break;
7162 case 0xe0: /* loopnz */
7163 case 0xe1: /* loopz */
7164 case 0xe2: /* loop */
7165 case 0xe3: /* jecxz */
7166 {
7167 int l1, l2, l3;
7168
7169 tval = (int8_t)insn_get(env, s, MO_8);
7170 next_eip = s->pc - s->cs_base;
7171 tval += next_eip;
7172 if (s->dflag == 0)
7173 tval &= 0xffff;
7174
7175 l1 = gen_new_label();
7176 l2 = gen_new_label();
7177 l3 = gen_new_label();
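/* l1 is the branch-taken target, l3 the not-taken path (entered
   directly by loopnz/loopz when ECX reaches zero) and l2 the
   common exit */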
7178 b &= 3;
7179 switch(b) {
7180 case 0: /* loopnz */
7181 case 1: /* loopz */
7182 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7183 gen_op_jz_ecx(s->aflag, l3);
7184 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
7185 break;
7186 case 2: /* loop */
7187 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7188 gen_op_jnz_ecx(s->aflag, l1);
7189 break;
7190 default:
7191 case 3: /* jcxz */
7192 gen_op_jz_ecx(s->aflag, l1);
7193 break;
7194 }
7195
7196 gen_set_label(l3);
7197 gen_jmp_im(next_eip);
7198 tcg_gen_br(l2);
7199
7200 gen_set_label(l1);
7201 gen_jmp_im(tval);
7202 gen_set_label(l2);
7203 gen_eob(s);
7204 }
7205 break;
7206 case 0x130: /* wrmsr */
7207 case 0x132: /* rdmsr */
7208 if (s->cpl != 0) {
7209 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7210 } else {
7211 gen_update_cc_op(s);
7212 gen_jmp_im(pc_start - s->cs_base);
7213 if (b & 2) {
7214 gen_helper_rdmsr(cpu_env);
7215 } else {
7216 gen_helper_wrmsr(cpu_env);
7217 }
7218 }
7219 break;
7220 case 0x131: /* rdtsc */
7221 gen_update_cc_op(s);
7222 gen_jmp_im(pc_start - s->cs_base);
7223 if (use_icount)
7224 gen_io_start();
7225 gen_helper_rdtsc(cpu_env);
7226 if (use_icount) {
7227 gen_io_end();
7228 gen_jmp(s, s->pc - s->cs_base);
7229 }
7230 break;
7231 case 0x133: /* rdpmc */
7232 gen_update_cc_op(s);
7233 gen_jmp_im(pc_start - s->cs_base);
7234 gen_helper_rdpmc(cpu_env);
7235 break;
7236 case 0x134: /* sysenter */
7237 /* on Intel CPUs, SYSENTER remains valid in 64-bit mode */
7238 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7239 goto illegal_op;
7240 if (!s->pe) {
7241 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7242 } else {
7243 gen_update_cc_op(s);
7244 gen_jmp_im(pc_start - s->cs_base);
7245 gen_helper_sysenter(cpu_env);
7246 gen_eob(s);
7247 }
7248 break;
7249 case 0x135: /* sysexit */
7250 /* on Intel CPUs, SYSEXIT remains valid in 64-bit mode */
7251 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7252 goto illegal_op;
7253 if (!s->pe) {
7254 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7255 } else {
7256 gen_update_cc_op(s);
7257 gen_jmp_im(pc_start - s->cs_base);
7258 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag));
7259 gen_eob(s);
7260 }
7261 break;
7262 #ifdef TARGET_X86_64
7263 case 0x105: /* syscall */
7264 /* XXX: is it usable in real mode? */
7265 gen_update_cc_op(s);
7266 gen_jmp_im(pc_start - s->cs_base);
7267 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
7268 gen_eob(s);
7269 break;
7270 case 0x107: /* sysret */
7271 if (!s->pe) {
7272 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7273 } else {
7274 gen_update_cc_op(s);
7275 gen_jmp_im(pc_start - s->cs_base);
7276 gen_helper_sysret(cpu_env, tcg_const_i32(s->dflag));
7277 /* condition codes are modified only in long mode */
7278 if (s->lma) {
7279 set_cc_op(s, CC_OP_EFLAGS);
7280 }
7281 gen_eob(s);
7282 }
7283 break;
7284 #endif
7285 case 0x1a2: /* cpuid */
7286 gen_update_cc_op(s);
7287 gen_jmp_im(pc_start - s->cs_base);
7288 gen_helper_cpuid(cpu_env);
7289 break;
7290 case 0xf4: /* hlt */
7291 if (s->cpl != 0) {
7292 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7293 } else {
7294 gen_update_cc_op(s);
7295 gen_jmp_im(pc_start - s->cs_base);
7296 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
7297 s->is_jmp = DISAS_TB_JUMP;
7298 }
7299 break;
7300 case 0x100:
7301 modrm = cpu_ldub_code(env, s->pc++);
7302 mod = (modrm >> 6) & 3;
7303 op = (modrm >> 3) & 7;
7304 switch(op) {
7305 case 0: /* sldt */
7306 if (!s->pe || s->vm86)
7307 goto illegal_op;
7308 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7309 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
7310 ot = MO_16;
7311 if (mod == 3)
7312 ot += s->dflag;
7313 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7314 break;
7315 case 2: /* lldt */
7316 if (!s->pe || s->vm86)
7317 goto illegal_op;
7318 if (s->cpl != 0) {
7319 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7320 } else {
7321 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7322 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7323 gen_jmp_im(pc_start - s->cs_base);
7324 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7325 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
7326 }
7327 break;
7328 case 1: /* str */
7329 if (!s->pe || s->vm86)
7330 goto illegal_op;
7331 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7332 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
7333 ot = MO_16;
7334 if (mod == 3)
7335 ot += s->dflag;
7336 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7337 break;
7338 case 3: /* ltr */
7339 if (!s->pe || s->vm86)
7340 goto illegal_op;
7341 if (s->cpl != 0) {
7342 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7343 } else {
7344 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7345 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7346 gen_jmp_im(pc_start - s->cs_base);
7347 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7348 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
7349 }
7350 break;
7351 case 4: /* verr */
7352 case 5: /* verw */
7353 if (!s->pe || s->vm86)
7354 goto illegal_op;
7355 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7356 gen_update_cc_op(s);
7357 if (op == 4) {
7358 gen_helper_verr(cpu_env, cpu_T[0]);
7359 } else {
7360 gen_helper_verw(cpu_env, cpu_T[0]);
7361 }
7362 set_cc_op(s, CC_OP_EFLAGS);
7363 break;
7364 default:
7365 goto illegal_op;
7366 }
7367 break;
7368 case 0x101:
7369 modrm = cpu_ldub_code(env, s->pc++);
7370 mod = (modrm >> 6) & 3;
7371 op = (modrm >> 3) & 7;
7372 rm = modrm & 7;
7373 switch(op) {
7374 case 0: /* sgdt */
7375 if (mod == 3)
7376 goto illegal_op;
7377 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7378 gen_lea_modrm(env, s, modrm);
7379 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
7380 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
7381 gen_add_A0_im(s, 2);
7382 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
7383 if (!s->dflag)
7384 gen_op_andl_T0_im(0xffffff);
7385 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7386 break;
7387 case 1:
7388 if (mod == 3) {
7389 switch (rm) {
7390 case 0: /* monitor */
7391 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7392 s->cpl != 0)
7393 goto illegal_op;
7394 gen_update_cc_op(s);
7395 gen_jmp_im(pc_start - s->cs_base);
7396 #ifdef TARGET_X86_64
7397 if (s->aflag == 2) {
7398 gen_op_movq_A0_reg(R_EAX);
7399 } else
7400 #endif
7401 {
7402 gen_op_movl_A0_reg(R_EAX);
7403 if (s->aflag == 0)
7404 gen_op_andl_A0_ffff();
7405 }
7406 gen_add_A0_ds_seg(s);
7407 gen_helper_monitor(cpu_env, cpu_A0);
7408 break;
7409 case 1: /* mwait */
7410 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7411 s->cpl != 0)
7412 goto illegal_op;
7413 gen_update_cc_op(s);
7414 gen_jmp_im(pc_start - s->cs_base);
7415 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
7416 gen_eob(s);
7417 break;
7418 case 2: /* clac */
7419 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7420 s->cpl != 0) {
7421 goto illegal_op;
7422 }
7423 gen_helper_clac(cpu_env);
7424 gen_jmp_im(s->pc - s->cs_base);
7425 gen_eob(s);
7426 break;
7427 case 3: /* stac */
7428 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7429 s->cpl != 0) {
7430 goto illegal_op;
7431 }
7432 gen_helper_stac(cpu_env);
7433 gen_jmp_im(s->pc - s->cs_base);
7434 gen_eob(s);
7435 break;
7436 default:
7437 goto illegal_op;
7438 }
7439 } else { /* sidt */
7440 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7441 gen_lea_modrm(env, s, modrm);
7442 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7443 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
7444 gen_add_A0_im(s, 2);
7445 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7446 if (!s->dflag)
7447 gen_op_andl_T0_im(0xffffff);
7448 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7449 }
7450 break;
7451 case 2: /* lgdt */
7452 case 3: /* lidt */
7453 if (mod == 3) {
7454 gen_update_cc_op(s);
7455 gen_jmp_im(pc_start - s->cs_base);
7456 switch(rm) {
7457 case 0: /* VMRUN */
7458 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7459 goto illegal_op;
7460 if (s->cpl != 0) {
7461 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7462 break;
7463 } else {
7464 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag),
7465 tcg_const_i32(s->pc - pc_start));
7466 tcg_gen_exit_tb(0);
7467 s->is_jmp = DISAS_TB_JUMP;
7468 }
7469 break;
7470 case 1: /* VMMCALL */
7471 if (!(s->flags & HF_SVME_MASK))
7472 goto illegal_op;
7473 gen_helper_vmmcall(cpu_env);
7474 break;
7475 case 2: /* VMLOAD */
7476 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7477 goto illegal_op;
7478 if (s->cpl != 0) {
7479 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7480 break;
7481 } else {
7482 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag));
7483 }
7484 break;
7485 case 3: /* VMSAVE */
7486 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7487 goto illegal_op;
7488 if (s->cpl != 0) {
7489 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7490 break;
7491 } else {
7492 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag));
7493 }
7494 break;
7495 case 4: /* STGI */
7496 if ((!(s->flags & HF_SVME_MASK) &&
7497 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7498 !s->pe)
7499 goto illegal_op;
7500 if (s->cpl != 0) {
7501 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7502 break;
7503 } else {
7504 gen_helper_stgi(cpu_env);
7505 }
7506 break;
7507 case 5: /* CLGI */
7508 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7509 goto illegal_op;
7510 if (s->cpl != 0) {
7511 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7512 break;
7513 } else {
7514 gen_helper_clgi(cpu_env);
7515 }
7516 break;
7517 case 6: /* SKINIT */
7518 if ((!(s->flags & HF_SVME_MASK) &&
7519 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7520 !s->pe)
7521 goto illegal_op;
7522 gen_helper_skinit(cpu_env);
7523 break;
7524 case 7: /* INVLPGA */
7525 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7526 goto illegal_op;
7527 if (s->cpl != 0) {
7528 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7529 break;
7530 } else {
7531 gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag));
7532 }
7533 break;
7534 default:
7535 goto illegal_op;
7536 }
7537 } else if (s->cpl != 0) {
7538 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7539 } else {
7540 gen_svm_check_intercept(s, pc_start,
7541 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7542 gen_lea_modrm(env, s, modrm);
7543 gen_op_ld_v(s, MO_16, cpu_T[1], cpu_A0);
7544 gen_add_A0_im(s, 2);
7545 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7546 if (!s->dflag)
7547 gen_op_andl_T0_im(0xffffff);
7548 if (op == 2) {
7549 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7550 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7551 } else {
7552 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7553 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7554 }
7555 }
7556 break;
7557 case 4: /* smsw */
7558 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
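/* read the low 32 bits of CR0; on big-endian hosts they sit at
   offset +4 within the 64-bit field */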
7559 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7560 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7561 #else
7562 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7563 #endif
7564 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 1);
7565 break;
7566 case 6: /* lmsw */
7567 if (s->cpl != 0) {
7568 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7569 } else {
7570 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7571 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7572 gen_helper_lmsw(cpu_env, cpu_T[0]);
7573 gen_jmp_im(s->pc - s->cs_base);
7574 gen_eob(s);
7575 }
7576 break;
7577 case 7:
7578 if (mod != 3) { /* invlpg */
7579 if (s->cpl != 0) {
7580 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7581 } else {
7582 gen_update_cc_op(s);
7583 gen_jmp_im(pc_start - s->cs_base);
7584 gen_lea_modrm(env, s, modrm);
7585 gen_helper_invlpg(cpu_env, cpu_A0);
7586 gen_jmp_im(s->pc - s->cs_base);
7587 gen_eob(s);
7588 }
7589 } else {
7590 switch (rm) {
7591 case 0: /* swapgs */
7592 #ifdef TARGET_X86_64
7593 if (CODE64(s)) {
7594 if (s->cpl != 0) {
7595 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7596 } else {
7597 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7598 offsetof(CPUX86State,segs[R_GS].base));
7599 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7600 offsetof(CPUX86State,kernelgsbase));
7601 tcg_gen_st_tl(cpu_T[1], cpu_env,
7602 offsetof(CPUX86State,segs[R_GS].base));
7603 tcg_gen_st_tl(cpu_T[0], cpu_env,
7604 offsetof(CPUX86State,kernelgsbase));
7605 }
7606 } else
7607 #endif
7608 {
7609 goto illegal_op;
7610 }
7611 break;
7612 case 1: /* rdtscp */
7613 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7614 goto illegal_op;
7615 gen_update_cc_op(s);
7616 gen_jmp_im(pc_start - s->cs_base);
7617 if (use_icount)
7618 gen_io_start();
7619 gen_helper_rdtscp(cpu_env);
7620 if (use_icount) {
7621 gen_io_end();
7622 gen_jmp(s, s->pc - s->cs_base);
7623 }
7624 break;
7625 default:
7626 goto illegal_op;
7627 }
7628 }
7629 break;
7630 default:
7631 goto illegal_op;
7632 }
7633 break;
7634 case 0x108: /* invd */
7635 case 0x109: /* wbinvd */
7636 if (s->cpl != 0) {
7637 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7638 } else {
7639 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7640 /* nothing to do */
7641 }
7642 break;
7643 case 0x63: /* arpl or movslS (x86_64) */
7644 #ifdef TARGET_X86_64
7645 if (CODE64(s)) {
7646 int d_ot;
7647 /* d_ot is the size of the destination */
7648 d_ot = dflag + MO_16;
7649
7650 modrm = cpu_ldub_code(env, s->pc++);
7651 reg = ((modrm >> 3) & 7) | rex_r;
7652 mod = (modrm >> 6) & 3;
7653 rm = (modrm & 7) | REX_B(s);
7654
7655 if (mod == 3) {
7656 gen_op_mov_TN_reg(MO_32, 0, rm);
7657 /* sign extend */
7658 if (d_ot == MO_64) {
7659 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7660 }
7661 gen_op_mov_reg_T0(d_ot, reg);
7662 } else {
7663 gen_lea_modrm(env, s, modrm);
7664 gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T[0], cpu_A0);
7665 gen_op_mov_reg_T0(d_ot, reg);
7666 }
7667 } else
7668 #endif
7669 {
7670 int label1;
7671 TCGv t0, t1, t2, a0;
7672
7673 if (!s->pe || s->vm86)
7674 goto illegal_op;
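/* ARPL: if the destination selector's RPL (bits 1:0) is below the
   source's, raise it to the source RPL and set ZF; t2 accumulates
   the value ORed into the Z flag below. */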
7675 t0 = tcg_temp_local_new();
7676 t1 = tcg_temp_local_new();
7677 t2 = tcg_temp_local_new();
7678 ot = MO_16;
7679 modrm = cpu_ldub_code(env, s->pc++);
7680 reg = (modrm >> 3) & 7;
7681 mod = (modrm >> 6) & 3;
7682 rm = modrm & 7;
7683 if (mod != 3) {
7684 gen_lea_modrm(env, s, modrm);
7685 gen_op_ld_v(s, ot, t0, cpu_A0);
7686 a0 = tcg_temp_local_new();
7687 tcg_gen_mov_tl(a0, cpu_A0);
7688 } else {
7689 gen_op_mov_v_reg(ot, t0, rm);
7690 TCGV_UNUSED(a0);
7691 }
7692 gen_op_mov_v_reg(ot, t1, reg);
7693 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7694 tcg_gen_andi_tl(t1, t1, 3);
7695 tcg_gen_movi_tl(t2, 0);
7696 label1 = gen_new_label();
7697 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7698 tcg_gen_andi_tl(t0, t0, ~3);
7699 tcg_gen_or_tl(t0, t0, t1);
7700 tcg_gen_movi_tl(t2, CC_Z);
7701 gen_set_label(label1);
7702 if (mod != 3) {
7703 gen_op_st_v(s, ot, t0, a0);
7704 tcg_temp_free(a0);
7705 } else {
7706 gen_op_mov_reg_v(ot, rm, t0);
7707 }
7708 gen_compute_eflags(s);
7709 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7710 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7711 tcg_temp_free(t0);
7712 tcg_temp_free(t1);
7713 tcg_temp_free(t2);
7714 }
7715 break;
7716 case 0x102: /* lar */
7717 case 0x103: /* lsl */
7718 {
7719 int label1;
7720 TCGv t0;
7721 if (!s->pe || s->vm86)
7722 goto illegal_op;
7723 ot = dflag ? MO_32 : MO_16;
7724 modrm = cpu_ldub_code(env, s->pc++);
7725 reg = ((modrm >> 3) & 7) | rex_r;
7726 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7727 t0 = tcg_temp_local_new();
7728 gen_update_cc_op(s);
7729 if (b == 0x102) {
7730 gen_helper_lar(t0, cpu_env, cpu_T[0]);
7731 } else {
7732 gen_helper_lsl(t0, cpu_env, cpu_T[0]);
7733 }
7734 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7735 label1 = gen_new_label();
7736 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7737 gen_op_mov_reg_v(ot, reg, t0);
7738 gen_set_label(label1);
7739 set_cc_op(s, CC_OP_EFLAGS);
7740 tcg_temp_free(t0);
7741 }
7742 break;
7743 case 0x118:
7744 modrm = cpu_ldub_code(env, s->pc++);
7745 mod = (modrm >> 6) & 3;
7746 op = (modrm >> 3) & 7;
7747 switch(op) {
7748 case 0: /* prefetchnta */
7749 case 1: /* prefetcht0 */
7750 case 2: /* prefetcht1 */
7751 case 3: /* prefetcht2 */
7752 if (mod == 3)
7753 goto illegal_op;
7754 gen_lea_modrm(env, s, modrm);
7755 /* nothing more to do */
7756 break;
7757 default: /* nop (multi byte) */
7758 gen_nop_modrm(env, s, modrm);
7759 break;
7760 }
7761 break;
7762 case 0x119 ... 0x11f: /* nop (multi byte) */
7763 modrm = cpu_ldub_code(env, s->pc++);
7764 gen_nop_modrm(env, s, modrm);
7765 break;
7766 case 0x120: /* mov reg, crN */
7767 case 0x122: /* mov crN, reg */
7768 if (s->cpl != 0) {
7769 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7770 } else {
7771 modrm = cpu_ldub_code(env, s->pc++);
7772 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7773 * AMD documentation (24594.pdf) and testing of
7774 * intel 386 and 486 processors all show that the mod bits
7775 * are assumed to be 1's, regardless of actual values.
7776 */
7777 rm = (modrm & 7) | REX_B(s);
7778 reg = ((modrm >> 3) & 7) | rex_r;
7779 if (CODE64(s))
7780 ot = MO_64;
7781 else
7782 ot = MO_32;
7783 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7784 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7785 reg = 8;
7786 }
7787 switch(reg) {
7788 case 0:
7789 case 2:
7790 case 3:
7791 case 4:
7792 case 8:
7793 gen_update_cc_op(s);
7794 gen_jmp_im(pc_start - s->cs_base);
7795 if (b & 2) {
7796 gen_op_mov_TN_reg(ot, 0, rm);
7797 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7798 cpu_T[0]);
7799 gen_jmp_im(s->pc - s->cs_base);
7800 gen_eob(s);
7801 } else {
7802 gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
7803 gen_op_mov_reg_T0(ot, rm);
7804 }
7805 break;
7806 default:
7807 goto illegal_op;
7808 }
7809 }
7810 break;
7811 case 0x121: /* mov reg, drN */
7812 case 0x123: /* mov drN, reg */
7813 if (s->cpl != 0) {
7814 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7815 } else {
7816 modrm = cpu_ldub_code(env, s->pc++);
7817 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7818 * AMD documentation (24594.pdf) and testing of
7819 * intel 386 and 486 processors all show that the mod bits
7820 * are assumed to be 1's, regardless of actual values.
7821 */
7822 rm = (modrm & 7) | REX_B(s);
7823 reg = ((modrm >> 3) & 7) | rex_r;
7824 if (CODE64(s))
7825 ot = MO_64;
7826 else
7827 ot = MO_32;
7828 /* XXX: do it dynamically with CR4.DE bit */
7829 if (reg == 4 || reg == 5 || reg >= 8)
7830 goto illegal_op;
7831 if (b & 2) {
7832 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7833 gen_op_mov_TN_reg(ot, 0, rm);
7834 gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
7835 gen_jmp_im(s->pc - s->cs_base);
7836 gen_eob(s);
7837 } else {
7838 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7839 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
7840 gen_op_mov_reg_T0(ot, rm);
7841 }
7842 }
7843 break;
7844 case 0x106: /* clts */
7845 if (s->cpl != 0) {
7846 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7847 } else {
7848 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7849 gen_helper_clts(cpu_env);
7850 /* abort block because static cpu state changed */
7851 gen_jmp_im(s->pc - s->cs_base);
7852 gen_eob(s);
7853 }
7854 break;
7855 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7856 case 0x1c3: /* MOVNTI reg, mem */
7857 if (!(s->cpuid_features & CPUID_SSE2))
7858 goto illegal_op;
7859 ot = s->dflag == 2 ? MO_64 : MO_32;
7860 modrm = cpu_ldub_code(env, s->pc++);
7861 mod = (modrm >> 6) & 3;
7862 if (mod == 3)
7863 goto illegal_op;
7864 reg = ((modrm >> 3) & 7) | rex_r;
7865 /* generate a generic store */
7866 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
7867 break;
7868 case 0x1ae:
7869 modrm = cpu_ldub_code(env, s->pc++);
7870 mod = (modrm >> 6) & 3;
7871 op = (modrm >> 3) & 7;
7872 switch(op) {
7873 case 0: /* fxsave */
7874 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7875 (s->prefix & PREFIX_LOCK))
7876 goto illegal_op;
7877 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7878 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7879 break;
7880 }
7881 gen_lea_modrm(env, s, modrm);
7882 gen_update_cc_op(s);
7883 gen_jmp_im(pc_start - s->cs_base);
7884 gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32((s->dflag == 2)));
7885 break;
7886 case 1: /* fxrstor */
7887 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7888 (s->prefix & PREFIX_LOCK))
7889 goto illegal_op;
7890 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7891 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7892 break;
7893 }
7894 gen_lea_modrm(env, s, modrm);
7895 gen_update_cc_op(s);
7896 gen_jmp_im(pc_start - s->cs_base);
7897 gen_helper_fxrstor(cpu_env, cpu_A0,
7898 tcg_const_i32((s->dflag == 2)));
7899 break;
7900 case 2: /* ldmxcsr */
7901 case 3: /* stmxcsr */
7902 if (s->flags & HF_TS_MASK) {
7903 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7904 break;
7905 }
7906 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7907 mod == 3)
7908 goto illegal_op;
7909 gen_lea_modrm(env, s, modrm);
7910 if (op == 2) {
7911 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
7912 s->mem_index, MO_LEUL);
7913 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
7914 } else {
7915 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7916 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
7917 }
7918 break;
7919 case 5: /* lfence */
7920 case 6: /* mfence */
7921 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
7922 goto illegal_op;
7923 break;
7924 case 7: /* sfence / clflush */
7925 if ((modrm & 0xc7) == 0xc0) {
7926 /* sfence */
7927 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7928 if (!(s->cpuid_features & CPUID_SSE))
7929 goto illegal_op;
7930 } else {
7931 /* clflush */
7932 if (!(s->cpuid_features & CPUID_CLFLUSH))
7933 goto illegal_op;
7934 gen_lea_modrm(env, s, modrm);
7935 }
7936 break;
7937 default:
7938 goto illegal_op;
7939 }
7940 break;
7941 case 0x10d: /* 3DNow! prefetch(w) */
7942 modrm = cpu_ldub_code(env, s->pc++);
7943 mod = (modrm >> 6) & 3;
7944 if (mod == 3)
7945 goto illegal_op;
7946 gen_lea_modrm(env, s, modrm);
7947 /* ignore for now */
7948 break;
7949 case 0x1aa: /* rsm */
7950 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
7951 if (!(s->flags & HF_SMM_MASK))
7952 goto illegal_op;
7953 gen_update_cc_op(s);
7954 gen_jmp_im(s->pc - s->cs_base);
7955 gen_helper_rsm(cpu_env);
7956 gen_eob(s);
7957 break;
7958 case 0x1b8: /* SSE4.2 popcnt */
7959 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7960 PREFIX_REPZ)
7961 goto illegal_op;
7962 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7963 goto illegal_op;
7964
7965 modrm = cpu_ldub_code(env, s->pc++);
7966 reg = ((modrm >> 3) & 7) | rex_r;
7967
7968 if (s->prefix & PREFIX_DATA)
7969 ot = MO_16;
7970 else if (s->dflag != 2)
7971 ot = MO_32;
7972 else
7973 ot = MO_64;
7974
7975 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
7976 gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
7977 gen_op_mov_reg_T0(ot, reg);
7978
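/* the helper leaves the resulting EFLAGS in CC_SRC (ZF set iff
   the source was zero), which is what CC_OP_EFLAGS reads back. */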
7979 set_cc_op(s, CC_OP_EFLAGS);
7980 break;
7981 case 0x10e ... 0x10f:
7982 /* 3DNow! instructions, ignore prefixes */
7983 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
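/* fall through to the common MMX/SSE decoder */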
7984 case 0x110 ... 0x117:
7985 case 0x128 ... 0x12f:
7986 case 0x138 ... 0x13a:
7987 case 0x150 ... 0x179:
7988 case 0x17c ... 0x17f:
7989 case 0x1c2:
7990 case 0x1c4 ... 0x1c6:
7991 case 0x1d0 ... 0x1fe:
7992 gen_sse(env, s, b, pc_start, rex_r);
7993 break;
7994 default:
7995 goto illegal_op;
7996 }
7997 /* lock generation */
7998 if (s->prefix & PREFIX_LOCK)
7999 gen_helper_unlock();
8000 return s->pc;
8001 illegal_op:
8002 if (s->prefix & PREFIX_LOCK)
8003 gen_helper_unlock();
8004 /* XXX: ensure that no lock was generated */
8005 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
8006 return s->pc;
8007 }
8008
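/* Register the TCG globals that mirror CPUX86State fields: making
   them globals lets the code generator keep guest registers and
   the lazy condition-code operands in host registers across a
   translation block. */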
8009 void optimize_flags_init(void)
8010 {
8011 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
8012 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
8013 offsetof(CPUX86State, cc_op), "cc_op");
8014 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
8015 "cc_dst");
8016 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
8017 "cc_src");
8018 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src2),
8019 "cc_src2");
8020
8021 #ifdef TARGET_X86_64
8022 cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
8023 offsetof(CPUX86State, regs[R_EAX]), "rax");
8024 cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
8025 offsetof(CPUX86State, regs[R_ECX]), "rcx");
8026 cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
8027 offsetof(CPUX86State, regs[R_EDX]), "rdx");
8028 cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
8029 offsetof(CPUX86State, regs[R_EBX]), "rbx");
8030 cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
8031 offsetof(CPUX86State, regs[R_ESP]), "rsp");
8032 cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
8033 offsetof(CPUX86State, regs[R_EBP]), "rbp");
8034 cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
8035 offsetof(CPUX86State, regs[R_ESI]), "rsi");
8036 cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
8037 offsetof(CPUX86State, regs[R_EDI]), "rdi");
8038 cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
8039 offsetof(CPUX86State, regs[8]), "r8");
8040 cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
8041 offsetof(CPUX86State, regs[9]), "r9");
8042 cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
8043 offsetof(CPUX86State, regs[10]), "r10");
8044 cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
8045 offsetof(CPUX86State, regs[11]), "r11");
8046 cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
8047 offsetof(CPUX86State, regs[12]), "r12");
8048 cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
8049 offsetof(CPUX86State, regs[13]), "r13");
8050 cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
8051 offsetof(CPUX86State, regs[14]), "r14");
8052 cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
8053 offsetof(CPUX86State, regs[15]), "r15");
8054 #else
8055 cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
8056 offsetof(CPUX86State, regs[R_EAX]), "eax");
8057 cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
8058 offsetof(CPUX86State, regs[R_ECX]), "ecx");
8059 cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
8060 offsetof(CPUX86State, regs[R_EDX]), "edx");
8061 cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
8062 offsetof(CPUX86State, regs[R_EBX]), "ebx");
8063 cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
8064 offsetof(CPUX86State, regs[R_ESP]), "esp");
8065 cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
8066 offsetof(CPUX86State, regs[R_EBP]), "ebp");
8067 cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
8068 offsetof(CPUX86State, regs[R_ESI]), "esi");
8069 cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
8070 offsetof(CPUX86State, regs[R_EDI]), "edi");
8071 #endif
8072 }
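/* As a sketch of how these globals are then used (assuming the
   usual tcg-op.h API), a translator can operate on guest
   registers directly, e.g.:

       tcg_gen_mov_tl(cpu_regs[R_EAX], cpu_regs[R_EBX]);

   and TCG spills to / reloads from env->regs[] as needed. */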
8073
8074 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8075 basic block 'tb'. If search_pc is TRUE, also generate PC
8076 information for each intermediate instruction. */
8077 static inline void gen_intermediate_code_internal(X86CPU *cpu,
8078 TranslationBlock *tb,
8079 bool search_pc)
8080 {
8081 CPUState *cs = CPU(cpu);
8082 CPUX86State *env = &cpu->env;
8083 DisasContext dc1, *dc = &dc1;
8084 target_ulong pc_ptr;
8085 uint16_t *gen_opc_end;
8086 CPUBreakpoint *bp;
8087 int j, lj;
8088 uint64_t flags;
8089 target_ulong pc_start;
8090 target_ulong cs_base;
8091 int num_insns;
8092 int max_insns;
8093
8094 /* generate intermediate code */
8095 pc_start = tb->pc;
8096 cs_base = tb->cs_base;
8097 flags = tb->flags;
8098
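/* tb->flags packs the hflags/eflags bits that affect decoding
   (CPL, segment sizes, VM86, ...); a cached TB is only reused
   when the CPU is back in a matching mode. */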
8099 dc->pe = (flags >> HF_PE_SHIFT) & 1;
8100 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
8101 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
8102 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
8103 dc->f_st = 0;
8104 dc->vm86 = (flags >> VM_SHIFT) & 1;
8105 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
8106 dc->iopl = (flags >> IOPL_SHIFT) & 3;
8107 dc->tf = (flags >> TF_SHIFT) & 1;
8108 dc->singlestep_enabled = cs->singlestep_enabled;
8109 dc->cc_op = CC_OP_DYNAMIC;
8110 dc->cc_op_dirty = false;
8111 dc->cs_base = cs_base;
8112 dc->tb = tb;
8113 dc->popl_esp_hack = 0;
8114 /* select memory access functions */
8115 dc->mem_index = 0;
8116 if (flags & HF_SOFTMMU_MASK) {
8117 dc->mem_index = cpu_mmu_index(env);
8118 }
8119 dc->cpuid_features = env->features[FEAT_1_EDX];
8120 dc->cpuid_ext_features = env->features[FEAT_1_ECX];
8121 dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
8122 dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
8123 dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
8124 #ifdef TARGET_X86_64
8125 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
8126 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
8127 #endif
8128 dc->flags = flags;
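/* direct block chaining must stay off whenever every TB exit has
   to return to the main loop: TF, "hardware" single step, or a
   pending interrupt-inhibit window. */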
8129 dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
8130 (flags & HF_INHIBIT_IRQ_MASK)
8131 #ifndef CONFIG_SOFTMMU
8132 || (flags & HF_SOFTMMU_MASK)
8133 #endif
8134 );
8135 #if 0
8136 /* check addseg logic */
8137 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
8138 printf("ERROR addseg\n");
8139 #endif
8140
8141 cpu_T[0] = tcg_temp_new();
8142 cpu_T[1] = tcg_temp_new();
8143 cpu_A0 = tcg_temp_new();
8144
8145 cpu_tmp0 = tcg_temp_new();
8146 cpu_tmp1_i64 = tcg_temp_new_i64();
8147 cpu_tmp2_i32 = tcg_temp_new_i32();
8148 cpu_tmp3_i32 = tcg_temp_new_i32();
8149 cpu_tmp4 = tcg_temp_new();
8150 cpu_ptr0 = tcg_temp_new_ptr();
8151 cpu_ptr1 = tcg_temp_new_ptr();
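/* cc_srcT needs to be a local temp: unlike plain temps, its value
   survives the branches that can be emitted inside a TB. */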
8152 cpu_cc_srcT = tcg_temp_local_new();
8153
8154 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
8155
8156 dc->is_jmp = DISAS_NEXT;
8157 pc_ptr = pc_start;
8158 lj = -1;
8159 num_insns = 0;
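/* with icount, tb->cflags carries a budget of guest instructions
   for this TB; zero means no limit was requested. */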
8160 max_insns = tb->cflags & CF_COUNT_MASK;
8161 if (max_insns == 0)
8162 max_insns = CF_COUNT_MASK;
8163
8164 gen_tb_start();
8165 for(;;) {
8166 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
8167 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
8168 if (bp->pc == pc_ptr &&
8169 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
8170 gen_debug(dc, pc_ptr - dc->cs_base);
8171 break;
8172 }
8173 }
8174 }
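/* in search_pc mode, record per guest insn the host-op index,
   guest PC and cc_op so that a faulting host PC can later be
   mapped back to precise guest state. */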
8175 if (search_pc) {
8176 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
8177 if (lj < j) {
8178 lj++;
8179 while (lj < j)
8180 tcg_ctx.gen_opc_instr_start[lj++] = 0;
8181 }
8182 tcg_ctx.gen_opc_pc[lj] = pc_ptr;
8183 gen_opc_cc_op[lj] = dc->cc_op;
8184 tcg_ctx.gen_opc_instr_start[lj] = 1;
8185 tcg_ctx.gen_opc_icount[lj] = num_insns;
8186 }
8187 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8188 gen_io_start();
8189
8190 pc_ptr = disas_insn(env, dc, pc_ptr);
8191 num_insns++;
8192 /* stop translation if indicated */
8193 if (dc->is_jmp)
8194 break;
8195 /* in single-step mode, we generate only one instruction and
8196 then generate an exception */
8197 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8198 the flag and abort the translation to give the irqs a
8199 chance to happen */
8200 if (dc->tf || dc->singlestep_enabled ||
8201 (flags & HF_INHIBIT_IRQ_MASK)) {
8202 gen_jmp_im(pc_ptr - dc->cs_base);
8203 gen_eob(dc);
8204 break;
8205 }
8206 /* likewise stop if the translation grew too long: op buffer nearly full, close to a page boundary, or insn budget reached */
8207 if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
8208 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
8209 num_insns >= max_insns) {
8210 gen_jmp_im(pc_ptr - dc->cs_base);
8211 gen_eob(dc);
8212 break;
8213 }
8214 if (singlestep) {
8215 gen_jmp_im(pc_ptr - dc->cs_base);
8216 gen_eob(dc);
8217 break;
8218 }
8219 }
8220 if (tb->cflags & CF_LAST_IO)
8221 gen_io_end();
8222 gen_tb_end(tb, num_insns);
8223 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
8224 /* make sure the last values are filled in */
8225 if (search_pc) {
8226 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
8227 lj++;
8228 while (lj <= j)
8229 tcg_ctx.gen_opc_instr_start[lj++] = 0;
8230 }
8231
8232 #ifdef DEBUG_DISAS
8233 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
8234 int disas_flags;
8235 qemu_log("----------------\n");
8236 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8237 #ifdef TARGET_X86_64
8238 if (dc->code64)
8239 disas_flags = 2;
8240 else
8241 #endif
8242 disas_flags = !dc->code32;
8243 log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
8244 qemu_log("\n");
8245 }
8246 #endif
8247
8248 if (!search_pc) {
8249 tb->size = pc_ptr - pc_start;
8250 tb->icount = num_insns;
8251 }
8252 }
8253
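/* external entry points: plain translation, and the search_pc
   variant used when a fault has to be resolved back to a guest
   PC (see restore_state_to_opc() below). */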
8254 void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
8255 {
8256 gen_intermediate_code_internal(x86_env_get_cpu(env), tb, false);
8257 }
8258
8259 void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
8260 {
8261 gen_intermediate_code_internal(x86_env_get_cpu(env), tb, true);
8262 }
8263
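/* called once the faulting instruction has been located in the
   gen_opc arrays: resynchronize env->eip and, if it was static
   at that point, cc_op. */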
8264 void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
8265 {
8266 int cc_op;
8267 #ifdef DEBUG_DISAS
8268 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
8269 int i;
8270 qemu_log("RESTORE:\n");
8271 for (i = 0; i <= pc_pos; i++) {
8272 if (tcg_ctx.gen_opc_instr_start[i]) {
8273 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
8274 tcg_ctx.gen_opc_pc[i]);
8275 }
8276 }
8277 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
8278 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
8279 (uint32_t)tb->cs_base);
8280 }
8281 #endif
8282 env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
8283 cc_op = gen_opc_cc_op[pc_pos];
8284 if (cc_op != CC_OP_DYNAMIC)
8285 env->cc_op = cc_op;
8286 }