1 /*
2 * i386 translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdarg.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <inttypes.h>
24 #include <signal.h>
25
26 #include "qemu/host-utils.h"
27 #include "cpu.h"
28 #include "disas/disas.h"
29 #include "tcg-op.h"
30
31 #include "helper.h"
32 #define GEN_HELPER 1
33 #include "helper.h"
34
35 #define PREFIX_REPZ 0x01
36 #define PREFIX_REPNZ 0x02
37 #define PREFIX_LOCK 0x04
38 #define PREFIX_DATA 0x08
39 #define PREFIX_ADR 0x10
40
41 #ifdef TARGET_X86_64
42 #define CODE64(s) ((s)->code64)
43 #define REX_X(s) ((s)->rex_x)
44 #define REX_B(s) ((s)->rex_b)
45 #else
46 #define CODE64(s) 0
47 #define REX_X(s) 0
48 #define REX_B(s) 0
49 #endif
50
51 #ifdef TARGET_X86_64
52 # define ctztl ctz64
53 # define clztl clz64
54 #else
55 # define ctztl ctz32
56 # define clztl clz32
57 #endif
58
59 //#define MACRO_TEST 1
60
61 /* global register indexes */
62 static TCGv_ptr cpu_env;
63 static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst;
64 static TCGv_i32 cpu_cc_op;
65 static TCGv cpu_regs[CPU_NB_REGS];
66 /* local temps */
67 static TCGv cpu_T[2];
68 /* local register indexes (only used inside old micro ops) */
69 static TCGv cpu_tmp0, cpu_tmp4;
70 static TCGv_ptr cpu_ptr0, cpu_ptr1;
71 static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
72 static TCGv_i64 cpu_tmp1_i64;
73 static TCGv cpu_tmp5;
74
75 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
76
77 #include "exec/gen-icount.h"
78
79 #ifdef TARGET_X86_64
80 static int x86_64_hregs;
81 #endif
82
83 typedef struct DisasContext {
84 /* current insn context */
85 int override; /* -1 if no override */
86 int prefix;
87 int aflag, dflag;
88 target_ulong pc; /* pc = eip + cs_base */
89 int is_jmp; /* 1 means jump (stop translation), 2 means CPU
90 static state change (stop translation) */
91 /* current block context */
92 target_ulong cs_base; /* base of CS segment */
93 int pe; /* protected mode */
94 int code32; /* 32 bit code segment */
95 #ifdef TARGET_X86_64
96 int lma; /* long mode active */
97 int code64; /* 64 bit code segment */
98 int rex_x, rex_b;
99 #endif
100 int ss32; /* 32 bit stack segment */
101 CCOp cc_op; /* current CC operation */
102 bool cc_op_dirty;
103 int addseg; /* non-zero if any of DS, ES or SS has a non-zero base */
104 int f_st; /* currently unused */
105 int vm86; /* vm86 mode */
106 int cpl;
107 int iopl;
108 int tf; /* TF cpu flag */
109 int singlestep_enabled; /* "hardware" single step enabled */
110 int jmp_opt; /* use direct block chaining for direct jumps */
111 int mem_index; /* select memory access functions */
112 uint64_t flags; /* all execution flags */
113 struct TranslationBlock *tb;
114 int popl_esp_hack; /* for correct popl with esp base handling */
115 int rip_offset; /* only used in x86_64, but left for simplicity */
116 int cpuid_features;
117 int cpuid_ext_features;
118 int cpuid_ext2_features;
119 int cpuid_ext3_features;
120 int cpuid_7_0_ebx_features;
121 } DisasContext;
122
123 static void gen_eob(DisasContext *s);
124 static void gen_jmp(DisasContext *s, target_ulong eip);
125 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
126
127 /* i386 arith/logic operations */
128 enum {
129 OP_ADDL,
130 OP_ORL,
131 OP_ADCL,
132 OP_SBBL,
133 OP_ANDL,
134 OP_SUBL,
135 OP_XORL,
136 OP_CMPL,
137 };
138
139 /* i386 shift ops */
140 enum {
141 OP_ROL,
142 OP_ROR,
143 OP_RCL,
144 OP_RCR,
145 OP_SHL,
146 OP_SHR,
147 OP_SHL1, /* undocumented */
148 OP_SAR = 7,
149 };
150
151 enum {
152 JCC_O,
153 JCC_B,
154 JCC_Z,
155 JCC_BE,
156 JCC_S,
157 JCC_P,
158 JCC_L,
159 JCC_LE,
160 };
161
162 /* operand size */
163 enum {
164 OT_BYTE = 0,
165 OT_WORD,
166 OT_LONG,
167 OT_QUAD,
168 };
169
170 enum {
171 /* I386 int registers */
172 OR_EAX, /* MUST be even numbered */
173 OR_ECX,
174 OR_EDX,
175 OR_EBX,
176 OR_ESP,
177 OR_EBP,
178 OR_ESI,
179 OR_EDI,
180
181 OR_TMP0 = 16, /* temporary operand register */
182 OR_TMP1,
183 OR_A0, /* temporary register used when doing address evaluation */
184 };
185
186 enum {
187 USES_CC_DST = 1,
188 USES_CC_SRC = 2,
189 };
190
191 /* Bit set if the global variable is live after setting CC_OP to X. */
192 static const uint8_t cc_op_live[CC_OP_NB] = {
193 [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC,
194 [CC_OP_EFLAGS] = USES_CC_SRC,
195 [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
196 [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
197 [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC,
198 [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC,
199 [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC,
200 [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
201 [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
202 [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
203 [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
204 [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
205 };
206
207 static void set_cc_op(DisasContext *s, CCOp op)
208 {
209 int dead;
210
211 if (s->cc_op == op) {
212 return;
213 }
214
215 /* Discard CC computation that will no longer be used. */
216 dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
217 if (dead & USES_CC_DST) {
218 tcg_gen_discard_tl(cpu_cc_dst);
219 }
220 if (dead & USES_CC_SRC) {
221 tcg_gen_discard_tl(cpu_cc_src);
222 }
223
224 s->cc_op = op;
225 /* The DYNAMIC setting is translator only, and should never be
226 stored. Thus we always consider it clean. */
227 s->cc_op_dirty = (op != CC_OP_DYNAMIC);
228 }
229
230 static void gen_update_cc_op(DisasContext *s)
231 {
232 if (s->cc_op_dirty) {
233 tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
234 s->cc_op_dirty = false;
235 }
236 }
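
/* Hypothetical helper, added for illustration only (not part of the original
 * file): how an ADD-style op uses the lazy flag machinery above.  The flag
 * operands are stashed in cc_src/cc_dst and only the CC_OP value is recorded;
 * the actual EFLAGS bits are computed on demand by gen_compute_eflags().
 * This mirrors what gen_op() does further down for OP_ADDL with 32-bit
 * operands.
 */
static inline void gen_sketch_addl_T0_T1_cc(DisasContext *s)
{
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);   /* second operand */
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);   /* result */
    set_cc_op(s, CC_OP_ADDL);               /* defer EFLAGS computation */
}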
237
238 static inline void gen_op_movl_T0_0(void)
239 {
240 tcg_gen_movi_tl(cpu_T[0], 0);
241 }
242
243 static inline void gen_op_movl_T0_im(int32_t val)
244 {
245 tcg_gen_movi_tl(cpu_T[0], val);
246 }
247
248 static inline void gen_op_movl_T0_imu(uint32_t val)
249 {
250 tcg_gen_movi_tl(cpu_T[0], val);
251 }
252
253 static inline void gen_op_movl_T1_im(int32_t val)
254 {
255 tcg_gen_movi_tl(cpu_T[1], val);
256 }
257
258 static inline void gen_op_movl_T1_imu(uint32_t val)
259 {
260 tcg_gen_movi_tl(cpu_T[1], val);
261 }
262
263 static inline void gen_op_movl_A0_im(uint32_t val)
264 {
265 tcg_gen_movi_tl(cpu_A0, val);
266 }
267
268 #ifdef TARGET_X86_64
269 static inline void gen_op_movq_A0_im(int64_t val)
270 {
271 tcg_gen_movi_tl(cpu_A0, val);
272 }
273 #endif
274
275 static inline void gen_movtl_T0_im(target_ulong val)
276 {
277 tcg_gen_movi_tl(cpu_T[0], val);
278 }
279
280 static inline void gen_movtl_T1_im(target_ulong val)
281 {
282 tcg_gen_movi_tl(cpu_T[1], val);
283 }
284
285 static inline void gen_op_andl_T0_ffff(void)
286 {
287 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
288 }
289
290 static inline void gen_op_andl_T0_im(uint32_t val)
291 {
292 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
293 }
294
295 static inline void gen_op_movl_T0_T1(void)
296 {
297 tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
298 }
299
300 static inline void gen_op_andl_A0_ffff(void)
301 {
302 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
303 }
304
305 #ifdef TARGET_X86_64
306
307 #define NB_OP_SIZES 4
308
309 #else /* !TARGET_X86_64 */
310
311 #define NB_OP_SIZES 3
312
313 #endif /* !TARGET_X86_64 */
314
315 #if defined(HOST_WORDS_BIGENDIAN)
316 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
317 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
318 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
319 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
320 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
321 #else
322 #define REG_B_OFFSET 0
323 #define REG_H_OFFSET 1
324 #define REG_W_OFFSET 0
325 #define REG_L_OFFSET 0
326 #define REG_LH_OFFSET 4
327 #endif
328
329 /* In instruction encodings for byte register accesses the
330 * register number usually indicates "low 8 bits of register N";
331 * however there are some special cases where N 4..7 indicates
332 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
333 * true for this special case, false otherwise.
334 */
335 static inline bool byte_reg_is_xH(int reg)
336 {
337 if (reg < 4) {
338 return false;
339 }
340 #ifdef TARGET_X86_64
341 if (reg >= 8 || x86_64_hregs) {
342 return false;
343 }
344 #endif
345 return true;
346 }
347
348 static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
349 {
350 switch(ot) {
351 case OT_BYTE:
352 if (!byte_reg_is_xH(reg)) {
353 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
354 } else {
355 tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
356 }
357 break;
358 case OT_WORD:
359 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
360 break;
361 default: /* XXX this shouldn't be reached; abort? */
362 case OT_LONG:
363 /* For x86_64, this sets the higher half of register to zero.
364 For i386, this is equivalent to a mov. */
365 tcg_gen_ext32u_tl(cpu_regs[reg], t0);
366 break;
367 #ifdef TARGET_X86_64
368 case OT_QUAD:
369 tcg_gen_mov_tl(cpu_regs[reg], t0);
370 break;
371 #endif
372 }
373 }
374
375 static inline void gen_op_mov_reg_T0(int ot, int reg)
376 {
377 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
378 }
379
380 static inline void gen_op_mov_reg_T1(int ot, int reg)
381 {
382 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
383 }
384
385 static inline void gen_op_mov_reg_A0(int size, int reg)
386 {
387 switch(size) {
388 case OT_BYTE:
389 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
390 break;
391 default: /* XXX this shouldn't be reached; abort? */
392 case OT_WORD:
393 /* For x86_64, this sets the higher half of register to zero.
394 For i386, this is equivalent to a mov. */
395 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
396 break;
397 #ifdef TARGET_X86_64
398 case OT_LONG:
399 tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
400 break;
401 #endif
402 }
403 }
404
405 static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
406 {
407 if (ot == OT_BYTE && byte_reg_is_xH(reg)) {
408 tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
409 tcg_gen_ext8u_tl(t0, t0);
410 } else {
411 tcg_gen_mov_tl(t0, cpu_regs[reg]);
412 }
413 }
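
/* Hypothetical helper, added for illustration only: with no REX prefix,
 * register number 4 in a byte-sized operand selects AH, so the pair below
 * copies AH into CH without touching the rest of ECX.  Under a REX prefix
 * (x86_64_hregs set) the same numbers would select SPL/BPL instead and
 * byte_reg_is_xH() returns false.
 */
static inline void gen_sketch_mov_ch_ah(void)
{
    gen_op_mov_v_reg(OT_BYTE, cpu_tmp0, 4);   /* read AH (bits 15..8 of EAX) */
    gen_op_mov_reg_v(OT_BYTE, 5, cpu_tmp0);   /* write CH (bits 15..8 of ECX) */
}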
414
415 static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
416 {
417 gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
418 }
419
420 static inline void gen_op_movl_A0_reg(int reg)
421 {
422 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
423 }
424
425 static inline void gen_op_addl_A0_im(int32_t val)
426 {
427 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
428 #ifdef TARGET_X86_64
429 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
430 #endif
431 }
432
433 #ifdef TARGET_X86_64
434 static inline void gen_op_addq_A0_im(int64_t val)
435 {
436 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
437 }
438 #endif
439
440 static void gen_add_A0_im(DisasContext *s, int val)
441 {
442 #ifdef TARGET_X86_64
443 if (CODE64(s))
444 gen_op_addq_A0_im(val);
445 else
446 #endif
447 gen_op_addl_A0_im(val);
448 }
449
450 static inline void gen_op_addl_T0_T1(void)
451 {
452 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
453 }
454
455 static inline void gen_op_jmp_T0(void)
456 {
457 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
458 }
459
460 static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
461 {
462 switch(size) {
463 case OT_BYTE:
464 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
465 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
466 break;
467 case OT_WORD:
468 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
469 /* For x86_64, this sets the higher half of register to zero.
470 For i386, this is equivalent to a nop. */
471 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
472 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
473 break;
474 #ifdef TARGET_X86_64
475 case OT_LONG:
476 tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
477 break;
478 #endif
479 }
480 }
481
482 static inline void gen_op_add_reg_T0(int size, int reg)
483 {
484 switch(size) {
485 case OT_BYTE:
486 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
487 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
488 break;
489 case OT_WORD:
490 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
491 /* For x86_64, this sets the higher half of register to zero.
492 For i386, this is equivalent to a nop. */
493 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
494 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
495 break;
496 #ifdef TARGET_X86_64
497 case OT_LONG:
498 tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
499 break;
500 #endif
501 }
502 }
503
504 static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
505 {
506 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
507 if (shift != 0)
508 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
509 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
510 /* For x86_64, this sets the higher half of register to zero.
511 For i386, this is equivalent to a nop. */
512 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
513 }
514
515 static inline void gen_op_movl_A0_seg(int reg)
516 {
517 tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
518 }
519
520 static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
521 {
522 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
523 #ifdef TARGET_X86_64
524 if (CODE64(s)) {
525 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
526 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
527 } else {
528 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
529 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
530 }
531 #else
532 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
533 #endif
534 }
535
536 #ifdef TARGET_X86_64
537 static inline void gen_op_movq_A0_seg(int reg)
538 {
539 tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
540 }
541
542 static inline void gen_op_addq_A0_seg(int reg)
543 {
544 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
545 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
546 }
547
548 static inline void gen_op_movq_A0_reg(int reg)
549 {
550 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
551 }
552
553 static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
554 {
555 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
556 if (shift != 0)
557 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
558 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
559 }
560 #endif
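
/* Hypothetical helper, added for illustration only: forming the linear
 * address for a 16-bit addressed access such as "mov al,[si]".  The
 * effective address is masked to 16 bits and the DS base is added,
 * mirroring what gen_lea_modrm() does further down for the 16-bit case.
 */
static inline void gen_sketch_A0_ds_si(DisasContext *s)
{
    gen_op_movl_A0_reg(R_ESI);     /* A0 = ESI */
    gen_op_andl_A0_ffff();         /* A0 &= 0xffff (16-bit wrap) */
    gen_op_addl_A0_seg(s, R_DS);   /* A0 += DS.base */
}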
561
562 static inline void gen_op_lds_T0_A0(int idx)
563 {
564 int mem_index = (idx >> 2) - 1;
565 switch(idx & 3) {
566 case OT_BYTE:
567 tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
568 break;
569 case OT_WORD:
570 tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
571 break;
572 default:
573 case OT_LONG:
574 tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
575 break;
576 }
577 }
578
579 static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
580 {
581 int mem_index = (idx >> 2) - 1;
582 switch(idx & 3) {
583 case OT_BYTE:
584 tcg_gen_qemu_ld8u(t0, a0, mem_index);
585 break;
586 case OT_WORD:
587 tcg_gen_qemu_ld16u(t0, a0, mem_index);
588 break;
589 case OT_LONG:
590 tcg_gen_qemu_ld32u(t0, a0, mem_index);
591 break;
592 default:
593 case OT_QUAD:
594 /* Should never happen on 32-bit targets. */
595 #ifdef TARGET_X86_64
596 tcg_gen_qemu_ld64(t0, a0, mem_index);
597 #endif
598 break;
599 }
600 }
601
602 /* XXX: always use ldu or lds */
603 static inline void gen_op_ld_T0_A0(int idx)
604 {
605 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
606 }
607
608 static inline void gen_op_ldu_T0_A0(int idx)
609 {
610 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
611 }
612
613 static inline void gen_op_ld_T1_A0(int idx)
614 {
615 gen_op_ld_v(idx, cpu_T[1], cpu_A0);
616 }
617
618 static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
619 {
620 int mem_index = (idx >> 2) - 1;
621 switch(idx & 3) {
622 case OT_BYTE:
623 tcg_gen_qemu_st8(t0, a0, mem_index);
624 break;
625 case OT_WORD:
626 tcg_gen_qemu_st16(t0, a0, mem_index);
627 break;
628 case OT_LONG:
629 tcg_gen_qemu_st32(t0, a0, mem_index);
630 break;
631 default:
632 case OT_QUAD:
633 /* Should never happen on 32-bit targets. */
634 #ifdef TARGET_X86_64
635 tcg_gen_qemu_st64(t0, a0, mem_index);
636 #endif
637 break;
638 }
639 }
640
641 static inline void gen_op_st_T0_A0(int idx)
642 {
643 gen_op_st_v(idx, cpu_T[0], cpu_A0);
644 }
645
646 static inline void gen_op_st_T1_A0(int idx)
647 {
648 gen_op_st_v(idx, cpu_T[1], cpu_A0);
649 }
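
/* Hypothetical helper, added for illustration only: the idx parameter of the
 * load/store helpers above packs the operand size in bits 0..1 and the MMU
 * index (s->mem_index) in the upper bits, so a 32-bit read-modify-write of
 * the location addressed by A0 looks like this.
 */
static inline void gen_sketch_incl_mem(DisasContext *s)
{
    gen_op_ld_T0_A0(OT_LONG + s->mem_index);   /* T0 = 32-bit load at [A0] */
    tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);    /* T0 += 1 */
    gen_op_st_T0_A0(OT_LONG + s->mem_index);   /* store back */
}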
650
651 static inline void gen_jmp_im(target_ulong pc)
652 {
653 tcg_gen_movi_tl(cpu_tmp0, pc);
654 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
655 }
656
657 static inline void gen_string_movl_A0_ESI(DisasContext *s)
658 {
659 int override;
660
661 override = s->override;
662 #ifdef TARGET_X86_64
663 if (s->aflag == 2) {
664 if (override >= 0) {
665 gen_op_movq_A0_seg(override);
666 gen_op_addq_A0_reg_sN(0, R_ESI);
667 } else {
668 gen_op_movq_A0_reg(R_ESI);
669 }
670 } else
671 #endif
672 if (s->aflag) {
673 /* 32 bit address */
674 if (s->addseg && override < 0)
675 override = R_DS;
676 if (override >= 0) {
677 gen_op_movl_A0_seg(override);
678 gen_op_addl_A0_reg_sN(0, R_ESI);
679 } else {
680 gen_op_movl_A0_reg(R_ESI);
681 }
682 } else {
683 /* 16 bit address, always override */
684 if (override < 0)
685 override = R_DS;
686 gen_op_movl_A0_reg(R_ESI);
687 gen_op_andl_A0_ffff();
688 gen_op_addl_A0_seg(s, override);
689 }
690 }
691
692 static inline void gen_string_movl_A0_EDI(DisasContext *s)
693 {
694 #ifdef TARGET_X86_64
695 if (s->aflag == 2) {
696 gen_op_movq_A0_reg(R_EDI);
697 } else
698 #endif
699 if (s->aflag) {
700 if (s->addseg) {
701 gen_op_movl_A0_seg(R_ES);
702 gen_op_addl_A0_reg_sN(0, R_EDI);
703 } else {
704 gen_op_movl_A0_reg(R_EDI);
705 }
706 } else {
707 gen_op_movl_A0_reg(R_EDI);
708 gen_op_andl_A0_ffff();
709 gen_op_addl_A0_seg(s, R_ES);
710 }
711 }
712
713 static inline void gen_op_movl_T0_Dshift(int ot)
714 {
715 tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
716 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
717 }
718
719 static TCGv gen_ext_tl(TCGv dst, TCGv src, int size, bool sign)
720 {
721 switch (size) {
722 case OT_BYTE:
723 if (sign) {
724 tcg_gen_ext8s_tl(dst, src);
725 } else {
726 tcg_gen_ext8u_tl(dst, src);
727 }
728 return dst;
729 case OT_WORD:
730 if (sign) {
731 tcg_gen_ext16s_tl(dst, src);
732 } else {
733 tcg_gen_ext16u_tl(dst, src);
734 }
735 return dst;
736 #ifdef TARGET_X86_64
737 case OT_LONG:
738 if (sign) {
739 tcg_gen_ext32s_tl(dst, src);
740 } else {
741 tcg_gen_ext32u_tl(dst, src);
742 }
743 return dst;
744 #endif
745 default:
746 return src;
747 }
748 }
749
750 static void gen_extu(int ot, TCGv reg)
751 {
752 gen_ext_tl(reg, reg, ot, false);
753 }
754
755 static void gen_exts(int ot, TCGv reg)
756 {
757 gen_ext_tl(reg, reg, ot, true);
758 }
759
760 static inline void gen_op_jnz_ecx(int size, int label1)
761 {
762 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
763 gen_extu(size + 1, cpu_tmp0);
764 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
765 }
766
767 static inline void gen_op_jz_ecx(int size, int label1)
768 {
769 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
770 gen_extu(size + 1, cpu_tmp0);
771 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
772 }
773
774 static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
775 {
776 switch (ot) {
777 case OT_BYTE:
778 gen_helper_inb(v, n);
779 break;
780 case OT_WORD:
781 gen_helper_inw(v, n);
782 break;
783 case OT_LONG:
784 gen_helper_inl(v, n);
785 break;
786 }
787 }
788
789 static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
790 {
791 switch (ot) {
792 case OT_BYTE:
793 gen_helper_outb(v, n);
794 break;
795 case OT_WORD:
796 gen_helper_outw(v, n);
797 break;
798 case OT_LONG:
799 gen_helper_outl(v, n);
800 break;
801 }
802 }
803
804 static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
805 uint32_t svm_flags)
806 {
807 int state_saved;
808 target_ulong next_eip;
809
810 state_saved = 0;
811 if (s->pe && (s->cpl > s->iopl || s->vm86)) {
812 gen_update_cc_op(s);
813 gen_jmp_im(cur_eip);
814 state_saved = 1;
815 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
816 switch (ot) {
817 case OT_BYTE:
818 gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
819 break;
820 case OT_WORD:
821 gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
822 break;
823 case OT_LONG:
824 gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
825 break;
826 }
827 }
828 if (s->flags & HF_SVMI_MASK) {
829 if (!state_saved) {
830 gen_update_cc_op(s);
831 gen_jmp_im(cur_eip);
832 }
833 svm_flags |= (1 << (4 + ot));
834 next_eip = s->pc - s->cs_base;
835 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
836 gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
837 tcg_const_i32(svm_flags),
838 tcg_const_i32(next_eip - cur_eip));
839 }
840 }
841
842 static inline void gen_movs(DisasContext *s, int ot)
843 {
844 gen_string_movl_A0_ESI(s);
845 gen_op_ld_T0_A0(ot + s->mem_index);
846 gen_string_movl_A0_EDI(s);
847 gen_op_st_T0_A0(ot + s->mem_index);
848 gen_op_movl_T0_Dshift(ot);
849 gen_op_add_reg_T0(s->aflag, R_ESI);
850 gen_op_add_reg_T0(s->aflag, R_EDI);
851 }
852
853 static void gen_op_update1_cc(void)
854 {
855 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
856 }
857
858 static void gen_op_update2_cc(void)
859 {
860 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
861 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
862 }
863
864 static inline void gen_op_cmpl_T0_T1_cc(void)
865 {
866 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
867 tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
868 }
869
870 static inline void gen_op_testl_T0_T1_cc(void)
871 {
872 tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
873 }
874
875 static void gen_op_update_neg_cc(void)
876 {
877 tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
878 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
879 }
880
881 /* compute all eflags to cc_src */
882 static void gen_compute_eflags(DisasContext *s)
883 {
884 if (s->cc_op == CC_OP_EFLAGS) {
885 return;
886 }
887 gen_update_cc_op(s);
888 gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_env, cpu_cc_op);
889 set_cc_op(s, CC_OP_EFLAGS);
890 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
891 }
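
/* Hypothetical helper, added for illustration only: once gen_compute_eflags()
 * has run, CC_SRC holds the complete flags word, so an individual flag such
 * as ZF can be tested with a simple mask (CC_Z comes from cpu.h).
 */
static inline void gen_sketch_test_zf(DisasContext *s, TCGv dst)
{
    gen_compute_eflags(s);
    tcg_gen_andi_tl(dst, cpu_cc_src, CC_Z);   /* non-zero iff ZF is set */
}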
892
893 typedef struct CCPrepare {
894 TCGCond cond;
895 TCGv reg;
896 TCGv reg2;
897 target_ulong imm;
898 target_ulong mask;
899 bool use_reg2;
900 bool no_setcond;
901 } CCPrepare;
902
903 /* compute eflags.C to reg */
904 static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
905 {
906 TCGv t0, t1;
907 int size, shift;
908
909 switch (s->cc_op) {
910 case CC_OP_SUBB ... CC_OP_SUBQ:
911 /* (DATA_TYPE)(CC_DST + CC_SRC) < (DATA_TYPE)CC_SRC */
912 size = s->cc_op - CC_OP_SUBB;
913 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
914 /* If no temporary was used, be careful not to alias t1 and t0. */
915 t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
916 tcg_gen_add_tl(t0, cpu_cc_dst, cpu_cc_src);
917 gen_extu(size, t0);
918 goto add_sub;
919
920 case CC_OP_ADDB ... CC_OP_ADDQ:
921 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
922 size = s->cc_op - CC_OP_ADDB;
923 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
924 t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
925 add_sub:
926 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
927 .reg2 = t1, .mask = -1, .use_reg2 = true };
928
929 case CC_OP_SBBB ... CC_OP_SBBQ:
930 /* (DATA_TYPE)(CC_DST + CC_SRC + 1) <= (DATA_TYPE)CC_SRC */
931 size = s->cc_op - CC_OP_SBBB;
932 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
933 if (TCGV_EQUAL(t1, reg) && TCGV_EQUAL(reg, cpu_cc_src)) {
934 tcg_gen_mov_tl(cpu_tmp0, cpu_cc_src);
935 t1 = cpu_tmp0;
936 }
937
938 tcg_gen_add_tl(reg, cpu_cc_dst, cpu_cc_src);
939 tcg_gen_addi_tl(reg, reg, 1);
940 gen_extu(size, reg);
941 t0 = reg;
942 goto adc_sbb;
943
944 case CC_OP_ADCB ... CC_OP_ADCQ:
945 /* (DATA_TYPE)CC_DST <= (DATA_TYPE)CC_SRC */
946 size = s->cc_op - CC_OP_ADCB;
947 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
948 t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
949 adc_sbb:
950 return (CCPrepare) { .cond = TCG_COND_LEU, .reg = t0,
951 .reg2 = t1, .mask = -1, .use_reg2 = true };
952
953 case CC_OP_LOGICB ... CC_OP_LOGICQ:
954 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
955
956 case CC_OP_INCB ... CC_OP_INCQ:
957 case CC_OP_DECB ... CC_OP_DECQ:
958 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
959 .mask = -1, .no_setcond = true };
960
961 case CC_OP_SHLB ... CC_OP_SHLQ:
962 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
963 size = s->cc_op - CC_OP_SHLB;
964 shift = (8 << size) - 1;
965 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
966 .mask = (target_ulong)1 << shift };
967
968 case CC_OP_MULB ... CC_OP_MULQ:
969 return (CCPrepare) { .cond = TCG_COND_NE,
970 .reg = cpu_cc_src, .mask = -1 };
971
972 case CC_OP_EFLAGS:
973 case CC_OP_SARB ... CC_OP_SARQ:
974 /* CC_SRC & 1 */
975 return (CCPrepare) { .cond = TCG_COND_NE,
976 .reg = cpu_cc_src, .mask = CC_C };
977
978 default:
979 /* The need to compute only C from CC_OP_DYNAMIC is important
980 in efficiently implementing e.g. INC at the start of a TB. */
981 gen_update_cc_op(s);
982 gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_env, cpu_cc_op);
983 tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
984 return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
985 .mask = -1, .no_setcond = true };
986 }
987 }
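
/*
 * Worked example (added comment, not in the original): for the CC_OP_SUB*
 * cases above, CC_DST = op1 - op2 and CC_SRC = op2, so op1 can be recovered
 * as CC_DST + CC_SRC and the carry flag is
 *
 *     CF = (DATA_TYPE)op1 < (DATA_TYPE)op2
 *        = (DATA_TYPE)(CC_DST + CC_SRC) < (DATA_TYPE)CC_SRC
 *
 * which is exactly the TCG_COND_LTU comparison built in the SUB case.
 */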
988
989 /* compute eflags.P to reg */
990 static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
991 {
992 gen_compute_eflags(s);
993 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
994 .mask = CC_P };
995 }
996
997 /* compute eflags.S to reg */
998 static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
999 {
1000 switch (s->cc_op) {
1001 case CC_OP_DYNAMIC:
1002 gen_compute_eflags(s);
1003 /* FALLTHRU */
1004 case CC_OP_EFLAGS:
1005 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1006 .mask = CC_S };
1007 default:
1008 {
1009 int size = (s->cc_op - CC_OP_ADDB) & 3;
1010 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
1011 return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
1012 }
1013 }
1014 }
1015
1016 /* compute eflags.O to reg */
1017 static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
1018 {
1019 gen_compute_eflags(s);
1020 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1021 .mask = CC_O };
1022 }
1023
1024 /* compute eflags.Z to reg */
1025 static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
1026 {
1027 switch (s->cc_op) {
1028 case CC_OP_DYNAMIC:
1029 gen_compute_eflags(s);
1030 /* FALLTHRU */
1031 case CC_OP_EFLAGS:
1032 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1033 .mask = CC_Z };
1034 default:
1035 {
1036 int size = (s->cc_op - CC_OP_ADDB) & 3;
1037 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
1038 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
1039 }
1040 }
1041 }
1042
1043 /* perform a conditional store into register 'reg' according to jump opcode
1044 value 'b'. In the fast case, T0 is guaranteed not to be used. */
1045 static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
1046 {
1047 int inv, jcc_op, size, cond;
1048 CCPrepare cc;
1049 TCGv t0;
1050
1051 inv = b & 1;
1052 jcc_op = (b >> 1) & 7;
1053
1054 switch (s->cc_op) {
1055 case CC_OP_SUBB ... CC_OP_SUBQ:
1056 /* We optimize relational operators for the cmp/jcc case. */
1057 size = s->cc_op - CC_OP_SUBB;
1058 switch (jcc_op) {
1059 case JCC_BE:
1060 tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
1061 gen_extu(size, cpu_tmp4);
1062 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
1063 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
1064 .reg2 = t0, .mask = -1, .use_reg2 = true };
1065 break;
1066
1067 case JCC_L:
1068 cond = TCG_COND_LT;
1069 goto fast_jcc_l;
1070 case JCC_LE:
1071 cond = TCG_COND_LE;
1072 fast_jcc_l:
1073 tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
1074 gen_exts(size, cpu_tmp4);
1075 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
1076 cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
1077 .reg2 = t0, .mask = -1, .use_reg2 = true };
1078 break;
1079
1080 default:
1081 goto slow_jcc;
1082 }
1083 break;
1084
1085 default:
1086 slow_jcc:
1087 /* This actually generates good code for JC, JZ and JS. */
1088 switch (jcc_op) {
1089 case JCC_O:
1090 cc = gen_prepare_eflags_o(s, reg);
1091 break;
1092 case JCC_B:
1093 cc = gen_prepare_eflags_c(s, reg);
1094 break;
1095 case JCC_Z:
1096 cc = gen_prepare_eflags_z(s, reg);
1097 break;
1098 case JCC_BE:
1099 gen_compute_eflags(s);
1100 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1101 .mask = CC_Z | CC_C };
1102 break;
1103 case JCC_S:
1104 cc = gen_prepare_eflags_s(s, reg);
1105 break;
1106 case JCC_P:
1107 cc = gen_prepare_eflags_p(s, reg);
1108 break;
1109 case JCC_L:
1110 gen_compute_eflags(s);
1111 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1112 reg = cpu_tmp0;
1113 }
1114 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1115 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1116 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1117 .mask = CC_S };
1118 break;
1119 default:
1120 case JCC_LE:
1121 gen_compute_eflags(s);
1122 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1123 reg = cpu_tmp0;
1124 }
1125 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1126 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1127 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1128 .mask = CC_S | CC_Z };
1129 break;
1130 }
1131 break;
1132 }
1133
1134 if (inv) {
1135 cc.cond = tcg_invert_cond(cc.cond);
1136 }
1137 return cc;
1138 }
1139
1140 static void gen_setcc1(DisasContext *s, int b, TCGv reg)
1141 {
1142 CCPrepare cc = gen_prepare_cc(s, b, reg);
1143
1144 if (cc.no_setcond) {
1145 if (cc.cond == TCG_COND_EQ) {
1146 tcg_gen_xori_tl(reg, cc.reg, 1);
1147 } else {
1148 tcg_gen_mov_tl(reg, cc.reg);
1149 }
1150 return;
1151 }
1152
1153 if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
1154 cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
1155 tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
1156 tcg_gen_andi_tl(reg, reg, 1);
1157 return;
1158 }
1159 if (cc.mask != -1) {
1160 tcg_gen_andi_tl(reg, cc.reg, cc.mask);
1161 cc.reg = reg;
1162 }
1163 if (cc.use_reg2) {
1164 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1165 } else {
1166 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1167 }
1168 }
1169
1170 static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1171 {
1172 gen_setcc1(s, JCC_B << 1, reg);
1173 }
1174
1175 /* generate a conditional jump to label 'l1' according to jump opcode
1176 value 'b'. In the fast case, T0 is guaranteed not to be used. */
1177 static inline void gen_jcc1(DisasContext *s, int b, int l1)
1178 {
1179 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
1180
1181 if (cc.mask != -1) {
1182 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1183 cc.reg = cpu_T[0];
1184 }
1185 if (cc.use_reg2) {
1186 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1187 } else {
1188 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1189 }
1190 }
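
/* Hypothetical helper, added for illustration only: emitting the taken/not
 * taken split for a JZ using gen_jcc1() above.  The low bit of the opcode
 * value inverts the condition, so (JCC_Z << 1) | 1 would be JNZ.
 */
static inline void gen_sketch_jz(DisasContext *s)
{
    int l1 = gen_new_label();

    gen_jcc1(s, JCC_Z << 1, l1);    /* branch if ZF is set */
    /* ... code for the not-taken path would be emitted here ... */
    gen_set_label(l1);
}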
1191
1192 /* XXX: does not work with gdbstub "ice" single step - not a
1193 serious problem */
1194 static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
1195 {
1196 int l1, l2;
1197
1198 l1 = gen_new_label();
1199 l2 = gen_new_label();
1200 gen_op_jnz_ecx(s->aflag, l1);
1201 gen_set_label(l2);
1202 gen_jmp_tb(s, next_eip, 1);
1203 gen_set_label(l1);
1204 return l2;
1205 }
1206
1207 static inline void gen_stos(DisasContext *s, int ot)
1208 {
1209 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1210 gen_string_movl_A0_EDI(s);
1211 gen_op_st_T0_A0(ot + s->mem_index);
1212 gen_op_movl_T0_Dshift(ot);
1213 gen_op_add_reg_T0(s->aflag, R_EDI);
1214 }
1215
1216 static inline void gen_lods(DisasContext *s, int ot)
1217 {
1218 gen_string_movl_A0_ESI(s);
1219 gen_op_ld_T0_A0(ot + s->mem_index);
1220 gen_op_mov_reg_T0(ot, R_EAX);
1221 gen_op_movl_T0_Dshift(ot);
1222 gen_op_add_reg_T0(s->aflag, R_ESI);
1223 }
1224
1225 static inline void gen_scas(DisasContext *s, int ot)
1226 {
1227 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1228 gen_string_movl_A0_EDI(s);
1229 gen_op_ld_T1_A0(ot + s->mem_index);
1230 gen_op_cmpl_T0_T1_cc();
1231 gen_op_movl_T0_Dshift(ot);
1232 gen_op_add_reg_T0(s->aflag, R_EDI);
1233 set_cc_op(s, CC_OP_SUBB + ot);
1234 }
1235
1236 static inline void gen_cmps(DisasContext *s, int ot)
1237 {
1238 gen_string_movl_A0_ESI(s);
1239 gen_op_ld_T0_A0(ot + s->mem_index);
1240 gen_string_movl_A0_EDI(s);
1241 gen_op_ld_T1_A0(ot + s->mem_index);
1242 gen_op_cmpl_T0_T1_cc();
1243 gen_op_movl_T0_Dshift(ot);
1244 gen_op_add_reg_T0(s->aflag, R_ESI);
1245 gen_op_add_reg_T0(s->aflag, R_EDI);
1246 set_cc_op(s, CC_OP_SUBB + ot);
1247 }
1248
1249 static inline void gen_ins(DisasContext *s, int ot)
1250 {
1251 if (use_icount)
1252 gen_io_start();
1253 gen_string_movl_A0_EDI(s);
1254 /* Note: we must do this dummy write first to be restartable in
1255 case of page fault. */
1256 gen_op_movl_T0_0();
1257 gen_op_st_T0_A0(ot + s->mem_index);
1258 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1259 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1260 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1261 gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
1262 gen_op_st_T0_A0(ot + s->mem_index);
1263 gen_op_movl_T0_Dshift(ot);
1264 gen_op_add_reg_T0(s->aflag, R_EDI);
1265 if (use_icount)
1266 gen_io_end();
1267 }
1268
1269 static inline void gen_outs(DisasContext *s, int ot)
1270 {
1271 if (use_icount)
1272 gen_io_start();
1273 gen_string_movl_A0_ESI(s);
1274 gen_op_ld_T0_A0(ot + s->mem_index);
1275
1276 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1277 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1278 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1279 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
1280 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
1281
1282 gen_op_movl_T0_Dshift(ot);
1283 gen_op_add_reg_T0(s->aflag, R_ESI);
1284 if (use_icount)
1285 gen_io_end();
1286 }
1287
1288 /* same method as Valgrind: we generate jumps to the current or next
1289 instruction */
1290 #define GEN_REPZ(op) \
1291 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1292 target_ulong cur_eip, target_ulong next_eip) \
1293 { \
1294 int l2;\
1295 gen_update_cc_op(s); \
1296 l2 = gen_jz_ecx_string(s, next_eip); \
1297 gen_ ## op(s, ot); \
1298 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1299 /* a loop would cause two single-step exceptions if ECX == 1 \
1300 before the rep string instruction */ \
1301 if (!s->jmp_opt) \
1302 gen_op_jz_ecx(s->aflag, l2); \
1303 gen_jmp(s, cur_eip); \
1304 }
1305
1306 #define GEN_REPZ2(op) \
1307 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1308 target_ulong cur_eip, \
1309 target_ulong next_eip, \
1310 int nz) \
1311 { \
1312 int l2;\
1313 gen_update_cc_op(s); \
1314 l2 = gen_jz_ecx_string(s, next_eip); \
1315 gen_ ## op(s, ot); \
1316 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1317 gen_update_cc_op(s); \
1318 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
1319 if (!s->jmp_opt) \
1320 gen_op_jz_ecx(s->aflag, l2); \
1321 gen_jmp(s, cur_eip); \
1322 set_cc_op(s, CC_OP_DYNAMIC); \
1323 }
1324
1325 GEN_REPZ(movs)
1326 GEN_REPZ(stos)
1327 GEN_REPZ(lods)
1328 GEN_REPZ(ins)
1329 GEN_REPZ(outs)
1330 GEN_REPZ2(scas)
1331 GEN_REPZ2(cmps)
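
/* Hypothetical helper, added for illustration only: how the decoder would
 * dispatch between the plain and REP-prefixed forms using the wrappers
 * generated above.  cur_eip and next_eip are the eip of this and of the
 * following instruction (pc minus cs_base), as the real decoder computes
 * them.
 */
static inline void gen_sketch_movs(DisasContext *s, int ot,
                                   target_ulong cur_eip, target_ulong next_eip)
{
    if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
        gen_repz_movs(s, ot, cur_eip, next_eip);  /* loops back to cur_eip */
    } else {
        gen_movs(s, ot);
    }
}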
1332
1333 static void gen_helper_fp_arith_ST0_FT0(int op)
1334 {
1335 switch (op) {
1336 case 0:
1337 gen_helper_fadd_ST0_FT0(cpu_env);
1338 break;
1339 case 1:
1340 gen_helper_fmul_ST0_FT0(cpu_env);
1341 break;
1342 case 2:
1343 gen_helper_fcom_ST0_FT0(cpu_env);
1344 break;
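/* Note (added): case 3 (FCOMP) intentionally reuses the fcom helper; the
   caller is expected to emit the register-stack pop separately. */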
1345 case 3:
1346 gen_helper_fcom_ST0_FT0(cpu_env);
1347 break;
1348 case 4:
1349 gen_helper_fsub_ST0_FT0(cpu_env);
1350 break;
1351 case 5:
1352 gen_helper_fsubr_ST0_FT0(cpu_env);
1353 break;
1354 case 6:
1355 gen_helper_fdiv_ST0_FT0(cpu_env);
1356 break;
1357 case 7:
1358 gen_helper_fdivr_ST0_FT0(cpu_env);
1359 break;
1360 }
1361 }
1362
1363 /* NOTE the exception in "r" op ordering */
1364 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1365 {
1366 TCGv_i32 tmp = tcg_const_i32(opreg);
1367 switch (op) {
1368 case 0:
1369 gen_helper_fadd_STN_ST0(cpu_env, tmp);
1370 break;
1371 case 1:
1372 gen_helper_fmul_STN_ST0(cpu_env, tmp);
1373 break;
1374 case 4:
1375 gen_helper_fsubr_STN_ST0(cpu_env, tmp);
1376 break;
1377 case 5:
1378 gen_helper_fsub_STN_ST0(cpu_env, tmp);
1379 break;
1380 case 6:
1381 gen_helper_fdivr_STN_ST0(cpu_env, tmp);
1382 break;
1383 case 7:
1384 gen_helper_fdiv_STN_ST0(cpu_env, tmp);
1385 break;
1386 }
1387 }
1388
1389 /* if d == OR_TMP0, it means memory operand (address in A0) */
1390 static void gen_op(DisasContext *s1, int op, int ot, int d)
1391 {
1392 if (d != OR_TMP0) {
1393 gen_op_mov_TN_reg(ot, 0, d);
1394 } else {
1395 gen_op_ld_T0_A0(ot + s1->mem_index);
1396 }
1397 switch(op) {
1398 case OP_ADCL:
1399 gen_compute_eflags_c(s1, cpu_tmp4);
1400 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1401 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1402 if (d != OR_TMP0)
1403 gen_op_mov_reg_T0(ot, d);
1404 else
1405 gen_op_st_T0_A0(ot + s1->mem_index);
1406 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1407 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1408 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1409 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1410 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
1411 set_cc_op(s1, CC_OP_DYNAMIC);
1412 break;
1413 case OP_SBBL:
1414 gen_compute_eflags_c(s1, cpu_tmp4);
1415 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1416 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1417 if (d != OR_TMP0)
1418 gen_op_mov_reg_T0(ot, d);
1419 else
1420 gen_op_st_T0_A0(ot + s1->mem_index);
1421 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1422 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1423 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1424 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1425 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot);
1426 set_cc_op(s1, CC_OP_DYNAMIC);
1427 break;
1428 case OP_ADDL:
1429 gen_op_addl_T0_T1();
1430 if (d != OR_TMP0)
1431 gen_op_mov_reg_T0(ot, d);
1432 else
1433 gen_op_st_T0_A0(ot + s1->mem_index);
1434 gen_op_update2_cc();
1435 set_cc_op(s1, CC_OP_ADDB + ot);
1436 break;
1437 case OP_SUBL:
1438 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1439 if (d != OR_TMP0)
1440 gen_op_mov_reg_T0(ot, d);
1441 else
1442 gen_op_st_T0_A0(ot + s1->mem_index);
1443 gen_op_update2_cc();
1444 set_cc_op(s1, CC_OP_SUBB + ot);
1445 break;
1446 default:
1447 case OP_ANDL:
1448 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1449 if (d != OR_TMP0)
1450 gen_op_mov_reg_T0(ot, d);
1451 else
1452 gen_op_st_T0_A0(ot + s1->mem_index);
1453 gen_op_update1_cc();
1454 set_cc_op(s1, CC_OP_LOGICB + ot);
1455 break;
1456 case OP_ORL:
1457 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1458 if (d != OR_TMP0)
1459 gen_op_mov_reg_T0(ot, d);
1460 else
1461 gen_op_st_T0_A0(ot + s1->mem_index);
1462 gen_op_update1_cc();
1463 set_cc_op(s1, CC_OP_LOGICB + ot);
1464 break;
1465 case OP_XORL:
1466 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1467 if (d != OR_TMP0)
1468 gen_op_mov_reg_T0(ot, d);
1469 else
1470 gen_op_st_T0_A0(ot + s1->mem_index);
1471 gen_op_update1_cc();
1472 set_cc_op(s1, CC_OP_LOGICB + ot);
1473 break;
1474 case OP_CMPL:
1475 gen_op_cmpl_T0_T1_cc();
1476 set_cc_op(s1, CC_OP_SUBB + ot);
1477 break;
1478 }
1479 }
1480
1481 /* if d == OR_TMP0, it means memory operand (address in A0) */
1482 static void gen_inc(DisasContext *s1, int ot, int d, int c)
1483 {
1484 if (d != OR_TMP0)
1485 gen_op_mov_TN_reg(ot, 0, d);
1486 else
1487 gen_op_ld_T0_A0(ot + s1->mem_index);
1488 gen_compute_eflags_c(s1, cpu_cc_src);
1489 if (c > 0) {
1490 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
1491 set_cc_op(s1, CC_OP_INCB + ot);
1492 } else {
1493 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
1494 set_cc_op(s1, CC_OP_DECB + ot);
1495 }
1496 if (d != OR_TMP0)
1497 gen_op_mov_reg_T0(ot, d);
1498 else
1499 gen_op_st_T0_A0(ot + s1->mem_index);
1500 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1501 }
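
/*
 * Note (added): INC and DEC update all arithmetic flags except CF, which is
 * why gen_inc() above computes the current carry into CC_SRC up front; the
 * CC_OP_INCx and CC_OP_DECx modes later read the saved carry back from
 * CC_SRC when the flags are materialised.
 */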
1502
1503 static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
1504 int is_right, int is_arith)
1505 {
1506 target_ulong mask;
1507 int shift_label;
1508 TCGv t0, t1, t2;
1509
1510 if (ot == OT_QUAD) {
1511 mask = 0x3f;
1512 } else {
1513 mask = 0x1f;
1514 }
1515
1516 /* load */
1517 if (op1 == OR_TMP0) {
1518 gen_op_ld_T0_A0(ot + s->mem_index);
1519 } else {
1520 gen_op_mov_TN_reg(ot, 0, op1);
1521 }
1522
1523 t0 = tcg_temp_local_new();
1524 t1 = tcg_temp_local_new();
1525 t2 = tcg_temp_local_new();
1526
1527 tcg_gen_andi_tl(t2, cpu_T[1], mask);
1528
1529 if (is_right) {
1530 if (is_arith) {
1531 gen_exts(ot, cpu_T[0]);
1532 tcg_gen_mov_tl(t0, cpu_T[0]);
1533 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], t2);
1534 } else {
1535 gen_extu(ot, cpu_T[0]);
1536 tcg_gen_mov_tl(t0, cpu_T[0]);
1537 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], t2);
1538 }
1539 } else {
1540 tcg_gen_mov_tl(t0, cpu_T[0]);
1541 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], t2);
1542 }
1543
1544 /* store */
1545 if (op1 == OR_TMP0) {
1546 gen_op_st_T0_A0(ot + s->mem_index);
1547 } else {
1548 gen_op_mov_reg_T0(ot, op1);
1549 }
1550
1551 /* update eflags */
1552 gen_update_cc_op(s);
1553
1554 tcg_gen_mov_tl(t1, cpu_T[0]);
1555
1556 shift_label = gen_new_label();
1557 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, shift_label);
1558
1559 tcg_gen_addi_tl(t2, t2, -1);
1560 tcg_gen_mov_tl(cpu_cc_dst, t1);
1561
1562 if (is_right) {
1563 if (is_arith) {
1564 tcg_gen_sar_tl(cpu_cc_src, t0, t2);
1565 } else {
1566 tcg_gen_shr_tl(cpu_cc_src, t0, t2);
1567 }
1568 } else {
1569 tcg_gen_shl_tl(cpu_cc_src, t0, t2);
1570 }
1571
1572 if (is_right) {
1573 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
1574 } else {
1575 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
1576 }
1577
1578 gen_set_label(shift_label);
1579 set_cc_op(s, CC_OP_DYNAMIC); /* cannot predict flags after */
1580
1581 tcg_temp_free(t0);
1582 tcg_temp_free(t1);
1583 tcg_temp_free(t2);
1584 }
1585
1586 static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
1587 int is_right, int is_arith)
1588 {
1589 int mask;
1590
1591 if (ot == OT_QUAD)
1592 mask = 0x3f;
1593 else
1594 mask = 0x1f;
1595
1596 /* load */
1597 if (op1 == OR_TMP0)
1598 gen_op_ld_T0_A0(ot + s->mem_index);
1599 else
1600 gen_op_mov_TN_reg(ot, 0, op1);
1601
1602 op2 &= mask;
1603 if (op2 != 0) {
1604 if (is_right) {
1605 if (is_arith) {
1606 gen_exts(ot, cpu_T[0]);
1607 tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1608 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
1609 } else {
1610 gen_extu(ot, cpu_T[0]);
1611 tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1612 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
1613 }
1614 } else {
1615 tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1616 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
1617 }
1618 }
1619
1620 /* store */
1621 if (op1 == OR_TMP0)
1622 gen_op_st_T0_A0(ot + s->mem_index);
1623 else
1624 gen_op_mov_reg_T0(ot, op1);
1625
1626 /* update eflags if the shift count is non-zero */
1627 if (op2 != 0) {
1628 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
1629 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1630 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1631 }
1632 }
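
/* Hypothetical helper, added for illustration only: "shl $3, %eax" via the
 * immediate-count shift above.  The count is masked with 0x1f (0x3f for
 * 64-bit operands), and the value shifted by count-1 is kept in cpu_tmp4 so
 * that CF can later be derived lazily from CC_SRC.
 */
static inline void gen_sketch_shl_eax_3(DisasContext *s)
{
    gen_shift_rm_im(s, OT_LONG, R_EAX, 3, 0 /* !right */, 0 /* !arith */);
}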
1633
1634 static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
1635 {
1636 if (arg2 >= 0)
1637 tcg_gen_shli_tl(ret, arg1, arg2);
1638 else
1639 tcg_gen_shri_tl(ret, arg1, -arg2);
1640 }
1641
1642 static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
1643 int is_right)
1644 {
1645 target_ulong mask;
1646 int label1, label2, data_bits;
1647 TCGv t0, t1, t2, a0;
1648
1649 /* XXX: inefficient, but we must use local temps */
1650 t0 = tcg_temp_local_new();
1651 t1 = tcg_temp_local_new();
1652 t2 = tcg_temp_local_new();
1653 a0 = tcg_temp_local_new();
1654
1655 if (ot == OT_QUAD)
1656 mask = 0x3f;
1657 else
1658 mask = 0x1f;
1659
1660 /* load */
1661 if (op1 == OR_TMP0) {
1662 tcg_gen_mov_tl(a0, cpu_A0);
1663 gen_op_ld_v(ot + s->mem_index, t0, a0);
1664 } else {
1665 gen_op_mov_v_reg(ot, t0, op1);
1666 }
1667
1668 tcg_gen_mov_tl(t1, cpu_T[1]);
1669
1670 tcg_gen_andi_tl(t1, t1, mask);
1671
1672 /* Must test zero case to avoid using undefined behaviour in TCG
1673 shifts. */
1674 label1 = gen_new_label();
1675 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);
1676
1677 if (ot <= OT_WORD)
1678 tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
1679 else
1680 tcg_gen_mov_tl(cpu_tmp0, t1);
1681
1682 gen_extu(ot, t0);
1683 tcg_gen_mov_tl(t2, t0);
1684
1685 data_bits = 8 << ot;
1686 /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
1687 fix TCG definition) */
1688 if (is_right) {
1689 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
1690 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1691 tcg_gen_shl_tl(t0, t0, cpu_tmp0);
1692 } else {
1693 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
1694 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1695 tcg_gen_shr_tl(t0, t0, cpu_tmp0);
1696 }
1697 tcg_gen_or_tl(t0, t0, cpu_tmp4);
1698
1699 gen_set_label(label1);
1700 /* store */
1701 if (op1 == OR_TMP0) {
1702 gen_op_st_v(ot + s->mem_index, t0, a0);
1703 } else {
1704 gen_op_mov_reg_v(ot, op1, t0);
1705 }
1706
1707 /* update eflags; it is needed most of the time anyway, so do it unconditionally */
1708 gen_compute_eflags(s);
1709 assert(s->cc_op == CC_OP_EFLAGS);
1710
1711 label2 = gen_new_label();
1712 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2);
1713
1714 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1715 tcg_gen_xor_tl(cpu_tmp0, t2, t0);
1716 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1717 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1718 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1719 if (is_right) {
1720 tcg_gen_shri_tl(t0, t0, data_bits - 1);
1721 }
1722 tcg_gen_andi_tl(t0, t0, CC_C);
1723 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1724
1725 gen_set_label(label2);
1726
1727 tcg_temp_free(t0);
1728 tcg_temp_free(t1);
1729 tcg_temp_free(t2);
1730 tcg_temp_free(a0);
1731 }
1732
1733 static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
1734 int is_right)
1735 {
1736 int mask;
1737 int data_bits;
1738 TCGv t0, t1, a0;
1739
1740 /* XXX: inefficient, but we must use local temps */
1741 t0 = tcg_temp_local_new();
1742 t1 = tcg_temp_local_new();
1743 a0 = tcg_temp_local_new();
1744
1745 if (ot == OT_QUAD)
1746 mask = 0x3f;
1747 else
1748 mask = 0x1f;
1749
1750 /* load */
1751 if (op1 == OR_TMP0) {
1752 tcg_gen_mov_tl(a0, cpu_A0);
1753 gen_op_ld_v(ot + s->mem_index, t0, a0);
1754 } else {
1755 gen_op_mov_v_reg(ot, t0, op1);
1756 }
1757
1758 gen_extu(ot, t0);
1759 tcg_gen_mov_tl(t1, t0);
1760
1761 op2 &= mask;
1762 data_bits = 8 << ot;
1763 if (op2 != 0) {
1764 int shift = op2 & ((1 << (3 + ot)) - 1);
1765 if (is_right) {
1766 tcg_gen_shri_tl(cpu_tmp4, t0, shift);
1767 tcg_gen_shli_tl(t0, t0, data_bits - shift);
1768 }
1769 else {
1770 tcg_gen_shli_tl(cpu_tmp4, t0, shift);
1771 tcg_gen_shri_tl(t0, t0, data_bits - shift);
1772 }
1773 tcg_gen_or_tl(t0, t0, cpu_tmp4);
1774 }
1775
1776 /* store */
1777 if (op1 == OR_TMP0) {
1778 gen_op_st_v(ot + s->mem_index, t0, a0);
1779 } else {
1780 gen_op_mov_reg_v(ot, op1, t0);
1781 }
1782
1783 if (op2 != 0) {
1784 /* update eflags */
1785 gen_compute_eflags(s);
1786 assert(s->cc_op == CC_OP_EFLAGS);
1787
1788 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1789 tcg_gen_xor_tl(cpu_tmp0, t1, t0);
1790 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1791 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1792 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1793 if (is_right) {
1794 tcg_gen_shri_tl(t0, t0, data_bits - 1);
1795 }
1796 tcg_gen_andi_tl(t0, t0, CC_C);
1797 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1798 }
1799
1800 tcg_temp_free(t0);
1801 tcg_temp_free(t1);
1802 tcg_temp_free(a0);
1803 }
1804
1805 /* XXX: add faster immediate = 1 case */
1806 static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
1807 int is_right)
1808 {
1809 gen_compute_eflags(s);
1810 assert(s->cc_op == CC_OP_EFLAGS);
1811
1812 /* load */
1813 if (op1 == OR_TMP0)
1814 gen_op_ld_T0_A0(ot + s->mem_index);
1815 else
1816 gen_op_mov_TN_reg(ot, 0, op1);
1817
1818 if (is_right) {
1819 switch (ot) {
1820 case OT_BYTE:
1821 gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1822 break;
1823 case OT_WORD:
1824 gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1825 break;
1826 case OT_LONG:
1827 gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1828 break;
1829 #ifdef TARGET_X86_64
1830 case OT_QUAD:
1831 gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1832 break;
1833 #endif
1834 }
1835 } else {
1836 switch (ot) {
1837 case OT_BYTE:
1838 gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1839 break;
1840 case OT_WORD:
1841 gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1842 break;
1843 case OT_LONG:
1844 gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1845 break;
1846 #ifdef TARGET_X86_64
1847 case OT_QUAD:
1848 gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1849 break;
1850 #endif
1851 }
1852 }
1853 /* store */
1854 if (op1 == OR_TMP0)
1855 gen_op_st_T0_A0(ot + s->mem_index);
1856 else
1857 gen_op_mov_reg_T0(ot, op1);
1858 }
1859
1860 /* XXX: add faster immediate case */
1861 static void gen_shiftd_rm_T1(DisasContext *s, int ot, int op1,
1862 int is_right, TCGv count)
1863 {
1864 int label1, label2, data_bits;
1865 target_ulong mask;
1866 TCGv t0, t1, t2, a0;
1867
1868 t0 = tcg_temp_local_new();
1869 t1 = tcg_temp_local_new();
1870 t2 = tcg_temp_local_new();
1871 a0 = tcg_temp_local_new();
1872
1873 if (ot == OT_QUAD)
1874 mask = 0x3f;
1875 else
1876 mask = 0x1f;
1877
1878 /* load */
1879 if (op1 == OR_TMP0) {
1880 tcg_gen_mov_tl(a0, cpu_A0);
1881 gen_op_ld_v(ot + s->mem_index, t0, a0);
1882 } else {
1883 gen_op_mov_v_reg(ot, t0, op1);
1884 }
1885
1886 tcg_gen_andi_tl(t2, count, mask);
1887 tcg_gen_mov_tl(t1, cpu_T[1]);
1888
1889 /* Must test zero case to avoid using undefined behaviour in TCG
1890 shifts. */
1891 label1 = gen_new_label();
1892 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
1893
1894 tcg_gen_addi_tl(cpu_tmp5, t2, -1);
1895 if (ot == OT_WORD) {
1896 /* Note: we implement the Intel behaviour for shift count > 16 */
1897 if (is_right) {
1898 tcg_gen_andi_tl(t0, t0, 0xffff);
1899 tcg_gen_shli_tl(cpu_tmp0, t1, 16);
1900 tcg_gen_or_tl(t0, t0, cpu_tmp0);
1901 tcg_gen_ext32u_tl(t0, t0);
1902
1903 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
1904
1905 /* only needed if count > 16, but a test would complicate the code */
1906 tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
1907 tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);
1908
1909 tcg_gen_shr_tl(t0, t0, t2);
1910
1911 tcg_gen_or_tl(t0, t0, cpu_tmp0);
1912 } else {
1913 /* XXX: not optimal */
1914 tcg_gen_andi_tl(t0, t0, 0xffff);
1915 tcg_gen_shli_tl(t1, t1, 16);
1916 tcg_gen_or_tl(t1, t1, t0);
1917 tcg_gen_ext32u_tl(t1, t1);
1918
1919 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
1920 tcg_gen_subfi_tl(cpu_tmp0, 32, cpu_tmp5);
1921 tcg_gen_shr_tl(cpu_tmp5, t1, cpu_tmp0);
1922 tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp5);
1923
1924 tcg_gen_shl_tl(t0, t0, t2);
1925 tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
1926 tcg_gen_shr_tl(t1, t1, cpu_tmp5);
1927 tcg_gen_or_tl(t0, t0, t1);
1928 }
1929 } else {
1930 data_bits = 8 << ot;
1931 if (is_right) {
1932 if (ot == OT_LONG)
1933 tcg_gen_ext32u_tl(t0, t0);
1934
1935 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
1936
1937 tcg_gen_shr_tl(t0, t0, t2);
1938 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
1939 tcg_gen_shl_tl(t1, t1, cpu_tmp5);
1940 tcg_gen_or_tl(t0, t0, t1);
1941
1942 } else {
1943 if (ot == OT_LONG)
1944 tcg_gen_ext32u_tl(t1, t1);
1945
1946 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
1947
1948 tcg_gen_shl_tl(t0, t0, t2);
1949 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
1950 tcg_gen_shr_tl(t1, t1, cpu_tmp5);
1951 tcg_gen_or_tl(t0, t0, t1);
1952 }
1953 }
1954 tcg_gen_mov_tl(t1, cpu_tmp4);
1955
1956 gen_set_label(label1);
1957 /* store */
1958 if (op1 == OR_TMP0) {
1959 gen_op_st_v(ot + s->mem_index, t0, a0);
1960 } else {
1961 gen_op_mov_reg_v(ot, op1, t0);
1962 }
1963
1964 /* update eflags */
1965 gen_update_cc_op(s);
1966
1967 label2 = gen_new_label();
1968 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2);
1969
1970 tcg_gen_mov_tl(cpu_cc_src, t1);
1971 tcg_gen_mov_tl(cpu_cc_dst, t0);
1972 if (is_right) {
1973 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
1974 } else {
1975 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
1976 }
1977 gen_set_label(label2);
1978 set_cc_op(s, CC_OP_DYNAMIC); /* cannot predict flags after */
1979
1980 tcg_temp_free(t0);
1981 tcg_temp_free(t1);
1982 tcg_temp_free(t2);
1983 tcg_temp_free(a0);
1984 }
1985
1986 static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
1987 {
1988 if (s != OR_TMP1)
1989 gen_op_mov_TN_reg(ot, 1, s);
1990 switch(op) {
1991 case OP_ROL:
1992 gen_rot_rm_T1(s1, ot, d, 0);
1993 break;
1994 case OP_ROR:
1995 gen_rot_rm_T1(s1, ot, d, 1);
1996 break;
1997 case OP_SHL:
1998 case OP_SHL1:
1999 gen_shift_rm_T1(s1, ot, d, 0, 0);
2000 break;
2001 case OP_SHR:
2002 gen_shift_rm_T1(s1, ot, d, 1, 0);
2003 break;
2004 case OP_SAR:
2005 gen_shift_rm_T1(s1, ot, d, 1, 1);
2006 break;
2007 case OP_RCL:
2008 gen_rotc_rm_T1(s1, ot, d, 0);
2009 break;
2010 case OP_RCR:
2011 gen_rotc_rm_T1(s1, ot, d, 1);
2012 break;
2013 }
2014 }
2015
2016 static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
2017 {
2018 switch(op) {
2019 case OP_ROL:
2020 gen_rot_rm_im(s1, ot, d, c, 0);
2021 break;
2022 case OP_ROR:
2023 gen_rot_rm_im(s1, ot, d, c, 1);
2024 break;
2025 case OP_SHL:
2026 case OP_SHL1:
2027 gen_shift_rm_im(s1, ot, d, c, 0, 0);
2028 break;
2029 case OP_SHR:
2030 gen_shift_rm_im(s1, ot, d, c, 1, 0);
2031 break;
2032 case OP_SAR:
2033 gen_shift_rm_im(s1, ot, d, c, 1, 1);
2034 break;
2035 default:
2036 /* currently not optimized */
2037 gen_op_movl_T1_im(c);
2038 gen_shift(s1, op, ot, d, OR_TMP1);
2039 break;
2040 }
2041 }
2042
2043 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm,
2044 int *reg_ptr, int *offset_ptr)
2045 {
2046 target_long disp;
2047 int havesib;
2048 int base;
2049 int index;
2050 int scale;
2051 int opreg;
2052 int mod, rm, code, override, must_add_seg;
2053
2054 override = s->override;
2055 must_add_seg = s->addseg;
2056 if (override >= 0)
2057 must_add_seg = 1;
2058 mod = (modrm >> 6) & 3;
2059 rm = modrm & 7;
2060
2061 if (s->aflag) {
2062
2063 havesib = 0;
2064 base = rm;
2065 index = 0;
2066 scale = 0;
2067
2068 if (base == 4) {
2069 havesib = 1;
2070 code = cpu_ldub_code(env, s->pc++);
2071 scale = (code >> 6) & 3;
2072 index = ((code >> 3) & 7) | REX_X(s);
2073 base = (code & 7);
2074 }
2075 base |= REX_B(s);
2076
2077 switch (mod) {
2078 case 0:
2079 if ((base & 7) == 5) {
2080 base = -1;
2081 disp = (int32_t)cpu_ldl_code(env, s->pc);
2082 s->pc += 4;
2083 if (CODE64(s) && !havesib) {
2084 disp += s->pc + s->rip_offset;
2085 }
2086 } else {
2087 disp = 0;
2088 }
2089 break;
2090 case 1:
2091 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2092 break;
2093 default:
2094 case 2:
2095 disp = (int32_t)cpu_ldl_code(env, s->pc);
2096 s->pc += 4;
2097 break;
2098 }
2099
2100 if (base >= 0) {
2101 /* for correct popl handling with esp */
2102 if (base == 4 && s->popl_esp_hack)
2103 disp += s->popl_esp_hack;
2104 #ifdef TARGET_X86_64
2105 if (s->aflag == 2) {
2106 gen_op_movq_A0_reg(base);
2107 if (disp != 0) {
2108 gen_op_addq_A0_im(disp);
2109 }
2110 } else
2111 #endif
2112 {
2113 gen_op_movl_A0_reg(base);
2114 if (disp != 0)
2115 gen_op_addl_A0_im(disp);
2116 }
2117 } else {
2118 #ifdef TARGET_X86_64
2119 if (s->aflag == 2) {
2120 gen_op_movq_A0_im(disp);
2121 } else
2122 #endif
2123 {
2124 gen_op_movl_A0_im(disp);
2125 }
2126 }
2127 /* index == 4 means no index */
2128 if (havesib && (index != 4)) {
2129 #ifdef TARGET_X86_64
2130 if (s->aflag == 2) {
2131 gen_op_addq_A0_reg_sN(scale, index);
2132 } else
2133 #endif
2134 {
2135 gen_op_addl_A0_reg_sN(scale, index);
2136 }
2137 }
2138 if (must_add_seg) {
2139 if (override < 0) {
2140 if (base == R_EBP || base == R_ESP)
2141 override = R_SS;
2142 else
2143 override = R_DS;
2144 }
2145 #ifdef TARGET_X86_64
2146 if (s->aflag == 2) {
2147 gen_op_addq_A0_seg(override);
2148 } else
2149 #endif
2150 {
2151 gen_op_addl_A0_seg(s, override);
2152 }
2153 }
2154 } else {
2155 switch (mod) {
2156 case 0:
2157 if (rm == 6) {
2158 disp = cpu_lduw_code(env, s->pc);
2159 s->pc += 2;
2160 gen_op_movl_A0_im(disp);
2161 rm = 0; /* avoid SS override */
2162 goto no_rm;
2163 } else {
2164 disp = 0;
2165 }
2166 break;
2167 case 1:
2168 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2169 break;
2170 default:
2171 case 2:
2172 disp = cpu_lduw_code(env, s->pc);
2173 s->pc += 2;
2174 break;
2175 }
2176 switch(rm) {
2177 case 0:
2178 gen_op_movl_A0_reg(R_EBX);
2179 gen_op_addl_A0_reg_sN(0, R_ESI);
2180 break;
2181 case 1:
2182 gen_op_movl_A0_reg(R_EBX);
2183 gen_op_addl_A0_reg_sN(0, R_EDI);
2184 break;
2185 case 2:
2186 gen_op_movl_A0_reg(R_EBP);
2187 gen_op_addl_A0_reg_sN(0, R_ESI);
2188 break;
2189 case 3:
2190 gen_op_movl_A0_reg(R_EBP);
2191 gen_op_addl_A0_reg_sN(0, R_EDI);
2192 break;
2193 case 4:
2194 gen_op_movl_A0_reg(R_ESI);
2195 break;
2196 case 5:
2197 gen_op_movl_A0_reg(R_EDI);
2198 break;
2199 case 6:
2200 gen_op_movl_A0_reg(R_EBP);
2201 break;
2202 default:
2203 case 7:
2204 gen_op_movl_A0_reg(R_EBX);
2205 break;
2206 }
2207 if (disp != 0)
2208 gen_op_addl_A0_im(disp);
2209 gen_op_andl_A0_ffff();
2210 no_rm:
2211 if (must_add_seg) {
2212 if (override < 0) {
2213 if (rm == 2 || rm == 3 || rm == 6)
2214 override = R_SS;
2215 else
2216 override = R_DS;
2217 }
2218 gen_op_addl_A0_seg(s, override);
2219 }
2220 }
2221
2222 opreg = OR_A0;
2223 disp = 0;
2224 *reg_ptr = opreg;
2225 *offset_ptr = disp;
2226 }
2227
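/* Decode a modrm operand that is never actually accessed (multi-byte NOP
   and prefetch-style hints): skip any SIB byte and displacement so that
   s->pc ends up past the operand, without emitting any TCG code. */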
2228 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2229 {
2230 int mod, rm, base, code;
2231
2232 mod = (modrm >> 6) & 3;
2233 if (mod == 3)
2234 return;
2235 rm = modrm & 7;
2236
2237 if (s->aflag) {
2238
2239 base = rm;
2240
2241 if (base == 4) {
2242 code = cpu_ldub_code(env, s->pc++);
2243 base = (code & 7);
2244 }
2245
2246 switch (mod) {
2247 case 0:
2248 if (base == 5) {
2249 s->pc += 4;
2250 }
2251 break;
2252 case 1:
2253 s->pc++;
2254 break;
2255 default:
2256 case 2:
2257 s->pc += 4;
2258 break;
2259 }
2260 } else {
2261 switch (mod) {
2262 case 0:
2263 if (rm == 6) {
2264 s->pc += 2;
2265 }
2266 break;
2267 case 1:
2268 s->pc++;
2269 break;
2270 default:
2271 case 2:
2272 s->pc += 2;
2273 break;
2274 }
2275 }
2276 }
2277
2278 /* used for LEA and MOV AX, mem */
2279 static void gen_add_A0_ds_seg(DisasContext *s)
2280 {
2281 int override, must_add_seg;
2282 must_add_seg = s->addseg;
2283 override = R_DS;
2284 if (s->override >= 0) {
2285 override = s->override;
2286 must_add_seg = 1;
2287 }
2288 if (must_add_seg) {
2289 #ifdef TARGET_X86_64
2290 if (CODE64(s)) {
2291 gen_op_addq_A0_seg(override);
2292 } else
2293 #endif
2294 {
2295 gen_op_addl_A0_seg(s, override);
2296 }
2297 }
2298 }
2299
2300 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2301 OR_TMP0 */
2302 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2303 int ot, int reg, int is_store)
2304 {
2305 int mod, rm, opreg, disp;
2306
2307 mod = (modrm >> 6) & 3;
2308 rm = (modrm & 7) | REX_B(s);
2309 if (mod == 3) {
2310 if (is_store) {
2311 if (reg != OR_TMP0)
2312 gen_op_mov_TN_reg(ot, 0, reg);
2313 gen_op_mov_reg_T0(ot, rm);
2314 } else {
2315 gen_op_mov_TN_reg(ot, 0, rm);
2316 if (reg != OR_TMP0)
2317 gen_op_mov_reg_T0(ot, reg);
2318 }
2319 } else {
2320 gen_lea_modrm(env, s, modrm, &opreg, &disp);
2321 if (is_store) {
2322 if (reg != OR_TMP0)
2323 gen_op_mov_TN_reg(ot, 0, reg);
2324 gen_op_st_T0_A0(ot + s->mem_index);
2325 } else {
2326 gen_op_ld_T0_A0(ot + s->mem_index);
2327 if (reg != OR_TMP0)
2328 gen_op_mov_reg_T0(ot, reg);
2329 }
2330 }
2331 }
2332
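/* Fetch an immediate operand of size 'ot' from the instruction stream and
   advance s->pc past it. */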
2333 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, int ot)
2334 {
2335 uint32_t ret;
2336
2337 switch(ot) {
2338 case OT_BYTE:
2339 ret = cpu_ldub_code(env, s->pc);
2340 s->pc++;
2341 break;
2342 case OT_WORD:
2343 ret = cpu_lduw_code(env, s->pc);
2344 s->pc += 2;
2345 break;
2346 default:
2347 case OT_LONG:
2348 ret = cpu_ldl_code(env, s->pc);
2349 s->pc += 4;
2350 break;
2351 }
2352 return ret;
2353 }
2354
2355 static inline int insn_const_size(unsigned int ot)
2356 {
2357 if (ot <= OT_LONG)
2358 return 1 << ot;
2359 else
2360 return 4;
2361 }
2362
2363 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2364 {
2365 TranslationBlock *tb;
2366 target_ulong pc;
2367
2368 pc = s->cs_base + eip;
2369 tb = s->tb;
2370 /* NOTE: we handle the case where the TB spans two pages here */
2371 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2372 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2373 /* jump to same page: we can use a direct jump */
2374 tcg_gen_goto_tb(tb_num);
2375 gen_jmp_im(eip);
2376 tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
2377 } else {
2378 /* jump to another page: currently not optimized */
2379 gen_jmp_im(eip);
2380 gen_eob(s);
2381 }
2382 }
2383
2384 static inline void gen_jcc(DisasContext *s, int b,
2385 target_ulong val, target_ulong next_eip)
2386 {
2387 int l1, l2;
2388
2389 if (s->jmp_opt) {
2390 gen_update_cc_op(s);
2391 l1 = gen_new_label();
2392 gen_jcc1(s, b, l1);
2393 set_cc_op(s, CC_OP_DYNAMIC);
2394
2395 gen_goto_tb(s, 0, next_eip);
2396
2397 gen_set_label(l1);
2398 gen_goto_tb(s, 1, val);
2399 s->is_jmp = DISAS_TB_JUMP;
2400 } else {
2401 l1 = gen_new_label();
2402 l2 = gen_new_label();
2403 gen_jcc1(s, b, l1);
2404
2405 gen_jmp_im(next_eip);
2406 tcg_gen_br(l2);
2407
2408 gen_set_label(l1);
2409 gen_jmp_im(val);
2410 gen_set_label(l2);
2411 gen_eob(s);
2412 }
2413 }
2414
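/* CMOVcc: load the source operand into T0, then use a movcond to select
   between it and the current destination register according to the
   prepared condition, and write the result back. */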
2415 static void gen_cmovcc1(CPUX86State *env, DisasContext *s, int ot, int b,
2416 int modrm, int reg)
2417 {
2418 CCPrepare cc;
2419
2420 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2421
2422 cc = gen_prepare_cc(s, b, cpu_T[1]);
2423 if (cc.mask != -1) {
2424 TCGv t0 = tcg_temp_new();
2425 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2426 cc.reg = t0;
2427 }
2428 if (!cc.use_reg2) {
2429 cc.reg2 = tcg_const_tl(cc.imm);
2430 }
2431
2432 tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
2433 cpu_T[0], cpu_regs[reg]);
2434 gen_op_mov_reg_T0(ot, reg);
2435
2436 if (cc.mask != -1) {
2437 tcg_temp_free(cc.reg);
2438 }
2439 if (!cc.use_reg2) {
2440 tcg_temp_free(cc.reg2);
2441 }
2442 }
2443
2444 static inline void gen_op_movl_T0_seg(int seg_reg)
2445 {
2446 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2447 offsetof(CPUX86State,segs[seg_reg].selector));
2448 }
2449
2450 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2451 {
2452 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2453 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2454 offsetof(CPUX86State,segs[seg_reg].selector));
2455 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2456 tcg_gen_st_tl(cpu_T[0], cpu_env,
2457 offsetof(CPUX86State,segs[seg_reg].base));
2458 }
2459
2460 /* move T0 to seg_reg and compute if the CPU state may change. Never
2461 call this function with seg_reg == R_CS */
2462 static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2463 {
2464 if (s->pe && !s->vm86) {
2465 /* XXX: optimize by finding processor state dynamically */
2466 gen_update_cc_op(s);
2467 gen_jmp_im(cur_eip);
2468 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2469 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
2470 /* abort translation because the addseg value may change or
2471 because ss32 may change. For R_SS, translation must always
2472 stop as a special handling must be done to disable hardware
2473 interrupts for the next instruction */
2474 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2475 s->is_jmp = DISAS_TB_JUMP;
2476 } else {
2477 gen_op_movl_seg_T0_vm(seg_reg);
2478 if (seg_reg == R_SS)
2479 s->is_jmp = DISAS_TB_JUMP;
2480 }
2481 }
2482
2483 static inline int svm_is_rep(int prefixes)
2484 {
2485 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2486 }
2487
2488 static inline void
2489 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2490 uint32_t type, uint64_t param)
2491 {
2492 /* no SVM activated; fast case */
2493 if (likely(!(s->flags & HF_SVMI_MASK)))
2494 return;
2495 gen_update_cc_op(s);
2496 gen_jmp_im(pc_start - s->cs_base);
2497 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
2498 tcg_const_i64(param));
2499 }
2500
2501 static inline void
2502 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2503 {
2504 gen_svm_check_intercept_param(s, pc_start, type, 0);
2505 }
2506
2507 static inline void gen_stack_update(DisasContext *s, int addend)
2508 {
2509 #ifdef TARGET_X86_64
2510 if (CODE64(s)) {
2511 gen_op_add_reg_im(2, R_ESP, addend);
2512 } else
2513 #endif
2514 if (s->ss32) {
2515 gen_op_add_reg_im(1, R_ESP, addend);
2516 } else {
2517 gen_op_add_reg_im(0, R_ESP, addend);
2518 }
2519 }
2520
2521 /* generate a push. It depends on ss32, addseg and dflag */
2522 static void gen_push_T0(DisasContext *s)
2523 {
2524 #ifdef TARGET_X86_64
2525 if (CODE64(s)) {
2526 gen_op_movq_A0_reg(R_ESP);
2527 if (s->dflag) {
2528 gen_op_addq_A0_im(-8);
2529 gen_op_st_T0_A0(OT_QUAD + s->mem_index);
2530 } else {
2531 gen_op_addq_A0_im(-2);
2532 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2533 }
2534 gen_op_mov_reg_A0(2, R_ESP);
2535 } else
2536 #endif
2537 {
2538 gen_op_movl_A0_reg(R_ESP);
2539 if (!s->dflag)
2540 gen_op_addl_A0_im(-2);
2541 else
2542 gen_op_addl_A0_im(-4);
2543 if (s->ss32) {
2544 if (s->addseg) {
2545 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2546 gen_op_addl_A0_seg(s, R_SS);
2547 }
2548 } else {
2549 gen_op_andl_A0_ffff();
2550 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2551 gen_op_addl_A0_seg(s, R_SS);
2552 }
2553 gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
2554 if (s->ss32 && !s->addseg)
2555 gen_op_mov_reg_A0(1, R_ESP);
2556 else
2557 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
2558 }
2559 }
2560
2561 /* generate a push. It depends on ss32, addseg and dflag */
2562 /* slower version for T1, only used for call Ev */
2563 static void gen_push_T1(DisasContext *s)
2564 {
2565 #ifdef TARGET_X86_64
2566 if (CODE64(s)) {
2567 gen_op_movq_A0_reg(R_ESP);
2568 if (s->dflag) {
2569 gen_op_addq_A0_im(-8);
2570 gen_op_st_T1_A0(OT_QUAD + s->mem_index);
2571 } else {
2572 gen_op_addq_A0_im(-2);
2573 gen_op_st_T1_A0(OT_WORD + s->mem_index);
2574 }
2575 gen_op_mov_reg_A0(2, R_ESP);
2576 } else
2577 #endif
2578 {
2579 gen_op_movl_A0_reg(R_ESP);
2580 if (!s->dflag)
2581 gen_op_addl_A0_im(-2);
2582 else
2583 gen_op_addl_A0_im(-4);
2584 if (s->ss32) {
2585 if (s->addseg) {
2586 gen_op_addl_A0_seg(s, R_SS);
2587 }
2588 } else {
2589 gen_op_andl_A0_ffff();
2590 gen_op_addl_A0_seg(s, R_SS);
2591 }
2592 gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
2593
2594 if (s->ss32 && !s->addseg)
2595 gen_op_mov_reg_A0(1, R_ESP);
2596 else
2597 gen_stack_update(s, (-2) << s->dflag);
2598 }
2599 }
2600
2601 /* two step pop is necessary for precise exceptions */
2602 static void gen_pop_T0(DisasContext *s)
2603 {
2604 #ifdef TARGET_X86_64
2605 if (CODE64(s)) {
2606 gen_op_movq_A0_reg(R_ESP);
2607 gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
2608 } else
2609 #endif
2610 {
2611 gen_op_movl_A0_reg(R_ESP);
2612 if (s->ss32) {
2613 if (s->addseg)
2614 gen_op_addl_A0_seg(s, R_SS);
2615 } else {
2616 gen_op_andl_A0_ffff();
2617 gen_op_addl_A0_seg(s, R_SS);
2618 }
2619 gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
2620 }
2621 }
2622
2623 static void gen_pop_update(DisasContext *s)
2624 {
2625 #ifdef TARGET_X86_64
2626 if (CODE64(s) && s->dflag) {
2627 gen_stack_update(s, 8);
2628 } else
2629 #endif
2630 {
2631 gen_stack_update(s, 2 << s->dflag);
2632 }
2633 }
2634
2635 static void gen_stack_A0(DisasContext *s)
2636 {
2637 gen_op_movl_A0_reg(R_ESP);
2638 if (!s->ss32)
2639 gen_op_andl_A0_ffff();
2640 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2641 if (s->addseg)
2642 gen_op_addl_A0_seg(s, R_SS);
2643 }
2644
2645 /* NOTE: wrap around in 16 bit not fully handled */
2646 static void gen_pusha(DisasContext *s)
2647 {
2648 int i;
2649 gen_op_movl_A0_reg(R_ESP);
2650 gen_op_addl_A0_im(-16 << s->dflag);
2651 if (!s->ss32)
2652 gen_op_andl_A0_ffff();
2653 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2654 if (s->addseg)
2655 gen_op_addl_A0_seg(s, R_SS);
2656 for(i = 0;i < 8; i++) {
2657 gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
2658 gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
2659 gen_op_addl_A0_im(2 << s->dflag);
2660 }
2661 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2662 }
2663
2664 /* NOTE: wrap around in 16 bit not fully handled */
2665 static void gen_popa(DisasContext *s)
2666 {
2667 int i;
2668 gen_op_movl_A0_reg(R_ESP);
2669 if (!s->ss32)
2670 gen_op_andl_A0_ffff();
2671 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2672 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2673 if (s->addseg)
2674 gen_op_addl_A0_seg(s, R_SS);
2675 for(i = 0;i < 8; i++) {
2676 /* ESP is not reloaded */
2677 if (i != 3) {
2678 gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
2679 gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
2680 }
2681 gen_op_addl_A0_im(2 << s->dflag);
2682 }
2683 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2684 }
2685
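/* ENTER: push EBP, optionally copy 'level' frame pointers from the old
   frame via the enter helpers, point EBP at the new frame and reserve
   esp_addend bytes of locals below it. */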
2686 static void gen_enter(DisasContext *s, int esp_addend, int level)
2687 {
2688 int ot, opsize;
2689
2690 level &= 0x1f;
2691 #ifdef TARGET_X86_64
2692 if (CODE64(s)) {
2693 ot = s->dflag ? OT_QUAD : OT_WORD;
2694 opsize = 1 << ot;
2695
2696 gen_op_movl_A0_reg(R_ESP);
2697 gen_op_addq_A0_im(-opsize);
2698 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2699
2700 /* push bp */
2701 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2702 gen_op_st_T0_A0(ot + s->mem_index);
2703 if (level) {
2704 /* XXX: must save state */
2705 gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
2706 tcg_const_i32((ot == OT_QUAD)),
2707 cpu_T[1]);
2708 }
2709 gen_op_mov_reg_T1(ot, R_EBP);
2710 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2711 gen_op_mov_reg_T1(OT_QUAD, R_ESP);
2712 } else
2713 #endif
2714 {
2715 ot = s->dflag + OT_WORD;
2716 opsize = 2 << s->dflag;
2717
2718 gen_op_movl_A0_reg(R_ESP);
2719 gen_op_addl_A0_im(-opsize);
2720 if (!s->ss32)
2721 gen_op_andl_A0_ffff();
2722 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2723 if (s->addseg)
2724 gen_op_addl_A0_seg(s, R_SS);
2725 /* push bp */
2726 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2727 gen_op_st_T0_A0(ot + s->mem_index);
2728 if (level) {
2729 /* XXX: must save state */
2730 gen_helper_enter_level(cpu_env, tcg_const_i32(level),
2731 tcg_const_i32(s->dflag),
2732 cpu_T[1]);
2733 }
2734 gen_op_mov_reg_T1(ot, R_EBP);
2735 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2736 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2737 }
2738 }
2739
2740 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2741 {
2742 gen_update_cc_op(s);
2743 gen_jmp_im(cur_eip);
2744 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
2745 s->is_jmp = DISAS_TB_JUMP;
2746 }
2747
2748 /* an interrupt is different from an exception because of the
2749 privilege checks */
2750 static void gen_interrupt(DisasContext *s, int intno,
2751 target_ulong cur_eip, target_ulong next_eip)
2752 {
2753 gen_update_cc_op(s);
2754 gen_jmp_im(cur_eip);
2755 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
2756 tcg_const_i32(next_eip - cur_eip));
2757 s->is_jmp = DISAS_TB_JUMP;
2758 }
2759
2760 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2761 {
2762 gen_update_cc_op(s);
2763 gen_jmp_im(cur_eip);
2764 gen_helper_debug(cpu_env);
2765 s->is_jmp = DISAS_TB_JUMP;
2766 }
2767
2768 /* generate a generic end of block. Trace exception is also generated
2769 if needed */
2770 static void gen_eob(DisasContext *s)
2771 {
2772 gen_update_cc_op(s);
2773 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2774 gen_helper_reset_inhibit_irq(cpu_env);
2775 }
2776 if (s->tb->flags & HF_RF_MASK) {
2777 gen_helper_reset_rf(cpu_env);
2778 }
2779 if (s->singlestep_enabled) {
2780 gen_helper_debug(cpu_env);
2781 } else if (s->tf) {
2782 gen_helper_single_step(cpu_env);
2783 } else {
2784 tcg_gen_exit_tb(0);
2785 }
2786 s->is_jmp = DISAS_TB_JUMP;
2787 }
2788
2789 /* generate a jump to eip. No segment change must happen before as a
2790 direct call to the next block may occur */
2791 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2792 {
2793 if (s->jmp_opt) {
2794 gen_update_cc_op(s);
2795 gen_goto_tb(s, tb_num, eip);
2796 s->is_jmp = DISAS_TB_JUMP;
2797 } else {
2798 gen_jmp_im(eip);
2799 gen_eob(s);
2800 }
2801 }
2802
2803 static void gen_jmp(DisasContext *s, target_ulong eip)
2804 {
2805 gen_jmp_tb(s, eip, 0);
2806 }
2807
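/* Helpers to copy 64-bit and 128-bit values between guest memory addressed
   by cpu_A0 and a field of CPUX86State (MMX/XMM registers); the TCG memory
   index is decoded from the 'idx' value passed by the callers (normally
   s->mem_index). */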
2808 static inline void gen_ldq_env_A0(int idx, int offset)
2809 {
2810 int mem_index = (idx >> 2) - 1;
2811 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2812 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2813 }
2814
2815 static inline void gen_stq_env_A0(int idx, int offset)
2816 {
2817 int mem_index = (idx >> 2) - 1;
2818 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2819 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2820 }
2821
2822 static inline void gen_ldo_env_A0(int idx, int offset)
2823 {
2824 int mem_index = (idx >> 2) - 1;
2825 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2826 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2827 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2828 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2829 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2830 }
2831
2832 static inline void gen_sto_env_A0(int idx, int offset)
2833 {
2834 int mem_index = (idx >> 2) - 1;
2835 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2836 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2837 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2838 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2839 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2840 }
2841
2842 static inline void gen_op_movo(int d_offset, int s_offset)
2843 {
2844 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2845 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2846 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2847 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
2848 }
2849
2850 static inline void gen_op_movq(int d_offset, int s_offset)
2851 {
2852 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2853 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2854 }
2855
2856 static inline void gen_op_movl(int d_offset, int s_offset)
2857 {
2858 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2859 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2860 }
2861
2862 static inline void gen_op_movq_env_0(int d_offset)
2863 {
2864 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2865 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2866 }
2867
2868 typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2869 typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2870 typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2871 typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2872 typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2873 typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2874 TCGv_i32 val);
2875 typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
2876 typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2877 TCGv val);
2878
2879 #define SSE_SPECIAL ((void *)1)
2880 #define SSE_DUMMY ((void *)2)
2881
2882 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2883 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2884 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
2885
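/* Main dispatch table for the two-byte 0x0F opcode space, indexed by the
   second opcode byte and by the mandatory prefix in effect (0: none/MMX,
   1: 0x66, 2: 0xF3, 3: 0xF2). SSE_SPECIAL entries are handled explicitly
   in gen_sse(); SSE_DUMMY marks femms/emms, which only need the emms
   helper. */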
2886 static const SSEFunc_0_epp sse_op_table1[256][4] = {
2887 /* 3DNow! extensions */
2888 [0x0e] = { SSE_DUMMY }, /* femms */
2889 [0x0f] = { SSE_DUMMY }, /* pf... */
2890 /* pure SSE operations */
2891 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2892 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2893 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2894 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2895 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2896 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2897 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2898 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2899
2900 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2901 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2902 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2903 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2904 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
2905 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
2906 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2907 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2908 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2909 [0x51] = SSE_FOP(sqrt),
2910 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2911 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2912 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2913 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2914 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2915 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2916 [0x58] = SSE_FOP(add),
2917 [0x59] = SSE_FOP(mul),
2918 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2919 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2920 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2921 [0x5c] = SSE_FOP(sub),
2922 [0x5d] = SSE_FOP(min),
2923 [0x5e] = SSE_FOP(div),
2924 [0x5f] = SSE_FOP(max),
2925
2926 [0xc2] = SSE_FOP(cmpeq),
2927 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2928 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
2929
2930 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2931 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2932
2933 /* MMX ops and their SSE extensions */
2934 [0x60] = MMX_OP2(punpcklbw),
2935 [0x61] = MMX_OP2(punpcklwd),
2936 [0x62] = MMX_OP2(punpckldq),
2937 [0x63] = MMX_OP2(packsswb),
2938 [0x64] = MMX_OP2(pcmpgtb),
2939 [0x65] = MMX_OP2(pcmpgtw),
2940 [0x66] = MMX_OP2(pcmpgtl),
2941 [0x67] = MMX_OP2(packuswb),
2942 [0x68] = MMX_OP2(punpckhbw),
2943 [0x69] = MMX_OP2(punpckhwd),
2944 [0x6a] = MMX_OP2(punpckhdq),
2945 [0x6b] = MMX_OP2(packssdw),
2946 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2947 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2948 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2949 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2950 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2951 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2952 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2953 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2954 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2955 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2956 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2957 [0x74] = MMX_OP2(pcmpeqb),
2958 [0x75] = MMX_OP2(pcmpeqw),
2959 [0x76] = MMX_OP2(pcmpeql),
2960 [0x77] = { SSE_DUMMY }, /* emms */
2961 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2962 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
2963 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2964 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2965 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2966 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2967 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2968 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2969 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2970 [0xd1] = MMX_OP2(psrlw),
2971 [0xd2] = MMX_OP2(psrld),
2972 [0xd3] = MMX_OP2(psrlq),
2973 [0xd4] = MMX_OP2(paddq),
2974 [0xd5] = MMX_OP2(pmullw),
2975 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movq2dq, movdq2q */
2976 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2977 [0xd8] = MMX_OP2(psubusb),
2978 [0xd9] = MMX_OP2(psubusw),
2979 [0xda] = MMX_OP2(pminub),
2980 [0xdb] = MMX_OP2(pand),
2981 [0xdc] = MMX_OP2(paddusb),
2982 [0xdd] = MMX_OP2(paddusw),
2983 [0xde] = MMX_OP2(pmaxub),
2984 [0xdf] = MMX_OP2(pandn),
2985 [0xe0] = MMX_OP2(pavgb),
2986 [0xe1] = MMX_OP2(psraw),
2987 [0xe2] = MMX_OP2(psrad),
2988 [0xe3] = MMX_OP2(pavgw),
2989 [0xe4] = MMX_OP2(pmulhuw),
2990 [0xe5] = MMX_OP2(pmulhw),
2991 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
2992 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2993 [0xe8] = MMX_OP2(psubsb),
2994 [0xe9] = MMX_OP2(psubsw),
2995 [0xea] = MMX_OP2(pminsw),
2996 [0xeb] = MMX_OP2(por),
2997 [0xec] = MMX_OP2(paddsb),
2998 [0xed] = MMX_OP2(paddsw),
2999 [0xee] = MMX_OP2(pmaxsw),
3000 [0xef] = MMX_OP2(pxor),
3001 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
3002 [0xf1] = MMX_OP2(psllw),
3003 [0xf2] = MMX_OP2(pslld),
3004 [0xf3] = MMX_OP2(psllq),
3005 [0xf4] = MMX_OP2(pmuludq),
3006 [0xf5] = MMX_OP2(pmaddwd),
3007 [0xf6] = MMX_OP2(psadbw),
3008 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
3009 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
3010 [0xf8] = MMX_OP2(psubb),
3011 [0xf9] = MMX_OP2(psubw),
3012 [0xfa] = MMX_OP2(psubl),
3013 [0xfb] = MMX_OP2(psubq),
3014 [0xfc] = MMX_OP2(paddb),
3015 [0xfd] = MMX_OP2(paddw),
3016 [0xfe] = MMX_OP2(paddl),
3017 };
3018
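/* Immediate-form MMX/SSE shifts (opcodes 0x71-0x73, with the shift kind
   encoded in the modrm /r field); the second index selects the MMX or XMM
   variant. */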
3019 static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
3020 [0 + 2] = MMX_OP2(psrlw),
3021 [0 + 4] = MMX_OP2(psraw),
3022 [0 + 6] = MMX_OP2(psllw),
3023 [8 + 2] = MMX_OP2(psrld),
3024 [8 + 4] = MMX_OP2(psrad),
3025 [8 + 6] = MMX_OP2(pslld),
3026 [16 + 2] = MMX_OP2(psrlq),
3027 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
3028 [16 + 6] = MMX_OP2(psllq),
3029 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
3030 };
3031
3032 static const SSEFunc_0_epi sse_op_table3ai[] = {
3033 gen_helper_cvtsi2ss,
3034 gen_helper_cvtsi2sd
3035 };
3036
3037 #ifdef TARGET_X86_64
3038 static const SSEFunc_0_epl sse_op_table3aq[] = {
3039 gen_helper_cvtsq2ss,
3040 gen_helper_cvtsq2sd
3041 };
3042 #endif
3043
3044 static const SSEFunc_i_ep sse_op_table3bi[] = {
3045 gen_helper_cvttss2si,
3046 gen_helper_cvtss2si,
3047 gen_helper_cvttsd2si,
3048 gen_helper_cvtsd2si
3049 };
3050
3051 #ifdef TARGET_X86_64
3052 static const SSEFunc_l_ep sse_op_table3bq[] = {
3053 gen_helper_cvttss2sq,
3054 gen_helper_cvtss2sq,
3055 gen_helper_cvttsd2sq,
3056 gen_helper_cvtsd2sq
3057 };
3058 #endif
3059
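/* Comparison predicates for cmpps/cmppd/cmpss/cmpsd (0x0F 0xC2), indexed
   by the low bits of the immediate byte. */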
3060 static const SSEFunc_0_epp sse_op_table4[8][4] = {
3061 SSE_FOP(cmpeq),
3062 SSE_FOP(cmplt),
3063 SSE_FOP(cmple),
3064 SSE_FOP(cmpunord),
3065 SSE_FOP(cmpneq),
3066 SSE_FOP(cmpnlt),
3067 SSE_FOP(cmpnle),
3068 SSE_FOP(cmpord),
3069 };
3070
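/* 3DNow! operations, indexed by the imm8 suffix byte of the 0x0F 0x0F
   encoding. */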
3071 static const SSEFunc_0_epp sse_op_table5[256] = {
3072 [0x0c] = gen_helper_pi2fw,
3073 [0x0d] = gen_helper_pi2fd,
3074 [0x1c] = gen_helper_pf2iw,
3075 [0x1d] = gen_helper_pf2id,
3076 [0x8a] = gen_helper_pfnacc,
3077 [0x8e] = gen_helper_pfpnacc,
3078 [0x90] = gen_helper_pfcmpge,
3079 [0x94] = gen_helper_pfmin,
3080 [0x96] = gen_helper_pfrcp,
3081 [0x97] = gen_helper_pfrsqrt,
3082 [0x9a] = gen_helper_pfsub,
3083 [0x9e] = gen_helper_pfadd,
3084 [0xa0] = gen_helper_pfcmpgt,
3085 [0xa4] = gen_helper_pfmax,
3086 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
3087 [0xa7] = gen_helper_movq, /* pfrsqit1 */
3088 [0xaa] = gen_helper_pfsubr,
3089 [0xae] = gen_helper_pfacc,
3090 [0xb0] = gen_helper_pfcmpeq,
3091 [0xb4] = gen_helper_pfmul,
3092 [0xb6] = gen_helper_movq, /* pfrcpit2 */
3093 [0xb7] = gen_helper_pmulhrw_mmx,
3094 [0xbb] = gen_helper_pswapd,
3095 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
3096 };
3097
3098 struct SSEOpHelper_epp {
3099 SSEFunc_0_epp op[2];
3100 uint32_t ext_mask;
3101 };
3102
3103 struct SSEOpHelper_eppi {
3104 SSEFunc_0_eppi op[2];
3105 uint32_t ext_mask;
3106 };
3107
3108 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
3109 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3110 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
3111 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
3112
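/* Three-byte opcode tables: 0x0F 0x38 (sse_op_table6) and 0x0F 0x3A
   (sse_op_table7, which takes an extra immediate). Each entry records the
   MMX/XMM helpers together with the CPUID feature bit that must be
   present. */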
3113 static const struct SSEOpHelper_epp sse_op_table6[256] = {
3114 [0x00] = SSSE3_OP(pshufb),
3115 [0x01] = SSSE3_OP(phaddw),
3116 [0x02] = SSSE3_OP(phaddd),
3117 [0x03] = SSSE3_OP(phaddsw),
3118 [0x04] = SSSE3_OP(pmaddubsw),
3119 [0x05] = SSSE3_OP(phsubw),
3120 [0x06] = SSSE3_OP(phsubd),
3121 [0x07] = SSSE3_OP(phsubsw),
3122 [0x08] = SSSE3_OP(psignb),
3123 [0x09] = SSSE3_OP(psignw),
3124 [0x0a] = SSSE3_OP(psignd),
3125 [0x0b] = SSSE3_OP(pmulhrsw),
3126 [0x10] = SSE41_OP(pblendvb),
3127 [0x14] = SSE41_OP(blendvps),
3128 [0x15] = SSE41_OP(blendvpd),
3129 [0x17] = SSE41_OP(ptest),
3130 [0x1c] = SSSE3_OP(pabsb),
3131 [0x1d] = SSSE3_OP(pabsw),
3132 [0x1e] = SSSE3_OP(pabsd),
3133 [0x20] = SSE41_OP(pmovsxbw),
3134 [0x21] = SSE41_OP(pmovsxbd),
3135 [0x22] = SSE41_OP(pmovsxbq),
3136 [0x23] = SSE41_OP(pmovsxwd),
3137 [0x24] = SSE41_OP(pmovsxwq),
3138 [0x25] = SSE41_OP(pmovsxdq),
3139 [0x28] = SSE41_OP(pmuldq),
3140 [0x29] = SSE41_OP(pcmpeqq),
3141 [0x2a] = SSE41_SPECIAL, /* movntdqa */
3142 [0x2b] = SSE41_OP(packusdw),
3143 [0x30] = SSE41_OP(pmovzxbw),
3144 [0x31] = SSE41_OP(pmovzxbd),
3145 [0x32] = SSE41_OP(pmovzxbq),
3146 [0x33] = SSE41_OP(pmovzxwd),
3147 [0x34] = SSE41_OP(pmovzxwq),
3148 [0x35] = SSE41_OP(pmovzxdq),
3149 [0x37] = SSE42_OP(pcmpgtq),
3150 [0x38] = SSE41_OP(pminsb),
3151 [0x39] = SSE41_OP(pminsd),
3152 [0x3a] = SSE41_OP(pminuw),
3153 [0x3b] = SSE41_OP(pminud),
3154 [0x3c] = SSE41_OP(pmaxsb),
3155 [0x3d] = SSE41_OP(pmaxsd),
3156 [0x3e] = SSE41_OP(pmaxuw),
3157 [0x3f] = SSE41_OP(pmaxud),
3158 [0x40] = SSE41_OP(pmulld),
3159 [0x41] = SSE41_OP(phminposuw),
3160 };
3161
3162 static const struct SSEOpHelper_eppi sse_op_table7[256] = {
3163 [0x08] = SSE41_OP(roundps),
3164 [0x09] = SSE41_OP(roundpd),
3165 [0x0a] = SSE41_OP(roundss),
3166 [0x0b] = SSE41_OP(roundsd),
3167 [0x0c] = SSE41_OP(blendps),
3168 [0x0d] = SSE41_OP(blendpd),
3169 [0x0e] = SSE41_OP(pblendw),
3170 [0x0f] = SSSE3_OP(palignr),
3171 [0x14] = SSE41_SPECIAL, /* pextrb */
3172 [0x15] = SSE41_SPECIAL, /* pextrw */
3173 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
3174 [0x17] = SSE41_SPECIAL, /* extractps */
3175 [0x20] = SSE41_SPECIAL, /* pinsrb */
3176 [0x21] = SSE41_SPECIAL, /* insertps */
3177 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
3178 [0x40] = SSE41_OP(dpps),
3179 [0x41] = SSE41_OP(dppd),
3180 [0x42] = SSE41_OP(mpsadbw),
3181 [0x60] = SSE42_OP(pcmpestrm),
3182 [0x61] = SSE42_OP(pcmpestri),
3183 [0x62] = SSE42_OP(pcmpistrm),
3184 [0x63] = SSE42_OP(pcmpistri),
3185 };
3186
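/* Translate one MMX/SSE instruction. 'b' is the opcode byte following the
   0x0F escape; the mandatory prefix is derived from s->prefix, the
   CR0.TS/CR0.EM and CR4.OSFXSR conditions are checked, and the instruction
   is either dispatched through the tables above or handled inline when
   marked SSE_SPECIAL. */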
3187 static void gen_sse(CPUX86State *env, DisasContext *s, int b,
3188 target_ulong pc_start, int rex_r)
3189 {
3190 int b1, op1_offset, op2_offset, is_xmm, val, ot;
3191 int modrm, mod, rm, reg, reg_addr, offset_addr;
3192 SSEFunc_0_epp sse_fn_epp;
3193 SSEFunc_0_eppi sse_fn_eppi;
3194 SSEFunc_0_ppi sse_fn_ppi;
3195 SSEFunc_0_eppt sse_fn_eppt;
3196
3197 b &= 0xff;
3198 if (s->prefix & PREFIX_DATA)
3199 b1 = 1;
3200 else if (s->prefix & PREFIX_REPZ)
3201 b1 = 2;
3202 else if (s->prefix & PREFIX_REPNZ)
3203 b1 = 3;
3204 else
3205 b1 = 0;
3206 sse_fn_epp = sse_op_table1[b][b1];
3207 if (!sse_fn_epp) {
3208 goto illegal_op;
3209 }
3210 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3211 is_xmm = 1;
3212 } else {
3213 if (b1 == 0) {
3214 /* MMX case */
3215 is_xmm = 0;
3216 } else {
3217 is_xmm = 1;
3218 }
3219 }
3220 /* simple MMX/SSE operation */
3221 if (s->flags & HF_TS_MASK) {
3222 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3223 return;
3224 }
3225 if (s->flags & HF_EM_MASK) {
3226 illegal_op:
3227 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3228 return;
3229 }
3230 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3231 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3232 goto illegal_op;
3233 if (b == 0x0e) {
3234 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3235 goto illegal_op;
3236 /* femms */
3237 gen_helper_emms(cpu_env);
3238 return;
3239 }
3240 if (b == 0x77) {
3241 /* emms */
3242 gen_helper_emms(cpu_env);
3243 return;
3244 }
3245 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3246 the static cpu state) */
3247 if (!is_xmm) {
3248 gen_helper_enter_mmx(cpu_env);
3249 }
3250
3251 modrm = cpu_ldub_code(env, s->pc++);
3252 reg = ((modrm >> 3) & 7);
3253 if (is_xmm)
3254 reg |= rex_r;
3255 mod = (modrm >> 6) & 3;
3256 if (sse_fn_epp == SSE_SPECIAL) {
3257 b |= (b1 << 8);
3258 switch(b) {
3259 case 0x0e7: /* movntq */
3260 if (mod == 3)
3261 goto illegal_op;
3262 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3263 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3264 break;
3265 case 0x1e7: /* movntdq */
3266 case 0x02b: /* movntps */
3267 case 0x12b: /* movntpd */
3268 if (mod == 3)
3269 goto illegal_op;
3270 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3271 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3272 break;
3273 case 0x3f0: /* lddqu */
3274 if (mod == 3)
3275 goto illegal_op;
3276 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3277 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3278 break;
3279 case 0x22b: /* movntss */
3280 case 0x32b: /* movntsd */
3281 if (mod == 3)
3282 goto illegal_op;
3283 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3284 if (b1 & 1) {
3285 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
3286 xmm_regs[reg]));
3287 } else {
3288 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3289 xmm_regs[reg].XMM_L(0)));
3290 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3291 }
3292 break;
3293 case 0x6e: /* movd mm, ea */
3294 #ifdef TARGET_X86_64
3295 if (s->dflag == 2) {
3296 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
3297 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3298 } else
3299 #endif
3300 {
3301 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
3302 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3303 offsetof(CPUX86State,fpregs[reg].mmx));
3304 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3305 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3306 }
3307 break;
3308 case 0x16e: /* movd xmm, ea */
3309 #ifdef TARGET_X86_64
3310 if (s->dflag == 2) {
3311 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
3312 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3313 offsetof(CPUX86State,xmm_regs[reg]));
3314 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3315 } else
3316 #endif
3317 {
3318 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
3319 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3320 offsetof(CPUX86State,xmm_regs[reg]));
3321 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3322 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3323 }
3324 break;
3325 case 0x6f: /* movq mm, ea */
3326 if (mod != 3) {
3327 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3328 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3329 } else {
3330 rm = (modrm & 7);
3331 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3332 offsetof(CPUX86State,fpregs[rm].mmx));
3333 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3334 offsetof(CPUX86State,fpregs[reg].mmx));
3335 }
3336 break;
3337 case 0x010: /* movups */
3338 case 0x110: /* movupd */
3339 case 0x028: /* movaps */
3340 case 0x128: /* movapd */
3341 case 0x16f: /* movdqa xmm, ea */
3342 case 0x26f: /* movdqu xmm, ea */
3343 if (mod != 3) {
3344 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3345 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3346 } else {
3347 rm = (modrm & 7) | REX_B(s);
3348 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3349 offsetof(CPUX86State,xmm_regs[rm]));
3350 }
3351 break;
3352 case 0x210: /* movss xmm, ea */
3353 if (mod != 3) {
3354 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3355 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3356 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3357 gen_op_movl_T0_0();
3358 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3359 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3360 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3361 } else {
3362 rm = (modrm & 7) | REX_B(s);
3363 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3364 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3365 }
3366 break;
3367 case 0x310: /* movsd xmm, ea */
3368 if (mod != 3) {
3369 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3370 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3371 gen_op_movl_T0_0();
3372 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3373 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3374 } else {
3375 rm = (modrm & 7) | REX_B(s);
3376 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3377 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3378 }
3379 break;
3380 case 0x012: /* movlps */
3381 case 0x112: /* movlpd */
3382 if (mod != 3) {
3383 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3384 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3385 } else {
3386 /* movhlps */
3387 rm = (modrm & 7) | REX_B(s);
3388 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3389 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3390 }
3391 break;
3392 case 0x212: /* movsldup */
3393 if (mod != 3) {
3394 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3395 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3396 } else {
3397 rm = (modrm & 7) | REX_B(s);
3398 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3399 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3400 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3401 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3402 }
3403 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3404 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3405 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3406 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3407 break;
3408 case 0x312: /* movddup */
3409 if (mod != 3) {
3410 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3411 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3412 } else {
3413 rm = (modrm & 7) | REX_B(s);
3414 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3415 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3416 }
3417 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3418 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3419 break;
3420 case 0x016: /* movhps */
3421 case 0x116: /* movhpd */
3422 if (mod != 3) {
3423 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3424 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3425 } else {
3426 /* movlhps */
3427 rm = (modrm & 7) | REX_B(s);
3428 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3429 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3430 }
3431 break;
3432 case 0x216: /* movshdup */
3433 if (mod != 3) {
3434 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3435 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3436 } else {
3437 rm = (modrm & 7) | REX_B(s);
3438 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3439 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3440 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3441 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3442 }
3443 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3444 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3445 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3446 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3447 break;
3448 case 0x178:
3449 case 0x378:
3450 {
3451 int bit_index, field_length;
3452
3453 if (b1 == 1 && reg != 0)
3454 goto illegal_op;
3455 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3456 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
3457 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3458 offsetof(CPUX86State,xmm_regs[reg]));
3459 if (b1 == 1)
3460 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3461 tcg_const_i32(bit_index),
3462 tcg_const_i32(field_length));
3463 else
3464 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3465 tcg_const_i32(bit_index),
3466 tcg_const_i32(field_length));
3467 }
3468 break;
3469 case 0x7e: /* movd ea, mm */
3470 #ifdef TARGET_X86_64
3471 if (s->dflag == 2) {
3472 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3473 offsetof(CPUX86State,fpregs[reg].mmx));
3474 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
3475 } else
3476 #endif
3477 {
3478 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3479 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3480 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
3481 }
3482 break;
3483 case 0x17e: /* movd ea, xmm */
3484 #ifdef TARGET_X86_64
3485 if (s->dflag == 2) {
3486 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3487 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3488 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
3489 } else
3490 #endif
3491 {
3492 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3493 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3494 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
3495 }
3496 break;
3497 case 0x27e: /* movq xmm, ea */
3498 if (mod != 3) {
3499 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3500 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3501 } else {
3502 rm = (modrm & 7) | REX_B(s);
3503 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3504 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3505 }
3506 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3507 break;
3508 case 0x7f: /* movq ea, mm */
3509 if (mod != 3) {
3510 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3511 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3512 } else {
3513 rm = (modrm & 7);
3514 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3515 offsetof(CPUX86State,fpregs[reg].mmx));
3516 }
3517 break;
3518 case 0x011: /* movups */
3519 case 0x111: /* movupd */
3520 case 0x029: /* movaps */
3521 case 0x129: /* movapd */
3522 case 0x17f: /* movdqa ea, xmm */
3523 case 0x27f: /* movdqu ea, xmm */
3524 if (mod != 3) {
3525 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3526 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3527 } else {
3528 rm = (modrm & 7) | REX_B(s);
3529 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3530 offsetof(CPUX86State,xmm_regs[reg]));
3531 }
3532 break;
3533 case 0x211: /* movss ea, xmm */
3534 if (mod != 3) {
3535 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3536 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3537 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3538 } else {
3539 rm = (modrm & 7) | REX_B(s);
3540 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3541 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3542 }
3543 break;
3544 case 0x311: /* movsd ea, xmm */
3545 if (mod != 3) {
3546 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3547 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3548 } else {
3549 rm = (modrm & 7) | REX_B(s);
3550 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3551 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3552 }
3553 break;
3554 case 0x013: /* movlps */
3555 case 0x113: /* movlpd */
3556 if (mod != 3) {
3557 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3558 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3559 } else {
3560 goto illegal_op;
3561 }
3562 break;
3563 case 0x017: /* movhps */
3564 case 0x117: /* movhpd */
3565 if (mod != 3) {
3566 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3567 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3568 } else {
3569 goto illegal_op;
3570 }
3571 break;
3572 case 0x71: /* shift mm, im */
3573 case 0x72:
3574 case 0x73:
3575 case 0x171: /* shift xmm, im */
3576 case 0x172:
3577 case 0x173:
3578 if (b1 >= 2) {
3579 goto illegal_op;
3580 }
3581 val = cpu_ldub_code(env, s->pc++);
3582 if (is_xmm) {
3583 gen_op_movl_T0_im(val);
3584 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3585 gen_op_movl_T0_0();
3586 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3587 op1_offset = offsetof(CPUX86State,xmm_t0);
3588 } else {
3589 gen_op_movl_T0_im(val);
3590 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3591 gen_op_movl_T0_0();
3592 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3593 op1_offset = offsetof(CPUX86State,mmx_t0);
3594 }
3595 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3596 (((modrm >> 3)) & 7)][b1];
3597 if (!sse_fn_epp) {
3598 goto illegal_op;
3599 }
3600 if (is_xmm) {
3601 rm = (modrm & 7) | REX_B(s);
3602 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3603 } else {
3604 rm = (modrm & 7);
3605 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3606 }
3607 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3608 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3609 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3610 break;
3611 case 0x050: /* movmskps */
3612 rm = (modrm & 7) | REX_B(s);
3613 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3614 offsetof(CPUX86State,xmm_regs[rm]));
3615 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3616 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3617 gen_op_mov_reg_T0(OT_LONG, reg);
3618 break;
3619 case 0x150: /* movmskpd */
3620 rm = (modrm & 7) | REX_B(s);
3621 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3622 offsetof(CPUX86State,xmm_regs[rm]));
3623 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3624 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3625 gen_op_mov_reg_T0(OT_LONG, reg);
3626 break;
3627 case 0x02a: /* cvtpi2ps */
3628 case 0x12a: /* cvtpi2pd */
3629 gen_helper_enter_mmx(cpu_env);
3630 if (mod != 3) {
3631 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3632 op2_offset = offsetof(CPUX86State,mmx_t0);
3633 gen_ldq_env_A0(s->mem_index, op2_offset);
3634 } else {
3635 rm = (modrm & 7);
3636 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3637 }
3638 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3639 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3640 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3641 switch(b >> 8) {
3642 case 0x0:
3643 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
3644 break;
3645 default:
3646 case 0x1:
3647 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
3648 break;
3649 }
3650 break;
3651 case 0x22a: /* cvtsi2ss */
3652 case 0x32a: /* cvtsi2sd */
3653 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3654 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3655 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3656 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3657 if (ot == OT_LONG) {
3658 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
3659 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3660 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
3661 } else {
3662 #ifdef TARGET_X86_64
3663 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3664 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
3665 #else
3666 goto illegal_op;
3667 #endif
3668 }
3669 break;
3670 case 0x02c: /* cvttps2pi */
3671 case 0x12c: /* cvttpd2pi */
3672 case 0x02d: /* cvtps2pi */
3673 case 0x12d: /* cvtpd2pi */
3674 gen_helper_enter_mmx(cpu_env);
3675 if (mod != 3) {
3676 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3677 op2_offset = offsetof(CPUX86State,xmm_t0);
3678 gen_ldo_env_A0(s->mem_index, op2_offset);
3679 } else {
3680 rm = (modrm & 7) | REX_B(s);
3681 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3682 }
3683 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3684 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3685 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3686 switch(b) {
3687 case 0x02c:
3688 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3689 break;
3690 case 0x12c:
3691 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3692 break;
3693 case 0x02d:
3694 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3695 break;
3696 case 0x12d:
3697 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3698 break;
3699 }
3700 break;
3701 case 0x22c: /* cvttss2si */
3702 case 0x32c: /* cvttsd2si */
3703 case 0x22d: /* cvtss2si */
3704 case 0x32d: /* cvtsd2si */
3705 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3706 if (mod != 3) {
3707 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3708 if ((b >> 8) & 1) {
3709 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
3710 } else {
3711 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3712 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3713 }
3714 op2_offset = offsetof(CPUX86State,xmm_t0);
3715 } else {
3716 rm = (modrm & 7) | REX_B(s);
3717 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3718 }
3719 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3720 if (ot == OT_LONG) {
3721 SSEFunc_i_ep sse_fn_i_ep =
3722 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
3723 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3724 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3725 } else {
3726 #ifdef TARGET_X86_64
3727 SSEFunc_l_ep sse_fn_l_ep =
3728 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
3729 sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
3730 #else
3731 goto illegal_op;
3732 #endif
3733 }
3734 gen_op_mov_reg_T0(ot, reg);
3735 break;
3736 case 0xc4: /* pinsrw */
3737 case 0x1c4:
3738 s->rip_offset = 1;
3739 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
3740 val = cpu_ldub_code(env, s->pc++);
3741 if (b1) {
3742 val &= 7;
3743 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3744 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3745 } else {
3746 val &= 3;
3747 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3748 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3749 }
3750 break;
3751 case 0xc5: /* pextrw */
3752 case 0x1c5:
3753 if (mod != 3)
3754 goto illegal_op;
3755 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3756 val = cpu_ldub_code(env, s->pc++);
3757 if (b1) {
3758 val &= 7;
3759 rm = (modrm & 7) | REX_B(s);
3760 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3761 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3762 } else {
3763 val &= 3;
3764 rm = (modrm & 7);
3765 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3766 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3767 }
3768 reg = ((modrm >> 3) & 7) | rex_r;
3769 gen_op_mov_reg_T0(ot, reg);
3770 break;
3771 case 0x1d6: /* movq ea, xmm */
3772 if (mod != 3) {
3773 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3774 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3775 } else {
3776 rm = (modrm & 7) | REX_B(s);
3777 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3778 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3779 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3780 }
3781 break;
3782 case 0x2d6: /* movq2dq */
3783 gen_helper_enter_mmx(cpu_env);
3784 rm = (modrm & 7);
3785 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3786 offsetof(CPUX86State,fpregs[rm].mmx));
3787 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3788 break;
3789 case 0x3d6: /* movdq2q */
3790 gen_helper_enter_mmx(cpu_env);
3791 rm = (modrm & 7) | REX_B(s);
3792 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3793 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3794 break;
3795 case 0xd7: /* pmovmskb */
3796 case 0x1d7:
3797 if (mod != 3)
3798 goto illegal_op;
3799 if (b1) {
3800 rm = (modrm & 7) | REX_B(s);
3801 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3802 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3803 } else {
3804 rm = (modrm & 7);
3805 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3806 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3807 }
3808 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3809 reg = ((modrm >> 3) & 7) | rex_r;
3810 gen_op_mov_reg_T0(OT_LONG, reg);
3811 break;
3812 case 0x138:
3813 if (s->prefix & PREFIX_REPNZ)
3814 goto crc32; /* else fall through and handle as 0x038 */
3815 case 0x038:
3816 b = modrm;
3817 modrm = cpu_ldub_code(env, s->pc++);
3818 rm = modrm & 7;
3819 reg = ((modrm >> 3) & 7) | rex_r;
3820 mod = (modrm >> 6) & 3;
3821 if (b1 >= 2) {
3822 goto illegal_op;
3823 }
3824
3825 sse_fn_epp = sse_op_table6[b].op[b1];
3826 if (!sse_fn_epp) {
3827 goto illegal_op;
3828 }
3829 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3830 goto illegal_op;
3831
3832 if (b1) {
3833 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3834 if (mod == 3) {
3835 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3836 } else {
3837 op2_offset = offsetof(CPUX86State,xmm_t0);
3838 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3839 switch (b) {
3840 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3841 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3842 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3843 gen_ldq_env_A0(s->mem_index, op2_offset +
3844 offsetof(XMMReg, XMM_Q(0)));
3845 break;
3846 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3847 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3848 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3849 (s->mem_index >> 2) - 1);
3850 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3851 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3852 offsetof(XMMReg, XMM_L(0)));
3853 break;
3854 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3855 tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
3856 (s->mem_index >> 2) - 1);
3857 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3858 offsetof(XMMReg, XMM_W(0)));
3859 break;
3860 case 0x2a: /* movntdqa */
3861 gen_ldo_env_A0(s->mem_index, op1_offset);
3862 return;
3863 default:
3864 gen_ldo_env_A0(s->mem_index, op2_offset);
3865 }
3866 }
3867 } else {
3868 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3869 if (mod == 3) {
3870 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3871 } else {
3872 op2_offset = offsetof(CPUX86State,mmx_t0);
3873 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3874 gen_ldq_env_A0(s->mem_index, op2_offset);
3875 }
3876 }
3877 if (sse_fn_epp == SSE_SPECIAL) {
3878 goto illegal_op;
3879 }
3880
3881 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3882 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3883 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3884
3885 if (b == 0x17) {
3886 set_cc_op(s, CC_OP_EFLAGS);
3887 }
3888 break;
3889 case 0x338: /* crc32 */
3890 crc32:
3891 b = modrm;
3892 modrm = cpu_ldub_code(env, s->pc++);
3893 reg = ((modrm >> 3) & 7) | rex_r;
3894
3895 if (b != 0xf0 && b != 0xf1)
3896 goto illegal_op;
3897 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
3898 goto illegal_op;
3899
3900 if (b == 0xf0)
3901 ot = OT_BYTE;
3902 else if (b == 0xf1 && s->dflag != 2)
3903 if (s->prefix & PREFIX_DATA)
3904 ot = OT_WORD;
3905 else
3906 ot = OT_LONG;
3907 else
3908 ot = OT_QUAD;
3909
3910 gen_op_mov_TN_reg(OT_LONG, 0, reg);
3911 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3912 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3913 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3914 cpu_T[0], tcg_const_i32(8 << ot));
3915
3916 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3917 gen_op_mov_reg_T0(ot, reg);
3918 break;
3919 case 0x03a:
3920 case 0x13a:
3921 b = modrm;
3922 modrm = cpu_ldub_code(env, s->pc++);
3923 rm = modrm & 7;
3924 reg = ((modrm >> 3) & 7) | rex_r;
3925 mod = (modrm >> 6) & 3;
3926 if (b1 >= 2) {
3927 goto illegal_op;
3928 }
3929
3930 sse_fn_eppi = sse_op_table7[b].op[b1];
3931 if (!sse_fn_eppi) {
3932 goto illegal_op;
3933 }
3934 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
3935 goto illegal_op;
3936
3937 if (sse_fn_eppi == SSE_SPECIAL) {
3938 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3939 rm = (modrm & 7) | REX_B(s);
3940 if (mod != 3)
3941 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3942 reg = ((modrm >> 3) & 7) | rex_r;
3943 val = cpu_ldub_code(env, s->pc++);
3944 switch (b) {
3945 case 0x14: /* pextrb */
3946 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3947 xmm_regs[reg].XMM_B(val & 15)));
3948 if (mod == 3)
3949 gen_op_mov_reg_T0(ot, rm);
3950 else
3951 tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
3952 (s->mem_index >> 2) - 1);
3953 break;
3954 case 0x15: /* pextrw */
3955 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3956 xmm_regs[reg].XMM_W(val & 7)));
3957 if (mod == 3)
3958 gen_op_mov_reg_T0(ot, rm);
3959 else
3960 tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
3961 (s->mem_index >> 2) - 1);
3962 break;
3963 case 0x16:
3964 if (ot == OT_LONG) { /* pextrd */
3965 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3966 offsetof(CPUX86State,
3967 xmm_regs[reg].XMM_L(val & 3)));
3968 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3969 if (mod == 3)
3970 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
3971 else
3972 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
3973 (s->mem_index >> 2) - 1);
3974 } else { /* pextrq */
3975 #ifdef TARGET_X86_64
3976 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3977 offsetof(CPUX86State,
3978 xmm_regs[reg].XMM_Q(val & 1)));
3979 if (mod == 3)
3980 gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
3981 else
3982 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
3983 (s->mem_index >> 2) - 1);
3984 #else
3985 goto illegal_op;
3986 #endif
3987 }
3988 break;
3989 case 0x17: /* extractps */
3990 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3991 xmm_regs[reg].XMM_L(val & 3)));
3992 if (mod == 3)
3993 gen_op_mov_reg_T0(ot, rm);
3994 else
3995 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
3996 (s->mem_index >> 2) - 1);
3997 break;
3998 case 0x20: /* pinsrb */
3999 if (mod == 3)
4000 gen_op_mov_TN_reg(OT_LONG, 0, rm);
4001 else
4002 tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0,
4003 (s->mem_index >> 2) - 1);
4004 tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State,
4005 xmm_regs[reg].XMM_B(val & 15)));
4006 break;
4007 case 0x21: /* insertps */
4008 if (mod == 3) {
4009 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4010 offsetof(CPUX86State,xmm_regs[rm]
4011 .XMM_L((val >> 6) & 3)));
4012 } else {
4013 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
4014 (s->mem_index >> 2) - 1);
4015 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
4016 }
4017 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4018 offsetof(CPUX86State,xmm_regs[reg]
4019 .XMM_L((val >> 4) & 3)));
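/* Bits 0-3 of the immediate form the zmask: each set bit zeroes the corresponding dword of the destination register. */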
4020 if ((val >> 0) & 1)
4021 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4022 cpu_env, offsetof(CPUX86State,
4023 xmm_regs[reg].XMM_L(0)));
4024 if ((val >> 1) & 1)
4025 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4026 cpu_env, offsetof(CPUX86State,
4027 xmm_regs[reg].XMM_L(1)));
4028 if ((val >> 2) & 1)
4029 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4030 cpu_env, offsetof(CPUX86State,
4031 xmm_regs[reg].XMM_L(2)));
4032 if ((val >> 3) & 1)
4033 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4034 cpu_env, offsetof(CPUX86State,
4035 xmm_regs[reg].XMM_L(3)));
4036 break;
4037 case 0x22:
4038 if (ot == OT_LONG) { /* pinsrd */
4039 if (mod == 3)
4040 gen_op_mov_v_reg(ot, cpu_tmp0, rm);
4041 else
4042 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
4043 (s->mem_index >> 2) - 1);
4044 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
4045 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4046 offsetof(CPUX86State,
4047 xmm_regs[reg].XMM_L(val & 3)));
4048 } else { /* pinsrq */
4049 #ifdef TARGET_X86_64
4050 if (mod == 3)
4051 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4052 else
4053 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
4054 (s->mem_index >> 2) - 1);
4055 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4056 offsetof(CPUX86State,
4057 xmm_regs[reg].XMM_Q(val & 1)));
4058 #else
4059 goto illegal_op;
4060 #endif
4061 }
4062 break;
4063 }
4064 return;
4065 }
4066
4067 if (b1) {
4068 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4069 if (mod == 3) {
4070 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4071 } else {
4072 op2_offset = offsetof(CPUX86State,xmm_t0);
4073 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4074 gen_ldo_env_A0(s->mem_index, op2_offset);
4075 }
4076 } else {
4077 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4078 if (mod == 3) {
4079 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4080 } else {
4081 op2_offset = offsetof(CPUX86State,mmx_t0);
4082 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4083 gen_ldq_env_A0(s->mem_index, op2_offset);
4084 }
4085 }
4086 val = cpu_ldub_code(env, s->pc++);
4087
4088 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
4089 set_cc_op(s, CC_OP_EFLAGS);
4090
4091 if (s->dflag == 2)
4092 /* The helper must use entire 64-bit gp registers */
4093 val |= 1 << 8;
4094 }
4095
4096 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4097 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4098 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4099 break;
4100 default:
4101 goto illegal_op;
4102 }
4103 } else {
4104 /* generic MMX or SSE operation */
4105 switch(b) {
4106 case 0x70: /* pshufx insn */
4107 case 0xc6: /* shufps/shufpd */
4108 case 0xc2: /* compare insns */
4109 s->rip_offset = 1;
4110 break;
4111 default:
4112 break;
4113 }
4114 if (is_xmm) {
4115 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4116 if (mod != 3) {
4117 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4118 op2_offset = offsetof(CPUX86State,xmm_t0);
4119 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
4120 b == 0xc2)) {
4121 /* specific case for SSE single instructions */
4122 if (b1 == 2) {
4123 /* 32 bit access */
4124 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
4125 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
4126 } else {
4127 /* 64 bit access */
4128 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
4129 }
4130 } else {
4131 gen_ldo_env_A0(s->mem_index, op2_offset);
4132 }
4133 } else {
4134 rm = (modrm & 7) | REX_B(s);
4135 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4136 }
4137 } else {
4138 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4139 if (mod != 3) {
4140 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4141 op2_offset = offsetof(CPUX86State,mmx_t0);
4142 gen_ldq_env_A0(s->mem_index, op2_offset);
4143 } else {
4144 rm = (modrm & 7);
4145 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4146 }
4147 }
4148 switch(b) {
4149 case 0x0f: /* 3DNow! data insns */
4150 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4151 goto illegal_op;
4152 val = cpu_ldub_code(env, s->pc++);
4153 sse_fn_epp = sse_op_table5[val];
4154 if (!sse_fn_epp) {
4155 goto illegal_op;
4156 }
4157 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4158 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4159 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4160 break;
4161 case 0x70: /* pshufx insn */
4162 case 0xc6: /* shufps/shufpd */
4163 val = cpu_ldub_code(env, s->pc++);
4164 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4165 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4166 /* XXX: introduce a new table? */
4167 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
4168 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4169 break;
4170 case 0xc2:
4171 /* compare insns */
4172 val = cpu_ldub_code(env, s->pc++);
4173 if (val >= 8)
4174 goto illegal_op;
4175 sse_fn_epp = sse_op_table4[val][b1];
4176
4177 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4178 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4179 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4180 break;
4181 case 0xf7:
4182 /* maskmov : we must prepare A0 */
4183 if (mod != 3)
4184 goto illegal_op;
4185 #ifdef TARGET_X86_64
4186 if (s->aflag == 2) {
4187 gen_op_movq_A0_reg(R_EDI);
4188 } else
4189 #endif
4190 {
4191 gen_op_movl_A0_reg(R_EDI);
4192 if (s->aflag == 0)
4193 gen_op_andl_A0_ffff();
4194 }
4195 gen_add_A0_ds_seg(s);
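/* A0 now holds DS:(E/R)DI, the implicit store address used by maskmovq/maskmovdqu. */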
4196
4197 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4198 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4199 /* XXX: introduce a new table? */
4200 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4201 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
4202 break;
4203 default:
4204 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4205 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4206 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4207 break;
4208 }
4209 if (b == 0x2e || b == 0x2f) {
4210 set_cc_op(s, CC_OP_EFLAGS);
4211 }
4212 }
4213 }
4214
4215 /* convert one instruction. s->is_jmp is set if the translation must
4216 be stopped. Return the next pc value */
4217 static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4218 target_ulong pc_start)
4219 {
4220 int b, prefixes, aflag, dflag;
4221 int shift, ot;
4222 int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
4223 target_ulong next_eip, tval;
4224 int rex_w, rex_r;
4225
4226 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4227 tcg_gen_debug_insn_start(pc_start);
4228 }
4229 s->pc = pc_start;
4230 prefixes = 0;
4231 aflag = s->code32;
4232 dflag = s->code32;
4233 s->override = -1;
4234 rex_w = -1;
4235 rex_r = 0;
4236 #ifdef TARGET_X86_64
4237 s->rex_x = 0;
4238 s->rex_b = 0;
4239 x86_64_hregs = 0;
4240 #endif
4241 s->rip_offset = 0; /* for relative ip address */
4242 next_byte:
4243 b = cpu_ldub_code(env, s->pc);
4244 s->pc++;
4245 /* check prefixes */
4246 #ifdef TARGET_X86_64
4247 if (CODE64(s)) {
4248 switch (b) {
4249 case 0xf3:
4250 prefixes |= PREFIX_REPZ;
4251 goto next_byte;
4252 case 0xf2:
4253 prefixes |= PREFIX_REPNZ;
4254 goto next_byte;
4255 case 0xf0:
4256 prefixes |= PREFIX_LOCK;
4257 goto next_byte;
4258 case 0x2e:
4259 s->override = R_CS;
4260 goto next_byte;
4261 case 0x36:
4262 s->override = R_SS;
4263 goto next_byte;
4264 case 0x3e:
4265 s->override = R_DS;
4266 goto next_byte;
4267 case 0x26:
4268 s->override = R_ES;
4269 goto next_byte;
4270 case 0x64:
4271 s->override = R_FS;
4272 goto next_byte;
4273 case 0x65:
4274 s->override = R_GS;
4275 goto next_byte;
4276 case 0x66:
4277 prefixes |= PREFIX_DATA;
4278 goto next_byte;
4279 case 0x67:
4280 prefixes |= PREFIX_ADR;
4281 goto next_byte;
4282 case 0x40 ... 0x4f:
4283 /* REX prefix */
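/* REX is 0100WRXB: W selects 64-bit operand size; R, X and B are pre-shifted here so they extend the modrm reg, SIB index and base/rm fields to four bits. */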
4284 rex_w = (b >> 3) & 1;
4285 rex_r = (b & 0x4) << 1;
4286 s->rex_x = (b & 0x2) << 2;
4287 REX_B(s) = (b & 0x1) << 3;
4288 x86_64_hregs = 1; /* select uniform byte register addressing */
4289 goto next_byte;
4290 }
4291 if (rex_w == 1) {
4292 /* 0x66 is ignored if rex.w is set */
4293 dflag = 2;
4294 } else {
4295 if (prefixes & PREFIX_DATA)
4296 dflag ^= 1;
4297 }
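/* Addresses default to 64 bits in long mode; the 0x67 prefix drops them to 32 bits. */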
4298 if (!(prefixes & PREFIX_ADR))
4299 aflag = 2;
4300 } else
4301 #endif
4302 {
4303 switch (b) {
4304 case 0xf3:
4305 prefixes |= PREFIX_REPZ;
4306 goto next_byte;
4307 case 0xf2:
4308 prefixes |= PREFIX_REPNZ;
4309 goto next_byte;
4310 case 0xf0:
4311 prefixes |= PREFIX_LOCK;
4312 goto next_byte;
4313 case 0x2e:
4314 s->override = R_CS;
4315 goto next_byte;
4316 case 0x36:
4317 s->override = R_SS;
4318 goto next_byte;
4319 case 0x3e:
4320 s->override = R_DS;
4321 goto next_byte;
4322 case 0x26:
4323 s->override = R_ES;
4324 goto next_byte;
4325 case 0x64:
4326 s->override = R_FS;
4327 goto next_byte;
4328 case 0x65:
4329 s->override = R_GS;
4330 goto next_byte;
4331 case 0x66:
4332 prefixes |= PREFIX_DATA;
4333 goto next_byte;
4334 case 0x67:
4335 prefixes |= PREFIX_ADR;
4336 goto next_byte;
4337 }
4338 if (prefixes & PREFIX_DATA)
4339 dflag ^= 1;
4340 if (prefixes & PREFIX_ADR)
4341 aflag ^= 1;
4342 }
4343
4344 s->prefix = prefixes;
4345 s->aflag = aflag;
4346 s->dflag = dflag;
4347
4348 /* lock generation */
4349 if (prefixes & PREFIX_LOCK)
4350 gen_helper_lock();
4351
4352 /* now check op code */
4353 reswitch:
4354 switch(b) {
4355 case 0x0f:
4356 /**************************/
4357 /* extended op code */
4358 b = cpu_ldub_code(env, s->pc++) | 0x100;
4359 goto reswitch;
4360
4361 /**************************/
4362 /* arith & logic */
4363 case 0x00 ... 0x05:
4364 case 0x08 ... 0x0d:
4365 case 0x10 ... 0x15:
4366 case 0x18 ... 0x1d:
4367 case 0x20 ... 0x25:
4368 case 0x28 ... 0x2d:
4369 case 0x30 ... 0x35:
4370 case 0x38 ... 0x3d:
4371 {
4372 int op, f, val;
4373 op = (b >> 3) & 7;
4374 f = (b >> 1) & 3;
4375
4376 if ((b & 1) == 0)
4377 ot = OT_BYTE;
4378 else
4379 ot = dflag + OT_WORD;
4380
4381 switch(f) {
4382 case 0: /* OP Ev, Gv */
4383 modrm = cpu_ldub_code(env, s->pc++);
4384 reg = ((modrm >> 3) & 7) | rex_r;
4385 mod = (modrm >> 6) & 3;
4386 rm = (modrm & 7) | REX_B(s);
4387 if (mod != 3) {
4388 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4389 opreg = OR_TMP0;
4390 } else if (op == OP_XORL && rm == reg) {
4391 xor_zero:
4392 /* xor reg, reg optimisation */
4393 gen_op_movl_T0_0();
4394 set_cc_op(s, CC_OP_LOGICB + ot);
4395 gen_op_mov_reg_T0(ot, reg);
4396 gen_op_update1_cc();
4397 break;
4398 } else {
4399 opreg = rm;
4400 }
4401 gen_op_mov_TN_reg(ot, 1, reg);
4402 gen_op(s, op, ot, opreg);
4403 break;
4404 case 1: /* OP Gv, Ev */
4405 modrm = cpu_ldub_code(env, s->pc++);
4406 mod = (modrm >> 6) & 3;
4407 reg = ((modrm >> 3) & 7) | rex_r;
4408 rm = (modrm & 7) | REX_B(s);
4409 if (mod != 3) {
4410 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4411 gen_op_ld_T1_A0(ot + s->mem_index);
4412 } else if (op == OP_XORL && rm == reg) {
4413 goto xor_zero;
4414 } else {
4415 gen_op_mov_TN_reg(ot, 1, rm);
4416 }
4417 gen_op(s, op, ot, reg);
4418 break;
4419 case 2: /* OP A, Iv */
4420 val = insn_get(env, s, ot);
4421 gen_op_movl_T1_im(val);
4422 gen_op(s, op, ot, OR_EAX);
4423 break;
4424 }
4425 }
4426 break;
4427
4428 case 0x82:
4429 if (CODE64(s))
4430 goto illegal_op;
4431 case 0x80: /* GRP1 */
4432 case 0x81:
4433 case 0x83:
4434 {
4435 int val;
4436
4437 if ((b & 1) == 0)
4438 ot = OT_BYTE;
4439 else
4440 ot = dflag + OT_WORD;
4441
4442 modrm = cpu_ldub_code(env, s->pc++);
4443 mod = (modrm >> 6) & 3;
4444 rm = (modrm & 7) | REX_B(s);
4445 op = (modrm >> 3) & 7;
4446
4447 if (mod != 3) {
4448 if (b == 0x83)
4449 s->rip_offset = 1;
4450 else
4451 s->rip_offset = insn_const_size(ot);
4452 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4453 opreg = OR_TMP0;
4454 } else {
4455 opreg = rm;
4456 }
4457
4458 switch(b) {
4459 default:
4460 case 0x80:
4461 case 0x81:
4462 case 0x82:
4463 val = insn_get(env, s, ot);
4464 break;
4465 case 0x83:
4466 val = (int8_t)insn_get(env, s, OT_BYTE);
4467 break;
4468 }
4469 gen_op_movl_T1_im(val);
4470 gen_op(s, op, ot, opreg);
4471 }
4472 break;
4473
4474 /**************************/
4475 /* inc, dec, and other misc arith */
4476 case 0x40 ... 0x47: /* inc Gv */
4477 ot = dflag ? OT_LONG : OT_WORD;
4478 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4479 break;
4480 case 0x48 ... 0x4f: /* dec Gv */
4481 ot = dflag ? OT_LONG : OT_WORD;
4482 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4483 break;
4484 case 0xf6: /* GRP3 */
4485 case 0xf7:
4486 if ((b & 1) == 0)
4487 ot = OT_BYTE;
4488 else
4489 ot = dflag + OT_WORD;
4490
4491 modrm = cpu_ldub_code(env, s->pc++);
4492 mod = (modrm >> 6) & 3;
4493 rm = (modrm & 7) | REX_B(s);
4494 op = (modrm >> 3) & 7;
4495 if (mod != 3) {
4496 if (op == 0)
4497 s->rip_offset = insn_const_size(ot);
4498 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4499 gen_op_ld_T0_A0(ot + s->mem_index);
4500 } else {
4501 gen_op_mov_TN_reg(ot, 0, rm);
4502 }
4503
4504 switch(op) {
4505 case 0: /* test */
4506 val = insn_get(env, s, ot);
4507 gen_op_movl_T1_im(val);
4508 gen_op_testl_T0_T1_cc();
4509 set_cc_op(s, CC_OP_LOGICB + ot);
4510 break;
4511 case 2: /* not */
4512 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4513 if (mod != 3) {
4514 gen_op_st_T0_A0(ot + s->mem_index);
4515 } else {
4516 gen_op_mov_reg_T0(ot, rm);
4517 }
4518 break;
4519 case 3: /* neg */
4520 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4521 if (mod != 3) {
4522 gen_op_st_T0_A0(ot + s->mem_index);
4523 } else {
4524 gen_op_mov_reg_T0(ot, rm);
4525 }
4526 gen_op_update_neg_cc();
4527 set_cc_op(s, CC_OP_SUBB + ot);
4528 break;
4529 case 4: /* mul */
4530 switch(ot) {
4531 case OT_BYTE:
4532 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4533 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4534 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4535 /* XXX: use 32 bit mul which could be faster */
4536 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4537 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4538 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4539 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
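/* cc_src keeps the high byte of the 16-bit product; CC_OP_MULB sets CF/OF when it is non-zero. */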
4540 set_cc_op(s, CC_OP_MULB);
4541 break;
4542 case OT_WORD:
4543 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4544 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4545 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4546 /* XXX: use 32 bit mul which could be faster */
4547 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4548 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4549 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4550 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4551 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4552 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4553 set_cc_op(s, CC_OP_MULW);
4554 break;
4555 default:
4556 case OT_LONG:
4557 #ifdef TARGET_X86_64
4558 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4559 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4560 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
4561 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4562 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4563 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4564 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4565 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4566 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4567 #else
4568 {
4569 TCGv_i64 t0, t1;
4570 t0 = tcg_temp_new_i64();
4571 t1 = tcg_temp_new_i64();
4572 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4573 tcg_gen_extu_i32_i64(t0, cpu_T[0]);
4574 tcg_gen_extu_i32_i64(t1, cpu_T[1]);
4575 tcg_gen_mul_i64(t0, t0, t1);
4576 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4577 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4578 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4579 tcg_gen_shri_i64(t0, t0, 32);
4580 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4581 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4582 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4583 }
4584 #endif
4585 set_cc_op(s, CC_OP_MULL);
4586 break;
4587 #ifdef TARGET_X86_64
4588 case OT_QUAD:
4589 gen_helper_mulq_EAX_T0(cpu_env, cpu_T[0]);
4590 set_cc_op(s, CC_OP_MULQ);
4591 break;
4592 #endif
4593 }
4594 break;
4595 case 5: /* imul */
4596 switch(ot) {
4597 case OT_BYTE:
4598 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4599 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4600 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4601 /* XXX: use 32 bit mul which could be faster */
4602 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4603 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4604 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4605 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4606 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
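/* cc_src is non-zero iff the product does not fit in a signed byte, which is what sets CF/OF for IMUL. */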
4607 set_cc_op(s, CC_OP_MULB);
4608 break;
4609 case OT_WORD:
4610 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4611 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4612 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4613 /* XXX: use 32 bit mul which could be faster */
4614 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4615 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4616 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4617 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4618 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4619 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4620 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4621 set_cc_op(s, CC_OP_MULW);
4622 break;
4623 default:
4624 case OT_LONG:
4625 #ifdef TARGET_X86_64
4626 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4627 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4628 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4629 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4630 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4631 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4632 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4633 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4634 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4635 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4636 #else
4637 {
4638 TCGv_i64 t0, t1;
4639 t0 = tcg_temp_new_i64();
4640 t1 = tcg_temp_new_i64();
4641 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4642 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4643 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4644 tcg_gen_mul_i64(t0, t0, t1);
4645 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4646 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4647 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4648 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4649 tcg_gen_shri_i64(t0, t0, 32);
4650 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4651 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4652 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4653 }
4654 #endif
4655 set_cc_op(s, CC_OP_MULL);
4656 break;
4657 #ifdef TARGET_X86_64
4658 case OT_QUAD:
4659 gen_helper_imulq_EAX_T0(cpu_env, cpu_T[0]);
4660 set_cc_op(s, CC_OP_MULQ);
4661 break;
4662 #endif
4663 }
4664 break;
4665 case 6: /* div */
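/* Division can raise #DE, so EIP is synced to the start of the insn before each helper call. */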
4666 switch(ot) {
4667 case OT_BYTE:
4668 gen_jmp_im(pc_start - s->cs_base);
4669 gen_helper_divb_AL(cpu_env, cpu_T[0]);
4670 break;
4671 case OT_WORD:
4672 gen_jmp_im(pc_start - s->cs_base);
4673 gen_helper_divw_AX(cpu_env, cpu_T[0]);
4674 break;
4675 default:
4676 case OT_LONG:
4677 gen_jmp_im(pc_start - s->cs_base);
4678 gen_helper_divl_EAX(cpu_env, cpu_T[0]);
4679 break;
4680 #ifdef TARGET_X86_64
4681 case OT_QUAD:
4682 gen_jmp_im(pc_start - s->cs_base);
4683 gen_helper_divq_EAX(cpu_env, cpu_T[0]);
4684 break;
4685 #endif
4686 }
4687 break;
4688 case 7: /* idiv */
4689 switch(ot) {
4690 case OT_BYTE:
4691 gen_jmp_im(pc_start - s->cs_base);
4692 gen_helper_idivb_AL(cpu_env, cpu_T[0]);
4693 break;
4694 case OT_WORD:
4695 gen_jmp_im(pc_start - s->cs_base);
4696 gen_helper_idivw_AX(cpu_env, cpu_T[0]);
4697 break;
4698 default:
4699 case OT_LONG:
4700 gen_jmp_im(pc_start - s->cs_base);
4701 gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
4702 break;
4703 #ifdef TARGET_X86_64
4704 case OT_QUAD:
4705 gen_jmp_im(pc_start - s->cs_base);
4706 gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
4707 break;
4708 #endif
4709 }
4710 break;
4711 default:
4712 goto illegal_op;
4713 }
4714 break;
4715
4716 case 0xfe: /* GRP4 */
4717 case 0xff: /* GRP5 */
4718 if ((b & 1) == 0)
4719 ot = OT_BYTE;
4720 else
4721 ot = dflag + OT_WORD;
4722
4723 modrm = cpu_ldub_code(env, s->pc++);
4724 mod = (modrm >> 6) & 3;
4725 rm = (modrm & 7) | REX_B(s);
4726 op = (modrm >> 3) & 7;
4727 if (op >= 2 && b == 0xfe) {
4728 goto illegal_op;
4729 }
4730 if (CODE64(s)) {
4731 if (op == 2 || op == 4) {
4732 /* operand size for jumps is 64 bit */
4733 ot = OT_QUAD;
4734 } else if (op == 3 || op == 5) {
4735 ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
4736 } else if (op == 6) {
4737 /* default push size is 64 bit */
4738 ot = dflag ? OT_QUAD : OT_WORD;
4739 }
4740 }
4741 if (mod != 3) {
4742 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4743 if (op >= 2 && op != 3 && op != 5)
4744 gen_op_ld_T0_A0(ot + s->mem_index);
4745 } else {
4746 gen_op_mov_TN_reg(ot, 0, rm);
4747 }
4748
4749 switch(op) {
4750 case 0: /* inc Ev */
4751 if (mod != 3)
4752 opreg = OR_TMP0;
4753 else
4754 opreg = rm;
4755 gen_inc(s, ot, opreg, 1);
4756 break;
4757 case 1: /* dec Ev */
4758 if (mod != 3)
4759 opreg = OR_TMP0;
4760 else
4761 opreg = rm;
4762 gen_inc(s, ot, opreg, -1);
4763 break;
4764 case 2: /* call Ev */
4765 /* XXX: optimize if memory (no 'and' is necessary) */
4766 if (s->dflag == 0)
4767 gen_op_andl_T0_ffff();
4768 next_eip = s->pc - s->cs_base;
4769 gen_movtl_T1_im(next_eip);
4770 gen_push_T1(s);
4771 gen_op_jmp_T0();
4772 gen_eob(s);
4773 break;
4774 case 3: /* lcall Ev */
4775 gen_op_ld_T1_A0(ot + s->mem_index);
4776 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4777 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
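/* At do_lcall: T0 = new CS selector, T1 = new EIP offset. */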
4778 do_lcall:
4779 if (s->pe && !s->vm86) {
4780 gen_update_cc_op(s);
4781 gen_jmp_im(pc_start - s->cs_base);
4782 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4783 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4784 tcg_const_i32(dflag),
4785 tcg_const_i32(s->pc - pc_start));
4786 } else {
4787 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4788 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
4789 tcg_const_i32(dflag),
4790 tcg_const_i32(s->pc - s->cs_base));
4791 }
4792 gen_eob(s);
4793 break;
4794 case 4: /* jmp Ev */
4795 if (s->dflag == 0)
4796 gen_op_andl_T0_ffff();
4797 gen_op_jmp_T0();
4798 gen_eob(s);
4799 break;
4800 case 5: /* ljmp Ev */
4801 gen_op_ld_T1_A0(ot + s->mem_index);
4802 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4803 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4804 do_ljmp:
4805 if (s->pe && !s->vm86) {
4806 gen_update_cc_op(s);
4807 gen_jmp_im(pc_start - s->cs_base);
4808 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4809 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4810 tcg_const_i32(s->pc - pc_start));
4811 } else {
4812 gen_op_movl_seg_T0_vm(R_CS);
4813 gen_op_movl_T0_T1();
4814 gen_op_jmp_T0();
4815 }
4816 gen_eob(s);
4817 break;
4818 case 6: /* push Ev */
4819 gen_push_T0(s);
4820 break;
4821 default:
4822 goto illegal_op;
4823 }
4824 break;
4825
4826 case 0x84: /* test Ev, Gv */
4827 case 0x85:
4828 if ((b & 1) == 0)
4829 ot = OT_BYTE;
4830 else
4831 ot = dflag + OT_WORD;
4832
4833 modrm = cpu_ldub_code(env, s->pc++);
4834 reg = ((modrm >> 3) & 7) | rex_r;
4835
4836 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4837 gen_op_mov_TN_reg(ot, 1, reg);
4838 gen_op_testl_T0_T1_cc();
4839 set_cc_op(s, CC_OP_LOGICB + ot);
4840 break;
4841
4842 case 0xa8: /* test eAX, Iv */
4843 case 0xa9:
4844 if ((b & 1) == 0)
4845 ot = OT_BYTE;
4846 else
4847 ot = dflag + OT_WORD;
4848 val = insn_get(env, s, ot);
4849
4850 gen_op_mov_TN_reg(ot, 0, OR_EAX);
4851 gen_op_movl_T1_im(val);
4852 gen_op_testl_T0_T1_cc();
4853 set_cc_op(s, CC_OP_LOGICB + ot);
4854 break;
4855
4856 case 0x98: /* CWDE/CBW */
4857 #ifdef TARGET_X86_64
4858 if (dflag == 2) {
4859 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4860 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4861 gen_op_mov_reg_T0(OT_QUAD, R_EAX);
4862 } else
4863 #endif
4864 if (dflag == 1) {
4865 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4866 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4867 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4868 } else {
4869 gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
4870 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4871 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4872 }
4873 break;
4874 case 0x99: /* CDQ/CWD */
4875 #ifdef TARGET_X86_64
4876 if (dflag == 2) {
4877 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
4878 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
4879 gen_op_mov_reg_T0(OT_QUAD, R_EDX);
4880 } else
4881 #endif
4882 if (dflag == 1) {
4883 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4884 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4885 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
4886 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4887 } else {
4888 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4889 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4890 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
4891 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4892 }
4893 break;
4894 case 0x1af: /* imul Gv, Ev */
4895 case 0x69: /* imul Gv, Ev, I */
4896 case 0x6b:
4897 ot = dflag + OT_WORD;
4898 modrm = cpu_ldub_code(env, s->pc++);
4899 reg = ((modrm >> 3) & 7) | rex_r;
4900 if (b == 0x69)
4901 s->rip_offset = insn_const_size(ot);
4902 else if (b == 0x6b)
4903 s->rip_offset = 1;
4904 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4905 if (b == 0x69) {
4906 val = insn_get(env, s, ot);
4907 gen_op_movl_T1_im(val);
4908 } else if (b == 0x6b) {
4909 val = (int8_t)insn_get(env, s, OT_BYTE);
4910 gen_op_movl_T1_im(val);
4911 } else {
4912 gen_op_mov_TN_reg(ot, 1, reg);
4913 }
4914
4915 #ifdef TARGET_X86_64
4916 if (ot == OT_QUAD) {
4917 gen_helper_imulq_T0_T1(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
4918 } else
4919 #endif
4920 if (ot == OT_LONG) {
4921 #ifdef TARGET_X86_64
4922 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4923 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4924 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4925 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4926 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4927 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4928 #else
4929 {
4930 TCGv_i64 t0, t1;
4931 t0 = tcg_temp_new_i64();
4932 t1 = tcg_temp_new_i64();
4933 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4934 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4935 tcg_gen_mul_i64(t0, t0, t1);
4936 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4937 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4938 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4939 tcg_gen_shri_i64(t0, t0, 32);
4940 tcg_gen_trunc_i64_i32(cpu_T[1], t0);
4941 tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
4942 }
4943 #endif
4944 } else {
4945 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4946 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4947 /* XXX: use 32 bit mul which could be faster */
4948 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4949 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4950 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4951 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4952 }
4953 gen_op_mov_reg_T0(ot, reg);
4954 set_cc_op(s, CC_OP_MULB + ot);
4955 break;
4956 case 0x1c0:
4957 case 0x1c1: /* xadd Ev, Gv */
4958 if ((b & 1) == 0)
4959 ot = OT_BYTE;
4960 else
4961 ot = dflag + OT_WORD;
4962 modrm = cpu_ldub_code(env, s->pc++);
4963 reg = ((modrm >> 3) & 7) | rex_r;
4964 mod = (modrm >> 6) & 3;
4965 if (mod == 3) {
4966 rm = (modrm & 7) | REX_B(s);
4967 gen_op_mov_TN_reg(ot, 0, reg);
4968 gen_op_mov_TN_reg(ot, 1, rm);
4969 gen_op_addl_T0_T1();
4970 gen_op_mov_reg_T1(ot, reg);
4971 gen_op_mov_reg_T0(ot, rm);
4972 } else {
4973 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4974 gen_op_mov_TN_reg(ot, 0, reg);
4975 gen_op_ld_T1_A0(ot + s->mem_index);
4976 gen_op_addl_T0_T1();
4977 gen_op_st_T0_A0(ot + s->mem_index);
4978 gen_op_mov_reg_T1(ot, reg);
4979 }
4980 gen_op_update2_cc();
4981 set_cc_op(s, CC_OP_ADDB + ot);
4982 break;
4983 case 0x1b0:
4984 case 0x1b1: /* cmpxchg Ev, Gv */
4985 {
4986 int label1, label2;
4987 TCGv t0, t1, t2, a0;
4988
4989 if ((b & 1) == 0)
4990 ot = OT_BYTE;
4991 else
4992 ot = dflag + OT_WORD;
4993 modrm = cpu_ldub_code(env, s->pc++);
4994 reg = ((modrm >> 3) & 7) | rex_r;
4995 mod = (modrm >> 6) & 3;
4996 t0 = tcg_temp_local_new();
4997 t1 = tcg_temp_local_new();
4998 t2 = tcg_temp_local_new();
4999 a0 = tcg_temp_local_new();
5000 gen_op_mov_v_reg(ot, t1, reg);
5001 if (mod == 3) {
5002 rm = (modrm & 7) | REX_B(s);
5003 gen_op_mov_v_reg(ot, t0, rm);
5004 } else {
5005 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5006 tcg_gen_mov_tl(a0, cpu_A0);
5007 gen_op_ld_v(ot + s->mem_index, t0, a0);
5008 rm = 0; /* avoid warning */
5009 }
5010 label1 = gen_new_label();
5011 tcg_gen_sub_tl(t2, cpu_regs[R_EAX], t0);
5012 gen_extu(ot, t2);
5013 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
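/* Branch taken when EAX equals the destination; the fall-through handles the unequal case by loading the destination into EAX. */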
5014 label2 = gen_new_label();
5015 if (mod == 3) {
5016 gen_op_mov_reg_v(ot, R_EAX, t0);
5017 tcg_gen_br(label2);
5018 gen_set_label(label1);
5019 gen_op_mov_reg_v(ot, rm, t1);
5020 } else {
5021 /* perform no-op store cycle like physical cpu; must be
5022 before changing accumulator to ensure idempotency if
5023 the store faults and the instruction is restarted */
5024 gen_op_st_v(ot + s->mem_index, t0, a0);
5025 gen_op_mov_reg_v(ot, R_EAX, t0);
5026 tcg_gen_br(label2);
5027 gen_set_label(label1);
5028 gen_op_st_v(ot + s->mem_index, t1, a0);
5029 }
5030 gen_set_label(label2);
5031 tcg_gen_mov_tl(cpu_cc_src, t0);
5032 tcg_gen_mov_tl(cpu_cc_dst, t2);
5033 set_cc_op(s, CC_OP_SUBB + ot);
5034 tcg_temp_free(t0);
5035 tcg_temp_free(t1);
5036 tcg_temp_free(t2);
5037 tcg_temp_free(a0);
5038 }
5039 break;
5040 case 0x1c7: /* cmpxchg8b */
5041 modrm = cpu_ldub_code(env, s->pc++);
5042 mod = (modrm >> 6) & 3;
5043 if ((mod == 3) || ((modrm & 0x38) != 0x8))
5044 goto illegal_op;
5045 #ifdef TARGET_X86_64
5046 if (dflag == 2) {
5047 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5048 goto illegal_op;
5049 gen_jmp_im(pc_start - s->cs_base);
5050 gen_update_cc_op(s);
5051 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5052 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
5053 } else
5054 #endif
5055 {
5056 if (!(s->cpuid_features & CPUID_CX8))
5057 goto illegal_op;
5058 gen_jmp_im(pc_start - s->cs_base);
5059 gen_update_cc_op(s);
5060 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5061 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
5062 }
5063 set_cc_op(s, CC_OP_EFLAGS);
5064 break;
5065
5066 /**************************/
5067 /* push/pop */
5068 case 0x50 ... 0x57: /* push */
5069 gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
5070 gen_push_T0(s);
5071 break;
5072 case 0x58 ... 0x5f: /* pop */
5073 if (CODE64(s)) {
5074 ot = dflag ? OT_QUAD : OT_WORD;
5075 } else {
5076 ot = dflag + OT_WORD;
5077 }
5078 gen_pop_T0(s);
5079 /* NOTE: order is important for pop %sp */
5080 gen_pop_update(s);
5081 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
5082 break;
5083 case 0x60: /* pusha */
5084 if (CODE64(s))
5085 goto illegal_op;
5086 gen_pusha(s);
5087 break;
5088 case 0x61: /* popa */
5089 if (CODE64(s))
5090 goto illegal_op;
5091 gen_popa(s);
5092 break;
5093 case 0x68: /* push Iv */
5094 case 0x6a:
5095 if (CODE64(s)) {
5096 ot = dflag ? OT_QUAD : OT_WORD;
5097 } else {
5098 ot = dflag + OT_WORD;
5099 }
5100 if (b == 0x68)
5101 val = insn_get(env, s, ot);
5102 else
5103 val = (int8_t)insn_get(env, s, OT_BYTE);
5104 gen_op_movl_T0_im(val);
5105 gen_push_T0(s);
5106 break;
5107 case 0x8f: /* pop Ev */
5108 if (CODE64(s)) {
5109 ot = dflag ? OT_QUAD : OT_WORD;
5110 } else {
5111 ot = dflag + OT_WORD;
5112 }
5113 modrm = cpu_ldub_code(env, s->pc++);
5114 mod = (modrm >> 6) & 3;
5115 gen_pop_T0(s);
5116 if (mod == 3) {
5117 /* NOTE: order is important for pop %sp */
5118 gen_pop_update(s);
5119 rm = (modrm & 7) | REX_B(s);
5120 gen_op_mov_reg_T0(ot, rm);
5121 } else {
5122 /* NOTE: order is important too for MMU exceptions */
5123 s->popl_esp_hack = 1 << ot;
5124 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5125 s->popl_esp_hack = 0;
5126 gen_pop_update(s);
5127 }
5128 break;
5129 case 0xc8: /* enter */
5130 {
5131 int level;
5132 val = cpu_lduw_code(env, s->pc);
5133 s->pc += 2;
5134 level = cpu_ldub_code(env, s->pc++);
5135 gen_enter(s, val, level);
5136 }
5137 break;
5138 case 0xc9: /* leave */
5139 /* XXX: exception not precise (ESP is updated before potential exception) */
5140 if (CODE64(s)) {
5141 gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
5142 gen_op_mov_reg_T0(OT_QUAD, R_ESP);
5143 } else if (s->ss32) {
5144 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
5145 gen_op_mov_reg_T0(OT_LONG, R_ESP);
5146 } else {
5147 gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
5148 gen_op_mov_reg_T0(OT_WORD, R_ESP);
5149 }
5150 gen_pop_T0(s);
5151 if (CODE64(s)) {
5152 ot = dflag ? OT_QUAD : OT_WORD;
5153 } else {
5154 ot = dflag + OT_WORD;
5155 }
5156 gen_op_mov_reg_T0(ot, R_EBP);
5157 gen_pop_update(s);
5158 break;
5159 case 0x06: /* push es */
5160 case 0x0e: /* push cs */
5161 case 0x16: /* push ss */
5162 case 0x1e: /* push ds */
5163 if (CODE64(s))
5164 goto illegal_op;
5165 gen_op_movl_T0_seg(b >> 3);
5166 gen_push_T0(s);
5167 break;
5168 case 0x1a0: /* push fs */
5169 case 0x1a8: /* push gs */
5170 gen_op_movl_T0_seg((b >> 3) & 7);
5171 gen_push_T0(s);
5172 break;
5173 case 0x07: /* pop es */
5174 case 0x17: /* pop ss */
5175 case 0x1f: /* pop ds */
5176 if (CODE64(s))
5177 goto illegal_op;
5178 reg = b >> 3;
5179 gen_pop_T0(s);
5180 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5181 gen_pop_update(s);
5182 if (reg == R_SS) {
5183 /* if reg == SS, inhibit interrupts/trace. */
5184 /* If several instructions disable interrupts, only the
5185 _first_ does it */
5186 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5187 gen_helper_set_inhibit_irq(cpu_env);
5188 s->tf = 0;
5189 }
5190 if (s->is_jmp) {
5191 gen_jmp_im(s->pc - s->cs_base);
5192 gen_eob(s);
5193 }
5194 break;
5195 case 0x1a1: /* pop fs */
5196 case 0x1a9: /* pop gs */
5197 gen_pop_T0(s);
5198 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5199 gen_pop_update(s);
5200 if (s->is_jmp) {
5201 gen_jmp_im(s->pc - s->cs_base);
5202 gen_eob(s);
5203 }
5204 break;
5205
5206 /**************************/
5207 /* mov */
5208 case 0x88:
5209 case 0x89: /* mov Ev, Gv */
5210 if ((b & 1) == 0)
5211 ot = OT_BYTE;
5212 else
5213 ot = dflag + OT_WORD;
5214 modrm = cpu_ldub_code(env, s->pc++);
5215 reg = ((modrm >> 3) & 7) | rex_r;
5216
5217 /* generate a generic store */
5218 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
5219 break;
5220 case 0xc6:
5221 case 0xc7: /* mov Ev, Iv */
5222 if ((b & 1) == 0)
5223 ot = OT_BYTE;
5224 else
5225 ot = dflag + OT_WORD;
5226 modrm = cpu_ldub_code(env, s->pc++);
5227 mod = (modrm >> 6) & 3;
5228 if (mod != 3) {
5229 s->rip_offset = insn_const_size(ot);
5230 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5231 }
5232 val = insn_get(env, s, ot);
5233 gen_op_movl_T0_im(val);
5234 if (mod != 3)
5235 gen_op_st_T0_A0(ot + s->mem_index);
5236 else
5237 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
5238 break;
5239 case 0x8a:
5240 case 0x8b: /* mov Gv, Ev */
5241 if ((b & 1) == 0)
5242 ot = OT_BYTE;
5243 else
5244 ot = OT_WORD + dflag;
5245 modrm = cpu_ldub_code(env, s->pc++);
5246 reg = ((modrm >> 3) & 7) | rex_r;
5247
5248 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5249 gen_op_mov_reg_T0(ot, reg);
5250 break;
5251 case 0x8e: /* mov seg, Gv */
5252 modrm = cpu_ldub_code(env, s->pc++);
5253 reg = (modrm >> 3) & 7;
5254 if (reg >= 6 || reg == R_CS)
5255 goto illegal_op;
5256 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
5257 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5258 if (reg == R_SS) {
5259 /* if reg == SS, inhibit interrupts/trace */
5260 /* If several instructions disable interrupts, only the
5261 _first_ does it */
5262 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5263 gen_helper_set_inhibit_irq(cpu_env);
5264 s->tf = 0;
5265 }
5266 if (s->is_jmp) {
5267 gen_jmp_im(s->pc - s->cs_base);
5268 gen_eob(s);
5269 }
5270 break;
5271 case 0x8c: /* mov Gv, seg */
5272 modrm = cpu_ldub_code(env, s->pc++);
5273 reg = (modrm >> 3) & 7;
5274 mod = (modrm >> 6) & 3;
5275 if (reg >= 6)
5276 goto illegal_op;
5277 gen_op_movl_T0_seg(reg);
5278 if (mod == 3)
5279 ot = OT_WORD + dflag;
5280 else
5281 ot = OT_WORD;
5282 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5283 break;
5284
5285 case 0x1b6: /* movzbS Gv, Eb */
5286 case 0x1b7: /* movzwS Gv, Ew */
5287 case 0x1be: /* movsbS Gv, Eb */
5288 case 0x1bf: /* movswS Gv, Ew */
5289 {
5290 int d_ot;
5291 /* d_ot is the size of destination */
5292 d_ot = dflag + OT_WORD;
5293 /* ot is the size of source */
5294 ot = (b & 1) + OT_BYTE;
5295 modrm = cpu_ldub_code(env, s->pc++);
5296 reg = ((modrm >> 3) & 7) | rex_r;
5297 mod = (modrm >> 6) & 3;
5298 rm = (modrm & 7) | REX_B(s);
5299
5300 if (mod == 3) {
5301 gen_op_mov_TN_reg(ot, 0, rm);
5302 switch(ot | (b & 8)) {
5303 case OT_BYTE:
5304 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5305 break;
5306 case OT_BYTE | 8:
5307 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5308 break;
5309 case OT_WORD:
5310 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5311 break;
5312 default:
5313 case OT_WORD | 8:
5314 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5315 break;
5316 }
5317 gen_op_mov_reg_T0(d_ot, reg);
5318 } else {
5319 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5320 if (b & 8) {
5321 gen_op_lds_T0_A0(ot + s->mem_index);
5322 } else {
5323 gen_op_ldu_T0_A0(ot + s->mem_index);
5324 }
5325 gen_op_mov_reg_T0(d_ot, reg);
5326 }
5327 }
5328 break;
5329
5330 case 0x8d: /* lea */
5331 ot = dflag + OT_WORD;
5332 modrm = cpu_ldub_code(env, s->pc++);
5333 mod = (modrm >> 6) & 3;
5334 if (mod == 3)
5335 goto illegal_op;
5336 reg = ((modrm >> 3) & 7) | rex_r;
5337 /* we must ensure that no segment is added */
5338 s->override = -1;
5339 val = s->addseg;
5340 s->addseg = 0;
5341 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5342 s->addseg = val;
5343 gen_op_mov_reg_A0(ot - OT_WORD, reg);
5344 break;
5345
5346 case 0xa0: /* mov EAX, Ov */
5347 case 0xa1:
5348 case 0xa2: /* mov Ov, EAX */
5349 case 0xa3:
5350 {
5351 target_ulong offset_addr;
5352
5353 if ((b & 1) == 0)
5354 ot = OT_BYTE;
5355 else
5356 ot = dflag + OT_WORD;
5357 #ifdef TARGET_X86_64
5358 if (s->aflag == 2) {
5359 offset_addr = cpu_ldq_code(env, s->pc);
5360 s->pc += 8;
5361 gen_op_movq_A0_im(offset_addr);
5362 } else
5363 #endif
5364 {
5365 if (s->aflag) {
5366 offset_addr = insn_get(env, s, OT_LONG);
5367 } else {
5368 offset_addr = insn_get(env, s, OT_WORD);
5369 }
5370 gen_op_movl_A0_im(offset_addr);
5371 }
5372 gen_add_A0_ds_seg(s);
5373 if ((b & 2) == 0) {
5374 gen_op_ld_T0_A0(ot + s->mem_index);
5375 gen_op_mov_reg_T0(ot, R_EAX);
5376 } else {
5377 gen_op_mov_TN_reg(ot, 0, R_EAX);
5378 gen_op_st_T0_A0(ot + s->mem_index);
5379 }
5380 }
5381 break;
5382 case 0xd7: /* xlat */
5383 #ifdef TARGET_X86_64
5384 if (s->aflag == 2) {
5385 gen_op_movq_A0_reg(R_EBX);
5386 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5387 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5388 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5389 } else
5390 #endif
5391 {
5392 gen_op_movl_A0_reg(R_EBX);
5393 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5394 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5395 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5396 if (s->aflag == 0)
5397 gen_op_andl_A0_ffff();
5398 else
5399 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
5400 }
5401 gen_add_A0_ds_seg(s);
5402 gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
5403 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
5404 break;
5405 case 0xb0 ... 0xb7: /* mov R, Ib */
5406 val = insn_get(env, s, OT_BYTE);
5407 gen_op_movl_T0_im(val);
5408 gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
5409 break;
5410 case 0xb8 ... 0xbf: /* mov R, Iv */
5411 #ifdef TARGET_X86_64
5412 if (dflag == 2) {
5413 uint64_t tmp;
5414 /* 64 bit case */
5415 tmp = cpu_ldq_code(env, s->pc);
5416 s->pc += 8;
5417 reg = (b & 7) | REX_B(s);
5418 gen_movtl_T0_im(tmp);
5419 gen_op_mov_reg_T0(OT_QUAD, reg);
5420 } else
5421 #endif
5422 {
5423 ot = dflag ? OT_LONG : OT_WORD;
5424 val = insn_get(env, s, ot);
5425 reg = (b & 7) | REX_B(s);
5426 gen_op_movl_T0_im(val);
5427 gen_op_mov_reg_T0(ot, reg);
5428 }
5429 break;
5430
5431 case 0x91 ... 0x97: /* xchg R, EAX */
5432 do_xchg_reg_eax:
5433 ot = dflag + OT_WORD;
5434 reg = (b & 7) | REX_B(s);
5435 rm = R_EAX;
5436 goto do_xchg_reg;
5437 case 0x86:
5438 case 0x87: /* xchg Ev, Gv */
5439 if ((b & 1) == 0)
5440 ot = OT_BYTE;
5441 else
5442 ot = dflag + OT_WORD;
5443 modrm = cpu_ldub_code(env, s->pc++);
5444 reg = ((modrm >> 3) & 7) | rex_r;
5445 mod = (modrm >> 6) & 3;
5446 if (mod == 3) {
5447 rm = (modrm & 7) | REX_B(s);
5448 do_xchg_reg:
5449 gen_op_mov_TN_reg(ot, 0, reg);
5450 gen_op_mov_TN_reg(ot, 1, rm);
5451 gen_op_mov_reg_T0(ot, rm);
5452 gen_op_mov_reg_T1(ot, reg);
5453 } else {
5454 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5455 gen_op_mov_TN_reg(ot, 0, reg);
5456 /* for xchg, lock is implicit */
5457 if (!(prefixes & PREFIX_LOCK))
5458 gen_helper_lock();
5459 gen_op_ld_T1_A0(ot + s->mem_index);
5460 gen_op_st_T0_A0(ot + s->mem_index);
5461 if (!(prefixes & PREFIX_LOCK))
5462 gen_helper_unlock();
5463 gen_op_mov_reg_T1(ot, reg);
5464 }
5465 break;
5466 case 0xc4: /* les Gv */
5467 if (CODE64(s))
5468 goto illegal_op;
5469 op = R_ES;
5470 goto do_lxx;
5471 case 0xc5: /* lds Gv */
5472 if (CODE64(s))
5473 goto illegal_op;
5474 op = R_DS;
5475 goto do_lxx;
5476 case 0x1b2: /* lss Gv */
5477 op = R_SS;
5478 goto do_lxx;
5479 case 0x1b4: /* lfs Gv */
5480 op = R_FS;
5481 goto do_lxx;
5482 case 0x1b5: /* lgs Gv */
5483 op = R_GS;
5484 do_lxx:
5485 ot = dflag ? OT_LONG : OT_WORD;
5486 modrm = cpu_ldub_code(env, s->pc++);
5487 reg = ((modrm >> 3) & 7) | rex_r;
5488 mod = (modrm >> 6) & 3;
5489 if (mod == 3)
5490 goto illegal_op;
5491 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5492 gen_op_ld_T1_A0(ot + s->mem_index);
5493 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
5494 /* load the segment first to handle exceptions properly */
5495 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
5496 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5497 /* then put the data */
5498 gen_op_mov_reg_T1(ot, reg);
5499 if (s->is_jmp) {
5500 gen_jmp_im(s->pc - s->cs_base);
5501 gen_eob(s);
5502 }
5503 break;
5504
5505 /************************/
5506 /* shifts */
5507 case 0xc0:
5508 case 0xc1:
5509 /* shift Ev,Ib */
5510 shift = 2;
5511 grp2:
5512 {
5513 if ((b & 1) == 0)
5514 ot = OT_BYTE;
5515 else
5516 ot = dflag + OT_WORD;
5517
5518 modrm = cpu_ldub_code(env, s->pc++);
5519 mod = (modrm >> 6) & 3;
5520 op = (modrm >> 3) & 7;
5521
5522 if (mod != 3) {
5523 if (shift == 2) {
5524 s->rip_offset = 1;
5525 }
5526 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5527 opreg = OR_TMP0;
5528 } else {
5529 opreg = (modrm & 7) | REX_B(s);
5530 }
5531
5532 /* simpler op */
5533 if (shift == 0) {
5534 gen_shift(s, op, ot, opreg, OR_ECX);
5535 } else {
5536 if (shift == 2) {
5537 shift = cpu_ldub_code(env, s->pc++);
5538 }
5539 gen_shifti(s, op, ot, opreg, shift);
5540 }
5541 }
5542 break;
5543 case 0xd0:
5544 case 0xd1:
5545 /* shift Ev,1 */
5546 shift = 1;
5547 goto grp2;
5548 case 0xd2:
5549 case 0xd3:
5550 /* shift Ev,cl */
5551 shift = 0;
5552 goto grp2;
5553
5554 case 0x1a4: /* shld imm */
5555 op = 0;
5556 shift = 1;
5557 goto do_shiftd;
5558 case 0x1a5: /* shld cl */
5559 op = 0;
5560 shift = 0;
5561 goto do_shiftd;
5562 case 0x1ac: /* shrd imm */
5563 op = 1;
5564 shift = 1;
5565 goto do_shiftd;
5566 case 0x1ad: /* shrd cl */
5567 op = 1;
5568 shift = 0;
5569 do_shiftd:
5570 ot = dflag + OT_WORD;
5571 modrm = cpu_ldub_code(env, s->pc++);
5572 mod = (modrm >> 6) & 3;
5573 rm = (modrm & 7) | REX_B(s);
5574 reg = ((modrm >> 3) & 7) | rex_r;
5575 if (mod != 3) {
5576 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5577 opreg = OR_TMP0;
5578 } else {
5579 opreg = rm;
5580 }
5581 gen_op_mov_TN_reg(ot, 1, reg);
5582
5583 if (shift) {
5584 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5585 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5586 tcg_temp_free(imm);
5587 } else {
5588 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
5589 }
5590 break;
5591
5592 /************************/
5593 /* floats */
5594 case 0xd8 ... 0xdf:
5595 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5596 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5597 /* XXX: what to do if illegal op ? */
5598 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5599 break;
5600 }
5601 modrm = cpu_ldub_code(env, s->pc++);
5602 mod = (modrm >> 6) & 3;
5603 rm = modrm & 7;
5604 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
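/* Pack the low three opcode bits (d8-df) with the modrm reg field into a 6-bit index used by the switches below. */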
5605 if (mod != 3) {
5606 /* memory op */
5607 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5608 switch(op) {
5609 case 0x00 ... 0x07: /* fxxxs */
5610 case 0x10 ... 0x17: /* fixxxl */
5611 case 0x20 ... 0x27: /* fxxxl */
5612 case 0x30 ... 0x37: /* fixxx */
5613 {
5614 int op1;
5615 op1 = op & 7;
5616
5617 switch(op >> 4) {
5618 case 0:
5619 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5620 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5621 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
5622 break;
5623 case 1:
5624 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5625 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5626 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5627 break;
5628 case 2:
5629 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5630 (s->mem_index >> 2) - 1);
5631 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
5632 break;
5633 case 3:
5634 default:
5635 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5636 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5637 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5638 break;
5639 }
5640
5641 gen_helper_fp_arith_ST0_FT0(op1);
5642 if (op1 == 3) {
5643 /* fcomp needs pop */
5644 gen_helper_fpop(cpu_env);
5645 }
5646 }
5647 break;
5648 case 0x08: /* flds */
5649 case 0x0a: /* fsts */
5650 case 0x0b: /* fstps */
5651 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5652 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5653 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5654 switch(op & 7) {
5655 case 0:
5656 switch(op >> 4) {
5657 case 0:
5658 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5659 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5660 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
5661 break;
5662 case 1:
5663 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5664 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5665 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5666 break;
5667 case 2:
5668 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5669 (s->mem_index >> 2) - 1);
5670 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
5671 break;
5672 case 3:
5673 default:
5674 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5675 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5676 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5677 break;
5678 }
5679 break;
5680 case 1:
5681 /* XXX: the corresponding CPUID bit must be tested ! */
5682 switch(op >> 4) {
5683 case 1:
5684 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
5685 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5686 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5687 break;
5688 case 2:
5689 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
5690 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5691 (s->mem_index >> 2) - 1);
5692 break;
5693 case 3:
5694 default:
5695 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
5696 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5697 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5698 break;
5699 }
5700 gen_helper_fpop(cpu_env);
5701 break;
5702 default:
5703 switch(op >> 4) {
5704 case 0:
5705 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
5706 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5707 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5708 break;
5709 case 1:
5710 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
5711 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5712 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5713 break;
5714 case 2:
5715 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
5716 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5717 (s->mem_index >> 2) - 1);
5718 break;
5719 case 3:
5720 default:
5721 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
5722 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5723 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5724 break;
5725 }
5726 if ((op & 7) == 3)
5727 gen_helper_fpop(cpu_env);
5728 break;
5729 }
5730 break;
5731 case 0x0c: /* fldenv mem */
5732 gen_update_cc_op(s);
5733 gen_jmp_im(pc_start - s->cs_base);
5734 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5735 break;
5736 case 0x0d: /* fldcw mem */
5737 gen_op_ld_T0_A0(OT_WORD + s->mem_index);
5738 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5739 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
5740 break;
5741 case 0x0e: /* fnstenv mem */
5742 gen_update_cc_op(s);
5743 gen_jmp_im(pc_start - s->cs_base);
5744 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5745 break;
5746 case 0x0f: /* fnstcw mem */
5747 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
5748 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5749 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5750 break;
5751 case 0x1d: /* fldt mem */
5752 gen_update_cc_op(s);
5753 gen_jmp_im(pc_start - s->cs_base);
5754 gen_helper_fldt_ST0(cpu_env, cpu_A0);
5755 break;
5756 case 0x1f: /* fstpt mem */
5757 gen_update_cc_op(s);
5758 gen_jmp_im(pc_start - s->cs_base);
5759 gen_helper_fstt_ST0(cpu_env, cpu_A0);
5760 gen_helper_fpop(cpu_env);
5761 break;
5762 case 0x2c: /* frstor mem */
5763 gen_update_cc_op(s);
5764 gen_jmp_im(pc_start - s->cs_base);
5765 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5766 break;
5767 case 0x2e: /* fnsave mem */
5768 gen_update_cc_op(s);
5769 gen_jmp_im(pc_start - s->cs_base);
5770 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5771 break;
5772 case 0x2f: /* fnstsw mem */
5773 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
5774 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5775 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5776 break;
5777 case 0x3c: /* fbld */
5778 gen_update_cc_op(s);
5779 gen_jmp_im(pc_start - s->cs_base);
5780 gen_helper_fbld_ST0(cpu_env, cpu_A0);
5781 break;
5782 case 0x3e: /* fbstp */
5783 gen_update_cc_op(s);
5784 gen_jmp_im(pc_start - s->cs_base);
5785 gen_helper_fbst_ST0(cpu_env, cpu_A0);
5786 gen_helper_fpop(cpu_env);
5787 break;
5788 case 0x3d: /* fildll */
5789 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5790 (s->mem_index >> 2) - 1);
5791 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
5792 break;
5793 case 0x3f: /* fistpll */
5794 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
5795 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5796 (s->mem_index >> 2) - 1);
5797 gen_helper_fpop(cpu_env);
5798 break;
5799 default:
5800 goto illegal_op;
5801 }
5802 } else {
5803 /* register float ops */
5804 opreg = rm;
5805
5806 switch(op) {
5807 case 0x08: /* fld sti */
5808 gen_helper_fpush(cpu_env);
5809 gen_helper_fmov_ST0_STN(cpu_env,
5810 tcg_const_i32((opreg + 1) & 7));
5811 break;
5812 case 0x09: /* fxchg sti */
5813 case 0x29: /* fxchg4 sti, undocumented op */
5814 case 0x39: /* fxchg7 sti, undocumented op */
5815 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
5816 break;
5817 case 0x0a: /* grp d9/2 */
5818 switch(rm) {
5819 case 0: /* fnop */
5820 /* check exceptions (FreeBSD FPU probe) */
5821 gen_update_cc_op(s);
5822 gen_jmp_im(pc_start - s->cs_base);
5823 gen_helper_fwait(cpu_env);
5824 break;
5825 default:
5826 goto illegal_op;
5827 }
5828 break;
5829 case 0x0c: /* grp d9/4 */
5830 switch(rm) {
5831 case 0: /* fchs */
5832 gen_helper_fchs_ST0(cpu_env);
5833 break;
5834 case 1: /* fabs */
5835 gen_helper_fabs_ST0(cpu_env);
5836 break;
5837 case 4: /* ftst */
5838 gen_helper_fldz_FT0(cpu_env);
5839 gen_helper_fcom_ST0_FT0(cpu_env);
5840 break;
5841 case 5: /* fxam */
5842 gen_helper_fxam_ST0(cpu_env);
5843 break;
5844 default:
5845 goto illegal_op;
5846 }
5847 break;
5848 case 0x0d: /* grp d9/5 */
5849 {
5850 switch(rm) {
5851 case 0:
5852 gen_helper_fpush(cpu_env);
5853 gen_helper_fld1_ST0(cpu_env);
5854 break;
5855 case 1:
5856 gen_helper_fpush(cpu_env);
5857 gen_helper_fldl2t_ST0(cpu_env);
5858 break;
5859 case 2:
5860 gen_helper_fpush(cpu_env);
5861 gen_helper_fldl2e_ST0(cpu_env);
5862 break;
5863 case 3:
5864 gen_helper_fpush(cpu_env);
5865 gen_helper_fldpi_ST0(cpu_env);
5866 break;
5867 case 4:
5868 gen_helper_fpush(cpu_env);
5869 gen_helper_fldlg2_ST0(cpu_env);
5870 break;
5871 case 5:
5872 gen_helper_fpush(cpu_env);
5873 gen_helper_fldln2_ST0(cpu_env);
5874 break;
5875 case 6:
5876 gen_helper_fpush(cpu_env);
5877 gen_helper_fldz_ST0(cpu_env);
5878 break;
5879 default:
5880 goto illegal_op;
5881 }
5882 }
5883 break;
5884 case 0x0e: /* grp d9/6 */
5885 switch(rm) {
5886 case 0: /* f2xm1 */
5887 gen_helper_f2xm1(cpu_env);
5888 break;
5889 case 1: /* fyl2x */
5890 gen_helper_fyl2x(cpu_env);
5891 break;
5892 case 2: /* fptan */
5893 gen_helper_fptan(cpu_env);
5894 break;
5895 case 3: /* fpatan */
5896 gen_helper_fpatan(cpu_env);
5897 break;
5898 case 4: /* fxtract */
5899 gen_helper_fxtract(cpu_env);
5900 break;
5901 case 5: /* fprem1 */
5902 gen_helper_fprem1(cpu_env);
5903 break;
5904 case 6: /* fdecstp */
5905 gen_helper_fdecstp(cpu_env);
5906 break;
5907 default:
5908 case 7: /* fincstp */
5909 gen_helper_fincstp(cpu_env);
5910 break;
5911 }
5912 break;
5913 case 0x0f: /* grp d9/7 */
5914 switch(rm) {
5915 case 0: /* fprem */
5916 gen_helper_fprem(cpu_env);
5917 break;
5918 case 1: /* fyl2xp1 */
5919 gen_helper_fyl2xp1(cpu_env);
5920 break;
5921 case 2: /* fsqrt */
5922 gen_helper_fsqrt(cpu_env);
5923 break;
5924 case 3: /* fsincos */
5925 gen_helper_fsincos(cpu_env);
5926 break;
5927 case 5: /* fscale */
5928 gen_helper_fscale(cpu_env);
5929 break;
5930 case 4: /* frndint */
5931 gen_helper_frndint(cpu_env);
5932 break;
5933 case 6: /* fsin */
5934 gen_helper_fsin(cpu_env);
5935 break;
5936 default:
5937 case 7: /* fcos */
5938 gen_helper_fcos(cpu_env);
5939 break;
5940 }
5941 break;
5942 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5943 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5944 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5945 {
5946 int op1;
5947
5948 op1 = op & 7;
5949 if (op >= 0x20) {
5950 gen_helper_fp_arith_STN_ST0(op1, opreg);
5951 if (op >= 0x30)
5952 gen_helper_fpop(cpu_env);
5953 } else {
5954 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5955 gen_helper_fp_arith_ST0_FT0(op1);
5956 }
5957 }
5958 break;
5959 case 0x02: /* fcom */
5960 case 0x22: /* fcom2, undocumented op */
5961 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5962 gen_helper_fcom_ST0_FT0(cpu_env);
5963 break;
5964 case 0x03: /* fcomp */
5965 case 0x23: /* fcomp3, undocumented op */
5966 case 0x32: /* fcomp5, undocumented op */
5967 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5968 gen_helper_fcom_ST0_FT0(cpu_env);
5969 gen_helper_fpop(cpu_env);
5970 break;
5971 case 0x15: /* da/5 */
5972 switch(rm) {
5973 case 1: /* fucompp */
5974 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
5975 gen_helper_fucom_ST0_FT0(cpu_env);
5976 gen_helper_fpop(cpu_env);
5977 gen_helper_fpop(cpu_env);
5978 break;
5979 default:
5980 goto illegal_op;
5981 }
5982 break;
5983 case 0x1c:
5984 switch(rm) {
5985 case 0: /* feni (287 only, just do nop here) */
5986 break;
5987 case 1: /* fdisi (287 only, just do nop here) */
5988 break;
5989 case 2: /* fclex */
5990 gen_helper_fclex(cpu_env);
5991 break;
5992 case 3: /* fninit */
5993 gen_helper_fninit(cpu_env);
5994 break;
5995 case 4: /* fsetpm (287 only, just do nop here) */
5996 break;
5997 default:
5998 goto illegal_op;
5999 }
6000 break;
6001 case 0x1d: /* fucomi */
6002 gen_update_cc_op(s);
6003 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6004 gen_helper_fucomi_ST0_FT0(cpu_env);
6005 set_cc_op(s, CC_OP_EFLAGS);
6006 break;
6007 case 0x1e: /* fcomi */
6008 gen_update_cc_op(s);
6009 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6010 gen_helper_fcomi_ST0_FT0(cpu_env);
6011 set_cc_op(s, CC_OP_EFLAGS);
6012 break;
6013 case 0x28: /* ffree sti */
6014 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6015 break;
6016 case 0x2a: /* fst sti */
6017 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6018 break;
6019 case 0x2b: /* fstp sti */
6020 case 0x0b: /* fstp1 sti, undocumented op */
6021 case 0x3a: /* fstp8 sti, undocumented op */
6022 case 0x3b: /* fstp9 sti, undocumented op */
6023 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6024 gen_helper_fpop(cpu_env);
6025 break;
6026 case 0x2c: /* fucom st(i) */
6027 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6028 gen_helper_fucom_ST0_FT0(cpu_env);
6029 break;
6030 case 0x2d: /* fucomp st(i) */
6031 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6032 gen_helper_fucom_ST0_FT0(cpu_env);
6033 gen_helper_fpop(cpu_env);
6034 break;
6035 case 0x33: /* de/3 */
6036 switch(rm) {
6037 case 1: /* fcompp */
6038 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6039 gen_helper_fcom_ST0_FT0(cpu_env);
6040 gen_helper_fpop(cpu_env);
6041 gen_helper_fpop(cpu_env);
6042 break;
6043 default:
6044 goto illegal_op;
6045 }
6046 break;
6047 case 0x38: /* ffreep sti, undocumented op */
6048 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6049 gen_helper_fpop(cpu_env);
6050 break;
6051 case 0x3c: /* df/4 */
6052 switch(rm) {
6053 case 0:
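/* fnstsw ax: store the FPU status word into AX */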
6054 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
6055 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6056 gen_op_mov_reg_T0(OT_WORD, R_EAX);
6057 break;
6058 default:
6059 goto illegal_op;
6060 }
6061 break;
6062 case 0x3d: /* fucomip */
6063 gen_update_cc_op(s);
6064 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6065 gen_helper_fucomi_ST0_FT0(cpu_env);
6066 gen_helper_fpop(cpu_env);
6067 set_cc_op(s, CC_OP_EFLAGS);
6068 break;
6069 case 0x3e: /* fcomip */
6070 gen_update_cc_op(s);
6071 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6072 gen_helper_fcomi_ST0_FT0(cpu_env);
6073 gen_helper_fpop(cpu_env);
6074 set_cc_op(s, CC_OP_EFLAGS);
6075 break;
6076 case 0x10 ... 0x13: /* fcmovxx */
6077 case 0x18 ... 0x1b:
6078 {
6079 int op1, l1;
6080 static const uint8_t fcmov_cc[8] = {
6081 (JCC_B << 1),
6082 (JCC_Z << 1),
6083 (JCC_BE << 1),
6084 (JCC_P << 1),
6085 };
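/* compute the jcc condition under which the move is skipped: bit 3 of the opcode selects the negated forms (fcmovnb/fcmovne/fcmovnbe/fcmovnu) */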
6086 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
6087 l1 = gen_new_label();
6088 gen_jcc1(s, op1, l1);
6089 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
6090 gen_set_label(l1);
6091 }
6092 break;
6093 default:
6094 goto illegal_op;
6095 }
6096 }
6097 break;
6098 /************************/
6099 /* string ops */
6100
6101 case 0xa4: /* movsS */
6102 case 0xa5:
6103 if ((b & 1) == 0)
6104 ot = OT_BYTE;
6105 else
6106 ot = dflag + OT_WORD;
6107
6108 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6109 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6110 } else {
6111 gen_movs(s, ot);
6112 }
6113 break;
6114
6115 case 0xaa: /* stosS */
6116 case 0xab:
6117 if ((b & 1) == 0)
6118 ot = OT_BYTE;
6119 else
6120 ot = dflag + OT_WORD;
6121
6122 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6123 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6124 } else {
6125 gen_stos(s, ot);
6126 }
6127 break;
6128 case 0xac: /* lodsS */
6129 case 0xad:
6130 if ((b & 1) == 0)
6131 ot = OT_BYTE;
6132 else
6133 ot = dflag + OT_WORD;
6134 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6135 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6136 } else {
6137 gen_lods(s, ot);
6138 }
6139 break;
6140 case 0xae: /* scasS */
6141 case 0xaf:
6142 if ((b & 1) == 0)
6143 ot = OT_BYTE;
6144 else
6145 ot = dflag + OT_WORD;
6146 if (prefixes & PREFIX_REPNZ) {
6147 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6148 } else if (prefixes & PREFIX_REPZ) {
6149 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6150 } else {
6151 gen_scas(s, ot);
6152 }
6153 break;
6154
6155 case 0xa6: /* cmpsS */
6156 case 0xa7:
6157 if ((b & 1) == 0)
6158 ot = OT_BYTE;
6159 else
6160 ot = dflag + OT_WORD;
6161 if (prefixes & PREFIX_REPNZ) {
6162 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6163 } else if (prefixes & PREFIX_REPZ) {
6164 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6165 } else {
6166 gen_cmps(s, ot);
6167 }
6168 break;
6169 case 0x6c: /* insS */
6170 case 0x6d:
6171 if ((b & 1) == 0)
6172 ot = OT_BYTE;
6173 else
6174 ot = dflag ? OT_LONG : OT_WORD;
6175 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6176 gen_op_andl_T0_ffff();
6177 gen_check_io(s, ot, pc_start - s->cs_base,
6178 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6179 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6180 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6181 } else {
6182 gen_ins(s, ot);
6183 if (use_icount) {
6184 gen_jmp(s, s->pc - s->cs_base);
6185 }
6186 }
6187 break;
6188 case 0x6e: /* outsS */
6189 case 0x6f:
6190 if ((b & 1) == 0)
6191 ot = OT_BYTE;
6192 else
6193 ot = dflag ? OT_LONG : OT_WORD;
6194 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6195 gen_op_andl_T0_ffff();
6196 gen_check_io(s, ot, pc_start - s->cs_base,
6197 svm_is_rep(prefixes) | 4);
6198 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6199 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6200 } else {
6201 gen_outs(s, ot);
6202 if (use_icount) {
6203 gen_jmp(s, s->pc - s->cs_base);
6204 }
6205 }
6206 break;
6207
6208 /************************/
6209 /* port I/O */
6210
6211 case 0xe4:
6212 case 0xe5:
6213 if ((b & 1) == 0)
6214 ot = OT_BYTE;
6215 else
6216 ot = dflag ? OT_LONG : OT_WORD;
6217 val = cpu_ldub_code(env, s->pc++);
6218 gen_op_movl_T0_im(val);
6219 gen_check_io(s, ot, pc_start - s->cs_base,
6220 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
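/* with icount enabled, bracket the I/O access with gen_io_start/gen_io_end and end the TB so the instruction count stays exact */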
6221 if (use_icount)
6222 gen_io_start();
6223 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6224 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6225 gen_op_mov_reg_T1(ot, R_EAX);
6226 if (use_icount) {
6227 gen_io_end();
6228 gen_jmp(s, s->pc - s->cs_base);
6229 }
6230 break;
6231 case 0xe6:
6232 case 0xe7:
6233 if ((b & 1) == 0)
6234 ot = OT_BYTE;
6235 else
6236 ot = dflag ? OT_LONG : OT_WORD;
6237 val = cpu_ldub_code(env, s->pc++);
6238 gen_op_movl_T0_im(val);
6239 gen_check_io(s, ot, pc_start - s->cs_base,
6240 svm_is_rep(prefixes));
6241 gen_op_mov_TN_reg(ot, 1, R_EAX);
6242
6243 if (use_icount)
6244 gen_io_start();
6245 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6246 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6247 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6248 if (use_icount) {
6249 gen_io_end();
6250 gen_jmp(s, s->pc - s->cs_base);
6251 }
6252 break;
6253 case 0xec:
6254 case 0xed:
6255 if ((b & 1) == 0)
6256 ot = OT_BYTE;
6257 else
6258 ot = dflag ? OT_LONG : OT_WORD;
6259 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6260 gen_op_andl_T0_ffff();
6261 gen_check_io(s, ot, pc_start - s->cs_base,
6262 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6263 if (use_icount)
6264 gen_io_start();
6265 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6266 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6267 gen_op_mov_reg_T1(ot, R_EAX);
6268 if (use_icount) {
6269 gen_io_end();
6270 gen_jmp(s, s->pc - s->cs_base);
6271 }
6272 break;
6273 case 0xee:
6274 case 0xef:
6275 if ((b & 1) == 0)
6276 ot = OT_BYTE;
6277 else
6278 ot = dflag ? OT_LONG : OT_WORD;
6279 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6280 gen_op_andl_T0_ffff();
6281 gen_check_io(s, ot, pc_start - s->cs_base,
6282 svm_is_rep(prefixes));
6283 gen_op_mov_TN_reg(ot, 1, R_EAX);
6284
6285 if (use_icount)
6286 gen_io_start();
6287 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6288 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6289 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6290 if (use_icount) {
6291 gen_io_end();
6292 gen_jmp(s, s->pc - s->cs_base);
6293 }
6294 break;
6295
6296 /************************/
6297 /* control */
6298 case 0xc2: /* ret im */
6299 val = cpu_ldsw_code(env, s->pc);
6300 s->pc += 2;
6301 gen_pop_T0(s);
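/* in 64-bit mode a near ret pops a 64-bit return address unless a 16-bit operand size override is used */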
6302 if (CODE64(s) && s->dflag)
6303 s->dflag = 2;
6304 gen_stack_update(s, val + (2 << s->dflag));
6305 if (s->dflag == 0)
6306 gen_op_andl_T0_ffff();
6307 gen_op_jmp_T0();
6308 gen_eob(s);
6309 break;
6310 case 0xc3: /* ret */
6311 gen_pop_T0(s);
6312 gen_pop_update(s);
6313 if (s->dflag == 0)
6314 gen_op_andl_T0_ffff();
6315 gen_op_jmp_T0();
6316 gen_eob(s);
6317 break;
6318 case 0xca: /* lret im */
6319 val = cpu_ldsw_code(env, s->pc);
6320 s->pc += 2;
6321 do_lret:
6322 if (s->pe && !s->vm86) {
6323 gen_update_cc_op(s);
6324 gen_jmp_im(pc_start - s->cs_base);
6325 gen_helper_lret_protected(cpu_env, tcg_const_i32(s->dflag),
6326 tcg_const_i32(val));
6327 } else {
6328 gen_stack_A0(s);
6329 /* pop offset */
6330 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6331 if (s->dflag == 0)
6332 gen_op_andl_T0_ffff();
6333 /* NOTE: keeping EIP updated is not a problem in case of
6334 exception */
6335 gen_op_jmp_T0();
6336 /* pop selector */
6337 gen_op_addl_A0_im(2 << s->dflag);
6338 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6339 gen_op_movl_seg_T0_vm(R_CS);
6340 /* add stack offset */
6341 gen_stack_update(s, val + (4 << s->dflag));
6342 }
6343 gen_eob(s);
6344 break;
6345 case 0xcb: /* lret */
6346 val = 0;
6347 goto do_lret;
6348 case 0xcf: /* iret */
6349 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6350 if (!s->pe) {
6351 /* real mode */
6352 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
6353 set_cc_op(s, CC_OP_EFLAGS);
6354 } else if (s->vm86) {
6355 if (s->iopl != 3) {
6356 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6357 } else {
6358 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
6359 set_cc_op(s, CC_OP_EFLAGS);
6360 }
6361 } else {
6362 gen_update_cc_op(s);
6363 gen_jmp_im(pc_start - s->cs_base);
6364 gen_helper_iret_protected(cpu_env, tcg_const_i32(s->dflag),
6365 tcg_const_i32(s->pc - s->cs_base));
6366 set_cc_op(s, CC_OP_EFLAGS);
6367 }
6368 gen_eob(s);
6369 break;
6370 case 0xe8: /* call im */
6371 {
6372 if (dflag)
6373 tval = (int32_t)insn_get(env, s, OT_LONG);
6374 else
6375 tval = (int16_t)insn_get(env, s, OT_WORD);
6376 next_eip = s->pc - s->cs_base;
6377 tval += next_eip;
6378 if (s->dflag == 0)
6379 tval &= 0xffff;
6380 else if(!CODE64(s))
6381 tval &= 0xffffffff;
6382 gen_movtl_T0_im(next_eip);
6383 gen_push_T0(s);
6384 gen_jmp(s, tval);
6385 }
6386 break;
6387 case 0x9a: /* lcall im */
6388 {
6389 unsigned int selector, offset;
6390
6391 if (CODE64(s))
6392 goto illegal_op;
6393 ot = dflag ? OT_LONG : OT_WORD;
6394 offset = insn_get(env, s, ot);
6395 selector = insn_get(env, s, OT_WORD);
6396
6397 gen_op_movl_T0_im(selector);
6398 gen_op_movl_T1_imu(offset);
6399 }
6400 goto do_lcall;
6401 case 0xe9: /* jmp im */
6402 if (dflag)
6403 tval = (int32_t)insn_get(env, s, OT_LONG);
6404 else
6405 tval = (int16_t)insn_get(env, s, OT_WORD);
6406 tval += s->pc - s->cs_base;
6407 if (s->dflag == 0)
6408 tval &= 0xffff;
6409 else if(!CODE64(s))
6410 tval &= 0xffffffff;
6411 gen_jmp(s, tval);
6412 break;
6413 case 0xea: /* ljmp im */
6414 {
6415 unsigned int selector, offset;
6416
6417 if (CODE64(s))
6418 goto illegal_op;
6419 ot = dflag ? OT_LONG : OT_WORD;
6420 offset = insn_get(env, s, ot);
6421 selector = insn_get(env, s, OT_WORD);
6422
6423 gen_op_movl_T0_im(selector);
6424 gen_op_movl_T1_imu(offset);
6425 }
6426 goto do_ljmp;
6427 case 0xeb: /* jmp Jb */
6428 tval = (int8_t)insn_get(env, s, OT_BYTE);
6429 tval += s->pc - s->cs_base;
6430 if (s->dflag == 0)
6431 tval &= 0xffff;
6432 gen_jmp(s, tval);
6433 break;
6434 case 0x70 ... 0x7f: /* jcc Jb */
6435 tval = (int8_t)insn_get(env, s, OT_BYTE);
6436 goto do_jcc;
6437 case 0x180 ... 0x18f: /* jcc Jv */
6438 if (dflag) {
6439 tval = (int32_t)insn_get(env, s, OT_LONG);
6440 } else {
6441 tval = (int16_t)insn_get(env, s, OT_WORD);
6442 }
6443 do_jcc:
6444 next_eip = s->pc - s->cs_base;
6445 tval += next_eip;
6446 if (s->dflag == 0)
6447 tval &= 0xffff;
6448 gen_jcc(s, b, tval, next_eip);
6449 break;
6450
6451 case 0x190 ... 0x19f: /* setcc Gv */
6452 modrm = cpu_ldub_code(env, s->pc++);
6453 gen_setcc1(s, b, cpu_T[0]);
6454 gen_ldst_modrm(env, s, modrm, OT_BYTE, OR_TMP0, 1);
6455 break;
6456 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6457 ot = dflag + OT_WORD;
6458 modrm = cpu_ldub_code(env, s->pc++);
6459 reg = ((modrm >> 3) & 7) | rex_r;
6460 gen_cmovcc1(env, s, ot, b, modrm, reg);
6461 break;
6462
6463 /************************/
6464 /* flags */
6465 case 0x9c: /* pushf */
6466 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6467 if (s->vm86 && s->iopl != 3) {
6468 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6469 } else {
6470 gen_update_cc_op(s);
6471 gen_helper_read_eflags(cpu_T[0], cpu_env);
6472 gen_push_T0(s);
6473 }
6474 break;
6475 case 0x9d: /* popf */
6476 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6477 if (s->vm86 && s->iopl != 3) {
6478 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6479 } else {
6480 gen_pop_T0(s);
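/* the writable EFLAGS bits depend on privilege: CPL 0 may also change IF and IOPL, CPL <= IOPL may change IF, otherwise neither; a 16-bit popf only updates the low 16 bits */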
6481 if (s->cpl == 0) {
6482 if (s->dflag) {
6483 gen_helper_write_eflags(cpu_env, cpu_T[0],
6484 tcg_const_i32((TF_MASK | AC_MASK |
6485 ID_MASK | NT_MASK |
6486 IF_MASK |
6487 IOPL_MASK)));
6488 } else {
6489 gen_helper_write_eflags(cpu_env, cpu_T[0],
6490 tcg_const_i32((TF_MASK | AC_MASK |
6491 ID_MASK | NT_MASK |
6492 IF_MASK | IOPL_MASK)
6493 & 0xffff));
6494 }
6495 } else {
6496 if (s->cpl <= s->iopl) {
6497 if (s->dflag) {
6498 gen_helper_write_eflags(cpu_env, cpu_T[0],
6499 tcg_const_i32((TF_MASK |
6500 AC_MASK |
6501 ID_MASK |
6502 NT_MASK |
6503 IF_MASK)));
6504 } else {
6505 gen_helper_write_eflags(cpu_env, cpu_T[0],
6506 tcg_const_i32((TF_MASK |
6507 AC_MASK |
6508 ID_MASK |
6509 NT_MASK |
6510 IF_MASK)
6511 & 0xffff));
6512 }
6513 } else {
6514 if (s->dflag) {
6515 gen_helper_write_eflags(cpu_env, cpu_T[0],
6516 tcg_const_i32((TF_MASK | AC_MASK |
6517 ID_MASK | NT_MASK)));
6518 } else {
6519 gen_helper_write_eflags(cpu_env, cpu_T[0],
6520 tcg_const_i32((TF_MASK | AC_MASK |
6521 ID_MASK | NT_MASK)
6522 & 0xffff));
6523 }
6524 }
6525 }
6526 gen_pop_update(s);
6527 set_cc_op(s, CC_OP_EFLAGS);
6528 /* abort translation because TF/AC flag may change */
6529 gen_jmp_im(s->pc - s->cs_base);
6530 gen_eob(s);
6531 }
6532 break;
6533 case 0x9e: /* sahf */
6534 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6535 goto illegal_op;
6536 gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
6537 gen_compute_eflags(s);
6538 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6539 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6540 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6541 break;
6542 case 0x9f: /* lahf */
6543 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6544 goto illegal_op;
6545 gen_compute_eflags(s);
6546 /* Note: gen_compute_eflags() only gives the condition codes */
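/* bit 1 of EFLAGS is architecturally always one, hence the 0x02 */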
6547 tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
6548 gen_op_mov_reg_T0(OT_BYTE, R_AH);
6549 break;
6550 case 0xf5: /* cmc */
6551 gen_compute_eflags(s);
6552 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6553 break;
6554 case 0xf8: /* clc */
6555 gen_compute_eflags(s);
6556 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6557 break;
6558 case 0xf9: /* stc */
6559 gen_compute_eflags(s);
6560 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6561 break;
6562 case 0xfc: /* cld */
6563 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6564 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6565 break;
6566 case 0xfd: /* std */
6567 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6568 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6569 break;
6570
6571 /************************/
6572 /* bit operations */
6573 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6574 ot = dflag + OT_WORD;
6575 modrm = cpu_ldub_code(env, s->pc++);
6576 op = (modrm >> 3) & 7;
6577 mod = (modrm >> 6) & 3;
6578 rm = (modrm & 7) | REX_B(s);
6579 if (mod != 3) {
6580 s->rip_offset = 1;
6581 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
6582 gen_op_ld_T0_A0(ot + s->mem_index);
6583 } else {
6584 gen_op_mov_TN_reg(ot, 0, rm);
6585 }
6586 /* load shift */
6587 val = cpu_ldub_code(env, s->pc++);
6588 gen_op_movl_T1_im(val);
6589 if (op < 4)
6590 goto illegal_op;
6591 op -= 4;
6592 goto bt_op;
6593 case 0x1a3: /* bt Gv, Ev */
6594 op = 0;
6595 goto do_btx;
6596 case 0x1ab: /* bts */
6597 op = 1;
6598 goto do_btx;
6599 case 0x1b3: /* btr */
6600 op = 2;
6601 goto do_btx;
6602 case 0x1bb: /* btc */
6603 op = 3;
6604 do_btx:
6605 ot = dflag + OT_WORD;
6606 modrm = cpu_ldub_code(env, s->pc++);
6607 reg = ((modrm >> 3) & 7) | rex_r;
6608 mod = (modrm >> 6) & 3;
6609 rm = (modrm & 7) | REX_B(s);
6610 gen_op_mov_TN_reg(OT_LONG, 1, reg);
6611 if (mod != 3) {
6612 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
6613 /* specific case: we need to add a displacement */
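/* the bit offset may address memory outside the modrm operand: A0 += (sign-extended bit index / operand width) * operand size */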
6614 gen_exts(ot, cpu_T[1]);
6615 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6616 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6617 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6618 gen_op_ld_T0_A0(ot + s->mem_index);
6619 } else {
6620 gen_op_mov_TN_reg(ot, 0, rm);
6621 }
6622 bt_op:
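/* T1 is reduced to the bit index within the operand; op 0 (bt) only computes CF, 1 (bts) sets the bit, 2 (btr) clears it, 3 (btc) complements it */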
6623 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6624 switch(op) {
6625 case 0:
6626 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
6627 tcg_gen_movi_tl(cpu_cc_dst, 0);
6628 break;
6629 case 1:
6630 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6631 tcg_gen_movi_tl(cpu_tmp0, 1);
6632 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6633 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6634 break;
6635 case 2:
6636 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6637 tcg_gen_movi_tl(cpu_tmp0, 1);
6638 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6639 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6640 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6641 break;
6642 default:
6643 case 3:
6644 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6645 tcg_gen_movi_tl(cpu_tmp0, 1);
6646 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6647 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6648 break;
6649 }
6650 set_cc_op(s, CC_OP_SARB + ot);
6651 if (op != 0) {
6652 if (mod != 3)
6653 gen_op_st_T0_A0(ot + s->mem_index);
6654 else
6655 gen_op_mov_reg_T0(ot, rm);
6656 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6657 tcg_gen_movi_tl(cpu_cc_dst, 0);
6658 }
6659 break;
6660 case 0x1bc: /* bsf */
6661 case 0x1bd: /* bsr */
6662 {
6663 int label1;
6664 TCGv t0;
6665
6666 ot = dflag + OT_WORD;
6667 modrm = cpu_ldub_code(env, s->pc++);
6668 reg = ((modrm >> 3) & 7) | rex_r;
6669 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6670 gen_extu(ot, cpu_T[0]);
6671 t0 = tcg_temp_local_new();
6672 tcg_gen_mov_tl(t0, cpu_T[0]);
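/* F3 0F BD with the ABM feature is lzcnt, which is defined for a zero source and so does not need the ZF handling used for bsf/bsr below */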
6673 if ((b & 1) && (prefixes & PREFIX_REPZ) &&
6674 (s->cpuid_ext3_features & CPUID_EXT3_ABM)) {
6675 switch(ot) {
6676 case OT_WORD: gen_helper_lzcnt(cpu_T[0], t0,
6677 tcg_const_i32(16)); break;
6678 case OT_LONG: gen_helper_lzcnt(cpu_T[0], t0,
6679 tcg_const_i32(32)); break;
6680 case OT_QUAD: gen_helper_lzcnt(cpu_T[0], t0,
6681 tcg_const_i32(64)); break;
6682 }
6683 gen_op_mov_reg_T0(ot, reg);
6684 } else {
6685 label1 = gen_new_label();
6686 tcg_gen_movi_tl(cpu_cc_dst, 0);
6687 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
6688 if (b & 1) {
6689 gen_helper_bsr(cpu_T[0], t0);
6690 } else {
6691 gen_helper_bsf(cpu_T[0], t0);
6692 }
6693 gen_op_mov_reg_T0(ot, reg);
6694 tcg_gen_movi_tl(cpu_cc_dst, 1);
6695 gen_set_label(label1);
6696 set_cc_op(s, CC_OP_LOGICB + ot);
6697 }
6698 tcg_temp_free(t0);
6699 }
6700 break;
6701 /************************/
6702 /* bcd */
6703 case 0x27: /* daa */
6704 if (CODE64(s))
6705 goto illegal_op;
6706 gen_update_cc_op(s);
6707 gen_helper_daa(cpu_env);
6708 set_cc_op(s, CC_OP_EFLAGS);
6709 break;
6710 case 0x2f: /* das */
6711 if (CODE64(s))
6712 goto illegal_op;
6713 gen_update_cc_op(s);
6714 gen_helper_das(cpu_env);
6715 set_cc_op(s, CC_OP_EFLAGS);
6716 break;
6717 case 0x37: /* aaa */
6718 if (CODE64(s))
6719 goto illegal_op;
6720 gen_update_cc_op(s);
6721 gen_helper_aaa(cpu_env);
6722 set_cc_op(s, CC_OP_EFLAGS);
6723 break;
6724 case 0x3f: /* aas */
6725 if (CODE64(s))
6726 goto illegal_op;
6727 gen_update_cc_op(s);
6728 gen_helper_aas(cpu_env);
6729 set_cc_op(s, CC_OP_EFLAGS);
6730 break;
6731 case 0xd4: /* aam */
6732 if (CODE64(s))
6733 goto illegal_op;
6734 val = cpu_ldub_code(env, s->pc++);
6735 if (val == 0) {
6736 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6737 } else {
6738 gen_helper_aam(cpu_env, tcg_const_i32(val));
6739 set_cc_op(s, CC_OP_LOGICB);
6740 }
6741 break;
6742 case 0xd5: /* aad */
6743 if (CODE64(s))
6744 goto illegal_op;
6745 val = cpu_ldub_code(env, s->pc++);
6746 gen_helper_aad(cpu_env, tcg_const_i32(val));
6747 set_cc_op(s, CC_OP_LOGICB);
6748 break;
6749 /************************/
6750 /* misc */
6751 case 0x90: /* nop */
6752 /* XXX: correct lock test for all insn */
6753 if (prefixes & PREFIX_LOCK) {
6754 goto illegal_op;
6755 }
6756 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6757 if (REX_B(s)) {
6758 goto do_xchg_reg_eax;
6759 }
6760 if (prefixes & PREFIX_REPZ) {
6761 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
6762 }
6763 break;
6764 case 0x9b: /* fwait */
6765 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6766 (HF_MP_MASK | HF_TS_MASK)) {
6767 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6768 } else {
6769 gen_update_cc_op(s);
6770 gen_jmp_im(pc_start - s->cs_base);
6771 gen_helper_fwait(cpu_env);
6772 }
6773 break;
6774 case 0xcc: /* int3 */
6775 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6776 break;
6777 case 0xcd: /* int N */
6778 val = cpu_ldub_code(env, s->pc++);
6779 if (s->vm86 && s->iopl != 3) {
6780 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6781 } else {
6782 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6783 }
6784 break;
6785 case 0xce: /* into */
6786 if (CODE64(s))
6787 goto illegal_op;
6788 gen_update_cc_op(s);
6789 gen_jmp_im(pc_start - s->cs_base);
6790 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
6791 break;
6792 #ifdef WANT_ICEBP
6793 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6794 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6795 #if 1
6796 gen_debug(s, pc_start - s->cs_base);
6797 #else
6798 /* start debug */
6799 tb_flush(env);
6800 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6801 #endif
6802 break;
6803 #endif
6804 case 0xfa: /* cli */
6805 if (!s->vm86) {
6806 if (s->cpl <= s->iopl) {
6807 gen_helper_cli(cpu_env);
6808 } else {
6809 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6810 }
6811 } else {
6812 if (s->iopl == 3) {
6813 gen_helper_cli(cpu_env);
6814 } else {
6815 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6816 }
6817 }
6818 break;
6819 case 0xfb: /* sti */
6820 if (!s->vm86) {
6821 if (s->cpl <= s->iopl) {
6822 gen_sti:
6823 gen_helper_sti(cpu_env);
6824 /* interrupts are enabled only after the first insn following sti */
6825 /* if the previous insn already set the IRQ inhibit flag, do not
6826 set it again: only the first such insn delays interrupts */
6827 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
6828 gen_helper_set_inhibit_irq(cpu_env);
6829 /* give a chance to handle pending irqs */
6830 gen_jmp_im(s->pc - s->cs_base);
6831 gen_eob(s);
6832 } else {
6833 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6834 }
6835 } else {
6836 if (s->iopl == 3) {
6837 goto gen_sti;
6838 } else {
6839 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6840 }
6841 }
6842 break;
6843 case 0x62: /* bound */
6844 if (CODE64(s))
6845 goto illegal_op;
6846 ot = dflag ? OT_LONG : OT_WORD;
6847 modrm = cpu_ldub_code(env, s->pc++);
6848 reg = (modrm >> 3) & 7;
6849 mod = (modrm >> 6) & 3;
6850 if (mod == 3)
6851 goto illegal_op;
6852 gen_op_mov_TN_reg(ot, 0, reg);
6853 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
6854 gen_jmp_im(pc_start - s->cs_base);
6855 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6856 if (ot == OT_WORD) {
6857 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
6858 } else {
6859 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
6860 }
6861 break;
6862 case 0x1c8 ... 0x1cf: /* bswap reg */
6863 reg = (b & 7) | REX_B(s);
6864 #ifdef TARGET_X86_64
6865 if (dflag == 2) {
6866 gen_op_mov_TN_reg(OT_QUAD, 0, reg);
6867 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
6868 gen_op_mov_reg_T0(OT_QUAD, reg);
6869 } else
6870 #endif
6871 {
6872 gen_op_mov_TN_reg(OT_LONG, 0, reg);
6873 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
6874 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
6875 gen_op_mov_reg_T0(OT_LONG, reg);
6876 }
6877 break;
6878 case 0xd6: /* salc */
6879 if (CODE64(s))
6880 goto illegal_op;
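/* salc: AL = CF ? 0xff : 0x00, obtained by negating the 0/1 carry value */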
6881 gen_compute_eflags_c(s, cpu_T[0]);
6882 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
6883 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
6884 break;
6885 case 0xe0: /* loopnz */
6886 case 0xe1: /* loopz */
6887 case 0xe2: /* loop */
6888 case 0xe3: /* jecxz */
6889 {
6890 int l1, l2, l3;
6891
6892 tval = (int8_t)insn_get(env, s, OT_BYTE);
6893 next_eip = s->pc - s->cs_base;
6894 tval += next_eip;
6895 if (s->dflag == 0)
6896 tval &= 0xffff;
6897
6898 l1 = gen_new_label();
6899 l2 = gen_new_label();
6900 l3 = gen_new_label();
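/* l1: branch taken, jump to tval; l3: not taken, fall through to next_eip; l2: common exit of the block */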
6901 b &= 3;
6902 switch(b) {
6903 case 0: /* loopnz */
6904 case 1: /* loopz */
6905 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6906 gen_op_jz_ecx(s->aflag, l3);
6907 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
6908 break;
6909 case 2: /* loop */
6910 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6911 gen_op_jnz_ecx(s->aflag, l1);
6912 break;
6913 default:
6914 case 3: /* jcxz */
6915 gen_op_jz_ecx(s->aflag, l1);
6916 break;
6917 }
6918
6919 gen_set_label(l3);
6920 gen_jmp_im(next_eip);
6921 tcg_gen_br(l2);
6922
6923 gen_set_label(l1);
6924 gen_jmp_im(tval);
6925 gen_set_label(l2);
6926 gen_eob(s);
6927 }
6928 break;
6929 case 0x130: /* wrmsr */
6930 case 0x132: /* rdmsr */
6931 if (s->cpl != 0) {
6932 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6933 } else {
6934 gen_update_cc_op(s);
6935 gen_jmp_im(pc_start - s->cs_base);
6936 if (b & 2) {
6937 gen_helper_rdmsr(cpu_env);
6938 } else {
6939 gen_helper_wrmsr(cpu_env);
6940 }
6941 }
6942 break;
6943 case 0x131: /* rdtsc */
6944 gen_update_cc_op(s);
6945 gen_jmp_im(pc_start - s->cs_base);
6946 if (use_icount)
6947 gen_io_start();
6948 gen_helper_rdtsc(cpu_env);
6949 if (use_icount) {
6950 gen_io_end();
6951 gen_jmp(s, s->pc - s->cs_base);
6952 }
6953 break;
6954 case 0x133: /* rdpmc */
6955 gen_update_cc_op(s);
6956 gen_jmp_im(pc_start - s->cs_base);
6957 gen_helper_rdpmc(cpu_env);
6958 break;
6959 case 0x134: /* sysenter */
6960 /* For Intel CPUs, SYSENTER is valid in 64-bit mode */
6961 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
6962 goto illegal_op;
6963 if (!s->pe) {
6964 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6965 } else {
6966 gen_update_cc_op(s);
6967 gen_jmp_im(pc_start - s->cs_base);
6968 gen_helper_sysenter(cpu_env);
6969 gen_eob(s);
6970 }
6971 break;
6972 case 0x135: /* sysexit */
6973 /* For Intel CPUs, SYSEXIT is valid in 64-bit mode */
6974 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
6975 goto illegal_op;
6976 if (!s->pe) {
6977 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6978 } else {
6979 gen_update_cc_op(s);
6980 gen_jmp_im(pc_start - s->cs_base);
6981 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag));
6982 gen_eob(s);
6983 }
6984 break;
6985 #ifdef TARGET_X86_64
6986 case 0x105: /* syscall */
6987 /* XXX: is it usable in real mode? */
6988 gen_update_cc_op(s);
6989 gen_jmp_im(pc_start - s->cs_base);
6990 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
6991 gen_eob(s);
6992 break;
6993 case 0x107: /* sysret */
6994 if (!s->pe) {
6995 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6996 } else {
6997 gen_update_cc_op(s);
6998 gen_jmp_im(pc_start - s->cs_base);
6999 gen_helper_sysret(cpu_env, tcg_const_i32(s->dflag));
7000 /* condition codes are modified only in long mode */
7001 if (s->lma) {
7002 set_cc_op(s, CC_OP_EFLAGS);
7003 }
7004 gen_eob(s);
7005 }
7006 break;
7007 #endif
7008 case 0x1a2: /* cpuid */
7009 gen_update_cc_op(s);
7010 gen_jmp_im(pc_start - s->cs_base);
7011 gen_helper_cpuid(cpu_env);
7012 break;
7013 case 0xf4: /* hlt */
7014 if (s->cpl != 0) {
7015 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7016 } else {
7017 gen_update_cc_op(s);
7018 gen_jmp_im(pc_start - s->cs_base);
7019 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
7020 s->is_jmp = DISAS_TB_JUMP;
7021 }
7022 break;
7023 case 0x100:
7024 modrm = cpu_ldub_code(env, s->pc++);
7025 mod = (modrm >> 6) & 3;
7026 op = (modrm >> 3) & 7;
7027 switch(op) {
7028 case 0: /* sldt */
7029 if (!s->pe || s->vm86)
7030 goto illegal_op;
7031 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7032 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
7033 ot = OT_WORD;
7034 if (mod == 3)
7035 ot += s->dflag;
7036 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7037 break;
7038 case 2: /* lldt */
7039 if (!s->pe || s->vm86)
7040 goto illegal_op;
7041 if (s->cpl != 0) {
7042 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7043 } else {
7044 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7045 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7046 gen_jmp_im(pc_start - s->cs_base);
7047 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7048 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
7049 }
7050 break;
7051 case 1: /* str */
7052 if (!s->pe || s->vm86)
7053 goto illegal_op;
7054 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7055 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
7056 ot = OT_WORD;
7057 if (mod == 3)
7058 ot += s->dflag;
7059 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7060 break;
7061 case 3: /* ltr */
7062 if (!s->pe || s->vm86)
7063 goto illegal_op;
7064 if (s->cpl != 0) {
7065 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7066 } else {
7067 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7068 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7069 gen_jmp_im(pc_start - s->cs_base);
7070 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7071 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
7072 }
7073 break;
7074 case 4: /* verr */
7075 case 5: /* verw */
7076 if (!s->pe || s->vm86)
7077 goto illegal_op;
7078 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7079 gen_update_cc_op(s);
7080 if (op == 4) {
7081 gen_helper_verr(cpu_env, cpu_T[0]);
7082 } else {
7083 gen_helper_verw(cpu_env, cpu_T[0]);
7084 }
7085 set_cc_op(s, CC_OP_EFLAGS);
7086 break;
7087 default:
7088 goto illegal_op;
7089 }
7090 break;
7091 case 0x101:
7092 modrm = cpu_ldub_code(env, s->pc++);
7093 mod = (modrm >> 6) & 3;
7094 op = (modrm >> 3) & 7;
7095 rm = modrm & 7;
7096 switch(op) {
7097 case 0: /* sgdt */
7098 if (mod == 3)
7099 goto illegal_op;
7100 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7101 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7102 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
7103 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7104 gen_add_A0_im(s, 2);
7105 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
7106 if (!s->dflag)
7107 gen_op_andl_T0_im(0xffffff);
7108 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7109 break;
7110 case 1:
7111 if (mod == 3) {
7112 switch (rm) {
7113 case 0: /* monitor */
7114 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7115 s->cpl != 0)
7116 goto illegal_op;
7117 gen_update_cc_op(s);
7118 gen_jmp_im(pc_start - s->cs_base);
7119 #ifdef TARGET_X86_64
7120 if (s->aflag == 2) {
7121 gen_op_movq_A0_reg(R_EAX);
7122 } else
7123 #endif
7124 {
7125 gen_op_movl_A0_reg(R_EAX);
7126 if (s->aflag == 0)
7127 gen_op_andl_A0_ffff();
7128 }
7129 gen_add_A0_ds_seg(s);
7130 gen_helper_monitor(cpu_env, cpu_A0);
7131 break;
7132 case 1: /* mwait */
7133 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7134 s->cpl != 0)
7135 goto illegal_op;
7136 gen_update_cc_op(s);
7137 gen_jmp_im(pc_start - s->cs_base);
7138 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
7139 gen_eob(s);
7140 break;
7141 case 2: /* clac */
7142 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7143 s->cpl != 0) {
7144 goto illegal_op;
7145 }
7146 gen_helper_clac(cpu_env);
7147 gen_jmp_im(s->pc - s->cs_base);
7148 gen_eob(s);
7149 break;
7150 case 3: /* stac */
7151 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7152 s->cpl != 0) {
7153 goto illegal_op;
7154 }
7155 gen_helper_stac(cpu_env);
7156 gen_jmp_im(s->pc - s->cs_base);
7157 gen_eob(s);
7158 break;
7159 default:
7160 goto illegal_op;
7161 }
7162 } else { /* sidt */
7163 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7164 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7165 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7166 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7167 gen_add_A0_im(s, 2);
7168 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7169 if (!s->dflag)
7170 gen_op_andl_T0_im(0xffffff);
7171 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7172 }
7173 break;
7174 case 2: /* lgdt */
7175 case 3: /* lidt */
7176 if (mod == 3) {
7177 gen_update_cc_op(s);
7178 gen_jmp_im(pc_start - s->cs_base);
7179 switch(rm) {
7180 case 0: /* VMRUN */
7181 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7182 goto illegal_op;
7183 if (s->cpl != 0) {
7184 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7185 break;
7186 } else {
7187 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag),
7188 tcg_const_i32(s->pc - pc_start));
7189 tcg_gen_exit_tb(0);
7190 s->is_jmp = DISAS_TB_JUMP;
7191 }
7192 break;
7193 case 1: /* VMMCALL */
7194 if (!(s->flags & HF_SVME_MASK))
7195 goto illegal_op;
7196 gen_helper_vmmcall(cpu_env);
7197 break;
7198 case 2: /* VMLOAD */
7199 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7200 goto illegal_op;
7201 if (s->cpl != 0) {
7202 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7203 break;
7204 } else {
7205 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag));
7206 }
7207 break;
7208 case 3: /* VMSAVE */
7209 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7210 goto illegal_op;
7211 if (s->cpl != 0) {
7212 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7213 break;
7214 } else {
7215 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag));
7216 }
7217 break;
7218 case 4: /* STGI */
7219 if ((!(s->flags & HF_SVME_MASK) &&
7220 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7221 !s->pe)
7222 goto illegal_op;
7223 if (s->cpl != 0) {
7224 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7225 break;
7226 } else {
7227 gen_helper_stgi(cpu_env);
7228 }
7229 break;
7230 case 5: /* CLGI */
7231 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7232 goto illegal_op;
7233 if (s->cpl != 0) {
7234 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7235 break;
7236 } else {
7237 gen_helper_clgi(cpu_env);
7238 }
7239 break;
7240 case 6: /* SKINIT */
7241 if ((!(s->flags & HF_SVME_MASK) &&
7242 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7243 !s->pe)
7244 goto illegal_op;
7245 gen_helper_skinit(cpu_env);
7246 break;
7247 case 7: /* INVLPGA */
7248 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7249 goto illegal_op;
7250 if (s->cpl != 0) {
7251 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7252 break;
7253 } else {
7254 gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag));
7255 }
7256 break;
7257 default:
7258 goto illegal_op;
7259 }
7260 } else if (s->cpl != 0) {
7261 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7262 } else {
7263 gen_svm_check_intercept(s, pc_start,
7264 op == 2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7265 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7266 gen_op_ld_T1_A0(OT_WORD + s->mem_index);
7267 gen_add_A0_im(s, 2);
7268 gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7269 if (!s->dflag)
7270 gen_op_andl_T0_im(0xffffff);
7271 if (op == 2) {
7272 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7273 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7274 } else {
7275 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7276 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7277 }
7278 }
7279 break;
7280 case 4: /* smsw */
7281 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
7282 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7283 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7284 #else
7285 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7286 #endif
7287 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 1);
7288 break;
7289 case 6: /* lmsw */
7290 if (s->cpl != 0) {
7291 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7292 } else {
7293 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7294 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7295 gen_helper_lmsw(cpu_env, cpu_T[0]);
7296 gen_jmp_im(s->pc - s->cs_base);
7297 gen_eob(s);
7298 }
7299 break;
7300 case 7:
7301 if (mod != 3) { /* invlpg */
7302 if (s->cpl != 0) {
7303 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7304 } else {
7305 gen_update_cc_op(s);
7306 gen_jmp_im(pc_start - s->cs_base);
7307 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7308 gen_helper_invlpg(cpu_env, cpu_A0);
7309 gen_jmp_im(s->pc - s->cs_base);
7310 gen_eob(s);
7311 }
7312 } else {
7313 switch (rm) {
7314 case 0: /* swapgs */
7315 #ifdef TARGET_X86_64
7316 if (CODE64(s)) {
7317 if (s->cpl != 0) {
7318 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7319 } else {
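/* swapgs: exchange the current GS base with the kernelgsbase MSR value */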
7320 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7321 offsetof(CPUX86State,segs[R_GS].base));
7322 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7323 offsetof(CPUX86State,kernelgsbase));
7324 tcg_gen_st_tl(cpu_T[1], cpu_env,
7325 offsetof(CPUX86State,segs[R_GS].base));
7326 tcg_gen_st_tl(cpu_T[0], cpu_env,
7327 offsetof(CPUX86State,kernelgsbase));
7328 }
7329 } else
7330 #endif
7331 {
7332 goto illegal_op;
7333 }
7334 break;
7335 case 1: /* rdtscp */
7336 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7337 goto illegal_op;
7338 gen_update_cc_op(s);
7339 gen_jmp_im(pc_start - s->cs_base);
7340 if (use_icount)
7341 gen_io_start();
7342 gen_helper_rdtscp(cpu_env);
7343 if (use_icount) {
7344 gen_io_end();
7345 gen_jmp(s, s->pc - s->cs_base);
7346 }
7347 break;
7348 default:
7349 goto illegal_op;
7350 }
7351 }
7352 break;
7353 default:
7354 goto illegal_op;
7355 }
7356 break;
7357 case 0x108: /* invd */
7358 case 0x109: /* wbinvd */
7359 if (s->cpl != 0) {
7360 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7361 } else {
7362 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7363 /* nothing to do */
7364 }
7365 break;
7366 case 0x63: /* arpl or movslS (x86_64) */
7367 #ifdef TARGET_X86_64
7368 if (CODE64(s)) {
7369 int d_ot;
7370 /* d_ot is the size of the destination */
7371 d_ot = dflag + OT_WORD;
7372
7373 modrm = cpu_ldub_code(env, s->pc++);
7374 reg = ((modrm >> 3) & 7) | rex_r;
7375 mod = (modrm >> 6) & 3;
7376 rm = (modrm & 7) | REX_B(s);
7377
7378 if (mod == 3) {
7379 gen_op_mov_TN_reg(OT_LONG, 0, rm);
7380 /* sign extend */
7381 if (d_ot == OT_QUAD)
7382 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7383 gen_op_mov_reg_T0(d_ot, reg);
7384 } else {
7385 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7386 if (d_ot == OT_QUAD) {
7387 gen_op_lds_T0_A0(OT_LONG + s->mem_index);
7388 } else {
7389 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7390 }
7391 gen_op_mov_reg_T0(d_ot, reg);
7392 }
7393 } else
7394 #endif
7395 {
7396 int label1;
7397 TCGv t0, t1, t2, a0;
7398
7399 if (!s->pe || s->vm86)
7400 goto illegal_op;
7401 t0 = tcg_temp_local_new();
7402 t1 = tcg_temp_local_new();
7403 t2 = tcg_temp_local_new();
7404 ot = OT_WORD;
7405 modrm = cpu_ldub_code(env, s->pc++);
7406 reg = (modrm >> 3) & 7;
7407 mod = (modrm >> 6) & 3;
7408 rm = modrm & 7;
7409 if (mod != 3) {
7410 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7411 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
7412 a0 = tcg_temp_local_new();
7413 tcg_gen_mov_tl(a0, cpu_A0);
7414 } else {
7415 gen_op_mov_v_reg(ot, t0, rm);
7416 TCGV_UNUSED(a0);
7417 }
7418 gen_op_mov_v_reg(ot, t1, reg);
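/* arpl: if the destination RPL (low two bits) is below the source RPL, raise it to the source RPL and set ZF; t2 holds the new ZF value merged into cc_src below */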
7419 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7420 tcg_gen_andi_tl(t1, t1, 3);
7421 tcg_gen_movi_tl(t2, 0);
7422 label1 = gen_new_label();
7423 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7424 tcg_gen_andi_tl(t0, t0, ~3);
7425 tcg_gen_or_tl(t0, t0, t1);
7426 tcg_gen_movi_tl(t2, CC_Z);
7427 gen_set_label(label1);
7428 if (mod != 3) {
7429 gen_op_st_v(ot + s->mem_index, t0, a0);
7430 tcg_temp_free(a0);
7431 } else {
7432 gen_op_mov_reg_v(ot, rm, t0);
7433 }
7434 gen_compute_eflags(s);
7435 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7436 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7437 tcg_temp_free(t0);
7438 tcg_temp_free(t1);
7439 tcg_temp_free(t2);
7440 }
7441 break;
7442 case 0x102: /* lar */
7443 case 0x103: /* lsl */
7444 {
7445 int label1;
7446 TCGv t0;
7447 if (!s->pe || s->vm86)
7448 goto illegal_op;
7449 ot = dflag ? OT_LONG : OT_WORD;
7450 modrm = cpu_ldub_code(env, s->pc++);
7451 reg = ((modrm >> 3) & 7) | rex_r;
7452 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7453 t0 = tcg_temp_local_new();
7454 gen_update_cc_op(s);
7455 if (b == 0x102) {
7456 gen_helper_lar(t0, cpu_env, cpu_T[0]);
7457 } else {
7458 gen_helper_lsl(t0, cpu_env, cpu_T[0]);
7459 }
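/* the lar/lsl helpers set CC_Z in cc_src when the selector is valid; only then is the destination register written */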
7460 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7461 label1 = gen_new_label();
7462 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7463 gen_op_mov_reg_v(ot, reg, t0);
7464 gen_set_label(label1);
7465 set_cc_op(s, CC_OP_EFLAGS);
7466 tcg_temp_free(t0);
7467 }
7468 break;
7469 case 0x118:
7470 modrm = cpu_ldub_code(env, s->pc++);
7471 mod = (modrm >> 6) & 3;
7472 op = (modrm >> 3) & 7;
7473 switch(op) {
7474 case 0: /* prefetchnta */
7475 case 1: /* prefetcht0 */
7476 case 2: /* prefetcht1 */
7477 case 3: /* prefetcht2 */
7478 if (mod == 3)
7479 goto illegal_op;
7480 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7481 /* nothing more to do */
7482 break;
7483 default: /* nop (multi byte) */
7484 gen_nop_modrm(env, s, modrm);
7485 break;
7486 }
7487 break;
7488 case 0x119 ... 0x11f: /* nop (multi byte) */
7489 modrm = cpu_ldub_code(env, s->pc++);
7490 gen_nop_modrm(env, s, modrm);
7491 break;
7492 case 0x120: /* mov reg, crN */
7493 case 0x122: /* mov crN, reg */
7494 if (s->cpl != 0) {
7495 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7496 } else {
7497 modrm = cpu_ldub_code(env, s->pc++);
7498 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7499 * AMD documentation (24594.pdf) and testing of
7500 * intel 386 and 486 processors all show that the mod bits
7501 * are assumed to be 1's, regardless of actual values.
7502 */
7503 rm = (modrm & 7) | REX_B(s);
7504 reg = ((modrm >> 3) & 7) | rex_r;
7505 if (CODE64(s))
7506 ot = OT_QUAD;
7507 else
7508 ot = OT_LONG;
7509 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7510 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7511 reg = 8;
7512 }
7513 switch(reg) {
7514 case 0:
7515 case 2:
7516 case 3:
7517 case 4:
7518 case 8:
7519 gen_update_cc_op(s);
7520 gen_jmp_im(pc_start - s->cs_base);
7521 if (b & 2) {
7522 gen_op_mov_TN_reg(ot, 0, rm);
7523 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7524 cpu_T[0]);
7525 gen_jmp_im(s->pc - s->cs_base);
7526 gen_eob(s);
7527 } else {
7528 gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
7529 gen_op_mov_reg_T0(ot, rm);
7530 }
7531 break;
7532 default:
7533 goto illegal_op;
7534 }
7535 }
7536 break;
7537 case 0x121: /* mov reg, drN */
7538 case 0x123: /* mov drN, reg */
7539 if (s->cpl != 0) {
7540 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7541 } else {
7542 modrm = cpu_ldub_code(env, s->pc++);
7543 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7544 * AMD documentation (24594.pdf) and testing of
7545 * intel 386 and 486 processors all show that the mod bits
7546 * are assumed to be 1's, regardless of actual values.
7547 */
7548 rm = (modrm & 7) | REX_B(s);
7549 reg = ((modrm >> 3) & 7) | rex_r;
7550 if (CODE64(s))
7551 ot = OT_QUAD;
7552 else
7553 ot = OT_LONG;
7554 /* XXX: do it dynamically with CR4.DE bit */
7555 if (reg == 4 || reg == 5 || reg >= 8)
7556 goto illegal_op;
7557 if (b & 2) {
7558 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7559 gen_op_mov_TN_reg(ot, 0, rm);
7560 gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
7561 gen_jmp_im(s->pc - s->cs_base);
7562 gen_eob(s);
7563 } else {
7564 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7565 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
7566 gen_op_mov_reg_T0(ot, rm);
7567 }
7568 }
7569 break;
7570 case 0x106: /* clts */
7571 if (s->cpl != 0) {
7572 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7573 } else {
7574 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7575 gen_helper_clts(cpu_env);
7576 /* abort block because static cpu state changed */
7577 gen_jmp_im(s->pc - s->cs_base);
7578 gen_eob(s);
7579 }
7580 break;
7581 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7582 case 0x1c3: /* MOVNTI reg, mem */
7583 if (!(s->cpuid_features & CPUID_SSE2))
7584 goto illegal_op;
7585 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
7586 modrm = cpu_ldub_code(env, s->pc++);
7587 mod = (modrm >> 6) & 3;
7588 if (mod == 3)
7589 goto illegal_op;
7590 reg = ((modrm >> 3) & 7) | rex_r;
7591 /* generate a generic store */
7592 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
7593 break;
7594 case 0x1ae:
7595 modrm = cpu_ldub_code(env, s->pc++);
7596 mod = (modrm >> 6) & 3;
7597 op = (modrm >> 3) & 7;
7598 switch(op) {
7599 case 0: /* fxsave */
7600 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7601 (s->prefix & PREFIX_LOCK))
7602 goto illegal_op;
7603 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7604 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7605 break;
7606 }
7607 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7608 gen_update_cc_op(s);
7609 gen_jmp_im(pc_start - s->cs_base);
7610 gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32((s->dflag == 2)));
7611 break;
7612 case 1: /* fxrstor */
7613 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7614 (s->prefix & PREFIX_LOCK))
7615 goto illegal_op;
7616 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7617 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7618 break;
7619 }
7620 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7621 gen_update_cc_op(s);
7622 gen_jmp_im(pc_start - s->cs_base);
7623 gen_helper_fxrstor(cpu_env, cpu_A0,
7624 tcg_const_i32((s->dflag == 2)));
7625 break;
7626 case 2: /* ldmxcsr */
7627 case 3: /* stmxcsr */
7628 if (s->flags & HF_TS_MASK) {
7629 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7630 break;
7631 }
7632 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7633 mod == 3)
7634 goto illegal_op;
7635 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7636 if (op == 2) {
7637 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7638 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7639 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
7640 } else {
7641 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7642 gen_op_st_T0_A0(OT_LONG + s->mem_index);
7643 }
7644 break;
7645 case 5: /* lfence */
7646 case 6: /* mfence */
7647 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
7648 goto illegal_op;
7649 break;
7650 case 7: /* sfence / clflush */
7651 if ((modrm & 0xc7) == 0xc0) {
7652 /* sfence */
7653 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7654 if (!(s->cpuid_features & CPUID_SSE))
7655 goto illegal_op;
7656 } else {
7657 /* clflush */
7658 if (!(s->cpuid_features & CPUID_CLFLUSH))
7659 goto illegal_op;
7660 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7661 }
7662 break;
7663 default:
7664 goto illegal_op;
7665 }
7666 break;
7667 case 0x10d: /* 3DNow! prefetch(w) */
7668 modrm = cpu_ldub_code(env, s->pc++);
7669 mod = (modrm >> 6) & 3;
7670 if (mod == 3)
7671 goto illegal_op;
7672 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7673 /* ignore for now */
7674 break;
7675 case 0x1aa: /* rsm */
7676 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
7677 if (!(s->flags & HF_SMM_MASK))
7678 goto illegal_op;
7679 gen_update_cc_op(s);
7680 gen_jmp_im(s->pc - s->cs_base);
7681 gen_helper_rsm(cpu_env);
7682 gen_eob(s);
7683 break;
7684 case 0x1b8: /* SSE4.2 popcnt */
7685 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7686 PREFIX_REPZ)
7687 goto illegal_op;
7688 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7689 goto illegal_op;
7690
7691 modrm = cpu_ldub_code(env, s->pc++);
7692 reg = ((modrm >> 3) & 7) | rex_r;
7693
7694 if (s->prefix & PREFIX_DATA)
7695 ot = OT_WORD;
7696 else if (s->dflag != 2)
7697 ot = OT_LONG;
7698 else
7699 ot = OT_QUAD;
7700
7701 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
7702 gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
7703 gen_op_mov_reg_T0(ot, reg);
7704
7705 set_cc_op(s, CC_OP_EFLAGS);
7706 break;
7707 case 0x10e ... 0x10f:
7708 /* 3DNow! instructions, ignore prefixes */
7709 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
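/* fall through */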
7710 case 0x110 ... 0x117:
7711 case 0x128 ... 0x12f:
7712 case 0x138 ... 0x13a:
7713 case 0x150 ... 0x179:
7714 case 0x17c ... 0x17f:
7715 case 0x1c2:
7716 case 0x1c4 ... 0x1c6:
7717 case 0x1d0 ... 0x1fe:
7718 gen_sse(env, s, b, pc_start, rex_r);
7719 break;
7720 default:
7721 goto illegal_op;
7722 }
7723 /* lock generation */
7724 if (s->prefix & PREFIX_LOCK)
7725 gen_helper_unlock();
7726 return s->pc;
7727 illegal_op:
7728 if (s->prefix & PREFIX_LOCK)
7729 gen_helper_unlock();
7730 /* XXX: ensure that no lock was generated */
7731 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7732 return s->pc;
7733 }
7734
7735 void optimize_flags_init(void)
7736 {
7737 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7738 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
7739 offsetof(CPUX86State, cc_op), "cc_op");
7740 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
7741 "cc_src");
7742 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
7743 "cc_dst");
7744
7745 #ifdef TARGET_X86_64
7746 cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
7747 offsetof(CPUX86State, regs[R_EAX]), "rax");
7748 cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
7749 offsetof(CPUX86State, regs[R_ECX]), "rcx");
7750 cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
7751 offsetof(CPUX86State, regs[R_EDX]), "rdx");
7752 cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
7753 offsetof(CPUX86State, regs[R_EBX]), "rbx");
7754 cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
7755 offsetof(CPUX86State, regs[R_ESP]), "rsp");
7756 cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
7757 offsetof(CPUX86State, regs[R_EBP]), "rbp");
7758 cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
7759 offsetof(CPUX86State, regs[R_ESI]), "rsi");
7760 cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
7761 offsetof(CPUX86State, regs[R_EDI]), "rdi");
7762 cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
7763 offsetof(CPUX86State, regs[8]), "r8");
7764 cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
7765 offsetof(CPUX86State, regs[9]), "r9");
7766 cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
7767 offsetof(CPUX86State, regs[10]), "r10");
7768 cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
7769 offsetof(CPUX86State, regs[11]), "r11");
7770 cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
7771 offsetof(CPUX86State, regs[12]), "r12");
7772 cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
7773 offsetof(CPUX86State, regs[13]), "r13");
7774 cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
7775 offsetof(CPUX86State, regs[14]), "r14");
7776 cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
7777 offsetof(CPUX86State, regs[15]), "r15");
7778 #else
7779 cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
7780 offsetof(CPUX86State, regs[R_EAX]), "eax");
7781 cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
7782 offsetof(CPUX86State, regs[R_ECX]), "ecx");
7783 cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
7784 offsetof(CPUX86State, regs[R_EDX]), "edx");
7785 cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
7786 offsetof(CPUX86State, regs[R_EBX]), "ebx");
7787 cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
7788 offsetof(CPUX86State, regs[R_ESP]), "esp");
7789 cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
7790 offsetof(CPUX86State, regs[R_EBP]), "ebp");
7791 cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
7792 offsetof(CPUX86State, regs[R_ESI]), "esi");
7793 cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
7794 offsetof(CPUX86State, regs[R_EDI]), "edi");
7795 #endif
7796
7797 /* register helpers */
7798 #define GEN_HELPER 2
7799 #include "helper.h"
7800 }
7801
7802 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7803 basic block 'tb'. If search_pc is TRUE, also generate PC
7804 information for each intermediate instruction. */
7805 static inline void gen_intermediate_code_internal(CPUX86State *env,
7806 TranslationBlock *tb,
7807 int search_pc)
7808 {
7809 DisasContext dc1, *dc = &dc1;
7810 target_ulong pc_ptr;
7811 uint16_t *gen_opc_end;
7812 CPUBreakpoint *bp;
7813 int j, lj;
7814 uint64_t flags;
7815 target_ulong pc_start;
7816 target_ulong cs_base;
7817 int num_insns;
7818 int max_insns;
7819
7820 /* generate intermediate code */
7821 pc_start = tb->pc;
7822 cs_base = tb->cs_base;
7823 flags = tb->flags;
7824
7825 dc->pe = (flags >> HF_PE_SHIFT) & 1;
7826 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7827 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7828 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7829 dc->f_st = 0;
7830 dc->vm86 = (flags >> VM_SHIFT) & 1;
7831 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7832 dc->iopl = (flags >> IOPL_SHIFT) & 3;
7833 dc->tf = (flags >> TF_SHIFT) & 1;
7834 dc->singlestep_enabled = env->singlestep_enabled;
7835 dc->cc_op = CC_OP_DYNAMIC;
7836 dc->cc_op_dirty = false;
7837 dc->cs_base = cs_base;
7838 dc->tb = tb;
7839 dc->popl_esp_hack = 0;
7840 /* select memory access functions */
7841 dc->mem_index = 0;
7842 if (flags & HF_SOFTMMU_MASK) {
7843 dc->mem_index = (cpu_mmu_index(env) + 1) << 2;
7844 }
7845 dc->cpuid_features = env->cpuid_features;
7846 dc->cpuid_ext_features = env->cpuid_ext_features;
7847 dc->cpuid_ext2_features = env->cpuid_ext2_features;
7848 dc->cpuid_ext3_features = env->cpuid_ext3_features;
7849 dc->cpuid_7_0_ebx_features = env->cpuid_7_0_ebx_features;
7850 #ifdef TARGET_X86_64
7851 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7852 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7853 #endif
7854 dc->flags = flags;
7855 dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
7856 (flags & HF_INHIBIT_IRQ_MASK)
7857 #ifndef CONFIG_SOFTMMU
7858 || (flags & HF_SOFTMMU_MASK)
7859 #endif
7860 );
7861 #if 0
7862 /* check addseg logic */
7863 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
7864 printf("ERROR addseg\n");
7865 #endif
7866
7867 cpu_T[0] = tcg_temp_new();
7868 cpu_T[1] = tcg_temp_new();
7869 cpu_A0 = tcg_temp_new();
7870
7871 cpu_tmp0 = tcg_temp_new();
7872 cpu_tmp1_i64 = tcg_temp_new_i64();
7873 cpu_tmp2_i32 = tcg_temp_new_i32();
7874 cpu_tmp3_i32 = tcg_temp_new_i32();
7875 cpu_tmp4 = tcg_temp_new();
7876 cpu_tmp5 = tcg_temp_new();
7877 cpu_ptr0 = tcg_temp_new_ptr();
7878 cpu_ptr1 = tcg_temp_new_ptr();
7879
7880 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
7881
7882 dc->is_jmp = DISAS_NEXT;
7883 pc_ptr = pc_start;
7884 lj = -1;
7885 num_insns = 0;
7886 max_insns = tb->cflags & CF_COUNT_MASK;
7887 if (max_insns == 0)
7888 max_insns = CF_COUNT_MASK;
7889
7890 gen_icount_start();
7891 for(;;) {
7892 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
7893 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
7894 if (bp->pc == pc_ptr &&
7895 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
7896 gen_debug(dc, pc_ptr - dc->cs_base);
7897 break;
7898 }
7899 }
7900 }
7901 if (search_pc) {
7902 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
7903 if (lj < j) {
7904 lj++;
7905 while (lj < j)
7906 tcg_ctx.gen_opc_instr_start[lj++] = 0;
7907 }
7908 tcg_ctx.gen_opc_pc[lj] = pc_ptr;
7909 gen_opc_cc_op[lj] = dc->cc_op;
7910 tcg_ctx.gen_opc_instr_start[lj] = 1;
7911 tcg_ctx.gen_opc_icount[lj] = num_insns;
7912 }
7913 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
7914 gen_io_start();
7915
7916 pc_ptr = disas_insn(env, dc, pc_ptr);
7917 num_insns++;
7918 /* stop translation if indicated */
7919 if (dc->is_jmp)
7920 break;
7921 /* in single step mode, we generate only one instruction and
7922 then generate an exception */
7923 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7924 the flag and abort the translation to give the irqs a
7925 chance to happen */
7926 if (dc->tf || dc->singlestep_enabled ||
7927 (flags & HF_INHIBIT_IRQ_MASK)) {
7928 gen_jmp_im(pc_ptr - dc->cs_base);
7929 gen_eob(dc);
7930 break;
7931 }
7932 /* if the translation becomes too long, stop generating as well */
7933 if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
7934 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
7935 num_insns >= max_insns) {
7936 gen_jmp_im(pc_ptr - dc->cs_base);
7937 gen_eob(dc);
7938 break;
7939 }
7940 if (singlestep) {
7941 gen_jmp_im(pc_ptr - dc->cs_base);
7942 gen_eob(dc);
7943 break;
7944 }
7945 }
7946 if (tb->cflags & CF_LAST_IO)
7947 gen_io_end();
7948 gen_icount_end(tb, num_insns);
7949 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
7950 /* make sure the last values are filled in */
7951 if (search_pc) {
7952 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
7953 lj++;
7954 while (lj <= j)
7955 tcg_ctx.gen_opc_instr_start[lj++] = 0;
7956 }
7957
7958 #ifdef DEBUG_DISAS
7959 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
7960 int disas_flags;
7961 qemu_log("----------------\n");
7962 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7963 #ifdef TARGET_X86_64
7964 if (dc->code64)
7965 disas_flags = 2;
7966 else
7967 #endif
7968 disas_flags = !dc->code32;
7969 log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
7970 qemu_log("\n");
7971 }
7972 #endif
7973
7974 if (!search_pc) {
7975 tb->size = pc_ptr - pc_start;
7976 tb->icount = num_insns;
7977 }
7978 }
7979
7980 void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
7981 {
7982 gen_intermediate_code_internal(env, tb, 0);
7983 }
7984
7985 void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
7986 {
7987 gen_intermediate_code_internal(env, tb, 1);
7988 }
7989
7990 void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
7991 {
7992 int cc_op;
7993 #ifdef DEBUG_DISAS
7994 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
7995 int i;
7996 qemu_log("RESTORE:\n");
7997 for(i = 0;i <= pc_pos; i++) {
7998 if (tcg_ctx.gen_opc_instr_start[i]) {
7999 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
8000 tcg_ctx.gen_opc_pc[i]);
8001 }
8002 }
8003 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
8004 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
8005 (uint32_t)tb->cs_base);
8006 }
8007 #endif
8008 env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
8009 cc_op = gen_opc_cc_op[pc_pos];
8010 if (cc_op != CC_OP_DYNAMIC)
8011 env->cc_op = cc_op;
8012 }