1 /*
2 * i386 translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdarg.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <inttypes.h>
24 #include <signal.h>
25
26 #include "cpu.h"
27 #include "disas.h"
28 #include "tcg-op.h"
29
30 #include "helper.h"
31 #define GEN_HELPER 1
32 #include "helper.h"
33
34 #define PREFIX_REPZ 0x01
35 #define PREFIX_REPNZ 0x02
36 #define PREFIX_LOCK 0x04
37 #define PREFIX_DATA 0x08
38 #define PREFIX_ADR 0x10
39
40 #ifdef TARGET_X86_64
41 #define CODE64(s) ((s)->code64)
42 #define REX_X(s) ((s)->rex_x)
43 #define REX_B(s) ((s)->rex_b)
44 #else
45 #define CODE64(s) 0
46 #define REX_X(s) 0
47 #define REX_B(s) 0
48 #endif
49
50 //#define MACRO_TEST 1
51
52 /* global register indexes */
53 static TCGv_ptr cpu_env;
54 static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp;
55 static TCGv_i32 cpu_cc_op;
56 static TCGv cpu_regs[CPU_NB_REGS];
57 /* local temps */
58 static TCGv cpu_T[2], cpu_T3;
59 /* local register indexes (only used inside old micro ops) */
60 static TCGv cpu_tmp0, cpu_tmp4;
61 static TCGv_ptr cpu_ptr0, cpu_ptr1;
62 static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
63 static TCGv_i64 cpu_tmp1_i64;
64 static TCGv cpu_tmp5;
65
66 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
67
68 #include "gen-icount.h"
69
70 #ifdef TARGET_X86_64
71 static int x86_64_hregs;
72 #endif
73
74 typedef struct DisasContext {
75 /* current insn context */
76 int override; /* -1 if no override */
77 int prefix;
78 int aflag, dflag;
79 target_ulong pc; /* pc = eip + cs_base */
80 int is_jmp; /* 1 means jump (stop translation), 2 means CPU
81 static state change (stop translation) */
82 /* current block context */
83 target_ulong cs_base; /* base of CS segment */
84 int pe; /* protected mode */
85 int code32; /* 32 bit code segment */
86 #ifdef TARGET_X86_64
87 int lma; /* long mode active */
88 int code64; /* 64 bit code segment */
89 int rex_x, rex_b;
90 #endif
91 int ss32; /* 32 bit stack segment */
92 int cc_op; /* current CC operation */
93 int addseg; /* non-zero if any of DS/ES/SS has a non-zero base */
94 int f_st; /* currently unused */
95 int vm86; /* vm86 mode */
96 int cpl;
97 int iopl;
98 int tf; /* TF cpu flag */
99 int singlestep_enabled; /* "hardware" single step enabled */
100 int jmp_opt; /* use direct block chaining for direct jumps */
101 int mem_index; /* select memory access functions */
102 uint64_t flags; /* all execution flags */
103 struct TranslationBlock *tb;
104 int popl_esp_hack; /* for correct popl with esp base handling */
105 int rip_offset; /* only used in x86_64, but left for simplicity */
106 int cpuid_features;
107 int cpuid_ext_features;
108 int cpuid_ext2_features;
109 int cpuid_ext3_features;
110 int cpuid_7_0_ebx_features;
111 } DisasContext;
112
113 static void gen_eob(DisasContext *s);
114 static void gen_jmp(DisasContext *s, target_ulong eip);
115 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
116
117 /* i386 arith/logic operations */
118 enum {
119 OP_ADDL,
120 OP_ORL,
121 OP_ADCL,
122 OP_SBBL,
123 OP_ANDL,
124 OP_SUBL,
125 OP_XORL,
126 OP_CMPL,
127 };
128
129 /* i386 shift ops */
130 enum {
131 OP_ROL,
132 OP_ROR,
133 OP_RCL,
134 OP_RCR,
135 OP_SHL,
136 OP_SHR,
137 OP_SHL1, /* undocumented */
138 OP_SAR = 7,
139 };
140
141 enum {
142 JCC_O,
143 JCC_B,
144 JCC_Z,
145 JCC_BE,
146 JCC_S,
147 JCC_P,
148 JCC_L,
149 JCC_LE,
150 };
151
152 /* operand size */
153 enum {
154 OT_BYTE = 0,
155 OT_WORD,
156 OT_LONG,
157 OT_QUAD,
158 };
159
160 enum {
161 /* I386 int registers */
162 OR_EAX, /* MUST be even numbered */
163 OR_ECX,
164 OR_EDX,
165 OR_EBX,
166 OR_ESP,
167 OR_EBP,
168 OR_ESI,
169 OR_EDI,
170
171 OR_TMP0 = 16, /* temporary operand register */
172 OR_TMP1,
173 OR_A0, /* temporary register used when doing address evaluation */
174 };
175
176 static inline void gen_op_movl_T0_0(void)
177 {
178 tcg_gen_movi_tl(cpu_T[0], 0);
179 }
180
181 static inline void gen_op_movl_T0_im(int32_t val)
182 {
183 tcg_gen_movi_tl(cpu_T[0], val);
184 }
185
186 static inline void gen_op_movl_T0_imu(uint32_t val)
187 {
188 tcg_gen_movi_tl(cpu_T[0], val);
189 }
190
191 static inline void gen_op_movl_T1_im(int32_t val)
192 {
193 tcg_gen_movi_tl(cpu_T[1], val);
194 }
195
196 static inline void gen_op_movl_T1_imu(uint32_t val)
197 {
198 tcg_gen_movi_tl(cpu_T[1], val);
199 }
200
201 static inline void gen_op_movl_A0_im(uint32_t val)
202 {
203 tcg_gen_movi_tl(cpu_A0, val);
204 }
205
206 #ifdef TARGET_X86_64
207 static inline void gen_op_movq_A0_im(int64_t val)
208 {
209 tcg_gen_movi_tl(cpu_A0, val);
210 }
211 #endif
212
213 static inline void gen_movtl_T0_im(target_ulong val)
214 {
215 tcg_gen_movi_tl(cpu_T[0], val);
216 }
217
218 static inline void gen_movtl_T1_im(target_ulong val)
219 {
220 tcg_gen_movi_tl(cpu_T[1], val);
221 }
222
223 static inline void gen_op_andl_T0_ffff(void)
224 {
225 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
226 }
227
228 static inline void gen_op_andl_T0_im(uint32_t val)
229 {
230 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
231 }
232
233 static inline void gen_op_movl_T0_T1(void)
234 {
235 tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
236 }
237
238 static inline void gen_op_andl_A0_ffff(void)
239 {
240 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
241 }
242
243 #ifdef TARGET_X86_64
244
245 #define NB_OP_SIZES 4
246
247 #else /* !TARGET_X86_64 */
248
249 #define NB_OP_SIZES 3
250
251 #endif /* !TARGET_X86_64 */
252
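/* Offsets of the 8/16/32-bit sub-parts within a target_ulong CPU
   state field; on big-endian hosts the low-order bytes live at the
   high addresses, hence the two variants. */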
253 #if defined(HOST_WORDS_BIGENDIAN)
254 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
255 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
256 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
257 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
258 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
259 #else
260 #define REG_B_OFFSET 0
261 #define REG_H_OFFSET 1
262 #define REG_W_OFFSET 0
263 #define REG_L_OFFSET 0
264 #define REG_LH_OFFSET 4
265 #endif
266
267 /* In instruction encodings for byte register accesses the
268 * register number usually indicates "low 8 bits of register N";
269 * however there are some special cases where N 4..7 indicates
270 * [AH, CH, DH, BH], i.e. "bits 15..8 of register N-4". Return
271 * true for this special case, false otherwise.
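 * (When a REX prefix is present, x86_64_hregs is set and encodings
 * 4..7 name the low bytes SPL/BPL/SIL/DIL instead, so the special
 * case does not apply.)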
272 */
273 static inline bool byte_reg_is_xH(int reg)
274 {
275 if (reg < 4) {
276 return false;
277 }
278 #ifdef TARGET_X86_64
279 if (reg >= 8 || x86_64_hregs) {
280 return false;
281 }
282 #endif
283 return true;
284 }
285
286 static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
287 {
288 switch(ot) {
289 case OT_BYTE:
290 if (!byte_reg_is_xH(reg)) {
291 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
292 } else {
293 tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
294 }
295 break;
296 case OT_WORD:
297 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
298 break;
299 default: /* XXX this shouldn't be reached; abort? */
300 case OT_LONG:
301 /* For x86_64, this sets the higher half of register to zero.
302 For i386, this is equivalent to a mov. */
303 tcg_gen_ext32u_tl(cpu_regs[reg], t0);
304 break;
305 #ifdef TARGET_X86_64
306 case OT_QUAD:
307 tcg_gen_mov_tl(cpu_regs[reg], t0);
308 break;
309 #endif
310 }
311 }
312
313 static inline void gen_op_mov_reg_T0(int ot, int reg)
314 {
315 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
316 }
317
318 static inline void gen_op_mov_reg_T1(int ot, int reg)
319 {
320 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
321 }
322
323 static inline void gen_op_mov_reg_A0(int size, int reg)
324 {
325 switch(size) {
326 case 0:
327 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
328 break;
329 default: /* XXX this shouldn't be reached; abort? */
330 case 1:
331 /* For x86_64, this sets the higher half of register to zero.
332 For i386, this is equivalent to a mov. */
333 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
334 break;
335 #ifdef TARGET_X86_64
336 case 2:
337 tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
338 break;
339 #endif
340 }
341 }
342
343 static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
344 {
345 if (ot == OT_BYTE && byte_reg_is_xH(reg)) {
346 tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
347 tcg_gen_ext8u_tl(t0, t0);
348 } else {
349 tcg_gen_mov_tl(t0, cpu_regs[reg]);
350 }
351 }
352
353 static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
354 {
355 gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
356 }
357
358 static inline void gen_op_movl_A0_reg(int reg)
359 {
360 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
361 }
362
363 static inline void gen_op_addl_A0_im(int32_t val)
364 {
365 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
366 #ifdef TARGET_X86_64
367 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
368 #endif
369 }
370
371 #ifdef TARGET_X86_64
372 static inline void gen_op_addq_A0_im(int64_t val)
373 {
374 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
375 }
376 #endif
377
378 static void gen_add_A0_im(DisasContext *s, int val)
379 {
380 #ifdef TARGET_X86_64
381 if (CODE64(s))
382 gen_op_addq_A0_im(val);
383 else
384 #endif
385 gen_op_addl_A0_im(val);
386 }
387
388 static inline void gen_op_addl_T0_T1(void)
389 {
390 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
391 }
392
393 static inline void gen_op_jmp_T0(void)
394 {
395 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
396 }
397
398 static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
399 {
400 switch(size) {
401 case 0:
402 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
403 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
404 break;
405 case 1:
406 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
407 /* For x86_64, this sets the higher half of register to zero.
408 For i386, this is equivalent to a nop. */
409 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
410 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
411 break;
412 #ifdef TARGET_X86_64
413 case 2:
414 tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
415 break;
416 #endif
417 }
418 }
419
420 static inline void gen_op_add_reg_T0(int size, int reg)
421 {
422 switch(size) {
423 case 0:
424 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
425 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
426 break;
427 case 1:
428 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
429 /* For x86_64, this sets the higher half of register to zero.
430 For i386, this is equivalent to a nop. */
431 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
432 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
433 break;
434 #ifdef TARGET_X86_64
435 case 2:
436 tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
437 break;
438 #endif
439 }
440 }
441
442 static inline void gen_op_set_cc_op(int32_t val)
443 {
444 tcg_gen_movi_i32(cpu_cc_op, val);
445 }
446
447 static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
448 {
449 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
450 if (shift != 0)
451 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
452 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
453 /* For x86_64, this sets the higher half of register to zero.
454 For i386, this is equivalent to a nop. */
455 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
456 }
457
458 static inline void gen_op_movl_A0_seg(int reg)
459 {
460 tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
461 }
462
463 static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
464 {
465 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
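/* In 64-bit code the address is truncated to 32 bits before the
   (possibly 64-bit FS/GS) base is added; in 16/32-bit code the sum
   itself is truncated to 32 bits. */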
466 #ifdef TARGET_X86_64
467 if (CODE64(s)) {
468 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
469 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
470 } else {
471 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
472 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
473 }
474 #else
475 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
476 #endif
477 }
478
479 #ifdef TARGET_X86_64
480 static inline void gen_op_movq_A0_seg(int reg)
481 {
482 tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
483 }
484
485 static inline void gen_op_addq_A0_seg(int reg)
486 {
487 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
488 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
489 }
490
491 static inline void gen_op_movq_A0_reg(int reg)
492 {
493 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
494 }
495
496 static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
497 {
498 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
499 if (shift != 0)
500 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
501 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
502 }
503 #endif
504
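/* The 'idx' argument of the load/store generators packs two fields:
   bits 1..0 hold the operand size (OT_BYTE..OT_QUAD) and the bits
   above hold the memory (MMU) index plus one, already shifted by the
   caller; hence the "ot + s->mem_index" at all call sites. */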
505 static inline void gen_op_lds_T0_A0(int idx)
506 {
507 int mem_index = (idx >> 2) - 1;
508 switch(idx & 3) {
509 case 0:
510 tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
511 break;
512 case 1:
513 tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
514 break;
515 default:
516 case 2:
517 tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
518 break;
519 }
520 }
521
522 static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
523 {
524 int mem_index = (idx >> 2) - 1;
525 switch(idx & 3) {
526 case 0:
527 tcg_gen_qemu_ld8u(t0, a0, mem_index);
528 break;
529 case 1:
530 tcg_gen_qemu_ld16u(t0, a0, mem_index);
531 break;
532 case 2:
533 tcg_gen_qemu_ld32u(t0, a0, mem_index);
534 break;
535 default:
536 case 3:
537 /* Should never happen on 32-bit targets. */
538 #ifdef TARGET_X86_64
539 tcg_gen_qemu_ld64(t0, a0, mem_index);
540 #endif
541 break;
542 }
543 }
544
545 /* XXX: always use ldu or lds */
546 static inline void gen_op_ld_T0_A0(int idx)
547 {
548 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
549 }
550
551 static inline void gen_op_ldu_T0_A0(int idx)
552 {
553 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
554 }
555
556 static inline void gen_op_ld_T1_A0(int idx)
557 {
558 gen_op_ld_v(idx, cpu_T[1], cpu_A0);
559 }
560
561 static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
562 {
563 int mem_index = (idx >> 2) - 1;
564 switch(idx & 3) {
565 case 0:
566 tcg_gen_qemu_st8(t0, a0, mem_index);
567 break;
568 case 1:
569 tcg_gen_qemu_st16(t0, a0, mem_index);
570 break;
571 case 2:
572 tcg_gen_qemu_st32(t0, a0, mem_index);
573 break;
574 default:
575 case 3:
576 /* Should never happen on 32-bit targets. */
577 #ifdef TARGET_X86_64
578 tcg_gen_qemu_st64(t0, a0, mem_index);
579 #endif
580 break;
581 }
582 }
583
584 static inline void gen_op_st_T0_A0(int idx)
585 {
586 gen_op_st_v(idx, cpu_T[0], cpu_A0);
587 }
588
589 static inline void gen_op_st_T1_A0(int idx)
590 {
591 gen_op_st_v(idx, cpu_T[1], cpu_A0);
592 }
593
594 static inline void gen_jmp_im(target_ulong pc)
595 {
596 tcg_gen_movi_tl(cpu_tmp0, pc);
597 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
598 }
599
600 static inline void gen_string_movl_A0_ESI(DisasContext *s)
601 {
602 int override;
603
604 override = s->override;
605 #ifdef TARGET_X86_64
606 if (s->aflag == 2) {
607 if (override >= 0) {
608 gen_op_movq_A0_seg(override);
609 gen_op_addq_A0_reg_sN(0, R_ESI);
610 } else {
611 gen_op_movq_A0_reg(R_ESI);
612 }
613 } else
614 #endif
615 if (s->aflag) {
616 /* 32 bit address */
617 if (s->addseg && override < 0)
618 override = R_DS;
619 if (override >= 0) {
620 gen_op_movl_A0_seg(override);
621 gen_op_addl_A0_reg_sN(0, R_ESI);
622 } else {
623 gen_op_movl_A0_reg(R_ESI);
624 }
625 } else {
626 /* 16 bit address, always override */
627 if (override < 0)
628 override = R_DS;
629 gen_op_movl_A0_reg(R_ESI);
630 gen_op_andl_A0_ffff();
631 gen_op_addl_A0_seg(s, override);
632 }
633 }
634
635 static inline void gen_string_movl_A0_EDI(DisasContext *s)
636 {
637 #ifdef TARGET_X86_64
638 if (s->aflag == 2) {
639 gen_op_movq_A0_reg(R_EDI);
640 } else
641 #endif
642 if (s->aflag) {
643 if (s->addseg) {
644 gen_op_movl_A0_seg(R_ES);
645 gen_op_addl_A0_reg_sN(0, R_EDI);
646 } else {
647 gen_op_movl_A0_reg(R_EDI);
648 }
649 } else {
650 gen_op_movl_A0_reg(R_EDI);
651 gen_op_andl_A0_ffff();
652 gen_op_addl_A0_seg(s, R_ES);
653 }
654 }
655
656 static inline void gen_op_movl_T0_Dshift(int ot)
657 {
658 tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
659 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
660 }
661
662 static void gen_extu(int ot, TCGv reg)
663 {
664 switch(ot) {
665 case OT_BYTE:
666 tcg_gen_ext8u_tl(reg, reg);
667 break;
668 case OT_WORD:
669 tcg_gen_ext16u_tl(reg, reg);
670 break;
671 case OT_LONG:
672 tcg_gen_ext32u_tl(reg, reg);
673 break;
674 default:
675 break;
676 }
677 }
678
679 static void gen_exts(int ot, TCGv reg)
680 {
681 switch(ot) {
682 case OT_BYTE:
683 tcg_gen_ext8s_tl(reg, reg);
684 break;
685 case OT_WORD:
686 tcg_gen_ext16s_tl(reg, reg);
687 break;
688 case OT_LONG:
689 tcg_gen_ext32s_tl(reg, reg);
690 break;
691 default:
692 break;
693 }
694 }
695
696 static inline void gen_op_jnz_ecx(int size, int label1)
697 {
698 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
699 gen_extu(size + 1, cpu_tmp0);
700 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
701 }
702
703 static inline void gen_op_jz_ecx(int size, int label1)
704 {
705 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
706 gen_extu(size + 1, cpu_tmp0);
707 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
708 }
709
710 static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
711 {
712 switch (ot) {
713 case 0: gen_helper_inb(v, n); break;
714 case 1: gen_helper_inw(v, n); break;
715 case 2: gen_helper_inl(v, n); break;
716 }
717
718 }
719
720 static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
721 {
722 switch (ot) {
723 case 0: gen_helper_outb(v, n); break;
724 case 1: gen_helper_outw(v, n); break;
725 case 2: gen_helper_outl(v, n); break;
726 }
727
728 }
729
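/* Generate an I/O permission check for the port number in T0: in
   protected mode with CPL > IOPL, or in vm86 mode, call the check_io
   helper (which checks the TSS I/O permission bitmap), and
   additionally emit the SVM I/O intercept check when the guest runs
   under SVM (HF_SVMI_MASK). */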
730 static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
731 uint32_t svm_flags)
732 {
733 int state_saved;
734 target_ulong next_eip;
735
736 state_saved = 0;
737 if (s->pe && (s->cpl > s->iopl || s->vm86)) {
738 if (s->cc_op != CC_OP_DYNAMIC)
739 gen_op_set_cc_op(s->cc_op);
740 gen_jmp_im(cur_eip);
741 state_saved = 1;
742 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
743 switch (ot) {
744 case 0:
745 gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
746 break;
747 case 1:
748 gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
749 break;
750 case 2:
751 gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
752 break;
753 }
754 }
755 if(s->flags & HF_SVMI_MASK) {
756 if (!state_saved) {
757 if (s->cc_op != CC_OP_DYNAMIC)
758 gen_op_set_cc_op(s->cc_op);
759 gen_jmp_im(cur_eip);
760 }
761 svm_flags |= (1 << (4 + ot));
762 next_eip = s->pc - s->cs_base;
763 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
764 gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
765 tcg_const_i32(svm_flags),
766 tcg_const_i32(next_eip - cur_eip));
767 }
768 }
769
770 static inline void gen_movs(DisasContext *s, int ot)
771 {
772 gen_string_movl_A0_ESI(s);
773 gen_op_ld_T0_A0(ot + s->mem_index);
774 gen_string_movl_A0_EDI(s);
775 gen_op_st_T0_A0(ot + s->mem_index);
776 gen_op_movl_T0_Dshift(ot);
777 gen_op_add_reg_T0(s->aflag, R_ESI);
778 gen_op_add_reg_T0(s->aflag, R_EDI);
779 }
780
781 static inline void gen_update_cc_op(DisasContext *s)
782 {
783 if (s->cc_op != CC_OP_DYNAMIC) {
784 gen_op_set_cc_op(s->cc_op);
785 s->cc_op = CC_OP_DYNAMIC;
786 }
787 }
788
789 static void gen_op_update1_cc(void)
790 {
791 tcg_gen_discard_tl(cpu_cc_src);
792 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
793 }
794
795 static void gen_op_update2_cc(void)
796 {
797 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
798 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
799 }
800
801 static inline void gen_op_cmpl_T0_T1_cc(void)
802 {
803 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
804 tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
805 }
806
807 static inline void gen_op_testl_T0_T1_cc(void)
808 {
809 tcg_gen_discard_tl(cpu_cc_src);
810 tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
811 }
812
813 static void gen_op_update_neg_cc(void)
814 {
815 tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
816 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
817 }
818
819 /* compute eflags.C to reg */
820 static void gen_compute_eflags_c(TCGv reg)
821 {
822 gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_env, cpu_cc_op);
823 tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
824 }
825
826 /* compute all eflags into 'reg' */
827 static void gen_compute_eflags(TCGv reg)
828 {
829 gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_env, cpu_cc_op);
830 tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
831 }
832
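/* Lazy condition codes: cc_op records which operation last set the
   flags and cc_src/cc_dst hold its operands, so EFLAGS is only
   materialized (by the helpers above) when a jcc/setcc actually
   needs a flag. */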
833 static inline void gen_setcc_slow_T0(DisasContext *s, int jcc_op)
834 {
835 if (s->cc_op != CC_OP_DYNAMIC)
836 gen_op_set_cc_op(s->cc_op);
837 switch(jcc_op) {
838 case JCC_O:
839 gen_compute_eflags(cpu_T[0]);
840 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 11);
841 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
842 break;
843 case JCC_B:
844 gen_compute_eflags_c(cpu_T[0]);
845 break;
846 case JCC_Z:
847 gen_compute_eflags(cpu_T[0]);
848 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 6);
849 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
850 break;
851 case JCC_BE:
852 gen_compute_eflags(cpu_tmp0);
853 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6);
854 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
855 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
856 break;
857 case JCC_S:
858 gen_compute_eflags(cpu_T[0]);
859 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 7);
860 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
861 break;
862 case JCC_P:
863 gen_compute_eflags(cpu_T[0]);
864 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 2);
865 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
866 break;
867 case JCC_L:
868 gen_compute_eflags(cpu_tmp0);
869 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
870 tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */
871 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
872 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
873 break;
874 default:
875 case JCC_LE:
876 gen_compute_eflags(cpu_tmp0);
877 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
878 tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */
879 tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */
880 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
881 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
882 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
883 break;
884 }
885 }
886
887 /* return true if setcc_slow is not needed (WARNING: must be kept in
888 sync with gen_jcc1) */
889 static int is_fast_jcc_case(DisasContext *s, int b)
890 {
891 int jcc_op;
892 jcc_op = (b >> 1) & 7;
893 switch(s->cc_op) {
894 /* we optimize the cmp/jcc case */
895 case CC_OP_SUBB:
896 case CC_OP_SUBW:
897 case CC_OP_SUBL:
898 case CC_OP_SUBQ:
899 if (jcc_op == JCC_O || jcc_op == JCC_P)
900 goto slow_jcc;
901 break;
902
903 /* some jumps are easy to compute */
904 case CC_OP_ADDB:
905 case CC_OP_ADDW:
906 case CC_OP_ADDL:
907 case CC_OP_ADDQ:
908
909 case CC_OP_LOGICB:
910 case CC_OP_LOGICW:
911 case CC_OP_LOGICL:
912 case CC_OP_LOGICQ:
913
914 case CC_OP_INCB:
915 case CC_OP_INCW:
916 case CC_OP_INCL:
917 case CC_OP_INCQ:
918
919 case CC_OP_DECB:
920 case CC_OP_DECW:
921 case CC_OP_DECL:
922 case CC_OP_DECQ:
923
924 case CC_OP_SHLB:
925 case CC_OP_SHLW:
926 case CC_OP_SHLL:
927 case CC_OP_SHLQ:
928 if (jcc_op != JCC_Z && jcc_op != JCC_S)
929 goto slow_jcc;
930 break;
931 default:
932 slow_jcc:
933 return 0;
934 }
935 return 1;
936 }
937
938 /* generate a conditional jump to label 'l1' according to jump opcode
939 value 'b'. In the fast case, T0 is guaranteed not to be used. */
940 static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
941 {
942 int inv, jcc_op, size, cond;
943 TCGv t0;
944
945 inv = b & 1;
946 jcc_op = (b >> 1) & 7;
947
948 switch(cc_op) {
949 /* we optimize the cmp/jcc case */
950 case CC_OP_SUBB:
951 case CC_OP_SUBW:
952 case CC_OP_SUBL:
953 case CC_OP_SUBQ:
954
955 size = cc_op - CC_OP_SUBB;
956 switch(jcc_op) {
957 case JCC_Z:
958 fast_jcc_z:
959 switch(size) {
960 case 0:
961 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xff);
962 t0 = cpu_tmp0;
963 break;
964 case 1:
965 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffff);
966 t0 = cpu_tmp0;
967 break;
968 #ifdef TARGET_X86_64
969 case 2:
970 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffffffff);
971 t0 = cpu_tmp0;
972 break;
973 #endif
974 default:
975 t0 = cpu_cc_dst;
976 break;
977 }
978 tcg_gen_brcondi_tl(inv ? TCG_COND_NE : TCG_COND_EQ, t0, 0, l1);
979 break;
980 case JCC_S:
981 fast_jcc_s:
982 switch(size) {
983 case 0:
984 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
985 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
986 0, l1);
987 break;
988 case 1:
989 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
990 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
991 0, l1);
992 break;
993 #ifdef TARGET_X86_64
994 case 2:
995 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
996 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
997 0, l1);
998 break;
999 #endif
1000 default:
1001 tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
1002 0, l1);
1003 break;
1004 }
1005 break;
1006
1007 case JCC_B:
1008 cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
1009 goto fast_jcc_b;
1010 case JCC_BE:
1011 cond = inv ? TCG_COND_GTU : TCG_COND_LEU;
1012 fast_jcc_b:
1013 tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
1014 switch(size) {
1015 case 0:
1016 t0 = cpu_tmp0;
1017 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xff);
1018 tcg_gen_andi_tl(t0, cpu_cc_src, 0xff);
1019 break;
1020 case 1:
1021 t0 = cpu_tmp0;
1022 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffff);
1023 tcg_gen_andi_tl(t0, cpu_cc_src, 0xffff);
1024 break;
1025 #ifdef TARGET_X86_64
1026 case 2:
1027 t0 = cpu_tmp0;
1028 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffffffff);
1029 tcg_gen_andi_tl(t0, cpu_cc_src, 0xffffffff);
1030 break;
1031 #endif
1032 default:
1033 t0 = cpu_cc_src;
1034 break;
1035 }
1036 tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
1037 break;
1038
1039 case JCC_L:
1040 cond = inv ? TCG_COND_GE : TCG_COND_LT;
1041 goto fast_jcc_l;
1042 case JCC_LE:
1043 cond = inv ? TCG_COND_GT : TCG_COND_LE;
1044 fast_jcc_l:
1045 tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
1046 switch(size) {
1047 case 0:
1048 t0 = cpu_tmp0;
1049 tcg_gen_ext8s_tl(cpu_tmp4, cpu_tmp4);
1050 tcg_gen_ext8s_tl(t0, cpu_cc_src);
1051 break;
1052 case 1:
1053 t0 = cpu_tmp0;
1054 tcg_gen_ext16s_tl(cpu_tmp4, cpu_tmp4);
1055 tcg_gen_ext16s_tl(t0, cpu_cc_src);
1056 break;
1057 #ifdef TARGET_X86_64
1058 case 2:
1059 t0 = cpu_tmp0;
1060 tcg_gen_ext32s_tl(cpu_tmp4, cpu_tmp4);
1061 tcg_gen_ext32s_tl(t0, cpu_cc_src);
1062 break;
1063 #endif
1064 default:
1065 t0 = cpu_cc_src;
1066 break;
1067 }
1068 tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
1069 break;
1070
1071 default:
1072 goto slow_jcc;
1073 }
1074 break;
1075
1076 /* some jumps are easy to compute */
1077 case CC_OP_ADDB:
1078 case CC_OP_ADDW:
1079 case CC_OP_ADDL:
1080 case CC_OP_ADDQ:
1081
1082 case CC_OP_ADCB:
1083 case CC_OP_ADCW:
1084 case CC_OP_ADCL:
1085 case CC_OP_ADCQ:
1086
1087 case CC_OP_SBBB:
1088 case CC_OP_SBBW:
1089 case CC_OP_SBBL:
1090 case CC_OP_SBBQ:
1091
1092 case CC_OP_LOGICB:
1093 case CC_OP_LOGICW:
1094 case CC_OP_LOGICL:
1095 case CC_OP_LOGICQ:
1096
1097 case CC_OP_INCB:
1098 case CC_OP_INCW:
1099 case CC_OP_INCL:
1100 case CC_OP_INCQ:
1101
1102 case CC_OP_DECB:
1103 case CC_OP_DECW:
1104 case CC_OP_DECL:
1105 case CC_OP_DECQ:
1106
1107 case CC_OP_SHLB:
1108 case CC_OP_SHLW:
1109 case CC_OP_SHLL:
1110 case CC_OP_SHLQ:
1111
1112 case CC_OP_SARB:
1113 case CC_OP_SARW:
1114 case CC_OP_SARL:
1115 case CC_OP_SARQ:
1116 switch(jcc_op) {
1117 case JCC_Z:
1118 size = (cc_op - CC_OP_ADDB) & 3;
1119 goto fast_jcc_z;
1120 case JCC_S:
1121 size = (cc_op - CC_OP_ADDB) & 3;
1122 goto fast_jcc_s;
1123 default:
1124 goto slow_jcc;
1125 }
1126 break;
1127 default:
1128 slow_jcc:
1129 gen_setcc_slow_T0(s, jcc_op);
1130 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
1131 cpu_T[0], 0, l1);
1132 break;
1133 }
1134 }
1135
1136 /* XXX: does not work with gdbstub "ice" single step - not a
1137 serious problem */
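/* Emit the ECX == 0 early exit of a rep-prefixed string op: if ECX
   is zero, jump to the instruction following the string op. Returns
   the label of that exit path so callers can branch back to it. */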
1138 static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
1139 {
1140 int l1, l2;
1141
1142 l1 = gen_new_label();
1143 l2 = gen_new_label();
1144 gen_op_jnz_ecx(s->aflag, l1);
1145 gen_set_label(l2);
1146 gen_jmp_tb(s, next_eip, 1);
1147 gen_set_label(l1);
1148 return l2;
1149 }
1150
1151 static inline void gen_stos(DisasContext *s, int ot)
1152 {
1153 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1154 gen_string_movl_A0_EDI(s);
1155 gen_op_st_T0_A0(ot + s->mem_index);
1156 gen_op_movl_T0_Dshift(ot);
1157 gen_op_add_reg_T0(s->aflag, R_EDI);
1158 }
1159
1160 static inline void gen_lods(DisasContext *s, int ot)
1161 {
1162 gen_string_movl_A0_ESI(s);
1163 gen_op_ld_T0_A0(ot + s->mem_index);
1164 gen_op_mov_reg_T0(ot, R_EAX);
1165 gen_op_movl_T0_Dshift(ot);
1166 gen_op_add_reg_T0(s->aflag, R_ESI);
1167 }
1168
1169 static inline void gen_scas(DisasContext *s, int ot)
1170 {
1171 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1172 gen_string_movl_A0_EDI(s);
1173 gen_op_ld_T1_A0(ot + s->mem_index);
1174 gen_op_cmpl_T0_T1_cc();
1175 gen_op_movl_T0_Dshift(ot);
1176 gen_op_add_reg_T0(s->aflag, R_EDI);
1177 }
1178
1179 static inline void gen_cmps(DisasContext *s, int ot)
1180 {
1181 gen_string_movl_A0_ESI(s);
1182 gen_op_ld_T0_A0(ot + s->mem_index);
1183 gen_string_movl_A0_EDI(s);
1184 gen_op_ld_T1_A0(ot + s->mem_index);
1185 gen_op_cmpl_T0_T1_cc();
1186 gen_op_movl_T0_Dshift(ot);
1187 gen_op_add_reg_T0(s->aflag, R_ESI);
1188 gen_op_add_reg_T0(s->aflag, R_EDI);
1189 }
1190
1191 static inline void gen_ins(DisasContext *s, int ot)
1192 {
1193 if (use_icount)
1194 gen_io_start();
1195 gen_string_movl_A0_EDI(s);
1196 /* Note: we must do this dummy write first to be restartable in
1197 case of page fault. */
1198 gen_op_movl_T0_0();
1199 gen_op_st_T0_A0(ot + s->mem_index);
1200 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1201 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1202 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1203 gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
1204 gen_op_st_T0_A0(ot + s->mem_index);
1205 gen_op_movl_T0_Dshift(ot);
1206 gen_op_add_reg_T0(s->aflag, R_EDI);
1207 if (use_icount)
1208 gen_io_end();
1209 }
1210
1211 static inline void gen_outs(DisasContext *s, int ot)
1212 {
1213 if (use_icount)
1214 gen_io_start();
1215 gen_string_movl_A0_ESI(s);
1216 gen_op_ld_T0_A0(ot + s->mem_index);
1217
1218 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1219 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1220 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1221 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
1222 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
1223
1224 gen_op_movl_T0_Dshift(ot);
1225 gen_op_add_reg_T0(s->aflag, R_ESI);
1226 if (use_icount)
1227 gen_io_end();
1228 }
1229
1230 /* same method as Valgrind: we generate jumps to current or next
1231 instruction */
1232 #define GEN_REPZ(op) \
1233 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1234 target_ulong cur_eip, target_ulong next_eip) \
1235 { \
1236 int l2;\
1237 gen_update_cc_op(s); \
1238 l2 = gen_jz_ecx_string(s, next_eip); \
1239 gen_ ## op(s, ot); \
1240 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1241 /* a loop would cause two single step exceptions if ECX = 1 \
1242 before rep string_insn */ \
1243 if (!s->jmp_opt) \
1244 gen_op_jz_ecx(s->aflag, l2); \
1245 gen_jmp(s, cur_eip); \
1246 }
1247
1248 #define GEN_REPZ2(op) \
1249 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1250 target_ulong cur_eip, \
1251 target_ulong next_eip, \
1252 int nz) \
1253 { \
1254 int l2;\
1255 gen_update_cc_op(s); \
1256 l2 = gen_jz_ecx_string(s, next_eip); \
1257 gen_ ## op(s, ot); \
1258 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1259 gen_op_set_cc_op(CC_OP_SUBB + ot); \
1260 gen_jcc1(s, CC_OP_SUBB + ot, (JCC_Z << 1) | (nz ^ 1), l2); \
1261 if (!s->jmp_opt) \
1262 gen_op_jz_ecx(s->aflag, l2); \
1263 gen_jmp(s, cur_eip); \
1264 }
1265
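/* Instantiate the rep-prefixed wrappers; e.g. GEN_REPZ(movs) defines
   gen_repz_movs(s, ot, cur_eip, next_eip). SCAS and CMPS also test
   ZF, hence the separate GEN_REPZ2 variant. */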
1266 GEN_REPZ(movs)
1267 GEN_REPZ(stos)
1268 GEN_REPZ(lods)
1269 GEN_REPZ(ins)
1270 GEN_REPZ(outs)
1271 GEN_REPZ2(scas)
1272 GEN_REPZ2(cmps)
1273
1274 static void gen_helper_fp_arith_ST0_FT0(int op)
1275 {
1276 switch (op) {
1277 case 0:
1278 gen_helper_fadd_ST0_FT0(cpu_env);
1279 break;
1280 case 1:
1281 gen_helper_fmul_ST0_FT0(cpu_env);
1282 break;
1283 case 2:
1284 gen_helper_fcom_ST0_FT0(cpu_env);
1285 break;
1286 case 3:
1287 gen_helper_fcom_ST0_FT0(cpu_env);
1288 break;
1289 case 4:
1290 gen_helper_fsub_ST0_FT0(cpu_env);
1291 break;
1292 case 5:
1293 gen_helper_fsubr_ST0_FT0(cpu_env);
1294 break;
1295 case 6:
1296 gen_helper_fdiv_ST0_FT0(cpu_env);
1297 break;
1298 case 7:
1299 gen_helper_fdivr_ST0_FT0(cpu_env);
1300 break;
1301 }
1302 }
1303
1304 /* NOTE the exception in "r" op ordering */
1305 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1306 {
1307 TCGv_i32 tmp = tcg_const_i32(opreg);
1308 switch (op) {
1309 case 0:
1310 gen_helper_fadd_STN_ST0(cpu_env, tmp);
1311 break;
1312 case 1:
1313 gen_helper_fmul_STN_ST0(cpu_env, tmp);
1314 break;
1315 case 4:
1316 gen_helper_fsubr_STN_ST0(cpu_env, tmp);
1317 break;
1318 case 5:
1319 gen_helper_fsub_STN_ST0(cpu_env, tmp);
1320 break;
1321 case 6:
1322 gen_helper_fdivr_STN_ST0(cpu_env, tmp);
1323 break;
1324 case 7:
1325 gen_helper_fdiv_STN_ST0(cpu_env, tmp);
1326 break;
1327 }
1328 }
1329
1330 /* if d == OR_TMP0, it means memory operand (address in A0) */
1331 static void gen_op(DisasContext *s1, int op, int ot, int d)
1332 {
1333 if (d != OR_TMP0) {
1334 gen_op_mov_TN_reg(ot, 0, d);
1335 } else {
1336 gen_op_ld_T0_A0(ot + s1->mem_index);
1337 }
1338 switch(op) {
1339 case OP_ADCL:
1340 if (s1->cc_op != CC_OP_DYNAMIC)
1341 gen_op_set_cc_op(s1->cc_op);
1342 gen_compute_eflags_c(cpu_tmp4);
1343 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1344 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1345 if (d != OR_TMP0)
1346 gen_op_mov_reg_T0(ot, d);
1347 else
1348 gen_op_st_T0_A0(ot + s1->mem_index);
1349 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1350 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
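/* Select the cc_op at run time: the carry value (0 or 1) shifted
   left by 2 turns CC_OP_ADDB + ot into CC_OP_ADCB + ot, the ADC
   group sitting four entries after the ADD group (the SBB case
   below plays the same trick with CC_OP_SUBB). */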
1351 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1352 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1353 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
1354 s1->cc_op = CC_OP_DYNAMIC;
1355 break;
1356 case OP_SBBL:
1357 if (s1->cc_op != CC_OP_DYNAMIC)
1358 gen_op_set_cc_op(s1->cc_op);
1359 gen_compute_eflags_c(cpu_tmp4);
1360 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1361 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1362 if (d != OR_TMP0)
1363 gen_op_mov_reg_T0(ot, d);
1364 else
1365 gen_op_st_T0_A0(ot + s1->mem_index);
1366 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1367 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1368 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1369 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1370 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot);
1371 s1->cc_op = CC_OP_DYNAMIC;
1372 break;
1373 case OP_ADDL:
1374 gen_op_addl_T0_T1();
1375 if (d != OR_TMP0)
1376 gen_op_mov_reg_T0(ot, d);
1377 else
1378 gen_op_st_T0_A0(ot + s1->mem_index);
1379 gen_op_update2_cc();
1380 s1->cc_op = CC_OP_ADDB + ot;
1381 break;
1382 case OP_SUBL:
1383 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1384 if (d != OR_TMP0)
1385 gen_op_mov_reg_T0(ot, d);
1386 else
1387 gen_op_st_T0_A0(ot + s1->mem_index);
1388 gen_op_update2_cc();
1389 s1->cc_op = CC_OP_SUBB + ot;
1390 break;
1391 default:
1392 case OP_ANDL:
1393 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1394 if (d != OR_TMP0)
1395 gen_op_mov_reg_T0(ot, d);
1396 else
1397 gen_op_st_T0_A0(ot + s1->mem_index);
1398 gen_op_update1_cc();
1399 s1->cc_op = CC_OP_LOGICB + ot;
1400 break;
1401 case OP_ORL:
1402 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1403 if (d != OR_TMP0)
1404 gen_op_mov_reg_T0(ot, d);
1405 else
1406 gen_op_st_T0_A0(ot + s1->mem_index);
1407 gen_op_update1_cc();
1408 s1->cc_op = CC_OP_LOGICB + ot;
1409 break;
1410 case OP_XORL:
1411 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1412 if (d != OR_TMP0)
1413 gen_op_mov_reg_T0(ot, d);
1414 else
1415 gen_op_st_T0_A0(ot + s1->mem_index);
1416 gen_op_update1_cc();
1417 s1->cc_op = CC_OP_LOGICB + ot;
1418 break;
1419 case OP_CMPL:
1420 gen_op_cmpl_T0_T1_cc();
1421 s1->cc_op = CC_OP_SUBB + ot;
1422 break;
1423 }
1424 }
1425
1426 /* if d == OR_TMP0, it means memory operand (address in A0) */
1427 static void gen_inc(DisasContext *s1, int ot, int d, int c)
1428 {
1429 if (d != OR_TMP0)
1430 gen_op_mov_TN_reg(ot, 0, d);
1431 else
1432 gen_op_ld_T0_A0(ot + s1->mem_index);
1433 if (s1->cc_op != CC_OP_DYNAMIC)
1434 gen_op_set_cc_op(s1->cc_op);
1435 if (c > 0) {
1436 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
1437 s1->cc_op = CC_OP_INCB + ot;
1438 } else {
1439 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
1440 s1->cc_op = CC_OP_DECB + ot;
1441 }
1442 if (d != OR_TMP0)
1443 gen_op_mov_reg_T0(ot, d);
1444 else
1445 gen_op_st_T0_A0(ot + s1->mem_index);
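/* INC/DEC leave CF untouched, so stash the current carry in cc_src
   for the CC_OP_INC/CC_OP_DEC flag helpers to reproduce. */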
1446 gen_compute_eflags_c(cpu_cc_src);
1447 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1448 }
1449
1450 static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
1451 int is_right, int is_arith)
1452 {
1453 target_ulong mask;
1454 int shift_label;
1455 TCGv t0, t1, t2;
1456
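/* The hardware masks the shift count to 5 bits, or 6 bits for
   64-bit operands. */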
1457 if (ot == OT_QUAD) {
1458 mask = 0x3f;
1459 } else {
1460 mask = 0x1f;
1461 }
1462
1463 /* load */
1464 if (op1 == OR_TMP0) {
1465 gen_op_ld_T0_A0(ot + s->mem_index);
1466 } else {
1467 gen_op_mov_TN_reg(ot, 0, op1);
1468 }
1469
1470 t0 = tcg_temp_local_new();
1471 t1 = tcg_temp_local_new();
1472 t2 = tcg_temp_local_new();
1473
1474 tcg_gen_andi_tl(t2, cpu_T[1], mask);
1475
1476 if (is_right) {
1477 if (is_arith) {
1478 gen_exts(ot, cpu_T[0]);
1479 tcg_gen_mov_tl(t0, cpu_T[0]);
1480 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], t2);
1481 } else {
1482 gen_extu(ot, cpu_T[0]);
1483 tcg_gen_mov_tl(t0, cpu_T[0]);
1484 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], t2);
1485 }
1486 } else {
1487 tcg_gen_mov_tl(t0, cpu_T[0]);
1488 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], t2);
1489 }
1490
1491 /* store */
1492 if (op1 == OR_TMP0) {
1493 gen_op_st_T0_A0(ot + s->mem_index);
1494 } else {
1495 gen_op_mov_reg_T0(ot, op1);
1496 }
1497
1498 /* update eflags if non zero shift */
1499 if (s->cc_op != CC_OP_DYNAMIC) {
1500 gen_op_set_cc_op(s->cc_op);
1501 }
1502
1503 tcg_gen_mov_tl(t1, cpu_T[0]);
1504
1505 shift_label = gen_new_label();
1506 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, shift_label);
1507
1508 tcg_gen_addi_tl(t2, t2, -1);
1509 tcg_gen_mov_tl(cpu_cc_dst, t1);
1510
1511 if (is_right) {
1512 if (is_arith) {
1513 tcg_gen_sar_tl(cpu_cc_src, t0, t2);
1514 } else {
1515 tcg_gen_shr_tl(cpu_cc_src, t0, t2);
1516 }
1517 } else {
1518 tcg_gen_shl_tl(cpu_cc_src, t0, t2);
1519 }
1520
1521 if (is_right) {
1522 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
1523 } else {
1524 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
1525 }
1526
1527 gen_set_label(shift_label);
1528 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1529
1530 tcg_temp_free(t0);
1531 tcg_temp_free(t1);
1532 tcg_temp_free(t2);
1533 }
1534
1535 static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
1536 int is_right, int is_arith)
1537 {
1538 int mask;
1539
1540 if (ot == OT_QUAD)
1541 mask = 0x3f;
1542 else
1543 mask = 0x1f;
1544
1545 /* load */
1546 if (op1 == OR_TMP0)
1547 gen_op_ld_T0_A0(ot + s->mem_index);
1548 else
1549 gen_op_mov_TN_reg(ot, 0, op1);
1550
1551 op2 &= mask;
1552 if (op2 != 0) {
1553 if (is_right) {
1554 if (is_arith) {
1555 gen_exts(ot, cpu_T[0]);
1556 tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1557 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
1558 } else {
1559 gen_extu(ot, cpu_T[0]);
1560 tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1561 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
1562 }
1563 } else {
1564 tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1565 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
1566 }
1567 }
1568
1569 /* store */
1570 if (op1 == OR_TMP0)
1571 gen_op_st_T0_A0(ot + s->mem_index);
1572 else
1573 gen_op_mov_reg_T0(ot, op1);
1574
1575 /* update eflags if non zero shift */
1576 if (op2 != 0) {
1577 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
1578 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1579 if (is_right)
1580 s->cc_op = CC_OP_SARB + ot;
1581 else
1582 s->cc_op = CC_OP_SHLB + ot;
1583 }
1584 }
1585
1586 static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
1587 {
1588 if (arg2 >= 0)
1589 tcg_gen_shli_tl(ret, arg1, arg2);
1590 else
1591 tcg_gen_shri_tl(ret, arg1, -arg2);
1592 }
1593
1594 static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
1595 int is_right)
1596 {
1597 target_ulong mask;
1598 int label1, label2, data_bits;
1599 TCGv t0, t1, t2, a0;
1600
1601 /* XXX: inefficient, but we must use local temps */
1602 t0 = tcg_temp_local_new();
1603 t1 = tcg_temp_local_new();
1604 t2 = tcg_temp_local_new();
1605 a0 = tcg_temp_local_new();
1606
1607 if (ot == OT_QUAD)
1608 mask = 0x3f;
1609 else
1610 mask = 0x1f;
1611
1612 /* load */
1613 if (op1 == OR_TMP0) {
1614 tcg_gen_mov_tl(a0, cpu_A0);
1615 gen_op_ld_v(ot + s->mem_index, t0, a0);
1616 } else {
1617 gen_op_mov_v_reg(ot, t0, op1);
1618 }
1619
1620 tcg_gen_mov_tl(t1, cpu_T[1]);
1621
1622 tcg_gen_andi_tl(t1, t1, mask);
1623
1624 /* Must test zero case to avoid using undefined behaviour in TCG
1625 shifts. */
1626 label1 = gen_new_label();
1627 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);
1628
1629 if (ot <= OT_WORD)
1630 tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
1631 else
1632 tcg_gen_mov_tl(cpu_tmp0, t1);
1633
1634 gen_extu(ot, t0);
1635 tcg_gen_mov_tl(t2, t0);
1636
1637 data_bits = 8 << ot;
1638 /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
1639 fix TCG definition) */
1640 if (is_right) {
1641 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
1642 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1643 tcg_gen_shl_tl(t0, t0, cpu_tmp0);
1644 } else {
1645 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
1646 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1647 tcg_gen_shr_tl(t0, t0, cpu_tmp0);
1648 }
1649 tcg_gen_or_tl(t0, t0, cpu_tmp4);
1650
1651 gen_set_label(label1);
1652 /* store */
1653 if (op1 == OR_TMP0) {
1654 gen_op_st_v(ot + s->mem_index, t0, a0);
1655 } else {
1656 gen_op_mov_reg_v(ot, op1, t0);
1657 }
1658
1659 /* update eflags */
1660 if (s->cc_op != CC_OP_DYNAMIC)
1661 gen_op_set_cc_op(s->cc_op);
1662
1663 label2 = gen_new_label();
1664 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2);
1665
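/* Rotates leave every flag except C and O unchanged, so compute the
   full EFLAGS value, clear C/O, and OR in the fresh bits. */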
1666 gen_compute_eflags(cpu_cc_src);
1667 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1668 tcg_gen_xor_tl(cpu_tmp0, t2, t0);
1669 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1670 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1671 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1672 if (is_right) {
1673 tcg_gen_shri_tl(t0, t0, data_bits - 1);
1674 }
1675 tcg_gen_andi_tl(t0, t0, CC_C);
1676 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1677
1678 tcg_gen_discard_tl(cpu_cc_dst);
1679 tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
1680
1681 gen_set_label(label2);
1682 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1683
1684 tcg_temp_free(t0);
1685 tcg_temp_free(t1);
1686 tcg_temp_free(t2);
1687 tcg_temp_free(a0);
1688 }
1689
1690 static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
1691 int is_right)
1692 {
1693 int mask;
1694 int data_bits;
1695 TCGv t0, t1, a0;
1696
1697 /* XXX: inefficient, but we must use local temps */
1698 t0 = tcg_temp_local_new();
1699 t1 = tcg_temp_local_new();
1700 a0 = tcg_temp_local_new();
1701
1702 if (ot == OT_QUAD)
1703 mask = 0x3f;
1704 else
1705 mask = 0x1f;
1706
1707 /* load */
1708 if (op1 == OR_TMP0) {
1709 tcg_gen_mov_tl(a0, cpu_A0);
1710 gen_op_ld_v(ot + s->mem_index, t0, a0);
1711 } else {
1712 gen_op_mov_v_reg(ot, t0, op1);
1713 }
1714
1715 gen_extu(ot, t0);
1716 tcg_gen_mov_tl(t1, t0);
1717
1718 op2 &= mask;
1719 data_bits = 8 << ot;
1720 if (op2 != 0) {
1721 int shift = op2 & ((1 << (3 + ot)) - 1);
1722 if (is_right) {
1723 tcg_gen_shri_tl(cpu_tmp4, t0, shift);
1724 tcg_gen_shli_tl(t0, t0, data_bits - shift);
1725 }
1726 else {
1727 tcg_gen_shli_tl(cpu_tmp4, t0, shift);
1728 tcg_gen_shri_tl(t0, t0, data_bits - shift);
1729 }
1730 tcg_gen_or_tl(t0, t0, cpu_tmp4);
1731 }
1732
1733 /* store */
1734 if (op1 == OR_TMP0) {
1735 gen_op_st_v(ot + s->mem_index, t0, a0);
1736 } else {
1737 gen_op_mov_reg_v(ot, op1, t0);
1738 }
1739
1740 if (op2 != 0) {
1741 /* update eflags */
1742 if (s->cc_op != CC_OP_DYNAMIC)
1743 gen_op_set_cc_op(s->cc_op);
1744
1745 gen_compute_eflags(cpu_cc_src);
1746 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1747 tcg_gen_xor_tl(cpu_tmp0, t1, t0);
1748 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1749 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1750 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1751 if (is_right) {
1752 tcg_gen_shri_tl(t0, t0, data_bits - 1);
1753 }
1754 tcg_gen_andi_tl(t0, t0, CC_C);
1755 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1756
1757 tcg_gen_discard_tl(cpu_cc_dst);
1758 tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
1759 s->cc_op = CC_OP_EFLAGS;
1760 }
1761
1762 tcg_temp_free(t0);
1763 tcg_temp_free(t1);
1764 tcg_temp_free(a0);
1765 }
1766
1767 /* XXX: add faster immediate = 1 case */
1768 static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
1769 int is_right)
1770 {
1771 int label1;
1772
1773 if (s->cc_op != CC_OP_DYNAMIC)
1774 gen_op_set_cc_op(s->cc_op);
1775
1776 /* load */
1777 if (op1 == OR_TMP0)
1778 gen_op_ld_T0_A0(ot + s->mem_index);
1779 else
1780 gen_op_mov_TN_reg(ot, 0, op1);
1781
1782 if (is_right) {
1783 switch (ot) {
1784 case 0:
1785 gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1786 break;
1787 case 1:
1788 gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1789 break;
1790 case 2:
1791 gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1792 break;
1793 #ifdef TARGET_X86_64
1794 case 3:
1795 gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1796 break;
1797 #endif
1798 }
1799 } else {
1800 switch (ot) {
1801 case 0:
1802 gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1803 break;
1804 case 1:
1805 gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1806 break;
1807 case 2:
1808 gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1809 break;
1810 #ifdef TARGET_X86_64
1811 case 3:
1812 gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1813 break;
1814 #endif
1815 }
1816 }
1817 /* store */
1818 if (op1 == OR_TMP0)
1819 gen_op_st_T0_A0(ot + s->mem_index);
1820 else
1821 gen_op_mov_reg_T0(ot, op1);
1822
1823 /* update eflags */
1824 label1 = gen_new_label();
1825 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cc_tmp, -1, label1);
1826
1827 tcg_gen_mov_tl(cpu_cc_src, cpu_cc_tmp);
1828 tcg_gen_discard_tl(cpu_cc_dst);
1829 tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
1830
1831 gen_set_label(label1);
1832 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1833 }
1834
1835 /* XXX: add faster immediate case */
1836 static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
1837 int is_right)
1838 {
1839 int label1, label2, data_bits;
1840 target_ulong mask;
1841 TCGv t0, t1, t2, a0;
1842
1843 t0 = tcg_temp_local_new();
1844 t1 = tcg_temp_local_new();
1845 t2 = tcg_temp_local_new();
1846 a0 = tcg_temp_local_new();
1847
1848 if (ot == OT_QUAD)
1849 mask = 0x3f;
1850 else
1851 mask = 0x1f;
1852
1853 /* load */
1854 if (op1 == OR_TMP0) {
1855 tcg_gen_mov_tl(a0, cpu_A0);
1856 gen_op_ld_v(ot + s->mem_index, t0, a0);
1857 } else {
1858 gen_op_mov_v_reg(ot, t0, op1);
1859 }
1860
1861 tcg_gen_andi_tl(cpu_T3, cpu_T3, mask);
1862
1863 tcg_gen_mov_tl(t1, cpu_T[1]);
1864 tcg_gen_mov_tl(t2, cpu_T3);
1865
1866 /* Must test zero case to avoid using undefined behaviour in TCG
1867 shifts. */
1868 label1 = gen_new_label();
1869 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
1870
1871 tcg_gen_addi_tl(cpu_tmp5, t2, -1);
1872 if (ot == OT_WORD) {
1873 /* Note: we implement the Intel behaviour for shift count > 16 */
1874 if (is_right) {
1875 tcg_gen_andi_tl(t0, t0, 0xffff);
1876 tcg_gen_shli_tl(cpu_tmp0, t1, 16);
1877 tcg_gen_or_tl(t0, t0, cpu_tmp0);
1878 tcg_gen_ext32u_tl(t0, t0);
1879
1880 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
1881
1882 /* only needed if count > 16, but a test would complicate the code */
1883 tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
1884 tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);
1885
1886 tcg_gen_shr_tl(t0, t0, t2);
1887
1888 tcg_gen_or_tl(t0, t0, cpu_tmp0);
1889 } else {
1890 /* XXX: not optimal */
1891 tcg_gen_andi_tl(t0, t0, 0xffff);
1892 tcg_gen_shli_tl(t1, t1, 16);
1893 tcg_gen_or_tl(t1, t1, t0);
1894 tcg_gen_ext32u_tl(t1, t1);
1895
1896 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
1897 tcg_gen_subfi_tl(cpu_tmp0, 32, cpu_tmp5);
1898 tcg_gen_shr_tl(cpu_tmp5, t1, cpu_tmp0);
1899 tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp5);
1900
1901 tcg_gen_shl_tl(t0, t0, t2);
1902 tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
1903 tcg_gen_shr_tl(t1, t1, cpu_tmp5);
1904 tcg_gen_or_tl(t0, t0, t1);
1905 }
1906 } else {
1907 data_bits = 8 << ot;
1908 if (is_right) {
1909 if (ot == OT_LONG)
1910 tcg_gen_ext32u_tl(t0, t0);
1911
1912 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
1913
1914 tcg_gen_shr_tl(t0, t0, t2);
1915 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
1916 tcg_gen_shl_tl(t1, t1, cpu_tmp5);
1917 tcg_gen_or_tl(t0, t0, t1);
1918
1919 } else {
1920 if (ot == OT_LONG)
1921 tcg_gen_ext32u_tl(t1, t1);
1922
1923 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
1924
1925 tcg_gen_shl_tl(t0, t0, t2);
1926 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
1927 tcg_gen_shr_tl(t1, t1, cpu_tmp5);
1928 tcg_gen_or_tl(t0, t0, t1);
1929 }
1930 }
1931 tcg_gen_mov_tl(t1, cpu_tmp4);
1932
1933 gen_set_label(label1);
1934 /* store */
1935 if (op1 == OR_TMP0) {
1936 gen_op_st_v(ot + s->mem_index, t0, a0);
1937 } else {
1938 gen_op_mov_reg_v(ot, op1, t0);
1939 }
1940
1941 /* update eflags */
1942 if (s->cc_op != CC_OP_DYNAMIC)
1943 gen_op_set_cc_op(s->cc_op);
1944
1945 label2 = gen_new_label();
1946 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2);
1947
1948 tcg_gen_mov_tl(cpu_cc_src, t1);
1949 tcg_gen_mov_tl(cpu_cc_dst, t0);
1950 if (is_right) {
1951 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
1952 } else {
1953 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
1954 }
1955 gen_set_label(label2);
1956 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1957
1958 tcg_temp_free(t0);
1959 tcg_temp_free(t1);
1960 tcg_temp_free(t2);
1961 tcg_temp_free(a0);
1962 }
1963
1964 static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
1965 {
1966 if (s != OR_TMP1)
1967 gen_op_mov_TN_reg(ot, 1, s);
1968 switch(op) {
1969 case OP_ROL:
1970 gen_rot_rm_T1(s1, ot, d, 0);
1971 break;
1972 case OP_ROR:
1973 gen_rot_rm_T1(s1, ot, d, 1);
1974 break;
1975 case OP_SHL:
1976 case OP_SHL1:
1977 gen_shift_rm_T1(s1, ot, d, 0, 0);
1978 break;
1979 case OP_SHR:
1980 gen_shift_rm_T1(s1, ot, d, 1, 0);
1981 break;
1982 case OP_SAR:
1983 gen_shift_rm_T1(s1, ot, d, 1, 1);
1984 break;
1985 case OP_RCL:
1986 gen_rotc_rm_T1(s1, ot, d, 0);
1987 break;
1988 case OP_RCR:
1989 gen_rotc_rm_T1(s1, ot, d, 1);
1990 break;
1991 }
1992 }
1993
1994 static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
1995 {
1996 switch(op) {
1997 case OP_ROL:
1998 gen_rot_rm_im(s1, ot, d, c, 0);
1999 break;
2000 case OP_ROR:
2001 gen_rot_rm_im(s1, ot, d, c, 1);
2002 break;
2003 case OP_SHL:
2004 case OP_SHL1:
2005 gen_shift_rm_im(s1, ot, d, c, 0, 0);
2006 break;
2007 case OP_SHR:
2008 gen_shift_rm_im(s1, ot, d, c, 1, 0);
2009 break;
2010 case OP_SAR:
2011 gen_shift_rm_im(s1, ot, d, c, 1, 1);
2012 break;
2013 default:
2014 /* currently not optimized */
2015 gen_op_movl_T1_im(c);
2016 gen_shift(s1, op, ot, d, OR_TMP1);
2017 break;
2018 }
2019 }
2020
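/* Decode a ModRM memory operand (including any SIB byte and
   displacement), leaving the effective address in A0 with the
   appropriate segment base added. *reg_ptr/*offset_ptr always come
   back as OR_A0/0. */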
2021 static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr)
2022 {
2023 target_long disp;
2024 int havesib;
2025 int base;
2026 int index;
2027 int scale;
2028 int opreg;
2029 int mod, rm, code, override, must_add_seg;
2030
2031 override = s->override;
2032 must_add_seg = s->addseg;
2033 if (override >= 0)
2034 must_add_seg = 1;
2035 mod = (modrm >> 6) & 3;
2036 rm = modrm & 7;
2037
2038 if (s->aflag) {
2039
2040 havesib = 0;
2041 base = rm;
2042 index = 0;
2043 scale = 0;
2044
2045 if (base == 4) {
2046 havesib = 1;
2047 code = cpu_ldub_code(cpu_single_env, s->pc++);
2048 scale = (code >> 6) & 3;
2049 index = ((code >> 3) & 7) | REX_X(s);
2050 base = (code & 7);
2051 }
2052 base |= REX_B(s);
2053
2054 switch (mod) {
2055 case 0:
2056 if ((base & 7) == 5) {
2057 base = -1;
2058 disp = (int32_t)cpu_ldl_code(cpu_single_env, s->pc);
2059 s->pc += 4;
2060 if (CODE64(s) && !havesib) {
2061 disp += s->pc + s->rip_offset;
2062 }
2063 } else {
2064 disp = 0;
2065 }
2066 break;
2067 case 1:
2068 disp = (int8_t)cpu_ldub_code(cpu_single_env, s->pc++);
2069 break;
2070 default:
2071 case 2:
2072 disp = (int32_t)cpu_ldl_code(cpu_single_env, s->pc);
2073 s->pc += 4;
2074 break;
2075 }
2076
2077 if (base >= 0) {
2078 /* for correct popl handling with esp */
2079 if (base == 4 && s->popl_esp_hack)
2080 disp += s->popl_esp_hack;
2081 #ifdef TARGET_X86_64
2082 if (s->aflag == 2) {
2083 gen_op_movq_A0_reg(base);
2084 if (disp != 0) {
2085 gen_op_addq_A0_im(disp);
2086 }
2087 } else
2088 #endif
2089 {
2090 gen_op_movl_A0_reg(base);
2091 if (disp != 0)
2092 gen_op_addl_A0_im(disp);
2093 }
2094 } else {
2095 #ifdef TARGET_X86_64
2096 if (s->aflag == 2) {
2097 gen_op_movq_A0_im(disp);
2098 } else
2099 #endif
2100 {
2101 gen_op_movl_A0_im(disp);
2102 }
2103 }
2104 /* index == 4 means no index */
2105 if (havesib && (index != 4)) {
2106 #ifdef TARGET_X86_64
2107 if (s->aflag == 2) {
2108 gen_op_addq_A0_reg_sN(scale, index);
2109 } else
2110 #endif
2111 {
2112 gen_op_addl_A0_reg_sN(scale, index);
2113 }
2114 }
2115 if (must_add_seg) {
2116 if (override < 0) {
2117 if (base == R_EBP || base == R_ESP)
2118 override = R_SS;
2119 else
2120 override = R_DS;
2121 }
2122 #ifdef TARGET_X86_64
2123 if (s->aflag == 2) {
2124 gen_op_addq_A0_seg(override);
2125 } else
2126 #endif
2127 {
2128 gen_op_addl_A0_seg(s, override);
2129 }
2130 }
2131 } else {
2132 switch (mod) {
2133 case 0:
2134 if (rm == 6) {
2135 disp = cpu_lduw_code(cpu_single_env, s->pc);
2136 s->pc += 2;
2137 gen_op_movl_A0_im(disp);
2138 rm = 0; /* avoid SS override */
2139 goto no_rm;
2140 } else {
2141 disp = 0;
2142 }
2143 break;
2144 case 1:
2145 disp = (int8_t)cpu_ldub_code(cpu_single_env, s->pc++);
2146 break;
2147 default:
2148 case 2:
2149 disp = cpu_lduw_code(cpu_single_env, s->pc);
2150 s->pc += 2;
2151 break;
2152 }
2153 switch(rm) {
2154 case 0:
2155 gen_op_movl_A0_reg(R_EBX);
2156 gen_op_addl_A0_reg_sN(0, R_ESI);
2157 break;
2158 case 1:
2159 gen_op_movl_A0_reg(R_EBX);
2160 gen_op_addl_A0_reg_sN(0, R_EDI);
2161 break;
2162 case 2:
2163 gen_op_movl_A0_reg(R_EBP);
2164 gen_op_addl_A0_reg_sN(0, R_ESI);
2165 break;
2166 case 3:
2167 gen_op_movl_A0_reg(R_EBP);
2168 gen_op_addl_A0_reg_sN(0, R_EDI);
2169 break;
2170 case 4:
2171 gen_op_movl_A0_reg(R_ESI);
2172 break;
2173 case 5:
2174 gen_op_movl_A0_reg(R_EDI);
2175 break;
2176 case 6:
2177 gen_op_movl_A0_reg(R_EBP);
2178 break;
2179 default:
2180 case 7:
2181 gen_op_movl_A0_reg(R_EBX);
2182 break;
2183 }
2184 if (disp != 0)
2185 gen_op_addl_A0_im(disp);
2186 gen_op_andl_A0_ffff();
2187 no_rm:
2188 if (must_add_seg) {
2189 if (override < 0) {
2190 if (rm == 2 || rm == 3 || rm == 6)
2191 override = R_SS;
2192 else
2193 override = R_DS;
2194 }
2195 gen_op_addl_A0_seg(s, override);
2196 }
2197 }
2198
2199 opreg = OR_A0;
2200 disp = 0;
2201 *reg_ptr = opreg;
2202 *offset_ptr = disp;
2203 }
2204
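/* Skip the SIB and displacement bytes of a ModRM memory operand
   without generating any code, for NOP-like instructions that still
   carry an operand. */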
2205 static void gen_nop_modrm(DisasContext *s, int modrm)
2206 {
2207 int mod, rm, base, code;
2208
2209 mod = (modrm >> 6) & 3;
2210 if (mod == 3)
2211 return;
2212 rm = modrm & 7;
2213
2214 if (s->aflag) {
2215
2216 base = rm;
2217
2218 if (base == 4) {
2219 code = cpu_ldub_code(cpu_single_env, s->pc++);
2220 base = (code & 7);
2221 }
2222
2223 switch (mod) {
2224 case 0:
2225 if (base == 5) {
2226 s->pc += 4;
2227 }
2228 break;
2229 case 1:
2230 s->pc++;
2231 break;
2232 default:
2233 case 2:
2234 s->pc += 4;
2235 break;
2236 }
2237 } else {
2238 switch (mod) {
2239 case 0:
2240 if (rm == 6) {
2241 s->pc += 2;
2242 }
2243 break;
2244 case 1:
2245 s->pc++;
2246 break;
2247 default:
2248 case 2:
2249 s->pc += 2;
2250 break;
2251 }
2252 }
2253 }
2254
2255 /* used for LEA and MOV AX, mem */
2256 static void gen_add_A0_ds_seg(DisasContext *s)
2257 {
2258 int override, must_add_seg;
2259 must_add_seg = s->addseg;
2260 override = R_DS;
2261 if (s->override >= 0) {
2262 override = s->override;
2263 must_add_seg = 1;
2264 }
2265 if (must_add_seg) {
2266 #ifdef TARGET_X86_64
2267 if (CODE64(s)) {
2268 gen_op_addq_A0_seg(override);
2269 } else
2270 #endif
2271 {
2272 gen_op_addl_A0_seg(s, override);
2273 }
2274 }
2275 }
2276
2277 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2278 OR_TMP0 */
2279 static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
2280 {
2281 int mod, rm, opreg, disp;
2282
2283 mod = (modrm >> 6) & 3;
2284 rm = (modrm & 7) | REX_B(s);
2285 if (mod == 3) {
2286 if (is_store) {
2287 if (reg != OR_TMP0)
2288 gen_op_mov_TN_reg(ot, 0, reg);
2289 gen_op_mov_reg_T0(ot, rm);
2290 } else {
2291 gen_op_mov_TN_reg(ot, 0, rm);
2292 if (reg != OR_TMP0)
2293 gen_op_mov_reg_T0(ot, reg);
2294 }
2295 } else {
2296 gen_lea_modrm(s, modrm, &opreg, &disp);
2297 if (is_store) {
2298 if (reg != OR_TMP0)
2299 gen_op_mov_TN_reg(ot, 0, reg);
2300 gen_op_st_T0_A0(ot + s->mem_index);
2301 } else {
2302 gen_op_ld_T0_A0(ot + s->mem_index);
2303 if (reg != OR_TMP0)
2304 gen_op_mov_reg_T0(ot, reg);
2305 }
2306 }
2307 }
2308
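/* fetch an immediate operand of size 'ot' from the instruction
   stream, advancing s->pc past it (1, 2 or 4 bytes) */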
2309 static inline uint32_t insn_get(DisasContext *s, int ot)
2310 {
2311 uint32_t ret;
2312
2313 switch(ot) {
2314 case OT_BYTE:
2315 ret = cpu_ldub_code(cpu_single_env, s->pc);
2316 s->pc++;
2317 break;
2318 case OT_WORD:
2319 ret = cpu_lduw_code(cpu_single_env, s->pc);
2320 s->pc += 2;
2321 break;
2322 default:
2323 case OT_LONG:
2324 ret = cpu_ldl_code(cpu_single_env, s->pc);
2325 s->pc += 4;
2326 break;
2327 }
2328 return ret;
2329 }
2330
2331 static inline int insn_const_size(unsigned int ot)
2332 {
2333 if (ot <= OT_LONG)
2334 return 1 << ot;
2335 else
2336 return 4;
2337 }
2338
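/* End the TB with direct block chaining when possible: if the target
   eip lies on one of the (at most two) guest pages already covered by
   this TB, emit a goto_tb/exit_tb pair that can later be patched to
   jump straight to the successor TB; otherwise exit indirectly via
   gen_eob() */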
2339 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2340 {
2341 TranslationBlock *tb;
2342 target_ulong pc;
2343
2344 pc = s->cs_base + eip;
2345 tb = s->tb;
2346 /* NOTE: we handle the case where the TB spans two pages here */
2347 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2348 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2349 /* jump to same page: we can use a direct jump */
2350 tcg_gen_goto_tb(tb_num);
2351 gen_jmp_im(eip);
2352 tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
2353 } else {
2354 /* jump to another page: currently not optimized */
2355 gen_jmp_im(eip);
2356 gen_eob(s);
2357 }
2358 }
2359
2360 static inline void gen_jcc(DisasContext *s, int b,
2361 target_ulong val, target_ulong next_eip)
2362 {
2363 int l1, l2, cc_op;
2364
2365 cc_op = s->cc_op;
2366 gen_update_cc_op(s);
2367 if (s->jmp_opt) {
2368 l1 = gen_new_label();
2369 gen_jcc1(s, cc_op, b, l1);
2370
2371 gen_goto_tb(s, 0, next_eip);
2372
2373 gen_set_label(l1);
2374 gen_goto_tb(s, 1, val);
2375 s->is_jmp = DISAS_TB_JUMP;
2376 } else {
2377
2378 l1 = gen_new_label();
2379 l2 = gen_new_label();
2380 gen_jcc1(s, cc_op, b, l1);
2381
2382 gen_jmp_im(next_eip);
2383 tcg_gen_br(l2);
2384
2385 gen_set_label(l1);
2386 gen_jmp_im(val);
2387 gen_set_label(l2);
2388 gen_eob(s);
2389 }
2390 }
2391
2392 static void gen_setcc(DisasContext *s, int b)
2393 {
2394 int inv, jcc_op, l1;
2395 TCGv t0;
2396
2397 if (is_fast_jcc_case(s, b)) {
2398 /* nominal case: we use a jump */
2399 /* XXX: make it faster by adding new instructions in TCG */
2400 t0 = tcg_temp_local_new();
2401 tcg_gen_movi_tl(t0, 0);
2402 l1 = gen_new_label();
2403 gen_jcc1(s, s->cc_op, b ^ 1, l1);
2404 tcg_gen_movi_tl(t0, 1);
2405 gen_set_label(l1);
2406 tcg_gen_mov_tl(cpu_T[0], t0);
2407 tcg_temp_free(t0);
2408 } else {
2409 /* slow case: it is more efficient not to generate a jump,
2410 although it is questionable whether this optimization
2411 is worthwhile */
2412 inv = b & 1;
2413 jcc_op = (b >> 1) & 7;
2414 gen_setcc_slow_T0(s, jcc_op);
2415 if (inv) {
2416 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1);
2417 }
2418 }
2419 }
2420
2421 static inline void gen_op_movl_T0_seg(int seg_reg)
2422 {
2423 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2424 offsetof(CPUX86State,segs[seg_reg].selector));
2425 }
2426
2427 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2428 {
2429 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2430 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2431 offsetof(CPUX86State,segs[seg_reg].selector));
2432 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2433 tcg_gen_st_tl(cpu_T[0], cpu_env,
2434 offsetof(CPUX86State,segs[seg_reg].base));
2435 }
2436
2437 /* move T0 to seg_reg and determine whether the CPU state may change.
2438 Never call this function with seg_reg == R_CS */
2439 static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2440 {
2441 if (s->pe && !s->vm86) {
2442 /* XXX: optimize by finding processor state dynamically */
2443 if (s->cc_op != CC_OP_DYNAMIC)
2444 gen_op_set_cc_op(s->cc_op);
2445 gen_jmp_im(cur_eip);
2446 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2447 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
2448 /* abort translation because the addseg value may change or
2449 because ss32 may change. For R_SS, translation must always
2450 stop as a special handling must be done to disable hardware
2451 interrupts for the next instruction */
2452 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2453 s->is_jmp = DISAS_TB_JUMP;
2454 } else {
2455 gen_op_movl_seg_T0_vm(seg_reg);
2456 if (seg_reg == R_SS)
2457 s->is_jmp = DISAS_TB_JUMP;
2458 }
2459 }
2460
2461 static inline int svm_is_rep(int prefixes)
2462 {
2463 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2464 }
2465
2466 static inline void
2467 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2468 uint32_t type, uint64_t param)
2469 {
2470 /* no SVM activated; fast case */
2471 if (likely(!(s->flags & HF_SVMI_MASK)))
2472 return;
2473 if (s->cc_op != CC_OP_DYNAMIC)
2474 gen_op_set_cc_op(s->cc_op);
2475 gen_jmp_im(pc_start - s->cs_base);
2476 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
2477 tcg_const_i64(param));
2478 }
2479
2480 static inline void
2481 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2482 {
2483 gen_svm_check_intercept_param(s, pc_start, type, 0);
2484 }
2485
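/* adjust ESP by 'addend', using a 64-bit, 32-bit or 16-bit update
   depending on CODE64 and ss32 */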
2486 static inline void gen_stack_update(DisasContext *s, int addend)
2487 {
2488 #ifdef TARGET_X86_64
2489 if (CODE64(s)) {
2490 gen_op_add_reg_im(2, R_ESP, addend);
2491 } else
2492 #endif
2493 if (s->ss32) {
2494 gen_op_add_reg_im(1, R_ESP, addend);
2495 } else {
2496 gen_op_add_reg_im(0, R_ESP, addend);
2497 }
2498 }
2499
2500 /* generate a push. It depends on ss32, addseg and dflag */
2501 static void gen_push_T0(DisasContext *s)
2502 {
2503 #ifdef TARGET_X86_64
2504 if (CODE64(s)) {
2505 gen_op_movq_A0_reg(R_ESP);
2506 if (s->dflag) {
2507 gen_op_addq_A0_im(-8);
2508 gen_op_st_T0_A0(OT_QUAD + s->mem_index);
2509 } else {
2510 gen_op_addq_A0_im(-2);
2511 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2512 }
2513 gen_op_mov_reg_A0(2, R_ESP);
2514 } else
2515 #endif
2516 {
2517 gen_op_movl_A0_reg(R_ESP);
2518 if (!s->dflag)
2519 gen_op_addl_A0_im(-2);
2520 else
2521 gen_op_addl_A0_im(-4);
2522 if (s->ss32) {
2523 if (s->addseg) {
2524 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2525 gen_op_addl_A0_seg(s, R_SS);
2526 }
2527 } else {
2528 gen_op_andl_A0_ffff();
2529 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2530 gen_op_addl_A0_seg(s, R_SS);
2531 }
2532 gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
2533 if (s->ss32 && !s->addseg)
2534 gen_op_mov_reg_A0(1, R_ESP);
2535 else
2536 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
2537 }
2538 }
2539
2540 /* generate a push of T1. It depends on ss32, addseg and dflag */
2541 /* slower version, only used for call Ev */
2542 static void gen_push_T1(DisasContext *s)
2543 {
2544 #ifdef TARGET_X86_64
2545 if (CODE64(s)) {
2546 gen_op_movq_A0_reg(R_ESP);
2547 if (s->dflag) {
2548 gen_op_addq_A0_im(-8);
2549 gen_op_st_T1_A0(OT_QUAD + s->mem_index);
2550 } else {
2551 gen_op_addq_A0_im(-2);
2552 gen_op_st_T1_A0(OT_WORD + s->mem_index);
2553 }
2554 gen_op_mov_reg_A0(2, R_ESP);
2555 } else
2556 #endif
2557 {
2558 gen_op_movl_A0_reg(R_ESP);
2559 if (!s->dflag)
2560 gen_op_addl_A0_im(-2);
2561 else
2562 gen_op_addl_A0_im(-4);
2563 if (s->ss32) {
2564 if (s->addseg) {
2565 gen_op_addl_A0_seg(s, R_SS);
2566 }
2567 } else {
2568 gen_op_andl_A0_ffff();
2569 gen_op_addl_A0_seg(s, R_SS);
2570 }
2571 gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
2572
2573 if (s->ss32 && !s->addseg)
2574 gen_op_mov_reg_A0(1, R_ESP);
2575 else
2576 gen_stack_update(s, (-2) << s->dflag);
2577 }
2578 }
2579
2580 /* a two-step pop is necessary for precise exceptions */
2581 static void gen_pop_T0(DisasContext *s)
2582 {
2583 #ifdef TARGET_X86_64
2584 if (CODE64(s)) {
2585 gen_op_movq_A0_reg(R_ESP);
2586 gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
2587 } else
2588 #endif
2589 {
2590 gen_op_movl_A0_reg(R_ESP);
2591 if (s->ss32) {
2592 if (s->addseg)
2593 gen_op_addl_A0_seg(s, R_SS);
2594 } else {
2595 gen_op_andl_A0_ffff();
2596 gen_op_addl_A0_seg(s, R_SS);
2597 }
2598 gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
2599 }
2600 }
2601
2602 static void gen_pop_update(DisasContext *s)
2603 {
2604 #ifdef TARGET_X86_64
2605 if (CODE64(s) && s->dflag) {
2606 gen_stack_update(s, 8);
2607 } else
2608 #endif
2609 {
2610 gen_stack_update(s, 2 << s->dflag);
2611 }
2612 }
2613
2614 static void gen_stack_A0(DisasContext *s)
2615 {
2616 gen_op_movl_A0_reg(R_ESP);
2617 if (!s->ss32)
2618 gen_op_andl_A0_ffff();
2619 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2620 if (s->addseg)
2621 gen_op_addl_A0_seg(s, R_SS);
2622 }
2623
2624 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2625 static void gen_pusha(DisasContext *s)
2626 {
2627 int i;
2628 gen_op_movl_A0_reg(R_ESP);
2629 gen_op_addl_A0_im(-16 << s->dflag);
2630 if (!s->ss32)
2631 gen_op_andl_A0_ffff();
2632 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2633 if (s->addseg)
2634 gen_op_addl_A0_seg(s, R_SS);
2635 for(i = 0;i < 8; i++) {
2636 gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
2637 gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
2638 gen_op_addl_A0_im(2 << s->dflag);
2639 }
2640 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2641 }
2642
2643 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2644 static void gen_popa(DisasContext *s)
2645 {
2646 int i;
2647 gen_op_movl_A0_reg(R_ESP);
2648 if (!s->ss32)
2649 gen_op_andl_A0_ffff();
2650 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2651 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2652 if (s->addseg)
2653 gen_op_addl_A0_seg(s, R_SS);
2654 for(i = 0;i < 8; i++) {
2655 /* ESP is not reloaded */
2656 if (i != 3) {
2657 gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
2658 gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
2659 }
2660 gen_op_addl_A0_im(2 << s->dflag);
2661 }
2662 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2663 }
2664
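/* ENTER: push EBP, optionally copy 'level' outer frame pointers,
   point EBP at the new frame and reserve esp_addend bytes of locals */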
2665 static void gen_enter(DisasContext *s, int esp_addend, int level)
2666 {
2667 int ot, opsize;
2668
2669 level &= 0x1f;
2670 #ifdef TARGET_X86_64
2671 if (CODE64(s)) {
2672 ot = s->dflag ? OT_QUAD : OT_WORD;
2673 opsize = 1 << ot;
2674
2675 gen_op_movl_A0_reg(R_ESP);
2676 gen_op_addq_A0_im(-opsize);
2677 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2678
2679 /* push bp */
2680 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2681 gen_op_st_T0_A0(ot + s->mem_index);
2682 if (level) {
2683 /* XXX: must save state */
2684 gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
2685 tcg_const_i32((ot == OT_QUAD)),
2686 cpu_T[1]);
2687 }
2688 gen_op_mov_reg_T1(ot, R_EBP);
2689 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2690 gen_op_mov_reg_T1(OT_QUAD, R_ESP);
2691 } else
2692 #endif
2693 {
2694 ot = s->dflag + OT_WORD;
2695 opsize = 2 << s->dflag;
2696
2697 gen_op_movl_A0_reg(R_ESP);
2698 gen_op_addl_A0_im(-opsize);
2699 if (!s->ss32)
2700 gen_op_andl_A0_ffff();
2701 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2702 if (s->addseg)
2703 gen_op_addl_A0_seg(s, R_SS);
2704 /* push bp */
2705 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2706 gen_op_st_T0_A0(ot + s->mem_index);
2707 if (level) {
2708 /* XXX: must save state */
2709 gen_helper_enter_level(cpu_env, tcg_const_i32(level),
2710 tcg_const_i32(s->dflag),
2711 cpu_T[1]);
2712 }
2713 gen_op_mov_reg_T1(ot, R_EBP);
2714 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2715 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2716 }
2717 }
2718
2719 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2720 {
2721 if (s->cc_op != CC_OP_DYNAMIC)
2722 gen_op_set_cc_op(s->cc_op);
2723 gen_jmp_im(cur_eip);
2724 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
2725 s->is_jmp = DISAS_TB_JUMP;
2726 }
2727
2728 /* an interrupt is different from an exception because of the
2729 privilege checks */
2730 static void gen_interrupt(DisasContext *s, int intno,
2731 target_ulong cur_eip, target_ulong next_eip)
2732 {
2733 if (s->cc_op != CC_OP_DYNAMIC)
2734 gen_op_set_cc_op(s->cc_op);
2735 gen_jmp_im(cur_eip);
2736 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
2737 tcg_const_i32(next_eip - cur_eip));
2738 s->is_jmp = DISAS_TB_JUMP;
2739 }
2740
2741 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2742 {
2743 if (s->cc_op != CC_OP_DYNAMIC)
2744 gen_op_set_cc_op(s->cc_op);
2745 gen_jmp_im(cur_eip);
2746 gen_helper_debug(cpu_env);
2747 s->is_jmp = DISAS_TB_JUMP;
2748 }
2749
2750 /* generate a generic end of block. A trace exception is also
2751 generated if needed */
2752 static void gen_eob(DisasContext *s)
2753 {
2754 if (s->cc_op != CC_OP_DYNAMIC)
2755 gen_op_set_cc_op(s->cc_op);
2756 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2757 gen_helper_reset_inhibit_irq(cpu_env);
2758 }
2759 if (s->tb->flags & HF_RF_MASK) {
2760 gen_helper_reset_rf(cpu_env);
2761 }
2762 if (s->singlestep_enabled) {
2763 gen_helper_debug(cpu_env);
2764 } else if (s->tf) {
2765 gen_helper_single_step(cpu_env);
2766 } else {
2767 tcg_gen_exit_tb(0);
2768 }
2769 s->is_jmp = DISAS_TB_JUMP;
2770 }
2771
2772 /* generate a jump to eip. No segment change must happen before this,
2773 as a direct jump to the next block may occur */
2774 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2775 {
2776 if (s->jmp_opt) {
2777 gen_update_cc_op(s);
2778 gen_goto_tb(s, tb_num, eip);
2779 s->is_jmp = DISAS_TB_JUMP;
2780 } else {
2781 gen_jmp_im(eip);
2782 gen_eob(s);
2783 }
2784 }
2785
2786 static void gen_jmp(DisasContext *s, target_ulong eip)
2787 {
2788 gen_jmp_tb(s, eip, 0);
2789 }
2790
2791 static inline void gen_ldq_env_A0(int idx, int offset)
2792 {
2793 int mem_index = (idx >> 2) - 1;
2794 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2795 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2796 }
2797
2798 static inline void gen_stq_env_A0(int idx, int offset)
2799 {
2800 int mem_index = (idx >> 2) - 1;
2801 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2802 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2803 }
2804
2805 static inline void gen_ldo_env_A0(int idx, int offset)
2806 {
2807 int mem_index = (idx >> 2) - 1;
2808 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2809 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2810 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2811 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2812 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2813 }
2814
2815 static inline void gen_sto_env_A0(int idx, int offset)
2816 {
2817 int mem_index = (idx >> 2) - 1;
2818 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2819 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2820 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2821 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2822 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2823 }
2824
2825 static inline void gen_op_movo(int d_offset, int s_offset)
2826 {
2827 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2828 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2829 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2830 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
2831 }
2832
2833 static inline void gen_op_movq(int d_offset, int s_offset)
2834 {
2835 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2836 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2837 }
2838
2839 static inline void gen_op_movl(int d_offset, int s_offset)
2840 {
2841 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2842 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2843 }
2844
2845 static inline void gen_op_movq_env_0(int d_offset)
2846 {
2847 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2848 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2849 }
2850
2851 typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2852 typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2853 typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2854 typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2855 typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2856 typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2857 TCGv_i32 val);
2858 typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
2859 typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2860 TCGv val);
2861
2862 #define SSE_SPECIAL ((void *)1)
2863 #define SSE_DUMMY ((void *)2)
2864
2865 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2866 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2867 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
2868
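/* Main MMX/SSE dispatch table, indexed by the second opcode byte; the
   four columns correspond to the mandatory prefix: none (MMX), 66, F3,
   F2. SSE_SPECIAL entries are decoded by hand in gen_sse(), SSE_DUMMY
   entries only need the common MMX state handling. */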
2869 static const SSEFunc_0_epp sse_op_table1[256][4] = {
2870 /* 3DNow! extensions */
2871 [0x0e] = { SSE_DUMMY }, /* femms */
2872 [0x0f] = { SSE_DUMMY }, /* pf... */
2873 /* pure SSE operations */
2874 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2875 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2876 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2877 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2878 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2879 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2880 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2881 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2882
2883 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2884 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2885 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2886 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2887 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
2888 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
2889 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2890 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2891 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2892 [0x51] = SSE_FOP(sqrt),
2893 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2894 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2895 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2896 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2897 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2898 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2899 [0x58] = SSE_FOP(add),
2900 [0x59] = SSE_FOP(mul),
2901 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2902 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2903 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2904 [0x5c] = SSE_FOP(sub),
2905 [0x5d] = SSE_FOP(min),
2906 [0x5e] = SSE_FOP(div),
2907 [0x5f] = SSE_FOP(max),
2908
2909 [0xc2] = SSE_FOP(cmpeq),
2910 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2911 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
2912
2913 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2914 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2915
2916 /* MMX ops and their SSE extensions */
2917 [0x60] = MMX_OP2(punpcklbw),
2918 [0x61] = MMX_OP2(punpcklwd),
2919 [0x62] = MMX_OP2(punpckldq),
2920 [0x63] = MMX_OP2(packsswb),
2921 [0x64] = MMX_OP2(pcmpgtb),
2922 [0x65] = MMX_OP2(pcmpgtw),
2923 [0x66] = MMX_OP2(pcmpgtl),
2924 [0x67] = MMX_OP2(packuswb),
2925 [0x68] = MMX_OP2(punpckhbw),
2926 [0x69] = MMX_OP2(punpckhwd),
2927 [0x6a] = MMX_OP2(punpckhdq),
2928 [0x6b] = MMX_OP2(packssdw),
2929 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2930 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2931 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2932 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2933 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2934 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2935 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2936 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2937 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2938 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2939 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2940 [0x74] = MMX_OP2(pcmpeqb),
2941 [0x75] = MMX_OP2(pcmpeqw),
2942 [0x76] = MMX_OP2(pcmpeql),
2943 [0x77] = { SSE_DUMMY }, /* emms */
2944 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2945 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
2946 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2947 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2948 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2949 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2950 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2951 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2952 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2953 [0xd1] = MMX_OP2(psrlw),
2954 [0xd2] = MMX_OP2(psrld),
2955 [0xd3] = MMX_OP2(psrlq),
2956 [0xd4] = MMX_OP2(paddq),
2957 [0xd5] = MMX_OP2(pmullw),
2958 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq (66), movq2dq, movdq2q */
2959 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2960 [0xd8] = MMX_OP2(psubusb),
2961 [0xd9] = MMX_OP2(psubusw),
2962 [0xda] = MMX_OP2(pminub),
2963 [0xdb] = MMX_OP2(pand),
2964 [0xdc] = MMX_OP2(paddusb),
2965 [0xdd] = MMX_OP2(paddusw),
2966 [0xde] = MMX_OP2(pmaxub),
2967 [0xdf] = MMX_OP2(pandn),
2968 [0xe0] = MMX_OP2(pavgb),
2969 [0xe1] = MMX_OP2(psraw),
2970 [0xe2] = MMX_OP2(psrad),
2971 [0xe3] = MMX_OP2(pavgw),
2972 [0xe4] = MMX_OP2(pmulhuw),
2973 [0xe5] = MMX_OP2(pmulhw),
2974 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
2975 [0xe7] = { SSE_SPECIAL , SSE_SPECIAL }, /* movntq, movntdq */
2976 [0xe8] = MMX_OP2(psubsb),
2977 [0xe9] = MMX_OP2(psubsw),
2978 [0xea] = MMX_OP2(pminsw),
2979 [0xeb] = MMX_OP2(por),
2980 [0xec] = MMX_OP2(paddsb),
2981 [0xed] = MMX_OP2(paddsw),
2982 [0xee] = MMX_OP2(pmaxsw),
2983 [0xef] = MMX_OP2(pxor),
2984 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2985 [0xf1] = MMX_OP2(psllw),
2986 [0xf2] = MMX_OP2(pslld),
2987 [0xf3] = MMX_OP2(psllq),
2988 [0xf4] = MMX_OP2(pmuludq),
2989 [0xf5] = MMX_OP2(pmaddwd),
2990 [0xf6] = MMX_OP2(psadbw),
2991 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
2992 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
2993 [0xf8] = MMX_OP2(psubb),
2994 [0xf9] = MMX_OP2(psubw),
2995 [0xfa] = MMX_OP2(psubl),
2996 [0xfb] = MMX_OP2(psubq),
2997 [0xfc] = MMX_OP2(paddb),
2998 [0xfd] = MMX_OP2(paddw),
2999 [0xfe] = MMX_OP2(paddl),
3000 };
3001
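/* immediate-form shifts (opcodes 0x71-0x73): blocks of 8 for word,
   dword and qword element sizes, indexed by the modrm reg field
   (2 = srl, 4 = sra, 6 = sll; 3 and 7 are the xmm-only psrldq/pslldq) */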
3002 static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
3003 [0 + 2] = MMX_OP2(psrlw),
3004 [0 + 4] = MMX_OP2(psraw),
3005 [0 + 6] = MMX_OP2(psllw),
3006 [8 + 2] = MMX_OP2(psrld),
3007 [8 + 4] = MMX_OP2(psrad),
3008 [8 + 6] = MMX_OP2(pslld),
3009 [16 + 2] = MMX_OP2(psrlq),
3010 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
3011 [16 + 6] = MMX_OP2(psllq),
3012 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
3013 };
3014
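/* scalar int<->float conversions: the 'a' tables handle cvtsi2ss/sd,
   the 'b' tables cvt(t)ss/sd2si; the 'q' variants are the 64-bit
   x86_64-only forms */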
3015 static const SSEFunc_0_epi sse_op_table3ai[] = {
3016 gen_helper_cvtsi2ss,
3017 gen_helper_cvtsi2sd
3018 };
3019
3020 #ifdef TARGET_X86_64
3021 static const SSEFunc_0_epl sse_op_table3aq[] = {
3022 gen_helper_cvtsq2ss,
3023 gen_helper_cvtsq2sd
3024 };
3025 #endif
3026
3027 static const SSEFunc_i_ep sse_op_table3bi[] = {
3028 gen_helper_cvttss2si,
3029 gen_helper_cvtss2si,
3030 gen_helper_cvttsd2si,
3031 gen_helper_cvtsd2si
3032 };
3033
3034 #ifdef TARGET_X86_64
3035 static const SSEFunc_l_ep sse_op_table3bq[] = {
3036 gen_helper_cvttss2sq,
3037 gen_helper_cvtss2sq,
3038 gen_helper_cvttsd2sq,
3039 gen_helper_cvtsd2sq
3040 };
3041 #endif
3042
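/* CMPPS/CMPPD/CMPSS/CMPSD: the 3-bit immediate selects one of eight
   comparison predicates */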
3043 static const SSEFunc_0_epp sse_op_table4[8][4] = {
3044 SSE_FOP(cmpeq),
3045 SSE_FOP(cmplt),
3046 SSE_FOP(cmple),
3047 SSE_FOP(cmpunord),
3048 SSE_FOP(cmpneq),
3049 SSE_FOP(cmpnlt),
3050 SSE_FOP(cmpnle),
3051 SSE_FOP(cmpord),
3052 };
3053
3054 static const SSEFunc_0_epp sse_op_table5[256] = {
3055 [0x0c] = gen_helper_pi2fw,
3056 [0x0d] = gen_helper_pi2fd,
3057 [0x1c] = gen_helper_pf2iw,
3058 [0x1d] = gen_helper_pf2id,
3059 [0x8a] = gen_helper_pfnacc,
3060 [0x8e] = gen_helper_pfpnacc,
3061 [0x90] = gen_helper_pfcmpge,
3062 [0x94] = gen_helper_pfmin,
3063 [0x96] = gen_helper_pfrcp,
3064 [0x97] = gen_helper_pfrsqrt,
3065 [0x9a] = gen_helper_pfsub,
3066 [0x9e] = gen_helper_pfadd,
3067 [0xa0] = gen_helper_pfcmpgt,
3068 [0xa4] = gen_helper_pfmax,
3069 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
3070 [0xa7] = gen_helper_movq, /* pfrsqit1 */
3071 [0xaa] = gen_helper_pfsubr,
3072 [0xae] = gen_helper_pfacc,
3073 [0xb0] = gen_helper_pfcmpeq,
3074 [0xb4] = gen_helper_pfmul,
3075 [0xb6] = gen_helper_movq, /* pfrcpit2 */
3076 [0xb7] = gen_helper_pmulhrw_mmx,
3077 [0xbb] = gen_helper_pswapd,
3078 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
3079 };
3080
3081 struct SSEOpHelper_epp {
3082 SSEFunc_0_epp op[2];
3083 uint32_t ext_mask;
3084 };
3085
3086 struct SSEOpHelper_eppi {
3087 SSEFunc_0_eppi op[2];
3088 uint32_t ext_mask;
3089 };
3090
3091 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
3092 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3093 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
3094 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
3095
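/* three-byte opcode maps 0F 38 (table6) and 0F 3A (table7): op[0] is
   the MMX form, op[1] the XMM form; ext_mask is checked against
   cpuid_ext_features before dispatching */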
3096 static const struct SSEOpHelper_epp sse_op_table6[256] = {
3097 [0x00] = SSSE3_OP(pshufb),
3098 [0x01] = SSSE3_OP(phaddw),
3099 [0x02] = SSSE3_OP(phaddd),
3100 [0x03] = SSSE3_OP(phaddsw),
3101 [0x04] = SSSE3_OP(pmaddubsw),
3102 [0x05] = SSSE3_OP(phsubw),
3103 [0x06] = SSSE3_OP(phsubd),
3104 [0x07] = SSSE3_OP(phsubsw),
3105 [0x08] = SSSE3_OP(psignb),
3106 [0x09] = SSSE3_OP(psignw),
3107 [0x0a] = SSSE3_OP(psignd),
3108 [0x0b] = SSSE3_OP(pmulhrsw),
3109 [0x10] = SSE41_OP(pblendvb),
3110 [0x14] = SSE41_OP(blendvps),
3111 [0x15] = SSE41_OP(blendvpd),
3112 [0x17] = SSE41_OP(ptest),
3113 [0x1c] = SSSE3_OP(pabsb),
3114 [0x1d] = SSSE3_OP(pabsw),
3115 [0x1e] = SSSE3_OP(pabsd),
3116 [0x20] = SSE41_OP(pmovsxbw),
3117 [0x21] = SSE41_OP(pmovsxbd),
3118 [0x22] = SSE41_OP(pmovsxbq),
3119 [0x23] = SSE41_OP(pmovsxwd),
3120 [0x24] = SSE41_OP(pmovsxwq),
3121 [0x25] = SSE41_OP(pmovsxdq),
3122 [0x28] = SSE41_OP(pmuldq),
3123 [0x29] = SSE41_OP(pcmpeqq),
3124 [0x2a] = SSE41_SPECIAL, /* movntdqa */
3125 [0x2b] = SSE41_OP(packusdw),
3126 [0x30] = SSE41_OP(pmovzxbw),
3127 [0x31] = SSE41_OP(pmovzxbd),
3128 [0x32] = SSE41_OP(pmovzxbq),
3129 [0x33] = SSE41_OP(pmovzxwd),
3130 [0x34] = SSE41_OP(pmovzxwq),
3131 [0x35] = SSE41_OP(pmovzxdq),
3132 [0x37] = SSE42_OP(pcmpgtq),
3133 [0x38] = SSE41_OP(pminsb),
3134 [0x39] = SSE41_OP(pminsd),
3135 [0x3a] = SSE41_OP(pminuw),
3136 [0x3b] = SSE41_OP(pminud),
3137 [0x3c] = SSE41_OP(pmaxsb),
3138 [0x3d] = SSE41_OP(pmaxsd),
3139 [0x3e] = SSE41_OP(pmaxuw),
3140 [0x3f] = SSE41_OP(pmaxud),
3141 [0x40] = SSE41_OP(pmulld),
3142 [0x41] = SSE41_OP(phminposuw),
3143 };
3144
3145 static const struct SSEOpHelper_eppi sse_op_table7[256] = {
3146 [0x08] = SSE41_OP(roundps),
3147 [0x09] = SSE41_OP(roundpd),
3148 [0x0a] = SSE41_OP(roundss),
3149 [0x0b] = SSE41_OP(roundsd),
3150 [0x0c] = SSE41_OP(blendps),
3151 [0x0d] = SSE41_OP(blendpd),
3152 [0x0e] = SSE41_OP(pblendw),
3153 [0x0f] = SSSE3_OP(palignr),
3154 [0x14] = SSE41_SPECIAL, /* pextrb */
3155 [0x15] = SSE41_SPECIAL, /* pextrw */
3156 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
3157 [0x17] = SSE41_SPECIAL, /* extractps */
3158 [0x20] = SSE41_SPECIAL, /* pinsrb */
3159 [0x21] = SSE41_SPECIAL, /* insertps */
3160 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
3161 [0x40] = SSE41_OP(dpps),
3162 [0x41] = SSE41_OP(dppd),
3163 [0x42] = SSE41_OP(mpsadbw),
3164 [0x60] = SSE42_OP(pcmpestrm),
3165 [0x61] = SSE42_OP(pcmpestri),
3166 [0x62] = SSE42_OP(pcmpistrm),
3167 [0x63] = SSE42_OP(pcmpistri),
3168 };
3169
3170 static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
3171 {
3172 int b1, op1_offset, op2_offset, is_xmm, val, ot;
3173 int modrm, mod, rm, reg, reg_addr, offset_addr;
3174 SSEFunc_0_epp sse_fn_epp;
3175 SSEFunc_0_eppi sse_fn_eppi;
3176 SSEFunc_0_ppi sse_fn_ppi;
3177 SSEFunc_0_eppt sse_fn_eppt;
3178
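/* b1 encodes the mandatory prefix and selects the column of the SSE
   tables: 0 = none, 1 = 66, 2 = F3, 3 = F2 */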
3179 b &= 0xff;
3180 if (s->prefix & PREFIX_DATA)
3181 b1 = 1;
3182 else if (s->prefix & PREFIX_REPZ)
3183 b1 = 2;
3184 else if (s->prefix & PREFIX_REPNZ)
3185 b1 = 3;
3186 else
3187 b1 = 0;
3188 sse_fn_epp = sse_op_table1[b][b1];
3189 if (!sse_fn_epp) {
3190 goto illegal_op;
3191 }
3192 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3193 is_xmm = 1;
3194 } else {
3195 if (b1 == 0) {
3196 /* MMX case */
3197 is_xmm = 0;
3198 } else {
3199 is_xmm = 1;
3200 }
3201 }
3202 /* simple MMX/SSE operation */
3203 if (s->flags & HF_TS_MASK) {
3204 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3205 return;
3206 }
3207 if (s->flags & HF_EM_MASK) {
3208 illegal_op:
3209 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3210 return;
3211 }
3212 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3213 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3214 goto illegal_op;
3215 if (b == 0x0e) {
3216 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3217 goto illegal_op;
3218 /* femms */
3219 gen_helper_emms(cpu_env);
3220 return;
3221 }
3222 if (b == 0x77) {
3223 /* emms */
3224 gen_helper_emms(cpu_env);
3225 return;
3226 }
3227 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3228 the static cpu state) */
3229 if (!is_xmm) {
3230 gen_helper_enter_mmx(cpu_env);
3231 }
3232
3233 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
3234 reg = ((modrm >> 3) & 7);
3235 if (is_xmm)
3236 reg |= rex_r;
3237 mod = (modrm >> 6) & 3;
3238 if (sse_fn_epp == SSE_SPECIAL) {
3239 b |= (b1 << 8);
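/* fold the prefix index into bits 8-9 of b so that the special cases
   below can switch on a single value, e.g. 0x1e7 = 66 0F E7 (movntdq) */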
3240 switch(b) {
3241 case 0x0e7: /* movntq */
3242 if (mod == 3)
3243 goto illegal_op;
3244 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3245 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3246 break;
3247 case 0x1e7: /* movntdq */
3248 case 0x02b: /* movntps */
3249 case 0x12b: /* movntpd */
3250 if (mod == 3)
3251 goto illegal_op;
3252 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3253 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3254 break;
3255 case 0x3f0: /* lddqu */
3256 if (mod == 3)
3257 goto illegal_op;
3258 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3259 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3260 break;
3261 case 0x22b: /* movntss */
3262 case 0x32b: /* movntsd */
3263 if (mod == 3)
3264 goto illegal_op;
3265 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3266 if (b1 & 1) {
3267 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
3268 xmm_regs[reg]));
3269 } else {
3270 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3271 xmm_regs[reg].XMM_L(0)));
3272 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3273 }
3274 break;
3275 case 0x6e: /* movd mm, ea */
3276 #ifdef TARGET_X86_64
3277 if (s->dflag == 2) {
3278 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3279 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3280 } else
3281 #endif
3282 {
3283 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3284 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3285 offsetof(CPUX86State,fpregs[reg].mmx));
3286 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3287 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3288 }
3289 break;
3290 case 0x16e: /* movd xmm, ea */
3291 #ifdef TARGET_X86_64
3292 if (s->dflag == 2) {
3293 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3294 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3295 offsetof(CPUX86State,xmm_regs[reg]));
3296 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3297 } else
3298 #endif
3299 {
3300 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3301 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3302 offsetof(CPUX86State,xmm_regs[reg]));
3303 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3304 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3305 }
3306 break;
3307 case 0x6f: /* movq mm, ea */
3308 if (mod != 3) {
3309 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3310 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3311 } else {
3312 rm = (modrm & 7);
3313 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3314 offsetof(CPUX86State,fpregs[rm].mmx));
3315 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3316 offsetof(CPUX86State,fpregs[reg].mmx));
3317 }
3318 break;
3319 case 0x010: /* movups */
3320 case 0x110: /* movupd */
3321 case 0x028: /* movaps */
3322 case 0x128: /* movapd */
3323 case 0x16f: /* movdqa xmm, ea */
3324 case 0x26f: /* movdqu xmm, ea */
3325 if (mod != 3) {
3326 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3327 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3328 } else {
3329 rm = (modrm & 7) | REX_B(s);
3330 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3331 offsetof(CPUX86State,xmm_regs[rm]));
3332 }
3333 break;
3334 case 0x210: /* movss xmm, ea */
3335 if (mod != 3) {
3336 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3337 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3338 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3339 gen_op_movl_T0_0();
3340 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3341 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3342 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3343 } else {
3344 rm = (modrm & 7) | REX_B(s);
3345 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3346 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3347 }
3348 break;
3349 case 0x310: /* movsd xmm, ea */
3350 if (mod != 3) {
3351 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3352 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3353 gen_op_movl_T0_0();
3354 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3355 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3356 } else {
3357 rm = (modrm & 7) | REX_B(s);
3358 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3359 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3360 }
3361 break;
3362 case 0x012: /* movlps */
3363 case 0x112: /* movlpd */
3364 if (mod != 3) {
3365 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3366 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3367 } else {
3368 /* movhlps */
3369 rm = (modrm & 7) | REX_B(s);
3370 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3371 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3372 }
3373 break;
3374 case 0x212: /* movsldup */
3375 if (mod != 3) {
3376 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3377 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3378 } else {
3379 rm = (modrm & 7) | REX_B(s);
3380 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3381 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3382 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3383 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3384 }
3385 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3386 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3387 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3388 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3389 break;
3390 case 0x312: /* movddup */
3391 if (mod != 3) {
3392 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3393 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3394 } else {
3395 rm = (modrm & 7) | REX_B(s);
3396 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3397 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3398 }
3399 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3400 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3401 break;
3402 case 0x016: /* movhps */
3403 case 0x116: /* movhpd */
3404 if (mod != 3) {
3405 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3406 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3407 } else {
3408 /* movlhps */
3409 rm = (modrm & 7) | REX_B(s);
3410 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3411 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3412 }
3413 break;
3414 case 0x216: /* movshdup */
3415 if (mod != 3) {
3416 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3417 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3418 } else {
3419 rm = (modrm & 7) | REX_B(s);
3420 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3421 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3422 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3423 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3424 }
3425 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3426 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3427 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3428 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3429 break;
3430 case 0x178: /* extrq_i (SSE4a) */
3431 case 0x378: /* insertq_i (SSE4a) */
3432 {
3433 int bit_index, field_length;
3434
3435 if (b1 == 1 && reg != 0)
3436 goto illegal_op;
3437 field_length = cpu_ldub_code(cpu_single_env, s->pc++) & 0x3F;
3438 bit_index = cpu_ldub_code(cpu_single_env, s->pc++) & 0x3F;
3439 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3440 offsetof(CPUX86State,xmm_regs[reg]));
3441 if (b1 == 1)
3442 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3443 tcg_const_i32(bit_index),
3444 tcg_const_i32(field_length));
3445 else
3446 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3447 tcg_const_i32(bit_index),
3448 tcg_const_i32(field_length));
3449 }
3450 break;
3451 case 0x7e: /* movd ea, mm */
3452 #ifdef TARGET_X86_64
3453 if (s->dflag == 2) {
3454 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3455 offsetof(CPUX86State,fpregs[reg].mmx));
3456 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3457 } else
3458 #endif
3459 {
3460 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3461 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3462 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3463 }
3464 break;
3465 case 0x17e: /* movd ea, xmm */
3466 #ifdef TARGET_X86_64
3467 if (s->dflag == 2) {
3468 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3469 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3470 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3471 } else
3472 #endif
3473 {
3474 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3475 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3476 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3477 }
3478 break;
3479 case 0x27e: /* movq xmm, ea */
3480 if (mod != 3) {
3481 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3482 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3483 } else {
3484 rm = (modrm & 7) | REX_B(s);
3485 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3486 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3487 }
3488 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3489 break;
3490 case 0x7f: /* movq ea, mm */
3491 if (mod != 3) {
3492 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3493 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3494 } else {
3495 rm = (modrm & 7);
3496 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3497 offsetof(CPUX86State,fpregs[reg].mmx));
3498 }
3499 break;
3500 case 0x011: /* movups */
3501 case 0x111: /* movupd */
3502 case 0x029: /* movaps */
3503 case 0x129: /* movapd */
3504 case 0x17f: /* movdqa ea, xmm */
3505 case 0x27f: /* movdqu ea, xmm */
3506 if (mod != 3) {
3507 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3508 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3509 } else {
3510 rm = (modrm & 7) | REX_B(s);
3511 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3512 offsetof(CPUX86State,xmm_regs[reg]));
3513 }
3514 break;
3515 case 0x211: /* movss ea, xmm */
3516 if (mod != 3) {
3517 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3518 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3519 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3520 } else {
3521 rm = (modrm & 7) | REX_B(s);
3522 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3523 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3524 }
3525 break;
3526 case 0x311: /* movsd ea, xmm */
3527 if (mod != 3) {
3528 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3529 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3530 } else {
3531 rm = (modrm & 7) | REX_B(s);
3532 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3533 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3534 }
3535 break;
3536 case 0x013: /* movlps */
3537 case 0x113: /* movlpd */
3538 if (mod != 3) {
3539 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3540 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3541 } else {
3542 goto illegal_op;
3543 }
3544 break;
3545 case 0x017: /* movhps */
3546 case 0x117: /* movhpd */
3547 if (mod != 3) {
3548 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3549 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3550 } else {
3551 goto illegal_op;
3552 }
3553 break;
3554 case 0x71: /* shift mm, im */
3555 case 0x72:
3556 case 0x73:
3557 case 0x171: /* shift xmm, im */
3558 case 0x172:
3559 case 0x173:
3560 if (b1 >= 2) {
3561 goto illegal_op;
3562 }
3563 val = cpu_ldub_code(cpu_single_env, s->pc++);
3564 if (is_xmm) {
3565 gen_op_movl_T0_im(val);
3566 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3567 gen_op_movl_T0_0();
3568 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3569 op1_offset = offsetof(CPUX86State,xmm_t0);
3570 } else {
3571 gen_op_movl_T0_im(val);
3572 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3573 gen_op_movl_T0_0();
3574 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3575 op1_offset = offsetof(CPUX86State,mmx_t0);
3576 }
3577 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3578 (((modrm >> 3)) & 7)][b1];
3579 if (!sse_fn_epp) {
3580 goto illegal_op;
3581 }
3582 if (is_xmm) {
3583 rm = (modrm & 7) | REX_B(s);
3584 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3585 } else {
3586 rm = (modrm & 7);
3587 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3588 }
3589 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3590 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3591 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3592 break;
3593 case 0x050: /* movmskps */
3594 rm = (modrm & 7) | REX_B(s);
3595 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3596 offsetof(CPUX86State,xmm_regs[rm]));
3597 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3598 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3599 gen_op_mov_reg_T0(OT_LONG, reg);
3600 break;
3601 case 0x150: /* movmskpd */
3602 rm = (modrm & 7) | REX_B(s);
3603 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3604 offsetof(CPUX86State,xmm_regs[rm]));
3605 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3606 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3607 gen_op_mov_reg_T0(OT_LONG, reg);
3608 break;
3609 case 0x02a: /* cvtpi2ps */
3610 case 0x12a: /* cvtpi2pd */
3611 gen_helper_enter_mmx(cpu_env);
3612 if (mod != 3) {
3613 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3614 op2_offset = offsetof(CPUX86State,mmx_t0);
3615 gen_ldq_env_A0(s->mem_index, op2_offset);
3616 } else {
3617 rm = (modrm & 7);
3618 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3619 }
3620 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3621 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3622 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3623 switch(b >> 8) {
3624 case 0x0:
3625 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
3626 break;
3627 default:
3628 case 0x1:
3629 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
3630 break;
3631 }
3632 break;
3633 case 0x22a: /* cvtsi2ss */
3634 case 0x32a: /* cvtsi2sd */
3635 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3636 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
3637 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3638 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3639 if (ot == OT_LONG) {
3640 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
3641 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3642 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
3643 } else {
3644 #ifdef TARGET_X86_64
3645 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3646 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
3647 #else
3648 goto illegal_op;
3649 #endif
3650 }
3651 break;
3652 case 0x02c: /* cvttps2pi */
3653 case 0x12c: /* cvttpd2pi */
3654 case 0x02d: /* cvtps2pi */
3655 case 0x12d: /* cvtpd2pi */
3656 gen_helper_enter_mmx(cpu_env);
3657 if (mod != 3) {
3658 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3659 op2_offset = offsetof(CPUX86State,xmm_t0);
3660 gen_ldo_env_A0(s->mem_index, op2_offset);
3661 } else {
3662 rm = (modrm & 7) | REX_B(s);
3663 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3664 }
3665 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3666 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3667 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3668 switch(b) {
3669 case 0x02c:
3670 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3671 break;
3672 case 0x12c:
3673 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3674 break;
3675 case 0x02d:
3676 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3677 break;
3678 case 0x12d:
3679 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3680 break;
3681 }
3682 break;
3683 case 0x22c: /* cvttss2si */
3684 case 0x32c: /* cvttsd2si */
3685 case 0x22d: /* cvtss2si */
3686 case 0x32d: /* cvtsd2si */
3687 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3688 if (mod != 3) {
3689 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3690 if ((b >> 8) & 1) {
3691 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
3692 } else {
3693 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3694 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3695 }
3696 op2_offset = offsetof(CPUX86State,xmm_t0);
3697 } else {
3698 rm = (modrm & 7) | REX_B(s);
3699 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3700 }
3701 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3702 if (ot == OT_LONG) {
3703 SSEFunc_i_ep sse_fn_i_ep =
3704 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
3705 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3706 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3707 } else {
3708 #ifdef TARGET_X86_64
3709 SSEFunc_l_ep sse_fn_l_ep =
3710 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
3711 sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
3712 #else
3713 goto illegal_op;
3714 #endif
3715 }
3716 gen_op_mov_reg_T0(ot, reg);
3717 break;
3718 case 0xc4: /* pinsrw */
3719 case 0x1c4:
3720 s->rip_offset = 1;
3721 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
3722 val = cpu_ldub_code(cpu_single_env, s->pc++);
3723 if (b1) {
3724 val &= 7;
3725 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3726 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3727 } else {
3728 val &= 3;
3729 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3730 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3731 }
3732 break;
3733 case 0xc5: /* pextrw */
3734 case 0x1c5:
3735 if (mod != 3)
3736 goto illegal_op;
3737 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3738 val = cpu_ldub_code(cpu_single_env, s->pc++);
3739 if (b1) {
3740 val &= 7;
3741 rm = (modrm & 7) | REX_B(s);
3742 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3743 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3744 } else {
3745 val &= 3;
3746 rm = (modrm & 7);
3747 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3748 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3749 }
3750 reg = ((modrm >> 3) & 7) | rex_r;
3751 gen_op_mov_reg_T0(ot, reg);
3752 break;
3753 case 0x1d6: /* movq ea, xmm */
3754 if (mod != 3) {
3755 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3756 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3757 } else {
3758 rm = (modrm & 7) | REX_B(s);
3759 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3760 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3761 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3762 }
3763 break;
3764 case 0x2d6: /* movq2dq */
3765 gen_helper_enter_mmx(cpu_env);
3766 rm = (modrm & 7);
3767 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3768 offsetof(CPUX86State,fpregs[rm].mmx));
3769 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3770 break;
3771 case 0x3d6: /* movdq2q */
3772 gen_helper_enter_mmx(cpu_env);
3773 rm = (modrm & 7) | REX_B(s);
3774 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3775 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3776 break;
3777 case 0xd7: /* pmovmskb */
3778 case 0x1d7:
3779 if (mod != 3)
3780 goto illegal_op;
3781 if (b1) {
3782 rm = (modrm & 7) | REX_B(s);
3783 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3784 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3785 } else {
3786 rm = (modrm & 7);
3787 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3788 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3789 }
3790 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3791 reg = ((modrm >> 3) & 7) | rex_r;
3792 gen_op_mov_reg_T0(OT_LONG, reg);
3793 break;
3794 case 0x138:
3795 if (s->prefix & PREFIX_REPNZ)
3796 goto crc32;
3797 case 0x038:
3798 b = modrm;
3799 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
3800 rm = modrm & 7;
3801 reg = ((modrm >> 3) & 7) | rex_r;
3802 mod = (modrm >> 6) & 3;
3803 if (b1 >= 2) {
3804 goto illegal_op;
3805 }
3806
3807 sse_fn_epp = sse_op_table6[b].op[b1];
3808 if (!sse_fn_epp) {
3809 goto illegal_op;
3810 }
3811 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3812 goto illegal_op;
3813
3814 if (b1) {
3815 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3816 if (mod == 3) {
3817 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3818 } else {
3819 op2_offset = offsetof(CPUX86State,xmm_t0);
3820 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3821 switch (b) {
3822 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3823 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3824 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3825 gen_ldq_env_A0(s->mem_index, op2_offset +
3826 offsetof(XMMReg, XMM_Q(0)));
3827 break;
3828 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3829 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3830 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3831 (s->mem_index >> 2) - 1);
3832 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3833 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3834 offsetof(XMMReg, XMM_L(0)));
3835 break;
3836 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3837 tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
3838 (s->mem_index >> 2) - 1);
3839 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3840 offsetof(XMMReg, XMM_W(0)));
3841 break;
3842 case 0x2a: /* movntdqa */
3843 gen_ldo_env_A0(s->mem_index, op1_offset);
3844 return;
3845 default:
3846 gen_ldo_env_A0(s->mem_index, op2_offset);
3847 }
3848 }
3849 } else {
3850 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3851 if (mod == 3) {
3852 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3853 } else {
3854 op2_offset = offsetof(CPUX86State,mmx_t0);
3855 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3856 gen_ldq_env_A0(s->mem_index, op2_offset);
3857 }
3858 }
3859 if (sse_fn_epp == SSE_SPECIAL) {
3860 goto illegal_op;
3861 }
3862
3863 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3864 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3865 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3866
3867 if (b == 0x17)
3868 s->cc_op = CC_OP_EFLAGS;
3869 break;
3870 case 0x338: /* crc32 */
3871 crc32:
3872 b = modrm;
3873 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
3874 reg = ((modrm >> 3) & 7) | rex_r;
3875
3876 if (b != 0xf0 && b != 0xf1)
3877 goto illegal_op;
3878 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
3879 goto illegal_op;
3880
3881 if (b == 0xf0)
3882 ot = OT_BYTE;
3883 else if (b == 0xf1 && s->dflag != 2)
3884 if (s->prefix & PREFIX_DATA)
3885 ot = OT_WORD;
3886 else
3887 ot = OT_LONG;
3888 else
3889 ot = OT_QUAD;
3890
3891 gen_op_mov_TN_reg(OT_LONG, 0, reg);
3892 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3893 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
3894 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3895 cpu_T[0], tcg_const_i32(8 << ot));
3896
3897 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3898 gen_op_mov_reg_T0(ot, reg);
3899 break;
3900 case 0x03a:
3901 case 0x13a:
3902 b = modrm;
3903 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
3904 rm = modrm & 7;
3905 reg = ((modrm >> 3) & 7) | rex_r;
3906 mod = (modrm >> 6) & 3;
3907 if (b1 >= 2) {
3908 goto illegal_op;
3909 }
3910
3911 sse_fn_eppi = sse_op_table7[b].op[b1];
3912 if (!sse_fn_eppi) {
3913 goto illegal_op;
3914 }
3915 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
3916 goto illegal_op;
3917
3918 if (sse_fn_eppi == SSE_SPECIAL) {
3919 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3920 rm = (modrm & 7) | REX_B(s);
3921 if (mod != 3)
3922 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3923 reg = ((modrm >> 3) & 7) | rex_r;
3924 val = cpu_ldub_code(cpu_single_env, s->pc++);
3925 switch (b) {
3926 case 0x14: /* pextrb */
3927 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3928 xmm_regs[reg].XMM_B(val & 15)));
3929 if (mod == 3)
3930 gen_op_mov_reg_T0(ot, rm);
3931 else
3932 tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
3933 (s->mem_index >> 2) - 1);
3934 break;
3935 case 0x15: /* pextrw */
3936 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3937 xmm_regs[reg].XMM_W(val & 7)));
3938 if (mod == 3)
3939 gen_op_mov_reg_T0(ot, rm);
3940 else
3941 tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
3942 (s->mem_index >> 2) - 1);
3943 break;
3944 case 0x16:
3945 if (ot == OT_LONG) { /* pextrd */
3946 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3947 offsetof(CPUX86State,
3948 xmm_regs[reg].XMM_L(val & 3)));
3949 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3950 if (mod == 3)
3951 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
3952 else
3953 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
3954 (s->mem_index >> 2) - 1);
3955 } else { /* pextrq */
3956 #ifdef TARGET_X86_64
3957 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3958 offsetof(CPUX86State,
3959 xmm_regs[reg].XMM_Q(val & 1)));
3960 if (mod == 3)
3961 gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
3962 else
3963 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
3964 (s->mem_index >> 2) - 1);
3965 #else
3966 goto illegal_op;
3967 #endif
3968 }
3969 break;
3970 case 0x17: /* extractps */
3971 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3972 xmm_regs[reg].XMM_L(val & 3)));
3973 if (mod == 3)
3974 gen_op_mov_reg_T0(ot, rm);
3975 else
3976 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
3977 (s->mem_index >> 2) - 1);
3978 break;
3979 case 0x20: /* pinsrb */
3980 if (mod == 3)
3981 gen_op_mov_TN_reg(OT_LONG, 0, rm);
3982 else
3983 tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0,
3984 (s->mem_index >> 2) - 1);
3985 tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State,
3986 xmm_regs[reg].XMM_B(val & 15)));
3987 break;
3988 case 0x21: /* insertps */
3989 if (mod == 3) {
3990 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3991 offsetof(CPUX86State,xmm_regs[rm]
3992 .XMM_L((val >> 6) & 3)));
3993 } else {
3994 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3995 (s->mem_index >> 2) - 1);
3996 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3997 }
3998 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
3999 offsetof(CPUX86State,xmm_regs[reg]
4000 .XMM_L((val >> 4) & 3)));
4001 if ((val >> 0) & 1)
4002 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4003 cpu_env, offsetof(CPUX86State,
4004 xmm_regs[reg].XMM_L(0)));
4005 if ((val >> 1) & 1)
4006 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4007 cpu_env, offsetof(CPUX86State,
4008 xmm_regs[reg].XMM_L(1)));
4009 if ((val >> 2) & 1)
4010 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4011 cpu_env, offsetof(CPUX86State,
4012 xmm_regs[reg].XMM_L(2)));
4013 if ((val >> 3) & 1)
4014 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4015 cpu_env, offsetof(CPUX86State,
4016 xmm_regs[reg].XMM_L(3)));
4017 break;
4018 case 0x22:
4019 if (ot == OT_LONG) { /* pinsrd */
4020 if (mod == 3)
4021 gen_op_mov_v_reg(ot, cpu_tmp0, rm);
4022 else
4023 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
4024 (s->mem_index >> 2) - 1);
4025 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
4026 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4027 offsetof(CPUX86State,
4028 xmm_regs[reg].XMM_L(val & 3)));
4029 } else { /* pinsrq */
4030 #ifdef TARGET_X86_64
4031 if (mod == 3)
4032 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4033 else
4034 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
4035 (s->mem_index >> 2) - 1);
4036 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4037 offsetof(CPUX86State,
4038 xmm_regs[reg].XMM_Q(val & 1)));
4039 #else
4040 goto illegal_op;
4041 #endif
4042 }
4043 break;
4044 }
4045 return;
4046 }
4047
4048 if (b1) {
4049 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4050 if (mod == 3) {
4051 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4052 } else {
4053 op2_offset = offsetof(CPUX86State,xmm_t0);
4054 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4055 gen_ldo_env_A0(s->mem_index, op2_offset);
4056 }
4057 } else {
4058 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4059 if (mod == 3) {
4060 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4061 } else {
4062 op2_offset = offsetof(CPUX86State,mmx_t0);
4063 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4064 gen_ldq_env_A0(s->mem_index, op2_offset);
4065 }
4066 }
4067 val = cpu_ldub_code(cpu_single_env, s->pc++);
4068
4069 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
4070 s->cc_op = CC_OP_EFLAGS;
4071
4072 if (s->dflag == 2)
4073 /* The helper must use the entire 64-bit GP registers */
4074 val |= 1 << 8;
4075 }
4076
4077 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4078 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4079 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4080 break;
4081 default:
4082 goto illegal_op;
4083 }
4084 } else {
4085 /* generic MMX or SSE operation */
4086 switch(b) {
4087 case 0x70: /* pshufx insn */
4088 case 0xc6: /* shufps/shufpd */
4089 case 0xc2: /* compare insns */
4090 s->rip_offset = 1;
4091 break;
4092 default:
4093 break;
4094 }
4095 if (is_xmm) {
4096 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4097 if (mod != 3) {
4098 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4099 op2_offset = offsetof(CPUX86State,xmm_t0);
4100 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
4101 b == 0xc2)) {
4102 /* special case for scalar SSE insns: load only 32/64 bits */
4103 if (b1 == 2) {
4104 /* 32 bit access */
4105 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
4106 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
4107 } else {
4108 /* 64 bit access */
4109 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
4110 }
4111 } else {
4112 gen_ldo_env_A0(s->mem_index, op2_offset);
4113 }
4114 } else {
4115 rm = (modrm & 7) | REX_B(s);
4116 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4117 }
4118 } else {
4119 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4120 if (mod != 3) {
4121 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4122 op2_offset = offsetof(CPUX86State,mmx_t0);
4123 gen_ldq_env_A0(s->mem_index, op2_offset);
4124 } else {
4125 rm = (modrm & 7);
4126 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4127 }
4128 }
4129 switch(b) {
4130 case 0x0f: /* 3DNow! data insns */
4131 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4132 goto illegal_op;
4133 val = cpu_ldub_code(cpu_single_env, s->pc++);
4134 sse_fn_epp = sse_op_table5[val];
4135 if (!sse_fn_epp) {
4136 goto illegal_op;
4137 }
4138 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4139 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4140 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4141 break;
4142 case 0x70: /* pshufx insn */
4143 case 0xc6: /* shufps/shufpd */
4144 val = cpu_ldub_code(cpu_single_env, s->pc++);
4145 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4146 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4147 /* XXX: introduce a new table? */
4148 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
4149 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4150 break;
4151 case 0xc2:
4152 /* compare insns */
4153 val = cpu_ldub_code(cpu_single_env, s->pc++);
4154 if (val >= 8)
4155 goto illegal_op;
4156 sse_fn_epp = sse_op_table4[val][b1];
4157
4158 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4159 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4160 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4161 break;
4162 case 0xf7:
4163 /* maskmov: we must prepare A0 (implicit DS:rDI operand) */
4164 if (mod != 3)
4165 goto illegal_op;
4166 #ifdef TARGET_X86_64
4167 if (s->aflag == 2) {
4168 gen_op_movq_A0_reg(R_EDI);
4169 } else
4170 #endif
4171 {
4172 gen_op_movl_A0_reg(R_EDI);
4173 if (s->aflag == 0)
4174 gen_op_andl_A0_ffff();
4175 }
4176 gen_add_A0_ds_seg(s);
4177
4178 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4179 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4180 /* XXX: introduce a new table? */
4181 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4182 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
4183 break;
4184 default:
4185 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4186 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4187 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4188 break;
4189 }
4190 if (b == 0x2e || b == 0x2f) {
4191 s->cc_op = CC_OP_EFLAGS;
4192 }
4193 }
4194 }
4195
4196 /* Convert one instruction. s->is_jmp is set if the translation must
4197 be stopped. Returns the pc value of the next instruction. */
4198 static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
4199 {
4200 int b, prefixes, aflag, dflag;
4201 int shift, ot;
4202 int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
4203 target_ulong next_eip, tval;
4204 int rex_w, rex_r;
4205
4206 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4207 tcg_gen_debug_insn_start(pc_start);
4208 }
4209 s->pc = pc_start;
4210 prefixes = 0;
4211 aflag = s->code32;
4212 dflag = s->code32;
4213 s->override = -1;
4214 rex_w = -1;
4215 rex_r = 0;
4216 #ifdef TARGET_X86_64
4217 s->rex_x = 0;
4218 s->rex_b = 0;
4219 x86_64_hregs = 0;
4220 #endif
4221 s->rip_offset = 0; /* for relative ip address */
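/* Prefix bytes are consumed one at a time: each recognized prefix
   records its effect and jumps back to next_byte until a real
   opcode byte is found. */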
4222 next_byte:
4223 b = cpu_ldub_code(cpu_single_env, s->pc);
4224 s->pc++;
4225 /* check prefixes */
4226 #ifdef TARGET_X86_64
4227 if (CODE64(s)) {
4228 switch (b) {
4229 case 0xf3:
4230 prefixes |= PREFIX_REPZ;
4231 goto next_byte;
4232 case 0xf2:
4233 prefixes |= PREFIX_REPNZ;
4234 goto next_byte;
4235 case 0xf0:
4236 prefixes |= PREFIX_LOCK;
4237 goto next_byte;
4238 case 0x2e:
4239 s->override = R_CS;
4240 goto next_byte;
4241 case 0x36:
4242 s->override = R_SS;
4243 goto next_byte;
4244 case 0x3e:
4245 s->override = R_DS;
4246 goto next_byte;
4247 case 0x26:
4248 s->override = R_ES;
4249 goto next_byte;
4250 case 0x64:
4251 s->override = R_FS;
4252 goto next_byte;
4253 case 0x65:
4254 s->override = R_GS;
4255 goto next_byte;
4256 case 0x66:
4257 prefixes |= PREFIX_DATA;
4258 goto next_byte;
4259 case 0x67:
4260 prefixes |= PREFIX_ADR;
4261 goto next_byte;
4262 case 0x40 ... 0x4f:
4263 /* REX prefix */
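/* Worked example (illustrative): REX byte 0x4d = 0100WRXB with
   W=1, R=1, X=0, B=1. R/X/B are stored pre-shifted so that each
   set bit contributes 8, the high bit of the 4-bit register
   number, and can be OR'ed directly into the decoded reg, SIB
   index and rm/base fields. */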
4264 rex_w = (b >> 3) & 1;
4265 rex_r = (b & 0x4) << 1;
4266 s->rex_x = (b & 0x2) << 2;
4267 REX_B(s) = (b & 0x1) << 3;
4268 x86_64_hregs = 1; /* select uniform byte register addressing */
4269 goto next_byte;
4270 }
4271 if (rex_w == 1) {
4272 /* 0x66 is ignored if rex.w is set */
4273 dflag = 2;
4274 } else {
4275 if (prefixes & PREFIX_DATA)
4276 dflag ^= 1;
4277 }
4278 if (!(prefixes & PREFIX_ADR))
4279 aflag = 2;
4280 } else
4281 #endif
4282 {
4283 switch (b) {
4284 case 0xf3:
4285 prefixes |= PREFIX_REPZ;
4286 goto next_byte;
4287 case 0xf2:
4288 prefixes |= PREFIX_REPNZ;
4289 goto next_byte;
4290 case 0xf0:
4291 prefixes |= PREFIX_LOCK;
4292 goto next_byte;
4293 case 0x2e:
4294 s->override = R_CS;
4295 goto next_byte;
4296 case 0x36:
4297 s->override = R_SS;
4298 goto next_byte;
4299 case 0x3e:
4300 s->override = R_DS;
4301 goto next_byte;
4302 case 0x26:
4303 s->override = R_ES;
4304 goto next_byte;
4305 case 0x64:
4306 s->override = R_FS;
4307 goto next_byte;
4308 case 0x65:
4309 s->override = R_GS;
4310 goto next_byte;
4311 case 0x66:
4312 prefixes |= PREFIX_DATA;
4313 goto next_byte;
4314 case 0x67:
4315 prefixes |= PREFIX_ADR;
4316 goto next_byte;
4317 }
4318 if (prefixes & PREFIX_DATA)
4319 dflag ^= 1;
4320 if (prefixes & PREFIX_ADR)
4321 aflag ^= 1;
4322 }
4323
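/* From here on aflag/dflag encode the effective address and operand
   sizes as 0 = 16 bit, 1 = 32 bit, 2 = 64 bit; the OT_WORD-based
   operand-type arithmetic below relies on this encoding. */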
4324 s->prefix = prefixes;
4325 s->aflag = aflag;
4326 s->dflag = dflag;
4327
4328 /* lock generation */
4329 if (prefixes & PREFIX_LOCK)
4330 gen_helper_lock();
4331
4332 /* now check op code */
4333 reswitch:
4334 switch(b) {
4335 case 0x0f:
4336 /**************************/
4337 /* extended op code */
4338 b = cpu_ldub_code(cpu_single_env, s->pc++) | 0x100;
4339 goto reswitch;
4340
4341 /**************************/
4342 /* arith & logic */
4343 case 0x00 ... 0x05:
4344 case 0x08 ... 0x0d:
4345 case 0x10 ... 0x15:
4346 case 0x18 ... 0x1d:
4347 case 0x20 ... 0x25:
4348 case 0x28 ... 0x2d:
4349 case 0x30 ... 0x35:
4350 case 0x38 ... 0x3d:
4351 {
4352 int op, f, val;
4353 op = (b >> 3) & 7;
4354 f = (b >> 1) & 3;
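/* The classic arith opcodes encode three fields. E.g. 0x29 (sub
   Ev, Gv) = 00101001b: op = 5 (sub), f = 0 (the Ev, Gv form) and
   bit 0 = 1 (full operand size rather than byte). */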
4355
4356 if ((b & 1) == 0)
4357 ot = OT_BYTE;
4358 else
4359 ot = dflag + OT_WORD;
4360
4361 switch(f) {
4362 case 0: /* OP Ev, Gv */
4363 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
4364 reg = ((modrm >> 3) & 7) | rex_r;
4365 mod = (modrm >> 6) & 3;
4366 rm = (modrm & 7) | REX_B(s);
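/* The modrm byte is laid out as mm rrr bbb; e.g. modrm 0xc3
   decodes to mod = 3 (register operand), reg = 0 and rm = 3 (EBX),
   with REX.R/REX.B OR'ed in as bit 3 of reg/rm in 64-bit mode. */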
4367 if (mod != 3) {
4368 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4369 opreg = OR_TMP0;
4370 } else if (op == OP_XORL && rm == reg) {
4371 xor_zero:
4372 /* xor reg, reg optimisation */
4373 gen_op_movl_T0_0();
4374 s->cc_op = CC_OP_LOGICB + ot;
4375 gen_op_mov_reg_T0(ot, reg);
4376 gen_op_update1_cc();
4377 break;
4378 } else {
4379 opreg = rm;
4380 }
4381 gen_op_mov_TN_reg(ot, 1, reg);
4382 gen_op(s, op, ot, opreg);
4383 break;
4384 case 1: /* OP Gv, Ev */
4385 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
4386 mod = (modrm >> 6) & 3;
4387 reg = ((modrm >> 3) & 7) | rex_r;
4388 rm = (modrm & 7) | REX_B(s);
4389 if (mod != 3) {
4390 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4391 gen_op_ld_T1_A0(ot + s->mem_index);
4392 } else if (op == OP_XORL && rm == reg) {
4393 goto xor_zero;
4394 } else {
4395 gen_op_mov_TN_reg(ot, 1, rm);
4396 }
4397 gen_op(s, op, ot, reg);
4398 break;
4399 case 2: /* OP A, Iv */
4400 val = insn_get(s, ot);
4401 gen_op_movl_T1_im(val);
4402 gen_op(s, op, ot, OR_EAX);
4403 break;
4404 }
4405 }
4406 break;
4407
4408 case 0x82:
4409 if (CODE64(s))
4410 goto illegal_op;
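/* fall through: outside 64-bit mode, 0x82 is an alias of 0x80 */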
4411 case 0x80: /* GRP1 */
4412 case 0x81:
4413 case 0x83:
4414 {
4415 int val;
4416
4417 if ((b & 1) == 0)
4418 ot = OT_BYTE;
4419 else
4420 ot = dflag + OT_WORD;
4421
4422 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
4423 mod = (modrm >> 6) & 3;
4424 rm = (modrm & 7) | REX_B(s);
4425 op = (modrm >> 3) & 7;
4426
4427 if (mod != 3) {
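/* rip_offset is the number of immediate bytes still to be fetched
   after the addressing bytes; gen_lea_modrm needs it so that
   64-bit RIP-relative displacements are computed from the end of
   the instruction. */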
4428 if (b == 0x83)
4429 s->rip_offset = 1;
4430 else
4431 s->rip_offset = insn_const_size(ot);
4432 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4433 opreg = OR_TMP0;
4434 } else {
4435 opreg = rm;
4436 }
4437
4438 switch(b) {
4439 default:
4440 case 0x80:
4441 case 0x81:
4442 case 0x82:
4443 val = insn_get(s, ot);
4444 break;
4445 case 0x83:
4446 val = (int8_t)insn_get(s, OT_BYTE);
4447 break;
4448 }
4449 gen_op_movl_T1_im(val);
4450 gen_op(s, op, ot, opreg);
4451 }
4452 break;
4453
4454 /**************************/
4455 /* inc, dec, and other misc arith */
4456 case 0x40 ... 0x47: /* inc Gv */
4457 ot = dflag ? OT_LONG : OT_WORD;
4458 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4459 break;
4460 case 0x48 ... 0x4f: /* dec Gv */
4461 ot = dflag ? OT_LONG : OT_WORD;
4462 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4463 break;
4464 case 0xf6: /* GRP3 */
4465 case 0xf7:
4466 if ((b & 1) == 0)
4467 ot = OT_BYTE;
4468 else
4469 ot = dflag + OT_WORD;
4470
4471 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
4472 mod = (modrm >> 6) & 3;
4473 rm = (modrm & 7) | REX_B(s);
4474 op = (modrm >> 3) & 7;
4475 if (mod != 3) {
4476 if (op == 0)
4477 s->rip_offset = insn_const_size(ot);
4478 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4479 gen_op_ld_T0_A0(ot + s->mem_index);
4480 } else {
4481 gen_op_mov_TN_reg(ot, 0, rm);
4482 }
4483
4484 switch(op) {
4485 case 0: /* test */
4486 val = insn_get(s, ot);
4487 gen_op_movl_T1_im(val);
4488 gen_op_testl_T0_T1_cc();
4489 s->cc_op = CC_OP_LOGICB + ot;
4490 break;
4491 case 2: /* not */
4492 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4493 if (mod != 3) {
4494 gen_op_st_T0_A0(ot + s->mem_index);
4495 } else {
4496 gen_op_mov_reg_T0(ot, rm);
4497 }
4498 break;
4499 case 3: /* neg */
4500 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4501 if (mod != 3) {
4502 gen_op_st_T0_A0(ot + s->mem_index);
4503 } else {
4504 gen_op_mov_reg_T0(ot, rm);
4505 }
4506 gen_op_update_neg_cc();
4507 s->cc_op = CC_OP_SUBB + ot;
4508 break;
4509 case 4: /* mul */
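/* Unsigned multiply: the high half of the product goes to
   AH/DX/EDX/RDX and is also recorded in cc_src, so the
   CC_OP_MULx flag computation yields CF = OF = (high half != 0). */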
4510 switch(ot) {
4511 case OT_BYTE:
4512 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4513 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4514 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4515 /* XXX: use 32 bit mul which could be faster */
4516 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4517 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4518 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4519 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4520 s->cc_op = CC_OP_MULB;
4521 break;
4522 case OT_WORD:
4523 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4524 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4525 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4526 /* XXX: use 32 bit mul which could be faster */
4527 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4528 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4529 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4530 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4531 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4532 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4533 s->cc_op = CC_OP_MULW;
4534 break;
4535 default:
4536 case OT_LONG:
4537 #ifdef TARGET_X86_64
4538 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4539 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4540 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
4541 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4542 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4543 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4544 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4545 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4546 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4547 #else
4548 {
4549 TCGv_i64 t0, t1;
4550 t0 = tcg_temp_new_i64();
4551 t1 = tcg_temp_new_i64();
4552 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4553 tcg_gen_extu_i32_i64(t0, cpu_T[0]);
4554 tcg_gen_extu_i32_i64(t1, cpu_T[1]);
4555 tcg_gen_mul_i64(t0, t0, t1);
4556 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4557 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4558 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4559 tcg_gen_shri_i64(t0, t0, 32);
4560 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4561 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4562 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4563 }
4564 #endif
4565 s->cc_op = CC_OP_MULL;
4566 break;
4567 #ifdef TARGET_X86_64
4568 case OT_QUAD:
4569 gen_helper_mulq_EAX_T0(cpu_env, cpu_T[0]);
4570 s->cc_op = CC_OP_MULQ;
4571 break;
4572 #endif
4573 }
4574 break;
4575 case 5: /* imul */
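/* Signed multiply: cc_src = result - sext(result) is non-zero
   exactly when the product does not fit in the destination size,
   which is how CF/OF are derived for imul. */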
4576 switch(ot) {
4577 case OT_BYTE:
4578 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4579 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4580 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4581 /* XXX: use 32 bit mul which could be faster */
4582 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4583 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4584 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4585 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4586 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4587 s->cc_op = CC_OP_MULB;
4588 break;
4589 case OT_WORD:
4590 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4591 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4592 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4593 /* XXX: use 32 bit mul which could be faster */
4594 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4595 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4596 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4597 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4598 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4599 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4600 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4601 s->cc_op = CC_OP_MULW;
4602 break;
4603 default:
4604 case OT_LONG:
4605 #ifdef TARGET_X86_64
4606 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4607 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4608 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4609 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4610 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4611 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4612 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4613 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4614 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4615 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4616 #else
4617 {
4618 TCGv_i64 t0, t1;
4619 t0 = tcg_temp_new_i64();
4620 t1 = tcg_temp_new_i64();
4621 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4622 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4623 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4624 tcg_gen_mul_i64(t0, t0, t1);
4625 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4626 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4627 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4628 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4629 tcg_gen_shri_i64(t0, t0, 32);
4630 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4631 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4632 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4633 }
4634 #endif
4635 s->cc_op = CC_OP_MULL;
4636 break;
4637 #ifdef TARGET_X86_64
4638 case OT_QUAD:
4639 gen_helper_imulq_EAX_T0(cpu_env, cpu_T[0]);
4640 s->cc_op = CC_OP_MULQ;
4641 break;
4642 #endif
4643 }
4644 break;
4645 case 6: /* div */
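/* EIP is synced with gen_jmp_im before each div/idiv helper call
   because the helper may raise #DE (divide error), and the
   exception must report this instruction's address. */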
4646 switch(ot) {
4647 case OT_BYTE:
4648 gen_jmp_im(pc_start - s->cs_base);
4649 gen_helper_divb_AL(cpu_env, cpu_T[0]);
4650 break;
4651 case OT_WORD:
4652 gen_jmp_im(pc_start - s->cs_base);
4653 gen_helper_divw_AX(cpu_env, cpu_T[0]);
4654 break;
4655 default:
4656 case OT_LONG:
4657 gen_jmp_im(pc_start - s->cs_base);
4658 gen_helper_divl_EAX(cpu_env, cpu_T[0]);
4659 break;
4660 #ifdef TARGET_X86_64
4661 case OT_QUAD:
4662 gen_jmp_im(pc_start - s->cs_base);
4663 gen_helper_divq_EAX(cpu_env, cpu_T[0]);
4664 break;
4665 #endif
4666 }
4667 break;
4668 case 7: /* idiv */
4669 switch(ot) {
4670 case OT_BYTE:
4671 gen_jmp_im(pc_start - s->cs_base);
4672 gen_helper_idivb_AL(cpu_env, cpu_T[0]);
4673 break;
4674 case OT_WORD:
4675 gen_jmp_im(pc_start - s->cs_base);
4676 gen_helper_idivw_AX(cpu_env, cpu_T[0]);
4677 break;
4678 default:
4679 case OT_LONG:
4680 gen_jmp_im(pc_start - s->cs_base);
4681 gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
4682 break;
4683 #ifdef TARGET_X86_64
4684 case OT_QUAD:
4685 gen_jmp_im(pc_start - s->cs_base);
4686 gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
4687 break;
4688 #endif
4689 }
4690 break;
4691 default:
4692 goto illegal_op;
4693 }
4694 break;
4695
4696 case 0xfe: /* GRP4 */
4697 case 0xff: /* GRP5 */
4698 if ((b & 1) == 0)
4699 ot = OT_BYTE;
4700 else
4701 ot = dflag + OT_WORD;
4702
4703 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
4704 mod = (modrm >> 6) & 3;
4705 rm = (modrm & 7) | REX_B(s);
4706 op = (modrm >> 3) & 7;
4707 if (op >= 2 && b == 0xfe) {
4708 goto illegal_op;
4709 }
4710 if (CODE64(s)) {
4711 if (op == 2 || op == 4) {
4712 /* operand size for jumps is 64 bit */
4713 ot = OT_QUAD;
4714 } else if (op == 3 || op == 5) {
4715 ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
4716 } else if (op == 6) {
4717 /* default push size is 64 bit */
4718 ot = dflag ? OT_QUAD : OT_WORD;
4719 }
4720 }
4721 if (mod != 3) {
4722 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4723 if (op >= 2 && op != 3 && op != 5)
4724 gen_op_ld_T0_A0(ot + s->mem_index);
4725 } else {
4726 gen_op_mov_TN_reg(ot, 0, rm);
4727 }
4728
4729 switch(op) {
4730 case 0: /* inc Ev */
4731 if (mod != 3)
4732 opreg = OR_TMP0;
4733 else
4734 opreg = rm;
4735 gen_inc(s, ot, opreg, 1);
4736 break;
4737 case 1: /* dec Ev */
4738 if (mod != 3)
4739 opreg = OR_TMP0;
4740 else
4741 opreg = rm;
4742 gen_inc(s, ot, opreg, -1);
4743 break;
4744 case 2: /* call Ev */
4745 /* XXX: optimize the memory case (the 'and' is not necessary) */
4746 if (s->dflag == 0)
4747 gen_op_andl_T0_ffff();
4748 next_eip = s->pc - s->cs_base;
4749 gen_movtl_T1_im(next_eip);
4750 gen_push_T1(s);
4751 gen_op_jmp_T0();
4752 gen_eob(s);
4753 break;
4754 case 3: /* lcall Ev */
4755 gen_op_ld_T1_A0(ot + s->mem_index);
4756 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4757 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4758 do_lcall:
4759 if (s->pe && !s->vm86) {
4760 if (s->cc_op != CC_OP_DYNAMIC)
4761 gen_op_set_cc_op(s->cc_op);
4762 gen_jmp_im(pc_start - s->cs_base);
4763 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4764 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4765 tcg_const_i32(dflag),
4766 tcg_const_i32(s->pc - pc_start));
4767 } else {
4768 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4769 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
4770 tcg_const_i32(dflag),
4771 tcg_const_i32(s->pc - s->cs_base));
4772 }
4773 gen_eob(s);
4774 break;
4775 case 4: /* jmp Ev */
4776 if (s->dflag == 0)
4777 gen_op_andl_T0_ffff();
4778 gen_op_jmp_T0();
4779 gen_eob(s);
4780 break;
4781 case 5: /* ljmp Ev */
4782 gen_op_ld_T1_A0(ot + s->mem_index);
4783 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4784 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4785 do_ljmp:
4786 if (s->pe && !s->vm86) {
4787 if (s->cc_op != CC_OP_DYNAMIC)
4788 gen_op_set_cc_op(s->cc_op);
4789 gen_jmp_im(pc_start - s->cs_base);
4790 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4791 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4792 tcg_const_i32(s->pc - pc_start));
4793 } else {
4794 gen_op_movl_seg_T0_vm(R_CS);
4795 gen_op_movl_T0_T1();
4796 gen_op_jmp_T0();
4797 }
4798 gen_eob(s);
4799 break;
4800 case 6: /* push Ev */
4801 gen_push_T0(s);
4802 break;
4803 default:
4804 goto illegal_op;
4805 }
4806 break;
4807
4808 case 0x84: /* test Ev, Gv */
4809 case 0x85:
4810 if ((b & 1) == 0)
4811 ot = OT_BYTE;
4812 else
4813 ot = dflag + OT_WORD;
4814
4815 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
4816 reg = ((modrm >> 3) & 7) | rex_r;
4817
4818 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4819 gen_op_mov_TN_reg(ot, 1, reg);
4820 gen_op_testl_T0_T1_cc();
4821 s->cc_op = CC_OP_LOGICB + ot;
4822 break;
4823
4824 case 0xa8: /* test eAX, Iv */
4825 case 0xa9:
4826 if ((b & 1) == 0)
4827 ot = OT_BYTE;
4828 else
4829 ot = dflag + OT_WORD;
4830 val = insn_get(s, ot);
4831
4832 gen_op_mov_TN_reg(ot, 0, OR_EAX);
4833 gen_op_movl_T1_im(val);
4834 gen_op_testl_T0_T1_cc();
4835 s->cc_op = CC_OP_LOGICB + ot;
4836 break;
4837
4838 case 0x98: /* CWDE/CBW */
4839 #ifdef TARGET_X86_64
4840 if (dflag == 2) {
4841 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4842 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4843 gen_op_mov_reg_T0(OT_QUAD, R_EAX);
4844 } else
4845 #endif
4846 if (dflag == 1) {
4847 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4848 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4849 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4850 } else {
4851 gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
4852 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4853 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4854 }
4855 break;
4856 case 0x99: /* CDQ/CWD */
4857 #ifdef TARGET_X86_64
4858 if (dflag == 2) {
4859 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
4860 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
4861 gen_op_mov_reg_T0(OT_QUAD, R_EDX);
4862 } else
4863 #endif
4864 if (dflag == 1) {
4865 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4866 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4867 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
4868 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4869 } else {
4870 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4871 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4872 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
4873 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4874 }
4875 break;
4876 case 0x1af: /* imul Gv, Ev */
4877 case 0x69: /* imul Gv, Ev, I */
4878 case 0x6b:
4879 ot = dflag + OT_WORD;
4880 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
4881 reg = ((modrm >> 3) & 7) | rex_r;
4882 if (b == 0x69)
4883 s->rip_offset = insn_const_size(ot);
4884 else if (b == 0x6b)
4885 s->rip_offset = 1;
4886 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4887 if (b == 0x69) {
4888 val = insn_get(s, ot);
4889 gen_op_movl_T1_im(val);
4890 } else if (b == 0x6b) {
4891 val = (int8_t)insn_get(s, OT_BYTE);
4892 gen_op_movl_T1_im(val);
4893 } else {
4894 gen_op_mov_TN_reg(ot, 1, reg);
4895 }
4896
4897 #ifdef TARGET_X86_64
4898 if (ot == OT_QUAD) {
4899 gen_helper_imulq_T0_T1(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
4900 } else
4901 #endif
4902 if (ot == OT_LONG) {
4903 #ifdef TARGET_X86_64
4904 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4905 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4906 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4907 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4908 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4909 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4910 #else
4911 {
4912 TCGv_i64 t0, t1;
4913 t0 = tcg_temp_new_i64();
4914 t1 = tcg_temp_new_i64();
4915 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4916 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4917 tcg_gen_mul_i64(t0, t0, t1);
4918 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4919 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4920 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4921 tcg_gen_shri_i64(t0, t0, 32);
4922 tcg_gen_trunc_i64_i32(cpu_T[1], t0);
4923 tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
4924 }
4925 #endif
4926 } else {
4927 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4928 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4929 /* XXX: use 32 bit mul which could be faster */
4930 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4931 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4932 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4933 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4934 }
4935 gen_op_mov_reg_T0(ot, reg);
4936 s->cc_op = CC_OP_MULB + ot;
4937 break;
4938 case 0x1c0:
4939 case 0x1c1: /* xadd Ev, Gv */
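/* xadd: the destination receives source + destination while the
   source register receives the old destination value. */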
4940 if ((b & 1) == 0)
4941 ot = OT_BYTE;
4942 else
4943 ot = dflag + OT_WORD;
4944 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
4945 reg = ((modrm >> 3) & 7) | rex_r;
4946 mod = (modrm >> 6) & 3;
4947 if (mod == 3) {
4948 rm = (modrm & 7) | REX_B(s);
4949 gen_op_mov_TN_reg(ot, 0, reg);
4950 gen_op_mov_TN_reg(ot, 1, rm);
4951 gen_op_addl_T0_T1();
4952 gen_op_mov_reg_T1(ot, reg);
4953 gen_op_mov_reg_T0(ot, rm);
4954 } else {
4955 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4956 gen_op_mov_TN_reg(ot, 0, reg);
4957 gen_op_ld_T1_A0(ot + s->mem_index);
4958 gen_op_addl_T0_T1();
4959 gen_op_st_T0_A0(ot + s->mem_index);
4960 gen_op_mov_reg_T1(ot, reg);
4961 }
4962 gen_op_update2_cc();
4963 s->cc_op = CC_OP_ADDB + ot;
4964 break;
4965 case 0x1b0:
4966 case 0x1b1: /* cmpxchg Ev, Gv */
4967 {
4968 int label1, label2;
4969 TCGv t0, t1, t2, a0;
4970
4971 if ((b & 1) == 0)
4972 ot = OT_BYTE;
4973 else
4974 ot = dflag + OT_WORD;
4975 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
4976 reg = ((modrm >> 3) & 7) | rex_r;
4977 mod = (modrm >> 6) & 3;
4978 t0 = tcg_temp_local_new();
4979 t1 = tcg_temp_local_new();
4980 t2 = tcg_temp_local_new();
4981 a0 = tcg_temp_local_new();
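/* cmpxchg: t0 = destination operand, t1 = source register and
   t2 = EAX - t0 provides the SUB flags. On equality we branch to
   label1 and store t1 into the destination; otherwise the old
   destination value is loaded into EAX. */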
4982 gen_op_mov_v_reg(ot, t1, reg);
4983 if (mod == 3) {
4984 rm = (modrm & 7) | REX_B(s);
4985 gen_op_mov_v_reg(ot, t0, rm);
4986 } else {
4987 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4988 tcg_gen_mov_tl(a0, cpu_A0);
4989 gen_op_ld_v(ot + s->mem_index, t0, a0);
4990 rm = 0; /* avoid warning */
4991 }
4992 label1 = gen_new_label();
4993 tcg_gen_sub_tl(t2, cpu_regs[R_EAX], t0);
4994 gen_extu(ot, t2);
4995 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
4996 label2 = gen_new_label();
4997 if (mod == 3) {
4998 gen_op_mov_reg_v(ot, R_EAX, t0);
4999 tcg_gen_br(label2);
5000 gen_set_label(label1);
5001 gen_op_mov_reg_v(ot, rm, t1);
5002 } else {
5003 /* perform a no-op store cycle like a physical CPU; it must happen
5004 before the accumulator is changed, to ensure idempotency if
5005 the store faults and the instruction is restarted */
5006 gen_op_st_v(ot + s->mem_index, t0, a0);
5007 gen_op_mov_reg_v(ot, R_EAX, t0);
5008 tcg_gen_br(label2);
5009 gen_set_label(label1);
5010 gen_op_st_v(ot + s->mem_index, t1, a0);
5011 }
5012 gen_set_label(label2);
5013 tcg_gen_mov_tl(cpu_cc_src, t0);
5014 tcg_gen_mov_tl(cpu_cc_dst, t2);
5015 s->cc_op = CC_OP_SUBB + ot;
5016 tcg_temp_free(t0);
5017 tcg_temp_free(t1);
5018 tcg_temp_free(t2);
5019 tcg_temp_free(a0);
5020 }
5021 break;
5022 case 0x1c7: /* cmpxchg8b */
5023 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
5024 mod = (modrm >> 6) & 3;
5025 if ((mod == 3) || ((modrm & 0x38) != 0x8))
5026 goto illegal_op;
5027 #ifdef TARGET_X86_64
5028 if (dflag == 2) {
5029 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5030 goto illegal_op;
5031 gen_jmp_im(pc_start - s->cs_base);
5032 if (s->cc_op != CC_OP_DYNAMIC)
5033 gen_op_set_cc_op(s->cc_op);
5034 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5035 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
5036 } else
5037 #endif
5038 {
5039 if (!(s->cpuid_features & CPUID_CX8))
5040 goto illegal_op;
5041 gen_jmp_im(pc_start - s->cs_base);
5042 if (s->cc_op != CC_OP_DYNAMIC)
5043 gen_op_set_cc_op(s->cc_op);
5044 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5045 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
5046 }
5047 s->cc_op = CC_OP_EFLAGS;
5048 break;
5049
5050 /**************************/
5051 /* push/pop */
5052 case 0x50 ... 0x57: /* push */
5053 gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
5054 gen_push_T0(s);
5055 break;
5056 case 0x58 ... 0x5f: /* pop */
5057 if (CODE64(s)) {
5058 ot = dflag ? OT_QUAD : OT_WORD;
5059 } else {
5060 ot = dflag + OT_WORD;
5061 }
5062 gen_pop_T0(s);
5063 /* NOTE: order is important for pop %sp */
5064 gen_pop_update(s);
5065 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
5066 break;
5067 case 0x60: /* pusha */
5068 if (CODE64(s))
5069 goto illegal_op;
5070 gen_pusha(s);
5071 break;
5072 case 0x61: /* popa */
5073 if (CODE64(s))
5074 goto illegal_op;
5075 gen_popa(s);
5076 break;
5077 case 0x68: /* push Iv */
5078 case 0x6a:
5079 if (CODE64(s)) {
5080 ot = dflag ? OT_QUAD : OT_WORD;
5081 } else {
5082 ot = dflag + OT_WORD;
5083 }
5084 if (b == 0x68)
5085 val = insn_get(s, ot);
5086 else
5087 val = (int8_t)insn_get(s, OT_BYTE);
5088 gen_op_movl_T0_im(val);
5089 gen_push_T0(s);
5090 break;
5091 case 0x8f: /* pop Ev */
5092 if (CODE64(s)) {
5093 ot = dflag ? OT_QUAD : OT_WORD;
5094 } else {
5095 ot = dflag + OT_WORD;
5096 }
5097 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
5098 mod = (modrm >> 6) & 3;
5099 gen_pop_T0(s);
5100 if (mod == 3) {
5101 /* NOTE: order is important for pop %sp */
5102 gen_pop_update(s);
5103 rm = (modrm & 7) | REX_B(s);
5104 gen_op_mov_reg_T0(ot, rm);
5105 } else {
5106 /* NOTE: order is important too for MMU exceptions */
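/* popl_esp_hack makes gen_lea_modrm compute an ESP-based
   destination address as if ESP had already been incremented
   past the popped value, matching hardware behaviour. */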
5107 s->popl_esp_hack = 1 << ot;
5108 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
5109 s->popl_esp_hack = 0;
5110 gen_pop_update(s);
5111 }
5112 break;
5113 case 0xc8: /* enter */
5114 {
5115 int level;
5116 val = cpu_lduw_code(cpu_single_env, s->pc);
5117 s->pc += 2;
5118 level = cpu_ldub_code(cpu_single_env, s->pc++);
5119 gen_enter(s, val, level);
5120 }
5121 break;
5122 case 0xc9: /* leave */
5123 /* XXX: exception not precise (ESP is updated before potential exception) */
5124 if (CODE64(s)) {
5125 gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
5126 gen_op_mov_reg_T0(OT_QUAD, R_ESP);
5127 } else if (s->ss32) {
5128 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
5129 gen_op_mov_reg_T0(OT_LONG, R_ESP);
5130 } else {
5131 gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
5132 gen_op_mov_reg_T0(OT_WORD, R_ESP);
5133 }
5134 gen_pop_T0(s);
5135 if (CODE64(s)) {
5136 ot = dflag ? OT_QUAD : OT_WORD;
5137 } else {
5138 ot = dflag + OT_WORD;
5139 }
5140 gen_op_mov_reg_T0(ot, R_EBP);
5141 gen_pop_update(s);
5142 break;
5143 case 0x06: /* push es */
5144 case 0x0e: /* push cs */
5145 case 0x16: /* push ss */
5146 case 0x1e: /* push ds */
5147 if (CODE64(s))
5148 goto illegal_op;
5149 gen_op_movl_T0_seg(b >> 3);
5150 gen_push_T0(s);
5151 break;
5152 case 0x1a0: /* push fs */
5153 case 0x1a8: /* push gs */
5154 gen_op_movl_T0_seg((b >> 3) & 7);
5155 gen_push_T0(s);
5156 break;
5157 case 0x07: /* pop es */
5158 case 0x17: /* pop ss */
5159 case 0x1f: /* pop ds */
5160 if (CODE64(s))
5161 goto illegal_op;
5162 reg = b >> 3;
5163 gen_pop_T0(s);
5164 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5165 gen_pop_update(s);
5166 if (reg == R_SS) {
5167 /* if reg == SS, inhibit interrupts/trace. */
5168 /* If several instructions disable interrupts, only the
5169 _first_ does it */
5170 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5171 gen_helper_set_inhibit_irq(cpu_env);
5172 s->tf = 0;
5173 }
5174 if (s->is_jmp) {
5175 gen_jmp_im(s->pc - s->cs_base);
5176 gen_eob(s);
5177 }
5178 break;
5179 case 0x1a1: /* pop fs */
5180 case 0x1a9: /* pop gs */
5181 gen_pop_T0(s);
5182 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5183 gen_pop_update(s);
5184 if (s->is_jmp) {
5185 gen_jmp_im(s->pc - s->cs_base);
5186 gen_eob(s);
5187 }
5188 break;
5189
5190 /**************************/
5191 /* mov */
5192 case 0x88:
5193 case 0x89: /* mov Gv, Ev */
5194 if ((b & 1) == 0)
5195 ot = OT_BYTE;
5196 else
5197 ot = dflag + OT_WORD;
5198 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
5199 reg = ((modrm >> 3) & 7) | rex_r;
5200
5201 /* generate a generic store */
5202 gen_ldst_modrm(s, modrm, ot, reg, 1);
5203 break;
5204 case 0xc6:
5205 case 0xc7: /* mov Ev, Iv */
5206 if ((b & 1) == 0)
5207 ot = OT_BYTE;
5208 else
5209 ot = dflag + OT_WORD;
5210 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
5211 mod = (modrm >> 6) & 3;
5212 if (mod != 3) {
5213 s->rip_offset = insn_const_size(ot);
5214 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5215 }
5216 val = insn_get(s, ot);
5217 gen_op_movl_T0_im(val);
5218 if (mod != 3)
5219 gen_op_st_T0_A0(ot + s->mem_index);
5220 else
5221 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
5222 break;
5223 case 0x8a:
5224 case 0x8b: /* mov Ev, Gv */
5225 if ((b & 1) == 0)
5226 ot = OT_BYTE;
5227 else
5228 ot = OT_WORD + dflag;
5229 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
5230 reg = ((modrm >> 3) & 7) | rex_r;
5231
5232 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
5233 gen_op_mov_reg_T0(ot, reg);
5234 break;
5235 case 0x8e: /* mov seg, Gv */
5236 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
5237 reg = (modrm >> 3) & 7;
5238 if (reg >= 6 || reg == R_CS)
5239 goto illegal_op;
5240 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
5241 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5242 if (reg == R_SS) {
5243 /* if reg == SS, inhibit interrupts/trace */
5244 /* If several instructions disable interrupts, only the
5245 _first_ does it */
5246 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5247 gen_helper_set_inhibit_irq(cpu_env);
5248 s->tf = 0;
5249 }
5250 if (s->is_jmp) {
5251 gen_jmp_im(s->pc - s->cs_base);
5252 gen_eob(s);
5253 }
5254 break;
5255 case 0x8c: /* mov Gv, seg */
5256 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
5257 reg = (modrm >> 3) & 7;
5258 mod = (modrm >> 6) & 3;
5259 if (reg >= 6)
5260 goto illegal_op;
5261 gen_op_movl_T0_seg(reg);
5262 if (mod == 3)
5263 ot = OT_WORD + dflag;
5264 else
5265 ot = OT_WORD;
5266 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
5267 break;
5268
5269 case 0x1b6: /* movzbS Gv, Eb */
5270 case 0x1b7: /* movzwS Gv, Ew */
5271 case 0x1be: /* movsbS Gv, Eb */
5272 case 0x1bf: /* movswS Gv, Ew */
5273 {
5274 int d_ot;
5275 /* d_ot is the size of destination */
5276 d_ot = dflag + OT_WORD;
5277 /* ot is the size of source */
5278 ot = (b & 1) + OT_BYTE;
5279 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
5280 reg = ((modrm >> 3) & 7) | rex_r;
5281 mod = (modrm >> 6) & 3;
5282 rm = (modrm & 7) | REX_B(s);
5283
5284 if (mod == 3) {
5285 gen_op_mov_TN_reg(ot, 0, rm);
5286 switch(ot | (b & 8)) {
5287 case OT_BYTE:
5288 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5289 break;
5290 case OT_BYTE | 8:
5291 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5292 break;
5293 case OT_WORD:
5294 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5295 break;
5296 default:
5297 case OT_WORD | 8:
5298 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5299 break;
5300 }
5301 gen_op_mov_reg_T0(d_ot, reg);
5302 } else {
5303 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5304 if (b & 8) {
5305 gen_op_lds_T0_A0(ot + s->mem_index);
5306 } else {
5307 gen_op_ldu_T0_A0(ot + s->mem_index);
5308 }
5309 gen_op_mov_reg_T0(d_ot, reg);
5310 }
5311 }
5312 break;
5313
5314 case 0x8d: /* lea */
5315 ot = dflag + OT_WORD;
5316 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
5317 mod = (modrm >> 6) & 3;
5318 if (mod == 3)
5319 goto illegal_op;
5320 reg = ((modrm >> 3) & 7) | rex_r;
5321 /* we must ensure that no segment is added */
5322 s->override = -1;
5323 val = s->addseg;
5324 s->addseg = 0;
5325 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5326 s->addseg = val;
5327 gen_op_mov_reg_A0(ot - OT_WORD, reg);
5328 break;
5329
5330 case 0xa0: /* mov EAX, Ov */
5331 case 0xa1:
5332 case 0xa2: /* mov Ov, EAX */
5333 case 0xa3:
5334 {
5335 target_ulong offset_addr;
5336
5337 if ((b & 1) == 0)
5338 ot = OT_BYTE;
5339 else
5340 ot = dflag + OT_WORD;
5341 #ifdef TARGET_X86_64
5342 if (s->aflag == 2) {
5343 offset_addr = cpu_ldq_code(cpu_single_env, s->pc);
5344 s->pc += 8;
5345 gen_op_movq_A0_im(offset_addr);
5346 } else
5347 #endif
5348 {
5349 if (s->aflag) {
5350 offset_addr = insn_get(s, OT_LONG);
5351 } else {
5352 offset_addr = insn_get(s, OT_WORD);
5353 }
5354 gen_op_movl_A0_im(offset_addr);
5355 }
5356 gen_add_A0_ds_seg(s);
5357 if ((b & 2) == 0) {
5358 gen_op_ld_T0_A0(ot + s->mem_index);
5359 gen_op_mov_reg_T0(ot, R_EAX);
5360 } else {
5361 gen_op_mov_TN_reg(ot, 0, R_EAX);
5362 gen_op_st_T0_A0(ot + s->mem_index);
5363 }
5364 }
5365 break;
5366 case 0xd7: /* xlat */
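/* xlat: AL = [seg:rBX + zero-extended AL] */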
5367 #ifdef TARGET_X86_64
5368 if (s->aflag == 2) {
5369 gen_op_movq_A0_reg(R_EBX);
5370 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5371 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5372 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5373 } else
5374 #endif
5375 {
5376 gen_op_movl_A0_reg(R_EBX);
5377 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5378 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5379 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5380 if (s->aflag == 0)
5381 gen_op_andl_A0_ffff();
5382 else
5383 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
5384 }
5385 gen_add_A0_ds_seg(s);
5386 gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
5387 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
5388 break;
5389 case 0xb0 ... 0xb7: /* mov R, Ib */
5390 val = insn_get(s, OT_BYTE);
5391 gen_op_movl_T0_im(val);
5392 gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
5393 break;
5394 case 0xb8 ... 0xbf: /* mov R, Iv */
5395 #ifdef TARGET_X86_64
5396 if (dflag == 2) {
5397 uint64_t tmp;
5398 /* 64 bit case */
5399 tmp = cpu_ldq_code(cpu_single_env, s->pc);
5400 s->pc += 8;
5401 reg = (b & 7) | REX_B(s);
5402 gen_movtl_T0_im(tmp);
5403 gen_op_mov_reg_T0(OT_QUAD, reg);
5404 } else
5405 #endif
5406 {
5407 ot = dflag ? OT_LONG : OT_WORD;
5408 val = insn_get(s, ot);
5409 reg = (b & 7) | REX_B(s);
5410 gen_op_movl_T0_im(val);
5411 gen_op_mov_reg_T0(ot, reg);
5412 }
5413 break;
5414
5415 case 0x91 ... 0x97: /* xchg R, EAX */
5416 do_xchg_reg_eax:
5417 ot = dflag + OT_WORD;
5418 reg = (b & 7) | REX_B(s);
5419 rm = R_EAX;
5420 goto do_xchg_reg;
5421 case 0x86:
5422 case 0x87: /* xchg Ev, Gv */
5423 if ((b & 1) == 0)
5424 ot = OT_BYTE;
5425 else
5426 ot = dflag + OT_WORD;
5427 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
5428 reg = ((modrm >> 3) & 7) | rex_r;
5429 mod = (modrm >> 6) & 3;
5430 if (mod == 3) {
5431 rm = (modrm & 7) | REX_B(s);
5432 do_xchg_reg:
5433 gen_op_mov_TN_reg(ot, 0, reg);
5434 gen_op_mov_TN_reg(ot, 1, rm);
5435 gen_op_mov_reg_T0(ot, rm);
5436 gen_op_mov_reg_T1(ot, reg);
5437 } else {
5438 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5439 gen_op_mov_TN_reg(ot, 0, reg);
5440 /* for xchg, lock is implicit */
5441 if (!(prefixes & PREFIX_LOCK))
5442 gen_helper_lock();
5443 gen_op_ld_T1_A0(ot + s->mem_index);
5444 gen_op_st_T0_A0(ot + s->mem_index);
5445 if (!(prefixes & PREFIX_LOCK))
5446 gen_helper_unlock();
5447 gen_op_mov_reg_T1(ot, reg);
5448 }
5449 break;
5450 case 0xc4: /* les Gv */
5451 if (CODE64(s))
5452 goto illegal_op;
5453 op = R_ES;
5454 goto do_lxx;
5455 case 0xc5: /* lds Gv */
5456 if (CODE64(s))
5457 goto illegal_op;
5458 op = R_DS;
5459 goto do_lxx;
5460 case 0x1b2: /* lss Gv */
5461 op = R_SS;
5462 goto do_lxx;
5463 case 0x1b4: /* lfs Gv */
5464 op = R_FS;
5465 goto do_lxx;
5466 case 0x1b5: /* lgs Gv */
5467 op = R_GS;
5468 do_lxx:
5469 ot = dflag ? OT_LONG : OT_WORD;
5470 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
5471 reg = ((modrm >> 3) & 7) | rex_r;
5472 mod = (modrm >> 6) & 3;
5473 if (mod == 3)
5474 goto illegal_op;
5475 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5476 gen_op_ld_T1_A0(ot + s->mem_index);
5477 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
5478 /* load the segment first to handle exceptions properly */
5479 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
5480 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5481 /* then put the data */
5482 gen_op_mov_reg_T1(ot, reg);
5483 if (s->is_jmp) {
5484 gen_jmp_im(s->pc - s->cs_base);
5485 gen_eob(s);
5486 }
5487 break;
5488
5489 /************************/
5490 /* shifts */
5491 case 0xc0:
5492 case 0xc1:
5493 /* shift Ev,Ib */
5494 shift = 2;
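/* 'shift' selects where the count comes from: 2 = an imm8 fetched
   below (C0/C1), 1 = a constant count of one (D0/D1), 0 = the CL
   register (D2/D3). */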
5495 grp2:
5496 {
5497 if ((b & 1) == 0)
5498 ot = OT_BYTE;
5499 else
5500 ot = dflag + OT_WORD;
5501
5502 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
5503 mod = (modrm >> 6) & 3;
5504 op = (modrm >> 3) & 7;
5505
5506 if (mod != 3) {
5507 if (shift == 2) {
5508 s->rip_offset = 1;
5509 }
5510 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5511 opreg = OR_TMP0;
5512 } else {
5513 opreg = (modrm & 7) | REX_B(s);
5514 }
5515
5516 /* a count in CL needs the generic helper; a known count uses the simpler gen_shifti */
5517 if (shift == 0) {
5518 gen_shift(s, op, ot, opreg, OR_ECX);
5519 } else {
5520 if (shift == 2) {
5521 shift = cpu_ldub_code(cpu_single_env, s->pc++);
5522 }
5523 gen_shifti(s, op, ot, opreg, shift);
5524 }
5525 }
5526 break;
5527 case 0xd0:
5528 case 0xd1:
5529 /* shift Ev,1 */
5530 shift = 1;
5531 goto grp2;
5532 case 0xd2:
5533 case 0xd3:
5534 /* shift Ev,cl */
5535 shift = 0;
5536 goto grp2;
5537
5538 case 0x1a4: /* shld imm */
5539 op = 0;
5540 shift = 1;
5541 goto do_shiftd;
5542 case 0x1a5: /* shld cl */
5543 op = 0;
5544 shift = 0;
5545 goto do_shiftd;
5546 case 0x1ac: /* shrd imm */
5547 op = 1;
5548 shift = 1;
5549 goto do_shiftd;
5550 case 0x1ad: /* shrd cl */
5551 op = 1;
5552 shift = 0;
5553 do_shiftd:
5554 ot = dflag + OT_WORD;
5555 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
5556 mod = (modrm >> 6) & 3;
5557 rm = (modrm & 7) | REX_B(s);
5558 reg = ((modrm >> 3) & 7) | rex_r;
5559 if (mod != 3) {
5560 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5561 opreg = OR_TMP0;
5562 } else {
5563 opreg = rm;
5564 }
5565 gen_op_mov_TN_reg(ot, 1, reg);
5566
5567 if (shift) {
5568 val = cpu_ldub_code(cpu_single_env, s->pc++);
5569 tcg_gen_movi_tl(cpu_T3, val);
5570 } else {
5571 tcg_gen_mov_tl(cpu_T3, cpu_regs[R_ECX]);
5572 }
5573 gen_shiftd_rm_T1_T3(s, ot, opreg, op);
5574 break;
5575
5576 /************************/
5577 /* floats */
5578 case 0xd8 ... 0xdf:
5579 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5580 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5581 /* XXX: what should happen on an illegal op? */
5582 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5583 break;
5584 }
5585 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
5586 mod = (modrm >> 6) & 3;
5587 rm = modrm & 7;
5588 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
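/* op packs the low 3 bits of the D8..DF opcode with the modrm reg
   field. E.g. DC /4 (fsub m64fp) gives op = (4 << 3) | 4 = 0x24,
   so op >> 4 = 2 selects the 64-bit memory form below and
   op & 7 = 4 selects the subtract operation. */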
5589 if (mod != 3) {
5590 /* memory op */
5591 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5592 switch(op) {
5593 case 0x00 ... 0x07: /* fxxxs */
5594 case 0x10 ... 0x17: /* fixxxl */
5595 case 0x20 ... 0x27: /* fxxxl */
5596 case 0x30 ... 0x37: /* fixxx */
5597 {
5598 int op1;
5599 op1 = op & 7;
5600
5601 switch(op >> 4) {
5602 case 0:
5603 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5604 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5605 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
5606 break;
5607 case 1:
5608 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5609 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5610 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5611 break;
5612 case 2:
5613 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5614 (s->mem_index >> 2) - 1);
5615 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
5616 break;
5617 case 3:
5618 default:
5619 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5620 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5621 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5622 break;
5623 }
5624
5625 gen_helper_fp_arith_ST0_FT0(op1);
5626 if (op1 == 3) {
5627 /* fcomp needs pop */
5628 gen_helper_fpop(cpu_env);
5629 }
5630 }
5631 break;
5632 case 0x08: /* flds */
5633 case 0x0a: /* fsts */
5634 case 0x0b: /* fstps */
5635 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5636 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5637 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5638 switch(op & 7) {
5639 case 0:
5640 switch(op >> 4) {
5641 case 0:
5642 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5643 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5644 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
5645 break;
5646 case 1:
5647 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5648 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5649 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5650 break;
5651 case 2:
5652 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5653 (s->mem_index >> 2) - 1);
5654 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
5655 break;
5656 case 3:
5657 default:
5658 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5659 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5660 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5661 break;
5662 }
5663 break;
5664 case 1:
5665 /* XXX: the corresponding CPUID bit (SSE3, for fisttp) must be tested! */
5666 switch(op >> 4) {
5667 case 1:
5668 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
5669 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5670 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5671 break;
5672 case 2:
5673 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
5674 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5675 (s->mem_index >> 2) - 1);
5676 break;
5677 case 3:
5678 default:
5679 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
5680 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5681 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5682 break;
5683 }
5684 gen_helper_fpop(cpu_env);
5685 break;
5686 default:
5687 switch(op >> 4) {
5688 case 0:
5689 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
5690 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5691 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5692 break;
5693 case 1:
5694 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
5695 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5696 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5697 break;
5698 case 2:
5699 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
5700 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5701 (s->mem_index >> 2) - 1);
5702 break;
5703 case 3:
5704 default:
5705 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
5706 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5707 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5708 break;
5709 }
5710 if ((op & 7) == 3)
5711 gen_helper_fpop(cpu_env);
5712 break;
5713 }
5714 break;
5715 case 0x0c: /* fldenv mem */
5716 if (s->cc_op != CC_OP_DYNAMIC)
5717 gen_op_set_cc_op(s->cc_op);
5718 gen_jmp_im(pc_start - s->cs_base);
5719 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5720 break;
5721 case 0x0d: /* fldcw mem */
5722 gen_op_ld_T0_A0(OT_WORD + s->mem_index);
5723 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5724 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
5725 break;
5726 case 0x0e: /* fnstenv mem */
5727 if (s->cc_op != CC_OP_DYNAMIC)
5728 gen_op_set_cc_op(s->cc_op);
5729 gen_jmp_im(pc_start - s->cs_base);
5730 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5731 break;
5732 case 0x0f: /* fnstcw mem */
5733 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
5734 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5735 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5736 break;
5737 case 0x1d: /* fldt mem */
5738 if (s->cc_op != CC_OP_DYNAMIC)
5739 gen_op_set_cc_op(s->cc_op);
5740 gen_jmp_im(pc_start - s->cs_base);
5741 gen_helper_fldt_ST0(cpu_env, cpu_A0);
5742 break;
5743 case 0x1f: /* fstpt mem */
5744 if (s->cc_op != CC_OP_DYNAMIC)
5745 gen_op_set_cc_op(s->cc_op);
5746 gen_jmp_im(pc_start - s->cs_base);
5747 gen_helper_fstt_ST0(cpu_env, cpu_A0);
5748 gen_helper_fpop(cpu_env);
5749 break;
5750 case 0x2c: /* frstor mem */
5751 if (s->cc_op != CC_OP_DYNAMIC)
5752 gen_op_set_cc_op(s->cc_op);
5753 gen_jmp_im(pc_start - s->cs_base);
5754 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5755 break;
5756 case 0x2e: /* fnsave mem */
5757 if (s->cc_op != CC_OP_DYNAMIC)
5758 gen_op_set_cc_op(s->cc_op);
5759 gen_jmp_im(pc_start - s->cs_base);
5760 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5761 break;
5762 case 0x2f: /* fnstsw mem */
5763 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
5764 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5765 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5766 break;
5767 case 0x3c: /* fbld */
5768 if (s->cc_op != CC_OP_DYNAMIC)
5769 gen_op_set_cc_op(s->cc_op);
5770 gen_jmp_im(pc_start - s->cs_base);
5771 gen_helper_fbld_ST0(cpu_env, cpu_A0);
5772 break;
5773 case 0x3e: /* fbstp */
5774 if (s->cc_op != CC_OP_DYNAMIC)
5775 gen_op_set_cc_op(s->cc_op);
5776 gen_jmp_im(pc_start - s->cs_base);
5777 gen_helper_fbst_ST0(cpu_env, cpu_A0);
5778 gen_helper_fpop(cpu_env);
5779 break;
5780 case 0x3d: /* fildll */
5781 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5782 (s->mem_index >> 2) - 1);
5783 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
5784 break;
5785 case 0x3f: /* fistpll */
5786 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
5787 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5788 (s->mem_index >> 2) - 1);
5789 gen_helper_fpop(cpu_env);
5790 break;
5791 default:
5792 goto illegal_op;
5793 }
5794 } else {
5795 /* register float ops */
5796 opreg = rm;
5797
5798 switch(op) {
5799 case 0x08: /* fld sti */
5800 gen_helper_fpush(cpu_env);
5801 gen_helper_fmov_ST0_STN(cpu_env,
5802 tcg_const_i32((opreg + 1) & 7));
5803 break;
5804 case 0x09: /* fxchg sti */
5805 case 0x29: /* fxchg4 sti, undocumented op */
5806 case 0x39: /* fxchg7 sti, undocumented op */
5807 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
5808 break;
5809 case 0x0a: /* grp d9/2 */
5810 switch(rm) {
5811 case 0: /* fnop */
5812 /* check exceptions (FreeBSD FPU probe) */
5813 if (s->cc_op != CC_OP_DYNAMIC)
5814 gen_op_set_cc_op(s->cc_op);
5815 gen_jmp_im(pc_start - s->cs_base);
5816 gen_helper_fwait(cpu_env);
5817 break;
5818 default:
5819 goto illegal_op;
5820 }
5821 break;
5822 case 0x0c: /* grp d9/4 */
5823 switch(rm) {
5824 case 0: /* fchs */
5825 gen_helper_fchs_ST0(cpu_env);
5826 break;
5827 case 1: /* fabs */
5828 gen_helper_fabs_ST0(cpu_env);
5829 break;
5830 case 4: /* ftst */
5831 gen_helper_fldz_FT0(cpu_env);
5832 gen_helper_fcom_ST0_FT0(cpu_env);
5833 break;
5834 case 5: /* fxam */
5835 gen_helper_fxam_ST0(cpu_env);
5836 break;
5837 default:
5838 goto illegal_op;
5839 }
5840 break;
5841 case 0x0d: /* grp d9/5 */
5842 {
5843 switch(rm) {
5844 case 0:
5845 gen_helper_fpush(cpu_env);
5846 gen_helper_fld1_ST0(cpu_env);
5847 break;
5848 case 1:
5849 gen_helper_fpush(cpu_env);
5850 gen_helper_fldl2t_ST0(cpu_env);
5851 break;
5852 case 2:
5853 gen_helper_fpush(cpu_env);
5854 gen_helper_fldl2e_ST0(cpu_env);
5855 break;
5856 case 3:
5857 gen_helper_fpush(cpu_env);
5858 gen_helper_fldpi_ST0(cpu_env);
5859 break;
5860 case 4:
5861 gen_helper_fpush(cpu_env);
5862 gen_helper_fldlg2_ST0(cpu_env);
5863 break;
5864 case 5:
5865 gen_helper_fpush(cpu_env);
5866 gen_helper_fldln2_ST0(cpu_env);
5867 break;
5868 case 6:
5869 gen_helper_fpush(cpu_env);
5870 gen_helper_fldz_ST0(cpu_env);
5871 break;
5872 default:
5873 goto illegal_op;
5874 }
5875 }
5876 break;
5877 case 0x0e: /* grp d9/6 */
5878 switch(rm) {
5879 case 0: /* f2xm1 */
5880 gen_helper_f2xm1(cpu_env);
5881 break;
5882 case 1: /* fyl2x */
5883 gen_helper_fyl2x(cpu_env);
5884 break;
5885 case 2: /* fptan */
5886 gen_helper_fptan(cpu_env);
5887 break;
5888 case 3: /* fpatan */
5889 gen_helper_fpatan(cpu_env);
5890 break;
5891 case 4: /* fxtract */
5892 gen_helper_fxtract(cpu_env);
5893 break;
5894 case 5: /* fprem1 */
5895 gen_helper_fprem1(cpu_env);
5896 break;
5897 case 6: /* fdecstp */
5898 gen_helper_fdecstp(cpu_env);
5899 break;
5900 default:
5901 case 7: /* fincstp */
5902 gen_helper_fincstp(cpu_env);
5903 break;
5904 }
5905 break;
5906 case 0x0f: /* grp d9/7 */
5907 switch(rm) {
5908 case 0: /* fprem */
5909 gen_helper_fprem(cpu_env);
5910 break;
5911 case 1: /* fyl2xp1 */
5912 gen_helper_fyl2xp1(cpu_env);
5913 break;
5914 case 2: /* fsqrt */
5915 gen_helper_fsqrt(cpu_env);
5916 break;
5917 case 3: /* fsincos */
5918 gen_helper_fsincos(cpu_env);
5919 break;
5920 case 5: /* fscale */
5921 gen_helper_fscale(cpu_env);
5922 break;
5923 case 4: /* frndint */
5924 gen_helper_frndint(cpu_env);
5925 break;
5926 case 6: /* fsin */
5927 gen_helper_fsin(cpu_env);
5928 break;
5929 default:
5930 case 7: /* fcos */
5931 gen_helper_fcos(cpu_env);
5932 break;
5933 }
5934 break;
5935 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5936 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5937 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5938 {
5939 int op1;
5940
5941 op1 = op & 7;
5942 if (op >= 0x20) {
5943 gen_helper_fp_arith_STN_ST0(op1, opreg);
5944 if (op >= 0x30)
5945 gen_helper_fpop(cpu_env);
5946 } else {
5947 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5948 gen_helper_fp_arith_ST0_FT0(op1);
5949 }
5950 }
5951 break;
5952 case 0x02: /* fcom */
5953 case 0x22: /* fcom2, undocumented op */
5954 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5955 gen_helper_fcom_ST0_FT0(cpu_env);
5956 break;
5957 case 0x03: /* fcomp */
5958 case 0x23: /* fcomp3, undocumented op */
5959 case 0x32: /* fcomp5, undocumented op */
5960 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5961 gen_helper_fcom_ST0_FT0(cpu_env);
5962 gen_helper_fpop(cpu_env);
5963 break;
5964 case 0x15: /* da/5 */
5965 switch(rm) {
5966 case 1: /* fucompp */
5967 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
5968 gen_helper_fucom_ST0_FT0(cpu_env);
5969 gen_helper_fpop(cpu_env);
5970 gen_helper_fpop(cpu_env);
5971 break;
5972 default:
5973 goto illegal_op;
5974 }
5975 break;
5976 case 0x1c:
5977 switch(rm) {
5978 case 0: /* feni (287 only, just do nop here) */
5979 break;
5980 case 1: /* fdisi (287 only, just do nop here) */
5981 break;
5982 case 2: /* fclex */
5983 gen_helper_fclex(cpu_env);
5984 break;
5985 case 3: /* fninit */
5986 gen_helper_fninit(cpu_env);
5987 break;
5988 case 4: /* fsetpm (287 only, just do nop here) */
5989 break;
5990 default:
5991 goto illegal_op;
5992 }
5993 break;
5994 case 0x1d: /* fucomi */
5995 if (s->cc_op != CC_OP_DYNAMIC)
5996 gen_op_set_cc_op(s->cc_op);
5997 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5998 gen_helper_fucomi_ST0_FT0(cpu_env);
5999 s->cc_op = CC_OP_EFLAGS;
6000 break;
6001 case 0x1e: /* fcomi */
6002 if (s->cc_op != CC_OP_DYNAMIC)
6003 gen_op_set_cc_op(s->cc_op);
6004 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6005 gen_helper_fcomi_ST0_FT0(cpu_env);
6006 s->cc_op = CC_OP_EFLAGS;
6007 break;
6008 case 0x28: /* ffree sti */
6009 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6010 break;
6011 case 0x2a: /* fst sti */
6012 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6013 break;
6014 case 0x2b: /* fstp sti */
6015 case 0x0b: /* fstp1 sti, undocumented op */
6016 case 0x3a: /* fstp8 sti, undocumented op */
6017 case 0x3b: /* fstp9 sti, undocumented op */
6018 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6019 gen_helper_fpop(cpu_env);
6020 break;
6021 case 0x2c: /* fucom st(i) */
6022 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6023 gen_helper_fucom_ST0_FT0(cpu_env);
6024 break;
6025 case 0x2d: /* fucomp st(i) */
6026 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6027 gen_helper_fucom_ST0_FT0(cpu_env);
6028 gen_helper_fpop(cpu_env);
6029 break;
6030 case 0x33: /* de/3 */
6031 switch(rm) {
6032 case 1: /* fcompp */
6033 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6034 gen_helper_fcom_ST0_FT0(cpu_env);
6035 gen_helper_fpop(cpu_env);
6036 gen_helper_fpop(cpu_env);
6037 break;
6038 default:
6039 goto illegal_op;
6040 }
6041 break;
6042 case 0x38: /* ffreep sti, undocumented op */
6043 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6044 gen_helper_fpop(cpu_env);
6045 break;
6046 case 0x3c: /* df/4 */
6047 switch(rm) {
6048 case 0:
6049 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
6050 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6051 gen_op_mov_reg_T0(OT_WORD, R_EAX);
6052 break;
6053 default:
6054 goto illegal_op;
6055 }
6056 break;
6057 case 0x3d: /* fucomip */
6058 if (s->cc_op != CC_OP_DYNAMIC)
6059 gen_op_set_cc_op(s->cc_op);
6060 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6061 gen_helper_fucomi_ST0_FT0(cpu_env);
6062 gen_helper_fpop(cpu_env);
6063 s->cc_op = CC_OP_EFLAGS;
6064 break;
6065 case 0x3e: /* fcomip */
6066 if (s->cc_op != CC_OP_DYNAMIC)
6067 gen_op_set_cc_op(s->cc_op);
6068 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6069 gen_helper_fcomi_ST0_FT0(cpu_env);
6070 gen_helper_fpop(cpu_env);
6071 s->cc_op = CC_OP_EFLAGS;
6072 break;
6073 case 0x10 ... 0x13: /* fcmovxx */
6074 case 0x18 ... 0x1b:
6075 {
6076 int op1, l1;
6077 static const uint8_t fcmov_cc[8] = {
6078 (JCC_B << 1),
6079 (JCC_Z << 1),
6080 (JCC_BE << 1),
6081 (JCC_P << 1),
6082 };
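/* The two low opcode bits pick the B/Z/BE/P condition; bit 3 of op
   separates the DA (fcmovcc) group from the DB (fcmovncc) group
   and inverts the jcc sense, so the branch skips the fmov below
   whenever the fcmov condition is false. */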
6083 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
6084 l1 = gen_new_label();
6085 gen_jcc1(s, s->cc_op, op1, l1);
6086 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
6087 gen_set_label(l1);
6088 }
6089 break;
6090 default:
6091 goto illegal_op;
6092 }
6093 }
6094 break;
6095 /************************/
6096 /* string ops */
6097
6098 case 0xa4: /* movsS */
6099 case 0xa5:
6100 if ((b & 1) == 0)
6101 ot = OT_BYTE;
6102 else
6103 ot = dflag + OT_WORD;
6104
6105 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6106 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6107 } else {
6108 gen_movs(s, ot);
6109 }
6110 break;
6111
6112 case 0xaa: /* stosS */
6113 case 0xab:
6114 if ((b & 1) == 0)
6115 ot = OT_BYTE;
6116 else
6117 ot = dflag + OT_WORD;
6118
6119 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6120 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6121 } else {
6122 gen_stos(s, ot);
6123 }
6124 break;
6125 case 0xac: /* lodsS */
6126 case 0xad:
6127 if ((b & 1) == 0)
6128 ot = OT_BYTE;
6129 else
6130 ot = dflag + OT_WORD;
6131 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6132 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6133 } else {
6134 gen_lods(s, ot);
6135 }
6136 break;
6137 case 0xae: /* scasS */
6138 case 0xaf:
6139 if ((b & 1) == 0)
6140 ot = OT_BYTE;
6141 else
6142 ot = dflag + OT_WORD;
6143 if (prefixes & PREFIX_REPNZ) {
6144 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6145 } else if (prefixes & PREFIX_REPZ) {
6146 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6147 } else {
6148 gen_scas(s, ot);
6149 s->cc_op = CC_OP_SUBB + ot;
6150 }
6151 break;
6152
6153 case 0xa6: /* cmpsS */
6154 case 0xa7:
6155 if ((b & 1) == 0)
6156 ot = OT_BYTE;
6157 else
6158 ot = dflag + OT_WORD;
6159 if (prefixes & PREFIX_REPNZ) {
6160 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6161 } else if (prefixes & PREFIX_REPZ) {
6162 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6163 } else {
6164 gen_cmps(s, ot);
6165 s->cc_op = CC_OP_SUBB + ot;
6166 }
6167 break;
6168 case 0x6c: /* insS */
6169 case 0x6d:
6170 if ((b & 1) == 0)
6171 ot = OT_BYTE;
6172 else
6173 ot = dflag ? OT_LONG : OT_WORD;
6174 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6175 gen_op_andl_T0_ffff();
6176 gen_check_io(s, ot, pc_start - s->cs_base,
6177 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6178 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6179 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6180 } else {
6181 gen_ins(s, ot);
6182 if (use_icount) {
6183 gen_jmp(s, s->pc - s->cs_base);
6184 }
6185 }
6186 break;
6187 case 0x6e: /* outsS */
6188 case 0x6f:
6189 if ((b & 1) == 0)
6190 ot = OT_BYTE;
6191 else
6192 ot = dflag ? OT_LONG : OT_WORD;
6193 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6194 gen_op_andl_T0_ffff();
6195 gen_check_io(s, ot, pc_start - s->cs_base,
6196 svm_is_rep(prefixes) | 4);
6197 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6198 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6199 } else {
6200 gen_outs(s, ot);
6201 if (use_icount) {
6202 gen_jmp(s, s->pc - s->cs_base);
6203 }
6204 }
6205 break;
6206
6207 /************************/
6208 /* port I/O */
6209
6210 case 0xe4:
6211 case 0xe5:
6212 if ((b & 1) == 0)
6213 ot = OT_BYTE;
6214 else
6215 ot = dflag ? OT_LONG : OT_WORD;
6216 val = cpu_ldub_code(cpu_single_env, s->pc++);
6217 gen_op_movl_T0_im(val);
6218 gen_check_io(s, ot, pc_start - s->cs_base,
6219 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
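/* when icount is active, an I/O access must end the TB: bracket it
   with gen_io_start()/gen_io_end() and jump to the next insn so the
   instruction counter stays exact */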
6220 if (use_icount)
6221 gen_io_start();
6222 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6223 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6224 gen_op_mov_reg_T1(ot, R_EAX);
6225 if (use_icount) {
6226 gen_io_end();
6227 gen_jmp(s, s->pc - s->cs_base);
6228 }
6229 break;
6230 case 0xe6:
6231 case 0xe7:
6232 if ((b & 1) == 0)
6233 ot = OT_BYTE;
6234 else
6235 ot = dflag ? OT_LONG : OT_WORD;
6236 val = cpu_ldub_code(cpu_single_env, s->pc++);
6237 gen_op_movl_T0_im(val);
6238 gen_check_io(s, ot, pc_start - s->cs_base,
6239 svm_is_rep(prefixes));
6240 gen_op_mov_TN_reg(ot, 1, R_EAX);
6241
6242 if (use_icount)
6243 gen_io_start();
6244 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6245 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6246 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6247 if (use_icount) {
6248 gen_io_end();
6249 gen_jmp(s, s->pc - s->cs_base);
6250 }
6251 break;
6252 case 0xec:
6253 case 0xed:
6254 if ((b & 1) == 0)
6255 ot = OT_BYTE;
6256 else
6257 ot = dflag ? OT_LONG : OT_WORD;
6258 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6259 gen_op_andl_T0_ffff();
6260 gen_check_io(s, ot, pc_start - s->cs_base,
6261 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6262 if (use_icount)
6263 gen_io_start();
6264 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6265 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6266 gen_op_mov_reg_T1(ot, R_EAX);
6267 if (use_icount) {
6268 gen_io_end();
6269 gen_jmp(s, s->pc - s->cs_base);
6270 }
6271 break;
6272 case 0xee:
6273 case 0xef:
6274 if ((b & 1) == 0)
6275 ot = OT_BYTE;
6276 else
6277 ot = dflag ? OT_LONG : OT_WORD;
6278 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6279 gen_op_andl_T0_ffff();
6280 gen_check_io(s, ot, pc_start - s->cs_base,
6281 svm_is_rep(prefixes));
6282 gen_op_mov_TN_reg(ot, 1, R_EAX);
6283
6284 if (use_icount)
6285 gen_io_start();
6286 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6287 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6288 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6289 if (use_icount) {
6290 gen_io_end();
6291 gen_jmp(s, s->pc - s->cs_base);
6292 }
6293 break;
6294
6295 /************************/
6296 /* control */
6297 case 0xc2: /* ret im */
6298 val = cpu_ldsw_code(cpu_single_env, s->pc);
6299 s->pc += 2;
6300 gen_pop_T0(s);
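/* gen_pop_T0() only loads the return address; ESP is adjusted below
   by the size of that address (2 << dflag) plus the immediate count.
   In 64-bit mode the pop defaults to 8 bytes, hence the dflag fixup. */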
6301 if (CODE64(s) && s->dflag)
6302 s->dflag = 2;
6303 gen_stack_update(s, val + (2 << s->dflag));
6304 if (s->dflag == 0)
6305 gen_op_andl_T0_ffff();
6306 gen_op_jmp_T0();
6307 gen_eob(s);
6308 break;
6309 case 0xc3: /* ret */
6310 gen_pop_T0(s);
6311 gen_pop_update(s);
6312 if (s->dflag == 0)
6313 gen_op_andl_T0_ffff();
6314 gen_op_jmp_T0();
6315 gen_eob(s);
6316 break;
6317 case 0xca: /* lret im */
6318 val = cpu_ldsw_code(cpu_single_env, s->pc);
6319 s->pc += 2;
6320 do_lret:
6321 if (s->pe && !s->vm86) {
6322 if (s->cc_op != CC_OP_DYNAMIC)
6323 gen_op_set_cc_op(s->cc_op);
6324 gen_jmp_im(pc_start - s->cs_base);
6325 gen_helper_lret_protected(cpu_env, tcg_const_i32(s->dflag),
6326 tcg_const_i32(val));
6327 } else {
6328 gen_stack_A0(s);
6329 /* pop offset */
6330 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6331 if (s->dflag == 0)
6332 gen_op_andl_T0_ffff();
6333 /* NOTE: keeping EIP updated is not a problem in case of
6334 exception */
6335 gen_op_jmp_T0();
6336 /* pop selector */
6337 gen_op_addl_A0_im(2 << s->dflag);
6338 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6339 gen_op_movl_seg_T0_vm(R_CS);
6340 /* add stack offset */
6341 gen_stack_update(s, val + (4 << s->dflag));
6342 }
6343 gen_eob(s);
6344 break;
6345 case 0xcb: /* lret */
6346 val = 0;
6347 goto do_lret;
6348 case 0xcf: /* iret */
6349 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6350 if (!s->pe) {
6351 /* real mode */
6352 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
6353 s->cc_op = CC_OP_EFLAGS;
6354 } else if (s->vm86) {
6355 if (s->iopl != 3) {
6356 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6357 } else {
6358 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
6359 s->cc_op = CC_OP_EFLAGS;
6360 }
6361 } else {
6362 if (s->cc_op != CC_OP_DYNAMIC)
6363 gen_op_set_cc_op(s->cc_op);
6364 gen_jmp_im(pc_start - s->cs_base);
6365 gen_helper_iret_protected(cpu_env, tcg_const_i32(s->dflag),
6366 tcg_const_i32(s->pc - s->cs_base));
6367 s->cc_op = CC_OP_EFLAGS;
6368 }
6369 gen_eob(s);
6370 break;
6371 case 0xe8: /* call im */
6372 {
6373 if (dflag)
6374 tval = (int32_t)insn_get(s, OT_LONG);
6375 else
6376 tval = (int16_t)insn_get(s, OT_WORD);
6377 next_eip = s->pc - s->cs_base;
6378 tval += next_eip;
6379 if (s->dflag == 0)
6380 tval &= 0xffff;
6381 else if(!CODE64(s))
6382 tval &= 0xffffffff;
6383 gen_movtl_T0_im(next_eip);
6384 gen_push_T0(s);
6385 gen_jmp(s, tval);
6386 }
6387 break;
6388 case 0x9a: /* lcall im */
6389 {
6390 unsigned int selector, offset;
6391
6392 if (CODE64(s))
6393 goto illegal_op;
6394 ot = dflag ? OT_LONG : OT_WORD;
6395 offset = insn_get(s, ot);
6396 selector = insn_get(s, OT_WORD);
6397
6398 gen_op_movl_T0_im(selector);
6399 gen_op_movl_T1_imu(offset);
6400 }
6401 goto do_lcall;
6402 case 0xe9: /* jmp im */
6403 if (dflag)
6404 tval = (int32_t)insn_get(s, OT_LONG);
6405 else
6406 tval = (int16_t)insn_get(s, OT_WORD);
6407 tval += s->pc - s->cs_base;
6408 if (s->dflag == 0)
6409 tval &= 0xffff;
6410 else if(!CODE64(s))
6411 tval &= 0xffffffff;
6412 gen_jmp(s, tval);
6413 break;
6414 case 0xea: /* ljmp im */
6415 {
6416 unsigned int selector, offset;
6417
6418 if (CODE64(s))
6419 goto illegal_op;
6420 ot = dflag ? OT_LONG : OT_WORD;
6421 offset = insn_get(s, ot);
6422 selector = insn_get(s, OT_WORD);
6423
6424 gen_op_movl_T0_im(selector);
6425 gen_op_movl_T1_imu(offset);
6426 }
6427 goto do_ljmp;
6428 case 0xeb: /* jmp Jb */
6429 tval = (int8_t)insn_get(s, OT_BYTE);
6430 tval += s->pc - s->cs_base;
6431 if (s->dflag == 0)
6432 tval &= 0xffff;
6433 gen_jmp(s, tval);
6434 break;
6435 case 0x70 ... 0x7f: /* jcc Jb */
6436 tval = (int8_t)insn_get(s, OT_BYTE);
6437 goto do_jcc;
6438 case 0x180 ... 0x18f: /* jcc Jv */
6439 if (dflag) {
6440 tval = (int32_t)insn_get(s, OT_LONG);
6441 } else {
6442 tval = (int16_t)insn_get(s, OT_WORD);
6443 }
6444 do_jcc:
6445 next_eip = s->pc - s->cs_base;
6446 tval += next_eip;
6447 if (s->dflag == 0)
6448 tval &= 0xffff;
6449 gen_jcc(s, b, tval, next_eip);
6450 break;
6451
6452 case 0x190 ... 0x19f: /* setcc Gv */
6453 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
6454 gen_setcc(s, b);
6455 gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1);
6456 break;
6457 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6458 {
6459 int l1;
6460 TCGv t0;
6461
6462 ot = dflag + OT_WORD;
6463 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
6464 reg = ((modrm >> 3) & 7) | rex_r;
6465 mod = (modrm >> 6) & 3;
6466 t0 = tcg_temp_local_new();
6467 if (mod != 3) {
6468 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6469 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
6470 } else {
6471 rm = (modrm & 7) | REX_B(s);
6472 gen_op_mov_v_reg(ot, t0, rm);
6473 }
6474 #ifdef TARGET_X86_64
6475 if (ot == OT_LONG) {
6476 /* XXX: specific Intel behaviour? */
6477 l1 = gen_new_label();
6478 gen_jcc1(s, s->cc_op, b ^ 1, l1);
6479 tcg_gen_mov_tl(cpu_regs[reg], t0);
6480 gen_set_label(l1);
6481 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_regs[reg]);
6482 } else
6483 #endif
6484 {
6485 l1 = gen_new_label();
6486 gen_jcc1(s, s->cc_op, b ^ 1, l1);
6487 gen_op_mov_reg_v(ot, reg, t0);
6488 gen_set_label(l1);
6489 }
6490 tcg_temp_free(t0);
6491 }
6492 break;
6493
6494 /************************/
6495 /* flags */
6496 case 0x9c: /* pushf */
6497 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6498 if (s->vm86 && s->iopl != 3) {
6499 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6500 } else {
6501 if (s->cc_op != CC_OP_DYNAMIC)
6502 gen_op_set_cc_op(s->cc_op);
6503 gen_helper_read_eflags(cpu_T[0], cpu_env);
6504 gen_push_T0(s);
6505 }
6506 break;
6507 case 0x9d: /* popf */
6508 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6509 if (s->vm86 && s->iopl != 3) {
6510 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6511 } else {
6512 gen_pop_T0(s);
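/* which EFLAGS bits popf may modify depends on privilege: CPL 0 can
   change IF and IOPL, CPL <= IOPL can change IF only, anything else
   can change neither; a 16-bit operand size further restricts the
   write to the low word */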
6513 if (s->cpl == 0) {
6514 if (s->dflag) {
6515 gen_helper_write_eflags(cpu_env, cpu_T[0],
6516 tcg_const_i32((TF_MASK | AC_MASK |
6517 ID_MASK | NT_MASK |
6518 IF_MASK |
6519 IOPL_MASK)));
6520 } else {
6521 gen_helper_write_eflags(cpu_env, cpu_T[0],
6522 tcg_const_i32((TF_MASK | AC_MASK |
6523 ID_MASK | NT_MASK |
6524 IF_MASK | IOPL_MASK)
6525 & 0xffff));
6526 }
6527 } else {
6528 if (s->cpl <= s->iopl) {
6529 if (s->dflag) {
6530 gen_helper_write_eflags(cpu_env, cpu_T[0],
6531 tcg_const_i32((TF_MASK |
6532 AC_MASK |
6533 ID_MASK |
6534 NT_MASK |
6535 IF_MASK)));
6536 } else {
6537 gen_helper_write_eflags(cpu_env, cpu_T[0],
6538 tcg_const_i32((TF_MASK |
6539 AC_MASK |
6540 ID_MASK |
6541 NT_MASK |
6542 IF_MASK)
6543 & 0xffff));
6544 }
6545 } else {
6546 if (s->dflag) {
6547 gen_helper_write_eflags(cpu_env, cpu_T[0],
6548 tcg_const_i32((TF_MASK | AC_MASK |
6549 ID_MASK | NT_MASK)));
6550 } else {
6551 gen_helper_write_eflags(cpu_env, cpu_T[0],
6552 tcg_const_i32((TF_MASK | AC_MASK |
6553 ID_MASK | NT_MASK)
6554 & 0xffff));
6555 }
6556 }
6557 }
6558 gen_pop_update(s);
6559 s->cc_op = CC_OP_EFLAGS;
6560 /* abort translation because TF/AC flag may change */
6561 gen_jmp_im(s->pc - s->cs_base);
6562 gen_eob(s);
6563 }
6564 break;
6565 case 0x9e: /* sahf */
6566 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6567 goto illegal_op;
6568 gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
6569 if (s->cc_op != CC_OP_DYNAMIC)
6570 gen_op_set_cc_op(s->cc_op);
6571 gen_compute_eflags(cpu_cc_src);
6572 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6573 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6574 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6575 s->cc_op = CC_OP_EFLAGS;
6576 break;
6577 case 0x9f: /* lahf */
6578 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6579 goto illegal_op;
6580 if (s->cc_op != CC_OP_DYNAMIC)
6581 gen_op_set_cc_op(s->cc_op);
6582 gen_compute_eflags(cpu_T[0]);
6583 /* Note: gen_compute_eflags() only gives the condition codes */
6584 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02);
6585 gen_op_mov_reg_T0(OT_BYTE, R_AH);
6586 break;
6587 case 0xf5: /* cmc */
6588 if (s->cc_op != CC_OP_DYNAMIC)
6589 gen_op_set_cc_op(s->cc_op);
6590 gen_compute_eflags(cpu_cc_src);
6591 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6592 s->cc_op = CC_OP_EFLAGS;
6593 break;
6594 case 0xf8: /* clc */
6595 if (s->cc_op != CC_OP_DYNAMIC)
6596 gen_op_set_cc_op(s->cc_op);
6597 gen_compute_eflags(cpu_cc_src);
6598 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6599 s->cc_op = CC_OP_EFLAGS;
6600 break;
6601 case 0xf9: /* stc */
6602 if (s->cc_op != CC_OP_DYNAMIC)
6603 gen_op_set_cc_op(s->cc_op);
6604 gen_compute_eflags(cpu_cc_src);
6605 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6606 s->cc_op = CC_OP_EFLAGS;
6607 break;
6608 case 0xfc: /* cld */
6609 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6610 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6611 break;
6612 case 0xfd: /* std */
6613 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6614 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6615 break;
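/* df is stored as +1/-1 rather than as a flag bit, so the string-op
   code can shift it by the operand size to get the per-iteration
   increment directly */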
6616
6617 /************************/
6618 /* bit operations */
6619 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6620 ot = dflag + OT_WORD;
6621 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
6622 op = (modrm >> 3) & 7;
6623 mod = (modrm >> 6) & 3;
6624 rm = (modrm & 7) | REX_B(s);
6625 if (mod != 3) {
6626 s->rip_offset = 1;
6627 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6628 gen_op_ld_T0_A0(ot + s->mem_index);
6629 } else {
6630 gen_op_mov_TN_reg(ot, 0, rm);
6631 }
6632 /* load shift */
6633 val = cpu_ldub_code(cpu_single_env, s->pc++);
6634 gen_op_movl_T1_im(val);
6635 if (op < 4)
6636 goto illegal_op;
6637 op -= 4;
6638 goto bt_op;
6639 case 0x1a3: /* bt Gv, Ev */
6640 op = 0;
6641 goto do_btx;
6642 case 0x1ab: /* bts */
6643 op = 1;
6644 goto do_btx;
6645 case 0x1b3: /* btr */
6646 op = 2;
6647 goto do_btx;
6648 case 0x1bb: /* btc */
6649 op = 3;
6650 do_btx:
6651 ot = dflag + OT_WORD;
6652 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
6653 reg = ((modrm >> 3) & 7) | rex_r;
6654 mod = (modrm >> 6) & 3;
6655 rm = (modrm & 7) | REX_B(s);
6656 gen_op_mov_TN_reg(OT_LONG, 1, reg);
6657 if (mod != 3) {
6658 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6659 /* specific case: we need to add a displacement */
6660 gen_exts(ot, cpu_T[1]);
6661 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6662 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6663 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6664 gen_op_ld_T0_A0(ot + s->mem_index);
6665 } else {
6666 gen_op_mov_TN_reg(ot, 0, rm);
6667 }
6668 bt_op:
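/* T0 = operand, T1 = bit offset; reduce the offset modulo the operand
   width (for memory operands the extra displacement was already folded
   into A0 above), shift the selected bit down so it lands in CF via
   CC_OP_SARB + ot, then apply the bts/btr/btc update to T0 */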
6669 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6670 switch(op) {
6671 case 0:
6672 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
6673 tcg_gen_movi_tl(cpu_cc_dst, 0);
6674 break;
6675 case 1:
6676 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6677 tcg_gen_movi_tl(cpu_tmp0, 1);
6678 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6679 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6680 break;
6681 case 2:
6682 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6683 tcg_gen_movi_tl(cpu_tmp0, 1);
6684 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6685 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6686 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6687 break;
6688 default:
6689 case 3:
6690 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6691 tcg_gen_movi_tl(cpu_tmp0, 1);
6692 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6693 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6694 break;
6695 }
6696 s->cc_op = CC_OP_SARB + ot;
6697 if (op != 0) {
6698 if (mod != 3)
6699 gen_op_st_T0_A0(ot + s->mem_index);
6700 else
6701 gen_op_mov_reg_T0(ot, rm);
6702 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6703 tcg_gen_movi_tl(cpu_cc_dst, 0);
6704 }
6705 break;
6706 case 0x1bc: /* bsf */
6707 case 0x1bd: /* bsr */
6708 {
6709 int label1;
6710 TCGv t0;
6711
6712 ot = dflag + OT_WORD;
6713 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
6714 reg = ((modrm >> 3) & 7) | rex_r;
6715 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
6716 gen_extu(ot, cpu_T[0]);
6717 t0 = tcg_temp_local_new();
6718 tcg_gen_mov_tl(t0, cpu_T[0]);
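/* with a repz prefix and the ABM cpuid bit, 0f bd decodes as lzcnt
   instead of bsr; in the plain bsf/bsr path ZF is driven through
   cc_dst and the destination is left unchanged when the source is 0 */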
6719 if ((b & 1) && (prefixes & PREFIX_REPZ) &&
6720 (s->cpuid_ext3_features & CPUID_EXT3_ABM)) {
6721 switch(ot) {
6722 case OT_WORD: gen_helper_lzcnt(cpu_T[0], t0,
6723 tcg_const_i32(16)); break;
6724 case OT_LONG: gen_helper_lzcnt(cpu_T[0], t0,
6725 tcg_const_i32(32)); break;
6726 case OT_QUAD: gen_helper_lzcnt(cpu_T[0], t0,
6727 tcg_const_i32(64)); break;
6728 }
6729 gen_op_mov_reg_T0(ot, reg);
6730 } else {
6731 label1 = gen_new_label();
6732 tcg_gen_movi_tl(cpu_cc_dst, 0);
6733 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
6734 if (b & 1) {
6735 gen_helper_bsr(cpu_T[0], t0);
6736 } else {
6737 gen_helper_bsf(cpu_T[0], t0);
6738 }
6739 gen_op_mov_reg_T0(ot, reg);
6740 tcg_gen_movi_tl(cpu_cc_dst, 1);
6741 gen_set_label(label1);
6742 tcg_gen_discard_tl(cpu_cc_src);
6743 s->cc_op = CC_OP_LOGICB + ot;
6744 }
6745 tcg_temp_free(t0);
6746 }
6747 break;
6748 /************************/
6749 /* bcd */
6750 case 0x27: /* daa */
6751 if (CODE64(s))
6752 goto illegal_op;
6753 if (s->cc_op != CC_OP_DYNAMIC)
6754 gen_op_set_cc_op(s->cc_op);
6755 gen_helper_daa(cpu_env);
6756 s->cc_op = CC_OP_EFLAGS;
6757 break;
6758 case 0x2f: /* das */
6759 if (CODE64(s))
6760 goto illegal_op;
6761 if (s->cc_op != CC_OP_DYNAMIC)
6762 gen_op_set_cc_op(s->cc_op);
6763 gen_helper_das(cpu_env);
6764 s->cc_op = CC_OP_EFLAGS;
6765 break;
6766 case 0x37: /* aaa */
6767 if (CODE64(s))
6768 goto illegal_op;
6769 if (s->cc_op != CC_OP_DYNAMIC)
6770 gen_op_set_cc_op(s->cc_op);
6771 gen_helper_aaa(cpu_env);
6772 s->cc_op = CC_OP_EFLAGS;
6773 break;
6774 case 0x3f: /* aas */
6775 if (CODE64(s))
6776 goto illegal_op;
6777 if (s->cc_op != CC_OP_DYNAMIC)
6778 gen_op_set_cc_op(s->cc_op);
6779 gen_helper_aas(cpu_env);
6780 s->cc_op = CC_OP_EFLAGS;
6781 break;
6782 case 0xd4: /* aam */
6783 if (CODE64(s))
6784 goto illegal_op;
6785 val = cpu_ldub_code(cpu_single_env, s->pc++);
6786 if (val == 0) {
6787 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6788 } else {
6789 gen_helper_aam(cpu_env, tcg_const_i32(val));
6790 s->cc_op = CC_OP_LOGICB;
6791 }
6792 break;
6793 case 0xd5: /* aad */
6794 if (CODE64(s))
6795 goto illegal_op;
6796 val = cpu_ldub_code(cpu_single_env, s->pc++);
6797 gen_helper_aad(cpu_env, tcg_const_i32(val));
6798 s->cc_op = CC_OP_LOGICB;
6799 break;
6800 /************************/
6801 /* misc */
6802 case 0x90: /* nop */
6803 /* XXX: correct lock test for all insn */
6804 if (prefixes & PREFIX_LOCK) {
6805 goto illegal_op;
6806 }
6807 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6808 if (REX_B(s)) {
6809 goto do_xchg_reg_eax;
6810 }
6811 if (prefixes & PREFIX_REPZ) {
6812 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
6813 }
6814 break;
6815 case 0x9b: /* fwait */
6816 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6817 (HF_MP_MASK | HF_TS_MASK)) {
6818 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6819 } else {
6820 if (s->cc_op != CC_OP_DYNAMIC)
6821 gen_op_set_cc_op(s->cc_op);
6822 gen_jmp_im(pc_start - s->cs_base);
6823 gen_helper_fwait(cpu_env);
6824 }
6825 break;
6826 case 0xcc: /* int3 */
6827 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6828 break;
6829 case 0xcd: /* int N */
6830 val = cpu_ldub_code(cpu_single_env, s->pc++);
6831 if (s->vm86 && s->iopl != 3) {
6832 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6833 } else {
6834 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6835 }
6836 break;
6837 case 0xce: /* into */
6838 if (CODE64(s))
6839 goto illegal_op;
6840 if (s->cc_op != CC_OP_DYNAMIC)
6841 gen_op_set_cc_op(s->cc_op);
6842 gen_jmp_im(pc_start - s->cs_base);
6843 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
6844 break;
6845 #ifdef WANT_ICEBP
6846 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6847 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6848 #if 1
6849 gen_debug(s, pc_start - s->cs_base);
6850 #else
6851 /* start debug */
6852 tb_flush(cpu_single_env);
6853 cpu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6854 #endif
6855 break;
6856 #endif
6857 case 0xfa: /* cli */
6858 if (!s->vm86) {
6859 if (s->cpl <= s->iopl) {
6860 gen_helper_cli(cpu_env);
6861 } else {
6862 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6863 }
6864 } else {
6865 if (s->iopl == 3) {
6866 gen_helper_cli(cpu_env);
6867 } else {
6868 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6869 }
6870 }
6871 break;
6872 case 0xfb: /* sti */
6873 if (!s->vm86) {
6874 if (s->cpl <= s->iopl) {
6875 gen_sti:
6876 gen_helper_sti(cpu_env);
6877 /* interrupts are enabled only after the first insn following sti */
6878 /* if several consecutive insns would set the inhibit flag, only the
6879 _first_ one does so */
6880 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
6881 gen_helper_set_inhibit_irq(cpu_env);
6882 /* give a chance to handle pending irqs */
6883 gen_jmp_im(s->pc - s->cs_base);
6884 gen_eob(s);
6885 } else {
6886 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6887 }
6888 } else {
6889 if (s->iopl == 3) {
6890 goto gen_sti;
6891 } else {
6892 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6893 }
6894 }
6895 break;
6896 case 0x62: /* bound */
6897 if (CODE64(s))
6898 goto illegal_op;
6899 ot = dflag ? OT_LONG : OT_WORD;
6900 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
6901 reg = (modrm >> 3) & 7;
6902 mod = (modrm >> 6) & 3;
6903 if (mod == 3)
6904 goto illegal_op;
6905 gen_op_mov_TN_reg(ot, 0, reg);
6906 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6907 gen_jmp_im(pc_start - s->cs_base);
6908 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6909 if (ot == OT_WORD) {
6910 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
6911 } else {
6912 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
6913 }
6914 break;
6915 case 0x1c8 ... 0x1cf: /* bswap reg */
6916 reg = (b & 7) | REX_B(s);
6917 #ifdef TARGET_X86_64
6918 if (dflag == 2) {
6919 gen_op_mov_TN_reg(OT_QUAD, 0, reg);
6920 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
6921 gen_op_mov_reg_T0(OT_QUAD, reg);
6922 } else
6923 #endif
6924 {
6925 gen_op_mov_TN_reg(OT_LONG, 0, reg);
6926 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
6927 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
6928 gen_op_mov_reg_T0(OT_LONG, reg);
6929 }
6930 break;
6931 case 0xd6: /* salc */
6932 if (CODE64(s))
6933 goto illegal_op;
6934 if (s->cc_op != CC_OP_DYNAMIC)
6935 gen_op_set_cc_op(s->cc_op);
6936 gen_compute_eflags_c(cpu_T[0]);
6937 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
6938 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
6939 break;
6940 case 0xe0: /* loopnz */
6941 case 0xe1: /* loopz */
6942 case 0xe2: /* loop */
6943 case 0xe3: /* jecxz */
6944 {
6945 int l1, l2, l3;
6946
6947 tval = (int8_t)insn_get(s, OT_BYTE);
6948 next_eip = s->pc - s->cs_base;
6949 tval += next_eip;
6950 if (s->dflag == 0)
6951 tval &= 0xffff;
6952
6953 l1 = gen_new_label();
6954 l2 = gen_new_label();
6955 l3 = gen_new_label();
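/* l1: branch taken (eip = tval); l3 and the fallthrough: branch not
   taken (eip = next_eip); l2: common exit */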
6956 b &= 3;
6957 switch(b) {
6958 case 0: /* loopnz */
6959 case 1: /* loopz */
6960 if (s->cc_op != CC_OP_DYNAMIC)
6961 gen_op_set_cc_op(s->cc_op);
6962 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6963 gen_op_jz_ecx(s->aflag, l3);
6964 gen_compute_eflags(cpu_tmp0);
6965 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_Z);
6966 if (b == 0) {
6967 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1);
6968 } else {
6969 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, l1);
6970 }
6971 break;
6972 case 2: /* loop */
6973 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6974 gen_op_jnz_ecx(s->aflag, l1);
6975 break;
6976 default:
6977 case 3: /* jcxz */
6978 gen_op_jz_ecx(s->aflag, l1);
6979 break;
6980 }
6981
6982 gen_set_label(l3);
6983 gen_jmp_im(next_eip);
6984 tcg_gen_br(l2);
6985
6986 gen_set_label(l1);
6987 gen_jmp_im(tval);
6988 gen_set_label(l2);
6989 gen_eob(s);
6990 }
6991 break;
6992 case 0x130: /* wrmsr */
6993 case 0x132: /* rdmsr */
6994 if (s->cpl != 0) {
6995 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6996 } else {
6997 if (s->cc_op != CC_OP_DYNAMIC)
6998 gen_op_set_cc_op(s->cc_op);
6999 gen_jmp_im(pc_start - s->cs_base);
7000 if (b & 2) {
7001 gen_helper_rdmsr(cpu_env);
7002 } else {
7003 gen_helper_wrmsr(cpu_env);
7004 }
7005 }
7006 break;
7007 case 0x131: /* rdtsc */
7008 if (s->cc_op != CC_OP_DYNAMIC)
7009 gen_op_set_cc_op(s->cc_op);
7010 gen_jmp_im(pc_start - s->cs_base);
7011 if (use_icount)
7012 gen_io_start();
7013 gen_helper_rdtsc(cpu_env);
7014 if (use_icount) {
7015 gen_io_end();
7016 gen_jmp(s, s->pc - s->cs_base);
7017 }
7018 break;
7019 case 0x133: /* rdpmc */
7020 if (s->cc_op != CC_OP_DYNAMIC)
7021 gen_op_set_cc_op(s->cc_op);
7022 gen_jmp_im(pc_start - s->cs_base);
7023 gen_helper_rdpmc(cpu_env);
7024 break;
7025 case 0x134: /* sysenter */
7026 /* For Intel CPUs, SYSENTER is valid in 64-bit mode */
7027 if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7028 goto illegal_op;
7029 if (!s->pe) {
7030 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7031 } else {
7032 gen_update_cc_op(s);
7033 gen_jmp_im(pc_start - s->cs_base);
7034 gen_helper_sysenter(cpu_env);
7035 gen_eob(s);
7036 }
7037 break;
7038 case 0x135: /* sysexit */
7039 /* For Intel CPUs, SYSEXIT is valid in 64-bit mode */
7040 if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7041 goto illegal_op;
7042 if (!s->pe) {
7043 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7044 } else {
7045 gen_update_cc_op(s);
7046 gen_jmp_im(pc_start - s->cs_base);
7047 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag));
7048 gen_eob(s);
7049 }
7050 break;
7051 #ifdef TARGET_X86_64
7052 case 0x105: /* syscall */
7053 /* XXX: is it usable in real mode? */
7054 gen_update_cc_op(s);
7055 gen_jmp_im(pc_start - s->cs_base);
7056 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
7057 gen_eob(s);
7058 break;
7059 case 0x107: /* sysret */
7060 if (!s->pe) {
7061 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7062 } else {
7063 gen_update_cc_op(s);
7064 gen_jmp_im(pc_start - s->cs_base);
7065 gen_helper_sysret(cpu_env, tcg_const_i32(s->dflag));
7066 /* condition codes are modified only in long mode */
7067 if (s->lma)
7068 s->cc_op = CC_OP_EFLAGS;
7069 gen_eob(s);
7070 }
7071 break;
7072 #endif
7073 case 0x1a2: /* cpuid */
7074 if (s->cc_op != CC_OP_DYNAMIC)
7075 gen_op_set_cc_op(s->cc_op);
7076 gen_jmp_im(pc_start - s->cs_base);
7077 gen_helper_cpuid(cpu_env);
7078 break;
7079 case 0xf4: /* hlt */
7080 if (s->cpl != 0) {
7081 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7082 } else {
7083 if (s->cc_op != CC_OP_DYNAMIC)
7084 gen_op_set_cc_op(s->cc_op);
7085 gen_jmp_im(pc_start - s->cs_base);
7086 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
7087 s->is_jmp = DISAS_TB_JUMP;
7088 }
7089 break;
7090 case 0x100:
7091 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
7092 mod = (modrm >> 6) & 3;
7093 op = (modrm >> 3) & 7;
7094 switch(op) {
7095 case 0: /* sldt */
7096 if (!s->pe || s->vm86)
7097 goto illegal_op;
7098 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7099 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
7100 ot = OT_WORD;
7101 if (mod == 3)
7102 ot += s->dflag;
7103 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
7104 break;
7105 case 2: /* lldt */
7106 if (!s->pe || s->vm86)
7107 goto illegal_op;
7108 if (s->cpl != 0) {
7109 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7110 } else {
7111 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7112 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7113 gen_jmp_im(pc_start - s->cs_base);
7114 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7115 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
7116 }
7117 break;
7118 case 1: /* str */
7119 if (!s->pe || s->vm86)
7120 goto illegal_op;
7121 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7122 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
7123 ot = OT_WORD;
7124 if (mod == 3)
7125 ot += s->dflag;
7126 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
7127 break;
7128 case 3: /* ltr */
7129 if (!s->pe || s->vm86)
7130 goto illegal_op;
7131 if (s->cpl != 0) {
7132 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7133 } else {
7134 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7135 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7136 gen_jmp_im(pc_start - s->cs_base);
7137 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7138 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
7139 }
7140 break;
7141 case 4: /* verr */
7142 case 5: /* verw */
7143 if (!s->pe || s->vm86)
7144 goto illegal_op;
7145 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7146 if (s->cc_op != CC_OP_DYNAMIC)
7147 gen_op_set_cc_op(s->cc_op);
7148 if (op == 4) {
7149 gen_helper_verr(cpu_env, cpu_T[0]);
7150 } else {
7151 gen_helper_verw(cpu_env, cpu_T[0]);
7152 }
7153 s->cc_op = CC_OP_EFLAGS;
7154 break;
7155 default:
7156 goto illegal_op;
7157 }
7158 break;
7159 case 0x101:
7160 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
7161 mod = (modrm >> 6) & 3;
7162 op = (modrm >> 3) & 7;
7163 rm = modrm & 7;
7164 switch(op) {
7165 case 0: /* sgdt */
7166 if (mod == 3)
7167 goto illegal_op;
7168 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7169 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7170 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
7171 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7172 gen_add_A0_im(s, 2);
7173 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
7174 if (!s->dflag)
7175 gen_op_andl_T0_im(0xffffff);
7176 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7177 break;
7178 case 1:
7179 if (mod == 3) {
7180 switch (rm) {
7181 case 0: /* monitor */
7182 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7183 s->cpl != 0)
7184 goto illegal_op;
7185 if (s->cc_op != CC_OP_DYNAMIC)
7186 gen_op_set_cc_op(s->cc_op);
7187 gen_jmp_im(pc_start - s->cs_base);
7188 #ifdef TARGET_X86_64
7189 if (s->aflag == 2) {
7190 gen_op_movq_A0_reg(R_EAX);
7191 } else
7192 #endif
7193 {
7194 gen_op_movl_A0_reg(R_EAX);
7195 if (s->aflag == 0)
7196 gen_op_andl_A0_ffff();
7197 }
7198 gen_add_A0_ds_seg(s);
7199 gen_helper_monitor(cpu_env, cpu_A0);
7200 break;
7201 case 1: /* mwait */
7202 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7203 s->cpl != 0)
7204 goto illegal_op;
7205 gen_update_cc_op(s);
7206 gen_jmp_im(pc_start - s->cs_base);
7207 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
7208 gen_eob(s);
7209 break;
7210 case 2: /* clac */
7211 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7212 s->cpl != 0) {
7213 goto illegal_op;
7214 }
7215 gen_helper_clac(cpu_env);
7216 gen_jmp_im(s->pc - s->cs_base);
7217 gen_eob(s);
7218 break;
7219 case 3: /* stac */
7220 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7221 s->cpl != 0) {
7222 goto illegal_op;
7223 }
7224 gen_helper_stac(cpu_env);
7225 gen_jmp_im(s->pc - s->cs_base);
7226 gen_eob(s);
7227 break;
7228 default:
7229 goto illegal_op;
7230 }
7231 } else { /* sidt */
7232 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7233 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7234 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7235 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7236 gen_add_A0_im(s, 2);
7237 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7238 if (!s->dflag)
7239 gen_op_andl_T0_im(0xffffff);
7240 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7241 }
7242 break;
7243 case 2: /* lgdt */
7244 case 3: /* lidt */
7245 if (mod == 3) {
7246 if (s->cc_op != CC_OP_DYNAMIC)
7247 gen_op_set_cc_op(s->cc_op);
7248 gen_jmp_im(pc_start - s->cs_base);
7249 switch(rm) {
7250 case 0: /* VMRUN */
7251 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7252 goto illegal_op;
7253 if (s->cpl != 0) {
7254 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7255 break;
7256 } else {
7257 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag),
7258 tcg_const_i32(s->pc - pc_start));
7259 tcg_gen_exit_tb(0);
7260 s->is_jmp = DISAS_TB_JUMP;
7261 }
7262 break;
7263 case 1: /* VMMCALL */
7264 if (!(s->flags & HF_SVME_MASK))
7265 goto illegal_op;
7266 gen_helper_vmmcall(cpu_env);
7267 break;
7268 case 2: /* VMLOAD */
7269 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7270 goto illegal_op;
7271 if (s->cpl != 0) {
7272 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7273 break;
7274 } else {
7275 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag));
7276 }
7277 break;
7278 case 3: /* VMSAVE */
7279 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7280 goto illegal_op;
7281 if (s->cpl != 0) {
7282 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7283 break;
7284 } else {
7285 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag));
7286 }
7287 break;
7288 case 4: /* STGI */
7289 if ((!(s->flags & HF_SVME_MASK) &&
7290 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7291 !s->pe)
7292 goto illegal_op;
7293 if (s->cpl != 0) {
7294 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7295 break;
7296 } else {
7297 gen_helper_stgi(cpu_env);
7298 }
7299 break;
7300 case 5: /* CLGI */
7301 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7302 goto illegal_op;
7303 if (s->cpl != 0) {
7304 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7305 break;
7306 } else {
7307 gen_helper_clgi(cpu_env);
7308 }
7309 break;
7310 case 6: /* SKINIT */
7311 if ((!(s->flags & HF_SVME_MASK) &&
7312 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7313 !s->pe)
7314 goto illegal_op;
7315 gen_helper_skinit(cpu_env);
7316 break;
7317 case 7: /* INVLPGA */
7318 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7319 goto illegal_op;
7320 if (s->cpl != 0) {
7321 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7322 break;
7323 } else {
7324 gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag));
7325 }
7326 break;
7327 default:
7328 goto illegal_op;
7329 }
7330 } else if (s->cpl != 0) {
7331 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7332 } else {
7333 gen_svm_check_intercept(s, pc_start,
7334 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7335 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7336 gen_op_ld_T1_A0(OT_WORD + s->mem_index);
7337 gen_add_A0_im(s, 2);
7338 gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7339 if (!s->dflag)
7340 gen_op_andl_T0_im(0xffffff);
7341 if (op == 2) {
7342 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7343 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7344 } else {
7345 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7346 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7347 }
7348 }
7349 break;
7350 case 4: /* smsw */
7351 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
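/* smsw stores the low 32 bits of CR0; on a big-endian host they sit
   4 bytes into the 64-bit cr[0] field */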
7352 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7353 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7354 #else
7355 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7356 #endif
7357 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1);
7358 break;
7359 case 6: /* lmsw */
7360 if (s->cpl != 0) {
7361 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7362 } else {
7363 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7364 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7365 gen_helper_lmsw(cpu_env, cpu_T[0]);
7366 gen_jmp_im(s->pc - s->cs_base);
7367 gen_eob(s);
7368 }
7369 break;
7370 case 7:
7371 if (mod != 3) { /* invlpg */
7372 if (s->cpl != 0) {
7373 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7374 } else {
7375 if (s->cc_op != CC_OP_DYNAMIC)
7376 gen_op_set_cc_op(s->cc_op);
7377 gen_jmp_im(pc_start - s->cs_base);
7378 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7379 gen_helper_invlpg(cpu_env, cpu_A0);
7380 gen_jmp_im(s->pc - s->cs_base);
7381 gen_eob(s);
7382 }
7383 } else {
7384 switch (rm) {
7385 case 0: /* swapgs */
7386 #ifdef TARGET_X86_64
7387 if (CODE64(s)) {
7388 if (s->cpl != 0) {
7389 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7390 } else {
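/* swapgs: exchange GS.base with the kernelgsbase MSR value */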
7391 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7392 offsetof(CPUX86State,segs[R_GS].base));
7393 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7394 offsetof(CPUX86State,kernelgsbase));
7395 tcg_gen_st_tl(cpu_T[1], cpu_env,
7396 offsetof(CPUX86State,segs[R_GS].base));
7397 tcg_gen_st_tl(cpu_T[0], cpu_env,
7398 offsetof(CPUX86State,kernelgsbase));
7399 }
7400 } else
7401 #endif
7402 {
7403 goto illegal_op;
7404 }
7405 break;
7406 case 1: /* rdtscp */
7407 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7408 goto illegal_op;
7409 if (s->cc_op != CC_OP_DYNAMIC)
7410 gen_op_set_cc_op(s->cc_op);
7411 gen_jmp_im(pc_start - s->cs_base);
7412 if (use_icount)
7413 gen_io_start();
7414 gen_helper_rdtscp(cpu_env);
7415 if (use_icount) {
7416 gen_io_end();
7417 gen_jmp(s, s->pc - s->cs_base);
7418 }
7419 break;
7420 default:
7421 goto illegal_op;
7422 }
7423 }
7424 break;
7425 default:
7426 goto illegal_op;
7427 }
7428 break;
7429 case 0x108: /* invd */
7430 case 0x109: /* wbinvd */
7431 if (s->cpl != 0) {
7432 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7433 } else {
7434 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7435 /* nothing to do */
7436 }
7437 break;
7438 case 0x63: /* arpl or movslS (x86_64) */
7439 #ifdef TARGET_X86_64
7440 if (CODE64(s)) {
7441 int d_ot;
7442 /* d_ot is the size of the destination */
7443 d_ot = dflag + OT_WORD;
7444
7445 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
7446 reg = ((modrm >> 3) & 7) | rex_r;
7447 mod = (modrm >> 6) & 3;
7448 rm = (modrm & 7) | REX_B(s);
7449
7450 if (mod == 3) {
7451 gen_op_mov_TN_reg(OT_LONG, 0, rm);
7452 /* sign extend */
7453 if (d_ot == OT_QUAD)
7454 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7455 gen_op_mov_reg_T0(d_ot, reg);
7456 } else {
7457 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7458 if (d_ot == OT_QUAD) {
7459 gen_op_lds_T0_A0(OT_LONG + s->mem_index);
7460 } else {
7461 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7462 }
7463 gen_op_mov_reg_T0(d_ot, reg);
7464 }
7465 } else
7466 #endif
7467 {
7468 int label1;
7469 TCGv t0, t1, t2, a0;
7470
7471 if (!s->pe || s->vm86)
7472 goto illegal_op;
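/* arpl: if the destination selector's RPL is below the source
   register's RPL, raise it to match and set ZF, otherwise clear ZF
   (t2 accumulates the new Z bit or'd into cc_src below) */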
7473 t0 = tcg_temp_local_new();
7474 t1 = tcg_temp_local_new();
7475 t2 = tcg_temp_local_new();
7476 ot = OT_WORD;
7477 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
7478 reg = (modrm >> 3) & 7;
7479 mod = (modrm >> 6) & 3;
7480 rm = modrm & 7;
7481 if (mod != 3) {
7482 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7483 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
7484 a0 = tcg_temp_local_new();
7485 tcg_gen_mov_tl(a0, cpu_A0);
7486 } else {
7487 gen_op_mov_v_reg(ot, t0, rm);
7488 TCGV_UNUSED(a0);
7489 }
7490 gen_op_mov_v_reg(ot, t1, reg);
7491 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7492 tcg_gen_andi_tl(t1, t1, 3);
7493 tcg_gen_movi_tl(t2, 0);
7494 label1 = gen_new_label();
7495 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7496 tcg_gen_andi_tl(t0, t0, ~3);
7497 tcg_gen_or_tl(t0, t0, t1);
7498 tcg_gen_movi_tl(t2, CC_Z);
7499 gen_set_label(label1);
7500 if (mod != 3) {
7501 gen_op_st_v(ot + s->mem_index, t0, a0);
7502 tcg_temp_free(a0);
7503 } else {
7504 gen_op_mov_reg_v(ot, rm, t0);
7505 }
7506 if (s->cc_op != CC_OP_DYNAMIC)
7507 gen_op_set_cc_op(s->cc_op);
7508 gen_compute_eflags(cpu_cc_src);
7509 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7510 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7511 s->cc_op = CC_OP_EFLAGS;
7512 tcg_temp_free(t0);
7513 tcg_temp_free(t1);
7514 tcg_temp_free(t2);
7515 }
7516 break;
7517 case 0x102: /* lar */
7518 case 0x103: /* lsl */
7519 {
7520 int label1;
7521 TCGv t0;
7522 if (!s->pe || s->vm86)
7523 goto illegal_op;
7524 ot = dflag ? OT_LONG : OT_WORD;
7525 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
7526 reg = ((modrm >> 3) & 7) | rex_r;
7527 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7528 t0 = tcg_temp_local_new();
7529 if (s->cc_op != CC_OP_DYNAMIC)
7530 gen_op_set_cc_op(s->cc_op);
7531 if (b == 0x102) {
7532 gen_helper_lar(t0, cpu_env, cpu_T[0]);
7533 } else {
7534 gen_helper_lsl(t0, cpu_env, cpu_T[0]);
7535 }
7536 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7537 label1 = gen_new_label();
7538 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7539 gen_op_mov_reg_v(ot, reg, t0);
7540 gen_set_label(label1);
7541 s->cc_op = CC_OP_EFLAGS;
7542 tcg_temp_free(t0);
7543 }
7544 break;
7545 case 0x118:
7546 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
7547 mod = (modrm >> 6) & 3;
7548 op = (modrm >> 3) & 7;
7549 switch(op) {
7550 case 0: /* prefetchnta */
7551 case 1: /* prefetcht0 */
7552 case 2: /* prefetcht1 */
7553 case 3: /* prefetcht2 */
7554 if (mod == 3)
7555 goto illegal_op;
7556 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7557 /* nothing more to do */
7558 break;
7559 default: /* nop (multi byte) */
7560 gen_nop_modrm(s, modrm);
7561 break;
7562 }
7563 break;
7564 case 0x119 ... 0x11f: /* nop (multi byte) */
7565 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
7566 gen_nop_modrm(s, modrm);
7567 break;
7568 case 0x120: /* mov reg, crN */
7569 case 0x122: /* mov crN, reg */
7570 if (s->cpl != 0) {
7571 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7572 } else {
7573 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
7574 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7575 * AMD documentation (24594.pdf) and testing of
7576 * Intel 386 and 486 processors all show that the mod bits
7577 * are assumed to be 1's, regardless of actual values.
7578 */
7579 rm = (modrm & 7) | REX_B(s);
7580 reg = ((modrm >> 3) & 7) | rex_r;
7581 if (CODE64(s))
7582 ot = OT_QUAD;
7583 else
7584 ot = OT_LONG;
7585 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7586 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7587 reg = 8;
7588 }
7589 switch(reg) {
7590 case 0:
7591 case 2:
7592 case 3:
7593 case 4:
7594 case 8:
7595 if (s->cc_op != CC_OP_DYNAMIC)
7596 gen_op_set_cc_op(s->cc_op);
7597 gen_jmp_im(pc_start - s->cs_base);
7598 if (b & 2) {
7599 gen_op_mov_TN_reg(ot, 0, rm);
7600 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7601 cpu_T[0]);
7602 gen_jmp_im(s->pc - s->cs_base);
7603 gen_eob(s);
7604 } else {
7605 gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
7606 gen_op_mov_reg_T0(ot, rm);
7607 }
7608 break;
7609 default:
7610 goto illegal_op;
7611 }
7612 }
7613 break;
7614 case 0x121: /* mov reg, drN */
7615 case 0x123: /* mov drN, reg */
7616 if (s->cpl != 0) {
7617 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7618 } else {
7619 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
7620 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7621 * AMD documentation (24594.pdf) and testing of
7622 * Intel 386 and 486 processors all show that the mod bits
7623 * are assumed to be 1's, regardless of actual values.
7624 */
7625 rm = (modrm & 7) | REX_B(s);
7626 reg = ((modrm >> 3) & 7) | rex_r;
7627 if (CODE64(s))
7628 ot = OT_QUAD;
7629 else
7630 ot = OT_LONG;
7631 /* XXX: do it dynamically with CR4.DE bit */
7632 if (reg == 4 || reg == 5 || reg >= 8)
7633 goto illegal_op;
7634 if (b & 2) {
7635 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7636 gen_op_mov_TN_reg(ot, 0, rm);
7637 gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
7638 gen_jmp_im(s->pc - s->cs_base);
7639 gen_eob(s);
7640 } else {
7641 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7642 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
7643 gen_op_mov_reg_T0(ot, rm);
7644 }
7645 }
7646 break;
7647 case 0x106: /* clts */
7648 if (s->cpl != 0) {
7649 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7650 } else {
7651 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7652 gen_helper_clts(cpu_env);
7653 /* abort block because static cpu state changed */
7654 gen_jmp_im(s->pc - s->cs_base);
7655 gen_eob(s);
7656 }
7657 break;
7658 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7659 case 0x1c3: /* MOVNTI reg, mem */
7660 if (!(s->cpuid_features & CPUID_SSE2))
7661 goto illegal_op;
7662 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
7663 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
7664 mod = (modrm >> 6) & 3;
7665 if (mod == 3)
7666 goto illegal_op;
7667 reg = ((modrm >> 3) & 7) | rex_r;
7668 /* generate a generic store */
7669 gen_ldst_modrm(s, modrm, ot, reg, 1);
7670 break;
7671 case 0x1ae:
7672 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
7673 mod = (modrm >> 6) & 3;
7674 op = (modrm >> 3) & 7;
7675 switch(op) {
7676 case 0: /* fxsave */
7677 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7678 (s->prefix & PREFIX_LOCK))
7679 goto illegal_op;
7680 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7681 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7682 break;
7683 }
7684 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7685 if (s->cc_op != CC_OP_DYNAMIC)
7686 gen_op_set_cc_op(s->cc_op);
7687 gen_jmp_im(pc_start - s->cs_base);
7688 gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32((s->dflag == 2)));
7689 break;
7690 case 1: /* fxrstor */
7691 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7692 (s->prefix & PREFIX_LOCK))
7693 goto illegal_op;
7694 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7695 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7696 break;
7697 }
7698 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7699 if (s->cc_op != CC_OP_DYNAMIC)
7700 gen_op_set_cc_op(s->cc_op);
7701 gen_jmp_im(pc_start - s->cs_base);
7702 gen_helper_fxrstor(cpu_env, cpu_A0,
7703 tcg_const_i32((s->dflag == 2)));
7704 break;
7705 case 2: /* ldmxcsr */
7706 case 3: /* stmxcsr */
7707 if (s->flags & HF_TS_MASK) {
7708 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7709 break;
7710 }
7711 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7712 mod == 3)
7713 goto illegal_op;
7714 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7715 if (op == 2) {
7716 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7717 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7718 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
7719 } else {
7720 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7721 gen_op_st_T0_A0(OT_LONG + s->mem_index);
7722 }
7723 break;
7724 case 5: /* lfence */
7725 case 6: /* mfence */
7726 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
7727 goto illegal_op;
7728 break;
7729 case 7: /* sfence / clflush */
7730 if ((modrm & 0xc7) == 0xc0) {
7731 /* sfence */
7732 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7733 if (!(s->cpuid_features & CPUID_SSE))
7734 goto illegal_op;
7735 } else {
7736 /* clflush */
7737 if (!(s->cpuid_features & CPUID_CLFLUSH))
7738 goto illegal_op;
7739 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7740 }
7741 break;
7742 default:
7743 goto illegal_op;
7744 }
7745 break;
7746 case 0x10d: /* 3DNow! prefetch(w) */
7747 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
7748 mod = (modrm >> 6) & 3;
7749 if (mod == 3)
7750 goto illegal_op;
7751 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7752 /* ignore for now */
7753 break;
7754 case 0x1aa: /* rsm */
7755 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
7756 if (!(s->flags & HF_SMM_MASK))
7757 goto illegal_op;
7758 gen_update_cc_op(s);
7759 gen_jmp_im(s->pc - s->cs_base);
7760 gen_helper_rsm(cpu_env);
7761 gen_eob(s);
7762 break;
7763 case 0x1b8: /* SSE4.2 popcnt */
7764 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7765 PREFIX_REPZ)
7766 goto illegal_op;
7767 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7768 goto illegal_op;
7769
7770 modrm = cpu_ldub_code(cpu_single_env, s->pc++);
7771 reg = ((modrm >> 3) & 7) | rex_r;
7772
7773 if (s->prefix & PREFIX_DATA)
7774 ot = OT_WORD;
7775 else if (s->dflag != 2)
7776 ot = OT_LONG;
7777 else
7778 ot = OT_QUAD;
7779
7780 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
7781 gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
7782 gen_op_mov_reg_T0(ot, reg);
7783
7784 s->cc_op = CC_OP_EFLAGS;
7785 break;
7786 case 0x10e ... 0x10f:
7787 /* 3DNow! instructions, ignore prefixes */
7788 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
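/* fall through to the SSE decoder */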
7789 case 0x110 ... 0x117:
7790 case 0x128 ... 0x12f:
7791 case 0x138 ... 0x13a:
7792 case 0x150 ... 0x179:
7793 case 0x17c ... 0x17f:
7794 case 0x1c2:
7795 case 0x1c4 ... 0x1c6:
7796 case 0x1d0 ... 0x1fe:
7797 gen_sse(s, b, pc_start, rex_r);
7798 break;
7799 default:
7800 goto illegal_op;
7801 }
7802 /* lock generation */
7803 if (s->prefix & PREFIX_LOCK)
7804 gen_helper_unlock();
7805 return s->pc;
7806 illegal_op:
7807 if (s->prefix & PREFIX_LOCK)
7808 gen_helper_unlock();
7809 /* XXX: ensure that no lock was generated */
7810 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7811 return s->pc;
7812 }
7813
7814 void optimize_flags_init(void)
7815 {
7816 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7817 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
7818 offsetof(CPUX86State, cc_op), "cc_op");
7819 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
7820 "cc_src");
7821 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
7822 "cc_dst");
7823 cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_tmp),
7824 "cc_tmp");
7825
7826 #ifdef TARGET_X86_64
7827 cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
7828 offsetof(CPUX86State, regs[R_EAX]), "rax");
7829 cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
7830 offsetof(CPUX86State, regs[R_ECX]), "rcx");
7831 cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
7832 offsetof(CPUX86State, regs[R_EDX]), "rdx");
7833 cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
7834 offsetof(CPUX86State, regs[R_EBX]), "rbx");
7835 cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
7836 offsetof(CPUX86State, regs[R_ESP]), "rsp");
7837 cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
7838 offsetof(CPUX86State, regs[R_EBP]), "rbp");
7839 cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
7840 offsetof(CPUX86State, regs[R_ESI]), "rsi");
7841 cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
7842 offsetof(CPUX86State, regs[R_EDI]), "rdi");
7843 cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
7844 offsetof(CPUX86State, regs[8]), "r8");
7845 cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
7846 offsetof(CPUX86State, regs[9]), "r9");
7847 cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
7848 offsetof(CPUX86State, regs[10]), "r10");
7849 cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
7850 offsetof(CPUX86State, regs[11]), "r11");
7851 cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
7852 offsetof(CPUX86State, regs[12]), "r12");
7853 cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
7854 offsetof(CPUX86State, regs[13]), "r13");
7855 cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
7856 offsetof(CPUX86State, regs[14]), "r14");
7857 cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
7858 offsetof(CPUX86State, regs[15]), "r15");
7859 #else
7860 cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
7861 offsetof(CPUX86State, regs[R_EAX]), "eax");
7862 cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
7863 offsetof(CPUX86State, regs[R_ECX]), "ecx");
7864 cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
7865 offsetof(CPUX86State, regs[R_EDX]), "edx");
7866 cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
7867 offsetof(CPUX86State, regs[R_EBX]), "ebx");
7868 cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
7869 offsetof(CPUX86State, regs[R_ESP]), "esp");
7870 cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
7871 offsetof(CPUX86State, regs[R_EBP]), "ebp");
7872 cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
7873 offsetof(CPUX86State, regs[R_ESI]), "esi");
7874 cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
7875 offsetof(CPUX86State, regs[R_EDI]), "edi");
7876 #endif
7877
7878 /* register helpers */
7879 #define GEN_HELPER 2
7880 #include "helper.h"
7881 }
7882
7883 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7884 basic block 'tb'. If search_pc is TRUE, also generate PC
7885 information for each intermediate instruction. */
7886 static inline void gen_intermediate_code_internal(CPUX86State *env,
7887 TranslationBlock *tb,
7888 int search_pc)
7889 {
7890 DisasContext dc1, *dc = &dc1;
7891 target_ulong pc_ptr;
7892 uint16_t *gen_opc_end;
7893 CPUBreakpoint *bp;
7894 int j, lj;
7895 uint64_t flags;
7896 target_ulong pc_start;
7897 target_ulong cs_base;
7898 int num_insns;
7899 int max_insns;
7900
7901 /* generate intermediate code */
7902 pc_start = tb->pc;
7903 cs_base = tb->cs_base;
7904 flags = tb->flags;
7905
7906 dc->pe = (flags >> HF_PE_SHIFT) & 1;
7907 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7908 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7909 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7910 dc->f_st = 0;
7911 dc->vm86 = (flags >> VM_SHIFT) & 1;
7912 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7913 dc->iopl = (flags >> IOPL_SHIFT) & 3;
7914 dc->tf = (flags >> TF_SHIFT) & 1;
7915 dc->singlestep_enabled = env->singlestep_enabled;
7916 dc->cc_op = CC_OP_DYNAMIC;
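/* CC_OP_DYNAMIC: at block entry the lazy condition-code state is
   unknown and must be taken from env->cc_op at run time */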
7917 dc->cs_base = cs_base;
7918 dc->tb = tb;
7919 dc->popl_esp_hack = 0;
7920 /* select memory access functions */
7921 dc->mem_index = 0;
7922 if (flags & HF_SOFTMMU_MASK) {
7923 dc->mem_index = (cpu_mmu_index(env) + 1) << 2;
7924 }
7925 dc->cpuid_features = env->cpuid_features;
7926 dc->cpuid_ext_features = env->cpuid_ext_features;
7927 dc->cpuid_ext2_features = env->cpuid_ext2_features;
7928 dc->cpuid_ext3_features = env->cpuid_ext3_features;
7929 dc->cpuid_7_0_ebx_features = env->cpuid_7_0_ebx_features;
7930 #ifdef TARGET_X86_64
7931 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7932 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7933 #endif
7934 dc->flags = flags;
7935 dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
7936 (flags & HF_INHIBIT_IRQ_MASK)
7937 #ifndef CONFIG_SOFTMMU
7938 || (flags & HF_SOFTMMU_MASK)
7939 #endif
7940 );
7941 #if 0
7942 /* check addseg logic */
7943 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
7944 printf("ERROR addseg\n");
7945 #endif
7946
7947 cpu_T[0] = tcg_temp_new();
7948 cpu_T[1] = tcg_temp_new();
7949 cpu_A0 = tcg_temp_new();
7950 cpu_T3 = tcg_temp_new();
7951
7952 cpu_tmp0 = tcg_temp_new();
7953 cpu_tmp1_i64 = tcg_temp_new_i64();
7954 cpu_tmp2_i32 = tcg_temp_new_i32();
7955 cpu_tmp3_i32 = tcg_temp_new_i32();
7956 cpu_tmp4 = tcg_temp_new();
7957 cpu_tmp5 = tcg_temp_new();
7958 cpu_ptr0 = tcg_temp_new_ptr();
7959 cpu_ptr1 = tcg_temp_new_ptr();
7960
7961 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
7962
7963 dc->is_jmp = DISAS_NEXT;
7964 pc_ptr = pc_start;
7965 lj = -1;
7966 num_insns = 0;
7967 max_insns = tb->cflags & CF_COUNT_MASK;
7968 if (max_insns == 0)
7969 max_insns = CF_COUNT_MASK;
7970
7971 gen_icount_start();
7972 for(;;) {
7973 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
7974 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
7975 if (bp->pc == pc_ptr &&
7976 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
7977 gen_debug(dc, pc_ptr - dc->cs_base);
7978 break;
7979 }
7980 }
7981 }
7982 if (search_pc) {
7983 j = gen_opc_ptr - gen_opc_buf;
7984 if (lj < j) {
7985 lj++;
7986 while (lj < j)
7987 gen_opc_instr_start[lj++] = 0;
7988 }
7989 gen_opc_pc[lj] = pc_ptr;
7990 gen_opc_cc_op[lj] = dc->cc_op;
7991 gen_opc_instr_start[lj] = 1;
7992 gen_opc_icount[lj] = num_insns;
7993 }
7994 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
7995 gen_io_start();
7996
7997 pc_ptr = disas_insn(dc, pc_ptr);
7998 num_insns++;
7999 /* stop translation if indicated */
8000 if (dc->is_jmp)
8001 break;
8002 /* in single-step mode, we generate only one instruction and
8003 then generate an exception */
8004 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8005 the flag and abort the translation to give the irqs a
8006 chance to happen */
8007 if (dc->tf || dc->singlestep_enabled ||
8008 (flags & HF_INHIBIT_IRQ_MASK)) {
8009 gen_jmp_im(pc_ptr - dc->cs_base);
8010 gen_eob(dc);
8011 break;
8012 }
8013 /* if too long translation, stop generation too */
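/* (the 32-byte margin presumably leaves room for a maximal-length
   x86 insn, so no insn starts right at the page boundary) */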
8014 if (gen_opc_ptr >= gen_opc_end ||
8015 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
8016 num_insns >= max_insns) {
8017 gen_jmp_im(pc_ptr - dc->cs_base);
8018 gen_eob(dc);
8019 break;
8020 }
8021 if (singlestep) {
8022 gen_jmp_im(pc_ptr - dc->cs_base);
8023 gen_eob(dc);
8024 break;
8025 }
8026 }
8027 if (tb->cflags & CF_LAST_IO)
8028 gen_io_end();
8029 gen_icount_end(tb, num_insns);
8030 *gen_opc_ptr = INDEX_op_end;
8031 /* don't forget to fill the last values */
8032 if (search_pc) {
8033 j = gen_opc_ptr - gen_opc_buf;
8034 lj++;
8035 while (lj <= j)
8036 gen_opc_instr_start[lj++] = 0;
8037 }
8038
8039 #ifdef DEBUG_DISAS
8040 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
8041 int disas_flags;
8042 qemu_log("----------------\n");
8043 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8044 #ifdef TARGET_X86_64
8045 if (dc->code64)
8046 disas_flags = 2;
8047 else
8048 #endif
8049 disas_flags = !dc->code32;
8050 log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
8051 qemu_log("\n");
8052 }
8053 #endif
8054
8055 if (!search_pc) {
8056 tb->size = pc_ptr - pc_start;
8057 tb->icount = num_insns;
8058 }
8059 }
8060
8061 void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
8062 {
8063 gen_intermediate_code_internal(env, tb, 0);
8064 }
8065
8066 void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
8067 {
8068 gen_intermediate_code_internal(env, tb, 1);
8069 }
8070
8071 void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
8072 {
8073 int cc_op;
8074 #ifdef DEBUG_DISAS
8075 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
8076 int i;
8077 qemu_log("RESTORE:\n");
8078 for(i = 0;i <= pc_pos; i++) {
8079 if (gen_opc_instr_start[i]) {
8080 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
8081 }
8082 }
8083 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
8084 pc_pos, gen_opc_pc[pc_pos] - tb->cs_base,
8085 (uint32_t)tb->cs_base);
8086 }
8087 #endif
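/* restore the cc_op recorded for this insn at translation time;
   CC_OP_DYNAMIC means env->cc_op was already up to date */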
8088 env->eip = gen_opc_pc[pc_pos] - tb->cs_base;
8089 cc_op = gen_opc_cc_op[pc_pos];
8090 if (cc_op != CC_OP_DYNAMIC)
8091 env->cc_op = cc_op;
8092 }