1 /*
2 * i386 translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdarg.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <inttypes.h>
24 #include <signal.h>
25
26 #include "qemu/host-utils.h"
27 #include "cpu.h"
28 #include "disas/disas.h"
29 #include "tcg-op.h"
30
31 #include "helper.h"
32 #define GEN_HELPER 1
33 #include "helper.h"
34
35 #define PREFIX_REPZ 0x01
36 #define PREFIX_REPNZ 0x02
37 #define PREFIX_LOCK 0x04
38 #define PREFIX_DATA 0x08
39 #define PREFIX_ADR 0x10
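/* Editorial note: the decoder accumulates these flags into
   DisasContext.prefix as it consumes prefix bytes, so e.g. the byte
   sequence 0xf3 0x66 yields (PREFIX_REPZ | PREFIX_DATA). */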
40
41 #ifdef TARGET_X86_64
42 #define CODE64(s) ((s)->code64)
43 #define REX_X(s) ((s)->rex_x)
44 #define REX_B(s) ((s)->rex_b)
45 #else
46 #define CODE64(s) 0
47 #define REX_X(s) 0
48 #define REX_B(s) 0
49 #endif
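/* Editorial note: rex_x and rex_b hold the REX.X/REX.B bits already
   shifted into bit 3, so gen_lea_modrm below can simply OR them onto
   the 3-bit index/base fields of the modrm and SIB bytes. */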
50
51 #ifdef TARGET_X86_64
52 # define ctztl ctz64
53 # define clztl clz64
54 #else
55 # define ctztl ctz32
56 # define clztl clz32
57 #endif
58
59 //#define MACRO_TEST 1
60
61 /* global register indexes */
62 static TCGv_ptr cpu_env;
63 static TCGv cpu_A0;
64 static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
65 static TCGv_i32 cpu_cc_op;
66 static TCGv cpu_regs[CPU_NB_REGS];
67 /* local temps */
68 static TCGv cpu_T[2];
69 /* local register indexes (only used inside old micro ops) */
70 static TCGv cpu_tmp0, cpu_tmp4;
71 static TCGv_ptr cpu_ptr0, cpu_ptr1;
72 static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
73 static TCGv_i64 cpu_tmp1_i64;
74 static TCGv cpu_tmp5;
75
76 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
77
78 #include "exec/gen-icount.h"
79
80 #ifdef TARGET_X86_64
81 static int x86_64_hregs;
82 #endif
83
84 typedef struct DisasContext {
85 /* current insn context */
86 int override; /* -1 if no override */
87 int prefix;
88 int aflag, dflag;
89 target_ulong pc; /* pc = eip + cs_base */
90 int is_jmp; /* 1 means jump (stop translation), 2 means CPU
91 static state change (stop translation) */
92 /* current block context */
93 target_ulong cs_base; /* base of CS segment */
94 int pe; /* protected mode */
95 int code32; /* 32 bit code segment */
96 #ifdef TARGET_X86_64
97 int lma; /* long mode active */
98 int code64; /* 64 bit code segment */
99 int rex_x, rex_b;
100 #endif
101 int ss32; /* 32 bit stack segment */
102 CCOp cc_op; /* current CC operation */
103 bool cc_op_dirty;
104 int addseg; /* non-zero if either DS/ES/SS has a non-zero base */
105 int f_st; /* currently unused */
106 int vm86; /* vm86 mode */
107 int cpl;
108 int iopl;
109 int tf; /* TF cpu flag */
110 int singlestep_enabled; /* "hardware" single step enabled */
111 int jmp_opt; /* use direct block chaining for direct jumps */
112 int mem_index; /* select memory access functions */
113 uint64_t flags; /* all execution flags */
114 struct TranslationBlock *tb;
115 int popl_esp_hack; /* for correct popl with esp base handling */
116 int rip_offset; /* only used in x86_64, but left for simplicity */
117 int cpuid_features;
118 int cpuid_ext_features;
119 int cpuid_ext2_features;
120 int cpuid_ext3_features;
121 int cpuid_7_0_ebx_features;
122 } DisasContext;
123
124 static void gen_eob(DisasContext *s);
125 static void gen_jmp(DisasContext *s, target_ulong eip);
126 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
127 static void gen_op(DisasContext *s1, int op, int ot, int d);
128
129 /* i386 arith/logic operations */
130 enum {
131 OP_ADDL,
132 OP_ORL,
133 OP_ADCL,
134 OP_SBBL,
135 OP_ANDL,
136 OP_SUBL,
137 OP_XORL,
138 OP_CMPL,
139 };
140
141 /* i386 shift ops */
142 enum {
143 OP_ROL,
144 OP_ROR,
145 OP_RCL,
146 OP_RCR,
147 OP_SHL,
148 OP_SHR,
149 OP_SHL1, /* undocumented */
150 OP_SAR = 7,
151 };
152
153 enum {
154 JCC_O,
155 JCC_B,
156 JCC_Z,
157 JCC_BE,
158 JCC_S,
159 JCC_P,
160 JCC_L,
161 JCC_LE,
162 };
163
164 /* operand size */
165 enum {
166 OT_BYTE = 0,
167 OT_WORD,
168 OT_LONG,
169 OT_QUAD,
170 };
171
172 enum {
173 /* I386 int registers */
174 OR_EAX, /* MUST be even numbered */
175 OR_ECX,
176 OR_EDX,
177 OR_EBX,
178 OR_ESP,
179 OR_EBP,
180 OR_ESI,
181 OR_EDI,
182
183 OR_TMP0 = 16, /* temporary operand register */
184 OR_TMP1,
185 OR_A0, /* temporary register used when doing address evaluation */
186 };
187
188 enum {
189 USES_CC_DST = 1,
190 USES_CC_SRC = 2,
191 USES_CC_SRC2 = 4,
192 USES_CC_SRCT = 8,
193 };
194
195 /* Bit set if the global variable is live after setting CC_OP to X. */
196 static const uint8_t cc_op_live[CC_OP_NB] = {
197 [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
198 [CC_OP_EFLAGS] = USES_CC_SRC,
199 [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
200 [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
201 [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
202 [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
203 [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
204 [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
205 [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
206 [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
207 [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
208 [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
209 };
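/* Editorial example: when set_cc_op() below switches from an ADD op
   (DST and SRC live) to a LOGIC op (only DST live), cpu_cc_src drops
   out of the live set and is discarded, letting TCG dead-code
   eliminate whatever computed it. */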
210
211 static void set_cc_op(DisasContext *s, CCOp op)
212 {
213 int dead;
214
215 if (s->cc_op == op) {
216 return;
217 }
218
219 /* Discard CC computation that will no longer be used. */
220 dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
221 if (dead & USES_CC_DST) {
222 tcg_gen_discard_tl(cpu_cc_dst);
223 }
224 if (dead & USES_CC_SRC) {
225 tcg_gen_discard_tl(cpu_cc_src);
226 }
227 if (dead & USES_CC_SRC2) {
228 tcg_gen_discard_tl(cpu_cc_src2);
229 }
230 if (dead & USES_CC_SRCT) {
231 tcg_gen_discard_tl(cpu_cc_srcT);
232 }
233
234 s->cc_op = op;
235 /* The DYNAMIC setting is translator only, and should never be
236 stored. Thus we always consider it clean. */
237 s->cc_op_dirty = (op != CC_OP_DYNAMIC);
238 }
239
240 static void gen_update_cc_op(DisasContext *s)
241 {
242 if (s->cc_op_dirty) {
243 tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
244 s->cc_op_dirty = false;
245 }
246 }
247
248 static inline void gen_op_movl_T0_0(void)
249 {
250 tcg_gen_movi_tl(cpu_T[0], 0);
251 }
252
253 static inline void gen_op_movl_T0_im(int32_t val)
254 {
255 tcg_gen_movi_tl(cpu_T[0], val);
256 }
257
258 static inline void gen_op_movl_T0_imu(uint32_t val)
259 {
260 tcg_gen_movi_tl(cpu_T[0], val);
261 }
262
263 static inline void gen_op_movl_T1_im(int32_t val)
264 {
265 tcg_gen_movi_tl(cpu_T[1], val);
266 }
267
268 static inline void gen_op_movl_T1_imu(uint32_t val)
269 {
270 tcg_gen_movi_tl(cpu_T[1], val);
271 }
272
273 static inline void gen_op_movl_A0_im(uint32_t val)
274 {
275 tcg_gen_movi_tl(cpu_A0, val);
276 }
277
278 #ifdef TARGET_X86_64
279 static inline void gen_op_movq_A0_im(int64_t val)
280 {
281 tcg_gen_movi_tl(cpu_A0, val);
282 }
283 #endif
284
285 static inline void gen_movtl_T0_im(target_ulong val)
286 {
287 tcg_gen_movi_tl(cpu_T[0], val);
288 }
289
290 static inline void gen_movtl_T1_im(target_ulong val)
291 {
292 tcg_gen_movi_tl(cpu_T[1], val);
293 }
294
295 static inline void gen_op_andl_T0_ffff(void)
296 {
297 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
298 }
299
300 static inline void gen_op_andl_T0_im(uint32_t val)
301 {
302 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
303 }
304
305 static inline void gen_op_movl_T0_T1(void)
306 {
307 tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
308 }
309
310 static inline void gen_op_andl_A0_ffff(void)
311 {
312 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
313 }
314
315 #ifdef TARGET_X86_64
316
317 #define NB_OP_SIZES 4
318
319 #else /* !TARGET_X86_64 */
320
321 #define NB_OP_SIZES 3
322
323 #endif /* !TARGET_X86_64 */
324
325 #if defined(HOST_WORDS_BIGENDIAN)
326 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
327 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
328 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
329 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
330 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
331 #else
332 #define REG_B_OFFSET 0
333 #define REG_H_OFFSET 1
334 #define REG_W_OFFSET 0
335 #define REG_L_OFFSET 0
336 #define REG_LH_OFFSET 4
337 #endif
338
339 /* In instruction encodings for byte register accesses the
340 * register number usually indicates "low 8 bits of register N";
341 * however there are some special cases where N 4..7 indicates
342 * [AH, CH, DH, BH], i.e. "bits 15..8 of register N-4". Return
343 * true for this special case, false otherwise.
344 */
345 static inline bool byte_reg_is_xH(int reg)
346 {
347 if (reg < 4) {
348 return false;
349 }
350 #ifdef TARGET_X86_64
351 if (reg >= 8 || x86_64_hregs) {
352 return false;
353 }
354 #endif
355 return true;
356 }
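/* Editorial example: without any REX prefix, reg 4 in a byte insn
   names AH (bits 15..8 of EAX); once a REX prefix has been seen
   (x86_64_hregs set), reg 4 instead names SPL, the low byte of RSP. */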
357
358 static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
359 {
360 switch(ot) {
361 case OT_BYTE:
362 if (!byte_reg_is_xH(reg)) {
363 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
364 } else {
365 tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
366 }
367 break;
368 case OT_WORD:
369 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
370 break;
371 default: /* XXX this shouldn't be reached; abort? */
372 case OT_LONG:
373 /* For x86_64, this sets the higher half of register to zero.
374 For i386, this is equivalent to a mov. */
375 tcg_gen_ext32u_tl(cpu_regs[reg], t0);
376 break;
377 #ifdef TARGET_X86_64
378 case OT_QUAD:
379 tcg_gen_mov_tl(cpu_regs[reg], t0);
380 break;
381 #endif
382 }
383 }
384
385 static inline void gen_op_mov_reg_T0(int ot, int reg)
386 {
387 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
388 }
389
390 static inline void gen_op_mov_reg_T1(int ot, int reg)
391 {
392 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
393 }
394
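/* Editorial note: in the next few helpers the 'size' argument is an
   address-size code (0 = 16-bit, 1 = 32-bit, 2 = 64-bit) such as
   s->aflag, so the OT_* case labels are reused one step below their
   usual operand-size meaning. */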
395 static inline void gen_op_mov_reg_A0(int size, int reg)
396 {
397 switch(size) {
398 case OT_BYTE:
399 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
400 break;
401 default: /* XXX this shouldn't be reached; abort? */
402 case OT_WORD:
403 /* For x86_64, this sets the higher half of register to zero.
404 For i386, this is equivalent to a mov. */
405 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
406 break;
407 #ifdef TARGET_X86_64
408 case OT_LONG:
409 tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
410 break;
411 #endif
412 }
413 }
414
415 static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
416 {
417 if (ot == OT_BYTE && byte_reg_is_xH(reg)) {
418 tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
419 tcg_gen_ext8u_tl(t0, t0);
420 } else {
421 tcg_gen_mov_tl(t0, cpu_regs[reg]);
422 }
423 }
424
425 static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
426 {
427 gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
428 }
429
430 static inline void gen_op_movl_A0_reg(int reg)
431 {
432 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
433 }
434
435 static inline void gen_op_addl_A0_im(int32_t val)
436 {
437 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
438 #ifdef TARGET_X86_64
439 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
440 #endif
441 }
442
443 #ifdef TARGET_X86_64
444 static inline void gen_op_addq_A0_im(int64_t val)
445 {
446 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
447 }
448 #endif
449
450 static void gen_add_A0_im(DisasContext *s, int val)
451 {
452 #ifdef TARGET_X86_64
453 if (CODE64(s))
454 gen_op_addq_A0_im(val);
455 else
456 #endif
457 gen_op_addl_A0_im(val);
458 }
459
460 static inline void gen_op_addl_T0_T1(void)
461 {
462 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
463 }
464
465 static inline void gen_op_jmp_T0(void)
466 {
467 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
468 }
469
470 static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
471 {
472 switch(size) {
473 case OT_BYTE:
474 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
475 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
476 break;
477 case OT_WORD:
478 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
479 /* For x86_64, this sets the higher half of register to zero.
480 For i386, this is equivalent to a nop. */
481 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
482 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
483 break;
484 #ifdef TARGET_X86_64
485 case OT_LONG:
486 tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
487 break;
488 #endif
489 }
490 }
491
492 static inline void gen_op_add_reg_T0(int size, int reg)
493 {
494 switch(size) {
495 case OT_BYTE:
496 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
497 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
498 break;
499 case OT_WORD:
500 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
501 /* For x86_64, this sets the higher half of register to zero.
502 For i386, this is equivalent to a nop. */
503 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
504 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
505 break;
506 #ifdef TARGET_X86_64
507 case OT_LONG:
508 tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
509 break;
510 #endif
511 }
512 }
513
514 static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
515 {
516 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
517 if (shift != 0)
518 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
519 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
520 /* For x86_64, this sets the higher half of register to zero.
521 For i386, this is equivalent to a nop. */
522 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
523 }
524
525 static inline void gen_op_movl_A0_seg(int reg)
526 {
527 tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
528 }
529
530 static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
531 {
532 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
533 #ifdef TARGET_X86_64
534 if (CODE64(s)) {
535 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
536 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
537 } else {
538 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
539 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
540 }
541 #else
542 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
543 #endif
544 }
545
546 #ifdef TARGET_X86_64
547 static inline void gen_op_movq_A0_seg(int reg)
548 {
549 tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
550 }
551
552 static inline void gen_op_addq_A0_seg(int reg)
553 {
554 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
555 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
556 }
557
558 static inline void gen_op_movq_A0_reg(int reg)
559 {
560 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
561 }
562
563 static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
564 {
565 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
566 if (shift != 0)
567 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
568 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
569 }
570 #endif
571
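/* Editorial note: the 'idx' argument of the load/store helpers packs
   the OT_* size into bits 0-1 and the memory index, pre-shifted and
   offset by one, into the upper bits; callers build it as
   'ot + s->mem_index', and '(idx >> 2) - 1' recovers the mmu index. */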
572 static inline void gen_op_lds_T0_A0(int idx)
573 {
574 int mem_index = (idx >> 2) - 1;
575 switch(idx & 3) {
576 case OT_BYTE:
577 tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
578 break;
579 case OT_WORD:
580 tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
581 break;
582 default:
583 case OT_LONG:
584 tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
585 break;
586 }
587 }
588
589 static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
590 {
591 int mem_index = (idx >> 2) - 1;
592 switch(idx & 3) {
593 case OT_BYTE:
594 tcg_gen_qemu_ld8u(t0, a0, mem_index);
595 break;
596 case OT_WORD:
597 tcg_gen_qemu_ld16u(t0, a0, mem_index);
598 break;
599 case OT_LONG:
600 tcg_gen_qemu_ld32u(t0, a0, mem_index);
601 break;
602 default:
603 case OT_QUAD:
604 /* Should never happen on 32-bit targets. */
605 #ifdef TARGET_X86_64
606 tcg_gen_qemu_ld64(t0, a0, mem_index);
607 #endif
608 break;
609 }
610 }
611
612 /* XXX: always use ldu or lds */
613 static inline void gen_op_ld_T0_A0(int idx)
614 {
615 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
616 }
617
618 static inline void gen_op_ldu_T0_A0(int idx)
619 {
620 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
621 }
622
623 static inline void gen_op_ld_T1_A0(int idx)
624 {
625 gen_op_ld_v(idx, cpu_T[1], cpu_A0);
626 }
627
628 static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
629 {
630 int mem_index = (idx >> 2) - 1;
631 switch(idx & 3) {
632 case OT_BYTE:
633 tcg_gen_qemu_st8(t0, a0, mem_index);
634 break;
635 case OT_WORD:
636 tcg_gen_qemu_st16(t0, a0, mem_index);
637 break;
638 case OT_LONG:
639 tcg_gen_qemu_st32(t0, a0, mem_index);
640 break;
641 default:
642 case OT_QUAD:
643 /* Should never happen on 32-bit targets. */
644 #ifdef TARGET_X86_64
645 tcg_gen_qemu_st64(t0, a0, mem_index);
646 #endif
647 break;
648 }
649 }
650
651 static inline void gen_op_st_T0_A0(int idx)
652 {
653 gen_op_st_v(idx, cpu_T[0], cpu_A0);
654 }
655
656 static inline void gen_op_st_T1_A0(int idx)
657 {
658 gen_op_st_v(idx, cpu_T[1], cpu_A0);
659 }
660
661 static inline void gen_jmp_im(target_ulong pc)
662 {
663 tcg_gen_movi_tl(cpu_tmp0, pc);
664 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
665 }
666
667 static inline void gen_string_movl_A0_ESI(DisasContext *s)
668 {
669 int override;
670
671 override = s->override;
672 #ifdef TARGET_X86_64
673 if (s->aflag == 2) {
674 if (override >= 0) {
675 gen_op_movq_A0_seg(override);
676 gen_op_addq_A0_reg_sN(0, R_ESI);
677 } else {
678 gen_op_movq_A0_reg(R_ESI);
679 }
680 } else
681 #endif
682 if (s->aflag) {
683 /* 32 bit address */
684 if (s->addseg && override < 0)
685 override = R_DS;
686 if (override >= 0) {
687 gen_op_movl_A0_seg(override);
688 gen_op_addl_A0_reg_sN(0, R_ESI);
689 } else {
690 gen_op_movl_A0_reg(R_ESI);
691 }
692 } else {
693 /* 16 bit address, always override */
694 if (override < 0)
695 override = R_DS;
696 gen_op_movl_A0_reg(R_ESI);
697 gen_op_andl_A0_ffff();
698 gen_op_addl_A0_seg(s, override);
699 }
700 }
701
702 static inline void gen_string_movl_A0_EDI(DisasContext *s)
703 {
704 #ifdef TARGET_X86_64
705 if (s->aflag == 2) {
706 gen_op_movq_A0_reg(R_EDI);
707 } else
708 #endif
709 if (s->aflag) {
710 if (s->addseg) {
711 gen_op_movl_A0_seg(R_ES);
712 gen_op_addl_A0_reg_sN(0, R_EDI);
713 } else {
714 gen_op_movl_A0_reg(R_EDI);
715 }
716 } else {
717 gen_op_movl_A0_reg(R_EDI);
718 gen_op_andl_A0_ffff();
719 gen_op_addl_A0_seg(s, R_ES);
720 }
721 }
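/* Editorial note: as the two helpers above encode, string insns honour
   segment overrides only on the ESI source side; the EDI destination
   always uses ES, matching the architectural rules for MOVS/STOS/etc. */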
722
723 static inline void gen_op_movl_T0_Dshift(int ot)
724 {
725 tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
726 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
727 }
728
729 static TCGv gen_ext_tl(TCGv dst, TCGv src, int size, bool sign)
730 {
731 switch (size) {
732 case OT_BYTE:
733 if (sign) {
734 tcg_gen_ext8s_tl(dst, src);
735 } else {
736 tcg_gen_ext8u_tl(dst, src);
737 }
738 return dst;
739 case OT_WORD:
740 if (sign) {
741 tcg_gen_ext16s_tl(dst, src);
742 } else {
743 tcg_gen_ext16u_tl(dst, src);
744 }
745 return dst;
746 #ifdef TARGET_X86_64
747 case OT_LONG:
748 if (sign) {
749 tcg_gen_ext32s_tl(dst, src);
750 } else {
751 tcg_gen_ext32u_tl(dst, src);
752 }
753 return dst;
754 #endif
755 default:
756 return src;
757 }
758 }
759
760 static void gen_extu(int ot, TCGv reg)
761 {
762 gen_ext_tl(reg, reg, ot, false);
763 }
764
765 static void gen_exts(int ot, TCGv reg)
766 {
767 gen_ext_tl(reg, reg, ot, true);
768 }
769
770 static inline void gen_op_jnz_ecx(int size, int label1)
771 {
772 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
773 gen_extu(size + 1, cpu_tmp0);
774 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
775 }
776
777 static inline void gen_op_jz_ecx(int size, int label1)
778 {
779 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
780 gen_extu(size + 1, cpu_tmp0);
781 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
782 }
783
784 static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
785 {
786 switch (ot) {
787 case OT_BYTE:
788 gen_helper_inb(v, n);
789 break;
790 case OT_WORD:
791 gen_helper_inw(v, n);
792 break;
793 case OT_LONG:
794 gen_helper_inl(v, n);
795 break;
796 }
797 }
798
799 static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
800 {
801 switch (ot) {
802 case OT_BYTE:
803 gen_helper_outb(v, n);
804 break;
805 case OT_WORD:
806 gen_helper_outw(v, n);
807 break;
808 case OT_LONG:
809 gen_helper_outl(v, n);
810 break;
811 }
812 }
813
814 static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
815 uint32_t svm_flags)
816 {
817 int state_saved;
818 target_ulong next_eip;
819
820 state_saved = 0;
821 if (s->pe && (s->cpl > s->iopl || s->vm86)) {
822 gen_update_cc_op(s);
823 gen_jmp_im(cur_eip);
824 state_saved = 1;
825 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
826 switch (ot) {
827 case OT_BYTE:
828 gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
829 break;
830 case OT_WORD:
831 gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
832 break;
833 case OT_LONG:
834 gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
835 break;
836 }
837 }
838 if (s->flags & HF_SVMI_MASK) {
839 if (!state_saved) {
840 gen_update_cc_op(s);
841 gen_jmp_im(cur_eip);
842 }
843 svm_flags |= (1 << (4 + ot));
844 next_eip = s->pc - s->cs_base;
845 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
846 gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
847 tcg_const_i32(svm_flags),
848 tcg_const_i32(next_eip - cur_eip));
849 }
850 }
851
852 static inline void gen_movs(DisasContext *s, int ot)
853 {
854 gen_string_movl_A0_ESI(s);
855 gen_op_ld_T0_A0(ot + s->mem_index);
856 gen_string_movl_A0_EDI(s);
857 gen_op_st_T0_A0(ot + s->mem_index);
858 gen_op_movl_T0_Dshift(ot);
859 gen_op_add_reg_T0(s->aflag, R_ESI);
860 gen_op_add_reg_T0(s->aflag, R_EDI);
861 }
862
863 static void gen_op_update1_cc(void)
864 {
865 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
866 }
867
868 static void gen_op_update2_cc(void)
869 {
870 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
871 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
872 }
873
874 static void gen_op_update3_cc(TCGv reg)
875 {
876 tcg_gen_mov_tl(cpu_cc_src2, reg);
877 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
878 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
879 }
880
881 static inline void gen_op_testl_T0_T1_cc(void)
882 {
883 tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
884 }
885
886 static void gen_op_update_neg_cc(void)
887 {
888 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
889 tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
890 tcg_gen_movi_tl(cpu_cc_srcT, 0);
891 }
892
893 /* compute all eflags to cc_src */
894 static void gen_compute_eflags(DisasContext *s)
895 {
896 TCGv zero, dst, src1, src2;
897 int live, dead;
898
899 if (s->cc_op == CC_OP_EFLAGS) {
900 return;
901 }
902
903 TCGV_UNUSED(zero);
904 dst = cpu_cc_dst;
905 src1 = cpu_cc_src;
906 src2 = cpu_cc_src2;
907
908 /* Take care to not read values that are not live. */
909 live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
910 dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
911 if (dead) {
912 zero = tcg_const_tl(0);
913 if (dead & USES_CC_DST) {
914 dst = zero;
915 }
916 if (dead & USES_CC_SRC) {
917 src1 = zero;
918 }
919 if (dead & USES_CC_SRC2) {
920 src2 = zero;
921 }
922 }
923
924 gen_update_cc_op(s);
925 gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
926 set_cc_op(s, CC_OP_EFLAGS);
927
928 if (dead) {
929 tcg_temp_free(zero);
930 }
931 }
932
933 typedef struct CCPrepare {
934 TCGCond cond;
935 TCGv reg;
936 TCGv reg2;
937 target_ulong imm;
938 target_ulong mask;
939 bool use_reg2;
940 bool no_setcond;
941 } CCPrepare;
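/* Editorial summary: a CCPrepare describes the test
   "cond(reg & mask, reg2-or-imm)". mask == -1 means no masking is
   required, use_reg2 selects the two-register form, and no_setcond
   means reg already holds a value whose truth equals the condition,
   so consumers can skip the setcond entirely. */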
942
943 /* compute eflags.C to reg */
944 static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
945 {
946 TCGv t0, t1;
947 int size, shift;
948
949 switch (s->cc_op) {
950 case CC_OP_SUBB ... CC_OP_SUBQ:
951 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
952 size = s->cc_op - CC_OP_SUBB;
953 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
954 /* If no temporary was used, be careful not to alias t1 and t0. */
955 t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
956 tcg_gen_mov_tl(t0, cpu_cc_srcT);
957 gen_extu(size, t0);
958 goto add_sub;
959
960 case CC_OP_ADDB ... CC_OP_ADDQ:
961 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
962 size = s->cc_op - CC_OP_ADDB;
963 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
964 t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
965 add_sub:
966 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
967 .reg2 = t1, .mask = -1, .use_reg2 = true };
968
969 case CC_OP_LOGICB ... CC_OP_LOGICQ:
970 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
971
972 case CC_OP_INCB ... CC_OP_INCQ:
973 case CC_OP_DECB ... CC_OP_DECQ:
974 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
975 .mask = -1, .no_setcond = true };
976
977 case CC_OP_SHLB ... CC_OP_SHLQ:
978 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
979 size = s->cc_op - CC_OP_SHLB;
980 shift = (8 << size) - 1;
981 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
982 .mask = (target_ulong)1 << shift };
983
984 case CC_OP_MULB ... CC_OP_MULQ:
985 return (CCPrepare) { .cond = TCG_COND_NE,
986 .reg = cpu_cc_src, .mask = -1 };
987
988 case CC_OP_EFLAGS:
989 case CC_OP_SARB ... CC_OP_SARQ:
990 /* CC_SRC & 1 */
991 return (CCPrepare) { .cond = TCG_COND_NE,
992 .reg = cpu_cc_src, .mask = CC_C };
993
994 default:
995 /* The need to compute only C from CC_OP_DYNAMIC is important
996 in efficiently implementing e.g. INC at the start of a TB. */
997 gen_update_cc_op(s);
998 gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
999 cpu_cc_src2, cpu_cc_op);
1000 return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1001 .mask = -1, .no_setcond = true };
1002 }
1003 }
1004
1005 /* compute eflags.P to reg */
1006 static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
1007 {
1008 gen_compute_eflags(s);
1009 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1010 .mask = CC_P };
1011 }
1012
1013 /* compute eflags.S to reg */
1014 static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
1015 {
1016 switch (s->cc_op) {
1017 case CC_OP_DYNAMIC:
1018 gen_compute_eflags(s);
1019 /* FALLTHRU */
1020 case CC_OP_EFLAGS:
1021 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1022 .mask = CC_S };
1023 default:
1024 {
1025 int size = (s->cc_op - CC_OP_ADDB) & 3;
1026 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
1027 return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
1028 }
1029 }
1030 }
1031
1032 /* compute eflags.O to reg */
1033 static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
1034 {
1035 gen_compute_eflags(s);
1036 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1037 .mask = CC_O };
1038 }
1039
1040 /* compute eflags.Z to reg */
1041 static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
1042 {
1043 switch (s->cc_op) {
1044 case CC_OP_DYNAMIC:
1045 gen_compute_eflags(s);
1046 /* FALLTHRU */
1047 case CC_OP_EFLAGS:
1048 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1049 .mask = CC_Z };
1050 default:
1051 {
1052 int size = (s->cc_op - CC_OP_ADDB) & 3;
1053 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
1054 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
1055 }
1056 }
1057 }
1058
1059 /* prepare a comparison for a conditional store or jump according to jump
1060 opcode value 'b'. In the fast case, T0 is guaranteed not to be used. */
1061 static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
1062 {
1063 int inv, jcc_op, size, cond;
1064 CCPrepare cc;
1065 TCGv t0;
1066
1067 inv = b & 1;
1068 jcc_op = (b >> 1) & 7;
1069
1070 switch (s->cc_op) {
1071 case CC_OP_SUBB ... CC_OP_SUBQ:
1072 /* We optimize relational operators for the cmp/jcc case. */
1073 size = s->cc_op - CC_OP_SUBB;
1074 switch (jcc_op) {
1075 case JCC_BE:
1076 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
1077 gen_extu(size, cpu_tmp4);
1078 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
1079 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
1080 .reg2 = t0, .mask = -1, .use_reg2 = true };
1081 break;
1082
1083 case JCC_L:
1084 cond = TCG_COND_LT;
1085 goto fast_jcc_l;
1086 case JCC_LE:
1087 cond = TCG_COND_LE;
1088 fast_jcc_l:
1089 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
1090 gen_exts(size, cpu_tmp4);
1091 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
1092 cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
1093 .reg2 = t0, .mask = -1, .use_reg2 = true };
1094 break;
1095
1096 default:
1097 goto slow_jcc;
1098 }
1099 break;
1100
1101 default:
1102 slow_jcc:
1103 /* This actually generates good code for JC, JZ and JS. */
1104 switch (jcc_op) {
1105 case JCC_O:
1106 cc = gen_prepare_eflags_o(s, reg);
1107 break;
1108 case JCC_B:
1109 cc = gen_prepare_eflags_c(s, reg);
1110 break;
1111 case JCC_Z:
1112 cc = gen_prepare_eflags_z(s, reg);
1113 break;
1114 case JCC_BE:
1115 gen_compute_eflags(s);
1116 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1117 .mask = CC_Z | CC_C };
1118 break;
1119 case JCC_S:
1120 cc = gen_prepare_eflags_s(s, reg);
1121 break;
1122 case JCC_P:
1123 cc = gen_prepare_eflags_p(s, reg);
1124 break;
1125 case JCC_L:
1126 gen_compute_eflags(s);
1127 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1128 reg = cpu_tmp0;
1129 }
1130 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1131 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1132 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1133 .mask = CC_S };
1134 break;
1135 default:
1136 case JCC_LE:
1137 gen_compute_eflags(s);
1138 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1139 reg = cpu_tmp0;
1140 }
1141 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1142 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1143 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1144 .mask = CC_S | CC_Z };
1145 break;
1146 }
1147 break;
1148 }
1149
1150 if (inv) {
1151 cc.cond = tcg_invert_cond(cc.cond);
1152 }
1153 return cc;
1154 }
1155
1156 static void gen_setcc1(DisasContext *s, int b, TCGv reg)
1157 {
1158 CCPrepare cc = gen_prepare_cc(s, b, reg);
1159
1160 if (cc.no_setcond) {
1161 if (cc.cond == TCG_COND_EQ) {
1162 tcg_gen_xori_tl(reg, cc.reg, 1);
1163 } else {
1164 tcg_gen_mov_tl(reg, cc.reg);
1165 }
1166 return;
1167 }
1168
1169 if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
1170 cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
1171 tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
1172 tcg_gen_andi_tl(reg, reg, 1);
1173 return;
1174 }
1175 if (cc.mask != -1) {
1176 tcg_gen_andi_tl(reg, cc.reg, cc.mask);
1177 cc.reg = reg;
1178 }
1179 if (cc.use_reg2) {
1180 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1181 } else {
1182 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1183 }
1184 }
1185
1186 static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1187 {
1188 gen_setcc1(s, JCC_B << 1, reg);
1189 }
1190
1191 /* generate a conditional jump to label 'l1' according to jump opcode
1192 value 'b'. In the fast case, T0 is guaranteed not to be used. */
1193 static inline void gen_jcc1_noeob(DisasContext *s, int b, int l1)
1194 {
1195 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
1196
1197 if (cc.mask != -1) {
1198 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1199 cc.reg = cpu_T[0];
1200 }
1201 if (cc.use_reg2) {
1202 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1203 } else {
1204 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1205 }
1206 }
1207
1208 /* Generate a conditional jump to label 'l1' according to jump opcode
1209 value 'b'. In the fast case, T0 is guaranteed not to be used.
1210 A translation block must end soon. */
1211 static inline void gen_jcc1(DisasContext *s, int b, int l1)
1212 {
1213 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
1214
1215 gen_update_cc_op(s);
1216 if (cc.mask != -1) {
1217 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1218 cc.reg = cpu_T[0];
1219 }
1220 set_cc_op(s, CC_OP_DYNAMIC);
1221 if (cc.use_reg2) {
1222 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1223 } else {
1224 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1225 }
1226 }
1227
1228 /* XXX: does not work with gdbstub "ice" single step - not a
1229 serious problem */
1230 static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
1231 {
1232 int l1, l2;
1233
1234 l1 = gen_new_label();
1235 l2 = gen_new_label();
1236 gen_op_jnz_ecx(s->aflag, l1);
1237 gen_set_label(l2);
1238 gen_jmp_tb(s, next_eip, 1);
1239 gen_set_label(l1);
1240 return l2;
1241 }
1242
1243 static inline void gen_stos(DisasContext *s, int ot)
1244 {
1245 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1246 gen_string_movl_A0_EDI(s);
1247 gen_op_st_T0_A0(ot + s->mem_index);
1248 gen_op_movl_T0_Dshift(ot);
1249 gen_op_add_reg_T0(s->aflag, R_EDI);
1250 }
1251
1252 static inline void gen_lods(DisasContext *s, int ot)
1253 {
1254 gen_string_movl_A0_ESI(s);
1255 gen_op_ld_T0_A0(ot + s->mem_index);
1256 gen_op_mov_reg_T0(ot, R_EAX);
1257 gen_op_movl_T0_Dshift(ot);
1258 gen_op_add_reg_T0(s->aflag, R_ESI);
1259 }
1260
1261 static inline void gen_scas(DisasContext *s, int ot)
1262 {
1263 gen_string_movl_A0_EDI(s);
1264 gen_op_ld_T1_A0(ot + s->mem_index);
1265 gen_op(s, OP_CMPL, ot, R_EAX);
1266 gen_op_movl_T0_Dshift(ot);
1267 gen_op_add_reg_T0(s->aflag, R_EDI);
1268 }
1269
1270 static inline void gen_cmps(DisasContext *s, int ot)
1271 {
1272 gen_string_movl_A0_EDI(s);
1273 gen_op_ld_T1_A0(ot + s->mem_index);
1274 gen_string_movl_A0_ESI(s);
1275 gen_op(s, OP_CMPL, ot, OR_TMP0);
1276 gen_op_movl_T0_Dshift(ot);
1277 gen_op_add_reg_T0(s->aflag, R_ESI);
1278 gen_op_add_reg_T0(s->aflag, R_EDI);
1279 }
1280
1281 static inline void gen_ins(DisasContext *s, int ot)
1282 {
1283 if (use_icount)
1284 gen_io_start();
1285 gen_string_movl_A0_EDI(s);
1286 /* Note: we must do this dummy write first to be restartable in
1287 case of page fault. */
1288 gen_op_movl_T0_0();
1289 gen_op_st_T0_A0(ot + s->mem_index);
1290 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1291 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1292 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1293 gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
1294 gen_op_st_T0_A0(ot + s->mem_index);
1295 gen_op_movl_T0_Dshift(ot);
1296 gen_op_add_reg_T0(s->aflag, R_EDI);
1297 if (use_icount)
1298 gen_io_end();
1299 }
1300
1301 static inline void gen_outs(DisasContext *s, int ot)
1302 {
1303 if (use_icount)
1304 gen_io_start();
1305 gen_string_movl_A0_ESI(s);
1306 gen_op_ld_T0_A0(ot + s->mem_index);
1307
1308 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1309 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1310 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1311 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
1312 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
1313
1314 gen_op_movl_T0_Dshift(ot);
1315 gen_op_add_reg_T0(s->aflag, R_ESI);
1316 if (use_icount)
1317 gen_io_end();
1318 }
1319
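/* Editorial note: the gen_io_start()/gen_io_end() brackets above mark
   the I/O access when icount is in use, so instruction counting stays
   deterministic across the device access. */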
1320 /* same method as Valgrind: we generate jumps to the current or next
1321 instruction */
1322 #define GEN_REPZ(op) \
1323 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1324 target_ulong cur_eip, target_ulong next_eip) \
1325 { \
1326 int l2;\
1327 gen_update_cc_op(s); \
1328 l2 = gen_jz_ecx_string(s, next_eip); \
1329 gen_ ## op(s, ot); \
1330 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1331 /* a loop would cause two single-step exceptions if ECX == 1 \
1332 before the rep string insn */ \
1333 if (!s->jmp_opt) \
1334 gen_op_jz_ecx(s->aflag, l2); \
1335 gen_jmp(s, cur_eip); \
1336 }
1337
1338 #define GEN_REPZ2(op) \
1339 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1340 target_ulong cur_eip, \
1341 target_ulong next_eip, \
1342 int nz) \
1343 { \
1344 int l2;\
1345 gen_update_cc_op(s); \
1346 l2 = gen_jz_ecx_string(s, next_eip); \
1347 gen_ ## op(s, ot); \
1348 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1349 gen_update_cc_op(s); \
1350 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
1351 if (!s->jmp_opt) \
1352 gen_op_jz_ecx(s->aflag, l2); \
1353 gen_jmp(s, cur_eip); \
1354 }
1355
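/* Editorial example: GEN_REPZ(movs) below expands to gen_repz_movs(),
   which emits "if ECX == 0 jump to next_eip", one gen_movs() step, an
   ECX decrement, and a jump back to cur_eip so the loop re-enters here
   (or an inline re-test of ECX when !jmp_opt). */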
1356 GEN_REPZ(movs)
1357 GEN_REPZ(stos)
1358 GEN_REPZ(lods)
1359 GEN_REPZ(ins)
1360 GEN_REPZ(outs)
1361 GEN_REPZ2(scas)
1362 GEN_REPZ2(cmps)
1363
1364 static void gen_helper_fp_arith_ST0_FT0(int op)
1365 {
1366 switch (op) {
1367 case 0:
1368 gen_helper_fadd_ST0_FT0(cpu_env);
1369 break;
1370 case 1:
1371 gen_helper_fmul_ST0_FT0(cpu_env);
1372 break;
1373 case 2:
1374 gen_helper_fcom_ST0_FT0(cpu_env);
1375 break;
1376 case 3:
1377 gen_helper_fcom_ST0_FT0(cpu_env);
1378 break;
1379 case 4:
1380 gen_helper_fsub_ST0_FT0(cpu_env);
1381 break;
1382 case 5:
1383 gen_helper_fsubr_ST0_FT0(cpu_env);
1384 break;
1385 case 6:
1386 gen_helper_fdiv_ST0_FT0(cpu_env);
1387 break;
1388 case 7:
1389 gen_helper_fdivr_ST0_FT0(cpu_env);
1390 break;
1391 }
1392 }
1393
1394 /* NOTE the exception in "r" op ordering */
1395 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1396 {
1397 TCGv_i32 tmp = tcg_const_i32(opreg);
1398 switch (op) {
1399 case 0:
1400 gen_helper_fadd_STN_ST0(cpu_env, tmp);
1401 break;
1402 case 1:
1403 gen_helper_fmul_STN_ST0(cpu_env, tmp);
1404 break;
1405 case 4:
1406 gen_helper_fsubr_STN_ST0(cpu_env, tmp);
1407 break;
1408 case 5:
1409 gen_helper_fsub_STN_ST0(cpu_env, tmp);
1410 break;
1411 case 6:
1412 gen_helper_fdivr_STN_ST0(cpu_env, tmp);
1413 break;
1414 case 7:
1415 gen_helper_fdiv_STN_ST0(cpu_env, tmp);
1416 break;
1417 }
1418 }
1419
1420 /* if d == OR_TMP0, it means memory operand (address in A0) */
1421 static void gen_op(DisasContext *s1, int op, int ot, int d)
1422 {
1423 if (d != OR_TMP0) {
1424 gen_op_mov_TN_reg(ot, 0, d);
1425 } else {
1426 gen_op_ld_T0_A0(ot + s1->mem_index);
1427 }
1428 switch(op) {
1429 case OP_ADCL:
1430 gen_compute_eflags_c(s1, cpu_tmp4);
1431 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1432 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1433 if (d != OR_TMP0)
1434 gen_op_mov_reg_T0(ot, d);
1435 else
1436 gen_op_st_T0_A0(ot + s1->mem_index);
1437 gen_op_update3_cc(cpu_tmp4);
1438 set_cc_op(s1, CC_OP_ADCB + ot);
1439 break;
1440 case OP_SBBL:
1441 gen_compute_eflags_c(s1, cpu_tmp4);
1442 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1443 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1444 if (d != OR_TMP0)
1445 gen_op_mov_reg_T0(ot, d);
1446 else
1447 gen_op_st_T0_A0(ot + s1->mem_index);
1448 gen_op_update3_cc(cpu_tmp4);
1449 set_cc_op(s1, CC_OP_SBBB + ot);
1450 break;
1451 case OP_ADDL:
1452 gen_op_addl_T0_T1();
1453 if (d != OR_TMP0)
1454 gen_op_mov_reg_T0(ot, d);
1455 else
1456 gen_op_st_T0_A0(ot + s1->mem_index);
1457 gen_op_update2_cc();
1458 set_cc_op(s1, CC_OP_ADDB + ot);
1459 break;
1460 case OP_SUBL:
1461 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
1462 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1463 if (d != OR_TMP0)
1464 gen_op_mov_reg_T0(ot, d);
1465 else
1466 gen_op_st_T0_A0(ot + s1->mem_index);
1467 gen_op_update2_cc();
1468 set_cc_op(s1, CC_OP_SUBB + ot);
1469 break;
1470 default:
1471 case OP_ANDL:
1472 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1473 if (d != OR_TMP0)
1474 gen_op_mov_reg_T0(ot, d);
1475 else
1476 gen_op_st_T0_A0(ot + s1->mem_index);
1477 gen_op_update1_cc();
1478 set_cc_op(s1, CC_OP_LOGICB + ot);
1479 break;
1480 case OP_ORL:
1481 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1482 if (d != OR_TMP0)
1483 gen_op_mov_reg_T0(ot, d);
1484 else
1485 gen_op_st_T0_A0(ot + s1->mem_index);
1486 gen_op_update1_cc();
1487 set_cc_op(s1, CC_OP_LOGICB + ot);
1488 break;
1489 case OP_XORL:
1490 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1491 if (d != OR_TMP0)
1492 gen_op_mov_reg_T0(ot, d);
1493 else
1494 gen_op_st_T0_A0(ot + s1->mem_index);
1495 gen_op_update1_cc();
1496 set_cc_op(s1, CC_OP_LOGICB + ot);
1497 break;
1498 case OP_CMPL:
1499 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1500 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
1501 tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
1502 set_cc_op(s1, CC_OP_SUBB + ot);
1503 break;
1504 }
1505 }
1506
1507 /* if d == OR_TMP0, it means memory operand (address in A0) */
1508 static void gen_inc(DisasContext *s1, int ot, int d, int c)
1509 {
1510 if (d != OR_TMP0)
1511 gen_op_mov_TN_reg(ot, 0, d);
1512 else
1513 gen_op_ld_T0_A0(ot + s1->mem_index);
1514 gen_compute_eflags_c(s1, cpu_cc_src);
1515 if (c > 0) {
1516 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
1517 set_cc_op(s1, CC_OP_INCB + ot);
1518 } else {
1519 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
1520 set_cc_op(s1, CC_OP_DECB + ot);
1521 }
1522 if (d != OR_TMP0)
1523 gen_op_mov_reg_T0(ot, d);
1524 else
1525 gen_op_st_T0_A0(ot + s1->mem_index);
1526 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1527 }
1528
1529 static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
1530 int is_right, int is_arith)
1531 {
1532 target_ulong mask;
1533 int shift_label;
1534 TCGv t0, t1, t2;
1535
1536 if (ot == OT_QUAD) {
1537 mask = 0x3f;
1538 } else {
1539 mask = 0x1f;
1540 }
1541
1542 /* load */
1543 if (op1 == OR_TMP0) {
1544 gen_op_ld_T0_A0(ot + s->mem_index);
1545 } else {
1546 gen_op_mov_TN_reg(ot, 0, op1);
1547 }
1548
1549 t0 = tcg_temp_local_new();
1550 t1 = tcg_temp_local_new();
1551 t2 = tcg_temp_local_new();
1552
1553 tcg_gen_andi_tl(t2, cpu_T[1], mask);
1554
1555 if (is_right) {
1556 if (is_arith) {
1557 gen_exts(ot, cpu_T[0]);
1558 tcg_gen_mov_tl(t0, cpu_T[0]);
1559 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], t2);
1560 } else {
1561 gen_extu(ot, cpu_T[0]);
1562 tcg_gen_mov_tl(t0, cpu_T[0]);
1563 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], t2);
1564 }
1565 } else {
1566 tcg_gen_mov_tl(t0, cpu_T[0]);
1567 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], t2);
1568 }
1569
1570 /* store */
1571 if (op1 == OR_TMP0) {
1572 gen_op_st_T0_A0(ot + s->mem_index);
1573 } else {
1574 gen_op_mov_reg_T0(ot, op1);
1575 }
1576
1577 /* Update eflags data because we cannot predict flags afterward. */
1578 gen_update_cc_op(s);
1579 set_cc_op(s, CC_OP_DYNAMIC);
1580
1581 tcg_gen_mov_tl(t1, cpu_T[0]);
1582
1583 shift_label = gen_new_label();
1584 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, shift_label);
1585
1586 tcg_gen_addi_tl(t2, t2, -1);
1587 tcg_gen_mov_tl(cpu_cc_dst, t1);
1588
1589 if (is_right) {
1590 if (is_arith) {
1591 tcg_gen_sar_tl(cpu_cc_src, t0, t2);
1592 } else {
1593 tcg_gen_shr_tl(cpu_cc_src, t0, t2);
1594 }
1595 } else {
1596 tcg_gen_shl_tl(cpu_cc_src, t0, t2);
1597 }
1598
1599 if (is_right) {
1600 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
1601 } else {
1602 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
1603 }
1604
1605 gen_set_label(shift_label);
1606
1607 tcg_temp_free(t0);
1608 tcg_temp_free(t1);
1609 tcg_temp_free(t2);
1610 }
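/* Editorial note on the pattern above: a shift by zero must leave the
   flags unchanged, so cc_op is spilled as DYNAMIC up front and the
   concrete CC_OP_SARB/CC_OP_SHLB value is stored only on the non-zero
   path behind the brcond. */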
1611
1612 static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
1613 int is_right, int is_arith)
1614 {
1615 int mask;
1616
1617 if (ot == OT_QUAD)
1618 mask = 0x3f;
1619 else
1620 mask = 0x1f;
1621
1622 /* load */
1623 if (op1 == OR_TMP0)
1624 gen_op_ld_T0_A0(ot + s->mem_index);
1625 else
1626 gen_op_mov_TN_reg(ot, 0, op1);
1627
1628 op2 &= mask;
1629 if (op2 != 0) {
1630 if (is_right) {
1631 if (is_arith) {
1632 gen_exts(ot, cpu_T[0]);
1633 tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1634 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
1635 } else {
1636 gen_extu(ot, cpu_T[0]);
1637 tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1638 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
1639 }
1640 } else {
1641 tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1642 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
1643 }
1644 }
1645
1646 /* store */
1647 if (op1 == OR_TMP0)
1648 gen_op_st_T0_A0(ot + s->mem_index);
1649 else
1650 gen_op_mov_reg_T0(ot, op1);
1651
1652 /* update eflags if non-zero shift */
1653 if (op2 != 0) {
1654 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
1655 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1656 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1657 }
1658 }
1659
1660 static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
1661 {
1662 if (arg2 >= 0)
1663 tcg_gen_shli_tl(ret, arg1, arg2);
1664 else
1665 tcg_gen_shri_tl(ret, arg1, -arg2);
1666 }
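/* Editorial note: this signed-count helper exists so callers can move
   the MSB-xor into the CC_O position (bit 11) with a single
   expression: 11 - (data_bits - 1) is a left shift for 8-bit operands
   but a right shift for 16/32/64-bit ones. */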
1667
1668 static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
1669 int is_right)
1670 {
1671 target_ulong mask;
1672 int label1, label2, data_bits;
1673 TCGv t0, t1, t2, a0;
1674
1675 /* XXX: inefficient, but we must use local temps */
1676 t0 = tcg_temp_local_new();
1677 t1 = tcg_temp_local_new();
1678 t2 = tcg_temp_local_new();
1679 a0 = tcg_temp_local_new();
1680
1681 if (ot == OT_QUAD)
1682 mask = 0x3f;
1683 else
1684 mask = 0x1f;
1685
1686 /* load */
1687 if (op1 == OR_TMP0) {
1688 tcg_gen_mov_tl(a0, cpu_A0);
1689 gen_op_ld_v(ot + s->mem_index, t0, a0);
1690 } else {
1691 gen_op_mov_v_reg(ot, t0, op1);
1692 }
1693
1694 tcg_gen_mov_tl(t1, cpu_T[1]);
1695
1696 tcg_gen_andi_tl(t1, t1, mask);
1697
1698 /* Must test zero case to avoid using undefined behaviour in TCG
1699 shifts. */
1700 label1 = gen_new_label();
1701 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);
1702
1703 if (ot <= OT_WORD)
1704 tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
1705 else
1706 tcg_gen_mov_tl(cpu_tmp0, t1);
1707
1708 gen_extu(ot, t0);
1709 tcg_gen_mov_tl(t2, t0);
1710
1711 data_bits = 8 << ot;
1712 /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
1713 fix TCG definition) */
1714 if (is_right) {
1715 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
1716 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1717 tcg_gen_shl_tl(t0, t0, cpu_tmp0);
1718 } else {
1719 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
1720 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1721 tcg_gen_shr_tl(t0, t0, cpu_tmp0);
1722 }
1723 tcg_gen_or_tl(t0, t0, cpu_tmp4);
1724
1725 gen_set_label(label1);
1726 /* store */
1727 if (op1 == OR_TMP0) {
1728 gen_op_st_v(ot + s->mem_index, t0, a0);
1729 } else {
1730 gen_op_mov_reg_v(ot, op1, t0);
1731 }
1732
1733 /* update eflags; it is needed most of the time anyway, so do it always. */
1734 gen_compute_eflags(s);
1735 assert(s->cc_op == CC_OP_EFLAGS);
1736
1737 label2 = gen_new_label();
1738 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2);
1739
1740 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1741 tcg_gen_xor_tl(cpu_tmp0, t2, t0);
1742 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1743 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1744 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1745 if (is_right) {
1746 tcg_gen_shri_tl(t0, t0, data_bits - 1);
1747 }
1748 tcg_gen_andi_tl(t0, t0, CC_C);
1749 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1750
1751 gen_set_label(label2);
1752
1753 tcg_temp_free(t0);
1754 tcg_temp_free(t1);
1755 tcg_temp_free(t2);
1756 tcg_temp_free(a0);
1757 }
1758
1759 static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
1760 int is_right)
1761 {
1762 int mask;
1763 int data_bits;
1764 TCGv t0, t1, a0;
1765
1766 /* XXX: inefficient, but we must use local temps */
1767 t0 = tcg_temp_local_new();
1768 t1 = tcg_temp_local_new();
1769 a0 = tcg_temp_local_new();
1770
1771 if (ot == OT_QUAD)
1772 mask = 0x3f;
1773 else
1774 mask = 0x1f;
1775
1776 /* load */
1777 if (op1 == OR_TMP0) {
1778 tcg_gen_mov_tl(a0, cpu_A0);
1779 gen_op_ld_v(ot + s->mem_index, t0, a0);
1780 } else {
1781 gen_op_mov_v_reg(ot, t0, op1);
1782 }
1783
1784 gen_extu(ot, t0);
1785 tcg_gen_mov_tl(t1, t0);
1786
1787 op2 &= mask;
1788 data_bits = 8 << ot;
1789 if (op2 != 0) {
1790 int shift = op2 & ((1 << (3 + ot)) - 1);
1791 if (is_right) {
1792 tcg_gen_shri_tl(cpu_tmp4, t0, shift);
1793 tcg_gen_shli_tl(t0, t0, data_bits - shift);
1794 }
1795 else {
1796 tcg_gen_shli_tl(cpu_tmp4, t0, shift);
1797 tcg_gen_shri_tl(t0, t0, data_bits - shift);
1798 }
1799 tcg_gen_or_tl(t0, t0, cpu_tmp4);
1800 }
1801
1802 /* store */
1803 if (op1 == OR_TMP0) {
1804 gen_op_st_v(ot + s->mem_index, t0, a0);
1805 } else {
1806 gen_op_mov_reg_v(ot, op1, t0);
1807 }
1808
1809 if (op2 != 0) {
1810 /* update eflags */
1811 gen_compute_eflags(s);
1812 assert(s->cc_op == CC_OP_EFLAGS);
1813
1814 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1815 tcg_gen_xor_tl(cpu_tmp0, t1, t0);
1816 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1817 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1818 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1819 if (is_right) {
1820 tcg_gen_shri_tl(t0, t0, data_bits - 1);
1821 }
1822 tcg_gen_andi_tl(t0, t0, CC_C);
1823 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1824 }
1825
1826 tcg_temp_free(t0);
1827 tcg_temp_free(t1);
1828 tcg_temp_free(a0);
1829 }
1830
1831 /* XXX: add faster immediate = 1 case */
1832 static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
1833 int is_right)
1834 {
1835 gen_compute_eflags(s);
1836 assert(s->cc_op == CC_OP_EFLAGS);
1837
1838 /* load */
1839 if (op1 == OR_TMP0)
1840 gen_op_ld_T0_A0(ot + s->mem_index);
1841 else
1842 gen_op_mov_TN_reg(ot, 0, op1);
1843
1844 if (is_right) {
1845 switch (ot) {
1846 case OT_BYTE:
1847 gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1848 break;
1849 case OT_WORD:
1850 gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1851 break;
1852 case OT_LONG:
1853 gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1854 break;
1855 #ifdef TARGET_X86_64
1856 case OT_QUAD:
1857 gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1858 break;
1859 #endif
1860 }
1861 } else {
1862 switch (ot) {
1863 case OT_BYTE:
1864 gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1865 break;
1866 case OT_WORD:
1867 gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1868 break;
1869 case OT_LONG:
1870 gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1871 break;
1872 #ifdef TARGET_X86_64
1873 case OT_QUAD:
1874 gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1875 break;
1876 #endif
1877 }
1878 }
1879 /* store */
1880 if (op1 == OR_TMP0)
1881 gen_op_st_T0_A0(ot + s->mem_index);
1882 else
1883 gen_op_mov_reg_T0(ot, op1);
1884 }
1885
1886 /* XXX: add faster immediate case */
1887 static void gen_shiftd_rm_T1(DisasContext *s, int ot, int op1,
1888 int is_right, TCGv count)
1889 {
1890 int label1, label2, data_bits;
1891 target_ulong mask;
1892 TCGv t0, t1, t2, a0;
1893
1894 t0 = tcg_temp_local_new();
1895 t1 = tcg_temp_local_new();
1896 t2 = tcg_temp_local_new();
1897 a0 = tcg_temp_local_new();
1898
1899 if (ot == OT_QUAD)
1900 mask = 0x3f;
1901 else
1902 mask = 0x1f;
1903
1904 /* load */
1905 if (op1 == OR_TMP0) {
1906 tcg_gen_mov_tl(a0, cpu_A0);
1907 gen_op_ld_v(ot + s->mem_index, t0, a0);
1908 } else {
1909 gen_op_mov_v_reg(ot, t0, op1);
1910 }
1911
1912 tcg_gen_andi_tl(t2, count, mask);
1913 tcg_gen_mov_tl(t1, cpu_T[1]);
1914
1915 /* Must test zero case to avoid using undefined behaviour in TCG
1916 shifts. */
1917 label1 = gen_new_label();
1918 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
1919
1920 tcg_gen_addi_tl(cpu_tmp5, t2, -1);
1921 if (ot == OT_WORD) {
1922 /* Note: we implement the Intel behaviour for shift count > 16 */
1923 if (is_right) {
1924 tcg_gen_andi_tl(t0, t0, 0xffff);
1925 tcg_gen_shli_tl(cpu_tmp0, t1, 16);
1926 tcg_gen_or_tl(t0, t0, cpu_tmp0);
1927 tcg_gen_ext32u_tl(t0, t0);
1928
1929 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
1930
1931 /* only needed if count > 16, but a test would complicate the code */
1932 tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
1933 tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);
1934
1935 tcg_gen_shr_tl(t0, t0, t2);
1936
1937 tcg_gen_or_tl(t0, t0, cpu_tmp0);
1938 } else {
1939 /* XXX: not optimal */
1940 tcg_gen_andi_tl(t0, t0, 0xffff);
1941 tcg_gen_shli_tl(t1, t1, 16);
1942 tcg_gen_or_tl(t1, t1, t0);
1943 tcg_gen_ext32u_tl(t1, t1);
1944
1945 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
1946 tcg_gen_subfi_tl(cpu_tmp0, 32, cpu_tmp5);
1947 tcg_gen_shr_tl(cpu_tmp5, t1, cpu_tmp0);
1948 tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp5);
1949
1950 tcg_gen_shl_tl(t0, t0, t2);
1951 tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
1952 tcg_gen_shr_tl(t1, t1, cpu_tmp5);
1953 tcg_gen_or_tl(t0, t0, t1);
1954 }
1955 } else {
1956 data_bits = 8 << ot;
1957 if (is_right) {
1958 if (ot == OT_LONG)
1959 tcg_gen_ext32u_tl(t0, t0);
1960
1961 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
1962
1963 tcg_gen_shr_tl(t0, t0, t2);
1964 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
1965 tcg_gen_shl_tl(t1, t1, cpu_tmp5);
1966 tcg_gen_or_tl(t0, t0, t1);
1967
1968 } else {
1969 if (ot == OT_LONG)
1970 tcg_gen_ext32u_tl(t1, t1);
1971
1972 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
1973
1974 tcg_gen_shl_tl(t0, t0, t2);
1975 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
1976 tcg_gen_shr_tl(t1, t1, cpu_tmp5);
1977 tcg_gen_or_tl(t0, t0, t1);
1978 }
1979 }
1980 tcg_gen_mov_tl(t1, cpu_tmp4);
1981
1982 gen_set_label(label1);
1983 /* store */
1984 if (op1 == OR_TMP0) {
1985 gen_op_st_v(ot + s->mem_index, t0, a0);
1986 } else {
1987 gen_op_mov_reg_v(ot, op1, t0);
1988 }
1989
1990 /* Update eflags data because we cannot predict flags afterward. */
1991 gen_update_cc_op(s);
1992 set_cc_op(s, CC_OP_DYNAMIC);
1993
1994 label2 = gen_new_label();
1995 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2);
1996
1997 tcg_gen_mov_tl(cpu_cc_src, t1);
1998 tcg_gen_mov_tl(cpu_cc_dst, t0);
1999 if (is_right) {
2000 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
2001 } else {
2002 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
2003 }
2004 gen_set_label(label2);
2005
2006 tcg_temp_free(t0);
2007 tcg_temp_free(t1);
2008 tcg_temp_free(t2);
2009 tcg_temp_free(a0);
2010 }
2011
2012 static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
2013 {
2014 if (s != OR_TMP1)
2015 gen_op_mov_TN_reg(ot, 1, s);
2016 switch(op) {
2017 case OP_ROL:
2018 gen_rot_rm_T1(s1, ot, d, 0);
2019 break;
2020 case OP_ROR:
2021 gen_rot_rm_T1(s1, ot, d, 1);
2022 break;
2023 case OP_SHL:
2024 case OP_SHL1:
2025 gen_shift_rm_T1(s1, ot, d, 0, 0);
2026 break;
2027 case OP_SHR:
2028 gen_shift_rm_T1(s1, ot, d, 1, 0);
2029 break;
2030 case OP_SAR:
2031 gen_shift_rm_T1(s1, ot, d, 1, 1);
2032 break;
2033 case OP_RCL:
2034 gen_rotc_rm_T1(s1, ot, d, 0);
2035 break;
2036 case OP_RCR:
2037 gen_rotc_rm_T1(s1, ot, d, 1);
2038 break;
2039 }
2040 }
2041
2042 static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
2043 {
2044 switch(op) {
2045 case OP_ROL:
2046 gen_rot_rm_im(s1, ot, d, c, 0);
2047 break;
2048 case OP_ROR:
2049 gen_rot_rm_im(s1, ot, d, c, 1);
2050 break;
2051 case OP_SHL:
2052 case OP_SHL1:
2053 gen_shift_rm_im(s1, ot, d, c, 0, 0);
2054 break;
2055 case OP_SHR:
2056 gen_shift_rm_im(s1, ot, d, c, 1, 0);
2057 break;
2058 case OP_SAR:
2059 gen_shift_rm_im(s1, ot, d, c, 1, 1);
2060 break;
2061 default:
2062 /* currently not optimized */
2063 gen_op_movl_T1_im(c);
2064 gen_shift(s1, op, ot, d, OR_TMP1);
2065 break;
2066 }
2067 }
2068
2069 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm,
2070 int *reg_ptr, int *offset_ptr)
2071 {
2072 target_long disp;
2073 int havesib;
2074 int base;
2075 int index;
2076 int scale;
2077 int opreg;
2078 int mod, rm, code, override, must_add_seg;
2079
2080 override = s->override;
2081 must_add_seg = s->addseg;
2082 if (override >= 0)
2083 must_add_seg = 1;
2084 mod = (modrm >> 6) & 3;
2085 rm = modrm & 7;
2086
2087 if (s->aflag) {
2088
2089 havesib = 0;
2090 base = rm;
2091 index = 0;
2092 scale = 0;
2093
2094 if (base == 4) {
2095 havesib = 1;
2096 code = cpu_ldub_code(env, s->pc++);
2097 scale = (code >> 6) & 3;
2098 index = ((code >> 3) & 7) | REX_X(s);
2099 base = (code & 7);
2100 }
2101 base |= REX_B(s);
2102
2103 switch (mod) {
2104 case 0:
2105 if ((base & 7) == 5) {
2106 base = -1;
2107 disp = (int32_t)cpu_ldl_code(env, s->pc);
2108 s->pc += 4;
2109 if (CODE64(s) && !havesib) {
2110 disp += s->pc + s->rip_offset;
2111 }
2112 } else {
2113 disp = 0;
2114 }
2115 break;
2116 case 1:
2117 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2118 break;
2119 default:
2120 case 2:
2121 disp = (int32_t)cpu_ldl_code(env, s->pc);
2122 s->pc += 4;
2123 break;
2124 }
2125
2126 if (base >= 0) {
2127 /* for correct popl handling with esp */
2128 if (base == 4 && s->popl_esp_hack)
2129 disp += s->popl_esp_hack;
2130 #ifdef TARGET_X86_64
2131 if (s->aflag == 2) {
2132 gen_op_movq_A0_reg(base);
2133 if (disp != 0) {
2134 gen_op_addq_A0_im(disp);
2135 }
2136 } else
2137 #endif
2138 {
2139 gen_op_movl_A0_reg(base);
2140 if (disp != 0)
2141 gen_op_addl_A0_im(disp);
2142 }
2143 } else {
2144 #ifdef TARGET_X86_64
2145 if (s->aflag == 2) {
2146 gen_op_movq_A0_im(disp);
2147 } else
2148 #endif
2149 {
2150 gen_op_movl_A0_im(disp);
2151 }
2152 }
2153 /* index == 4 means no index */
2154 if (havesib && (index != 4)) {
2155 #ifdef TARGET_X86_64
2156 if (s->aflag == 2) {
2157 gen_op_addq_A0_reg_sN(scale, index);
2158 } else
2159 #endif
2160 {
2161 gen_op_addl_A0_reg_sN(scale, index);
2162 }
2163 }
2164 if (must_add_seg) {
2165 if (override < 0) {
2166 if (base == R_EBP || base == R_ESP)
2167 override = R_SS;
2168 else
2169 override = R_DS;
2170 }
2171 #ifdef TARGET_X86_64
2172 if (s->aflag == 2) {
2173 gen_op_addq_A0_seg(override);
2174 } else
2175 #endif
2176 {
2177 gen_op_addl_A0_seg(s, override);
2178 }
2179 }
2180 } else {
2181 switch (mod) {
2182 case 0:
2183 if (rm == 6) {
2184 disp = cpu_lduw_code(env, s->pc);
2185 s->pc += 2;
2186 gen_op_movl_A0_im(disp);
2187 rm = 0; /* avoid SS override */
2188 goto no_rm;
2189 } else {
2190 disp = 0;
2191 }
2192 break;
2193 case 1:
2194 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2195 break;
2196 default:
2197 case 2:
2198 disp = cpu_lduw_code(env, s->pc);
2199 s->pc += 2;
2200 break;
2201 }
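/* 16-bit addressing: rm selects one of the eight classic base/index
   combinations: BX+SI, BX+DI, BP+SI, BP+DI, SI, DI, BP, BX. */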
2202 switch(rm) {
2203 case 0:
2204 gen_op_movl_A0_reg(R_EBX);
2205 gen_op_addl_A0_reg_sN(0, R_ESI);
2206 break;
2207 case 1:
2208 gen_op_movl_A0_reg(R_EBX);
2209 gen_op_addl_A0_reg_sN(0, R_EDI);
2210 break;
2211 case 2:
2212 gen_op_movl_A0_reg(R_EBP);
2213 gen_op_addl_A0_reg_sN(0, R_ESI);
2214 break;
2215 case 3:
2216 gen_op_movl_A0_reg(R_EBP);
2217 gen_op_addl_A0_reg_sN(0, R_EDI);
2218 break;
2219 case 4:
2220 gen_op_movl_A0_reg(R_ESI);
2221 break;
2222 case 5:
2223 gen_op_movl_A0_reg(R_EDI);
2224 break;
2225 case 6:
2226 gen_op_movl_A0_reg(R_EBP);
2227 break;
2228 default:
2229 case 7:
2230 gen_op_movl_A0_reg(R_EBX);
2231 break;
2232 }
2233 if (disp != 0)
2234 gen_op_addl_A0_im(disp);
2235 gen_op_andl_A0_ffff();
2236 no_rm:
2237 if (must_add_seg) {
2238 if (override < 0) {
2239 if (rm == 2 || rm == 3 || rm == 6)
2240 override = R_SS;
2241 else
2242 override = R_DS;
2243 }
2244 gen_op_addl_A0_seg(s, override);
2245 }
2246 }
2247
2248 opreg = OR_A0;
2249 disp = 0;
2250 *reg_ptr = opreg;
2251 *offset_ptr = disp;
2252 }
2253
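/* Skip a ModRM-described memory operand without generating any code.
   This is used for hint instructions (e.g. the multi-byte NOP 0F 1F /0
   and similar reserved-NOP encodings) whose operand is decoded but
   never accessed; only s->pc is advanced past the SIB and
   displacement bytes. */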
2254 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2255 {
2256 int mod, rm, base, code;
2257
2258 mod = (modrm >> 6) & 3;
2259 if (mod == 3)
2260 return;
2261 rm = modrm & 7;
2262
2263 if (s->aflag) {
2264
2265 base = rm;
2266
2267 if (base == 4) {
2268 code = cpu_ldub_code(env, s->pc++);
2269 base = (code & 7);
2270 }
2271
2272 switch (mod) {
2273 case 0:
2274 if (base == 5) {
2275 s->pc += 4;
2276 }
2277 break;
2278 case 1:
2279 s->pc++;
2280 break;
2281 default:
2282 case 2:
2283 s->pc += 4;
2284 break;
2285 }
2286 } else {
2287 switch (mod) {
2288 case 0:
2289 if (rm == 6) {
2290 s->pc += 2;
2291 }
2292 break;
2293 case 1:
2294 s->pc++;
2295 break;
2296 default:
2297 case 2:
2298 s->pc += 2;
2299 break;
2300 }
2301 }
2302 }
2303
2304 /* used for LEA and MOV AX, mem */
2305 static void gen_add_A0_ds_seg(DisasContext *s)
2306 {
2307 int override, must_add_seg;
2308 must_add_seg = s->addseg;
2309 override = R_DS;
2310 if (s->override >= 0) {
2311 override = s->override;
2312 must_add_seg = 1;
2313 }
2314 if (must_add_seg) {
2315 #ifdef TARGET_X86_64
2316 if (CODE64(s)) {
2317 gen_op_addq_A0_seg(override);
2318 } else
2319 #endif
2320 {
2321 gen_op_addl_A0_seg(s, override);
2322 }
2323 }
2324 }
2325
2326 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2327 OR_TMP0 */
2328 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2329 int ot, int reg, int is_store)
2330 {
2331 int mod, rm, opreg, disp;
2332
2333 mod = (modrm >> 6) & 3;
2334 rm = (modrm & 7) | REX_B(s);
2335 if (mod == 3) {
2336 if (is_store) {
2337 if (reg != OR_TMP0)
2338 gen_op_mov_TN_reg(ot, 0, reg);
2339 gen_op_mov_reg_T0(ot, rm);
2340 } else {
2341 gen_op_mov_TN_reg(ot, 0, rm);
2342 if (reg != OR_TMP0)
2343 gen_op_mov_reg_T0(ot, reg);
2344 }
2345 } else {
2346 gen_lea_modrm(env, s, modrm, &opreg, &disp);
2347 if (is_store) {
2348 if (reg != OR_TMP0)
2349 gen_op_mov_TN_reg(ot, 0, reg);
2350 gen_op_st_T0_A0(ot + s->mem_index);
2351 } else {
2352 gen_op_ld_T0_A0(ot + s->mem_index);
2353 if (reg != OR_TMP0)
2354 gen_op_mov_reg_T0(ot, reg);
2355 }
2356 }
2357 }
2358
2359 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, int ot)
2360 {
2361 uint32_t ret;
2362
2363 switch(ot) {
2364 case OT_BYTE:
2365 ret = cpu_ldub_code(env, s->pc);
2366 s->pc++;
2367 break;
2368 case OT_WORD:
2369 ret = cpu_lduw_code(env, s->pc);
2370 s->pc += 2;
2371 break;
2372 default:
2373 case OT_LONG:
2374 ret = cpu_ldl_code(env, s->pc);
2375 s->pc += 4;
2376 break;
2377 }
2378 return ret;
2379 }
2380
2381 static inline int insn_const_size(unsigned int ot)
2382 {
2383 if (ot <= OT_LONG)
2384 return 1 << ot;
2385 else
2386 return 4;
2387 }
2388
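/* End the TB with direct block chaining when the target stays within
   the translated pages.  tcg_gen_goto_tb() emits a patchable jump
   slot, and exiting with (tb + tb_num) tells the execution loop which
   slot to patch once the successor TB exists, so later executions
   jump straight to it without a lookup. */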
2389 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2390 {
2391 TranslationBlock *tb;
2392 target_ulong pc;
2393
2394 pc = s->cs_base + eip;
2395 tb = s->tb;
2396 /* NOTE: we handle the case where the TB spans two pages here */
2397 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2398 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2399 /* jump to same page: we can use a direct jump */
2400 tcg_gen_goto_tb(tb_num);
2401 gen_jmp_im(eip);
2402 tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
2403 } else {
2404 /* jump to another page: currently not optimized */
2405 gen_jmp_im(eip);
2406 gen_eob(s);
2407 }
2408 }
2409
2410 static inline void gen_jcc(DisasContext *s, int b,
2411 target_ulong val, target_ulong next_eip)
2412 {
2413 int l1, l2;
2414
2415 if (s->jmp_opt) {
2416 l1 = gen_new_label();
2417 gen_jcc1(s, b, l1);
2418
2419 gen_goto_tb(s, 0, next_eip);
2420
2421 gen_set_label(l1);
2422 gen_goto_tb(s, 1, val);
2423 s->is_jmp = DISAS_TB_JUMP;
2424 } else {
2425 l1 = gen_new_label();
2426 l2 = gen_new_label();
2427 gen_jcc1(s, b, l1);
2428
2429 gen_jmp_im(next_eip);
2430 tcg_gen_br(l2);
2431
2432 gen_set_label(l1);
2433 gen_jmp_im(val);
2434 gen_set_label(l2);
2435 gen_eob(s);
2436 }
2437 }
2438
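/* CMOVcc: the source operand is loaded unconditionally (a memory
   source is read even when the condition is false, matching real
   hardware), then a movcond selects between the loaded value and the
   previous register contents, so no branch is generated. */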
2439 static void gen_cmovcc1(CPUX86State *env, DisasContext *s, int ot, int b,
2440 int modrm, int reg)
2441 {
2442 CCPrepare cc;
2443
2444 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2445
2446 cc = gen_prepare_cc(s, b, cpu_T[1]);
2447 if (cc.mask != -1) {
2448 TCGv t0 = tcg_temp_new();
2449 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2450 cc.reg = t0;
2451 }
2452 if (!cc.use_reg2) {
2453 cc.reg2 = tcg_const_tl(cc.imm);
2454 }
2455
2456 tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
2457 cpu_T[0], cpu_regs[reg]);
2458 gen_op_mov_reg_T0(ot, reg);
2459
2460 if (cc.mask != -1) {
2461 tcg_temp_free(cc.reg);
2462 }
2463 if (!cc.use_reg2) {
2464 tcg_temp_free(cc.reg2);
2465 }
2466 }
2467
2468 static inline void gen_op_movl_T0_seg(int seg_reg)
2469 {
2470 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2471 offsetof(CPUX86State,segs[seg_reg].selector));
2472 }
2473
2474 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2475 {
2476 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2477 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2478 offsetof(CPUX86State,segs[seg_reg].selector));
2479 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2480 tcg_gen_st_tl(cpu_T[0], cpu_env,
2481 offsetof(CPUX86State,segs[seg_reg].base));
2482 }
2483
2484 /* move T0 to seg_reg and determine whether the CPU state may change.
2485 Never call this function with seg_reg == R_CS */
2486 static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2487 {
2488 if (s->pe && !s->vm86) {
2489 /* XXX: optimize by finding processor state dynamically */
2490 gen_update_cc_op(s);
2491 gen_jmp_im(cur_eip);
2492 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2493 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
2494 /* abort translation because the addseg value may change or
2495 because ss32 may change. For R_SS, translation must always
2496 stop, as special handling is needed to inhibit hardware
2497 interrupts for the next instruction */
2498 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2499 s->is_jmp = DISAS_TB_JUMP;
2500 } else {
2501 gen_op_movl_seg_T0_vm(seg_reg);
2502 if (seg_reg == R_SS)
2503 s->is_jmp = DISAS_TB_JUMP;
2504 }
2505 }
2506
2507 static inline int svm_is_rep(int prefixes)
2508 {
2509 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2510 }
2511
2512 static inline void
2513 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2514 uint32_t type, uint64_t param)
2515 {
2516 /* no SVM activated; fast case */
2517 if (likely(!(s->flags & HF_SVMI_MASK)))
2518 return;
2519 gen_update_cc_op(s);
2520 gen_jmp_im(pc_start - s->cs_base);
2521 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
2522 tcg_const_i64(param));
2523 }
2524
2525 static inline void
2526 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2527 {
2528 gen_svm_check_intercept_param(s, pc_start, type, 0);
2529 }
2530
2531 static inline void gen_stack_update(DisasContext *s, int addend)
2532 {
2533 #ifdef TARGET_X86_64
2534 if (CODE64(s)) {
2535 gen_op_add_reg_im(2, R_ESP, addend);
2536 } else
2537 #endif
2538 if (s->ss32) {
2539 gen_op_add_reg_im(1, R_ESP, addend);
2540 } else {
2541 gen_op_add_reg_im(0, R_ESP, addend);
2542 }
2543 }
2544
2545 /* generate a push. It depends on ss32, addseg and dflag */
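/* Three cases are distinguished: in 64-bit mode, 8 or 2 bytes are
   stored at [RSP]; with a 32-bit stack segment (ss32), ESP can be
   used as the address unless a non-zero SS base (addseg) must be
   added; a 16-bit stack masks the address to 64K and always adds the
   SS base.  E.g. with ss32 && !addseg a 32-bit push simply stores at
   ESP - 4 and writes the new ESP back from A0. */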
2546 static void gen_push_T0(DisasContext *s)
2547 {
2548 #ifdef TARGET_X86_64
2549 if (CODE64(s)) {
2550 gen_op_movq_A0_reg(R_ESP);
2551 if (s->dflag) {
2552 gen_op_addq_A0_im(-8);
2553 gen_op_st_T0_A0(OT_QUAD + s->mem_index);
2554 } else {
2555 gen_op_addq_A0_im(-2);
2556 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2557 }
2558 gen_op_mov_reg_A0(2, R_ESP);
2559 } else
2560 #endif
2561 {
2562 gen_op_movl_A0_reg(R_ESP);
2563 if (!s->dflag)
2564 gen_op_addl_A0_im(-2);
2565 else
2566 gen_op_addl_A0_im(-4);
2567 if (s->ss32) {
2568 if (s->addseg) {
2569 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2570 gen_op_addl_A0_seg(s, R_SS);
2571 }
2572 } else {
2573 gen_op_andl_A0_ffff();
2574 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2575 gen_op_addl_A0_seg(s, R_SS);
2576 }
2577 gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
2578 if (s->ss32 && !s->addseg)
2579 gen_op_mov_reg_A0(1, R_ESP);
2580 else
2581 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
2582 }
2583 }
2584
2585 /* generate a push of T1. It depends on ss32, addseg and dflag; */
2586 /* this slower variant is only used for call Ev */
2587 static void gen_push_T1(DisasContext *s)
2588 {
2589 #ifdef TARGET_X86_64
2590 if (CODE64(s)) {
2591 gen_op_movq_A0_reg(R_ESP);
2592 if (s->dflag) {
2593 gen_op_addq_A0_im(-8);
2594 gen_op_st_T1_A0(OT_QUAD + s->mem_index);
2595 } else {
2596 gen_op_addq_A0_im(-2);
2597 gen_op_st_T1_A0(OT_WORD + s->mem_index);
2598 }
2599 gen_op_mov_reg_A0(2, R_ESP);
2600 } else
2601 #endif
2602 {
2603 gen_op_movl_A0_reg(R_ESP);
2604 if (!s->dflag)
2605 gen_op_addl_A0_im(-2);
2606 else
2607 gen_op_addl_A0_im(-4);
2608 if (s->ss32) {
2609 if (s->addseg) {
2610 gen_op_addl_A0_seg(s, R_SS);
2611 }
2612 } else {
2613 gen_op_andl_A0_ffff();
2614 gen_op_addl_A0_seg(s, R_SS);
2615 }
2616 gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
2617
2618 if (s->ss32 && !s->addseg)
2619 gen_op_mov_reg_A0(1, R_ESP);
2620 else
2621 gen_stack_update(s, (-2) << s->dflag);
2622 }
2623 }
2624
2625 /* a two-step pop is necessary for precise exceptions */
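/* The value is loaded first without touching ESP, so that if the
   load faults the instruction can be restarted with the original
   stack pointer; gen_pop_update() advances ESP only afterwards. */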
2626 static void gen_pop_T0(DisasContext *s)
2627 {
2628 #ifdef TARGET_X86_64
2629 if (CODE64(s)) {
2630 gen_op_movq_A0_reg(R_ESP);
2631 gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
2632 } else
2633 #endif
2634 {
2635 gen_op_movl_A0_reg(R_ESP);
2636 if (s->ss32) {
2637 if (s->addseg)
2638 gen_op_addl_A0_seg(s, R_SS);
2639 } else {
2640 gen_op_andl_A0_ffff();
2641 gen_op_addl_A0_seg(s, R_SS);
2642 }
2643 gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
2644 }
2645 }
2646
2647 static void gen_pop_update(DisasContext *s)
2648 {
2649 #ifdef TARGET_X86_64
2650 if (CODE64(s) && s->dflag) {
2651 gen_stack_update(s, 8);
2652 } else
2653 #endif
2654 {
2655 gen_stack_update(s, 2 << s->dflag);
2656 }
2657 }
2658
2659 static void gen_stack_A0(DisasContext *s)
2660 {
2661 gen_op_movl_A0_reg(R_ESP);
2662 if (!s->ss32)
2663 gen_op_andl_A0_ffff();
2664 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2665 if (s->addseg)
2666 gen_op_addl_A0_seg(s, R_SS);
2667 }
2668
2669 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2670 static void gen_pusha(DisasContext *s)
2671 {
2672 int i;
2673 gen_op_movl_A0_reg(R_ESP);
2674 gen_op_addl_A0_im(-16 << s->dflag);
2675 if (!s->ss32)
2676 gen_op_andl_A0_ffff();
2677 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2678 if (s->addseg)
2679 gen_op_addl_A0_seg(s, R_SS);
2680 for (i = 0; i < 8; i++) {
2681 gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
2682 gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
2683 gen_op_addl_A0_im(2 << s->dflag);
2684 }
2685 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2686 }
2687
2688 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2689 static void gen_popa(DisasContext *s)
2690 {
2691 int i;
2692 gen_op_movl_A0_reg(R_ESP);
2693 if (!s->ss32)
2694 gen_op_andl_A0_ffff();
2695 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2696 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2697 if (s->addseg)
2698 gen_op_addl_A0_seg(s, R_SS);
2699 for (i = 0; i < 8; i++) {
2700 /* ESP is not reloaded */
2701 if (i != 3) {
2702 gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
2703 gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
2704 }
2705 gen_op_addl_A0_im(2 << s->dflag);
2706 }
2707 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2708 }
2709
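/* ENTER: push BP, optionally copy 'level' nested frame pointers (via
   the enter_level helpers when level != 0), point BP at the new frame
   and reserve esp_addend bytes of locals.  E.g. ENTER 8, 0 behaves
   like push ebp; mov ebp, esp; sub esp, 8 in 32-bit code. */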
2710 static void gen_enter(DisasContext *s, int esp_addend, int level)
2711 {
2712 int ot, opsize;
2713
2714 level &= 0x1f;
2715 #ifdef TARGET_X86_64
2716 if (CODE64(s)) {
2717 ot = s->dflag ? OT_QUAD : OT_WORD;
2718 opsize = 1 << ot;
2719
2720 gen_op_movl_A0_reg(R_ESP);
2721 gen_op_addq_A0_im(-opsize);
2722 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2723
2724 /* push bp */
2725 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2726 gen_op_st_T0_A0(ot + s->mem_index);
2727 if (level) {
2728 /* XXX: must save state */
2729 gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
2730 tcg_const_i32((ot == OT_QUAD)),
2731 cpu_T[1]);
2732 }
2733 gen_op_mov_reg_T1(ot, R_EBP);
2734 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2735 gen_op_mov_reg_T1(OT_QUAD, R_ESP);
2736 } else
2737 #endif
2738 {
2739 ot = s->dflag + OT_WORD;
2740 opsize = 2 << s->dflag;
2741
2742 gen_op_movl_A0_reg(R_ESP);
2743 gen_op_addl_A0_im(-opsize);
2744 if (!s->ss32)
2745 gen_op_andl_A0_ffff();
2746 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2747 if (s->addseg)
2748 gen_op_addl_A0_seg(s, R_SS);
2749 /* push bp */
2750 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2751 gen_op_st_T0_A0(ot + s->mem_index);
2752 if (level) {
2753 /* XXX: must save state */
2754 gen_helper_enter_level(cpu_env, tcg_const_i32(level),
2755 tcg_const_i32(s->dflag),
2756 cpu_T[1]);
2757 }
2758 gen_op_mov_reg_T1(ot, R_EBP);
2759 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2760 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2761 }
2762 }
2763
2764 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2765 {
2766 gen_update_cc_op(s);
2767 gen_jmp_im(cur_eip);
2768 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
2769 s->is_jmp = DISAS_TB_JUMP;
2770 }
2771
2772 /* an interrupt is different from an exception because of the
2773 privilege checks */
2774 static void gen_interrupt(DisasContext *s, int intno,
2775 target_ulong cur_eip, target_ulong next_eip)
2776 {
2777 gen_update_cc_op(s);
2778 gen_jmp_im(cur_eip);
2779 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
2780 tcg_const_i32(next_eip - cur_eip));
2781 s->is_jmp = DISAS_TB_JUMP;
2782 }
2783
2784 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2785 {
2786 gen_update_cc_op(s);
2787 gen_jmp_im(cur_eip);
2788 gen_helper_debug(cpu_env);
2789 s->is_jmp = DISAS_TB_JUMP;
2790 }
2791
2792 /* generate a generic end of block. A trace exception is also
2793 generated if needed */
2794 static void gen_eob(DisasContext *s)
2795 {
2796 gen_update_cc_op(s);
2797 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2798 gen_helper_reset_inhibit_irq(cpu_env);
2799 }
2800 if (s->tb->flags & HF_RF_MASK) {
2801 gen_helper_reset_rf(cpu_env);
2802 }
2803 if (s->singlestep_enabled) {
2804 gen_helper_debug(cpu_env);
2805 } else if (s->tf) {
2806 gen_helper_single_step(cpu_env);
2807 } else {
2808 tcg_gen_exit_tb(0);
2809 }
2810 s->is_jmp = DISAS_TB_JUMP;
2811 }
2812
2813 /* generate a jump to eip. No segment change may happen before this,
2814 as a direct jump to the next block may occur */
2815 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2816 {
2817 gen_update_cc_op(s);
2818 set_cc_op(s, CC_OP_DYNAMIC);
2819 if (s->jmp_opt) {
2820 gen_goto_tb(s, tb_num, eip);
2821 s->is_jmp = DISAS_TB_JUMP;
2822 } else {
2823 gen_jmp_im(eip);
2824 gen_eob(s);
2825 }
2826 }
2827
2828 static void gen_jmp(DisasContext *s, target_ulong eip)
2829 {
2830 gen_jmp_tb(s, eip, 0);
2831 }
2832
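/* s->mem_index packs the softmmu MMU index pre-shifted left by 2 so
   that it can be added directly to an OT_* size constant when picking
   a load/store op; (idx >> 2) - 1 recovers the raw index expected by
   the tcg_gen_qemu_ld/st ops. */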
2833 static inline void gen_ldq_env_A0(int idx, int offset)
2834 {
2835 int mem_index = (idx >> 2) - 1;
2836 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2837 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2838 }
2839
2840 static inline void gen_stq_env_A0(int idx, int offset)
2841 {
2842 int mem_index = (idx >> 2) - 1;
2843 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2844 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2845 }
2846
2847 static inline void gen_ldo_env_A0(int idx, int offset)
2848 {
2849 int mem_index = (idx >> 2) - 1;
2850 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2851 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2852 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2853 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2854 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2855 }
2856
2857 static inline void gen_sto_env_A0(int idx, int offset)
2858 {
2859 int mem_index = (idx >> 2) - 1;
2860 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2861 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2862 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2863 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2864 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2865 }
2866
2867 static inline void gen_op_movo(int d_offset, int s_offset)
2868 {
2869 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2870 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2871 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2872 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
2873 }
2874
2875 static inline void gen_op_movq(int d_offset, int s_offset)
2876 {
2877 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2878 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2879 }
2880
2881 static inline void gen_op_movl(int d_offset, int s_offset)
2882 {
2883 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2884 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2885 }
2886
2887 static inline void gen_op_movq_env_0(int d_offset)
2888 {
2889 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2890 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2891 }
2892
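/* The SSEFunc typedef names encode the helper signature: the first
   character is the return kind (0 = void, i = i32, l = i64) and the
   remaining letters are the arguments in order: e = cpu_env, p = data
   pointer, i = i32, l = i64, t = target-sized value.  Thus
   SSEFunc_0_eppi is void fn(env, ptr, ptr, i32). */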
2893 typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2894 typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2895 typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2896 typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2897 typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2898 typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2899 TCGv_i32 val);
2900 typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
2901 typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2902 TCGv val);
2903
2904 #define SSE_SPECIAL ((void *)1)
2905 #define SSE_DUMMY ((void *)2)
2906
2907 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2908 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2909 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
2910
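/* sse_op_table1 is indexed by [second opcode byte][prefix column],
   where the column is 0 for no mandatory prefix, 1 for 0x66, 2 for
   0xF3 and 3 for 0xF2, matching the b1 computation in gen_sse().  A
   NULL entry is an illegal encoding, SSE_SPECIAL is handled by
   dedicated code in gen_sse(), and SSE_DUMMY marks opcodes (femms,
   emms) that need no table operation. */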
2911 static const SSEFunc_0_epp sse_op_table1[256][4] = {
2912 /* 3DNow! extensions */
2913 [0x0e] = { SSE_DUMMY }, /* femms */
2914 [0x0f] = { SSE_DUMMY }, /* pf... */
2915 /* pure SSE operations */
2916 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2917 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2918 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2919 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2920 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2921 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2922 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2923 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2924
2925 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2926 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2927 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2928 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2929 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
2930 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
2931 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2932 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2933 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2934 [0x51] = SSE_FOP(sqrt),
2935 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2936 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2937 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2938 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2939 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2940 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2941 [0x58] = SSE_FOP(add),
2942 [0x59] = SSE_FOP(mul),
2943 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2944 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2945 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2946 [0x5c] = SSE_FOP(sub),
2947 [0x5d] = SSE_FOP(min),
2948 [0x5e] = SSE_FOP(div),
2949 [0x5f] = SSE_FOP(max),
2950
2951 [0xc2] = SSE_FOP(cmpeq),
2952 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2953 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
2954
2955 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2956 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2957
2958 /* MMX ops and their SSE extensions */
2959 [0x60] = MMX_OP2(punpcklbw),
2960 [0x61] = MMX_OP2(punpcklwd),
2961 [0x62] = MMX_OP2(punpckldq),
2962 [0x63] = MMX_OP2(packsswb),
2963 [0x64] = MMX_OP2(pcmpgtb),
2964 [0x65] = MMX_OP2(pcmpgtw),
2965 [0x66] = MMX_OP2(pcmpgtl),
2966 [0x67] = MMX_OP2(packuswb),
2967 [0x68] = MMX_OP2(punpckhbw),
2968 [0x69] = MMX_OP2(punpckhwd),
2969 [0x6a] = MMX_OP2(punpckhdq),
2970 [0x6b] = MMX_OP2(packssdw),
2971 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2972 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2973 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2974 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2975 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2976 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2977 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2978 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2979 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2980 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2981 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2982 [0x74] = MMX_OP2(pcmpeqb),
2983 [0x75] = MMX_OP2(pcmpeqw),
2984 [0x76] = MMX_OP2(pcmpeql),
2985 [0x77] = { SSE_DUMMY }, /* emms */
2986 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2987 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
2988 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2989 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2990 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2991 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2992 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2993 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2994 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2995 [0xd1] = MMX_OP2(psrlw),
2996 [0xd2] = MMX_OP2(psrld),
2997 [0xd3] = MMX_OP2(psrlq),
2998 [0xd4] = MMX_OP2(paddq),
2999 [0xd5] = MMX_OP2(pmullw),
3000 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
3001 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
3002 [0xd8] = MMX_OP2(psubusb),
3003 [0xd9] = MMX_OP2(psubusw),
3004 [0xda] = MMX_OP2(pminub),
3005 [0xdb] = MMX_OP2(pand),
3006 [0xdc] = MMX_OP2(paddusb),
3007 [0xdd] = MMX_OP2(paddusw),
3008 [0xde] = MMX_OP2(pmaxub),
3009 [0xdf] = MMX_OP2(pandn),
3010 [0xe0] = MMX_OP2(pavgb),
3011 [0xe1] = MMX_OP2(psraw),
3012 [0xe2] = MMX_OP2(psrad),
3013 [0xe3] = MMX_OP2(pavgw),
3014 [0xe4] = MMX_OP2(pmulhuw),
3015 [0xe5] = MMX_OP2(pmulhw),
3016 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
3017 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
3018 [0xe8] = MMX_OP2(psubsb),
3019 [0xe9] = MMX_OP2(psubsw),
3020 [0xea] = MMX_OP2(pminsw),
3021 [0xeb] = MMX_OP2(por),
3022 [0xec] = MMX_OP2(paddsb),
3023 [0xed] = MMX_OP2(paddsw),
3024 [0xee] = MMX_OP2(pmaxsw),
3025 [0xef] = MMX_OP2(pxor),
3026 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
3027 [0xf1] = MMX_OP2(psllw),
3028 [0xf2] = MMX_OP2(pslld),
3029 [0xf3] = MMX_OP2(psllq),
3030 [0xf4] = MMX_OP2(pmuludq),
3031 [0xf5] = MMX_OP2(pmaddwd),
3032 [0xf6] = MMX_OP2(psadbw),
3033 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
3034 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
3035 [0xf8] = MMX_OP2(psubb),
3036 [0xf9] = MMX_OP2(psubw),
3037 [0xfa] = MMX_OP2(psubl),
3038 [0xfb] = MMX_OP2(psubq),
3039 [0xfc] = MMX_OP2(paddb),
3040 [0xfd] = MMX_OP2(paddw),
3041 [0xfe] = MMX_OP2(paddl),
3042 };
3043
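/* sse_op_table2 serves the 0x71..0x73 shift-by-immediate groups:
   the row is (opcode - 0x71) * 8 plus the /r field of the ModRM byte
   (computed below as ((b - 1) & 3) * 8 + ((modrm >> 3) & 7)), and
   the column again selects the MMX or 0x66-prefixed XMM form. */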
3044 static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
3045 [0 + 2] = MMX_OP2(psrlw),
3046 [0 + 4] = MMX_OP2(psraw),
3047 [0 + 6] = MMX_OP2(psllw),
3048 [8 + 2] = MMX_OP2(psrld),
3049 [8 + 4] = MMX_OP2(psrad),
3050 [8 + 6] = MMX_OP2(pslld),
3051 [16 + 2] = MMX_OP2(psrlq),
3052 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
3053 [16 + 6] = MMX_OP2(psllq),
3054 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
3055 };
3056
3057 static const SSEFunc_0_epi sse_op_table3ai[] = {
3058 gen_helper_cvtsi2ss,
3059 gen_helper_cvtsi2sd
3060 };
3061
3062 #ifdef TARGET_X86_64
3063 static const SSEFunc_0_epl sse_op_table3aq[] = {
3064 gen_helper_cvtsq2ss,
3065 gen_helper_cvtsq2sd
3066 };
3067 #endif
3068
3069 static const SSEFunc_i_ep sse_op_table3bi[] = {
3070 gen_helper_cvttss2si,
3071 gen_helper_cvtss2si,
3072 gen_helper_cvttsd2si,
3073 gen_helper_cvtsd2si
3074 };
3075
3076 #ifdef TARGET_X86_64
3077 static const SSEFunc_l_ep sse_op_table3bq[] = {
3078 gen_helper_cvttss2sq,
3079 gen_helper_cvtss2sq,
3080 gen_helper_cvttsd2sq,
3081 gen_helper_cvtsd2sq
3082 };
3083 #endif
3084
3085 static const SSEFunc_0_epp sse_op_table4[8][4] = {
3086 SSE_FOP(cmpeq),
3087 SSE_FOP(cmplt),
3088 SSE_FOP(cmple),
3089 SSE_FOP(cmpunord),
3090 SSE_FOP(cmpneq),
3091 SSE_FOP(cmpnlt),
3092 SSE_FOP(cmpnle),
3093 SSE_FOP(cmpord),
3094 };
3095
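/* sse_op_table5 maps 3DNow! operations, which are selected by the
   suffix byte following the operands in the 0F 0F encoding. */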
3096 static const SSEFunc_0_epp sse_op_table5[256] = {
3097 [0x0c] = gen_helper_pi2fw,
3098 [0x0d] = gen_helper_pi2fd,
3099 [0x1c] = gen_helper_pf2iw,
3100 [0x1d] = gen_helper_pf2id,
3101 [0x8a] = gen_helper_pfnacc,
3102 [0x8e] = gen_helper_pfpnacc,
3103 [0x90] = gen_helper_pfcmpge,
3104 [0x94] = gen_helper_pfmin,
3105 [0x96] = gen_helper_pfrcp,
3106 [0x97] = gen_helper_pfrsqrt,
3107 [0x9a] = gen_helper_pfsub,
3108 [0x9e] = gen_helper_pfadd,
3109 [0xa0] = gen_helper_pfcmpgt,
3110 [0xa4] = gen_helper_pfmax,
3111 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
3112 [0xa7] = gen_helper_movq, /* pfrsqit1 */
3113 [0xaa] = gen_helper_pfsubr,
3114 [0xae] = gen_helper_pfacc,
3115 [0xb0] = gen_helper_pfcmpeq,
3116 [0xb4] = gen_helper_pfmul,
3117 [0xb6] = gen_helper_movq, /* pfrcpit2 */
3118 [0xb7] = gen_helper_pmulhrw_mmx,
3119 [0xbb] = gen_helper_pswapd,
3120 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
3121 };
3122
3123 struct SSEOpHelper_epp {
3124 SSEFunc_0_epp op[2];
3125 uint32_t ext_mask;
3126 };
3127
3128 struct SSEOpHelper_eppi {
3129 SSEFunc_0_eppi op[2];
3130 uint32_t ext_mask;
3131 };
3132
3133 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
3134 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3135 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
3136 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
3137
3138 static const struct SSEOpHelper_epp sse_op_table6[256] = {
3139 [0x00] = SSSE3_OP(pshufb),
3140 [0x01] = SSSE3_OP(phaddw),
3141 [0x02] = SSSE3_OP(phaddd),
3142 [0x03] = SSSE3_OP(phaddsw),
3143 [0x04] = SSSE3_OP(pmaddubsw),
3144 [0x05] = SSSE3_OP(phsubw),
3145 [0x06] = SSSE3_OP(phsubd),
3146 [0x07] = SSSE3_OP(phsubsw),
3147 [0x08] = SSSE3_OP(psignb),
3148 [0x09] = SSSE3_OP(psignw),
3149 [0x0a] = SSSE3_OP(psignd),
3150 [0x0b] = SSSE3_OP(pmulhrsw),
3151 [0x10] = SSE41_OP(pblendvb),
3152 [0x14] = SSE41_OP(blendvps),
3153 [0x15] = SSE41_OP(blendvpd),
3154 [0x17] = SSE41_OP(ptest),
3155 [0x1c] = SSSE3_OP(pabsb),
3156 [0x1d] = SSSE3_OP(pabsw),
3157 [0x1e] = SSSE3_OP(pabsd),
3158 [0x20] = SSE41_OP(pmovsxbw),
3159 [0x21] = SSE41_OP(pmovsxbd),
3160 [0x22] = SSE41_OP(pmovsxbq),
3161 [0x23] = SSE41_OP(pmovsxwd),
3162 [0x24] = SSE41_OP(pmovsxwq),
3163 [0x25] = SSE41_OP(pmovsxdq),
3164 [0x28] = SSE41_OP(pmuldq),
3165 [0x29] = SSE41_OP(pcmpeqq),
3166 [0x2a] = SSE41_SPECIAL, /* movntdqa */
3167 [0x2b] = SSE41_OP(packusdw),
3168 [0x30] = SSE41_OP(pmovzxbw),
3169 [0x31] = SSE41_OP(pmovzxbd),
3170 [0x32] = SSE41_OP(pmovzxbq),
3171 [0x33] = SSE41_OP(pmovzxwd),
3172 [0x34] = SSE41_OP(pmovzxwq),
3173 [0x35] = SSE41_OP(pmovzxdq),
3174 [0x37] = SSE42_OP(pcmpgtq),
3175 [0x38] = SSE41_OP(pminsb),
3176 [0x39] = SSE41_OP(pminsd),
3177 [0x3a] = SSE41_OP(pminuw),
3178 [0x3b] = SSE41_OP(pminud),
3179 [0x3c] = SSE41_OP(pmaxsb),
3180 [0x3d] = SSE41_OP(pmaxsd),
3181 [0x3e] = SSE41_OP(pmaxuw),
3182 [0x3f] = SSE41_OP(pmaxud),
3183 [0x40] = SSE41_OP(pmulld),
3184 [0x41] = SSE41_OP(phminposuw),
3185 };
3186
3187 static const struct SSEOpHelper_eppi sse_op_table7[256] = {
3188 [0x08] = SSE41_OP(roundps),
3189 [0x09] = SSE41_OP(roundpd),
3190 [0x0a] = SSE41_OP(roundss),
3191 [0x0b] = SSE41_OP(roundsd),
3192 [0x0c] = SSE41_OP(blendps),
3193 [0x0d] = SSE41_OP(blendpd),
3194 [0x0e] = SSE41_OP(pblendw),
3195 [0x0f] = SSSE3_OP(palignr),
3196 [0x14] = SSE41_SPECIAL, /* pextrb */
3197 [0x15] = SSE41_SPECIAL, /* pextrw */
3198 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
3199 [0x17] = SSE41_SPECIAL, /* extractps */
3200 [0x20] = SSE41_SPECIAL, /* pinsrb */
3201 [0x21] = SSE41_SPECIAL, /* insertps */
3202 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
3203 [0x40] = SSE41_OP(dpps),
3204 [0x41] = SSE41_OP(dppd),
3205 [0x42] = SSE41_OP(mpsadbw),
3206 [0x60] = SSE42_OP(pcmpestrm),
3207 [0x61] = SSE42_OP(pcmpestri),
3208 [0x62] = SSE42_OP(pcmpistrm),
3209 [0x63] = SSE42_OP(pcmpistri),
3210 };
3211
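/* Decode and emit one MMX/SSE instruction.  'b' is the opcode byte
   that followed 0x0F; b1 classifies the mandatory prefix (none/0x66/
   0xF3/0xF2) and, for SSE_SPECIAL entries, b1 << 8 is merged into b
   so a single switch can tell the prefixed forms apart. */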
3212 static void gen_sse(CPUX86State *env, DisasContext *s, int b,
3213 target_ulong pc_start, int rex_r)
3214 {
3215 int b1, op1_offset, op2_offset, is_xmm, val, ot;
3216 int modrm, mod, rm, reg, reg_addr, offset_addr;
3217 SSEFunc_0_epp sse_fn_epp;
3218 SSEFunc_0_eppi sse_fn_eppi;
3219 SSEFunc_0_ppi sse_fn_ppi;
3220 SSEFunc_0_eppt sse_fn_eppt;
3221
3222 b &= 0xff;
3223 if (s->prefix & PREFIX_DATA)
3224 b1 = 1;
3225 else if (s->prefix & PREFIX_REPZ)
3226 b1 = 2;
3227 else if (s->prefix & PREFIX_REPNZ)
3228 b1 = 3;
3229 else
3230 b1 = 0;
3231 sse_fn_epp = sse_op_table1[b][b1];
3232 if (!sse_fn_epp) {
3233 goto illegal_op;
3234 }
3235 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3236 is_xmm = 1;
3237 } else {
3238 if (b1 == 0) {
3239 /* MMX case */
3240 is_xmm = 0;
3241 } else {
3242 is_xmm = 1;
3243 }
3244 }
3245 /* simple MMX/SSE operation */
3246 if (s->flags & HF_TS_MASK) {
3247 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3248 return;
3249 }
3250 if (s->flags & HF_EM_MASK) {
3251 illegal_op:
3252 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3253 return;
3254 }
3255 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3256 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3257 goto illegal_op;
3258 if (b == 0x0e) {
3259 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3260 goto illegal_op;
3261 /* femms */
3262 gen_helper_emms(cpu_env);
3263 return;
3264 }
3265 if (b == 0x77) {
3266 /* emms */
3267 gen_helper_emms(cpu_env);
3268 return;
3269 }
3270 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3271 the static cpu state) */
3272 if (!is_xmm) {
3273 gen_helper_enter_mmx(cpu_env);
3274 }
3275
3276 modrm = cpu_ldub_code(env, s->pc++);
3277 reg = ((modrm >> 3) & 7);
3278 if (is_xmm)
3279 reg |= rex_r;
3280 mod = (modrm >> 6) & 3;
3281 if (sse_fn_epp == SSE_SPECIAL) {
3282 b |= (b1 << 8);
3283 switch(b) {
3284 case 0x0e7: /* movntq */
3285 if (mod == 3)
3286 goto illegal_op;
3287 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3288 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3289 break;
3290 case 0x1e7: /* movntdq */
3291 case 0x02b: /* movntps */
3292 case 0x12b: /* movntpd */
3293 if (mod == 3)
3294 goto illegal_op;
3295 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3296 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3297 break;
3298 case 0x3f0: /* lddqu */
3299 if (mod == 3)
3300 goto illegal_op;
3301 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3302 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3303 break;
3304 case 0x22b: /* movntss */
3305 case 0x32b: /* movntsd */
3306 if (mod == 3)
3307 goto illegal_op;
3308 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3309 if (b1 & 1) {
3310 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
3311 xmm_regs[reg]));
3312 } else {
3313 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3314 xmm_regs[reg].XMM_L(0)));
3315 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3316 }
3317 break;
3318 case 0x6e: /* movd mm, ea */
3319 #ifdef TARGET_X86_64
3320 if (s->dflag == 2) {
3321 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
3322 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3323 } else
3324 #endif
3325 {
3326 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
3327 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3328 offsetof(CPUX86State,fpregs[reg].mmx));
3329 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3330 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3331 }
3332 break;
3333 case 0x16e: /* movd xmm, ea */
3334 #ifdef TARGET_X86_64
3335 if (s->dflag == 2) {
3336 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
3337 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3338 offsetof(CPUX86State,xmm_regs[reg]));
3339 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3340 } else
3341 #endif
3342 {
3343 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
3344 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3345 offsetof(CPUX86State,xmm_regs[reg]));
3346 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3347 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3348 }
3349 break;
3350 case 0x6f: /* movq mm, ea */
3351 if (mod != 3) {
3352 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3353 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3354 } else {
3355 rm = (modrm & 7);
3356 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3357 offsetof(CPUX86State,fpregs[rm].mmx));
3358 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3359 offsetof(CPUX86State,fpregs[reg].mmx));
3360 }
3361 break;
3362 case 0x010: /* movups */
3363 case 0x110: /* movupd */
3364 case 0x028: /* movaps */
3365 case 0x128: /* movapd */
3366 case 0x16f: /* movdqa xmm, ea */
3367 case 0x26f: /* movdqu xmm, ea */
3368 if (mod != 3) {
3369 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3370 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3371 } else {
3372 rm = (modrm & 7) | REX_B(s);
3373 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3374 offsetof(CPUX86State,xmm_regs[rm]));
3375 }
3376 break;
3377 case 0x210: /* movss xmm, ea */
3378 if (mod != 3) {
3379 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3380 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3381 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3382 gen_op_movl_T0_0();
3383 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3384 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3385 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3386 } else {
3387 rm = (modrm & 7) | REX_B(s);
3388 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3389 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3390 }
3391 break;
3392 case 0x310: /* movsd xmm, ea */
3393 if (mod != 3) {
3394 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3395 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3396 gen_op_movl_T0_0();
3397 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3398 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3399 } else {
3400 rm = (modrm & 7) | REX_B(s);
3401 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3402 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3403 }
3404 break;
3405 case 0x012: /* movlps */
3406 case 0x112: /* movlpd */
3407 if (mod != 3) {
3408 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3409 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3410 } else {
3411 /* movhlps */
3412 rm = (modrm & 7) | REX_B(s);
3413 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3414 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3415 }
3416 break;
3417 case 0x212: /* movsldup */
3418 if (mod != 3) {
3419 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3420 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3421 } else {
3422 rm = (modrm & 7) | REX_B(s);
3423 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3424 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3425 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3426 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3427 }
3428 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3429 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3430 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3431 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3432 break;
3433 case 0x312: /* movddup */
3434 if (mod != 3) {
3435 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3436 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3437 } else {
3438 rm = (modrm & 7) | REX_B(s);
3439 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3440 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3441 }
3442 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3443 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3444 break;
3445 case 0x016: /* movhps */
3446 case 0x116: /* movhpd */
3447 if (mod != 3) {
3448 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3449 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3450 } else {
3451 /* movlhps */
3452 rm = (modrm & 7) | REX_B(s);
3453 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3454 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3455 }
3456 break;
3457 case 0x216: /* movshdup */
3458 if (mod != 3) {
3459 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3460 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3461 } else {
3462 rm = (modrm & 7) | REX_B(s);
3463 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3464 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3465 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3466 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3467 }
3468 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3469 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3470 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3471 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3472 break;
3473 case 0x178:
3474 case 0x378:
3475 {
3476 int bit_index, field_length;
3477
3478 if (b1 == 1 && reg != 0)
3479 goto illegal_op;
3480 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3481 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
3482 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3483 offsetof(CPUX86State,xmm_regs[reg]));
3484 if (b1 == 1)
3485 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3486 tcg_const_i32(bit_index),
3487 tcg_const_i32(field_length));
3488 else
3489 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3490 tcg_const_i32(bit_index),
3491 tcg_const_i32(field_length));
3492 }
3493 break;
3494 case 0x7e: /* movd ea, mm */
3495 #ifdef TARGET_X86_64
3496 if (s->dflag == 2) {
3497 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3498 offsetof(CPUX86State,fpregs[reg].mmx));
3499 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
3500 } else
3501 #endif
3502 {
3503 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3504 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3505 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
3506 }
3507 break;
3508 case 0x17e: /* movd ea, xmm */
3509 #ifdef TARGET_X86_64
3510 if (s->dflag == 2) {
3511 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3512 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3513 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
3514 } else
3515 #endif
3516 {
3517 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3518 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3519 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
3520 }
3521 break;
3522 case 0x27e: /* movq xmm, ea */
3523 if (mod != 3) {
3524 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3525 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3526 } else {
3527 rm = (modrm & 7) | REX_B(s);
3528 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3529 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3530 }
3531 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3532 break;
3533 case 0x7f: /* movq ea, mm */
3534 if (mod != 3) {
3535 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3536 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3537 } else {
3538 rm = (modrm & 7);
3539 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3540 offsetof(CPUX86State,fpregs[reg].mmx));
3541 }
3542 break;
3543 case 0x011: /* movups */
3544 case 0x111: /* movupd */
3545 case 0x029: /* movaps */
3546 case 0x129: /* movapd */
3547 case 0x17f: /* movdqa ea, xmm */
3548 case 0x27f: /* movdqu ea, xmm */
3549 if (mod != 3) {
3550 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3551 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3552 } else {
3553 rm = (modrm & 7) | REX_B(s);
3554 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3555 offsetof(CPUX86State,xmm_regs[reg]));
3556 }
3557 break;
3558 case 0x211: /* movss ea, xmm */
3559 if (mod != 3) {
3560 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3561 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3562 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3563 } else {
3564 rm = (modrm & 7) | REX_B(s);
3565 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3566 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3567 }
3568 break;
3569 case 0x311: /* movsd ea, xmm */
3570 if (mod != 3) {
3571 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3572 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3573 } else {
3574 rm = (modrm & 7) | REX_B(s);
3575 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3576 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3577 }
3578 break;
3579 case 0x013: /* movlps */
3580 case 0x113: /* movlpd */
3581 if (mod != 3) {
3582 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3583 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3584 } else {
3585 goto illegal_op;
3586 }
3587 break;
3588 case 0x017: /* movhps */
3589 case 0x117: /* movhpd */
3590 if (mod != 3) {
3591 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3592 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3593 } else {
3594 goto illegal_op;
3595 }
3596 break;
3597 case 0x71: /* shift mm, im */
3598 case 0x72:
3599 case 0x73:
3600 case 0x171: /* shift xmm, im */
3601 case 0x172:
3602 case 0x173:
3603 if (b1 >= 2) {
3604 goto illegal_op;
3605 }
3606 val = cpu_ldub_code(env, s->pc++);
3607 if (is_xmm) {
3608 gen_op_movl_T0_im(val);
3609 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3610 gen_op_movl_T0_0();
3611 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3612 op1_offset = offsetof(CPUX86State,xmm_t0);
3613 } else {
3614 gen_op_movl_T0_im(val);
3615 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3616 gen_op_movl_T0_0();
3617 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3618 op1_offset = offsetof(CPUX86State,mmx_t0);
3619 }
3620 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3621 ((modrm >> 3) & 7)][b1];
3622 if (!sse_fn_epp) {
3623 goto illegal_op;
3624 }
3625 if (is_xmm) {
3626 rm = (modrm & 7) | REX_B(s);
3627 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3628 } else {
3629 rm = (modrm & 7);
3630 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3631 }
3632 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3633 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3634 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3635 break;
3636 case 0x050: /* movmskps */
3637 rm = (modrm & 7) | REX_B(s);
3638 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3639 offsetof(CPUX86State,xmm_regs[rm]));
3640 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3641 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3642 gen_op_mov_reg_T0(OT_LONG, reg);
3643 break;
3644 case 0x150: /* movmskpd */
3645 rm = (modrm & 7) | REX_B(s);
3646 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3647 offsetof(CPUX86State,xmm_regs[rm]));
3648 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3649 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3650 gen_op_mov_reg_T0(OT_LONG, reg);
3651 break;
3652 case 0x02a: /* cvtpi2ps */
3653 case 0x12a: /* cvtpi2pd */
3654 gen_helper_enter_mmx(cpu_env);
3655 if (mod != 3) {
3656 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3657 op2_offset = offsetof(CPUX86State,mmx_t0);
3658 gen_ldq_env_A0(s->mem_index, op2_offset);
3659 } else {
3660 rm = (modrm & 7);
3661 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3662 }
3663 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3664 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3665 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3666 switch(b >> 8) {
3667 case 0x0:
3668 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
3669 break;
3670 default:
3671 case 0x1:
3672 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
3673 break;
3674 }
3675 break;
3676 case 0x22a: /* cvtsi2ss */
3677 case 0x32a: /* cvtsi2sd */
3678 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3679 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3680 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3681 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3682 if (ot == OT_LONG) {
3683 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
3684 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3685 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
3686 } else {
3687 #ifdef TARGET_X86_64
3688 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3689 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
3690 #else
3691 goto illegal_op;
3692 #endif
3693 }
3694 break;
3695 case 0x02c: /* cvttps2pi */
3696 case 0x12c: /* cvttpd2pi */
3697 case 0x02d: /* cvtps2pi */
3698 case 0x12d: /* cvtpd2pi */
3699 gen_helper_enter_mmx(cpu_env);
3700 if (mod != 3) {
3701 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3702 op2_offset = offsetof(CPUX86State,xmm_t0);
3703 gen_ldo_env_A0(s->mem_index, op2_offset);
3704 } else {
3705 rm = (modrm & 7) | REX_B(s);
3706 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3707 }
3708 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3709 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3710 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3711 switch(b) {
3712 case 0x02c:
3713 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3714 break;
3715 case 0x12c:
3716 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3717 break;
3718 case 0x02d:
3719 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3720 break;
3721 case 0x12d:
3722 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3723 break;
3724 }
3725 break;
3726 case 0x22c: /* cvttss2si */
3727 case 0x32c: /* cvttsd2si */
3728 case 0x22d: /* cvtss2si */
3729 case 0x32d: /* cvtsd2si */
3730 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3731 if (mod != 3) {
3732 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3733 if ((b >> 8) & 1) {
3734 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
3735 } else {
3736 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3737 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3738 }
3739 op2_offset = offsetof(CPUX86State,xmm_t0);
3740 } else {
3741 rm = (modrm & 7) | REX_B(s);
3742 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3743 }
3744 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3745 if (ot == OT_LONG) {
3746 SSEFunc_i_ep sse_fn_i_ep =
3747 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
3748 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3749 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3750 } else {
3751 #ifdef TARGET_X86_64
3752 SSEFunc_l_ep sse_fn_l_ep =
3753 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
3754 sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
3755 #else
3756 goto illegal_op;
3757 #endif
3758 }
3759 gen_op_mov_reg_T0(ot, reg);
3760 break;
3761 case 0xc4: /* pinsrw */
3762 case 0x1c4:
3763 s->rip_offset = 1;
3764 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
3765 val = cpu_ldub_code(env, s->pc++);
3766 if (b1) {
3767 val &= 7;
3768 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3769 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3770 } else {
3771 val &= 3;
3772 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3773 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3774 }
3775 break;
3776 case 0xc5: /* pextrw */
3777 case 0x1c5:
3778 if (mod != 3)
3779 goto illegal_op;
3780 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3781 val = cpu_ldub_code(env, s->pc++);
3782 if (b1) {
3783 val &= 7;
3784 rm = (modrm & 7) | REX_B(s);
3785 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3786 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3787 } else {
3788 val &= 3;
3789 rm = (modrm & 7);
3790 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3791 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3792 }
3793 reg = ((modrm >> 3) & 7) | rex_r;
3794 gen_op_mov_reg_T0(ot, reg);
3795 break;
3796 case 0x1d6: /* movq ea, xmm */
3797 if (mod != 3) {
3798 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3799 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3800 } else {
3801 rm = (modrm & 7) | REX_B(s);
3802 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3803 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3804 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3805 }
3806 break;
3807 case 0x2d6: /* movq2dq */
3808 gen_helper_enter_mmx(cpu_env);
3809 rm = (modrm & 7);
3810 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3811 offsetof(CPUX86State,fpregs[rm].mmx));
3812 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3813 break;
3814 case 0x3d6: /* movdq2q */
3815 gen_helper_enter_mmx(cpu_env);
3816 rm = (modrm & 7) | REX_B(s);
3817 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3818 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3819 break;
3820 case 0xd7: /* pmovmskb */
3821 case 0x1d7:
3822 if (mod != 3)
3823 goto illegal_op;
3824 if (b1) {
3825 rm = (modrm & 7) | REX_B(s);
3826 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3827 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3828 } else {
3829 rm = (modrm & 7);
3830 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3831 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3832 }
3833 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3834 reg = ((modrm >> 3) & 7) | rex_r;
3835 gen_op_mov_reg_T0(OT_LONG, reg);
3836 break;
3837 case 0x138:
3838 if (s->prefix & PREFIX_REPNZ)
3839 goto crc32;
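/* fall through */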
3840 case 0x038:
3841 b = modrm;
3842 modrm = cpu_ldub_code(env, s->pc++);
3843 rm = modrm & 7;
3844 reg = ((modrm >> 3) & 7) | rex_r;
3845 mod = (modrm >> 6) & 3;
3846 if (b1 >= 2) {
3847 goto illegal_op;
3848 }
3849
3850 sse_fn_epp = sse_op_table6[b].op[b1];
3851 if (!sse_fn_epp) {
3852 goto illegal_op;
3853 }
3854 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3855 goto illegal_op;
3856
3857 if (b1) {
3858 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3859 if (mod == 3) {
3860 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3861 } else {
3862 op2_offset = offsetof(CPUX86State,xmm_t0);
3863 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3864 switch (b) {
3865 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3866 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3867 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3868 gen_ldq_env_A0(s->mem_index, op2_offset +
3869 offsetof(XMMReg, XMM_Q(0)));
3870 break;
3871 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3872 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3873 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3874 (s->mem_index >> 2) - 1);
3875 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3876 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3877 offsetof(XMMReg, XMM_L(0)));
3878 break;
3879 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3880 tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
3881 (s->mem_index >> 2) - 1);
3882 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3883 offsetof(XMMReg, XMM_W(0)));
3884 break;
3885 case 0x2a: /* movntdqa */
3886 gen_ldo_env_A0(s->mem_index, op1_offset);
3887 return;
3888 default:
3889 gen_ldo_env_A0(s->mem_index, op2_offset);
3890 }
3891 }
3892 } else {
3893 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3894 if (mod == 3) {
3895 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3896 } else {
3897 op2_offset = offsetof(CPUX86State,mmx_t0);
3898 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3899 gen_ldq_env_A0(s->mem_index, op2_offset);
3900 }
3901 }
3902 if (sse_fn_epp == SSE_SPECIAL) {
3903 goto illegal_op;
3904 }
3905
3906 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3907 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3908 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3909
3910 if (b == 0x17) {
3911 set_cc_op(s, CC_OP_EFLAGS);
3912 }
3913 break;
3914 case 0x338: /* crc32 */
3915 crc32:
3916 b = modrm;
3917 modrm = cpu_ldub_code(env, s->pc++);
3918 reg = ((modrm >> 3) & 7) | rex_r;
3919
3920 if (b != 0xf0 && b != 0xf1)
3921 goto illegal_op;
3922 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
3923 goto illegal_op;
3924
3925 if (b == 0xf0)
3926 ot = OT_BYTE;
3927 else if (b == 0xf1 && s->dflag != 2)
3928 if (s->prefix & PREFIX_DATA)
3929 ot = OT_WORD;
3930 else
3931 ot = OT_LONG;
3932 else
3933 ot = OT_QUAD;
3934
3935 gen_op_mov_TN_reg(OT_LONG, 0, reg);
3936 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3937 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3938 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3939 cpu_T[0], tcg_const_i32(8 << ot));
3940
3941 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3942 gen_op_mov_reg_T0(ot, reg);
3943 break;
3944 case 0x03a:
3945 case 0x13a:
3946 b = modrm;
3947 modrm = cpu_ldub_code(env, s->pc++);
3948 rm = modrm & 7;
3949 reg = ((modrm >> 3) & 7) | rex_r;
3950 mod = (modrm >> 6) & 3;
3951 if (b1 >= 2) {
3952 goto illegal_op;
3953 }
3954
3955 sse_fn_eppi = sse_op_table7[b].op[b1];
3956 if (!sse_fn_eppi) {
3957 goto illegal_op;
3958 }
3959 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
3960 goto illegal_op;
3961
3962 if (sse_fn_eppi == SSE_SPECIAL) {
3963 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3964 rm = (modrm & 7) | REX_B(s);
3965 if (mod != 3)
3966 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3967 reg = ((modrm >> 3) & 7) | rex_r;
3968 val = cpu_ldub_code(env, s->pc++);
3969 switch (b) {
3970 case 0x14: /* pextrb */
3971 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3972 xmm_regs[reg].XMM_B(val & 15)));
3973 if (mod == 3)
3974 gen_op_mov_reg_T0(ot, rm);
3975 else
3976 tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
3977 (s->mem_index >> 2) - 1);
3978 break;
3979 case 0x15: /* pextrw */
3980 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3981 xmm_regs[reg].XMM_W(val & 7)));
3982 if (mod == 3)
3983 gen_op_mov_reg_T0(ot, rm);
3984 else
3985 tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
3986 (s->mem_index >> 2) - 1);
3987 break;
3988 case 0x16:
3989 if (ot == OT_LONG) { /* pextrd */
3990 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3991 offsetof(CPUX86State,
3992 xmm_regs[reg].XMM_L(val & 3)));
3993 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3994 if (mod == 3)
3995 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
3996 else
3997 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
3998 (s->mem_index >> 2) - 1);
3999 } else { /* pextrq */
4000 #ifdef TARGET_X86_64
4001 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4002 offsetof(CPUX86State,
4003 xmm_regs[reg].XMM_Q(val & 1)));
4004 if (mod == 3)
4005 gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
4006 else
4007 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
4008 (s->mem_index >> 2) - 1);
4009 #else
4010 goto illegal_op;
4011 #endif
4012 }
4013 break;
4014 case 0x17: /* extractps */
4015 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4016 xmm_regs[reg].XMM_L(val & 3)));
4017 if (mod == 3)
4018 gen_op_mov_reg_T0(ot, rm);
4019 else
4020 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
4021 (s->mem_index >> 2) - 1);
4022 break;
4023 case 0x20: /* pinsrb */
4024 if (mod == 3)
4025 gen_op_mov_TN_reg(OT_LONG, 0, rm);
4026 else
4027 tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0,
4028 (s->mem_index >> 2) - 1);
4029 tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State,
4030 xmm_regs[reg].XMM_B(val & 15)));
4031 break;
4032 case 0x21: /* insertps */
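                /* insertps imm8: bits 7:6 select the source dword (for
                   the register form), bits 5:4 the destination dword,
                   and bits 3:0 are a zero mask applied afterwards. */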
4033 if (mod == 3) {
4034 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4035 offsetof(CPUX86State,xmm_regs[rm]
4036 .XMM_L((val >> 6) & 3)));
4037 } else {
4038 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
4039 (s->mem_index >> 2) - 1);
4040 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
4041 }
4042 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4043 offsetof(CPUX86State,xmm_regs[reg]
4044 .XMM_L((val >> 4) & 3)));
4045 if ((val >> 0) & 1)
4046 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4047 cpu_env, offsetof(CPUX86State,
4048 xmm_regs[reg].XMM_L(0)));
4049 if ((val >> 1) & 1)
4050 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4051 cpu_env, offsetof(CPUX86State,
4052 xmm_regs[reg].XMM_L(1)));
4053 if ((val >> 2) & 1)
4054 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4055 cpu_env, offsetof(CPUX86State,
4056 xmm_regs[reg].XMM_L(2)));
4057 if ((val >> 3) & 1)
4058 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4059 cpu_env, offsetof(CPUX86State,
4060 xmm_regs[reg].XMM_L(3)));
4061 break;
4062 case 0x22:
4063 if (ot == OT_LONG) { /* pinsrd */
4064 if (mod == 3)
4065 gen_op_mov_v_reg(ot, cpu_tmp0, rm);
4066 else
4067 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
4068 (s->mem_index >> 2) - 1);
4069 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
4070 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4071 offsetof(CPUX86State,
4072 xmm_regs[reg].XMM_L(val & 3)));
4073 } else { /* pinsrq */
4074 #ifdef TARGET_X86_64
4075 if (mod == 3)
4076 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4077 else
4078 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
4079 (s->mem_index >> 2) - 1);
4080 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4081 offsetof(CPUX86State,
4082 xmm_regs[reg].XMM_Q(val & 1)));
4083 #else
4084 goto illegal_op;
4085 #endif
4086 }
4087 break;
4088 }
4089 return;
4090 }
4091
4092 if (b1) {
4093 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4094 if (mod == 3) {
4095 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4096 } else {
4097 op2_offset = offsetof(CPUX86State,xmm_t0);
4098 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4099 gen_ldo_env_A0(s->mem_index, op2_offset);
4100 }
4101 } else {
4102 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4103 if (mod == 3) {
4104 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4105 } else {
4106 op2_offset = offsetof(CPUX86State,mmx_t0);
4107 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4108 gen_ldq_env_A0(s->mem_index, op2_offset);
4109 }
4110 }
4111 val = cpu_ldub_code(env, s->pc++);
4112
4113 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
4114 set_cc_op(s, CC_OP_EFLAGS);
4115
4116 if (s->dflag == 2)
            /* The helper must use the full 64-bit GP registers */
4118 val |= 1 << 8;
4119 }
4120
4121 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4122 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4123 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4124 break;
4125 default:
4126 goto illegal_op;
4127 }
4128 } else {
4129 /* generic MMX or SSE operation */
4130 switch(b) {
4131 case 0x70: /* pshufx insn */
        case 0xc6:                      /* shufps, shufpd */
4133 case 0xc2: /* compare insns */
4134 s->rip_offset = 1;
4135 break;
4136 default:
4137 break;
4138 }
4139 if (is_xmm) {
4140 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4141 if (mod != 3) {
4142 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4143 op2_offset = offsetof(CPUX86State,xmm_t0);
4144 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
4145 b == 0xc2)) {
                /* special case for scalar SSE insns (ss/sd forms):
                   the memory operand is only 32 or 64 bits wide */
4147 if (b1 == 2) {
4148 /* 32 bit access */
4149 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
4150 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
4151 } else {
4152 /* 64 bit access */
4153 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
4154 }
4155 } else {
4156 gen_ldo_env_A0(s->mem_index, op2_offset);
4157 }
4158 } else {
4159 rm = (modrm & 7) | REX_B(s);
4160 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4161 }
4162 } else {
4163 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4164 if (mod != 3) {
4165 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4166 op2_offset = offsetof(CPUX86State,mmx_t0);
4167 gen_ldq_env_A0(s->mem_index, op2_offset);
4168 } else {
4169 rm = (modrm & 7);
4170 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4171 }
4172 }
4173 switch(b) {
4174 case 0x0f: /* 3DNow! data insns */
4175 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4176 goto illegal_op;
4177 val = cpu_ldub_code(env, s->pc++);
4178 sse_fn_epp = sse_op_table5[val];
4179 if (!sse_fn_epp) {
4180 goto illegal_op;
4181 }
4182 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4183 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4184 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4185 break;
4186 case 0x70: /* pshufx insn */
        case 0xc6:                      /* shufps, shufpd */
4188 val = cpu_ldub_code(env, s->pc++);
4189 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4190 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4191 /* XXX: introduce a new table? */
4192 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
4193 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4194 break;
4195 case 0xc2:
4196 /* compare insns */
4197 val = cpu_ldub_code(env, s->pc++);
4198 if (val >= 8)
4199 goto illegal_op;
4200 sse_fn_epp = sse_op_table4[val][b1];
4201
4202 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4203 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4204 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4205 break;
4206 case 0xf7:
            /* maskmov: we must prepare A0 (the store goes to DS:rDI) */
4208 if (mod != 3)
4209 goto illegal_op;
4210 #ifdef TARGET_X86_64
4211 if (s->aflag == 2) {
4212 gen_op_movq_A0_reg(R_EDI);
4213 } else
4214 #endif
4215 {
4216 gen_op_movl_A0_reg(R_EDI);
4217 if (s->aflag == 0)
4218 gen_op_andl_A0_ffff();
4219 }
4220 gen_add_A0_ds_seg(s);
4221
4222 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4223 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4224 /* XXX: introduce a new table? */
4225 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4226 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
4227 break;
4228 default:
4229 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4230 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4231 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4232 break;
4233 }
4234 if (b == 0x2e || b == 0x2f) {
4235 set_cc_op(s, CC_OP_EFLAGS);
4236 }
4237 }
4238 }
4239
4240 /* convert one instruction. s->is_jmp is set if the translation must
4241 be stopped. Return the next pc value */
4242 static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4243 target_ulong pc_start)
4244 {
4245 int b, prefixes, aflag, dflag;
4246 int shift, ot;
4247 int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
4248 target_ulong next_eip, tval;
4249 int rex_w, rex_r;
4250
4251 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4252 tcg_gen_debug_insn_start(pc_start);
4253 }
4254 s->pc = pc_start;
4255 prefixes = 0;
4256 aflag = s->code32;
4257 dflag = s->code32;
4258 s->override = -1;
4259 rex_w = -1;
4260 rex_r = 0;
4261 #ifdef TARGET_X86_64
4262 s->rex_x = 0;
4263 s->rex_b = 0;
4264 x86_64_hregs = 0;
4265 #endif
4266 s->rip_offset = 0; /* for relative ip address */
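    /* Prefix bytes accumulate one at a time: every case below jumps
       back to next_byte until a non-prefix byte is fetched.  When
       several segment overrides appear, the last one wins. */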
4267 next_byte:
4268 b = cpu_ldub_code(env, s->pc);
4269 s->pc++;
4270 /* Collect prefixes. */
4271 switch (b) {
4272 case 0xf3:
4273 prefixes |= PREFIX_REPZ;
4274 goto next_byte;
4275 case 0xf2:
4276 prefixes |= PREFIX_REPNZ;
4277 goto next_byte;
4278 case 0xf0:
4279 prefixes |= PREFIX_LOCK;
4280 goto next_byte;
4281 case 0x2e:
4282 s->override = R_CS;
4283 goto next_byte;
4284 case 0x36:
4285 s->override = R_SS;
4286 goto next_byte;
4287 case 0x3e:
4288 s->override = R_DS;
4289 goto next_byte;
4290 case 0x26:
4291 s->override = R_ES;
4292 goto next_byte;
4293 case 0x64:
4294 s->override = R_FS;
4295 goto next_byte;
4296 case 0x65:
4297 s->override = R_GS;
4298 goto next_byte;
4299 case 0x66:
4300 prefixes |= PREFIX_DATA;
4301 goto next_byte;
4302 case 0x67:
4303 prefixes |= PREFIX_ADR;
4304 goto next_byte;
4305 #ifdef TARGET_X86_64
4306 case 0x40 ... 0x4f:
4307 if (CODE64(s)) {
4308 /* REX prefix */
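            /* Each field is shifted so it contributes 8 to the
               corresponding register number when OR'ed in.  Note that
               this REX state survives any further prefix bytes, whereas
               hardware only honours a REX byte placed immediately
               before the opcode. */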
4309 rex_w = (b >> 3) & 1;
4310 rex_r = (b & 0x4) << 1;
4311 s->rex_x = (b & 0x2) << 2;
4312 REX_B(s) = (b & 0x1) << 3;
4313 x86_64_hregs = 1; /* select uniform byte register addressing */
4314 goto next_byte;
4315 }
4316 break;
4317 #endif
4318 }
4319
4320 /* Post-process prefixes. */
4321 if (prefixes & PREFIX_DATA) {
4322 dflag ^= 1;
4323 }
4324 if (prefixes & PREFIX_ADR) {
4325 aflag ^= 1;
4326 }
4327 #ifdef TARGET_X86_64
4328 if (CODE64(s)) {
4329 if (rex_w == 1) {
4330 /* 0x66 is ignored if rex.w is set */
4331 dflag = 2;
4332 }
        /* In 64-bit mode the 0x67 prefix selects 32-bit addressing and
           the default is 64-bit, so set aflag directly instead of
           relying on the 16/32-bit toggle applied above. */
        aflag = (prefixes & PREFIX_ADR ? 1 : 2);
4336 }
4337 #endif
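    /* From here on, aflag and dflag encode the effective address and
       operand sizes: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit. */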
4338
4339 s->prefix = prefixes;
4340 s->aflag = aflag;
4341 s->dflag = dflag;
4342
4343 /* lock generation */
4344 if (prefixes & PREFIX_LOCK)
4345 gen_helper_lock();
4346
4347 /* now check op code */
4348 reswitch:
4349 switch(b) {
4350 case 0x0f:
4351 /**************************/
4352 /* extended op code */
4353 b = cpu_ldub_code(env, s->pc++) | 0x100;
4354 goto reswitch;
4355
4356 /**************************/
4357 /* arith & logic */
4358 case 0x00 ... 0x05:
4359 case 0x08 ... 0x0d:
4360 case 0x10 ... 0x15:
4361 case 0x18 ... 0x1d:
4362 case 0x20 ... 0x25:
4363 case 0x28 ... 0x2d:
4364 case 0x30 ... 0x35:
4365 case 0x38 ... 0x3d:
4366 {
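            /* For opcodes 0x00-0x3d, bits 5:3 select the ALU op
               (add/or/adc/sbb/and/sub/xor/cmp), bit 0 the width
               (byte vs word/long/quad), and bits 2:1 the form f:
               0 = Ev,Gv   1 = Gv,Ev   2 = AL/eAX,Iv. */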
4367 int op, f, val;
4368 op = (b >> 3) & 7;
4369 f = (b >> 1) & 3;
4370
4371 if ((b & 1) == 0)
4372 ot = OT_BYTE;
4373 else
4374 ot = dflag + OT_WORD;
4375
4376 switch(f) {
4377 case 0: /* OP Ev, Gv */
4378 modrm = cpu_ldub_code(env, s->pc++);
4379 reg = ((modrm >> 3) & 7) | rex_r;
4380 mod = (modrm >> 6) & 3;
4381 rm = (modrm & 7) | REX_B(s);
4382 if (mod != 3) {
4383 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4384 opreg = OR_TMP0;
4385 } else if (op == OP_XORL && rm == reg) {
4386 xor_zero:
4387 /* xor reg, reg optimisation */
4388 gen_op_movl_T0_0();
4389 set_cc_op(s, CC_OP_LOGICB + ot);
4390 gen_op_mov_reg_T0(ot, reg);
4391 gen_op_update1_cc();
4392 break;
4393 } else {
4394 opreg = rm;
4395 }
4396 gen_op_mov_TN_reg(ot, 1, reg);
4397 gen_op(s, op, ot, opreg);
4398 break;
4399 case 1: /* OP Gv, Ev */
4400 modrm = cpu_ldub_code(env, s->pc++);
4401 mod = (modrm >> 6) & 3;
4402 reg = ((modrm >> 3) & 7) | rex_r;
4403 rm = (modrm & 7) | REX_B(s);
4404 if (mod != 3) {
4405 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4406 gen_op_ld_T1_A0(ot + s->mem_index);
4407 } else if (op == OP_XORL && rm == reg) {
4408 goto xor_zero;
4409 } else {
4410 gen_op_mov_TN_reg(ot, 1, rm);
4411 }
4412 gen_op(s, op, ot, reg);
4413 break;
4414 case 2: /* OP A, Iv */
4415 val = insn_get(env, s, ot);
4416 gen_op_movl_T1_im(val);
4417 gen_op(s, op, ot, OR_EAX);
4418 break;
4419 }
4420 }
4421 break;
4422
4423 case 0x82:
4424 if (CODE64(s))
4425 goto illegal_op;
4426 case 0x80: /* GRP1 */
4427 case 0x81:
4428 case 0x83:
4429 {
4430 int val;
4431
4432 if ((b & 1) == 0)
4433 ot = OT_BYTE;
4434 else
4435 ot = dflag + OT_WORD;
4436
4437 modrm = cpu_ldub_code(env, s->pc++);
4438 mod = (modrm >> 6) & 3;
4439 rm = (modrm & 7) | REX_B(s);
4440 op = (modrm >> 3) & 7;
4441
4442 if (mod != 3) {
4443 if (b == 0x83)
4444 s->rip_offset = 1;
4445 else
4446 s->rip_offset = insn_const_size(ot);
4447 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4448 opreg = OR_TMP0;
4449 } else {
4450 opreg = rm;
4451 }
4452
4453 switch(b) {
4454 default:
4455 case 0x80:
4456 case 0x81:
4457 case 0x82:
4458 val = insn_get(env, s, ot);
4459 break;
4460 case 0x83:
4461 val = (int8_t)insn_get(env, s, OT_BYTE);
4462 break;
4463 }
4464 gen_op_movl_T1_im(val);
4465 gen_op(s, op, ot, opreg);
4466 }
4467 break;
4468
4469 /**************************/
4470 /* inc, dec, and other misc arith */
4471 case 0x40 ... 0x47: /* inc Gv */
4472 ot = dflag ? OT_LONG : OT_WORD;
4473 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4474 break;
4475 case 0x48 ... 0x4f: /* dec Gv */
4476 ot = dflag ? OT_LONG : OT_WORD;
4477 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4478 break;
4479 case 0xf6: /* GRP3 */
4480 case 0xf7:
4481 if ((b & 1) == 0)
4482 ot = OT_BYTE;
4483 else
4484 ot = dflag + OT_WORD;
4485
4486 modrm = cpu_ldub_code(env, s->pc++);
4487 mod = (modrm >> 6) & 3;
4488 rm = (modrm & 7) | REX_B(s);
4489 op = (modrm >> 3) & 7;
4490 if (mod != 3) {
4491 if (op == 0)
4492 s->rip_offset = insn_const_size(ot);
4493 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4494 gen_op_ld_T0_A0(ot + s->mem_index);
4495 } else {
4496 gen_op_mov_TN_reg(ot, 0, rm);
4497 }
4498
4499 switch(op) {
4500 case 0: /* test */
4501 val = insn_get(env, s, ot);
4502 gen_op_movl_T1_im(val);
4503 gen_op_testl_T0_T1_cc();
4504 set_cc_op(s, CC_OP_LOGICB + ot);
4505 break;
4506 case 2: /* not */
4507 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4508 if (mod != 3) {
4509 gen_op_st_T0_A0(ot + s->mem_index);
4510 } else {
4511 gen_op_mov_reg_T0(ot, rm);
4512 }
4513 break;
4514 case 3: /* neg */
4515 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4516 if (mod != 3) {
4517 gen_op_st_T0_A0(ot + s->mem_index);
4518 } else {
4519 gen_op_mov_reg_T0(ot, rm);
4520 }
4521 gen_op_update_neg_cc();
4522 set_cc_op(s, CC_OP_SUBB + ot);
4523 break;
4524 case 4: /* mul */
4525 switch(ot) {
4526 case OT_BYTE:
4527 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4528 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4529 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4530 /* XXX: use 32 bit mul which could be faster */
4531 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4532 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4533 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4534 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4535 set_cc_op(s, CC_OP_MULB);
4536 break;
4537 case OT_WORD:
4538 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4539 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4540 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4541 /* XXX: use 32 bit mul which could be faster */
4542 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4543 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4544 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4545 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4546 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4547 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4548 set_cc_op(s, CC_OP_MULW);
4549 break;
4550 default:
4551 case OT_LONG:
4552 #ifdef TARGET_X86_64
4553 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4554 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4555 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
4556 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4557 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4558 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4559 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4560 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4561 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4562 #else
4563 {
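                    /* Without TARGET_X86_64, target_ulong is only 32
                       bits, so form the 64-bit product in i64
                       temporaries and split it into EAX/EDX. */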
4564 TCGv_i64 t0, t1;
4565 t0 = tcg_temp_new_i64();
4566 t1 = tcg_temp_new_i64();
4567 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4568 tcg_gen_extu_i32_i64(t0, cpu_T[0]);
4569 tcg_gen_extu_i32_i64(t1, cpu_T[1]);
4570 tcg_gen_mul_i64(t0, t0, t1);
4571 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4572 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4573 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4574 tcg_gen_shri_i64(t0, t0, 32);
4575 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4576 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4577 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4578 }
4579 #endif
4580 set_cc_op(s, CC_OP_MULL);
4581 break;
4582 #ifdef TARGET_X86_64
4583 case OT_QUAD:
4584 gen_helper_mulq_EAX_T0(cpu_env, cpu_T[0]);
4585 set_cc_op(s, CC_OP_MULQ);
4586 break;
4587 #endif
4588 }
4589 break;
4590 case 5: /* imul */
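                /* For imul, CC_SRC is set to (full result minus the
                   sign-extended low half); CF/OF are then reported as
                   set iff that difference is non-zero. */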
4591 switch(ot) {
4592 case OT_BYTE:
4593 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4594 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4595 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4596 /* XXX: use 32 bit mul which could be faster */
4597 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4598 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4599 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4600 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4601 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4602 set_cc_op(s, CC_OP_MULB);
4603 break;
4604 case OT_WORD:
4605 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4606 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4607 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4608 /* XXX: use 32 bit mul which could be faster */
4609 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4610 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4611 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4612 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4613 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4614 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4615 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4616 set_cc_op(s, CC_OP_MULW);
4617 break;
4618 default:
4619 case OT_LONG:
4620 #ifdef TARGET_X86_64
4621 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4622 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4623 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4624 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4625 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4626 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4627 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4628 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4629 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4630 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4631 #else
4632 {
4633 TCGv_i64 t0, t1;
4634 t0 = tcg_temp_new_i64();
4635 t1 = tcg_temp_new_i64();
4636 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4637 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4638 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4639 tcg_gen_mul_i64(t0, t0, t1);
4640 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4641 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4642 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4643 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4644 tcg_gen_shri_i64(t0, t0, 32);
4645 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4646 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4647 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4648 }
4649 #endif
4650 set_cc_op(s, CC_OP_MULL);
4651 break;
4652 #ifdef TARGET_X86_64
4653 case OT_QUAD:
4654 gen_helper_imulq_EAX_T0(cpu_env, cpu_T[0]);
4655 set_cc_op(s, CC_OP_MULQ);
4656 break;
4657 #endif
4658 }
4659 break;
4660 case 6: /* div */
4661 switch(ot) {
4662 case OT_BYTE:
4663 gen_jmp_im(pc_start - s->cs_base);
4664 gen_helper_divb_AL(cpu_env, cpu_T[0]);
4665 break;
4666 case OT_WORD:
4667 gen_jmp_im(pc_start - s->cs_base);
4668 gen_helper_divw_AX(cpu_env, cpu_T[0]);
4669 break;
4670 default:
4671 case OT_LONG:
4672 gen_jmp_im(pc_start - s->cs_base);
4673 gen_helper_divl_EAX(cpu_env, cpu_T[0]);
4674 break;
4675 #ifdef TARGET_X86_64
4676 case OT_QUAD:
4677 gen_jmp_im(pc_start - s->cs_base);
4678 gen_helper_divq_EAX(cpu_env, cpu_T[0]);
4679 break;
4680 #endif
4681 }
4682 break;
4683 case 7: /* idiv */
4684 switch(ot) {
4685 case OT_BYTE:
4686 gen_jmp_im(pc_start - s->cs_base);
4687 gen_helper_idivb_AL(cpu_env, cpu_T[0]);
4688 break;
4689 case OT_WORD:
4690 gen_jmp_im(pc_start - s->cs_base);
4691 gen_helper_idivw_AX(cpu_env, cpu_T[0]);
4692 break;
4693 default:
4694 case OT_LONG:
4695 gen_jmp_im(pc_start - s->cs_base);
4696 gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
4697 break;
4698 #ifdef TARGET_X86_64
4699 case OT_QUAD:
4700 gen_jmp_im(pc_start - s->cs_base);
4701 gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
4702 break;
4703 #endif
4704 }
4705 break;
4706 default:
4707 goto illegal_op;
4708 }
4709 break;
4710
4711 case 0xfe: /* GRP4 */
4712 case 0xff: /* GRP5 */
4713 if ((b & 1) == 0)
4714 ot = OT_BYTE;
4715 else
4716 ot = dflag + OT_WORD;
4717
4718 modrm = cpu_ldub_code(env, s->pc++);
4719 mod = (modrm >> 6) & 3;
4720 rm = (modrm & 7) | REX_B(s);
4721 op = (modrm >> 3) & 7;
4722 if (op >= 2 && b == 0xfe) {
4723 goto illegal_op;
4724 }
4725 if (CODE64(s)) {
4726 if (op == 2 || op == 4) {
4727 /* operand size for jumps is 64 bit */
4728 ot = OT_QUAD;
4729 } else if (op == 3 || op == 5) {
4730 ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
4731 } else if (op == 6) {
4732 /* default push size is 64 bit */
4733 ot = dflag ? OT_QUAD : OT_WORD;
4734 }
4735 }
4736 if (mod != 3) {
4737 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4738 if (op >= 2 && op != 3 && op != 5)
4739 gen_op_ld_T0_A0(ot + s->mem_index);
4740 } else {
4741 gen_op_mov_TN_reg(ot, 0, rm);
4742 }
4743
4744 switch(op) {
4745 case 0: /* inc Ev */
4746 if (mod != 3)
4747 opreg = OR_TMP0;
4748 else
4749 opreg = rm;
4750 gen_inc(s, ot, opreg, 1);
4751 break;
4752 case 1: /* dec Ev */
4753 if (mod != 3)
4754 opreg = OR_TMP0;
4755 else
4756 opreg = rm;
4757 gen_inc(s, ot, opreg, -1);
4758 break;
4759 case 2: /* call Ev */
4760 /* XXX: optimize if memory (no 'and' is necessary) */
4761 if (s->dflag == 0)
4762 gen_op_andl_T0_ffff();
4763 next_eip = s->pc - s->cs_base;
4764 gen_movtl_T1_im(next_eip);
4765 gen_push_T1(s);
4766 gen_op_jmp_T0();
4767 gen_eob(s);
4768 break;
4769 case 3: /* lcall Ev */
4770 gen_op_ld_T1_A0(ot + s->mem_index);
4771 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4772 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4773 do_lcall:
4774 if (s->pe && !s->vm86) {
4775 gen_update_cc_op(s);
4776 gen_jmp_im(pc_start - s->cs_base);
4777 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4778 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4779 tcg_const_i32(dflag),
4780 tcg_const_i32(s->pc - pc_start));
4781 } else {
4782 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4783 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
4784 tcg_const_i32(dflag),
4785 tcg_const_i32(s->pc - s->cs_base));
4786 }
4787 gen_eob(s);
4788 break;
4789 case 4: /* jmp Ev */
4790 if (s->dflag == 0)
4791 gen_op_andl_T0_ffff();
4792 gen_op_jmp_T0();
4793 gen_eob(s);
4794 break;
4795 case 5: /* ljmp Ev */
4796 gen_op_ld_T1_A0(ot + s->mem_index);
4797 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4798 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4799 do_ljmp:
4800 if (s->pe && !s->vm86) {
4801 gen_update_cc_op(s);
4802 gen_jmp_im(pc_start - s->cs_base);
4803 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4804 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4805 tcg_const_i32(s->pc - pc_start));
4806 } else {
4807 gen_op_movl_seg_T0_vm(R_CS);
4808 gen_op_movl_T0_T1();
4809 gen_op_jmp_T0();
4810 }
4811 gen_eob(s);
4812 break;
4813 case 6: /* push Ev */
4814 gen_push_T0(s);
4815 break;
4816 default:
4817 goto illegal_op;
4818 }
4819 break;
4820
4821 case 0x84: /* test Ev, Gv */
4822 case 0x85:
4823 if ((b & 1) == 0)
4824 ot = OT_BYTE;
4825 else
4826 ot = dflag + OT_WORD;
4827
4828 modrm = cpu_ldub_code(env, s->pc++);
4829 reg = ((modrm >> 3) & 7) | rex_r;
4830
4831 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4832 gen_op_mov_TN_reg(ot, 1, reg);
4833 gen_op_testl_T0_T1_cc();
4834 set_cc_op(s, CC_OP_LOGICB + ot);
4835 break;
4836
4837 case 0xa8: /* test eAX, Iv */
4838 case 0xa9:
4839 if ((b & 1) == 0)
4840 ot = OT_BYTE;
4841 else
4842 ot = dflag + OT_WORD;
4843 val = insn_get(env, s, ot);
4844
4845 gen_op_mov_TN_reg(ot, 0, OR_EAX);
4846 gen_op_movl_T1_im(val);
4847 gen_op_testl_T0_T1_cc();
4848 set_cc_op(s, CC_OP_LOGICB + ot);
4849 break;
4850
4851 case 0x98: /* CWDE/CBW */
4852 #ifdef TARGET_X86_64
4853 if (dflag == 2) {
4854 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4855 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4856 gen_op_mov_reg_T0(OT_QUAD, R_EAX);
4857 } else
4858 #endif
4859 if (dflag == 1) {
4860 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4861 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4862 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4863 } else {
4864 gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
4865 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4866 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4867 }
4868 break;
4869 case 0x99: /* CDQ/CWD */
4870 #ifdef TARGET_X86_64
4871 if (dflag == 2) {
4872 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
4873 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
4874 gen_op_mov_reg_T0(OT_QUAD, R_EDX);
4875 } else
4876 #endif
4877 if (dflag == 1) {
4878 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4879 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4880 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
4881 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4882 } else {
4883 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4884 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4885 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
4886 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4887 }
4888 break;
4889 case 0x1af: /* imul Gv, Ev */
4890 case 0x69: /* imul Gv, Ev, I */
4891 case 0x6b:
4892 ot = dflag + OT_WORD;
4893 modrm = cpu_ldub_code(env, s->pc++);
4894 reg = ((modrm >> 3) & 7) | rex_r;
4895 if (b == 0x69)
4896 s->rip_offset = insn_const_size(ot);
4897 else if (b == 0x6b)
4898 s->rip_offset = 1;
4899 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4900 if (b == 0x69) {
4901 val = insn_get(env, s, ot);
4902 gen_op_movl_T1_im(val);
4903 } else if (b == 0x6b) {
4904 val = (int8_t)insn_get(env, s, OT_BYTE);
4905 gen_op_movl_T1_im(val);
4906 } else {
4907 gen_op_mov_TN_reg(ot, 1, reg);
4908 }
4909
4910 #ifdef TARGET_X86_64
4911 if (ot == OT_QUAD) {
4912 gen_helper_imulq_T0_T1(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
4913 } else
4914 #endif
4915 if (ot == OT_LONG) {
4916 #ifdef TARGET_X86_64
4917 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4918 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4919 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4920 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4921 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4922 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4923 #else
4924 {
4925 TCGv_i64 t0, t1;
4926 t0 = tcg_temp_new_i64();
4927 t1 = tcg_temp_new_i64();
4928 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4929 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4930 tcg_gen_mul_i64(t0, t0, t1);
4931 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4932 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4933 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4934 tcg_gen_shri_i64(t0, t0, 32);
4935 tcg_gen_trunc_i64_i32(cpu_T[1], t0);
4936 tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
4937 }
4938 #endif
4939 } else {
4940 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4941 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4942 /* XXX: use 32 bit mul which could be faster */
4943 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4944 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4945 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4946 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4947 }
4948 gen_op_mov_reg_T0(ot, reg);
4949 set_cc_op(s, CC_OP_MULB + ot);
4950 break;
4951 case 0x1c0:
4952 case 0x1c1: /* xadd Ev, Gv */
4953 if ((b & 1) == 0)
4954 ot = OT_BYTE;
4955 else
4956 ot = dflag + OT_WORD;
4957 modrm = cpu_ldub_code(env, s->pc++);
4958 reg = ((modrm >> 3) & 7) | rex_r;
4959 mod = (modrm >> 6) & 3;
4960 if (mod == 3) {
4961 rm = (modrm & 7) | REX_B(s);
4962 gen_op_mov_TN_reg(ot, 0, reg);
4963 gen_op_mov_TN_reg(ot, 1, rm);
4964 gen_op_addl_T0_T1();
4965 gen_op_mov_reg_T1(ot, reg);
4966 gen_op_mov_reg_T0(ot, rm);
4967 } else {
4968 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4969 gen_op_mov_TN_reg(ot, 0, reg);
4970 gen_op_ld_T1_A0(ot + s->mem_index);
4971 gen_op_addl_T0_T1();
4972 gen_op_st_T0_A0(ot + s->mem_index);
4973 gen_op_mov_reg_T1(ot, reg);
4974 }
4975 gen_op_update2_cc();
4976 set_cc_op(s, CC_OP_ADDB + ot);
4977 break;
4978 case 0x1b0:
4979 case 0x1b1: /* cmpxchg Ev, Gv */
4980 {
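            /* cmpxchg: compare the accumulator with the destination;
               if equal, store the source (t1), otherwise load the
               destination into the accumulator.  Flags come out as for
               a cmp, via CC_OP_SUBB + ot below. */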
4981 int label1, label2;
4982 TCGv t0, t1, t2, a0;
4983
4984 if ((b & 1) == 0)
4985 ot = OT_BYTE;
4986 else
4987 ot = dflag + OT_WORD;
4988 modrm = cpu_ldub_code(env, s->pc++);
4989 reg = ((modrm >> 3) & 7) | rex_r;
4990 mod = (modrm >> 6) & 3;
4991 t0 = tcg_temp_local_new();
4992 t1 = tcg_temp_local_new();
4993 t2 = tcg_temp_local_new();
4994 a0 = tcg_temp_local_new();
4995 gen_op_mov_v_reg(ot, t1, reg);
4996 if (mod == 3) {
4997 rm = (modrm & 7) | REX_B(s);
4998 gen_op_mov_v_reg(ot, t0, rm);
4999 } else {
5000 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5001 tcg_gen_mov_tl(a0, cpu_A0);
5002 gen_op_ld_v(ot + s->mem_index, t0, a0);
5003 rm = 0; /* avoid warning */
5004 }
5005 label1 = gen_new_label();
5006 tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5007 gen_extu(ot, t0);
5008 gen_extu(ot, t2);
5009 tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
5010 label2 = gen_new_label();
5011 if (mod == 3) {
5012 gen_op_mov_reg_v(ot, R_EAX, t0);
5013 tcg_gen_br(label2);
5014 gen_set_label(label1);
5015 gen_op_mov_reg_v(ot, rm, t1);
5016 } else {
                /* Perform a no-op store cycle like a physical CPU; it
                   must happen before the accumulator is changed, so the
                   operation is idempotent if the store faults and the
                   insn is restarted */
5020 gen_op_st_v(ot + s->mem_index, t0, a0);
5021 gen_op_mov_reg_v(ot, R_EAX, t0);
5022 tcg_gen_br(label2);
5023 gen_set_label(label1);
5024 gen_op_st_v(ot + s->mem_index, t1, a0);
5025 }
5026 gen_set_label(label2);
5027 tcg_gen_mov_tl(cpu_cc_src, t0);
5028 tcg_gen_mov_tl(cpu_cc_srcT, t2);
5029 tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
5030 set_cc_op(s, CC_OP_SUBB + ot);
5031 tcg_temp_free(t0);
5032 tcg_temp_free(t1);
5033 tcg_temp_free(t2);
5034 tcg_temp_free(a0);
5035 }
5036 break;
5037 case 0x1c7: /* cmpxchg8b */
5038 modrm = cpu_ldub_code(env, s->pc++);
5039 mod = (modrm >> 6) & 3;
5040 if ((mod == 3) || ((modrm & 0x38) != 0x8))
5041 goto illegal_op;
5042 #ifdef TARGET_X86_64
5043 if (dflag == 2) {
5044 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5045 goto illegal_op;
5046 gen_jmp_im(pc_start - s->cs_base);
5047 gen_update_cc_op(s);
5048 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5049 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
5050 } else
5051 #endif
5052 {
5053 if (!(s->cpuid_features & CPUID_CX8))
5054 goto illegal_op;
5055 gen_jmp_im(pc_start - s->cs_base);
5056 gen_update_cc_op(s);
5057 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5058 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
5059 }
5060 set_cc_op(s, CC_OP_EFLAGS);
5061 break;
5062
5063 /**************************/
5064 /* push/pop */
5065 case 0x50 ... 0x57: /* push */
5066 gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
5067 gen_push_T0(s);
5068 break;
5069 case 0x58 ... 0x5f: /* pop */
5070 if (CODE64(s)) {
5071 ot = dflag ? OT_QUAD : OT_WORD;
5072 } else {
5073 ot = dflag + OT_WORD;
5074 }
5075 gen_pop_T0(s);
5076 /* NOTE: order is important for pop %sp */
5077 gen_pop_update(s);
5078 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
5079 break;
5080 case 0x60: /* pusha */
5081 if (CODE64(s))
5082 goto illegal_op;
5083 gen_pusha(s);
5084 break;
5085 case 0x61: /* popa */
5086 if (CODE64(s))
5087 goto illegal_op;
5088 gen_popa(s);
5089 break;
5090 case 0x68: /* push Iv */
5091 case 0x6a:
5092 if (CODE64(s)) {
5093 ot = dflag ? OT_QUAD : OT_WORD;
5094 } else {
5095 ot = dflag + OT_WORD;
5096 }
5097 if (b == 0x68)
5098 val = insn_get(env, s, ot);
5099 else
5100 val = (int8_t)insn_get(env, s, OT_BYTE);
5101 gen_op_movl_T0_im(val);
5102 gen_push_T0(s);
5103 break;
5104 case 0x8f: /* pop Ev */
5105 if (CODE64(s)) {
5106 ot = dflag ? OT_QUAD : OT_WORD;
5107 } else {
5108 ot = dflag + OT_WORD;
5109 }
5110 modrm = cpu_ldub_code(env, s->pc++);
5111 mod = (modrm >> 6) & 3;
5112 gen_pop_T0(s);
5113 if (mod == 3) {
5114 /* NOTE: order is important for pop %sp */
5115 gen_pop_update(s);
5116 rm = (modrm & 7) | REX_B(s);
5117 gen_op_mov_reg_T0(ot, rm);
5118 } else {
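                /* popl_esp_hack biases ESP-relative address computation
                   so it sees the stack pointer as already popped; a pop
                   into memory computes the effective address after the
                   ESP increment. */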
5119 /* NOTE: order is important too for MMU exceptions */
5120 s->popl_esp_hack = 1 << ot;
5121 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5122 s->popl_esp_hack = 0;
5123 gen_pop_update(s);
5124 }
5125 break;
5126 case 0xc8: /* enter */
5127 {
5128 int level;
5129 val = cpu_lduw_code(env, s->pc);
5130 s->pc += 2;
5131 level = cpu_ldub_code(env, s->pc++);
5132 gen_enter(s, val, level);
5133 }
5134 break;
5135 case 0xc9: /* leave */
5136 /* XXX: exception not precise (ESP is updated before potential exception) */
5137 if (CODE64(s)) {
5138 gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
5139 gen_op_mov_reg_T0(OT_QUAD, R_ESP);
5140 } else if (s->ss32) {
5141 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
5142 gen_op_mov_reg_T0(OT_LONG, R_ESP);
5143 } else {
5144 gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
5145 gen_op_mov_reg_T0(OT_WORD, R_ESP);
5146 }
5147 gen_pop_T0(s);
5148 if (CODE64(s)) {
5149 ot = dflag ? OT_QUAD : OT_WORD;
5150 } else {
5151 ot = dflag + OT_WORD;
5152 }
5153 gen_op_mov_reg_T0(ot, R_EBP);
5154 gen_pop_update(s);
5155 break;
5156 case 0x06: /* push es */
5157 case 0x0e: /* push cs */
5158 case 0x16: /* push ss */
5159 case 0x1e: /* push ds */
5160 if (CODE64(s))
5161 goto illegal_op;
5162 gen_op_movl_T0_seg(b >> 3);
5163 gen_push_T0(s);
5164 break;
5165 case 0x1a0: /* push fs */
5166 case 0x1a8: /* push gs */
5167 gen_op_movl_T0_seg((b >> 3) & 7);
5168 gen_push_T0(s);
5169 break;
5170 case 0x07: /* pop es */
5171 case 0x17: /* pop ss */
5172 case 0x1f: /* pop ds */
5173 if (CODE64(s))
5174 goto illegal_op;
5175 reg = b >> 3;
5176 gen_pop_T0(s);
5177 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5178 gen_pop_update(s);
5179 if (reg == R_SS) {
5180 /* if reg == SS, inhibit interrupts/trace. */
5181 /* If several instructions disable interrupts, only the
5182 _first_ does it */
5183 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5184 gen_helper_set_inhibit_irq(cpu_env);
5185 s->tf = 0;
5186 }
5187 if (s->is_jmp) {
5188 gen_jmp_im(s->pc - s->cs_base);
5189 gen_eob(s);
5190 }
5191 break;
5192 case 0x1a1: /* pop fs */
5193 case 0x1a9: /* pop gs */
5194 gen_pop_T0(s);
5195 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5196 gen_pop_update(s);
5197 if (s->is_jmp) {
5198 gen_jmp_im(s->pc - s->cs_base);
5199 gen_eob(s);
5200 }
5201 break;
5202
5203 /**************************/
5204 /* mov */
5205 case 0x88:
5206 case 0x89: /* mov Gv, Ev */
5207 if ((b & 1) == 0)
5208 ot = OT_BYTE;
5209 else
5210 ot = dflag + OT_WORD;
5211 modrm = cpu_ldub_code(env, s->pc++);
5212 reg = ((modrm >> 3) & 7) | rex_r;
5213
5214 /* generate a generic store */
5215 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
5216 break;
5217 case 0xc6:
5218 case 0xc7: /* mov Ev, Iv */
5219 if ((b & 1) == 0)
5220 ot = OT_BYTE;
5221 else
5222 ot = dflag + OT_WORD;
5223 modrm = cpu_ldub_code(env, s->pc++);
5224 mod = (modrm >> 6) & 3;
5225 if (mod != 3) {
5226 s->rip_offset = insn_const_size(ot);
5227 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5228 }
5229 val = insn_get(env, s, ot);
5230 gen_op_movl_T0_im(val);
5231 if (mod != 3)
5232 gen_op_st_T0_A0(ot + s->mem_index);
5233 else
5234 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
5235 break;
5236 case 0x8a:
5237 case 0x8b: /* mov Ev, Gv */
5238 if ((b & 1) == 0)
5239 ot = OT_BYTE;
5240 else
5241 ot = OT_WORD + dflag;
5242 modrm = cpu_ldub_code(env, s->pc++);
5243 reg = ((modrm >> 3) & 7) | rex_r;
5244
5245 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5246 gen_op_mov_reg_T0(ot, reg);
5247 break;
5248 case 0x8e: /* mov seg, Gv */
5249 modrm = cpu_ldub_code(env, s->pc++);
5250 reg = (modrm >> 3) & 7;
5251 if (reg >= 6 || reg == R_CS)
5252 goto illegal_op;
5253 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
5254 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5255 if (reg == R_SS) {
5256 /* if reg == SS, inhibit interrupts/trace */
5257 /* If several instructions disable interrupts, only the
5258 _first_ does it */
5259 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5260 gen_helper_set_inhibit_irq(cpu_env);
5261 s->tf = 0;
5262 }
5263 if (s->is_jmp) {
5264 gen_jmp_im(s->pc - s->cs_base);
5265 gen_eob(s);
5266 }
5267 break;
5268 case 0x8c: /* mov Gv, seg */
5269 modrm = cpu_ldub_code(env, s->pc++);
5270 reg = (modrm >> 3) & 7;
5271 mod = (modrm >> 6) & 3;
5272 if (reg >= 6)
5273 goto illegal_op;
5274 gen_op_movl_T0_seg(reg);
5275 if (mod == 3)
5276 ot = OT_WORD + dflag;
5277 else
5278 ot = OT_WORD;
5279 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5280 break;
5281
    case 0x1b6: /* movzbS Gv, Eb */
    case 0x1b7: /* movzwS Gv, Ew */
    case 0x1be: /* movsbS Gv, Eb */
    case 0x1bf: /* movswS Gv, Ew */
5286 {
5287 int d_ot;
            /* d_ot is the size of the destination */
            d_ot = dflag + OT_WORD;
            /* ot is the size of the source */
            ot = (b & 1) + OT_BYTE;
5292 modrm = cpu_ldub_code(env, s->pc++);
5293 reg = ((modrm >> 3) & 7) | rex_r;
5294 mod = (modrm >> 6) & 3;
5295 rm = (modrm & 7) | REX_B(s);
5296
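            /* Bit 3 of the opcode distinguishes sign extension
               (0x1be/0x1bf, movsx) from zero extension (movzx); it is
               folded into the switch below as (ot | (b & 8)). */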
5297 if (mod == 3) {
5298 gen_op_mov_TN_reg(ot, 0, rm);
5299 switch(ot | (b & 8)) {
5300 case OT_BYTE:
5301 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5302 break;
5303 case OT_BYTE | 8:
5304 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5305 break;
5306 case OT_WORD:
5307 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5308 break;
5309 default:
5310 case OT_WORD | 8:
5311 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5312 break;
5313 }
5314 gen_op_mov_reg_T0(d_ot, reg);
5315 } else {
5316 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5317 if (b & 8) {
5318 gen_op_lds_T0_A0(ot + s->mem_index);
5319 } else {
5320 gen_op_ldu_T0_A0(ot + s->mem_index);
5321 }
5322 gen_op_mov_reg_T0(d_ot, reg);
5323 }
5324 }
5325 break;
5326
5327 case 0x8d: /* lea */
5328 ot = dflag + OT_WORD;
5329 modrm = cpu_ldub_code(env, s->pc++);
5330 mod = (modrm >> 6) & 3;
5331 if (mod == 3)
5332 goto illegal_op;
5333 reg = ((modrm >> 3) & 7) | rex_r;
5334 /* we must ensure that no segment is added */
5335 s->override = -1;
5336 val = s->addseg;
5337 s->addseg = 0;
5338 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5339 s->addseg = val;
5340 gen_op_mov_reg_A0(ot - OT_WORD, reg);
5341 break;
5342
5343 case 0xa0: /* mov EAX, Ov */
5344 case 0xa1:
5345 case 0xa2: /* mov Ov, EAX */
5346 case 0xa3:
5347 {
5348 target_ulong offset_addr;
5349
5350 if ((b & 1) == 0)
5351 ot = OT_BYTE;
5352 else
5353 ot = dflag + OT_WORD;
5354 #ifdef TARGET_X86_64
5355 if (s->aflag == 2) {
5356 offset_addr = cpu_ldq_code(env, s->pc);
5357 s->pc += 8;
5358 gen_op_movq_A0_im(offset_addr);
5359 } else
5360 #endif
5361 {
5362 if (s->aflag) {
5363 offset_addr = insn_get(env, s, OT_LONG);
5364 } else {
5365 offset_addr = insn_get(env, s, OT_WORD);
5366 }
5367 gen_op_movl_A0_im(offset_addr);
5368 }
5369 gen_add_A0_ds_seg(s);
5370 if ((b & 2) == 0) {
5371 gen_op_ld_T0_A0(ot + s->mem_index);
5372 gen_op_mov_reg_T0(ot, R_EAX);
5373 } else {
5374 gen_op_mov_TN_reg(ot, 0, R_EAX);
5375 gen_op_st_T0_A0(ot + s->mem_index);
5376 }
5377 }
5378 break;
5379 case 0xd7: /* xlat */
5380 #ifdef TARGET_X86_64
5381 if (s->aflag == 2) {
5382 gen_op_movq_A0_reg(R_EBX);
5383 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5384 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5385 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5386 } else
5387 #endif
5388 {
5389 gen_op_movl_A0_reg(R_EBX);
5390 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5391 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5392 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5393 if (s->aflag == 0)
5394 gen_op_andl_A0_ffff();
5395 else
5396 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
5397 }
5398 gen_add_A0_ds_seg(s);
5399 gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
5400 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
5401 break;
5402 case 0xb0 ... 0xb7: /* mov R, Ib */
5403 val = insn_get(env, s, OT_BYTE);
5404 gen_op_movl_T0_im(val);
5405 gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
5406 break;
5407 case 0xb8 ... 0xbf: /* mov R, Iv */
5408 #ifdef TARGET_X86_64
5409 if (dflag == 2) {
5410 uint64_t tmp;
5411 /* 64 bit case */
5412 tmp = cpu_ldq_code(env, s->pc);
5413 s->pc += 8;
5414 reg = (b & 7) | REX_B(s);
5415 gen_movtl_T0_im(tmp);
5416 gen_op_mov_reg_T0(OT_QUAD, reg);
5417 } else
5418 #endif
5419 {
5420 ot = dflag ? OT_LONG : OT_WORD;
5421 val = insn_get(env, s, ot);
5422 reg = (b & 7) | REX_B(s);
5423 gen_op_movl_T0_im(val);
5424 gen_op_mov_reg_T0(ot, reg);
5425 }
5426 break;
5427
5428 case 0x91 ... 0x97: /* xchg R, EAX */
5429 do_xchg_reg_eax:
5430 ot = dflag + OT_WORD;
5431 reg = (b & 7) | REX_B(s);
5432 rm = R_EAX;
5433 goto do_xchg_reg;
5434 case 0x86:
5435 case 0x87: /* xchg Ev, Gv */
5436 if ((b & 1) == 0)
5437 ot = OT_BYTE;
5438 else
5439 ot = dflag + OT_WORD;
5440 modrm = cpu_ldub_code(env, s->pc++);
5441 reg = ((modrm >> 3) & 7) | rex_r;
5442 mod = (modrm >> 6) & 3;
5443 if (mod == 3) {
5444 rm = (modrm & 7) | REX_B(s);
5445 do_xchg_reg:
5446 gen_op_mov_TN_reg(ot, 0, reg);
5447 gen_op_mov_TN_reg(ot, 1, rm);
5448 gen_op_mov_reg_T0(ot, rm);
5449 gen_op_mov_reg_T1(ot, reg);
5450 } else {
5451 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5452 gen_op_mov_TN_reg(ot, 0, reg);
5453 /* for xchg, lock is implicit */
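                /* If a LOCK prefix was present, gen_helper_lock() has
                   already been emitted above, so lock/unlock here only
                   when it was absent. */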
5454 if (!(prefixes & PREFIX_LOCK))
5455 gen_helper_lock();
5456 gen_op_ld_T1_A0(ot + s->mem_index);
5457 gen_op_st_T0_A0(ot + s->mem_index);
5458 if (!(prefixes & PREFIX_LOCK))
5459 gen_helper_unlock();
5460 gen_op_mov_reg_T1(ot, reg);
5461 }
5462 break;
5463 case 0xc4: /* les Gv */
5464 if (CODE64(s))
5465 goto illegal_op;
5466 op = R_ES;
5467 goto do_lxx;
5468 case 0xc5: /* lds Gv */
5469 if (CODE64(s))
5470 goto illegal_op;
5471 op = R_DS;
5472 goto do_lxx;
5473 case 0x1b2: /* lss Gv */
5474 op = R_SS;
5475 goto do_lxx;
5476 case 0x1b4: /* lfs Gv */
5477 op = R_FS;
5478 goto do_lxx;
5479 case 0x1b5: /* lgs Gv */
5480 op = R_GS;
5481 do_lxx:
5482 ot = dflag ? OT_LONG : OT_WORD;
5483 modrm = cpu_ldub_code(env, s->pc++);
5484 reg = ((modrm >> 3) & 7) | rex_r;
5485 mod = (modrm >> 6) & 3;
5486 if (mod == 3)
5487 goto illegal_op;
5488 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5489 gen_op_ld_T1_A0(ot + s->mem_index);
5490 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
5491 /* load the segment first to handle exceptions properly */
5492 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
5493 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5494 /* then put the data */
5495 gen_op_mov_reg_T1(ot, reg);
5496 if (s->is_jmp) {
5497 gen_jmp_im(s->pc - s->cs_base);
5498 gen_eob(s);
5499 }
5500 break;
5501
5502 /************************/
5503 /* shifts */
5504 case 0xc0:
5505 case 0xc1:
5506 /* shift Ev,Ib */
5507 shift = 2;
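        /* shift selects the count source: 0 = CL, 1 = the constant 1,
           2 = an immediate byte read below. */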
5508 grp2:
5509 {
5510 if ((b & 1) == 0)
5511 ot = OT_BYTE;
5512 else
5513 ot = dflag + OT_WORD;
5514
5515 modrm = cpu_ldub_code(env, s->pc++);
5516 mod = (modrm >> 6) & 3;
5517 op = (modrm >> 3) & 7;
5518
5519 if (mod != 3) {
5520 if (shift == 2) {
5521 s->rip_offset = 1;
5522 }
5523 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5524 opreg = OR_TMP0;
5525 } else {
5526 opreg = (modrm & 7) | REX_B(s);
5527 }
5528
5529 /* simpler op */
5530 if (shift == 0) {
5531 gen_shift(s, op, ot, opreg, OR_ECX);
5532 } else {
5533 if (shift == 2) {
5534 shift = cpu_ldub_code(env, s->pc++);
5535 }
5536 gen_shifti(s, op, ot, opreg, shift);
5537 }
5538 }
5539 break;
5540 case 0xd0:
5541 case 0xd1:
5542 /* shift Ev,1 */
5543 shift = 1;
5544 goto grp2;
5545 case 0xd2:
5546 case 0xd3:
5547 /* shift Ev,cl */
5548 shift = 0;
5549 goto grp2;
5550
5551 case 0x1a4: /* shld imm */
5552 op = 0;
5553 shift = 1;
5554 goto do_shiftd;
5555 case 0x1a5: /* shld cl */
5556 op = 0;
5557 shift = 0;
5558 goto do_shiftd;
5559 case 0x1ac: /* shrd imm */
5560 op = 1;
5561 shift = 1;
5562 goto do_shiftd;
5563 case 0x1ad: /* shrd cl */
5564 op = 1;
5565 shift = 0;
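        /* op selects the direction (0 = shld, 1 = shrd); shift is 1
           for the immediate-count forms and 0 for the CL forms. */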
5566 do_shiftd:
5567 ot = dflag + OT_WORD;
5568 modrm = cpu_ldub_code(env, s->pc++);
5569 mod = (modrm >> 6) & 3;
5570 rm = (modrm & 7) | REX_B(s);
5571 reg = ((modrm >> 3) & 7) | rex_r;
5572 if (mod != 3) {
5573 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5574 opreg = OR_TMP0;
5575 } else {
5576 opreg = rm;
5577 }
5578 gen_op_mov_TN_reg(ot, 1, reg);
5579
5580 if (shift) {
5581 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5582 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5583 tcg_temp_free(imm);
5584 } else {
5585 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
5586 }
5587 break;
5588
5589 /************************/
5590 /* floats */
5591 case 0xd8 ... 0xdf:
5592 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5593 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5594 /* XXX: what to do if illegal op ? */
5595 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5596 break;
5597 }
5598 modrm = cpu_ldub_code(env, s->pc++);
5599 mod = (modrm >> 6) & 3;
5600 rm = modrm & 7;
5601 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
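        /* op is a 6-bit index combining the low 3 bits of the escape
           opcode (d8..df) with the /reg field of the modrm byte. */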
5602 if (mod != 3) {
5603 /* memory op */
5604 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5605 switch(op) {
5606 case 0x00 ... 0x07: /* fxxxs */
5607 case 0x10 ... 0x17: /* fixxxl */
5608 case 0x20 ... 0x27: /* fxxxl */
5609 case 0x30 ... 0x37: /* fixxx */
5610 {
5611 int op1;
5612 op1 = op & 7;
5613
5614 switch(op >> 4) {
5615 case 0:
5616 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5617 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5618 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
5619 break;
5620 case 1:
5621 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5622 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5623 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5624 break;
5625 case 2:
5626 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5627 (s->mem_index >> 2) - 1);
5628 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
5629 break;
5630 case 3:
5631 default:
5632 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5633 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5634 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5635 break;
5636 }
5637
5638 gen_helper_fp_arith_ST0_FT0(op1);
5639 if (op1 == 3) {
5640 /* fcomp needs pop */
5641 gen_helper_fpop(cpu_env);
5642 }
5643 }
5644 break;
5645 case 0x08: /* flds */
5646 case 0x0a: /* fsts */
5647 case 0x0b: /* fstps */
5648 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5649 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5650 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5651 switch(op & 7) {
5652 case 0:
5653 switch(op >> 4) {
5654 case 0:
5655 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5656 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5657 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
5658 break;
5659 case 1:
5660 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5661 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5662 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5663 break;
5664 case 2:
5665 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5666 (s->mem_index >> 2) - 1);
5667 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
5668 break;
5669 case 3:
5670 default:
5671 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5672 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5673 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5674 break;
5675 }
5676 break;
5677 case 1:
5678 /* XXX: the corresponding CPUID bit must be tested ! */
5679 switch(op >> 4) {
5680 case 1:
5681 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
5682 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5683 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5684 break;
5685 case 2:
5686 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
5687 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5688 (s->mem_index >> 2) - 1);
5689 break;
5690 case 3:
5691 default:
5692 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
5693 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5694 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5695 break;
5696 }
5697 gen_helper_fpop(cpu_env);
5698 break;
5699 default:
5700 switch(op >> 4) {
5701 case 0:
5702 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
5703 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5704 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5705 break;
5706 case 1:
5707 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
5708 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5709 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5710 break;
5711 case 2:
5712 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
5713 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5714 (s->mem_index >> 2) - 1);
5715 break;
5716 case 3:
5717 default:
5718 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
5719 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5720 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5721 break;
5722 }
5723 if ((op & 7) == 3)
5724 gen_helper_fpop(cpu_env);
5725 break;
5726 }
5727 break;
5728 case 0x0c: /* fldenv mem */
5729 gen_update_cc_op(s);
5730 gen_jmp_im(pc_start - s->cs_base);
5731 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5732 break;
5733 case 0x0d: /* fldcw mem */
5734 gen_op_ld_T0_A0(OT_WORD + s->mem_index);
5735 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5736 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
5737 break;
5738 case 0x0e: /* fnstenv mem */
5739 gen_update_cc_op(s);
5740 gen_jmp_im(pc_start - s->cs_base);
5741 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5742 break;
5743 case 0x0f: /* fnstcw mem */
5744 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
5745 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5746 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5747 break;
5748 case 0x1d: /* fldt mem */
5749 gen_update_cc_op(s);
5750 gen_jmp_im(pc_start - s->cs_base);
5751 gen_helper_fldt_ST0(cpu_env, cpu_A0);
5752 break;
5753 case 0x1f: /* fstpt mem */
5754 gen_update_cc_op(s);
5755 gen_jmp_im(pc_start - s->cs_base);
5756 gen_helper_fstt_ST0(cpu_env, cpu_A0);
5757 gen_helper_fpop(cpu_env);
5758 break;
5759 case 0x2c: /* frstor mem */
5760 gen_update_cc_op(s);
5761 gen_jmp_im(pc_start - s->cs_base);
5762 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5763 break;
5764 case 0x2e: /* fnsave mem */
5765 gen_update_cc_op(s);
5766 gen_jmp_im(pc_start - s->cs_base);
5767 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5768 break;
5769 case 0x2f: /* fnstsw mem */
5770 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
5771 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5772 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5773 break;
5774 case 0x3c: /* fbld */
5775 gen_update_cc_op(s);
5776 gen_jmp_im(pc_start - s->cs_base);
5777 gen_helper_fbld_ST0(cpu_env, cpu_A0);
5778 break;
5779 case 0x3e: /* fbstp */
5780 gen_update_cc_op(s);
5781 gen_jmp_im(pc_start - s->cs_base);
5782 gen_helper_fbst_ST0(cpu_env, cpu_A0);
5783 gen_helper_fpop(cpu_env);
5784 break;
5785 case 0x3d: /* fildll */
5786 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5787 (s->mem_index >> 2) - 1);
5788 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
5789 break;
5790 case 0x3f: /* fistpll */
5791 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
5792 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5793 (s->mem_index >> 2) - 1);
5794 gen_helper_fpop(cpu_env);
5795 break;
5796 default:
5797 goto illegal_op;
5798 }
5799 } else {
5800 /* register float ops */
5801 opreg = rm;
5802
5803 switch(op) {
5804 case 0x08: /* fld sti */
5805 gen_helper_fpush(cpu_env);
5806 gen_helper_fmov_ST0_STN(cpu_env,
5807 tcg_const_i32((opreg + 1) & 7));
5808 break;
5809 case 0x09: /* fxchg sti */
5810 case 0x29: /* fxchg4 sti, undocumented op */
5811 case 0x39: /* fxchg7 sti, undocumented op */
5812 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
5813 break;
5814 case 0x0a: /* grp d9/2 */
5815 switch(rm) {
5816 case 0: /* fnop */
5817 /* check exceptions (FreeBSD FPU probe) */
5818 gen_update_cc_op(s);
5819 gen_jmp_im(pc_start - s->cs_base);
5820 gen_helper_fwait(cpu_env);
5821 break;
5822 default:
5823 goto illegal_op;
5824 }
5825 break;
5826 case 0x0c: /* grp d9/4 */
5827 switch(rm) {
5828 case 0: /* fchs */
5829 gen_helper_fchs_ST0(cpu_env);
5830 break;
5831 case 1: /* fabs */
5832 gen_helper_fabs_ST0(cpu_env);
5833 break;
5834 case 4: /* ftst */
5835 gen_helper_fldz_FT0(cpu_env);
5836 gen_helper_fcom_ST0_FT0(cpu_env);
5837 break;
5838 case 5: /* fxam */
5839 gen_helper_fxam_ST0(cpu_env);
5840 break;
5841 default:
5842 goto illegal_op;
5843 }
5844 break;
5845 case 0x0d: /* grp d9/5 */
5846 {
5847 switch(rm) {
5848 case 0:
5849 gen_helper_fpush(cpu_env);
5850 gen_helper_fld1_ST0(cpu_env);
5851 break;
5852 case 1:
5853 gen_helper_fpush(cpu_env);
5854 gen_helper_fldl2t_ST0(cpu_env);
5855 break;
5856 case 2:
5857 gen_helper_fpush(cpu_env);
5858 gen_helper_fldl2e_ST0(cpu_env);
5859 break;
5860 case 3:
5861 gen_helper_fpush(cpu_env);
5862 gen_helper_fldpi_ST0(cpu_env);
5863 break;
5864 case 4:
5865 gen_helper_fpush(cpu_env);
5866 gen_helper_fldlg2_ST0(cpu_env);
5867 break;
5868 case 5:
5869 gen_helper_fpush(cpu_env);
5870 gen_helper_fldln2_ST0(cpu_env);
5871 break;
5872 case 6:
5873 gen_helper_fpush(cpu_env);
5874 gen_helper_fldz_ST0(cpu_env);
5875 break;
5876 default:
5877 goto illegal_op;
5878 }
5879 }
5880 break;
5881 case 0x0e: /* grp d9/6 */
5882 switch(rm) {
5883 case 0: /* f2xm1 */
5884 gen_helper_f2xm1(cpu_env);
5885 break;
5886 case 1: /* fyl2x */
5887 gen_helper_fyl2x(cpu_env);
5888 break;
5889 case 2: /* fptan */
5890 gen_helper_fptan(cpu_env);
5891 break;
5892 case 3: /* fpatan */
5893 gen_helper_fpatan(cpu_env);
5894 break;
5895 case 4: /* fxtract */
5896 gen_helper_fxtract(cpu_env);
5897 break;
5898 case 5: /* fprem1 */
5899 gen_helper_fprem1(cpu_env);
5900 break;
5901 case 6: /* fdecstp */
5902 gen_helper_fdecstp(cpu_env);
5903 break;
5904 default:
5905 case 7: /* fincstp */
5906 gen_helper_fincstp(cpu_env);
5907 break;
5908 }
5909 break;
5910 case 0x0f: /* grp d9/7 */
5911 switch(rm) {
5912 case 0: /* fprem */
5913 gen_helper_fprem(cpu_env);
5914 break;
5915 case 1: /* fyl2xp1 */
5916 gen_helper_fyl2xp1(cpu_env);
5917 break;
5918 case 2: /* fsqrt */
5919 gen_helper_fsqrt(cpu_env);
5920 break;
5921 case 3: /* fsincos */
5922 gen_helper_fsincos(cpu_env);
5923 break;
5924 case 5: /* fscale */
5925 gen_helper_fscale(cpu_env);
5926 break;
5927 case 4: /* frndint */
5928 gen_helper_frndint(cpu_env);
5929 break;
5930 case 6: /* fsin */
5931 gen_helper_fsin(cpu_env);
5932 break;
5933 default:
5934 case 7: /* fcos */
5935 gen_helper_fcos(cpu_env);
5936 break;
5937 }
5938 break;
5939 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5940 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5941 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5942 {
5943 int op1;
5944
5945 op1 = op & 7;
5946 if (op >= 0x20) {
5947 gen_helper_fp_arith_STN_ST0(op1, opreg);
5948 if (op >= 0x30)
5949 gen_helper_fpop(cpu_env);
5950 } else {
5951 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5952 gen_helper_fp_arith_ST0_FT0(op1);
5953 }
5954 }
5955 break;
5956 case 0x02: /* fcom */
5957 case 0x22: /* fcom2, undocumented op */
5958 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5959 gen_helper_fcom_ST0_FT0(cpu_env);
5960 break;
5961 case 0x03: /* fcomp */
5962 case 0x23: /* fcomp3, undocumented op */
5963 case 0x32: /* fcomp5, undocumented op */
5964 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5965 gen_helper_fcom_ST0_FT0(cpu_env);
5966 gen_helper_fpop(cpu_env);
5967 break;
5968 case 0x15: /* da/5 */
5969 switch(rm) {
5970 case 1: /* fucompp */
5971 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
5972 gen_helper_fucom_ST0_FT0(cpu_env);
5973 gen_helper_fpop(cpu_env);
5974 gen_helper_fpop(cpu_env);
5975 break;
5976 default:
5977 goto illegal_op;
5978 }
5979 break;
5980 case 0x1c:
5981 switch(rm) {
5982 case 0: /* feni (287 only, just do nop here) */
5983 break;
5984 case 1: /* fdisi (287 only, just do nop here) */
5985 break;
5986 case 2: /* fclex */
5987 gen_helper_fclex(cpu_env);
5988 break;
5989 case 3: /* fninit */
5990 gen_helper_fninit(cpu_env);
5991 break;
5992 case 4: /* fsetpm (287 only, just do nop here) */
5993 break;
5994 default:
5995 goto illegal_op;
5996 }
5997 break;
5998 case 0x1d: /* fucomi */
5999 gen_update_cc_op(s);
6000 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6001 gen_helper_fucomi_ST0_FT0(cpu_env);
6002 set_cc_op(s, CC_OP_EFLAGS);
6003 break;
6004 case 0x1e: /* fcomi */
6005 gen_update_cc_op(s);
6006 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6007 gen_helper_fcomi_ST0_FT0(cpu_env);
6008 set_cc_op(s, CC_OP_EFLAGS);
6009 break;
6010 case 0x28: /* ffree sti */
6011 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6012 break;
6013 case 0x2a: /* fst sti */
6014 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6015 break;
6016 case 0x2b: /* fstp sti */
6017 case 0x0b: /* fstp1 sti, undocumented op */
6018 case 0x3a: /* fstp8 sti, undocumented op */
6019 case 0x3b: /* fstp9 sti, undocumented op */
6020 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6021 gen_helper_fpop(cpu_env);
6022 break;
6023 case 0x2c: /* fucom st(i) */
6024 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6025 gen_helper_fucom_ST0_FT0(cpu_env);
6026 break;
6027 case 0x2d: /* fucomp st(i) */
6028 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6029 gen_helper_fucom_ST0_FT0(cpu_env);
6030 gen_helper_fpop(cpu_env);
6031 break;
6032 case 0x33: /* de/3 */
6033 switch(rm) {
6034 case 1: /* fcompp */
6035 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6036 gen_helper_fcom_ST0_FT0(cpu_env);
6037 gen_helper_fpop(cpu_env);
6038 gen_helper_fpop(cpu_env);
6039 break;
6040 default:
6041 goto illegal_op;
6042 }
6043 break;
6044 case 0x38: /* ffreep sti, undocumented op */
6045 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6046 gen_helper_fpop(cpu_env);
6047 break;
6048 case 0x3c: /* df/4 */
6049 switch(rm) {
6050 case 0:
6051 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
6052 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6053 gen_op_mov_reg_T0(OT_WORD, R_EAX);
6054 break;
6055 default:
6056 goto illegal_op;
6057 }
6058 break;
6059 case 0x3d: /* fucomip */
6060 gen_update_cc_op(s);
6061 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6062 gen_helper_fucomi_ST0_FT0(cpu_env);
6063 gen_helper_fpop(cpu_env);
6064 set_cc_op(s, CC_OP_EFLAGS);
6065 break;
6066 case 0x3e: /* fcomip */
6067 gen_update_cc_op(s);
6068 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6069 gen_helper_fcomi_ST0_FT0(cpu_env);
6070 gen_helper_fpop(cpu_env);
6071 set_cc_op(s, CC_OP_EFLAGS);
6072 break;
6073 case 0x10 ... 0x13: /* fcmovxx */
6074 case 0x18 ... 0x1b:
6075 {
6076 int op1, l1;
6077 static const uint8_t fcmov_cc[8] = {
6078 (JCC_B << 1),
6079 (JCC_Z << 1),
6080 (JCC_BE << 1),
6081 (JCC_P << 1),
6082 };
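/* Note: the low two bits of the opcode select the base condition from
   fcmov_cc; bit 3 distinguishes the da (move if condition true) forms
   from the db (move if condition false) forms.  The branch below skips
   the fmov when the move should not happen, so the da forms use the
   negated condition (low bit of op1 set).  */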
6083 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
6084 l1 = gen_new_label();
6085 gen_jcc1_noeob(s, op1, l1);
6086 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
6087 gen_set_label(l1);
6088 }
6089 break;
6090 default:
6091 goto illegal_op;
6092 }
6093 }
6094 break;
6095 /************************/
6096 /* string ops */
6097
6098 case 0xa4: /* movsS */
6099 case 0xa5:
6100 if ((b & 1) == 0)
6101 ot = OT_BYTE;
6102 else
6103 ot = dflag + OT_WORD;
6104
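/* Note: for movs the repz and repnz prefixes behave identically; there
   is no comparison involved, so both simply repeat ECX times.  */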
6105 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6106 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6107 } else {
6108 gen_movs(s, ot);
6109 }
6110 break;
6111
6112 case 0xaa: /* stosS */
6113 case 0xab:
6114 if ((b & 1) == 0)
6115 ot = OT_BYTE;
6116 else
6117 ot = dflag + OT_WORD;
6118
6119 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6120 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6121 } else {
6122 gen_stos(s, ot);
6123 }
6124 break;
6125 case 0xac: /* lodsS */
6126 case 0xad:
6127 if ((b & 1) == 0)
6128 ot = OT_BYTE;
6129 else
6130 ot = dflag + OT_WORD;
6131 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6132 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6133 } else {
6134 gen_lods(s, ot);
6135 }
6136 break;
6137 case 0xae: /* scasS */
6138 case 0xaf:
6139 if ((b & 1) == 0)
6140 ot = OT_BYTE;
6141 else
6142 ot = dflag + OT_WORD;
6143 if (prefixes & PREFIX_REPNZ) {
6144 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6145 } else if (prefixes & PREFIX_REPZ) {
6146 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6147 } else {
6148 gen_scas(s, ot);
6149 }
6150 break;
6151
6152 case 0xa6: /* cmpsS */
6153 case 0xa7:
6154 if ((b & 1) == 0)
6155 ot = OT_BYTE;
6156 else
6157 ot = dflag + OT_WORD;
6158 if (prefixes & PREFIX_REPNZ) {
6159 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6160 } else if (prefixes & PREFIX_REPZ) {
6161 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6162 } else {
6163 gen_cmps(s, ot);
6164 }
6165 break;
6166 case 0x6c: /* insS */
6167 case 0x6d:
6168 if ((b & 1) == 0)
6169 ot = OT_BYTE;
6170 else
6171 ot = dflag ? OT_LONG : OT_WORD;
6172 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6173 gen_op_andl_T0_ffff();
6174 gen_check_io(s, ot, pc_start - s->cs_base,
6175 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6176 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6177 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6178 } else {
6179 gen_ins(s, ot);
6180 if (use_icount) {
6181 gen_jmp(s, s->pc - s->cs_base);
6182 }
6183 }
6184 break;
6185 case 0x6e: /* outsS */
6186 case 0x6f:
6187 if ((b & 1) == 0)
6188 ot = OT_BYTE;
6189 else
6190 ot = dflag ? OT_LONG : OT_WORD;
6191 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6192 gen_op_andl_T0_ffff();
6193 gen_check_io(s, ot, pc_start - s->cs_base,
6194 svm_is_rep(prefixes) | 4);
6195 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6196 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6197 } else {
6198 gen_outs(s, ot);
6199 if (use_icount) {
6200 gen_jmp(s, s->pc - s->cs_base);
6201 }
6202 }
6203 break;
6204
6205 /************************/
6206 /* port I/O */
6207
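/* Note: with icount enabled, each in/out below is bracketed by
   gen_io_start/gen_io_end and the TB is ended with a jump, so the
   instruction count stays exact across the I/O access.  */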
6208 case 0xe4:
6209 case 0xe5:
6210 if ((b & 1) == 0)
6211 ot = OT_BYTE;
6212 else
6213 ot = dflag ? OT_LONG : OT_WORD;
6214 val = cpu_ldub_code(env, s->pc++);
6215 gen_op_movl_T0_im(val);
6216 gen_check_io(s, ot, pc_start - s->cs_base,
6217 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6218 if (use_icount)
6219 gen_io_start();
6220 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6221 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6222 gen_op_mov_reg_T1(ot, R_EAX);
6223 if (use_icount) {
6224 gen_io_end();
6225 gen_jmp(s, s->pc - s->cs_base);
6226 }
6227 break;
6228 case 0xe6:
6229 case 0xe7:
6230 if ((b & 1) == 0)
6231 ot = OT_BYTE;
6232 else
6233 ot = dflag ? OT_LONG : OT_WORD;
6234 val = cpu_ldub_code(env, s->pc++);
6235 gen_op_movl_T0_im(val);
6236 gen_check_io(s, ot, pc_start - s->cs_base,
6237 svm_is_rep(prefixes));
6238 gen_op_mov_TN_reg(ot, 1, R_EAX);
6239
6240 if (use_icount)
6241 gen_io_start();
6242 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6243 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6244 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6245 if (use_icount) {
6246 gen_io_end();
6247 gen_jmp(s, s->pc - s->cs_base);
6248 }
6249 break;
6250 case 0xec:
6251 case 0xed:
6252 if ((b & 1) == 0)
6253 ot = OT_BYTE;
6254 else
6255 ot = dflag ? OT_LONG : OT_WORD;
6256 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6257 gen_op_andl_T0_ffff();
6258 gen_check_io(s, ot, pc_start - s->cs_base,
6259 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6260 if (use_icount)
6261 gen_io_start();
6262 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6263 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6264 gen_op_mov_reg_T1(ot, R_EAX);
6265 if (use_icount) {
6266 gen_io_end();
6267 gen_jmp(s, s->pc - s->cs_base);
6268 }
6269 break;
6270 case 0xee:
6271 case 0xef:
6272 if ((b & 1) == 0)
6273 ot = OT_BYTE;
6274 else
6275 ot = dflag ? OT_LONG : OT_WORD;
6276 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6277 gen_op_andl_T0_ffff();
6278 gen_check_io(s, ot, pc_start - s->cs_base,
6279 svm_is_rep(prefixes));
6280 gen_op_mov_TN_reg(ot, 1, R_EAX);
6281
6282 if (use_icount)
6283 gen_io_start();
6284 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6285 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6286 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6287 if (use_icount) {
6288 gen_io_end();
6289 gen_jmp(s, s->pc - s->cs_base);
6290 }
6291 break;
6292
6293 /************************/
6294 /* control */
6295 case 0xc2: /* ret im */
6296 val = cpu_ldsw_code(env, s->pc);
6297 s->pc += 2;
6298 gen_pop_T0(s);
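/* Note: in 64-bit mode near ret defaults to a 64-bit operand; only the
   16-bit form is honoured, so any 32-bit dflag is widened to quad.  */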
6299 if (CODE64(s) && s->dflag)
6300 s->dflag = 2;
6301 gen_stack_update(s, val + (2 << s->dflag));
6302 if (s->dflag == 0)
6303 gen_op_andl_T0_ffff();
6304 gen_op_jmp_T0();
6305 gen_eob(s);
6306 break;
6307 case 0xc3: /* ret */
6308 gen_pop_T0(s);
6309 gen_pop_update(s);
6310 if (s->dflag == 0)
6311 gen_op_andl_T0_ffff();
6312 gen_op_jmp_T0();
6313 gen_eob(s);
6314 break;
6315 case 0xca: /* lret im */
6316 val = cpu_ldsw_code(env, s->pc);
6317 s->pc += 2;
6318 do_lret:
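/* Note: in protected mode the helper performs the privilege checks;
   otherwise CS:EIP is popped directly off the stack here.  */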
6319 if (s->pe && !s->vm86) {
6320 gen_update_cc_op(s);
6321 gen_jmp_im(pc_start - s->cs_base);
6322 gen_helper_lret_protected(cpu_env, tcg_const_i32(s->dflag),
6323 tcg_const_i32(val));
6324 } else {
6325 gen_stack_A0(s);
6326 /* pop offset */
6327 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6328 if (s->dflag == 0)
6329 gen_op_andl_T0_ffff();
6330 /* NOTE: keeping EIP updated is not a problem in case of
6331 exception */
6332 gen_op_jmp_T0();
6333 /* pop selector */
6334 gen_op_addl_A0_im(2 << s->dflag);
6335 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6336 gen_op_movl_seg_T0_vm(R_CS);
6337 /* add stack offset */
6338 gen_stack_update(s, val + (4 << s->dflag));
6339 }
6340 gen_eob(s);
6341 break;
6342 case 0xcb: /* lret */
6343 val = 0;
6344 goto do_lret;
6345 case 0xcf: /* iret */
6346 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6347 if (!s->pe) {
6348 /* real mode */
6349 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
6350 set_cc_op(s, CC_OP_EFLAGS);
6351 } else if (s->vm86) {
6352 if (s->iopl != 3) {
6353 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6354 } else {
6355 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
6356 set_cc_op(s, CC_OP_EFLAGS);
6357 }
6358 } else {
6359 gen_update_cc_op(s);
6360 gen_jmp_im(pc_start - s->cs_base);
6361 gen_helper_iret_protected(cpu_env, tcg_const_i32(s->dflag),
6362 tcg_const_i32(s->pc - s->cs_base));
6363 set_cc_op(s, CC_OP_EFLAGS);
6364 }
6365 gen_eob(s);
6366 break;
6367 case 0xe8: /* call im */
6368 {
6369 if (dflag)
6370 tval = (int32_t)insn_get(env, s, OT_LONG);
6371 else
6372 tval = (int16_t)insn_get(env, s, OT_WORD);
6373 next_eip = s->pc - s->cs_base;
6374 tval += next_eip;
6375 if (s->dflag == 0)
6376 tval &= 0xffff;
6377 else if(!CODE64(s))
6378 tval &= 0xffffffff;
6379 gen_movtl_T0_im(next_eip);
6380 gen_push_T0(s);
6381 gen_jmp(s, tval);
6382 }
6383 break;
6384 case 0x9a: /* lcall im */
6385 {
6386 unsigned int selector, offset;
6387
6388 if (CODE64(s))
6389 goto illegal_op;
6390 ot = dflag ? OT_LONG : OT_WORD;
6391 offset = insn_get(env, s, ot);
6392 selector = insn_get(env, s, OT_WORD);
6393
6394 gen_op_movl_T0_im(selector);
6395 gen_op_movl_T1_imu(offset);
6396 }
6397 goto do_lcall;
6398 case 0xe9: /* jmp im */
6399 if (dflag)
6400 tval = (int32_t)insn_get(env, s, OT_LONG);
6401 else
6402 tval = (int16_t)insn_get(env, s, OT_WORD);
6403 tval += s->pc - s->cs_base;
6404 if (s->dflag == 0)
6405 tval &= 0xffff;
6406 else if(!CODE64(s))
6407 tval &= 0xffffffff;
6408 gen_jmp(s, tval);
6409 break;
6410 case 0xea: /* ljmp im */
6411 {
6412 unsigned int selector, offset;
6413
6414 if (CODE64(s))
6415 goto illegal_op;
6416 ot = dflag ? OT_LONG : OT_WORD;
6417 offset = insn_get(env, s, ot);
6418 selector = insn_get(env, s, OT_WORD);
6419
6420 gen_op_movl_T0_im(selector);
6421 gen_op_movl_T1_imu(offset);
6422 }
6423 goto do_ljmp;
6424 case 0xeb: /* jmp Jb */
6425 tval = (int8_t)insn_get(env, s, OT_BYTE);
6426 tval += s->pc - s->cs_base;
6427 if (s->dflag == 0)
6428 tval &= 0xffff;
6429 gen_jmp(s, tval);
6430 break;
6431 case 0x70 ... 0x7f: /* jcc Jb */
6432 tval = (int8_t)insn_get(env, s, OT_BYTE);
6433 goto do_jcc;
6434 case 0x180 ... 0x18f: /* jcc Jv */
6435 if (dflag) {
6436 tval = (int32_t)insn_get(env, s, OT_LONG);
6437 } else {
6438 tval = (int16_t)insn_get(env, s, OT_WORD);
6439 }
6440 do_jcc:
6441 next_eip = s->pc - s->cs_base;
6442 tval += next_eip;
6443 if (s->dflag == 0)
6444 tval &= 0xffff;
6445 gen_jcc(s, b, tval, next_eip);
6446 break;
6447
6448 case 0x190 ... 0x19f: /* setcc Gv */
6449 modrm = cpu_ldub_code(env, s->pc++);
6450 gen_setcc1(s, b, cpu_T[0]);
6451 gen_ldst_modrm(env, s, modrm, OT_BYTE, OR_TMP0, 1);
6452 break;
6453 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6454 ot = dflag + OT_WORD;
6455 modrm = cpu_ldub_code(env, s->pc++);
6456 reg = ((modrm >> 3) & 7) | rex_r;
6457 gen_cmovcc1(env, s, ot, b, modrm, reg);
6458 break;
6459
6460 /************************/
6461 /* flags */
6462 case 0x9c: /* pushf */
6463 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6464 if (s->vm86 && s->iopl != 3) {
6465 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6466 } else {
6467 gen_update_cc_op(s);
6468 gen_helper_read_eflags(cpu_T[0], cpu_env);
6469 gen_push_T0(s);
6470 }
6471 break;
6472 case 0x9d: /* popf */
6473 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6474 if (s->vm86 && s->iopl != 3) {
6475 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6476 } else {
6477 gen_pop_T0(s);
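/* Note: the writable eflags bits depend on privilege: CPL 0 may change
   IF and IOPL; CPL <= IOPL may change IF but not IOPL; otherwise
   neither.  TF, AC, ID and NT are always writable here, and the
   16-bit form leaves the high word untouched.  */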
6478 if (s->cpl == 0) {
6479 if (s->dflag) {
6480 gen_helper_write_eflags(cpu_env, cpu_T[0],
6481 tcg_const_i32((TF_MASK | AC_MASK |
6482 ID_MASK | NT_MASK |
6483 IF_MASK |
6484 IOPL_MASK)));
6485 } else {
6486 gen_helper_write_eflags(cpu_env, cpu_T[0],
6487 tcg_const_i32((TF_MASK | AC_MASK |
6488 ID_MASK | NT_MASK |
6489 IF_MASK | IOPL_MASK)
6490 & 0xffff));
6491 }
6492 } else {
6493 if (s->cpl <= s->iopl) {
6494 if (s->dflag) {
6495 gen_helper_write_eflags(cpu_env, cpu_T[0],
6496 tcg_const_i32((TF_MASK |
6497 AC_MASK |
6498 ID_MASK |
6499 NT_MASK |
6500 IF_MASK)));
6501 } else {
6502 gen_helper_write_eflags(cpu_env, cpu_T[0],
6503 tcg_const_i32((TF_MASK |
6504 AC_MASK |
6505 ID_MASK |
6506 NT_MASK |
6507 IF_MASK)
6508 & 0xffff));
6509 }
6510 } else {
6511 if (s->dflag) {
6512 gen_helper_write_eflags(cpu_env, cpu_T[0],
6513 tcg_const_i32((TF_MASK | AC_MASK |
6514 ID_MASK | NT_MASK)));
6515 } else {
6516 gen_helper_write_eflags(cpu_env, cpu_T[0],
6517 tcg_const_i32((TF_MASK | AC_MASK |
6518 ID_MASK | NT_MASK)
6519 & 0xffff));
6520 }
6521 }
6522 }
6523 gen_pop_update(s);
6524 set_cc_op(s, CC_OP_EFLAGS);
6525 /* abort translation because TF/AC flag may change */
6526 gen_jmp_im(s->pc - s->cs_base);
6527 gen_eob(s);
6528 }
6529 break;
6530 case 0x9e: /* sahf */
6531 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6532 goto illegal_op;
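/* Note: sahf loads SF, ZF, AF, PF and CF from AH; OF is the only
   status flag preserved from the previous value.  */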
6533 gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
6534 gen_compute_eflags(s);
6535 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6536 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6537 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6538 break;
6539 case 0x9f: /* lahf */
6540 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6541 goto illegal_op;
6542 gen_compute_eflags(s);
6543 /* Note: gen_compute_eflags() only gives the condition codes */
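/* Bit 1 of eflags always reads as one, hence the OR with 0x02.  */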
6544 tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
6545 gen_op_mov_reg_T0(OT_BYTE, R_AH);
6546 break;
6547 case 0xf5: /* cmc */
6548 gen_compute_eflags(s);
6549 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6550 break;
6551 case 0xf8: /* clc */
6552 gen_compute_eflags(s);
6553 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6554 break;
6555 case 0xf9: /* stc */
6556 gen_compute_eflags(s);
6557 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6558 break;
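/* Note: df is stored as +1/-1 so the string op helpers can advance
   ESI/EDI by simply adding df shifted by the operand size.  */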
6559 case 0xfc: /* cld */
6560 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6561 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6562 break;
6563 case 0xfd: /* std */
6564 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6565 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6566 break;
6567
6568 /************************/
6569 /* bit operations */
6570 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6571 ot = dflag + OT_WORD;
6572 modrm = cpu_ldub_code(env, s->pc++);
6573 op = (modrm >> 3) & 7;
6574 mod = (modrm >> 6) & 3;
6575 rm = (modrm & 7) | REX_B(s);
6576 if (mod != 3) {
6577 s->rip_offset = 1;
6578 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
6579 gen_op_ld_T0_A0(ot + s->mem_index);
6580 } else {
6581 gen_op_mov_TN_reg(ot, 0, rm);
6582 }
6583 /* load shift */
6584 val = cpu_ldub_code(env, s->pc++);
6585 gen_op_movl_T1_im(val);
6586 if (op < 4)
6587 goto illegal_op;
6588 op -= 4;
6589 goto bt_op;
6590 case 0x1a3: /* bt Gv, Ev */
6591 op = 0;
6592 goto do_btx;
6593 case 0x1ab: /* bts */
6594 op = 1;
6595 goto do_btx;
6596 case 0x1b3: /* btr */
6597 op = 2;
6598 goto do_btx;
6599 case 0x1bb: /* btc */
6600 op = 3;
6601 do_btx:
6602 ot = dflag + OT_WORD;
6603 modrm = cpu_ldub_code(env, s->pc++);
6604 reg = ((modrm >> 3) & 7) | rex_r;
6605 mod = (modrm >> 6) & 3;
6606 rm = (modrm & 7) | REX_B(s);
6607 gen_op_mov_TN_reg(OT_LONG, 1, reg);
6608 if (mod != 3) {
6609 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
6610 /* specific case: we need to add a displacement */
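/* The bit offset is signed and may address beyond the operand,
   e.g. "bt dword ptr [mem], 37" tests bit 5 of the dword at mem+4:
   the arithmetic shift right by 3+ot gives the element index, and
   the shift left by ot scales it back to bytes.  */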
6611 gen_exts(ot, cpu_T[1]);
6612 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6613 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6614 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6615 gen_op_ld_T0_A0(ot + s->mem_index);
6616 } else {
6617 gen_op_mov_TN_reg(ot, 0, rm);
6618 }
6619 bt_op:
6620 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6621 switch(op) {
6622 case 0:
6623 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
6624 tcg_gen_movi_tl(cpu_cc_dst, 0);
6625 break;
6626 case 1:
6627 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6628 tcg_gen_movi_tl(cpu_tmp0, 1);
6629 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6630 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6631 break;
6632 case 2:
6633 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6634 tcg_gen_movi_tl(cpu_tmp0, 1);
6635 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6636 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6637 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6638 break;
6639 default:
6640 case 3:
6641 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6642 tcg_gen_movi_tl(cpu_tmp0, 1);
6643 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6644 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6645 break;
6646 }
6647 set_cc_op(s, CC_OP_SARB + ot);
6648 if (op != 0) {
6649 if (mod != 3)
6650 gen_op_st_T0_A0(ot + s->mem_index);
6651 else
6652 gen_op_mov_reg_T0(ot, rm);
6653 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6654 tcg_gen_movi_tl(cpu_cc_dst, 0);
6655 }
6656 break;
6657 case 0x1bc: /* bsf */
6658 case 0x1bd: /* bsr */
6659 {
6660 int label1;
6661 TCGv t0;
6662
6663 ot = dflag + OT_WORD;
6664 modrm = cpu_ldub_code(env, s->pc++);
6665 reg = ((modrm >> 3) & 7) | rex_r;
6666 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6667 gen_extu(ot, cpu_T[0]);
6668 t0 = tcg_temp_local_new();
6669 tcg_gen_mov_tl(t0, cpu_T[0]);
6670 if ((b & 1) && (prefixes & PREFIX_REPZ) &&
6671 (s->cpuid_ext3_features & CPUID_EXT3_ABM)) {
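/* repz + ABM turns bsr into lzcnt, which unlike bsr/bsf is well
   defined for a zero source, so no branch is needed.  */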
6672 switch(ot) {
6673 case OT_WORD: gen_helper_lzcnt(cpu_T[0], t0,
6674 tcg_const_i32(16)); break;
6675 case OT_LONG: gen_helper_lzcnt(cpu_T[0], t0,
6676 tcg_const_i32(32)); break;
6677 case OT_QUAD: gen_helper_lzcnt(cpu_T[0], t0,
6678 tcg_const_i32(64)); break;
6679 }
6680 gen_op_mov_reg_T0(ot, reg);
6681 } else {
6682 label1 = gen_new_label();
6683 tcg_gen_movi_tl(cpu_cc_dst, 0);
6684 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
6685 if (b & 1) {
6686 gen_helper_bsr(cpu_T[0], t0);
6687 } else {
6688 gen_helper_bsf(cpu_T[0], t0);
6689 }
6690 gen_op_mov_reg_T0(ot, reg);
6691 tcg_gen_movi_tl(cpu_cc_dst, 1);
6692 gen_set_label(label1);
6693 set_cc_op(s, CC_OP_LOGICB + ot);
6694 }
6695 tcg_temp_free(t0);
6696 }
6697 break;
6698 /************************/
6699 /* bcd */
6700 case 0x27: /* daa */
6701 if (CODE64(s))
6702 goto illegal_op;
6703 gen_update_cc_op(s);
6704 gen_helper_daa(cpu_env);
6705 set_cc_op(s, CC_OP_EFLAGS);
6706 break;
6707 case 0x2f: /* das */
6708 if (CODE64(s))
6709 goto illegal_op;
6710 gen_update_cc_op(s);
6711 gen_helper_das(cpu_env);
6712 set_cc_op(s, CC_OP_EFLAGS);
6713 break;
6714 case 0x37: /* aaa */
6715 if (CODE64(s))
6716 goto illegal_op;
6717 gen_update_cc_op(s);
6718 gen_helper_aaa(cpu_env);
6719 set_cc_op(s, CC_OP_EFLAGS);
6720 break;
6721 case 0x3f: /* aas */
6722 if (CODE64(s))
6723 goto illegal_op;
6724 gen_update_cc_op(s);
6725 gen_helper_aas(cpu_env);
6726 set_cc_op(s, CC_OP_EFLAGS);
6727 break;
6728 case 0xd4: /* aam */
6729 if (CODE64(s))
6730 goto illegal_op;
6731 val = cpu_ldub_code(env, s->pc++);
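/* Note: aam divides AL by the immediate (AH = AL / imm,
   AL = AL % imm), so an immediate of zero must raise a divide
   error.  */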
6732 if (val == 0) {
6733 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6734 } else {
6735 gen_helper_aam(cpu_env, tcg_const_i32(val));
6736 set_cc_op(s, CC_OP_LOGICB);
6737 }
6738 break;
6739 case 0xd5: /* aad */
6740 if (CODE64(s))
6741 goto illegal_op;
6742 val = cpu_ldub_code(env, s->pc++);
6743 gen_helper_aad(cpu_env, tcg_const_i32(val));
6744 set_cc_op(s, CC_OP_LOGICB);
6745 break;
6746 /************************/
6747 /* misc */
6748 case 0x90: /* nop */
6749 /* XXX: correct lock test for all insns */
6750 if (prefixes & PREFIX_LOCK) {
6751 goto illegal_op;
6752 }
6753 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6754 if (REX_B(s)) {
6755 goto do_xchg_reg_eax;
6756 }
6757 if (prefixes & PREFIX_REPZ) {
6758 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
6759 }
6760 break;
6761 case 0x9b: /* fwait */
6762 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6763 (HF_MP_MASK | HF_TS_MASK)) {
6764 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6765 } else {
6766 gen_update_cc_op(s);
6767 gen_jmp_im(pc_start - s->cs_base);
6768 gen_helper_fwait(cpu_env);
6769 }
6770 break;
6771 case 0xcc: /* int3 */
6772 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6773 break;
6774 case 0xcd: /* int N */
6775 val = cpu_ldub_code(env, s->pc++);
6776 if (s->vm86 && s->iopl != 3) {
6777 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6778 } else {
6779 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6780 }
6781 break;
6782 case 0xce: /* into */
6783 if (CODE64(s))
6784 goto illegal_op;
6785 gen_update_cc_op(s);
6786 gen_jmp_im(pc_start - s->cs_base);
6787 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
6788 break;
6789 #ifdef WANT_ICEBP
6790 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6791 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6792 #if 1
6793 gen_debug(s, pc_start - s->cs_base);
6794 #else
6795 /* start debug */
6796 tb_flush(env);
6797 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6798 #endif
6799 break;
6800 #endif
6801 case 0xfa: /* cli */
6802 if (!s->vm86) {
6803 if (s->cpl <= s->iopl) {
6804 gen_helper_cli(cpu_env);
6805 } else {
6806 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6807 }
6808 } else {
6809 if (s->iopl == 3) {
6810 gen_helper_cli(cpu_env);
6811 } else {
6812 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6813 }
6814 }
6815 break;
6816 case 0xfb: /* sti */
6817 if (!s->vm86) {
6818 if (s->cpl <= s->iopl) {
6819 gen_sti:
6820 gen_helper_sti(cpu_env);
6821 /* Interrupts are enabled only after the first insn following sti. */
6822 /* If several consecutive insns inhibit interrupts, only the
6823 _first_ one sets the inhibit flag. */
6824 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
6825 gen_helper_set_inhibit_irq(cpu_env);
6826 /* give a chance to handle pending irqs */
6827 gen_jmp_im(s->pc - s->cs_base);
6828 gen_eob(s);
6829 } else {
6830 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6831 }
6832 } else {
6833 if (s->iopl == 3) {
6834 goto gen_sti;
6835 } else {
6836 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6837 }
6838 }
6839 break;
6840 case 0x62: /* bound */
6841 if (CODE64(s))
6842 goto illegal_op;
6843 ot = dflag ? OT_LONG : OT_WORD;
6844 modrm = cpu_ldub_code(env, s->pc++);
6845 reg = (modrm >> 3) & 7;
6846 mod = (modrm >> 6) & 3;
6847 if (mod == 3)
6848 goto illegal_op;
6849 gen_op_mov_TN_reg(ot, 0, reg);
6850 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
6851 gen_jmp_im(pc_start - s->cs_base);
6852 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6853 if (ot == OT_WORD) {
6854 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
6855 } else {
6856 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
6857 }
6858 break;
6859 case 0x1c8 ... 0x1cf: /* bswap reg */
6860 reg = (b & 7) | REX_B(s);
6861 #ifdef TARGET_X86_64
6862 if (dflag == 2) {
6863 gen_op_mov_TN_reg(OT_QUAD, 0, reg);
6864 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
6865 gen_op_mov_reg_T0(OT_QUAD, reg);
6866 } else
6867 #endif
6868 {
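/* Zero-extend first so the byte swap operates on a well-defined
   32-bit value even when the target register is 64 bits wide.  */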
6869 gen_op_mov_TN_reg(OT_LONG, 0, reg);
6870 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
6871 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
6872 gen_op_mov_reg_T0(OT_LONG, reg);
6873 }
6874 break;
6875 case 0xd6: /* salc */
6876 if (CODE64(s))
6877 goto illegal_op;
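/* Note: salc sets AL to 0xff if CF is set and to 0x00 otherwise,
   i.e. AL = -CF.  */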
6878 gen_compute_eflags_c(s, cpu_T[0]);
6879 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
6880 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
6881 break;
6882 case 0xe0: /* loopnz */
6883 case 0xe1: /* loopz */
6884 case 0xe2: /* loop */
6885 case 0xe3: /* jecxz */
6886 {
6887 int l1, l2, l3;
6888
6889 tval = (int8_t)insn_get(env, s, OT_BYTE);
6890 next_eip = s->pc - s->cs_base;
6891 tval += next_eip;
6892 if (s->dflag == 0)
6893 tval &= 0xffff;
6894
6895 l1 = gen_new_label();
6896 l2 = gen_new_label();
6897 l3 = gen_new_label();
6898 b &= 3;
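/* l1: branch taken, jump to the target; l3: loop condition failed,
   fall through to the next insn; l2: common exit.  */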
6899 switch(b) {
6900 case 0: /* loopnz */
6901 case 1: /* loopz */
6902 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6903 gen_op_jz_ecx(s->aflag, l3);
6904 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
6905 break;
6906 case 2: /* loop */
6907 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6908 gen_op_jnz_ecx(s->aflag, l1);
6909 break;
6910 default:
6911 case 3: /* jcxz */
6912 gen_op_jz_ecx(s->aflag, l1);
6913 break;
6914 }
6915
6916 gen_set_label(l3);
6917 gen_jmp_im(next_eip);
6918 tcg_gen_br(l2);
6919
6920 gen_set_label(l1);
6921 gen_jmp_im(tval);
6922 gen_set_label(l2);
6923 gen_eob(s);
6924 }
6925 break;
6926 case 0x130: /* wrmsr */
6927 case 0x132: /* rdmsr */
6928 if (s->cpl != 0) {
6929 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6930 } else {
6931 gen_update_cc_op(s);
6932 gen_jmp_im(pc_start - s->cs_base);
6933 if (b & 2) {
6934 gen_helper_rdmsr(cpu_env);
6935 } else {
6936 gen_helper_wrmsr(cpu_env);
6937 }
6938 }
6939 break;
6940 case 0x131: /* rdtsc */
6941 gen_update_cc_op(s);
6942 gen_jmp_im(pc_start - s->cs_base);
6943 if (use_icount)
6944 gen_io_start();
6945 gen_helper_rdtsc(cpu_env);
6946 if (use_icount) {
6947 gen_io_end();
6948 gen_jmp(s, s->pc - s->cs_base);
6949 }
6950 break;
6951 case 0x133: /* rdpmc */
6952 gen_update_cc_op(s);
6953 gen_jmp_im(pc_start - s->cs_base);
6954 gen_helper_rdpmc(cpu_env);
6955 break;
6956 case 0x134: /* sysenter */
6957 /* On Intel CPUs, SYSENTER is also valid in 64-bit mode */
6958 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
6959 goto illegal_op;
6960 if (!s->pe) {
6961 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6962 } else {
6963 gen_update_cc_op(s);
6964 gen_jmp_im(pc_start - s->cs_base);
6965 gen_helper_sysenter(cpu_env);
6966 gen_eob(s);
6967 }
6968 break;
6969 case 0x135: /* sysexit */
6970 /* On Intel CPUs, SYSEXIT is also valid in 64-bit mode */
6971 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
6972 goto illegal_op;
6973 if (!s->pe) {
6974 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6975 } else {
6976 gen_update_cc_op(s);
6977 gen_jmp_im(pc_start - s->cs_base);
6978 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag));
6979 gen_eob(s);
6980 }
6981 break;
6982 #ifdef TARGET_X86_64
6983 case 0x105: /* syscall */
6984 /* XXX: is it usable in real mode? */
6985 gen_update_cc_op(s);
6986 gen_jmp_im(pc_start - s->cs_base);
6987 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
6988 gen_eob(s);
6989 break;
6990 case 0x107: /* sysret */
6991 if (!s->pe) {
6992 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6993 } else {
6994 gen_update_cc_op(s);
6995 gen_jmp_im(pc_start - s->cs_base);
6996 gen_helper_sysret(cpu_env, tcg_const_i32(s->dflag));
6997 /* condition codes are modified only in long mode */
6998 if (s->lma) {
6999 set_cc_op(s, CC_OP_EFLAGS);
7000 }
7001 gen_eob(s);
7002 }
7003 break;
7004 #endif
7005 case 0x1a2: /* cpuid */
7006 gen_update_cc_op(s);
7007 gen_jmp_im(pc_start - s->cs_base);
7008 gen_helper_cpuid(cpu_env);
7009 break;
7010 case 0xf4: /* hlt */
7011 if (s->cpl != 0) {
7012 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7013 } else {
7014 gen_update_cc_op(s);
7015 gen_jmp_im(pc_start - s->cs_base);
7016 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
7017 s->is_jmp = DISAS_TB_JUMP;
7018 }
7019 break;
7020 case 0x100:
7021 modrm = cpu_ldub_code(env, s->pc++);
7022 mod = (modrm >> 6) & 3;
7023 op = (modrm >> 3) & 7;
7024 switch(op) {
7025 case 0: /* sldt */
7026 if (!s->pe || s->vm86)
7027 goto illegal_op;
7028 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7029 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
7030 ot = OT_WORD;
7031 if (mod == 3)
7032 ot += s->dflag;
7033 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7034 break;
7035 case 2: /* lldt */
7036 if (!s->pe || s->vm86)
7037 goto illegal_op;
7038 if (s->cpl != 0) {
7039 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7040 } else {
7041 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7042 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7043 gen_jmp_im(pc_start - s->cs_base);
7044 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7045 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
7046 }
7047 break;
7048 case 1: /* str */
7049 if (!s->pe || s->vm86)
7050 goto illegal_op;
7051 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7052 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
7053 ot = OT_WORD;
7054 if (mod == 3)
7055 ot += s->dflag;
7056 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7057 break;
7058 case 3: /* ltr */
7059 if (!s->pe || s->vm86)
7060 goto illegal_op;
7061 if (s->cpl != 0) {
7062 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7063 } else {
7064 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7065 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7066 gen_jmp_im(pc_start - s->cs_base);
7067 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7068 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
7069 }
7070 break;
7071 case 4: /* verr */
7072 case 5: /* verw */
7073 if (!s->pe || s->vm86)
7074 goto illegal_op;
7075 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7076 gen_update_cc_op(s);
7077 if (op == 4) {
7078 gen_helper_verr(cpu_env, cpu_T[0]);
7079 } else {
7080 gen_helper_verw(cpu_env, cpu_T[0]);
7081 }
7082 set_cc_op(s, CC_OP_EFLAGS);
7083 break;
7084 default:
7085 goto illegal_op;
7086 }
7087 break;
7088 case 0x101:
7089 modrm = cpu_ldub_code(env, s->pc++);
7090 mod = (modrm >> 6) & 3;
7091 op = (modrm >> 3) & 7;
7092 rm = modrm & 7;
7093 switch(op) {
7094 case 0: /* sgdt */
7095 if (mod == 3)
7096 goto illegal_op;
7097 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7098 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7099 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
7100 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7101 gen_add_A0_im(s, 2);
7102 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
7103 if (!s->dflag)
7104 gen_op_andl_T0_im(0xffffff);
7105 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7106 break;
7107 case 1:
7108 if (mod == 3) {
7109 switch (rm) {
7110 case 0: /* monitor */
7111 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7112 s->cpl != 0)
7113 goto illegal_op;
7114 gen_update_cc_op(s);
7115 gen_jmp_im(pc_start - s->cs_base);
7116 #ifdef TARGET_X86_64
7117 if (s->aflag == 2) {
7118 gen_op_movq_A0_reg(R_EAX);
7119 } else
7120 #endif
7121 {
7122 gen_op_movl_A0_reg(R_EAX);
7123 if (s->aflag == 0)
7124 gen_op_andl_A0_ffff();
7125 }
7126 gen_add_A0_ds_seg(s);
7127 gen_helper_monitor(cpu_env, cpu_A0);
7128 break;
7129 case 1: /* mwait */
7130 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7131 s->cpl != 0)
7132 goto illegal_op;
7133 gen_update_cc_op(s);
7134 gen_jmp_im(pc_start - s->cs_base);
7135 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
7136 gen_eob(s);
7137 break;
7138 case 2: /* clac */
7139 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7140 s->cpl != 0) {
7141 goto illegal_op;
7142 }
7143 gen_helper_clac(cpu_env);
7144 gen_jmp_im(s->pc - s->cs_base);
7145 gen_eob(s);
7146 break;
7147 case 3: /* stac */
7148 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7149 s->cpl != 0) {
7150 goto illegal_op;
7151 }
7152 gen_helper_stac(cpu_env);
7153 gen_jmp_im(s->pc - s->cs_base);
7154 gen_eob(s);
7155 break;
7156 default:
7157 goto illegal_op;
7158 }
7159 } else { /* sidt */
7160 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7161 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7162 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7163 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7164 gen_add_A0_im(s, 2);
7165 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7166 if (!s->dflag)
7167 gen_op_andl_T0_im(0xffffff);
7168 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7169 }
7170 break;
7171 case 2: /* lgdt */
7172 case 3: /* lidt */
7173 if (mod == 3) {
7174 gen_update_cc_op(s);
7175 gen_jmp_im(pc_start - s->cs_base);
7176 switch(rm) {
7177 case 0: /* VMRUN */
7178 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7179 goto illegal_op;
7180 if (s->cpl != 0) {
7181 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7182 break;
7183 } else {
7184 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag),
7185 tcg_const_i32(s->pc - pc_start));
7186 tcg_gen_exit_tb(0);
7187 s->is_jmp = DISAS_TB_JUMP;
7188 }
7189 break;
7190 case 1: /* VMMCALL */
7191 if (!(s->flags & HF_SVME_MASK))
7192 goto illegal_op;
7193 gen_helper_vmmcall(cpu_env);
7194 break;
7195 case 2: /* VMLOAD */
7196 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7197 goto illegal_op;
7198 if (s->cpl != 0) {
7199 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7200 break;
7201 } else {
7202 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag));
7203 }
7204 break;
7205 case 3: /* VMSAVE */
7206 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7207 goto illegal_op;
7208 if (s->cpl != 0) {
7209 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7210 break;
7211 } else {
7212 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag));
7213 }
7214 break;
7215 case 4: /* STGI */
7216 if ((!(s->flags & HF_SVME_MASK) &&
7217 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7218 !s->pe)
7219 goto illegal_op;
7220 if (s->cpl != 0) {
7221 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7222 break;
7223 } else {
7224 gen_helper_stgi(cpu_env);
7225 }
7226 break;
7227 case 5: /* CLGI */
7228 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7229 goto illegal_op;
7230 if (s->cpl != 0) {
7231 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7232 break;
7233 } else {
7234 gen_helper_clgi(cpu_env);
7235 }
7236 break;
7237 case 6: /* SKINIT */
7238 if ((!(s->flags & HF_SVME_MASK) &&
7239 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7240 !s->pe)
7241 goto illegal_op;
7242 gen_helper_skinit(cpu_env);
7243 break;
7244 case 7: /* INVLPGA */
7245 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7246 goto illegal_op;
7247 if (s->cpl != 0) {
7248 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7249 break;
7250 } else {
7251 gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag));
7252 }
7253 break;
7254 default:
7255 goto illegal_op;
7256 }
7257 } else if (s->cpl != 0) {
7258 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7259 } else {
7260 gen_svm_check_intercept(s, pc_start,
7261 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7262 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7263 gen_op_ld_T1_A0(OT_WORD + s->mem_index);
7264 gen_add_A0_im(s, 2);
7265 gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7266 if (!s->dflag)
7267 gen_op_andl_T0_im(0xffffff);
7268 if (op == 2) {
7269 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7270 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7271 } else {
7272 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7273 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7274 }
7275 }
7276 break;
7277 case 4: /* smsw */
7278 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
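/* smsw reads the low 32 bits of CR0; on a big-endian host they
   live in the high-address half of the 64-bit field.  */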
7279 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7280 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7281 #else
7282 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7283 #endif
7284 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 1);
7285 break;
7286 case 6: /* lmsw */
7287 if (s->cpl != 0) {
7288 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7289 } else {
7290 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7291 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7292 gen_helper_lmsw(cpu_env, cpu_T[0]);
7293 gen_jmp_im(s->pc - s->cs_base);
7294 gen_eob(s);
7295 }
7296 break;
7297 case 7:
7298 if (mod != 3) { /* invlpg */
7299 if (s->cpl != 0) {
7300 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7301 } else {
7302 gen_update_cc_op(s);
7303 gen_jmp_im(pc_start - s->cs_base);
7304 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7305 gen_helper_invlpg(cpu_env, cpu_A0);
7306 gen_jmp_im(s->pc - s->cs_base);
7307 gen_eob(s);
7308 }
7309 } else {
7310 switch (rm) {
7311 case 0: /* swapgs */
7312 #ifdef TARGET_X86_64
7313 if (CODE64(s)) {
7314 if (s->cpl != 0) {
7315 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7316 } else {
7317 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7318 offsetof(CPUX86State,segs[R_GS].base));
7319 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7320 offsetof(CPUX86State,kernelgsbase));
7321 tcg_gen_st_tl(cpu_T[1], cpu_env,
7322 offsetof(CPUX86State,segs[R_GS].base));
7323 tcg_gen_st_tl(cpu_T[0], cpu_env,
7324 offsetof(CPUX86State,kernelgsbase));
7325 }
7326 } else
7327 #endif
7328 {
7329 goto illegal_op;
7330 }
7331 break;
7332 case 1: /* rdtscp */
7333 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7334 goto illegal_op;
7335 gen_update_cc_op(s);
7336 gen_jmp_im(pc_start - s->cs_base);
7337 if (use_icount)
7338 gen_io_start();
7339 gen_helper_rdtscp(cpu_env);
7340 if (use_icount) {
7341 gen_io_end();
7342 gen_jmp(s, s->pc - s->cs_base);
7343 }
7344 break;
7345 default:
7346 goto illegal_op;
7347 }
7348 }
7349 break;
7350 default:
7351 goto illegal_op;
7352 }
7353 break;
7354 case 0x108: /* invd */
7355 case 0x109: /* wbinvd */
7356 if (s->cpl != 0) {
7357 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7358 } else {
7359 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7360 /* nothing to do */
7361 }
7362 break;
7363 case 0x63: /* arpl or movslS (x86_64) */
7364 #ifdef TARGET_X86_64
7365 if (CODE64(s)) {
7366 int d_ot;
7367 /* d_ot is the destination operand size */
7368 d_ot = dflag + OT_WORD;
7369
7370 modrm = cpu_ldub_code(env, s->pc++);
7371 reg = ((modrm >> 3) & 7) | rex_r;
7372 mod = (modrm >> 6) & 3;
7373 rm = (modrm & 7) | REX_B(s);
7374
7375 if (mod == 3) {
7376 gen_op_mov_TN_reg(OT_LONG, 0, rm);
7377 /* sign extend */
7378 if (d_ot == OT_QUAD)
7379 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7380 gen_op_mov_reg_T0(d_ot, reg);
7381 } else {
7382 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7383 if (d_ot == OT_QUAD) {
7384 gen_op_lds_T0_A0(OT_LONG + s->mem_index);
7385 } else {
7386 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7387 }
7388 gen_op_mov_reg_T0(d_ot, reg);
7389 }
7390 } else
7391 #endif
7392 {
7393 int label1;
7394 TCGv t0, t1, t2, a0;
7395
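/* arpl: if the RPL (low two bits) of the destination selector is
   below the RPL of the source, raise it to the source RPL and
   set ZF, otherwise clear ZF.  */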
7396 if (!s->pe || s->vm86)
7397 goto illegal_op;
7398 t0 = tcg_temp_local_new();
7399 t1 = tcg_temp_local_new();
7400 t2 = tcg_temp_local_new();
7401 ot = OT_WORD;
7402 modrm = cpu_ldub_code(env, s->pc++);
7403 reg = (modrm >> 3) & 7;
7404 mod = (modrm >> 6) & 3;
7405 rm = modrm & 7;
7406 if (mod != 3) {
7407 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7408 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
7409 a0 = tcg_temp_local_new();
7410 tcg_gen_mov_tl(a0, cpu_A0);
7411 } else {
7412 gen_op_mov_v_reg(ot, t0, rm);
7413 TCGV_UNUSED(a0);
7414 }
7415 gen_op_mov_v_reg(ot, t1, reg);
7416 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7417 tcg_gen_andi_tl(t1, t1, 3);
7418 tcg_gen_movi_tl(t2, 0);
7419 label1 = gen_new_label();
7420 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7421 tcg_gen_andi_tl(t0, t0, ~3);
7422 tcg_gen_or_tl(t0, t0, t1);
7423 tcg_gen_movi_tl(t2, CC_Z);
7424 gen_set_label(label1);
7425 if (mod != 3) {
7426 gen_op_st_v(ot + s->mem_index, t0, a0);
7427 tcg_temp_free(a0);
7428 } else {
7429 gen_op_mov_reg_v(ot, rm, t0);
7430 }
7431 gen_compute_eflags(s);
7432 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7433 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7434 tcg_temp_free(t0);
7435 tcg_temp_free(t1);
7436 tcg_temp_free(t2);
7437 }
7438 break;
7439 case 0x102: /* lar */
7440 case 0x103: /* lsl */
7441 {
7442 int label1;
7443 TCGv t0;
7444 if (!s->pe || s->vm86)
7445 goto illegal_op;
7446 ot = dflag ? OT_LONG : OT_WORD;
7447 modrm = cpu_ldub_code(env, s->pc++);
7448 reg = ((modrm >> 3) & 7) | rex_r;
7449 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7450 t0 = tcg_temp_local_new();
7451 gen_update_cc_op(s);
7452 if (b == 0x102) {
7453 gen_helper_lar(t0, cpu_env, cpu_T[0]);
7454 } else {
7455 gen_helper_lsl(t0, cpu_env, cpu_T[0]);
7456 }
7457 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7458 label1 = gen_new_label();
7459 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7460 gen_op_mov_reg_v(ot, reg, t0);
7461 gen_set_label(label1);
7462 set_cc_op(s, CC_OP_EFLAGS);
7463 tcg_temp_free(t0);
7464 }
7465 break;
7466 case 0x118:
7467 modrm = cpu_ldub_code(env, s->pc++);
7468 mod = (modrm >> 6) & 3;
7469 op = (modrm >> 3) & 7;
7470 switch(op) {
7471 case 0: /* prefetchnta */
7472 case 1: /* prefetcht0 */
7473 case 2: /* prefetcht1 */
7474 case 3: /* prefetcht2 */
7475 if (mod == 3)
7476 goto illegal_op;
7477 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7478 /* nothing more to do */
7479 break;
7480 default: /* nop (multi byte) */
7481 gen_nop_modrm(env, s, modrm);
7482 break;
7483 }
7484 break;
7485 case 0x119 ... 0x11f: /* nop (multi byte) */
7486 modrm = cpu_ldub_code(env, s->pc++);
7487 gen_nop_modrm(env, s, modrm);
7488 break;
7489 case 0x120: /* mov reg, crN */
7490 case 0x122: /* mov crN, reg */
7491 if (s->cpl != 0) {
7492 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7493 } else {
7494 modrm = cpu_ldub_code(env, s->pc++);
7495 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7496 * AMD documentation (24594.pdf) and testing of
7497 * Intel 386 and 486 processors all show that the mod bits
7498 * are assumed to be 1's, regardless of actual values.
7499 */
7500 rm = (modrm & 7) | REX_B(s);
7501 reg = ((modrm >> 3) & 7) | rex_r;
7502 if (CODE64(s))
7503 ot = OT_QUAD;
7504 else
7505 ot = OT_LONG;
7506 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7507 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7508 reg = 8;
7509 }
7510 switch(reg) {
7511 case 0:
7512 case 2:
7513 case 3:
7514 case 4:
7515 case 8:
7516 gen_update_cc_op(s);
7517 gen_jmp_im(pc_start - s->cs_base);
7518 if (b & 2) {
7519 gen_op_mov_TN_reg(ot, 0, rm);
7520 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7521 cpu_T[0]);
7522 gen_jmp_im(s->pc - s->cs_base);
7523 gen_eob(s);
7524 } else {
7525 gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
7526 gen_op_mov_reg_T0(ot, rm);
7527 }
7528 break;
7529 default:
7530 goto illegal_op;
7531 }
7532 }
7533 break;
7534 case 0x121: /* mov reg, drN */
7535 case 0x123: /* mov drN, reg */
7536 if (s->cpl != 0) {
7537 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7538 } else {
7539 modrm = cpu_ldub_code(env, s->pc++);
7540 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7541 * AMD documentation (24594.pdf) and testing of
7542 * Intel 386 and 486 processors all show that the mod bits
7543 * are assumed to be 1's, regardless of actual values.
7544 */
7545 rm = (modrm & 7) | REX_B(s);
7546 reg = ((modrm >> 3) & 7) | rex_r;
7547 if (CODE64(s))
7548 ot = OT_QUAD;
7549 else
7550 ot = OT_LONG;
7551 /* XXX: do it dynamically with CR4.DE bit */
7552 if (reg == 4 || reg == 5 || reg >= 8)
7553 goto illegal_op;
7554 if (b & 2) {
7555 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7556 gen_op_mov_TN_reg(ot, 0, rm);
7557 gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
7558 gen_jmp_im(s->pc - s->cs_base);
7559 gen_eob(s);
7560 } else {
7561 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7562 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
7563 gen_op_mov_reg_T0(ot, rm);
7564 }
7565 }
7566 break;
7567 case 0x106: /* clts */
7568 if (s->cpl != 0) {
7569 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7570 } else {
7571 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7572 gen_helper_clts(cpu_env);
7573 /* abort block because static cpu state changed */
7574 gen_jmp_im(s->pc - s->cs_base);
7575 gen_eob(s);
7576 }
7577 break;
7578 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7579 case 0x1c3: /* MOVNTI reg, mem */
7580 if (!(s->cpuid_features & CPUID_SSE2))
7581 goto illegal_op;
7582 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
7583 modrm = cpu_ldub_code(env, s->pc++);
7584 mod = (modrm >> 6) & 3;
7585 if (mod == 3)
7586 goto illegal_op;
7587 reg = ((modrm >> 3) & 7) | rex_r;
7588 /* generate a generic store */
7589 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
7590 break;
7591 case 0x1ae:
7592 modrm = cpu_ldub_code(env, s->pc++);
7593 mod = (modrm >> 6) & 3;
7594 op = (modrm >> 3) & 7;
7595 switch(op) {
7596 case 0: /* fxsave */
7597 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7598 (s->prefix & PREFIX_LOCK))
7599 goto illegal_op;
7600 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7601 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7602 break;
7603 }
7604 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7605 gen_update_cc_op(s);
7606 gen_jmp_im(pc_start - s->cs_base);
7607 gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32((s->dflag == 2)));
7608 break;
7609 case 1: /* fxrstor */
7610 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7611 (s->prefix & PREFIX_LOCK))
7612 goto illegal_op;
7613 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7614 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7615 break;
7616 }
7617 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7618 gen_update_cc_op(s);
7619 gen_jmp_im(pc_start - s->cs_base);
7620 gen_helper_fxrstor(cpu_env, cpu_A0,
7621 tcg_const_i32((s->dflag == 2)));
7622 break;
7623 case 2: /* ldmxcsr */
7624 case 3: /* stmxcsr */
7625 if (s->flags & HF_TS_MASK) {
7626 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7627 break;
7628 }
7629 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7630 mod == 3)
7631 goto illegal_op;
7632 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7633 if (op == 2) {
7634 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7635 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7636 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
7637 } else {
7638 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7639 gen_op_st_T0_A0(OT_LONG + s->mem_index);
7640 }
7641 break;
7642 case 5: /* lfence */
7643 case 6: /* mfence */
7644 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
7645 goto illegal_op;
7646 break;
7647 case 7: /* sfence / clflush */
7648 if ((modrm & 0xc7) == 0xc0) {
7649 /* sfence */
7650 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7651 if (!(s->cpuid_features & CPUID_SSE))
7652 goto illegal_op;
7653 } else {
7654 /* clflush */
7655 if (!(s->cpuid_features & CPUID_CLFLUSH))
7656 goto illegal_op;
7657 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7658 }
7659 break;
7660 default:
7661 goto illegal_op;
7662 }
7663 break;
7664 case 0x10d: /* 3DNow! prefetch(w) */
7665 modrm = cpu_ldub_code(env, s->pc++);
7666 mod = (modrm >> 6) & 3;
7667 if (mod == 3)
7668 goto illegal_op;
7669 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7670 /* ignore for now */
7671 break;
7672 case 0x1aa: /* rsm */
7673 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
7674 if (!(s->flags & HF_SMM_MASK))
7675 goto illegal_op;
7676 gen_update_cc_op(s);
7677 gen_jmp_im(s->pc - s->cs_base);
7678 gen_helper_rsm(cpu_env);
7679 gen_eob(s);
7680 break;
7681 case 0x1b8: /* SSE4.2 popcnt */
7682 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7683 PREFIX_REPZ)
7684 goto illegal_op;
7685 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7686 goto illegal_op;
7687
7688 modrm = cpu_ldub_code(env, s->pc++);
7689 reg = ((modrm >> 3) & 7) | rex_r;
7690
7691 if (s->prefix & PREFIX_DATA)
7692 ot = OT_WORD;
7693 else if (s->dflag != 2)
7694 ot = OT_LONG;
7695 else
7696 ot = OT_QUAD;
7697
7698 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
7699 gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
7700 gen_op_mov_reg_T0(ot, reg);
7701
7702 set_cc_op(s, CC_OP_EFLAGS);
7703 break;
7704 case 0x10e ... 0x10f:
7705 /* 3DNow! instructions, ignore prefixes */
7706 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
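/* fall through to the common SSE dispatch below */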
7707 case 0x110 ... 0x117:
7708 case 0x128 ... 0x12f:
7709 case 0x138 ... 0x13a:
7710 case 0x150 ... 0x179:
7711 case 0x17c ... 0x17f:
7712 case 0x1c2:
7713 case 0x1c4 ... 0x1c6:
7714 case 0x1d0 ... 0x1fe:
7715 gen_sse(env, s, b, pc_start, rex_r);
7716 break;
7717 default:
7718 goto illegal_op;
7719 }
7720 /* lock generation */
7721 if (s->prefix & PREFIX_LOCK)
7722 gen_helper_unlock();
7723 return s->pc;
7724 illegal_op:
7725 if (s->prefix & PREFIX_LOCK)
7726 gen_helper_unlock();
7727 /* XXX: ensure that no lock was generated */
7728 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7729 return s->pc;
7730 }
7731
7732 void optimize_flags_init(void)
7733 {
7734 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7735 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
7736 offsetof(CPUX86State, cc_op), "cc_op");
7737 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
7738 "cc_dst");
7739 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
7740 "cc_src");
7741 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src2),
7742 "cc_src2");
7743
7744 #ifdef TARGET_X86_64
7745 cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
7746 offsetof(CPUX86State, regs[R_EAX]), "rax");
7747 cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
7748 offsetof(CPUX86State, regs[R_ECX]), "rcx");
7749 cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
7750 offsetof(CPUX86State, regs[R_EDX]), "rdx");
7751 cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
7752 offsetof(CPUX86State, regs[R_EBX]), "rbx");
7753 cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
7754 offsetof(CPUX86State, regs[R_ESP]), "rsp");
7755 cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
7756 offsetof(CPUX86State, regs[R_EBP]), "rbp");
7757 cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
7758 offsetof(CPUX86State, regs[R_ESI]), "rsi");
7759 cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
7760 offsetof(CPUX86State, regs[R_EDI]), "rdi");
7761 cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
7762 offsetof(CPUX86State, regs[8]), "r8");
7763 cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
7764 offsetof(CPUX86State, regs[9]), "r9");
7765 cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
7766 offsetof(CPUX86State, regs[10]), "r10");
7767 cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
7768 offsetof(CPUX86State, regs[11]), "r11");
7769 cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
7770 offsetof(CPUX86State, regs[12]), "r12");
7771 cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
7772 offsetof(CPUX86State, regs[13]), "r13");
7773 cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
7774 offsetof(CPUX86State, regs[14]), "r14");
7775 cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
7776 offsetof(CPUX86State, regs[15]), "r15");
7777 #else
7778 cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
7779 offsetof(CPUX86State, regs[R_EAX]), "eax");
7780 cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
7781 offsetof(CPUX86State, regs[R_ECX]), "ecx");
7782 cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
7783 offsetof(CPUX86State, regs[R_EDX]), "edx");
7784 cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
7785 offsetof(CPUX86State, regs[R_EBX]), "ebx");
7786 cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
7787 offsetof(CPUX86State, regs[R_ESP]), "esp");
7788 cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
7789 offsetof(CPUX86State, regs[R_EBP]), "ebp");
7790 cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
7791 offsetof(CPUX86State, regs[R_ESI]), "esi");
7792 cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
7793 offsetof(CPUX86State, regs[R_EDI]), "edi");
7794 #endif
7795
7796 /* register helpers */
7797 #define GEN_HELPER 2
7798 #include "helper.h"
7799 }
7800
7801 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7802 basic block 'tb'. If search_pc is TRUE, also generate PC
7803 information for each intermediate instruction. */
7804 static inline void gen_intermediate_code_internal(CPUX86State *env,
7805 TranslationBlock *tb,
7806 int search_pc)
7807 {
7808 DisasContext dc1, *dc = &dc1;
7809 target_ulong pc_ptr;
7810 uint16_t *gen_opc_end;
7811 CPUBreakpoint *bp;
7812 int j, lj;
7813 uint64_t flags;
7814 target_ulong pc_start;
7815 target_ulong cs_base;
7816 int num_insns;
7817 int max_insns;
7818
7819 /* generate intermediate code */
7820 pc_start = tb->pc;
7821 cs_base = tb->cs_base;
7822 flags = tb->flags;
7823
7824 dc->pe = (flags >> HF_PE_SHIFT) & 1;
7825 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7826 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7827 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7828 dc->f_st = 0;
7829 dc->vm86 = (flags >> VM_SHIFT) & 1;
7830 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7831 dc->iopl = (flags >> IOPL_SHIFT) & 3;
7832 dc->tf = (flags >> TF_SHIFT) & 1;
7833 dc->singlestep_enabled = env->singlestep_enabled;
7834 dc->cc_op = CC_OP_DYNAMIC;
7835 dc->cc_op_dirty = false;
7836 dc->cs_base = cs_base;
7837 dc->tb = tb;
7838 dc->popl_esp_hack = 0;
7839 /* select memory access functions */
7840 dc->mem_index = 0;
7841 if (flags & HF_SOFTMMU_MASK) {
7842 dc->mem_index = (cpu_mmu_index(env) + 1) << 2;
7843 }
7844 dc->cpuid_features = env->cpuid_features;
7845 dc->cpuid_ext_features = env->cpuid_ext_features;
7846 dc->cpuid_ext2_features = env->cpuid_ext2_features;
7847 dc->cpuid_ext3_features = env->cpuid_ext3_features;
7848 dc->cpuid_7_0_ebx_features = env->cpuid_7_0_ebx_features;
7849 #ifdef TARGET_X86_64
7850 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7851 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7852 #endif
7853 dc->flags = flags;
7854 dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
7855 (flags & HF_INHIBIT_IRQ_MASK)
7856 #ifndef CONFIG_SOFTMMU
7857 || (flags & HF_SOFTMMU_MASK)
7858 #endif
7859 );
7860 #if 0
7861 /* check addseg logic */
7862 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
7863 printf("ERROR addseg\n");
7864 #endif
7865
7866 cpu_T[0] = tcg_temp_new();
7867 cpu_T[1] = tcg_temp_new();
7868 cpu_A0 = tcg_temp_new();
7869
7870 cpu_tmp0 = tcg_temp_new();
7871 cpu_tmp1_i64 = tcg_temp_new_i64();
7872 cpu_tmp2_i32 = tcg_temp_new_i32();
7873 cpu_tmp3_i32 = tcg_temp_new_i32();
7874 cpu_tmp4 = tcg_temp_new();
7875 cpu_tmp5 = tcg_temp_new();
7876 cpu_ptr0 = tcg_temp_new_ptr();
7877 cpu_ptr1 = tcg_temp_new_ptr();
7878 cpu_cc_srcT = tcg_temp_local_new();
7879
7880 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
7881
7882 dc->is_jmp = DISAS_NEXT;
7883 pc_ptr = pc_start;
7884 lj = -1;
7885 num_insns = 0;
7886 max_insns = tb->cflags & CF_COUNT_MASK;
7887 if (max_insns == 0)
7888 max_insns = CF_COUNT_MASK;
7889
7890 gen_icount_start();
7891 for(;;) {
7892 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
7893 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
7894 if (bp->pc == pc_ptr &&
7895 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
7896 gen_debug(dc, pc_ptr - dc->cs_base);
7897 break;
7898 }
7899 }
7900 }
7901 if (search_pc) {
7902 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
7903 if (lj < j) {
7904 lj++;
7905 while (lj < j)
7906 tcg_ctx.gen_opc_instr_start[lj++] = 0;
7907 }
7908 tcg_ctx.gen_opc_pc[lj] = pc_ptr;
7909 gen_opc_cc_op[lj] = dc->cc_op;
7910 tcg_ctx.gen_opc_instr_start[lj] = 1;
7911 tcg_ctx.gen_opc_icount[lj] = num_insns;
7912 }
7913 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
7914 gen_io_start();
7915
7916 pc_ptr = disas_insn(env, dc, pc_ptr);
7917 num_insns++;
7918 /* stop translation if indicated */
7919 if (dc->is_jmp)
7920 break;
7921 /* In single-step mode, we generate only one instruction and
7922 then an exception. */
7923 /* If IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7924 the flag and abort the translation to give the IRQs a
7925 chance to happen. */
7926 if (dc->tf || dc->singlestep_enabled ||
7927 (flags & HF_INHIBIT_IRQ_MASK)) {
7928 gen_jmp_im(pc_ptr - dc->cs_base);
7929 gen_eob(dc);
7930 break;
7931 }
7932 /* if too long translation, stop generation too */
7933 if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
7934 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
7935 num_insns >= max_insns) {
7936 gen_jmp_im(pc_ptr - dc->cs_base);
7937 gen_eob(dc);
7938 break;
7939 }
7940 if (singlestep) {
7941 gen_jmp_im(pc_ptr - dc->cs_base);
7942 gen_eob(dc);
7943 break;
7944 }
7945 }
7946 if (tb->cflags & CF_LAST_IO)
7947 gen_io_end();
7948 gen_icount_end(tb, num_insns);
7949 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
7950 /* make sure the last values are filled in */
7951 if (search_pc) {
7952 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
7953 lj++;
7954 while (lj <= j)
7955 tcg_ctx.gen_opc_instr_start[lj++] = 0;
7956 }
7957
7958 #ifdef DEBUG_DISAS
7959 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
7960 int disas_flags;
7961 qemu_log("----------------\n");
7962 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7963 #ifdef TARGET_X86_64
7964 if (dc->code64)
7965 disas_flags = 2;
7966 else
7967 #endif
7968 disas_flags = !dc->code32;
7969 log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
7970 qemu_log("\n");
7971 }
7972 #endif
7973
7974 if (!search_pc) {
7975 tb->size = pc_ptr - pc_start;
7976 tb->icount = num_insns;
7977 }
7978 }
7979
7980 void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
7981 {
7982 gen_intermediate_code_internal(env, tb, 0);
7983 }
7984
7985 void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
7986 {
7987 gen_intermediate_code_internal(env, tb, 1);
7988 }
7989
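/* Recover eip and the lazy cc_op recorded for the micro-op at pc_pos
   after an exception or interrupt.  */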
7990 void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
7991 {
7992 int cc_op;
7993 #ifdef DEBUG_DISAS
7994 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
7995 int i;
7996 qemu_log("RESTORE:\n");
7997 for(i = 0;i <= pc_pos; i++) {
7998 if (tcg_ctx.gen_opc_instr_start[i]) {
7999 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
8000 tcg_ctx.gen_opc_pc[i]);
8001 }
8002 }
8003 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
8004 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
8005 (uint32_t)tb->cs_base);
8006 }
8007 #endif
8008 env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
8009 cc_op = gen_opc_cc_op[pc_pos];
8010 if (cc_op != CC_OP_DYNAMIC)
8011 env->cc_op = cc_op;
8012 }