]> git.proxmox.com Git - qemu.git/blob - target-i386/translate.c
31e344244290fd662f8c81405ba4007ccd38a4f2
[qemu.git] / target-i386 / translate.c
1 /*
2 * i386 translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdarg.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <inttypes.h>
24 #include <signal.h>
25
26 #include "qemu/host-utils.h"
27 #include "cpu.h"
28 #include "disas/disas.h"
29 #include "tcg-op.h"
30
31 #include "helper.h"
32 #define GEN_HELPER 1
33 #include "helper.h"
34
35 #define PREFIX_REPZ 0x01
36 #define PREFIX_REPNZ 0x02
37 #define PREFIX_LOCK 0x04
38 #define PREFIX_DATA 0x08
39 #define PREFIX_ADR 0x10
40
41 #ifdef TARGET_X86_64
42 #define CODE64(s) ((s)->code64)
43 #define REX_X(s) ((s)->rex_x)
44 #define REX_B(s) ((s)->rex_b)
45 #else
46 #define CODE64(s) 0
47 #define REX_X(s) 0
48 #define REX_B(s) 0
49 #endif
50
51 #ifdef TARGET_X86_64
52 # define ctztl ctz64
53 # define clztl clz64
54 #else
55 # define ctztl ctz32
56 # define clztl clz32
57 #endif
58
59 //#define MACRO_TEST 1
60
61 /* global register indexes */
62 static TCGv_ptr cpu_env;
63 static TCGv cpu_A0;
64 static TCGv cpu_cc_src, cpu_cc_dst, cpu_cc_srcT;
65 static TCGv_i32 cpu_cc_op;
66 static TCGv cpu_regs[CPU_NB_REGS];
67 /* local temps */
68 static TCGv cpu_T[2];
69 /* local register indexes (only used inside old micro ops) */
70 static TCGv cpu_tmp0, cpu_tmp4;
71 static TCGv_ptr cpu_ptr0, cpu_ptr1;
72 static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
73 static TCGv_i64 cpu_tmp1_i64;
74 static TCGv cpu_tmp5;
75
76 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
77
78 #include "exec/gen-icount.h"
79
80 #ifdef TARGET_X86_64
81 static int x86_64_hregs;
82 #endif
83
84 typedef struct DisasContext {
85 /* current insn context */
86 int override; /* -1 if no override */
87 int prefix;
88 int aflag, dflag;
89 target_ulong pc; /* pc = eip + cs_base */
90 int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
91 static state change (stop translation) */
92 /* current block context */
93 target_ulong cs_base; /* base of CS segment */
94 int pe; /* protected mode */
95 int code32; /* 32 bit code segment */
96 #ifdef TARGET_X86_64
97 int lma; /* long mode active */
98 int code64; /* 64 bit code segment */
99 int rex_x, rex_b;
100 #endif
101 int ss32; /* 32 bit stack segment */
102 CCOp cc_op; /* current CC operation */
103 bool cc_op_dirty;
104 int addseg; /* non zero if either DS/ES/SS have a non zero base */
105 int f_st; /* currently unused */
106 int vm86; /* vm86 mode */
107 int cpl;
108 int iopl;
109 int tf; /* TF cpu flag */
110 int singlestep_enabled; /* "hardware" single step enabled */
111 int jmp_opt; /* use direct block chaining for direct jumps */
112 int mem_index; /* select memory access functions */
113 uint64_t flags; /* all execution flags */
114 struct TranslationBlock *tb;
115 int popl_esp_hack; /* for correct popl with esp base handling */
116 int rip_offset; /* only used in x86_64, but left for simplicity */
117 int cpuid_features;
118 int cpuid_ext_features;
119 int cpuid_ext2_features;
120 int cpuid_ext3_features;
121 int cpuid_7_0_ebx_features;
122 } DisasContext;
123
124 static void gen_eob(DisasContext *s);
125 static void gen_jmp(DisasContext *s, target_ulong eip);
126 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
127 static void gen_op(DisasContext *s1, int op, int ot, int d);
128
129 /* i386 arith/logic operations */
130 enum {
131 OP_ADDL,
132 OP_ORL,
133 OP_ADCL,
134 OP_SBBL,
135 OP_ANDL,
136 OP_SUBL,
137 OP_XORL,
138 OP_CMPL,
139 };
140
141 /* i386 shift ops */
142 enum {
143 OP_ROL,
144 OP_ROR,
145 OP_RCL,
146 OP_RCR,
147 OP_SHL,
148 OP_SHR,
149 OP_SHL1, /* undocumented */
150 OP_SAR = 7,
151 };
152
153 enum {
154 JCC_O,
155 JCC_B,
156 JCC_Z,
157 JCC_BE,
158 JCC_S,
159 JCC_P,
160 JCC_L,
161 JCC_LE,
162 };
163
164 /* operand size */
165 enum {
166 OT_BYTE = 0,
167 OT_WORD,
168 OT_LONG,
169 OT_QUAD,
170 };
171
172 enum {
173 /* I386 int registers */
174 OR_EAX, /* MUST be even numbered */
175 OR_ECX,
176 OR_EDX,
177 OR_EBX,
178 OR_ESP,
179 OR_EBP,
180 OR_ESI,
181 OR_EDI,
182
183 OR_TMP0 = 16, /* temporary operand register */
184 OR_TMP1,
185 OR_A0, /* temporary register used when doing address evaluation */
186 };
187
188 enum {
189 USES_CC_DST = 1,
190 USES_CC_SRC = 2,
191 USES_CC_SRCT = 4,
192 };
193
194 /* Bit set if the global variable is live after setting CC_OP to X. */
195 static const uint8_t cc_op_live[CC_OP_NB] = {
196 [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC,
197 [CC_OP_EFLAGS] = USES_CC_SRC,
198 [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
199 [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
200 [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC,
201 [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
202 [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC,
203 [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
204 [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
205 [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
206 [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
207 [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
208 };
209
210 static void set_cc_op(DisasContext *s, CCOp op)
211 {
212 int dead;
213
214 if (s->cc_op == op) {
215 return;
216 }
217
218 /* Discard CC computation that will no longer be used. */
219 dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
220 if (dead & USES_CC_DST) {
221 tcg_gen_discard_tl(cpu_cc_dst);
222 }
223 if (dead & USES_CC_SRC) {
224 tcg_gen_discard_tl(cpu_cc_src);
225 }
226 if (dead & USES_CC_SRCT) {
227 tcg_gen_discard_tl(cpu_cc_srcT);
228 }
229
230 s->cc_op = op;
231 /* The DYNAMIC setting is translator only, and should never be
232 stored. Thus we always consider it clean. */
233 s->cc_op_dirty = (op != CC_OP_DYNAMIC);
234 }
235
236 static void gen_update_cc_op(DisasContext *s)
237 {
238 if (s->cc_op_dirty) {
239 tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
240 s->cc_op_dirty = false;
241 }
242 }
243
244 static inline void gen_op_movl_T0_0(void)
245 {
246 tcg_gen_movi_tl(cpu_T[0], 0);
247 }
248
249 static inline void gen_op_movl_T0_im(int32_t val)
250 {
251 tcg_gen_movi_tl(cpu_T[0], val);
252 }
253
254 static inline void gen_op_movl_T0_imu(uint32_t val)
255 {
256 tcg_gen_movi_tl(cpu_T[0], val);
257 }
258
259 static inline void gen_op_movl_T1_im(int32_t val)
260 {
261 tcg_gen_movi_tl(cpu_T[1], val);
262 }
263
264 static inline void gen_op_movl_T1_imu(uint32_t val)
265 {
266 tcg_gen_movi_tl(cpu_T[1], val);
267 }
268
269 static inline void gen_op_movl_A0_im(uint32_t val)
270 {
271 tcg_gen_movi_tl(cpu_A0, val);
272 }
273
274 #ifdef TARGET_X86_64
275 static inline void gen_op_movq_A0_im(int64_t val)
276 {
277 tcg_gen_movi_tl(cpu_A0, val);
278 }
279 #endif
280
281 static inline void gen_movtl_T0_im(target_ulong val)
282 {
283 tcg_gen_movi_tl(cpu_T[0], val);
284 }
285
286 static inline void gen_movtl_T1_im(target_ulong val)
287 {
288 tcg_gen_movi_tl(cpu_T[1], val);
289 }
290
291 static inline void gen_op_andl_T0_ffff(void)
292 {
293 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
294 }
295
296 static inline void gen_op_andl_T0_im(uint32_t val)
297 {
298 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
299 }
300
301 static inline void gen_op_movl_T0_T1(void)
302 {
303 tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
304 }
305
306 static inline void gen_op_andl_A0_ffff(void)
307 {
308 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
309 }
310
311 #ifdef TARGET_X86_64
312
313 #define NB_OP_SIZES 4
314
315 #else /* !TARGET_X86_64 */
316
317 #define NB_OP_SIZES 3
318
319 #endif /* !TARGET_X86_64 */
320
321 #if defined(HOST_WORDS_BIGENDIAN)
322 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
323 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
324 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
325 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
326 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
327 #else
328 #define REG_B_OFFSET 0
329 #define REG_H_OFFSET 1
330 #define REG_W_OFFSET 0
331 #define REG_L_OFFSET 0
332 #define REG_LH_OFFSET 4
333 #endif
334
335 /* In instruction encodings for byte register accesses the
336 * register number usually indicates "low 8 bits of register N";
337 * however there are some special cases where N 4..7 indicates
338 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
339 * true for this special case, false otherwise.
340 */
341 static inline bool byte_reg_is_xH(int reg)
342 {
343 if (reg < 4) {
344 return false;
345 }
346 #ifdef TARGET_X86_64
347 if (reg >= 8 || x86_64_hregs) {
348 return false;
349 }
350 #endif
351 return true;
352 }
353
354 static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
355 {
356 switch(ot) {
357 case OT_BYTE:
358 if (!byte_reg_is_xH(reg)) {
359 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
360 } else {
361 tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
362 }
363 break;
364 case OT_WORD:
365 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
366 break;
367 default: /* XXX this shouldn't be reached; abort? */
368 case OT_LONG:
369 /* For x86_64, this sets the higher half of register to zero.
370 For i386, this is equivalent to a mov. */
371 tcg_gen_ext32u_tl(cpu_regs[reg], t0);
372 break;
373 #ifdef TARGET_X86_64
374 case OT_QUAD:
375 tcg_gen_mov_tl(cpu_regs[reg], t0);
376 break;
377 #endif
378 }
379 }
380
381 static inline void gen_op_mov_reg_T0(int ot, int reg)
382 {
383 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
384 }
385
386 static inline void gen_op_mov_reg_T1(int ot, int reg)
387 {
388 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
389 }
390
391 static inline void gen_op_mov_reg_A0(int size, int reg)
392 {
393 switch(size) {
394 case OT_BYTE:
395 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
396 break;
397 default: /* XXX this shouldn't be reached; abort? */
398 case OT_WORD:
399 /* For x86_64, this sets the higher half of register to zero.
400 For i386, this is equivalent to a mov. */
401 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
402 break;
403 #ifdef TARGET_X86_64
404 case OT_LONG:
405 tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
406 break;
407 #endif
408 }
409 }
410
411 static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
412 {
413 if (ot == OT_BYTE && byte_reg_is_xH(reg)) {
414 tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
415 tcg_gen_ext8u_tl(t0, t0);
416 } else {
417 tcg_gen_mov_tl(t0, cpu_regs[reg]);
418 }
419 }
420
421 static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
422 {
423 gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
424 }
425
426 static inline void gen_op_movl_A0_reg(int reg)
427 {
428 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
429 }
430
431 static inline void gen_op_addl_A0_im(int32_t val)
432 {
433 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
434 #ifdef TARGET_X86_64
435 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
436 #endif
437 }
438
439 #ifdef TARGET_X86_64
440 static inline void gen_op_addq_A0_im(int64_t val)
441 {
442 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
443 }
444 #endif
445
446 static void gen_add_A0_im(DisasContext *s, int val)
447 {
448 #ifdef TARGET_X86_64
449 if (CODE64(s))
450 gen_op_addq_A0_im(val);
451 else
452 #endif
453 gen_op_addl_A0_im(val);
454 }
455
456 static inline void gen_op_addl_T0_T1(void)
457 {
458 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
459 }
460
461 static inline void gen_op_jmp_T0(void)
462 {
463 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
464 }
465
466 static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
467 {
468 switch(size) {
469 case OT_BYTE:
470 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
471 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
472 break;
473 case OT_WORD:
474 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
475 /* For x86_64, this sets the higher half of register to zero.
476 For i386, this is equivalent to a nop. */
477 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
478 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
479 break;
480 #ifdef TARGET_X86_64
481 case OT_LONG:
482 tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
483 break;
484 #endif
485 }
486 }
487
488 static inline void gen_op_add_reg_T0(int size, int reg)
489 {
490 switch(size) {
491 case OT_BYTE:
492 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
493 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
494 break;
495 case OT_WORD:
496 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
497 /* For x86_64, this sets the higher half of register to zero.
498 For i386, this is equivalent to a nop. */
499 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
500 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
501 break;
502 #ifdef TARGET_X86_64
503 case OT_LONG:
504 tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
505 break;
506 #endif
507 }
508 }
509
510 static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
511 {
512 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
513 if (shift != 0)
514 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
515 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
516 /* For x86_64, this sets the higher half of register to zero.
517 For i386, this is equivalent to a nop. */
518 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
519 }
520
521 static inline void gen_op_movl_A0_seg(int reg)
522 {
523 tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
524 }
525
526 static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
527 {
528 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
529 #ifdef TARGET_X86_64
530 if (CODE64(s)) {
531 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
532 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
533 } else {
534 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
535 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
536 }
537 #else
538 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
539 #endif
540 }
541
542 #ifdef TARGET_X86_64
543 static inline void gen_op_movq_A0_seg(int reg)
544 {
545 tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
546 }
547
548 static inline void gen_op_addq_A0_seg(int reg)
549 {
550 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
551 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
552 }
553
554 static inline void gen_op_movq_A0_reg(int reg)
555 {
556 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
557 }
558
559 static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
560 {
561 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
562 if (shift != 0)
563 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
564 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
565 }
566 #endif
567
568 static inline void gen_op_lds_T0_A0(int idx)
569 {
570 int mem_index = (idx >> 2) - 1;
571 switch(idx & 3) {
572 case OT_BYTE:
573 tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
574 break;
575 case OT_WORD:
576 tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
577 break;
578 default:
579 case OT_LONG:
580 tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
581 break;
582 }
583 }
584
585 static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
586 {
587 int mem_index = (idx >> 2) - 1;
588 switch(idx & 3) {
589 case OT_BYTE:
590 tcg_gen_qemu_ld8u(t0, a0, mem_index);
591 break;
592 case OT_WORD:
593 tcg_gen_qemu_ld16u(t0, a0, mem_index);
594 break;
595 case OT_LONG:
596 tcg_gen_qemu_ld32u(t0, a0, mem_index);
597 break;
598 default:
599 case OT_QUAD:
600 /* Should never happen on 32-bit targets. */
601 #ifdef TARGET_X86_64
602 tcg_gen_qemu_ld64(t0, a0, mem_index);
603 #endif
604 break;
605 }
606 }
607
608 /* XXX: always use ldu or lds */
609 static inline void gen_op_ld_T0_A0(int idx)
610 {
611 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
612 }
613
614 static inline void gen_op_ldu_T0_A0(int idx)
615 {
616 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
617 }
618
619 static inline void gen_op_ld_T1_A0(int idx)
620 {
621 gen_op_ld_v(idx, cpu_T[1], cpu_A0);
622 }
623
624 static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
625 {
626 int mem_index = (idx >> 2) - 1;
627 switch(idx & 3) {
628 case OT_BYTE:
629 tcg_gen_qemu_st8(t0, a0, mem_index);
630 break;
631 case OT_WORD:
632 tcg_gen_qemu_st16(t0, a0, mem_index);
633 break;
634 case OT_LONG:
635 tcg_gen_qemu_st32(t0, a0, mem_index);
636 break;
637 default:
638 case OT_QUAD:
639 /* Should never happen on 32-bit targets. */
640 #ifdef TARGET_X86_64
641 tcg_gen_qemu_st64(t0, a0, mem_index);
642 #endif
643 break;
644 }
645 }
646
647 static inline void gen_op_st_T0_A0(int idx)
648 {
649 gen_op_st_v(idx, cpu_T[0], cpu_A0);
650 }
651
652 static inline void gen_op_st_T1_A0(int idx)
653 {
654 gen_op_st_v(idx, cpu_T[1], cpu_A0);
655 }
656
657 static inline void gen_jmp_im(target_ulong pc)
658 {
659 tcg_gen_movi_tl(cpu_tmp0, pc);
660 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
661 }
662
663 static inline void gen_string_movl_A0_ESI(DisasContext *s)
664 {
665 int override;
666
667 override = s->override;
668 #ifdef TARGET_X86_64
669 if (s->aflag == 2) {
670 if (override >= 0) {
671 gen_op_movq_A0_seg(override);
672 gen_op_addq_A0_reg_sN(0, R_ESI);
673 } else {
674 gen_op_movq_A0_reg(R_ESI);
675 }
676 } else
677 #endif
678 if (s->aflag) {
679 /* 32 bit address */
680 if (s->addseg && override < 0)
681 override = R_DS;
682 if (override >= 0) {
683 gen_op_movl_A0_seg(override);
684 gen_op_addl_A0_reg_sN(0, R_ESI);
685 } else {
686 gen_op_movl_A0_reg(R_ESI);
687 }
688 } else {
689 /* 16 address, always override */
690 if (override < 0)
691 override = R_DS;
692 gen_op_movl_A0_reg(R_ESI);
693 gen_op_andl_A0_ffff();
694 gen_op_addl_A0_seg(s, override);
695 }
696 }
697
698 static inline void gen_string_movl_A0_EDI(DisasContext *s)
699 {
700 #ifdef TARGET_X86_64
701 if (s->aflag == 2) {
702 gen_op_movq_A0_reg(R_EDI);
703 } else
704 #endif
705 if (s->aflag) {
706 if (s->addseg) {
707 gen_op_movl_A0_seg(R_ES);
708 gen_op_addl_A0_reg_sN(0, R_EDI);
709 } else {
710 gen_op_movl_A0_reg(R_EDI);
711 }
712 } else {
713 gen_op_movl_A0_reg(R_EDI);
714 gen_op_andl_A0_ffff();
715 gen_op_addl_A0_seg(s, R_ES);
716 }
717 }
718
719 static inline void gen_op_movl_T0_Dshift(int ot)
720 {
721 tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
722 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
723 };
724
725 static TCGv gen_ext_tl(TCGv dst, TCGv src, int size, bool sign)
726 {
727 switch (size) {
728 case OT_BYTE:
729 if (sign) {
730 tcg_gen_ext8s_tl(dst, src);
731 } else {
732 tcg_gen_ext8u_tl(dst, src);
733 }
734 return dst;
735 case OT_WORD:
736 if (sign) {
737 tcg_gen_ext16s_tl(dst, src);
738 } else {
739 tcg_gen_ext16u_tl(dst, src);
740 }
741 return dst;
742 #ifdef TARGET_X86_64
743 case OT_LONG:
744 if (sign) {
745 tcg_gen_ext32s_tl(dst, src);
746 } else {
747 tcg_gen_ext32u_tl(dst, src);
748 }
749 return dst;
750 #endif
751 default:
752 return src;
753 }
754 }
755
756 static void gen_extu(int ot, TCGv reg)
757 {
758 gen_ext_tl(reg, reg, ot, false);
759 }
760
761 static void gen_exts(int ot, TCGv reg)
762 {
763 gen_ext_tl(reg, reg, ot, true);
764 }
765
766 static inline void gen_op_jnz_ecx(int size, int label1)
767 {
768 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
769 gen_extu(size + 1, cpu_tmp0);
770 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
771 }
772
773 static inline void gen_op_jz_ecx(int size, int label1)
774 {
775 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
776 gen_extu(size + 1, cpu_tmp0);
777 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
778 }
779
780 static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
781 {
782 switch (ot) {
783 case OT_BYTE:
784 gen_helper_inb(v, n);
785 break;
786 case OT_WORD:
787 gen_helper_inw(v, n);
788 break;
789 case OT_LONG:
790 gen_helper_inl(v, n);
791 break;
792 }
793 }
794
795 static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
796 {
797 switch (ot) {
798 case OT_BYTE:
799 gen_helper_outb(v, n);
800 break;
801 case OT_WORD:
802 gen_helper_outw(v, n);
803 break;
804 case OT_LONG:
805 gen_helper_outl(v, n);
806 break;
807 }
808 }
809
810 static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
811 uint32_t svm_flags)
812 {
813 int state_saved;
814 target_ulong next_eip;
815
816 state_saved = 0;
817 if (s->pe && (s->cpl > s->iopl || s->vm86)) {
818 gen_update_cc_op(s);
819 gen_jmp_im(cur_eip);
820 state_saved = 1;
821 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
822 switch (ot) {
823 case OT_BYTE:
824 gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
825 break;
826 case OT_WORD:
827 gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
828 break;
829 case OT_LONG:
830 gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
831 break;
832 }
833 }
834 if(s->flags & HF_SVMI_MASK) {
835 if (!state_saved) {
836 gen_update_cc_op(s);
837 gen_jmp_im(cur_eip);
838 }
839 svm_flags |= (1 << (4 + ot));
840 next_eip = s->pc - s->cs_base;
841 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
842 gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
843 tcg_const_i32(svm_flags),
844 tcg_const_i32(next_eip - cur_eip));
845 }
846 }
847
848 static inline void gen_movs(DisasContext *s, int ot)
849 {
850 gen_string_movl_A0_ESI(s);
851 gen_op_ld_T0_A0(ot + s->mem_index);
852 gen_string_movl_A0_EDI(s);
853 gen_op_st_T0_A0(ot + s->mem_index);
854 gen_op_movl_T0_Dshift(ot);
855 gen_op_add_reg_T0(s->aflag, R_ESI);
856 gen_op_add_reg_T0(s->aflag, R_EDI);
857 }
858
859 static void gen_op_update1_cc(void)
860 {
861 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
862 }
863
864 static void gen_op_update2_cc(void)
865 {
866 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
867 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
868 }
869
870 static inline void gen_op_testl_T0_T1_cc(void)
871 {
872 tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
873 }
874
875 static void gen_op_update_neg_cc(void)
876 {
877 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
878 tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
879 tcg_gen_movi_tl(cpu_cc_srcT, 0);
880 }
881
882 /* compute all eflags to cc_src */
883 static void gen_compute_eflags(DisasContext *s)
884 {
885 if (s->cc_op == CC_OP_EFLAGS) {
886 return;
887 }
888 gen_update_cc_op(s);
889 gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_env, cpu_cc_op);
890 set_cc_op(s, CC_OP_EFLAGS);
891 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
892 }
893
894 typedef struct CCPrepare {
895 TCGCond cond;
896 TCGv reg;
897 TCGv reg2;
898 target_ulong imm;
899 target_ulong mask;
900 bool use_reg2;
901 bool no_setcond;
902 } CCPrepare;
903
904 /* compute eflags.C to reg */
905 static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
906 {
907 TCGv t0, t1;
908 int size, shift;
909
910 switch (s->cc_op) {
911 case CC_OP_SUBB ... CC_OP_SUBQ:
912 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
913 size = s->cc_op - CC_OP_SUBB;
914 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
915 /* If no temporary was used, be careful not to alias t1 and t0. */
916 t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
917 tcg_gen_mov_tl(t0, cpu_cc_srcT);
918 gen_extu(size, t0);
919 goto add_sub;
920
921 case CC_OP_ADDB ... CC_OP_ADDQ:
922 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
923 size = s->cc_op - CC_OP_ADDB;
924 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
925 t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
926 add_sub:
927 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
928 .reg2 = t1, .mask = -1, .use_reg2 = true };
929
930 case CC_OP_SBBB ... CC_OP_SBBQ:
931 /* (DATA_TYPE)(CC_DST + CC_SRC + 1) <= (DATA_TYPE)CC_SRC */
932 size = s->cc_op - CC_OP_SBBB;
933 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
934 if (TCGV_EQUAL(t1, reg) && TCGV_EQUAL(reg, cpu_cc_src)) {
935 tcg_gen_mov_tl(cpu_tmp0, cpu_cc_src);
936 t1 = cpu_tmp0;
937 }
938
939 tcg_gen_add_tl(reg, cpu_cc_dst, cpu_cc_src);
940 tcg_gen_addi_tl(reg, reg, 1);
941 gen_extu(size, reg);
942 t0 = reg;
943 goto adc_sbb;
944
945 case CC_OP_ADCB ... CC_OP_ADCQ:
946 /* (DATA_TYPE)CC_DST <= (DATA_TYPE)CC_SRC */
947 size = s->cc_op - CC_OP_ADCB;
948 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
949 t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
950 adc_sbb:
951 return (CCPrepare) { .cond = TCG_COND_LEU, .reg = t0,
952 .reg2 = t1, .mask = -1, .use_reg2 = true };
953
954 case CC_OP_LOGICB ... CC_OP_LOGICQ:
955 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
956
957 case CC_OP_INCB ... CC_OP_INCQ:
958 case CC_OP_DECB ... CC_OP_DECQ:
959 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
960 .mask = -1, .no_setcond = true };
961
962 case CC_OP_SHLB ... CC_OP_SHLQ:
963 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
964 size = s->cc_op - CC_OP_SHLB;
965 shift = (8 << size) - 1;
966 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
967 .mask = (target_ulong)1 << shift };
968
969 case CC_OP_MULB ... CC_OP_MULQ:
970 return (CCPrepare) { .cond = TCG_COND_NE,
971 .reg = cpu_cc_src, .mask = -1 };
972
973 case CC_OP_EFLAGS:
974 case CC_OP_SARB ... CC_OP_SARQ:
975 /* CC_SRC & 1 */
976 return (CCPrepare) { .cond = TCG_COND_NE,
977 .reg = cpu_cc_src, .mask = CC_C };
978
979 default:
980 /* The need to compute only C from CC_OP_DYNAMIC is important
981 in efficiently implementing e.g. INC at the start of a TB. */
982 gen_update_cc_op(s);
983 gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_env, cpu_cc_op);
984 tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
985 return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
986 .mask = -1, .no_setcond = true };
987 }
988 }
989
990 /* compute eflags.P to reg */
991 static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
992 {
993 gen_compute_eflags(s);
994 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
995 .mask = CC_P };
996 }
997
998 /* compute eflags.S to reg */
999 static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
1000 {
1001 switch (s->cc_op) {
1002 case CC_OP_DYNAMIC:
1003 gen_compute_eflags(s);
1004 /* FALLTHRU */
1005 case CC_OP_EFLAGS:
1006 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1007 .mask = CC_S };
1008 default:
1009 {
1010 int size = (s->cc_op - CC_OP_ADDB) & 3;
1011 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
1012 return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
1013 }
1014 }
1015 }
1016
1017 /* compute eflags.O to reg */
1018 static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
1019 {
1020 gen_compute_eflags(s);
1021 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1022 .mask = CC_O };
1023 }
1024
1025 /* compute eflags.Z to reg */
1026 static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
1027 {
1028 switch (s->cc_op) {
1029 case CC_OP_DYNAMIC:
1030 gen_compute_eflags(s);
1031 /* FALLTHRU */
1032 case CC_OP_EFLAGS:
1033 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1034 .mask = CC_Z };
1035 default:
1036 {
1037 int size = (s->cc_op - CC_OP_ADDB) & 3;
1038 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
1039 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
1040 }
1041 }
1042 }
1043
1044 /* perform a conditional store into register 'reg' according to jump opcode
1045 value 'b'. In the fast case, T0 is guaranted not to be used. */
1046 static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
1047 {
1048 int inv, jcc_op, size, cond;
1049 CCPrepare cc;
1050 TCGv t0;
1051
1052 inv = b & 1;
1053 jcc_op = (b >> 1) & 7;
1054
1055 switch (s->cc_op) {
1056 case CC_OP_SUBB ... CC_OP_SUBQ:
1057 /* We optimize relational operators for the cmp/jcc case. */
1058 size = s->cc_op - CC_OP_SUBB;
1059 switch (jcc_op) {
1060 case JCC_BE:
1061 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
1062 gen_extu(size, cpu_tmp4);
1063 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
1064 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
1065 .reg2 = t0, .mask = -1, .use_reg2 = true };
1066 break;
1067
1068 case JCC_L:
1069 cond = TCG_COND_LT;
1070 goto fast_jcc_l;
1071 case JCC_LE:
1072 cond = TCG_COND_LE;
1073 fast_jcc_l:
1074 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
1075 gen_exts(size, cpu_tmp4);
1076 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
1077 cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
1078 .reg2 = t0, .mask = -1, .use_reg2 = true };
1079 break;
1080
1081 default:
1082 goto slow_jcc;
1083 }
1084 break;
1085
1086 default:
1087 slow_jcc:
1088 /* This actually generates good code for JC, JZ and JS. */
1089 switch (jcc_op) {
1090 case JCC_O:
1091 cc = gen_prepare_eflags_o(s, reg);
1092 break;
1093 case JCC_B:
1094 cc = gen_prepare_eflags_c(s, reg);
1095 break;
1096 case JCC_Z:
1097 cc = gen_prepare_eflags_z(s, reg);
1098 break;
1099 case JCC_BE:
1100 gen_compute_eflags(s);
1101 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1102 .mask = CC_Z | CC_C };
1103 break;
1104 case JCC_S:
1105 cc = gen_prepare_eflags_s(s, reg);
1106 break;
1107 case JCC_P:
1108 cc = gen_prepare_eflags_p(s, reg);
1109 break;
1110 case JCC_L:
1111 gen_compute_eflags(s);
1112 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1113 reg = cpu_tmp0;
1114 }
1115 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1116 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1117 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1118 .mask = CC_S };
1119 break;
1120 default:
1121 case JCC_LE:
1122 gen_compute_eflags(s);
1123 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1124 reg = cpu_tmp0;
1125 }
1126 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1127 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1128 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1129 .mask = CC_S | CC_Z };
1130 break;
1131 }
1132 break;
1133 }
1134
1135 if (inv) {
1136 cc.cond = tcg_invert_cond(cc.cond);
1137 }
1138 return cc;
1139 }
1140
1141 static void gen_setcc1(DisasContext *s, int b, TCGv reg)
1142 {
1143 CCPrepare cc = gen_prepare_cc(s, b, reg);
1144
1145 if (cc.no_setcond) {
1146 if (cc.cond == TCG_COND_EQ) {
1147 tcg_gen_xori_tl(reg, cc.reg, 1);
1148 } else {
1149 tcg_gen_mov_tl(reg, cc.reg);
1150 }
1151 return;
1152 }
1153
1154 if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
1155 cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
1156 tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
1157 tcg_gen_andi_tl(reg, reg, 1);
1158 return;
1159 }
1160 if (cc.mask != -1) {
1161 tcg_gen_andi_tl(reg, cc.reg, cc.mask);
1162 cc.reg = reg;
1163 }
1164 if (cc.use_reg2) {
1165 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1166 } else {
1167 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1168 }
1169 }
1170
1171 static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1172 {
1173 gen_setcc1(s, JCC_B << 1, reg);
1174 }
1175
1176 /* generate a conditional jump to label 'l1' according to jump opcode
1177 value 'b'. In the fast case, T0 is guaranted not to be used. */
1178 static inline void gen_jcc1_noeob(DisasContext *s, int b, int l1)
1179 {
1180 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
1181
1182 if (cc.mask != -1) {
1183 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1184 cc.reg = cpu_T[0];
1185 }
1186 if (cc.use_reg2) {
1187 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1188 } else {
1189 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1190 }
1191 }
1192
1193 /* Generate a conditional jump to label 'l1' according to jump opcode
1194 value 'b'. In the fast case, T0 is guaranted not to be used.
1195 A translation block must end soon. */
1196 static inline void gen_jcc1(DisasContext *s, int b, int l1)
1197 {
1198 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
1199
1200 gen_update_cc_op(s);
1201 if (cc.mask != -1) {
1202 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1203 cc.reg = cpu_T[0];
1204 }
1205 set_cc_op(s, CC_OP_DYNAMIC);
1206 if (cc.use_reg2) {
1207 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1208 } else {
1209 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1210 }
1211 }
1212
1213 /* XXX: does not work with gdbstub "ice" single step - not a
1214 serious problem */
1215 static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
1216 {
1217 int l1, l2;
1218
1219 l1 = gen_new_label();
1220 l2 = gen_new_label();
1221 gen_op_jnz_ecx(s->aflag, l1);
1222 gen_set_label(l2);
1223 gen_jmp_tb(s, next_eip, 1);
1224 gen_set_label(l1);
1225 return l2;
1226 }
1227
1228 static inline void gen_stos(DisasContext *s, int ot)
1229 {
1230 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1231 gen_string_movl_A0_EDI(s);
1232 gen_op_st_T0_A0(ot + s->mem_index);
1233 gen_op_movl_T0_Dshift(ot);
1234 gen_op_add_reg_T0(s->aflag, R_EDI);
1235 }
1236
1237 static inline void gen_lods(DisasContext *s, int ot)
1238 {
1239 gen_string_movl_A0_ESI(s);
1240 gen_op_ld_T0_A0(ot + s->mem_index);
1241 gen_op_mov_reg_T0(ot, R_EAX);
1242 gen_op_movl_T0_Dshift(ot);
1243 gen_op_add_reg_T0(s->aflag, R_ESI);
1244 }
1245
1246 static inline void gen_scas(DisasContext *s, int ot)
1247 {
1248 gen_string_movl_A0_EDI(s);
1249 gen_op_ld_T1_A0(ot + s->mem_index);
1250 gen_op(s, OP_CMPL, ot, R_EAX);
1251 gen_op_movl_T0_Dshift(ot);
1252 gen_op_add_reg_T0(s->aflag, R_EDI);
1253 }
1254
1255 static inline void gen_cmps(DisasContext *s, int ot)
1256 {
1257 gen_string_movl_A0_EDI(s);
1258 gen_op_ld_T1_A0(ot + s->mem_index);
1259 gen_string_movl_A0_ESI(s);
1260 gen_op(s, OP_CMPL, ot, OR_TMP0);
1261 gen_op_movl_T0_Dshift(ot);
1262 gen_op_add_reg_T0(s->aflag, R_ESI);
1263 gen_op_add_reg_T0(s->aflag, R_EDI);
1264 }
1265
1266 static inline void gen_ins(DisasContext *s, int ot)
1267 {
1268 if (use_icount)
1269 gen_io_start();
1270 gen_string_movl_A0_EDI(s);
1271 /* Note: we must do this dummy write first to be restartable in
1272 case of page fault. */
1273 gen_op_movl_T0_0();
1274 gen_op_st_T0_A0(ot + s->mem_index);
1275 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1276 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1277 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1278 gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
1279 gen_op_st_T0_A0(ot + s->mem_index);
1280 gen_op_movl_T0_Dshift(ot);
1281 gen_op_add_reg_T0(s->aflag, R_EDI);
1282 if (use_icount)
1283 gen_io_end();
1284 }
1285
1286 static inline void gen_outs(DisasContext *s, int ot)
1287 {
1288 if (use_icount)
1289 gen_io_start();
1290 gen_string_movl_A0_ESI(s);
1291 gen_op_ld_T0_A0(ot + s->mem_index);
1292
1293 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1294 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1295 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1296 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
1297 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
1298
1299 gen_op_movl_T0_Dshift(ot);
1300 gen_op_add_reg_T0(s->aflag, R_ESI);
1301 if (use_icount)
1302 gen_io_end();
1303 }
1304
1305 /* same method as Valgrind : we generate jumps to current or next
1306 instruction */
1307 #define GEN_REPZ(op) \
1308 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1309 target_ulong cur_eip, target_ulong next_eip) \
1310 { \
1311 int l2;\
1312 gen_update_cc_op(s); \
1313 l2 = gen_jz_ecx_string(s, next_eip); \
1314 gen_ ## op(s, ot); \
1315 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1316 /* a loop would cause two single step exceptions if ECX = 1 \
1317 before rep string_insn */ \
1318 if (!s->jmp_opt) \
1319 gen_op_jz_ecx(s->aflag, l2); \
1320 gen_jmp(s, cur_eip); \
1321 }
1322
1323 #define GEN_REPZ2(op) \
1324 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1325 target_ulong cur_eip, \
1326 target_ulong next_eip, \
1327 int nz) \
1328 { \
1329 int l2;\
1330 gen_update_cc_op(s); \
1331 l2 = gen_jz_ecx_string(s, next_eip); \
1332 gen_ ## op(s, ot); \
1333 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1334 gen_update_cc_op(s); \
1335 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
1336 if (!s->jmp_opt) \
1337 gen_op_jz_ecx(s->aflag, l2); \
1338 gen_jmp(s, cur_eip); \
1339 }
1340
1341 GEN_REPZ(movs)
1342 GEN_REPZ(stos)
1343 GEN_REPZ(lods)
1344 GEN_REPZ(ins)
1345 GEN_REPZ(outs)
1346 GEN_REPZ2(scas)
1347 GEN_REPZ2(cmps)
1348
1349 static void gen_helper_fp_arith_ST0_FT0(int op)
1350 {
1351 switch (op) {
1352 case 0:
1353 gen_helper_fadd_ST0_FT0(cpu_env);
1354 break;
1355 case 1:
1356 gen_helper_fmul_ST0_FT0(cpu_env);
1357 break;
1358 case 2:
1359 gen_helper_fcom_ST0_FT0(cpu_env);
1360 break;
1361 case 3:
1362 gen_helper_fcom_ST0_FT0(cpu_env);
1363 break;
1364 case 4:
1365 gen_helper_fsub_ST0_FT0(cpu_env);
1366 break;
1367 case 5:
1368 gen_helper_fsubr_ST0_FT0(cpu_env);
1369 break;
1370 case 6:
1371 gen_helper_fdiv_ST0_FT0(cpu_env);
1372 break;
1373 case 7:
1374 gen_helper_fdivr_ST0_FT0(cpu_env);
1375 break;
1376 }
1377 }
1378
1379 /* NOTE the exception in "r" op ordering */
1380 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1381 {
1382 TCGv_i32 tmp = tcg_const_i32(opreg);
1383 switch (op) {
1384 case 0:
1385 gen_helper_fadd_STN_ST0(cpu_env, tmp);
1386 break;
1387 case 1:
1388 gen_helper_fmul_STN_ST0(cpu_env, tmp);
1389 break;
1390 case 4:
1391 gen_helper_fsubr_STN_ST0(cpu_env, tmp);
1392 break;
1393 case 5:
1394 gen_helper_fsub_STN_ST0(cpu_env, tmp);
1395 break;
1396 case 6:
1397 gen_helper_fdivr_STN_ST0(cpu_env, tmp);
1398 break;
1399 case 7:
1400 gen_helper_fdiv_STN_ST0(cpu_env, tmp);
1401 break;
1402 }
1403 }
1404
1405 /* if d == OR_TMP0, it means memory operand (address in A0) */
1406 static void gen_op(DisasContext *s1, int op, int ot, int d)
1407 {
1408 if (d != OR_TMP0) {
1409 gen_op_mov_TN_reg(ot, 0, d);
1410 } else {
1411 gen_op_ld_T0_A0(ot + s1->mem_index);
1412 }
1413 switch(op) {
1414 case OP_ADCL:
1415 gen_compute_eflags_c(s1, cpu_tmp4);
1416 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1417 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1418 if (d != OR_TMP0)
1419 gen_op_mov_reg_T0(ot, d);
1420 else
1421 gen_op_st_T0_A0(ot + s1->mem_index);
1422 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1423 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1424 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1425 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1426 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
1427 set_cc_op(s1, CC_OP_DYNAMIC);
1428 break;
1429 case OP_SBBL:
1430 /*
1431 * No need to store cpu_cc_srcT, because it is used only
1432 * when the cc_op is known.
1433 */
1434 gen_compute_eflags_c(s1, cpu_tmp4);
1435 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1436 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1437 if (d != OR_TMP0)
1438 gen_op_mov_reg_T0(ot, d);
1439 else
1440 gen_op_st_T0_A0(ot + s1->mem_index);
1441 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1442 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1443 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1444 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1445 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot);
1446 set_cc_op(s1, CC_OP_DYNAMIC);
1447 break;
1448 case OP_ADDL:
1449 gen_op_addl_T0_T1();
1450 if (d != OR_TMP0)
1451 gen_op_mov_reg_T0(ot, d);
1452 else
1453 gen_op_st_T0_A0(ot + s1->mem_index);
1454 gen_op_update2_cc();
1455 set_cc_op(s1, CC_OP_ADDB + ot);
1456 break;
1457 case OP_SUBL:
1458 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
1459 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1460 if (d != OR_TMP0)
1461 gen_op_mov_reg_T0(ot, d);
1462 else
1463 gen_op_st_T0_A0(ot + s1->mem_index);
1464 gen_op_update2_cc();
1465 set_cc_op(s1, CC_OP_SUBB + ot);
1466 break;
1467 default:
1468 case OP_ANDL:
1469 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1470 if (d != OR_TMP0)
1471 gen_op_mov_reg_T0(ot, d);
1472 else
1473 gen_op_st_T0_A0(ot + s1->mem_index);
1474 gen_op_update1_cc();
1475 set_cc_op(s1, CC_OP_LOGICB + ot);
1476 break;
1477 case OP_ORL:
1478 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1479 if (d != OR_TMP0)
1480 gen_op_mov_reg_T0(ot, d);
1481 else
1482 gen_op_st_T0_A0(ot + s1->mem_index);
1483 gen_op_update1_cc();
1484 set_cc_op(s1, CC_OP_LOGICB + ot);
1485 break;
1486 case OP_XORL:
1487 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1488 if (d != OR_TMP0)
1489 gen_op_mov_reg_T0(ot, d);
1490 else
1491 gen_op_st_T0_A0(ot + s1->mem_index);
1492 gen_op_update1_cc();
1493 set_cc_op(s1, CC_OP_LOGICB + ot);
1494 break;
1495 case OP_CMPL:
1496 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1497 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
1498 tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
1499 set_cc_op(s1, CC_OP_SUBB + ot);
1500 break;
1501 }
1502 }
1503
1504 /* if d == OR_TMP0, it means memory operand (address in A0) */
1505 static void gen_inc(DisasContext *s1, int ot, int d, int c)
1506 {
1507 if (d != OR_TMP0)
1508 gen_op_mov_TN_reg(ot, 0, d);
1509 else
1510 gen_op_ld_T0_A0(ot + s1->mem_index);
1511 gen_compute_eflags_c(s1, cpu_cc_src);
1512 if (c > 0) {
1513 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
1514 set_cc_op(s1, CC_OP_INCB + ot);
1515 } else {
1516 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
1517 set_cc_op(s1, CC_OP_DECB + ot);
1518 }
1519 if (d != OR_TMP0)
1520 gen_op_mov_reg_T0(ot, d);
1521 else
1522 gen_op_st_T0_A0(ot + s1->mem_index);
1523 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1524 }
1525
1526 static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
1527 int is_right, int is_arith)
1528 {
1529 target_ulong mask;
1530 int shift_label;
1531 TCGv t0, t1, t2;
1532
1533 if (ot == OT_QUAD) {
1534 mask = 0x3f;
1535 } else {
1536 mask = 0x1f;
1537 }
1538
1539 /* load */
1540 if (op1 == OR_TMP0) {
1541 gen_op_ld_T0_A0(ot + s->mem_index);
1542 } else {
1543 gen_op_mov_TN_reg(ot, 0, op1);
1544 }
1545
1546 t0 = tcg_temp_local_new();
1547 t1 = tcg_temp_local_new();
1548 t2 = tcg_temp_local_new();
1549
1550 tcg_gen_andi_tl(t2, cpu_T[1], mask);
1551
1552 if (is_right) {
1553 if (is_arith) {
1554 gen_exts(ot, cpu_T[0]);
1555 tcg_gen_mov_tl(t0, cpu_T[0]);
1556 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], t2);
1557 } else {
1558 gen_extu(ot, cpu_T[0]);
1559 tcg_gen_mov_tl(t0, cpu_T[0]);
1560 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], t2);
1561 }
1562 } else {
1563 tcg_gen_mov_tl(t0, cpu_T[0]);
1564 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], t2);
1565 }
1566
1567 /* store */
1568 if (op1 == OR_TMP0) {
1569 gen_op_st_T0_A0(ot + s->mem_index);
1570 } else {
1571 gen_op_mov_reg_T0(ot, op1);
1572 }
1573
1574 /* Update eflags data because we cannot predict flags afterward. */
1575 gen_update_cc_op(s);
1576 set_cc_op(s, CC_OP_DYNAMIC);
1577
1578 tcg_gen_mov_tl(t1, cpu_T[0]);
1579
1580 shift_label = gen_new_label();
1581 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, shift_label);
1582
1583 tcg_gen_addi_tl(t2, t2, -1);
1584 tcg_gen_mov_tl(cpu_cc_dst, t1);
1585
1586 if (is_right) {
1587 if (is_arith) {
1588 tcg_gen_sar_tl(cpu_cc_src, t0, t2);
1589 } else {
1590 tcg_gen_shr_tl(cpu_cc_src, t0, t2);
1591 }
1592 } else {
1593 tcg_gen_shl_tl(cpu_cc_src, t0, t2);
1594 }
1595
1596 if (is_right) {
1597 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
1598 } else {
1599 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
1600 }
1601
1602 gen_set_label(shift_label);
1603
1604 tcg_temp_free(t0);
1605 tcg_temp_free(t1);
1606 tcg_temp_free(t2);
1607 }
1608
1609 static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
1610 int is_right, int is_arith)
1611 {
1612 int mask;
1613
1614 if (ot == OT_QUAD)
1615 mask = 0x3f;
1616 else
1617 mask = 0x1f;
1618
1619 /* load */
1620 if (op1 == OR_TMP0)
1621 gen_op_ld_T0_A0(ot + s->mem_index);
1622 else
1623 gen_op_mov_TN_reg(ot, 0, op1);
1624
1625 op2 &= mask;
1626 if (op2 != 0) {
1627 if (is_right) {
1628 if (is_arith) {
1629 gen_exts(ot, cpu_T[0]);
1630 tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1631 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
1632 } else {
1633 gen_extu(ot, cpu_T[0]);
1634 tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1635 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
1636 }
1637 } else {
1638 tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1639 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
1640 }
1641 }
1642
1643 /* store */
1644 if (op1 == OR_TMP0)
1645 gen_op_st_T0_A0(ot + s->mem_index);
1646 else
1647 gen_op_mov_reg_T0(ot, op1);
1648
1649 /* update eflags if non zero shift */
1650 if (op2 != 0) {
1651 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
1652 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1653 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1654 }
1655 }
1656
1657 static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
1658 {
1659 if (arg2 >= 0)
1660 tcg_gen_shli_tl(ret, arg1, arg2);
1661 else
1662 tcg_gen_shri_tl(ret, arg1, -arg2);
1663 }
1664
1665 static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
1666 int is_right)
1667 {
1668 target_ulong mask;
1669 int label1, label2, data_bits;
1670 TCGv t0, t1, t2, a0;
1671
1672 /* XXX: inefficient, but we must use local temps */
1673 t0 = tcg_temp_local_new();
1674 t1 = tcg_temp_local_new();
1675 t2 = tcg_temp_local_new();
1676 a0 = tcg_temp_local_new();
1677
1678 if (ot == OT_QUAD)
1679 mask = 0x3f;
1680 else
1681 mask = 0x1f;
1682
1683 /* load */
1684 if (op1 == OR_TMP0) {
1685 tcg_gen_mov_tl(a0, cpu_A0);
1686 gen_op_ld_v(ot + s->mem_index, t0, a0);
1687 } else {
1688 gen_op_mov_v_reg(ot, t0, op1);
1689 }
1690
1691 tcg_gen_mov_tl(t1, cpu_T[1]);
1692
1693 tcg_gen_andi_tl(t1, t1, mask);
1694
1695 /* Must test zero case to avoid using undefined behaviour in TCG
1696 shifts. */
1697 label1 = gen_new_label();
1698 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);
1699
1700 if (ot <= OT_WORD)
1701 tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
1702 else
1703 tcg_gen_mov_tl(cpu_tmp0, t1);
1704
1705 gen_extu(ot, t0);
1706 tcg_gen_mov_tl(t2, t0);
1707
1708 data_bits = 8 << ot;
1709 /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
1710 fix TCG definition) */
1711 if (is_right) {
1712 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
1713 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1714 tcg_gen_shl_tl(t0, t0, cpu_tmp0);
1715 } else {
1716 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
1717 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1718 tcg_gen_shr_tl(t0, t0, cpu_tmp0);
1719 }
1720 tcg_gen_or_tl(t0, t0, cpu_tmp4);
1721
1722 gen_set_label(label1);
1723 /* store */
1724 if (op1 == OR_TMP0) {
1725 gen_op_st_v(ot + s->mem_index, t0, a0);
1726 } else {
1727 gen_op_mov_reg_v(ot, op1, t0);
1728 }
1729
1730 /* update eflags. It is needed anyway most of the time, do it always. */
1731 gen_compute_eflags(s);
1732 assert(s->cc_op == CC_OP_EFLAGS);
1733
1734 label2 = gen_new_label();
1735 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2);
1736
1737 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1738 tcg_gen_xor_tl(cpu_tmp0, t2, t0);
1739 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1740 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1741 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1742 if (is_right) {
1743 tcg_gen_shri_tl(t0, t0, data_bits - 1);
1744 }
1745 tcg_gen_andi_tl(t0, t0, CC_C);
1746 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1747
1748 gen_set_label(label2);
1749
1750 tcg_temp_free(t0);
1751 tcg_temp_free(t1);
1752 tcg_temp_free(t2);
1753 tcg_temp_free(a0);
1754 }
1755
1756 static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
1757 int is_right)
1758 {
1759 int mask;
1760 int data_bits;
1761 TCGv t0, t1, a0;
1762
1763 /* XXX: inefficient, but we must use local temps */
1764 t0 = tcg_temp_local_new();
1765 t1 = tcg_temp_local_new();
1766 a0 = tcg_temp_local_new();
1767
1768 if (ot == OT_QUAD)
1769 mask = 0x3f;
1770 else
1771 mask = 0x1f;
1772
1773 /* load */
1774 if (op1 == OR_TMP0) {
1775 tcg_gen_mov_tl(a0, cpu_A0);
1776 gen_op_ld_v(ot + s->mem_index, t0, a0);
1777 } else {
1778 gen_op_mov_v_reg(ot, t0, op1);
1779 }
1780
1781 gen_extu(ot, t0);
1782 tcg_gen_mov_tl(t1, t0);
1783
1784 op2 &= mask;
1785 data_bits = 8 << ot;
1786 if (op2 != 0) {
1787 int shift = op2 & ((1 << (3 + ot)) - 1);
1788 if (is_right) {
1789 tcg_gen_shri_tl(cpu_tmp4, t0, shift);
1790 tcg_gen_shli_tl(t0, t0, data_bits - shift);
1791 }
1792 else {
1793 tcg_gen_shli_tl(cpu_tmp4, t0, shift);
1794 tcg_gen_shri_tl(t0, t0, data_bits - shift);
1795 }
1796 tcg_gen_or_tl(t0, t0, cpu_tmp4);
1797 }
1798
1799 /* store */
1800 if (op1 == OR_TMP0) {
1801 gen_op_st_v(ot + s->mem_index, t0, a0);
1802 } else {
1803 gen_op_mov_reg_v(ot, op1, t0);
1804 }
1805
1806 if (op2 != 0) {
1807 /* update eflags */
1808 gen_compute_eflags(s);
1809 assert(s->cc_op == CC_OP_EFLAGS);
1810
1811 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1812 tcg_gen_xor_tl(cpu_tmp0, t1, t0);
1813 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1814 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1815 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1816 if (is_right) {
1817 tcg_gen_shri_tl(t0, t0, data_bits - 1);
1818 }
1819 tcg_gen_andi_tl(t0, t0, CC_C);
1820 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1821 }
1822
1823 tcg_temp_free(t0);
1824 tcg_temp_free(t1);
1825 tcg_temp_free(a0);
1826 }
1827
1828 /* XXX: add faster immediate = 1 case */
1829 static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
1830 int is_right)
1831 {
1832 gen_compute_eflags(s);
1833 assert(s->cc_op == CC_OP_EFLAGS);
1834
1835 /* load */
1836 if (op1 == OR_TMP0)
1837 gen_op_ld_T0_A0(ot + s->mem_index);
1838 else
1839 gen_op_mov_TN_reg(ot, 0, op1);
1840
1841 if (is_right) {
1842 switch (ot) {
1843 case OT_BYTE:
1844 gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1845 break;
1846 case OT_WORD:
1847 gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1848 break;
1849 case OT_LONG:
1850 gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1851 break;
1852 #ifdef TARGET_X86_64
1853 case OT_QUAD:
1854 gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1855 break;
1856 #endif
1857 }
1858 } else {
1859 switch (ot) {
1860 case OT_BYTE:
1861 gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1862 break;
1863 case OT_WORD:
1864 gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1865 break;
1866 case OT_LONG:
1867 gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1868 break;
1869 #ifdef TARGET_X86_64
1870 case OT_QUAD:
1871 gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1872 break;
1873 #endif
1874 }
1875 }
1876 /* store */
1877 if (op1 == OR_TMP0)
1878 gen_op_st_T0_A0(ot + s->mem_index);
1879 else
1880 gen_op_mov_reg_T0(ot, op1);
1881 }
1882
1883 /* XXX: add faster immediate case */
1884 static void gen_shiftd_rm_T1(DisasContext *s, int ot, int op1,
1885 int is_right, TCGv count)
1886 {
1887 int label1, label2, data_bits;
1888 target_ulong mask;
1889 TCGv t0, t1, t2, a0;
1890
1891 t0 = tcg_temp_local_new();
1892 t1 = tcg_temp_local_new();
1893 t2 = tcg_temp_local_new();
1894 a0 = tcg_temp_local_new();
1895
1896 if (ot == OT_QUAD)
1897 mask = 0x3f;
1898 else
1899 mask = 0x1f;
1900
1901 /* load */
1902 if (op1 == OR_TMP0) {
1903 tcg_gen_mov_tl(a0, cpu_A0);
1904 gen_op_ld_v(ot + s->mem_index, t0, a0);
1905 } else {
1906 gen_op_mov_v_reg(ot, t0, op1);
1907 }
1908
1909 tcg_gen_andi_tl(t2, count, mask);
1910 tcg_gen_mov_tl(t1, cpu_T[1]);
1911
1912 /* Must test zero case to avoid using undefined behaviour in TCG
1913 shifts. */
1914 label1 = gen_new_label();
1915 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
1916
1917 tcg_gen_addi_tl(cpu_tmp5, t2, -1);
1918 if (ot == OT_WORD) {
1919 /* Note: we implement the Intel behaviour for shift count > 16 */
1920 if (is_right) {
1921 tcg_gen_andi_tl(t0, t0, 0xffff);
1922 tcg_gen_shli_tl(cpu_tmp0, t1, 16);
1923 tcg_gen_or_tl(t0, t0, cpu_tmp0);
1924 tcg_gen_ext32u_tl(t0, t0);
1925
1926 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
1927
1928 /* only needed if count > 16, but a test would complicate */
1929 tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
1930 tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);
1931
1932 tcg_gen_shr_tl(t0, t0, t2);
1933
1934 tcg_gen_or_tl(t0, t0, cpu_tmp0);
1935 } else {
1936 /* XXX: not optimal */
1937 tcg_gen_andi_tl(t0, t0, 0xffff);
1938 tcg_gen_shli_tl(t1, t1, 16);
1939 tcg_gen_or_tl(t1, t1, t0);
1940 tcg_gen_ext32u_tl(t1, t1);
1941
1942 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
1943 tcg_gen_subfi_tl(cpu_tmp0, 32, cpu_tmp5);
1944 tcg_gen_shr_tl(cpu_tmp5, t1, cpu_tmp0);
1945 tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp5);
1946
1947 tcg_gen_shl_tl(t0, t0, t2);
1948 tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
1949 tcg_gen_shr_tl(t1, t1, cpu_tmp5);
1950 tcg_gen_or_tl(t0, t0, t1);
1951 }
1952 } else {
1953 data_bits = 8 << ot;
1954 if (is_right) {
1955 if (ot == OT_LONG)
1956 tcg_gen_ext32u_tl(t0, t0);
1957
1958 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
1959
1960 tcg_gen_shr_tl(t0, t0, t2);
1961 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
1962 tcg_gen_shl_tl(t1, t1, cpu_tmp5);
1963 tcg_gen_or_tl(t0, t0, t1);
1964
1965 } else {
1966 if (ot == OT_LONG)
1967 tcg_gen_ext32u_tl(t1, t1);
1968
1969 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
1970
1971 tcg_gen_shl_tl(t0, t0, t2);
1972 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
1973 tcg_gen_shr_tl(t1, t1, cpu_tmp5);
1974 tcg_gen_or_tl(t0, t0, t1);
1975 }
1976 }
1977 tcg_gen_mov_tl(t1, cpu_tmp4);
1978
1979 gen_set_label(label1);
1980 /* store */
1981 if (op1 == OR_TMP0) {
1982 gen_op_st_v(ot + s->mem_index, t0, a0);
1983 } else {
1984 gen_op_mov_reg_v(ot, op1, t0);
1985 }
1986
1987 /* Update eflags data because we cannot predict flags afterward. */
1988 gen_update_cc_op(s);
1989 set_cc_op(s, CC_OP_DYNAMIC);
1990
1991 label2 = gen_new_label();
1992 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2);
1993
1994 tcg_gen_mov_tl(cpu_cc_src, t1);
1995 tcg_gen_mov_tl(cpu_cc_dst, t0);
1996 if (is_right) {
1997 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
1998 } else {
1999 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
2000 }
2001 gen_set_label(label2);
2002
2003 tcg_temp_free(t0);
2004 tcg_temp_free(t1);
2005 tcg_temp_free(t2);
2006 tcg_temp_free(a0);
2007 }
2008
2009 static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
2010 {
2011 if (s != OR_TMP1)
2012 gen_op_mov_TN_reg(ot, 1, s);
2013 switch(op) {
2014 case OP_ROL:
2015 gen_rot_rm_T1(s1, ot, d, 0);
2016 break;
2017 case OP_ROR:
2018 gen_rot_rm_T1(s1, ot, d, 1);
2019 break;
2020 case OP_SHL:
2021 case OP_SHL1:
2022 gen_shift_rm_T1(s1, ot, d, 0, 0);
2023 break;
2024 case OP_SHR:
2025 gen_shift_rm_T1(s1, ot, d, 1, 0);
2026 break;
2027 case OP_SAR:
2028 gen_shift_rm_T1(s1, ot, d, 1, 1);
2029 break;
2030 case OP_RCL:
2031 gen_rotc_rm_T1(s1, ot, d, 0);
2032 break;
2033 case OP_RCR:
2034 gen_rotc_rm_T1(s1, ot, d, 1);
2035 break;
2036 }
2037 }
2038
2039 static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
2040 {
2041 switch(op) {
2042 case OP_ROL:
2043 gen_rot_rm_im(s1, ot, d, c, 0);
2044 break;
2045 case OP_ROR:
2046 gen_rot_rm_im(s1, ot, d, c, 1);
2047 break;
2048 case OP_SHL:
2049 case OP_SHL1:
2050 gen_shift_rm_im(s1, ot, d, c, 0, 0);
2051 break;
2052 case OP_SHR:
2053 gen_shift_rm_im(s1, ot, d, c, 1, 0);
2054 break;
2055 case OP_SAR:
2056 gen_shift_rm_im(s1, ot, d, c, 1, 1);
2057 break;
2058 default:
2059 /* currently not optimized */
2060 gen_op_movl_T1_im(c);
2061 gen_shift(s1, op, ot, d, OR_TMP1);
2062 break;
2063 }
2064 }
2065
2066 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm,
2067 int *reg_ptr, int *offset_ptr)
2068 {
2069 target_long disp;
2070 int havesib;
2071 int base;
2072 int index;
2073 int scale;
2074 int opreg;
2075 int mod, rm, code, override, must_add_seg;
2076
2077 override = s->override;
2078 must_add_seg = s->addseg;
2079 if (override >= 0)
2080 must_add_seg = 1;
2081 mod = (modrm >> 6) & 3;
2082 rm = modrm & 7;
2083
2084 if (s->aflag) {
2085
2086 havesib = 0;
2087 base = rm;
2088 index = 0;
2089 scale = 0;
2090
2091 if (base == 4) {
2092 havesib = 1;
2093 code = cpu_ldub_code(env, s->pc++);
2094 scale = (code >> 6) & 3;
2095 index = ((code >> 3) & 7) | REX_X(s);
2096 base = (code & 7);
2097 }
2098 base |= REX_B(s);
2099
2100 switch (mod) {
2101 case 0:
2102 if ((base & 7) == 5) {
2103 base = -1;
2104 disp = (int32_t)cpu_ldl_code(env, s->pc);
2105 s->pc += 4;
2106 if (CODE64(s) && !havesib) {
2107 disp += s->pc + s->rip_offset;
2108 }
2109 } else {
2110 disp = 0;
2111 }
2112 break;
2113 case 1:
2114 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2115 break;
2116 default:
2117 case 2:
2118 disp = (int32_t)cpu_ldl_code(env, s->pc);
2119 s->pc += 4;
2120 break;
2121 }
2122
2123 if (base >= 0) {
2124 /* for correct popl handling with esp */
2125 if (base == 4 && s->popl_esp_hack)
2126 disp += s->popl_esp_hack;
2127 #ifdef TARGET_X86_64
2128 if (s->aflag == 2) {
2129 gen_op_movq_A0_reg(base);
2130 if (disp != 0) {
2131 gen_op_addq_A0_im(disp);
2132 }
2133 } else
2134 #endif
2135 {
2136 gen_op_movl_A0_reg(base);
2137 if (disp != 0)
2138 gen_op_addl_A0_im(disp);
2139 }
2140 } else {
2141 #ifdef TARGET_X86_64
2142 if (s->aflag == 2) {
2143 gen_op_movq_A0_im(disp);
2144 } else
2145 #endif
2146 {
2147 gen_op_movl_A0_im(disp);
2148 }
2149 }
2150 /* index == 4 means no index */
2151 if (havesib && (index != 4)) {
2152 #ifdef TARGET_X86_64
2153 if (s->aflag == 2) {
2154 gen_op_addq_A0_reg_sN(scale, index);
2155 } else
2156 #endif
2157 {
2158 gen_op_addl_A0_reg_sN(scale, index);
2159 }
2160 }
2161 if (must_add_seg) {
2162 if (override < 0) {
2163 if (base == R_EBP || base == R_ESP)
2164 override = R_SS;
2165 else
2166 override = R_DS;
2167 }
2168 #ifdef TARGET_X86_64
2169 if (s->aflag == 2) {
2170 gen_op_addq_A0_seg(override);
2171 } else
2172 #endif
2173 {
2174 gen_op_addl_A0_seg(s, override);
2175 }
2176 }
2177 } else {
2178 switch (mod) {
2179 case 0:
2180 if (rm == 6) {
2181 disp = cpu_lduw_code(env, s->pc);
2182 s->pc += 2;
2183 gen_op_movl_A0_im(disp);
2184 rm = 0; /* avoid SS override */
2185 goto no_rm;
2186 } else {
2187 disp = 0;
2188 }
2189 break;
2190 case 1:
2191 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2192 break;
2193 default:
2194 case 2:
2195 disp = cpu_lduw_code(env, s->pc);
2196 s->pc += 2;
2197 break;
2198 }
2199 switch(rm) {
2200 case 0:
2201 gen_op_movl_A0_reg(R_EBX);
2202 gen_op_addl_A0_reg_sN(0, R_ESI);
2203 break;
2204 case 1:
2205 gen_op_movl_A0_reg(R_EBX);
2206 gen_op_addl_A0_reg_sN(0, R_EDI);
2207 break;
2208 case 2:
2209 gen_op_movl_A0_reg(R_EBP);
2210 gen_op_addl_A0_reg_sN(0, R_ESI);
2211 break;
2212 case 3:
2213 gen_op_movl_A0_reg(R_EBP);
2214 gen_op_addl_A0_reg_sN(0, R_EDI);
2215 break;
2216 case 4:
2217 gen_op_movl_A0_reg(R_ESI);
2218 break;
2219 case 5:
2220 gen_op_movl_A0_reg(R_EDI);
2221 break;
2222 case 6:
2223 gen_op_movl_A0_reg(R_EBP);
2224 break;
2225 default:
2226 case 7:
2227 gen_op_movl_A0_reg(R_EBX);
2228 break;
2229 }
2230 if (disp != 0)
2231 gen_op_addl_A0_im(disp);
2232 gen_op_andl_A0_ffff();
2233 no_rm:
2234 if (must_add_seg) {
2235 if (override < 0) {
2236 if (rm == 2 || rm == 3 || rm == 6)
2237 override = R_SS;
2238 else
2239 override = R_DS;
2240 }
2241 gen_op_addl_A0_seg(s, override);
2242 }
2243 }
2244
2245 opreg = OR_A0;
2246 disp = 0;
2247 *reg_ptr = opreg;
2248 *offset_ptr = disp;
2249 }
2250
2251 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2252 {
2253 int mod, rm, base, code;
2254
2255 mod = (modrm >> 6) & 3;
2256 if (mod == 3)
2257 return;
2258 rm = modrm & 7;
2259
2260 if (s->aflag) {
2261
2262 base = rm;
2263
2264 if (base == 4) {
2265 code = cpu_ldub_code(env, s->pc++);
2266 base = (code & 7);
2267 }
2268
2269 switch (mod) {
2270 case 0:
2271 if (base == 5) {
2272 s->pc += 4;
2273 }
2274 break;
2275 case 1:
2276 s->pc++;
2277 break;
2278 default:
2279 case 2:
2280 s->pc += 4;
2281 break;
2282 }
2283 } else {
2284 switch (mod) {
2285 case 0:
2286 if (rm == 6) {
2287 s->pc += 2;
2288 }
2289 break;
2290 case 1:
2291 s->pc++;
2292 break;
2293 default:
2294 case 2:
2295 s->pc += 2;
2296 break;
2297 }
2298 }
2299 }
2300
2301 /* used for LEA and MOV AX, mem */
2302 static void gen_add_A0_ds_seg(DisasContext *s)
2303 {
2304 int override, must_add_seg;
2305 must_add_seg = s->addseg;
2306 override = R_DS;
2307 if (s->override >= 0) {
2308 override = s->override;
2309 must_add_seg = 1;
2310 }
2311 if (must_add_seg) {
2312 #ifdef TARGET_X86_64
2313 if (CODE64(s)) {
2314 gen_op_addq_A0_seg(override);
2315 } else
2316 #endif
2317 {
2318 gen_op_addl_A0_seg(s, override);
2319 }
2320 }
2321 }
2322
2323 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2324 OR_TMP0 */
2325 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2326 int ot, int reg, int is_store)
2327 {
2328 int mod, rm, opreg, disp;
2329
2330 mod = (modrm >> 6) & 3;
2331 rm = (modrm & 7) | REX_B(s);
2332 if (mod == 3) {
2333 if (is_store) {
2334 if (reg != OR_TMP0)
2335 gen_op_mov_TN_reg(ot, 0, reg);
2336 gen_op_mov_reg_T0(ot, rm);
2337 } else {
2338 gen_op_mov_TN_reg(ot, 0, rm);
2339 if (reg != OR_TMP0)
2340 gen_op_mov_reg_T0(ot, reg);
2341 }
2342 } else {
2343 gen_lea_modrm(env, s, modrm, &opreg, &disp);
2344 if (is_store) {
2345 if (reg != OR_TMP0)
2346 gen_op_mov_TN_reg(ot, 0, reg);
2347 gen_op_st_T0_A0(ot + s->mem_index);
2348 } else {
2349 gen_op_ld_T0_A0(ot + s->mem_index);
2350 if (reg != OR_TMP0)
2351 gen_op_mov_reg_T0(ot, reg);
2352 }
2353 }
2354 }
2355
2356 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, int ot)
2357 {
2358 uint32_t ret;
2359
2360 switch(ot) {
2361 case OT_BYTE:
2362 ret = cpu_ldub_code(env, s->pc);
2363 s->pc++;
2364 break;
2365 case OT_WORD:
2366 ret = cpu_lduw_code(env, s->pc);
2367 s->pc += 2;
2368 break;
2369 default:
2370 case OT_LONG:
2371 ret = cpu_ldl_code(env, s->pc);
2372 s->pc += 4;
2373 break;
2374 }
2375 return ret;
2376 }
2377
2378 static inline int insn_const_size(unsigned int ot)
2379 {
2380 if (ot <= OT_LONG)
2381 return 1 << ot;
2382 else
2383 return 4;
2384 }
2385
2386 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2387 {
2388 TranslationBlock *tb;
2389 target_ulong pc;
2390
2391 pc = s->cs_base + eip;
2392 tb = s->tb;
2393 /* NOTE: we handle the case where the TB spans two pages here */
2394 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2395 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2396 /* jump to same page: we can use a direct jump */
2397 tcg_gen_goto_tb(tb_num);
2398 gen_jmp_im(eip);
2399 tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
2400 } else {
2401 /* jump to another page: currently not optimized */
2402 gen_jmp_im(eip);
2403 gen_eob(s);
2404 }
2405 }
2406
2407 static inline void gen_jcc(DisasContext *s, int b,
2408 target_ulong val, target_ulong next_eip)
2409 {
2410 int l1, l2;
2411
2412 if (s->jmp_opt) {
2413 l1 = gen_new_label();
2414 gen_jcc1(s, b, l1);
2415
2416 gen_goto_tb(s, 0, next_eip);
2417
2418 gen_set_label(l1);
2419 gen_goto_tb(s, 1, val);
2420 s->is_jmp = DISAS_TB_JUMP;
2421 } else {
2422 l1 = gen_new_label();
2423 l2 = gen_new_label();
2424 gen_jcc1(s, b, l1);
2425
2426 gen_jmp_im(next_eip);
2427 tcg_gen_br(l2);
2428
2429 gen_set_label(l1);
2430 gen_jmp_im(val);
2431 gen_set_label(l2);
2432 gen_eob(s);
2433 }
2434 }
2435
2436 static void gen_cmovcc1(CPUX86State *env, DisasContext *s, int ot, int b,
2437 int modrm, int reg)
2438 {
2439 CCPrepare cc;
2440
2441 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2442
2443 cc = gen_prepare_cc(s, b, cpu_T[1]);
2444 if (cc.mask != -1) {
2445 TCGv t0 = tcg_temp_new();
2446 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2447 cc.reg = t0;
2448 }
2449 if (!cc.use_reg2) {
2450 cc.reg2 = tcg_const_tl(cc.imm);
2451 }
2452
2453 tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
2454 cpu_T[0], cpu_regs[reg]);
2455 gen_op_mov_reg_T0(ot, reg);
2456
2457 if (cc.mask != -1) {
2458 tcg_temp_free(cc.reg);
2459 }
2460 if (!cc.use_reg2) {
2461 tcg_temp_free(cc.reg2);
2462 }
2463 }
2464
2465 static inline void gen_op_movl_T0_seg(int seg_reg)
2466 {
2467 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2468 offsetof(CPUX86State,segs[seg_reg].selector));
2469 }
2470
2471 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2472 {
2473 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2474 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2475 offsetof(CPUX86State,segs[seg_reg].selector));
2476 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2477 tcg_gen_st_tl(cpu_T[0], cpu_env,
2478 offsetof(CPUX86State,segs[seg_reg].base));
2479 }
2480
2481 /* move T0 to seg_reg and compute if the CPU state may change. Never
2482 call this function with seg_reg == R_CS */
2483 static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2484 {
2485 if (s->pe && !s->vm86) {
2486 /* XXX: optimize by finding processor state dynamically */
2487 gen_update_cc_op(s);
2488 gen_jmp_im(cur_eip);
2489 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2490 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
2491 /* abort translation because the addseg value may change or
2492 because ss32 may change. For R_SS, translation must always
2493 stop as a special handling must be done to disable hardware
2494 interrupts for the next instruction */
2495 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2496 s->is_jmp = DISAS_TB_JUMP;
2497 } else {
2498 gen_op_movl_seg_T0_vm(seg_reg);
2499 if (seg_reg == R_SS)
2500 s->is_jmp = DISAS_TB_JUMP;
2501 }
2502 }
2503
2504 static inline int svm_is_rep(int prefixes)
2505 {
2506 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2507 }
2508
2509 static inline void
2510 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2511 uint32_t type, uint64_t param)
2512 {
2513 /* no SVM activated; fast case */
2514 if (likely(!(s->flags & HF_SVMI_MASK)))
2515 return;
2516 gen_update_cc_op(s);
2517 gen_jmp_im(pc_start - s->cs_base);
2518 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
2519 tcg_const_i64(param));
2520 }
2521
2522 static inline void
2523 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2524 {
2525 gen_svm_check_intercept_param(s, pc_start, type, 0);
2526 }
2527
2528 static inline void gen_stack_update(DisasContext *s, int addend)
2529 {
2530 #ifdef TARGET_X86_64
2531 if (CODE64(s)) {
2532 gen_op_add_reg_im(2, R_ESP, addend);
2533 } else
2534 #endif
2535 if (s->ss32) {
2536 gen_op_add_reg_im(1, R_ESP, addend);
2537 } else {
2538 gen_op_add_reg_im(0, R_ESP, addend);
2539 }
2540 }
2541
2542 /* generate a push. It depends on ss32, addseg and dflag */
2543 static void gen_push_T0(DisasContext *s)
2544 {
2545 #ifdef TARGET_X86_64
2546 if (CODE64(s)) {
2547 gen_op_movq_A0_reg(R_ESP);
2548 if (s->dflag) {
2549 gen_op_addq_A0_im(-8);
2550 gen_op_st_T0_A0(OT_QUAD + s->mem_index);
2551 } else {
2552 gen_op_addq_A0_im(-2);
2553 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2554 }
2555 gen_op_mov_reg_A0(2, R_ESP);
2556 } else
2557 #endif
2558 {
2559 gen_op_movl_A0_reg(R_ESP);
2560 if (!s->dflag)
2561 gen_op_addl_A0_im(-2);
2562 else
2563 gen_op_addl_A0_im(-4);
2564 if (s->ss32) {
2565 if (s->addseg) {
2566 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2567 gen_op_addl_A0_seg(s, R_SS);
2568 }
2569 } else {
2570 gen_op_andl_A0_ffff();
2571 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2572 gen_op_addl_A0_seg(s, R_SS);
2573 }
2574 gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
2575 if (s->ss32 && !s->addseg)
2576 gen_op_mov_reg_A0(1, R_ESP);
2577 else
2578 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
2579 }
2580 }
2581
2582 /* generate a push. It depends on ss32, addseg and dflag */
2583 /* slower version for T1, only used for call Ev */
2584 static void gen_push_T1(DisasContext *s)
2585 {
2586 #ifdef TARGET_X86_64
2587 if (CODE64(s)) {
2588 gen_op_movq_A0_reg(R_ESP);
2589 if (s->dflag) {
2590 gen_op_addq_A0_im(-8);
2591 gen_op_st_T1_A0(OT_QUAD + s->mem_index);
2592 } else {
2593 gen_op_addq_A0_im(-2);
2594 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2595 }
2596 gen_op_mov_reg_A0(2, R_ESP);
2597 } else
2598 #endif
2599 {
2600 gen_op_movl_A0_reg(R_ESP);
2601 if (!s->dflag)
2602 gen_op_addl_A0_im(-2);
2603 else
2604 gen_op_addl_A0_im(-4);
2605 if (s->ss32) {
2606 if (s->addseg) {
2607 gen_op_addl_A0_seg(s, R_SS);
2608 }
2609 } else {
2610 gen_op_andl_A0_ffff();
2611 gen_op_addl_A0_seg(s, R_SS);
2612 }
2613 gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
2614
2615 if (s->ss32 && !s->addseg)
2616 gen_op_mov_reg_A0(1, R_ESP);
2617 else
2618 gen_stack_update(s, (-2) << s->dflag);
2619 }
2620 }
2621
2622 /* two step pop is necessary for precise exceptions */
2623 static void gen_pop_T0(DisasContext *s)
2624 {
2625 #ifdef TARGET_X86_64
2626 if (CODE64(s)) {
2627 gen_op_movq_A0_reg(R_ESP);
2628 gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
2629 } else
2630 #endif
2631 {
2632 gen_op_movl_A0_reg(R_ESP);
2633 if (s->ss32) {
2634 if (s->addseg)
2635 gen_op_addl_A0_seg(s, R_SS);
2636 } else {
2637 gen_op_andl_A0_ffff();
2638 gen_op_addl_A0_seg(s, R_SS);
2639 }
2640 gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
2641 }
2642 }
2643
2644 static void gen_pop_update(DisasContext *s)
2645 {
2646 #ifdef TARGET_X86_64
2647 if (CODE64(s) && s->dflag) {
2648 gen_stack_update(s, 8);
2649 } else
2650 #endif
2651 {
2652 gen_stack_update(s, 2 << s->dflag);
2653 }
2654 }
2655
2656 static void gen_stack_A0(DisasContext *s)
2657 {
2658 gen_op_movl_A0_reg(R_ESP);
2659 if (!s->ss32)
2660 gen_op_andl_A0_ffff();
2661 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2662 if (s->addseg)
2663 gen_op_addl_A0_seg(s, R_SS);
2664 }
2665
2666 /* NOTE: wrap around in 16 bit not fully handled */
2667 static void gen_pusha(DisasContext *s)
2668 {
2669 int i;
2670 gen_op_movl_A0_reg(R_ESP);
2671 gen_op_addl_A0_im(-16 << s->dflag);
2672 if (!s->ss32)
2673 gen_op_andl_A0_ffff();
2674 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2675 if (s->addseg)
2676 gen_op_addl_A0_seg(s, R_SS);
2677 for(i = 0;i < 8; i++) {
2678 gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
2679 gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
2680 gen_op_addl_A0_im(2 << s->dflag);
2681 }
2682 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2683 }
2684
2685 /* NOTE: wrap around in 16 bit not fully handled */
2686 static void gen_popa(DisasContext *s)
2687 {
2688 int i;
2689 gen_op_movl_A0_reg(R_ESP);
2690 if (!s->ss32)
2691 gen_op_andl_A0_ffff();
2692 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2693 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2694 if (s->addseg)
2695 gen_op_addl_A0_seg(s, R_SS);
2696 for(i = 0;i < 8; i++) {
2697 /* ESP is not reloaded */
2698 if (i != 3) {
2699 gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
2700 gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
2701 }
2702 gen_op_addl_A0_im(2 << s->dflag);
2703 }
2704 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2705 }
2706
2707 static void gen_enter(DisasContext *s, int esp_addend, int level)
2708 {
2709 int ot, opsize;
2710
2711 level &= 0x1f;
2712 #ifdef TARGET_X86_64
2713 if (CODE64(s)) {
2714 ot = s->dflag ? OT_QUAD : OT_WORD;
2715 opsize = 1 << ot;
2716
2717 gen_op_movl_A0_reg(R_ESP);
2718 gen_op_addq_A0_im(-opsize);
2719 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2720
2721 /* push bp */
2722 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2723 gen_op_st_T0_A0(ot + s->mem_index);
2724 if (level) {
2725 /* XXX: must save state */
2726 gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
2727 tcg_const_i32((ot == OT_QUAD)),
2728 cpu_T[1]);
2729 }
2730 gen_op_mov_reg_T1(ot, R_EBP);
2731 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2732 gen_op_mov_reg_T1(OT_QUAD, R_ESP);
2733 } else
2734 #endif
2735 {
2736 ot = s->dflag + OT_WORD;
2737 opsize = 2 << s->dflag;
2738
2739 gen_op_movl_A0_reg(R_ESP);
2740 gen_op_addl_A0_im(-opsize);
2741 if (!s->ss32)
2742 gen_op_andl_A0_ffff();
2743 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2744 if (s->addseg)
2745 gen_op_addl_A0_seg(s, R_SS);
2746 /* push bp */
2747 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2748 gen_op_st_T0_A0(ot + s->mem_index);
2749 if (level) {
2750 /* XXX: must save state */
2751 gen_helper_enter_level(cpu_env, tcg_const_i32(level),
2752 tcg_const_i32(s->dflag),
2753 cpu_T[1]);
2754 }
2755 gen_op_mov_reg_T1(ot, R_EBP);
2756 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2757 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2758 }
2759 }
2760
2761 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2762 {
2763 gen_update_cc_op(s);
2764 gen_jmp_im(cur_eip);
2765 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
2766 s->is_jmp = DISAS_TB_JUMP;
2767 }
2768
2769 /* an interrupt is different from an exception because of the
2770 privilege checks */
2771 static void gen_interrupt(DisasContext *s, int intno,
2772 target_ulong cur_eip, target_ulong next_eip)
2773 {
2774 gen_update_cc_op(s);
2775 gen_jmp_im(cur_eip);
2776 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
2777 tcg_const_i32(next_eip - cur_eip));
2778 s->is_jmp = DISAS_TB_JUMP;
2779 }
2780
2781 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2782 {
2783 gen_update_cc_op(s);
2784 gen_jmp_im(cur_eip);
2785 gen_helper_debug(cpu_env);
2786 s->is_jmp = DISAS_TB_JUMP;
2787 }
2788
2789 /* generate a generic end of block. Trace exception is also generated
2790 if needed */
2791 static void gen_eob(DisasContext *s)
2792 {
2793 gen_update_cc_op(s);
2794 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2795 gen_helper_reset_inhibit_irq(cpu_env);
2796 }
2797 if (s->tb->flags & HF_RF_MASK) {
2798 gen_helper_reset_rf(cpu_env);
2799 }
2800 if (s->singlestep_enabled) {
2801 gen_helper_debug(cpu_env);
2802 } else if (s->tf) {
2803 gen_helper_single_step(cpu_env);
2804 } else {
2805 tcg_gen_exit_tb(0);
2806 }
2807 s->is_jmp = DISAS_TB_JUMP;
2808 }
2809
2810 /* generate a jump to eip. No segment change must happen before as a
2811 direct call to the next block may occur */
2812 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2813 {
2814 gen_update_cc_op(s);
2815 set_cc_op(s, CC_OP_DYNAMIC);
2816 if (s->jmp_opt) {
2817 gen_goto_tb(s, tb_num, eip);
2818 s->is_jmp = DISAS_TB_JUMP;
2819 } else {
2820 gen_jmp_im(eip);
2821 gen_eob(s);
2822 }
2823 }
2824
2825 static void gen_jmp(DisasContext *s, target_ulong eip)
2826 {
2827 gen_jmp_tb(s, eip, 0);
2828 }
2829
2830 static inline void gen_ldq_env_A0(int idx, int offset)
2831 {
2832 int mem_index = (idx >> 2) - 1;
2833 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2834 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2835 }
2836
2837 static inline void gen_stq_env_A0(int idx, int offset)
2838 {
2839 int mem_index = (idx >> 2) - 1;
2840 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2841 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2842 }
2843
2844 static inline void gen_ldo_env_A0(int idx, int offset)
2845 {
2846 int mem_index = (idx >> 2) - 1;
2847 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2848 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2849 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2850 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2851 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2852 }
2853
2854 static inline void gen_sto_env_A0(int idx, int offset)
2855 {
2856 int mem_index = (idx >> 2) - 1;
2857 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2858 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2859 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2860 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2861 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2862 }
2863
2864 static inline void gen_op_movo(int d_offset, int s_offset)
2865 {
2866 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2867 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2868 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2869 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
2870 }
2871
2872 static inline void gen_op_movq(int d_offset, int s_offset)
2873 {
2874 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2875 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2876 }
2877
2878 static inline void gen_op_movl(int d_offset, int s_offset)
2879 {
2880 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2881 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2882 }
2883
2884 static inline void gen_op_movq_env_0(int d_offset)
2885 {
2886 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2887 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2888 }
2889
2890 typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2891 typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2892 typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2893 typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2894 typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2895 typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2896 TCGv_i32 val);
2897 typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
2898 typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2899 TCGv val);
2900
2901 #define SSE_SPECIAL ((void *)1)
2902 #define SSE_DUMMY ((void *)2)
2903
2904 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2905 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2906 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
2907
2908 static const SSEFunc_0_epp sse_op_table1[256][4] = {
2909 /* 3DNow! extensions */
2910 [0x0e] = { SSE_DUMMY }, /* femms */
2911 [0x0f] = { SSE_DUMMY }, /* pf... */
2912 /* pure SSE operations */
2913 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2914 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2915 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2916 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2917 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2918 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2919 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2920 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2921
2922 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2923 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2924 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2925 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2926 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2927 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
2928 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2929 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2930 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2931 [0x51] = SSE_FOP(sqrt),
2932 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2933 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2934 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2935 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2936 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2937 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2938 [0x58] = SSE_FOP(add),
2939 [0x59] = SSE_FOP(mul),
2940 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2941 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2942 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2943 [0x5c] = SSE_FOP(sub),
2944 [0x5d] = SSE_FOP(min),
2945 [0x5e] = SSE_FOP(div),
2946 [0x5f] = SSE_FOP(max),
2947
2948 [0xc2] = SSE_FOP(cmpeq),
2949 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2950 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
2951
2952 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2953 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2954
2955 /* MMX ops and their SSE extensions */
2956 [0x60] = MMX_OP2(punpcklbw),
2957 [0x61] = MMX_OP2(punpcklwd),
2958 [0x62] = MMX_OP2(punpckldq),
2959 [0x63] = MMX_OP2(packsswb),
2960 [0x64] = MMX_OP2(pcmpgtb),
2961 [0x65] = MMX_OP2(pcmpgtw),
2962 [0x66] = MMX_OP2(pcmpgtl),
2963 [0x67] = MMX_OP2(packuswb),
2964 [0x68] = MMX_OP2(punpckhbw),
2965 [0x69] = MMX_OP2(punpckhwd),
2966 [0x6a] = MMX_OP2(punpckhdq),
2967 [0x6b] = MMX_OP2(packssdw),
2968 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2969 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2970 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2971 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movqdu */
2972 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2973 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2974 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2975 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2976 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2977 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2978 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2979 [0x74] = MMX_OP2(pcmpeqb),
2980 [0x75] = MMX_OP2(pcmpeqw),
2981 [0x76] = MMX_OP2(pcmpeql),
2982 [0x77] = { SSE_DUMMY }, /* emms */
2983 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2984 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
2985 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2986 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2987 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
2988 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2989 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2990 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2991 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2992 [0xd1] = MMX_OP2(psrlw),
2993 [0xd2] = MMX_OP2(psrld),
2994 [0xd3] = MMX_OP2(psrlq),
2995 [0xd4] = MMX_OP2(paddq),
2996 [0xd5] = MMX_OP2(pmullw),
2997 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2998 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2999 [0xd8] = MMX_OP2(psubusb),
3000 [0xd9] = MMX_OP2(psubusw),
3001 [0xda] = MMX_OP2(pminub),
3002 [0xdb] = MMX_OP2(pand),
3003 [0xdc] = MMX_OP2(paddusb),
3004 [0xdd] = MMX_OP2(paddusw),
3005 [0xde] = MMX_OP2(pmaxub),
3006 [0xdf] = MMX_OP2(pandn),
3007 [0xe0] = MMX_OP2(pavgb),
3008 [0xe1] = MMX_OP2(psraw),
3009 [0xe2] = MMX_OP2(psrad),
3010 [0xe3] = MMX_OP2(pavgw),
3011 [0xe4] = MMX_OP2(pmulhuw),
3012 [0xe5] = MMX_OP2(pmulhw),
3013 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
3014 [0xe7] = { SSE_SPECIAL , SSE_SPECIAL }, /* movntq, movntq */
3015 [0xe8] = MMX_OP2(psubsb),
3016 [0xe9] = MMX_OP2(psubsw),
3017 [0xea] = MMX_OP2(pminsw),
3018 [0xeb] = MMX_OP2(por),
3019 [0xec] = MMX_OP2(paddsb),
3020 [0xed] = MMX_OP2(paddsw),
3021 [0xee] = MMX_OP2(pmaxsw),
3022 [0xef] = MMX_OP2(pxor),
3023 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
3024 [0xf1] = MMX_OP2(psllw),
3025 [0xf2] = MMX_OP2(pslld),
3026 [0xf3] = MMX_OP2(psllq),
3027 [0xf4] = MMX_OP2(pmuludq),
3028 [0xf5] = MMX_OP2(pmaddwd),
3029 [0xf6] = MMX_OP2(psadbw),
3030 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
3031 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
3032 [0xf8] = MMX_OP2(psubb),
3033 [0xf9] = MMX_OP2(psubw),
3034 [0xfa] = MMX_OP2(psubl),
3035 [0xfb] = MMX_OP2(psubq),
3036 [0xfc] = MMX_OP2(paddb),
3037 [0xfd] = MMX_OP2(paddw),
3038 [0xfe] = MMX_OP2(paddl),
3039 };
3040
3041 static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
3042 [0 + 2] = MMX_OP2(psrlw),
3043 [0 + 4] = MMX_OP2(psraw),
3044 [0 + 6] = MMX_OP2(psllw),
3045 [8 + 2] = MMX_OP2(psrld),
3046 [8 + 4] = MMX_OP2(psrad),
3047 [8 + 6] = MMX_OP2(pslld),
3048 [16 + 2] = MMX_OP2(psrlq),
3049 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
3050 [16 + 6] = MMX_OP2(psllq),
3051 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
3052 };
3053
3054 static const SSEFunc_0_epi sse_op_table3ai[] = {
3055 gen_helper_cvtsi2ss,
3056 gen_helper_cvtsi2sd
3057 };
3058
3059 #ifdef TARGET_X86_64
3060 static const SSEFunc_0_epl sse_op_table3aq[] = {
3061 gen_helper_cvtsq2ss,
3062 gen_helper_cvtsq2sd
3063 };
3064 #endif
3065
3066 static const SSEFunc_i_ep sse_op_table3bi[] = {
3067 gen_helper_cvttss2si,
3068 gen_helper_cvtss2si,
3069 gen_helper_cvttsd2si,
3070 gen_helper_cvtsd2si
3071 };
3072
3073 #ifdef TARGET_X86_64
3074 static const SSEFunc_l_ep sse_op_table3bq[] = {
3075 gen_helper_cvttss2sq,
3076 gen_helper_cvtss2sq,
3077 gen_helper_cvttsd2sq,
3078 gen_helper_cvtsd2sq
3079 };
3080 #endif
3081
3082 static const SSEFunc_0_epp sse_op_table4[8][4] = {
3083 SSE_FOP(cmpeq),
3084 SSE_FOP(cmplt),
3085 SSE_FOP(cmple),
3086 SSE_FOP(cmpunord),
3087 SSE_FOP(cmpneq),
3088 SSE_FOP(cmpnlt),
3089 SSE_FOP(cmpnle),
3090 SSE_FOP(cmpord),
3091 };
3092
3093 static const SSEFunc_0_epp sse_op_table5[256] = {
3094 [0x0c] = gen_helper_pi2fw,
3095 [0x0d] = gen_helper_pi2fd,
3096 [0x1c] = gen_helper_pf2iw,
3097 [0x1d] = gen_helper_pf2id,
3098 [0x8a] = gen_helper_pfnacc,
3099 [0x8e] = gen_helper_pfpnacc,
3100 [0x90] = gen_helper_pfcmpge,
3101 [0x94] = gen_helper_pfmin,
3102 [0x96] = gen_helper_pfrcp,
3103 [0x97] = gen_helper_pfrsqrt,
3104 [0x9a] = gen_helper_pfsub,
3105 [0x9e] = gen_helper_pfadd,
3106 [0xa0] = gen_helper_pfcmpgt,
3107 [0xa4] = gen_helper_pfmax,
3108 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
3109 [0xa7] = gen_helper_movq, /* pfrsqit1 */
3110 [0xaa] = gen_helper_pfsubr,
3111 [0xae] = gen_helper_pfacc,
3112 [0xb0] = gen_helper_pfcmpeq,
3113 [0xb4] = gen_helper_pfmul,
3114 [0xb6] = gen_helper_movq, /* pfrcpit2 */
3115 [0xb7] = gen_helper_pmulhrw_mmx,
3116 [0xbb] = gen_helper_pswapd,
3117 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
3118 };
3119
3120 struct SSEOpHelper_epp {
3121 SSEFunc_0_epp op[2];
3122 uint32_t ext_mask;
3123 };
3124
3125 struct SSEOpHelper_eppi {
3126 SSEFunc_0_eppi op[2];
3127 uint32_t ext_mask;
3128 };
3129
3130 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
3131 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3132 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
3133 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
3134
3135 static const struct SSEOpHelper_epp sse_op_table6[256] = {
3136 [0x00] = SSSE3_OP(pshufb),
3137 [0x01] = SSSE3_OP(phaddw),
3138 [0x02] = SSSE3_OP(phaddd),
3139 [0x03] = SSSE3_OP(phaddsw),
3140 [0x04] = SSSE3_OP(pmaddubsw),
3141 [0x05] = SSSE3_OP(phsubw),
3142 [0x06] = SSSE3_OP(phsubd),
3143 [0x07] = SSSE3_OP(phsubsw),
3144 [0x08] = SSSE3_OP(psignb),
3145 [0x09] = SSSE3_OP(psignw),
3146 [0x0a] = SSSE3_OP(psignd),
3147 [0x0b] = SSSE3_OP(pmulhrsw),
3148 [0x10] = SSE41_OP(pblendvb),
3149 [0x14] = SSE41_OP(blendvps),
3150 [0x15] = SSE41_OP(blendvpd),
3151 [0x17] = SSE41_OP(ptest),
3152 [0x1c] = SSSE3_OP(pabsb),
3153 [0x1d] = SSSE3_OP(pabsw),
3154 [0x1e] = SSSE3_OP(pabsd),
3155 [0x20] = SSE41_OP(pmovsxbw),
3156 [0x21] = SSE41_OP(pmovsxbd),
3157 [0x22] = SSE41_OP(pmovsxbq),
3158 [0x23] = SSE41_OP(pmovsxwd),
3159 [0x24] = SSE41_OP(pmovsxwq),
3160 [0x25] = SSE41_OP(pmovsxdq),
3161 [0x28] = SSE41_OP(pmuldq),
3162 [0x29] = SSE41_OP(pcmpeqq),
3163 [0x2a] = SSE41_SPECIAL, /* movntqda */
3164 [0x2b] = SSE41_OP(packusdw),
3165 [0x30] = SSE41_OP(pmovzxbw),
3166 [0x31] = SSE41_OP(pmovzxbd),
3167 [0x32] = SSE41_OP(pmovzxbq),
3168 [0x33] = SSE41_OP(pmovzxwd),
3169 [0x34] = SSE41_OP(pmovzxwq),
3170 [0x35] = SSE41_OP(pmovzxdq),
3171 [0x37] = SSE42_OP(pcmpgtq),
3172 [0x38] = SSE41_OP(pminsb),
3173 [0x39] = SSE41_OP(pminsd),
3174 [0x3a] = SSE41_OP(pminuw),
3175 [0x3b] = SSE41_OP(pminud),
3176 [0x3c] = SSE41_OP(pmaxsb),
3177 [0x3d] = SSE41_OP(pmaxsd),
3178 [0x3e] = SSE41_OP(pmaxuw),
3179 [0x3f] = SSE41_OP(pmaxud),
3180 [0x40] = SSE41_OP(pmulld),
3181 [0x41] = SSE41_OP(phminposuw),
3182 };
3183
3184 static const struct SSEOpHelper_eppi sse_op_table7[256] = {
3185 [0x08] = SSE41_OP(roundps),
3186 [0x09] = SSE41_OP(roundpd),
3187 [0x0a] = SSE41_OP(roundss),
3188 [0x0b] = SSE41_OP(roundsd),
3189 [0x0c] = SSE41_OP(blendps),
3190 [0x0d] = SSE41_OP(blendpd),
3191 [0x0e] = SSE41_OP(pblendw),
3192 [0x0f] = SSSE3_OP(palignr),
3193 [0x14] = SSE41_SPECIAL, /* pextrb */
3194 [0x15] = SSE41_SPECIAL, /* pextrw */
3195 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
3196 [0x17] = SSE41_SPECIAL, /* extractps */
3197 [0x20] = SSE41_SPECIAL, /* pinsrb */
3198 [0x21] = SSE41_SPECIAL, /* insertps */
3199 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
3200 [0x40] = SSE41_OP(dpps),
3201 [0x41] = SSE41_OP(dppd),
3202 [0x42] = SSE41_OP(mpsadbw),
3203 [0x60] = SSE42_OP(pcmpestrm),
3204 [0x61] = SSE42_OP(pcmpestri),
3205 [0x62] = SSE42_OP(pcmpistrm),
3206 [0x63] = SSE42_OP(pcmpistri),
3207 };
3208
3209 static void gen_sse(CPUX86State *env, DisasContext *s, int b,
3210 target_ulong pc_start, int rex_r)
3211 {
3212 int b1, op1_offset, op2_offset, is_xmm, val, ot;
3213 int modrm, mod, rm, reg, reg_addr, offset_addr;
3214 SSEFunc_0_epp sse_fn_epp;
3215 SSEFunc_0_eppi sse_fn_eppi;
3216 SSEFunc_0_ppi sse_fn_ppi;
3217 SSEFunc_0_eppt sse_fn_eppt;
3218
3219 b &= 0xff;
3220 if (s->prefix & PREFIX_DATA)
3221 b1 = 1;
3222 else if (s->prefix & PREFIX_REPZ)
3223 b1 = 2;
3224 else if (s->prefix & PREFIX_REPNZ)
3225 b1 = 3;
3226 else
3227 b1 = 0;
3228 sse_fn_epp = sse_op_table1[b][b1];
3229 if (!sse_fn_epp) {
3230 goto illegal_op;
3231 }
3232 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3233 is_xmm = 1;
3234 } else {
3235 if (b1 == 0) {
3236 /* MMX case */
3237 is_xmm = 0;
3238 } else {
3239 is_xmm = 1;
3240 }
3241 }
3242 /* simple MMX/SSE operation */
3243 if (s->flags & HF_TS_MASK) {
3244 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3245 return;
3246 }
3247 if (s->flags & HF_EM_MASK) {
3248 illegal_op:
3249 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3250 return;
3251 }
3252 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3253 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3254 goto illegal_op;
3255 if (b == 0x0e) {
3256 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3257 goto illegal_op;
3258 /* femms */
3259 gen_helper_emms(cpu_env);
3260 return;
3261 }
3262 if (b == 0x77) {
3263 /* emms */
3264 gen_helper_emms(cpu_env);
3265 return;
3266 }
3267 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3268 the static cpu state) */
3269 if (!is_xmm) {
3270 gen_helper_enter_mmx(cpu_env);
3271 }
3272
3273 modrm = cpu_ldub_code(env, s->pc++);
3274 reg = ((modrm >> 3) & 7);
3275 if (is_xmm)
3276 reg |= rex_r;
3277 mod = (modrm >> 6) & 3;
3278 if (sse_fn_epp == SSE_SPECIAL) {
3279 b |= (b1 << 8);
3280 switch(b) {
3281 case 0x0e7: /* movntq */
3282 if (mod == 3)
3283 goto illegal_op;
3284 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3285 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3286 break;
3287 case 0x1e7: /* movntdq */
3288 case 0x02b: /* movntps */
3289 case 0x12b: /* movntps */
3290 if (mod == 3)
3291 goto illegal_op;
3292 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3293 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3294 break;
3295 case 0x3f0: /* lddqu */
3296 if (mod == 3)
3297 goto illegal_op;
3298 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3299 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3300 break;
3301 case 0x22b: /* movntss */
3302 case 0x32b: /* movntsd */
3303 if (mod == 3)
3304 goto illegal_op;
3305 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3306 if (b1 & 1) {
3307 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
3308 xmm_regs[reg]));
3309 } else {
3310 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3311 xmm_regs[reg].XMM_L(0)));
3312 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3313 }
3314 break;
3315 case 0x6e: /* movd mm, ea */
3316 #ifdef TARGET_X86_64
3317 if (s->dflag == 2) {
3318 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
3319 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3320 } else
3321 #endif
3322 {
3323 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
3324 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3325 offsetof(CPUX86State,fpregs[reg].mmx));
3326 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3327 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3328 }
3329 break;
3330 case 0x16e: /* movd xmm, ea */
3331 #ifdef TARGET_X86_64
3332 if (s->dflag == 2) {
3333 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
3334 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3335 offsetof(CPUX86State,xmm_regs[reg]));
3336 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3337 } else
3338 #endif
3339 {
3340 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
3341 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3342 offsetof(CPUX86State,xmm_regs[reg]));
3343 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3344 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3345 }
3346 break;
3347 case 0x6f: /* movq mm, ea */
3348 if (mod != 3) {
3349 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3350 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3351 } else {
3352 rm = (modrm & 7);
3353 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3354 offsetof(CPUX86State,fpregs[rm].mmx));
3355 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3356 offsetof(CPUX86State,fpregs[reg].mmx));
3357 }
3358 break;
3359 case 0x010: /* movups */
3360 case 0x110: /* movupd */
3361 case 0x028: /* movaps */
3362 case 0x128: /* movapd */
3363 case 0x16f: /* movdqa xmm, ea */
3364 case 0x26f: /* movdqu xmm, ea */
3365 if (mod != 3) {
3366 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3367 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3368 } else {
3369 rm = (modrm & 7) | REX_B(s);
3370 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3371 offsetof(CPUX86State,xmm_regs[rm]));
3372 }
3373 break;
3374 case 0x210: /* movss xmm, ea */
3375 if (mod != 3) {
3376 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3377 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3378 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3379 gen_op_movl_T0_0();
3380 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3381 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3382 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3383 } else {
3384 rm = (modrm & 7) | REX_B(s);
3385 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3386 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3387 }
3388 break;
3389 case 0x310: /* movsd xmm, ea */
3390 if (mod != 3) {
3391 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3392 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3393 gen_op_movl_T0_0();
3394 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3395 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3396 } else {
3397 rm = (modrm & 7) | REX_B(s);
3398 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3399 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3400 }
3401 break;
3402 case 0x012: /* movlps */
3403 case 0x112: /* movlpd */
3404 if (mod != 3) {
3405 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3406 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3407 } else {
3408 /* movhlps */
3409 rm = (modrm & 7) | REX_B(s);
3410 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3411 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3412 }
3413 break;
3414 case 0x212: /* movsldup */
3415 if (mod != 3) {
3416 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3417 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3418 } else {
3419 rm = (modrm & 7) | REX_B(s);
3420 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3421 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3422 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3423 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3424 }
3425 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3426 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3427 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3428 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3429 break;
3430 case 0x312: /* movddup */
3431 if (mod != 3) {
3432 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3433 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3434 } else {
3435 rm = (modrm & 7) | REX_B(s);
3436 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3437 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3438 }
3439 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3440 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3441 break;
3442 case 0x016: /* movhps */
3443 case 0x116: /* movhpd */
3444 if (mod != 3) {
3445 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3446 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3447 } else {
3448 /* movlhps */
3449 rm = (modrm & 7) | REX_B(s);
3450 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3451 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3452 }
3453 break;
3454 case 0x216: /* movshdup */
3455 if (mod != 3) {
3456 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3457 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3458 } else {
3459 rm = (modrm & 7) | REX_B(s);
3460 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3461 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3462 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3463 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3464 }
3465 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3466 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3467 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3468 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3469 break;
3470 case 0x178:
3471 case 0x378:
3472 {
3473 int bit_index, field_length;
3474
3475 if (b1 == 1 && reg != 0)
3476 goto illegal_op;
3477 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3478 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
3479 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3480 offsetof(CPUX86State,xmm_regs[reg]));
3481 if (b1 == 1)
3482 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3483 tcg_const_i32(bit_index),
3484 tcg_const_i32(field_length));
3485 else
3486 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3487 tcg_const_i32(bit_index),
3488 tcg_const_i32(field_length));
3489 }
3490 break;
3491 case 0x7e: /* movd ea, mm */
3492 #ifdef TARGET_X86_64
3493 if (s->dflag == 2) {
3494 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3495 offsetof(CPUX86State,fpregs[reg].mmx));
3496 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
3497 } else
3498 #endif
3499 {
3500 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3501 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3502 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
3503 }
3504 break;
3505 case 0x17e: /* movd ea, xmm */
3506 #ifdef TARGET_X86_64
3507 if (s->dflag == 2) {
3508 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3509 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3510 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
3511 } else
3512 #endif
3513 {
3514 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3515 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3516 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
3517 }
3518 break;
3519 case 0x27e: /* movq xmm, ea */
3520 if (mod != 3) {
3521 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3522 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3523 } else {
3524 rm = (modrm & 7) | REX_B(s);
3525 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3526 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3527 }
3528 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3529 break;
3530 case 0x7f: /* movq ea, mm */
3531 if (mod != 3) {
3532 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3533 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3534 } else {
3535 rm = (modrm & 7);
3536 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3537 offsetof(CPUX86State,fpregs[reg].mmx));
3538 }
3539 break;
3540 case 0x011: /* movups */
3541 case 0x111: /* movupd */
3542 case 0x029: /* movaps */
3543 case 0x129: /* movapd */
3544 case 0x17f: /* movdqa ea, xmm */
3545 case 0x27f: /* movdqu ea, xmm */
3546 if (mod != 3) {
3547 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3548 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3549 } else {
3550 rm = (modrm & 7) | REX_B(s);
3551 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3552 offsetof(CPUX86State,xmm_regs[reg]));
3553 }
3554 break;
3555 case 0x211: /* movss ea, xmm */
3556 if (mod != 3) {
3557 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3558 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3559 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3560 } else {
3561 rm = (modrm & 7) | REX_B(s);
3562 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3563 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3564 }
3565 break;
3566 case 0x311: /* movsd ea, xmm */
3567 if (mod != 3) {
3568 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3569 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3570 } else {
3571 rm = (modrm & 7) | REX_B(s);
3572 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3573 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3574 }
3575 break;
3576 case 0x013: /* movlps */
3577 case 0x113: /* movlpd */
3578 if (mod != 3) {
3579 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3580 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3581 } else {
3582 goto illegal_op;
3583 }
3584 break;
3585 case 0x017: /* movhps */
3586 case 0x117: /* movhpd */
3587 if (mod != 3) {
3588 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3589 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3590 } else {
3591 goto illegal_op;
3592 }
3593 break;
3594 case 0x71: /* shift mm, im */
3595 case 0x72:
3596 case 0x73:
3597 case 0x171: /* shift xmm, im */
3598 case 0x172:
3599 case 0x173:
3600 if (b1 >= 2) {
3601 goto illegal_op;
3602 }
3603 val = cpu_ldub_code(env, s->pc++);
3604 if (is_xmm) {
3605 gen_op_movl_T0_im(val);
3606 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3607 gen_op_movl_T0_0();
3608 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3609 op1_offset = offsetof(CPUX86State,xmm_t0);
3610 } else {
3611 gen_op_movl_T0_im(val);
3612 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3613 gen_op_movl_T0_0();
3614 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3615 op1_offset = offsetof(CPUX86State,mmx_t0);
3616 }
3617 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3618 (((modrm >> 3)) & 7)][b1];
3619 if (!sse_fn_epp) {
3620 goto illegal_op;
3621 }
3622 if (is_xmm) {
3623 rm = (modrm & 7) | REX_B(s);
3624 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3625 } else {
3626 rm = (modrm & 7);
3627 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3628 }
3629 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3630 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3631 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3632 break;
3633 case 0x050: /* movmskps */
3634 rm = (modrm & 7) | REX_B(s);
3635 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3636 offsetof(CPUX86State,xmm_regs[rm]));
3637 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3638 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3639 gen_op_mov_reg_T0(OT_LONG, reg);
3640 break;
3641 case 0x150: /* movmskpd */
3642 rm = (modrm & 7) | REX_B(s);
3643 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3644 offsetof(CPUX86State,xmm_regs[rm]));
3645 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3646 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3647 gen_op_mov_reg_T0(OT_LONG, reg);
3648 break;
3649 case 0x02a: /* cvtpi2ps */
3650 case 0x12a: /* cvtpi2pd */
3651 gen_helper_enter_mmx(cpu_env);
3652 if (mod != 3) {
3653 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3654 op2_offset = offsetof(CPUX86State,mmx_t0);
3655 gen_ldq_env_A0(s->mem_index, op2_offset);
3656 } else {
3657 rm = (modrm & 7);
3658 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3659 }
3660 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3661 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3662 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3663 switch(b >> 8) {
3664 case 0x0:
3665 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
3666 break;
3667 default:
3668 case 0x1:
3669 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
3670 break;
3671 }
3672 break;
3673 case 0x22a: /* cvtsi2ss */
3674 case 0x32a: /* cvtsi2sd */
3675 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3676 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3677 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3678 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3679 if (ot == OT_LONG) {
3680 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
3681 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3682 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
3683 } else {
3684 #ifdef TARGET_X86_64
3685 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3686 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
3687 #else
3688 goto illegal_op;
3689 #endif
3690 }
3691 break;
3692 case 0x02c: /* cvttps2pi */
3693 case 0x12c: /* cvttpd2pi */
3694 case 0x02d: /* cvtps2pi */
3695 case 0x12d: /* cvtpd2pi */
3696 gen_helper_enter_mmx(cpu_env);
3697 if (mod != 3) {
3698 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3699 op2_offset = offsetof(CPUX86State,xmm_t0);
3700 gen_ldo_env_A0(s->mem_index, op2_offset);
3701 } else {
3702 rm = (modrm & 7) | REX_B(s);
3703 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3704 }
3705 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3706 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3707 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3708 switch(b) {
3709 case 0x02c:
3710 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3711 break;
3712 case 0x12c:
3713 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3714 break;
3715 case 0x02d:
3716 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3717 break;
3718 case 0x12d:
3719 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3720 break;
3721 }
3722 break;
3723 case 0x22c: /* cvttss2si */
3724 case 0x32c: /* cvttsd2si */
3725 case 0x22d: /* cvtss2si */
3726 case 0x32d: /* cvtsd2si */
3727 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3728 if (mod != 3) {
3729 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3730 if ((b >> 8) & 1) {
3731 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
3732 } else {
3733 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3734 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3735 }
3736 op2_offset = offsetof(CPUX86State,xmm_t0);
3737 } else {
3738 rm = (modrm & 7) | REX_B(s);
3739 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3740 }
3741 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3742 if (ot == OT_LONG) {
3743 SSEFunc_i_ep sse_fn_i_ep =
3744 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
3745 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3746 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3747 } else {
3748 #ifdef TARGET_X86_64
3749 SSEFunc_l_ep sse_fn_l_ep =
3750 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
3751 sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
3752 #else
3753 goto illegal_op;
3754 #endif
3755 }
3756 gen_op_mov_reg_T0(ot, reg);
3757 break;
3758 case 0xc4: /* pinsrw */
3759 case 0x1c4:
3760 s->rip_offset = 1;
3761 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
3762 val = cpu_ldub_code(env, s->pc++);
3763 if (b1) {
3764 val &= 7;
3765 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3766 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3767 } else {
3768 val &= 3;
3769 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3770 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3771 }
3772 break;
3773 case 0xc5: /* pextrw */
3774 case 0x1c5:
3775 if (mod != 3)
3776 goto illegal_op;
3777 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3778 val = cpu_ldub_code(env, s->pc++);
3779 if (b1) {
3780 val &= 7;
3781 rm = (modrm & 7) | REX_B(s);
3782 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3783 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3784 } else {
3785 val &= 3;
3786 rm = (modrm & 7);
3787 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3788 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3789 }
3790 reg = ((modrm >> 3) & 7) | rex_r;
3791 gen_op_mov_reg_T0(ot, reg);
3792 break;
3793 case 0x1d6: /* movq ea, xmm */
3794 if (mod != 3) {
3795 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3796 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3797 } else {
3798 rm = (modrm & 7) | REX_B(s);
3799 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3800 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3801 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3802 }
3803 break;
3804 case 0x2d6: /* movq2dq */
3805 gen_helper_enter_mmx(cpu_env);
3806 rm = (modrm & 7);
3807 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3808 offsetof(CPUX86State,fpregs[rm].mmx));
3809 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3810 break;
3811 case 0x3d6: /* movdq2q */
3812 gen_helper_enter_mmx(cpu_env);
3813 rm = (modrm & 7) | REX_B(s);
3814 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3815 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3816 break;
3817 case 0xd7: /* pmovmskb */
3818 case 0x1d7:
3819 if (mod != 3)
3820 goto illegal_op;
3821 if (b1) {
3822 rm = (modrm & 7) | REX_B(s);
3823 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3824 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3825 } else {
3826 rm = (modrm & 7);
3827 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3828 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3829 }
3830 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3831 reg = ((modrm >> 3) & 7) | rex_r;
3832 gen_op_mov_reg_T0(OT_LONG, reg);
3833 break;
3834 case 0x138:
3835 if (s->prefix & PREFIX_REPNZ)
3836 goto crc32;
3837 case 0x038:
3838 b = modrm;
3839 modrm = cpu_ldub_code(env, s->pc++);
3840 rm = modrm & 7;
3841 reg = ((modrm >> 3) & 7) | rex_r;
3842 mod = (modrm >> 6) & 3;
3843 if (b1 >= 2) {
3844 goto illegal_op;
3845 }
3846
3847 sse_fn_epp = sse_op_table6[b].op[b1];
3848 if (!sse_fn_epp) {
3849 goto illegal_op;
3850 }
3851 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3852 goto illegal_op;
3853
3854 if (b1) {
3855 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3856 if (mod == 3) {
3857 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3858 } else {
3859 op2_offset = offsetof(CPUX86State,xmm_t0);
3860 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3861 switch (b) {
3862 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3863 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3864 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3865 gen_ldq_env_A0(s->mem_index, op2_offset +
3866 offsetof(XMMReg, XMM_Q(0)));
3867 break;
3868 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3869 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3870 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3871 (s->mem_index >> 2) - 1);
3872 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3873 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3874 offsetof(XMMReg, XMM_L(0)));
3875 break;
3876 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3877 tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
3878 (s->mem_index >> 2) - 1);
3879 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3880 offsetof(XMMReg, XMM_W(0)));
3881 break;
3882 case 0x2a: /* movntqda */
3883 gen_ldo_env_A0(s->mem_index, op1_offset);
3884 return;
3885 default:
3886 gen_ldo_env_A0(s->mem_index, op2_offset);
3887 }
3888 }
3889 } else {
3890 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3891 if (mod == 3) {
3892 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3893 } else {
3894 op2_offset = offsetof(CPUX86State,mmx_t0);
3895 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3896 gen_ldq_env_A0(s->mem_index, op2_offset);
3897 }
3898 }
3899 if (sse_fn_epp == SSE_SPECIAL) {
3900 goto illegal_op;
3901 }
3902
3903 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3904 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3905 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3906
3907 if (b == 0x17) {
3908 set_cc_op(s, CC_OP_EFLAGS);
3909 }
3910 break;
3911 case 0x338: /* crc32 */
3912 crc32:
3913 b = modrm;
3914 modrm = cpu_ldub_code(env, s->pc++);
3915 reg = ((modrm >> 3) & 7) | rex_r;
3916
3917 if (b != 0xf0 && b != 0xf1)
3918 goto illegal_op;
3919 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
3920 goto illegal_op;
3921
3922 if (b == 0xf0)
3923 ot = OT_BYTE;
3924 else if (b == 0xf1 && s->dflag != 2)
3925 if (s->prefix & PREFIX_DATA)
3926 ot = OT_WORD;
3927 else
3928 ot = OT_LONG;
3929 else
3930 ot = OT_QUAD;
3931
3932 gen_op_mov_TN_reg(OT_LONG, 0, reg);
3933 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3934 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3935 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3936 cpu_T[0], tcg_const_i32(8 << ot));
3937
3938 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3939 gen_op_mov_reg_T0(ot, reg);
3940 break;
3941 case 0x03a:
3942 case 0x13a:
3943 b = modrm;
3944 modrm = cpu_ldub_code(env, s->pc++);
3945 rm = modrm & 7;
3946 reg = ((modrm >> 3) & 7) | rex_r;
3947 mod = (modrm >> 6) & 3;
3948 if (b1 >= 2) {
3949 goto illegal_op;
3950 }
3951
3952 sse_fn_eppi = sse_op_table7[b].op[b1];
3953 if (!sse_fn_eppi) {
3954 goto illegal_op;
3955 }
3956 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
3957 goto illegal_op;
3958
3959 if (sse_fn_eppi == SSE_SPECIAL) {
3960 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3961 rm = (modrm & 7) | REX_B(s);
3962 if (mod != 3)
3963 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3964 reg = ((modrm >> 3) & 7) | rex_r;
3965 val = cpu_ldub_code(env, s->pc++);
3966 switch (b) {
3967 case 0x14: /* pextrb */
3968 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3969 xmm_regs[reg].XMM_B(val & 15)));
3970 if (mod == 3)
3971 gen_op_mov_reg_T0(ot, rm);
3972 else
3973 tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
3974 (s->mem_index >> 2) - 1);
3975 break;
3976 case 0x15: /* pextrw */
3977 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3978 xmm_regs[reg].XMM_W(val & 7)));
3979 if (mod == 3)
3980 gen_op_mov_reg_T0(ot, rm);
3981 else
3982 tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
3983 (s->mem_index >> 2) - 1);
3984 break;
3985 case 0x16:
3986 if (ot == OT_LONG) { /* pextrd */
3987 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3988 offsetof(CPUX86State,
3989 xmm_regs[reg].XMM_L(val & 3)));
3990 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3991 if (mod == 3)
3992 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
3993 else
3994 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
3995 (s->mem_index >> 2) - 1);
3996 } else { /* pextrq */
3997 #ifdef TARGET_X86_64
3998 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3999 offsetof(CPUX86State,
4000 xmm_regs[reg].XMM_Q(val & 1)));
4001 if (mod == 3)
4002 gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
4003 else
4004 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
4005 (s->mem_index >> 2) - 1);
4006 #else
4007 goto illegal_op;
4008 #endif
4009 }
4010 break;
4011 case 0x17: /* extractps */
4012 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4013 xmm_regs[reg].XMM_L(val & 3)));
4014 if (mod == 3)
4015 gen_op_mov_reg_T0(ot, rm);
4016 else
4017 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
4018 (s->mem_index >> 2) - 1);
4019 break;
4020 case 0x20: /* pinsrb */
4021 if (mod == 3)
4022 gen_op_mov_TN_reg(OT_LONG, 0, rm);
4023 else
4024 tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0,
4025 (s->mem_index >> 2) - 1);
4026 tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State,
4027 xmm_regs[reg].XMM_B(val & 15)));
4028 break;
4029 case 0x21: /* insertps */
4030 if (mod == 3) {
4031 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4032 offsetof(CPUX86State,xmm_regs[rm]
4033 .XMM_L((val >> 6) & 3)));
4034 } else {
4035 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
4036 (s->mem_index >> 2) - 1);
4037 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
4038 }
4039 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4040 offsetof(CPUX86State,xmm_regs[reg]
4041 .XMM_L((val >> 4) & 3)));
4042 if ((val >> 0) & 1)
4043 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4044 cpu_env, offsetof(CPUX86State,
4045 xmm_regs[reg].XMM_L(0)));
4046 if ((val >> 1) & 1)
4047 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4048 cpu_env, offsetof(CPUX86State,
4049 xmm_regs[reg].XMM_L(1)));
4050 if ((val >> 2) & 1)
4051 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4052 cpu_env, offsetof(CPUX86State,
4053 xmm_regs[reg].XMM_L(2)));
4054 if ((val >> 3) & 1)
4055 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4056 cpu_env, offsetof(CPUX86State,
4057 xmm_regs[reg].XMM_L(3)));
4058 break;
4059 case 0x22:
4060 if (ot == OT_LONG) { /* pinsrd */
4061 if (mod == 3)
4062 gen_op_mov_v_reg(ot, cpu_tmp0, rm);
4063 else
4064 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
4065 (s->mem_index >> 2) - 1);
4066 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
4067 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4068 offsetof(CPUX86State,
4069 xmm_regs[reg].XMM_L(val & 3)));
4070 } else { /* pinsrq */
4071 #ifdef TARGET_X86_64
4072 if (mod == 3)
4073 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4074 else
4075 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
4076 (s->mem_index >> 2) - 1);
4077 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4078 offsetof(CPUX86State,
4079 xmm_regs[reg].XMM_Q(val & 1)));
4080 #else
4081 goto illegal_op;
4082 #endif
4083 }
4084 break;
4085 }
4086 return;
4087 }
4088
4089 if (b1) {
4090 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4091 if (mod == 3) {
4092 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4093 } else {
4094 op2_offset = offsetof(CPUX86State,xmm_t0);
4095 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4096 gen_ldo_env_A0(s->mem_index, op2_offset);
4097 }
4098 } else {
4099 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4100 if (mod == 3) {
4101 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4102 } else {
4103 op2_offset = offsetof(CPUX86State,mmx_t0);
4104 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4105 gen_ldq_env_A0(s->mem_index, op2_offset);
4106 }
4107 }
4108 val = cpu_ldub_code(env, s->pc++);
4109
4110 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
4111 set_cc_op(s, CC_OP_EFLAGS);
4112
4113 if (s->dflag == 2)
4114 /* The helper must use entire 64-bit gp registers */
4115 val |= 1 << 8;
4116 }
4117
4118 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4119 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4120 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4121 break;
4122 default:
4123 goto illegal_op;
4124 }
4125 } else {
4126 /* generic MMX or SSE operation */
4127 switch(b) {
4128 case 0x70: /* pshufx insn */
4129 case 0xc6: /* pshufx insn */
4130 case 0xc2: /* compare insns */
4131 s->rip_offset = 1;
4132 break;
4133 default:
4134 break;
4135 }
4136 if (is_xmm) {
4137 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4138 if (mod != 3) {
4139 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4140 op2_offset = offsetof(CPUX86State,xmm_t0);
4141 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
4142 b == 0xc2)) {
4143 /* specific case for SSE single instructions */
4144 if (b1 == 2) {
4145 /* 32 bit access */
4146 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
4147 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
4148 } else {
4149 /* 64 bit access */
4150 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
4151 }
4152 } else {
4153 gen_ldo_env_A0(s->mem_index, op2_offset);
4154 }
4155 } else {
4156 rm = (modrm & 7) | REX_B(s);
4157 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4158 }
4159 } else {
4160 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4161 if (mod != 3) {
4162 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4163 op2_offset = offsetof(CPUX86State,mmx_t0);
4164 gen_ldq_env_A0(s->mem_index, op2_offset);
4165 } else {
4166 rm = (modrm & 7);
4167 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4168 }
4169 }
4170 switch(b) {
4171 case 0x0f: /* 3DNow! data insns */
4172 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4173 goto illegal_op;
4174 val = cpu_ldub_code(env, s->pc++);
4175 sse_fn_epp = sse_op_table5[val];
4176 if (!sse_fn_epp) {
4177 goto illegal_op;
4178 }
4179 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4180 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4181 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4182 break;
4183 case 0x70: /* pshufx insn */
4184 case 0xc6: /* pshufx insn */
4185 val = cpu_ldub_code(env, s->pc++);
4186 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4187 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4188 /* XXX: introduce a new table? */
4189 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
4190 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4191 break;
4192 case 0xc2:
4193 /* compare insns */
4194 val = cpu_ldub_code(env, s->pc++);
4195 if (val >= 8)
4196 goto illegal_op;
4197 sse_fn_epp = sse_op_table4[val][b1];
4198
4199 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4200 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4201 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4202 break;
4203 case 0xf7:
4204 /* maskmov : we must prepare A0 */
4205 if (mod != 3)
4206 goto illegal_op;
4207 #ifdef TARGET_X86_64
4208 if (s->aflag == 2) {
4209 gen_op_movq_A0_reg(R_EDI);
4210 } else
4211 #endif
4212 {
4213 gen_op_movl_A0_reg(R_EDI);
4214 if (s->aflag == 0)
4215 gen_op_andl_A0_ffff();
4216 }
4217 gen_add_A0_ds_seg(s);
4218
4219 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4220 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4221 /* XXX: introduce a new table? */
4222 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4223 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
4224 break;
4225 default:
4226 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4227 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4228 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4229 break;
4230 }
4231 if (b == 0x2e || b == 0x2f) {
4232 set_cc_op(s, CC_OP_EFLAGS);
4233 }
4234 }
4235 }
4236
4237 /* convert one instruction. s->is_jmp is set if the translation must
4238 be stopped. Return the next pc value */
4239 static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4240 target_ulong pc_start)
4241 {
4242 int b, prefixes, aflag, dflag;
4243 int shift, ot;
4244 int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
4245 target_ulong next_eip, tval;
4246 int rex_w, rex_r;
4247
4248 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4249 tcg_gen_debug_insn_start(pc_start);
4250 }
4251 s->pc = pc_start;
4252 prefixes = 0;
4253 aflag = s->code32;
4254 dflag = s->code32;
4255 s->override = -1;
4256 rex_w = -1;
4257 rex_r = 0;
4258 #ifdef TARGET_X86_64
4259 s->rex_x = 0;
4260 s->rex_b = 0;
4261 x86_64_hregs = 0;
4262 #endif
4263 s->rip_offset = 0; /* for relative ip address */
4264 next_byte:
4265 b = cpu_ldub_code(env, s->pc);
4266 s->pc++;
4267 /* check prefixes */
4268 #ifdef TARGET_X86_64
4269 if (CODE64(s)) {
4270 switch (b) {
4271 case 0xf3:
4272 prefixes |= PREFIX_REPZ;
4273 goto next_byte;
4274 case 0xf2:
4275 prefixes |= PREFIX_REPNZ;
4276 goto next_byte;
4277 case 0xf0:
4278 prefixes |= PREFIX_LOCK;
4279 goto next_byte;
4280 case 0x2e:
4281 s->override = R_CS;
4282 goto next_byte;
4283 case 0x36:
4284 s->override = R_SS;
4285 goto next_byte;
4286 case 0x3e:
4287 s->override = R_DS;
4288 goto next_byte;
4289 case 0x26:
4290 s->override = R_ES;
4291 goto next_byte;
4292 case 0x64:
4293 s->override = R_FS;
4294 goto next_byte;
4295 case 0x65:
4296 s->override = R_GS;
4297 goto next_byte;
4298 case 0x66:
4299 prefixes |= PREFIX_DATA;
4300 goto next_byte;
4301 case 0x67:
4302 prefixes |= PREFIX_ADR;
4303 goto next_byte;
4304 case 0x40 ... 0x4f:
4305 /* REX prefix */
4306 rex_w = (b >> 3) & 1;
4307 rex_r = (b & 0x4) << 1;
4308 s->rex_x = (b & 0x2) << 2;
4309 REX_B(s) = (b & 0x1) << 3;
4310 x86_64_hregs = 1; /* select uniform byte register addressing */
4311 goto next_byte;
4312 }
4313 if (rex_w == 1) {
4314 /* 0x66 is ignored if rex.w is set */
4315 dflag = 2;
4316 } else {
4317 if (prefixes & PREFIX_DATA)
4318 dflag ^= 1;
4319 }
4320 if (!(prefixes & PREFIX_ADR))
4321 aflag = 2;
4322 } else
4323 #endif
4324 {
4325 switch (b) {
4326 case 0xf3:
4327 prefixes |= PREFIX_REPZ;
4328 goto next_byte;
4329 case 0xf2:
4330 prefixes |= PREFIX_REPNZ;
4331 goto next_byte;
4332 case 0xf0:
4333 prefixes |= PREFIX_LOCK;
4334 goto next_byte;
4335 case 0x2e:
4336 s->override = R_CS;
4337 goto next_byte;
4338 case 0x36:
4339 s->override = R_SS;
4340 goto next_byte;
4341 case 0x3e:
4342 s->override = R_DS;
4343 goto next_byte;
4344 case 0x26:
4345 s->override = R_ES;
4346 goto next_byte;
4347 case 0x64:
4348 s->override = R_FS;
4349 goto next_byte;
4350 case 0x65:
4351 s->override = R_GS;
4352 goto next_byte;
4353 case 0x66:
4354 prefixes |= PREFIX_DATA;
4355 goto next_byte;
4356 case 0x67:
4357 prefixes |= PREFIX_ADR;
4358 goto next_byte;
4359 }
4360 if (prefixes & PREFIX_DATA)
4361 dflag ^= 1;
4362 if (prefixes & PREFIX_ADR)
4363 aflag ^= 1;
4364 }
4365
4366 s->prefix = prefixes;
4367 s->aflag = aflag;
4368 s->dflag = dflag;
4369
4370 /* lock generation */
4371 if (prefixes & PREFIX_LOCK)
4372 gen_helper_lock();
4373
4374 /* now check op code */
4375 reswitch:
4376 switch(b) {
4377 case 0x0f:
4378 /**************************/
4379 /* extended op code */
4380 b = cpu_ldub_code(env, s->pc++) | 0x100;
4381 goto reswitch;
4382
4383 /**************************/
4384 /* arith & logic */
4385 case 0x00 ... 0x05:
4386 case 0x08 ... 0x0d:
4387 case 0x10 ... 0x15:
4388 case 0x18 ... 0x1d:
4389 case 0x20 ... 0x25:
4390 case 0x28 ... 0x2d:
4391 case 0x30 ... 0x35:
4392 case 0x38 ... 0x3d:
4393 {
4394 int op, f, val;
4395 op = (b >> 3) & 7;
4396 f = (b >> 1) & 3;
4397
4398 if ((b & 1) == 0)
4399 ot = OT_BYTE;
4400 else
4401 ot = dflag + OT_WORD;
4402
4403 switch(f) {
4404 case 0: /* OP Ev, Gv */
4405 modrm = cpu_ldub_code(env, s->pc++);
4406 reg = ((modrm >> 3) & 7) | rex_r;
4407 mod = (modrm >> 6) & 3;
4408 rm = (modrm & 7) | REX_B(s);
4409 if (mod != 3) {
4410 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4411 opreg = OR_TMP0;
4412 } else if (op == OP_XORL && rm == reg) {
4413 xor_zero:
4414 /* xor reg, reg optimisation */
4415 gen_op_movl_T0_0();
4416 set_cc_op(s, CC_OP_LOGICB + ot);
4417 gen_op_mov_reg_T0(ot, reg);
4418 gen_op_update1_cc();
4419 break;
4420 } else {
4421 opreg = rm;
4422 }
4423 gen_op_mov_TN_reg(ot, 1, reg);
4424 gen_op(s, op, ot, opreg);
4425 break;
4426 case 1: /* OP Gv, Ev */
4427 modrm = cpu_ldub_code(env, s->pc++);
4428 mod = (modrm >> 6) & 3;
4429 reg = ((modrm >> 3) & 7) | rex_r;
4430 rm = (modrm & 7) | REX_B(s);
4431 if (mod != 3) {
4432 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4433 gen_op_ld_T1_A0(ot + s->mem_index);
4434 } else if (op == OP_XORL && rm == reg) {
4435 goto xor_zero;
4436 } else {
4437 gen_op_mov_TN_reg(ot, 1, rm);
4438 }
4439 gen_op(s, op, ot, reg);
4440 break;
4441 case 2: /* OP A, Iv */
4442 val = insn_get(env, s, ot);
4443 gen_op_movl_T1_im(val);
4444 gen_op(s, op, ot, OR_EAX);
4445 break;
4446 }
4447 }
4448 break;
4449
4450 case 0x82:
4451 if (CODE64(s))
4452 goto illegal_op;
4453 case 0x80: /* GRP1 */
4454 case 0x81:
4455 case 0x83:
4456 {
4457 int val;
4458
4459 if ((b & 1) == 0)
4460 ot = OT_BYTE;
4461 else
4462 ot = dflag + OT_WORD;
4463
4464 modrm = cpu_ldub_code(env, s->pc++);
4465 mod = (modrm >> 6) & 3;
4466 rm = (modrm & 7) | REX_B(s);
4467 op = (modrm >> 3) & 7;
4468
4469 if (mod != 3) {
4470 if (b == 0x83)
4471 s->rip_offset = 1;
4472 else
4473 s->rip_offset = insn_const_size(ot);
4474 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4475 opreg = OR_TMP0;
4476 } else {
4477 opreg = rm;
4478 }
4479
4480 switch(b) {
4481 default:
4482 case 0x80:
4483 case 0x81:
4484 case 0x82:
4485 val = insn_get(env, s, ot);
4486 break;
4487 case 0x83:
4488 val = (int8_t)insn_get(env, s, OT_BYTE);
4489 break;
4490 }
4491 gen_op_movl_T1_im(val);
4492 gen_op(s, op, ot, opreg);
4493 }
4494 break;
4495
4496 /**************************/
4497 /* inc, dec, and other misc arith */
4498 case 0x40 ... 0x47: /* inc Gv */
4499 ot = dflag ? OT_LONG : OT_WORD;
4500 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4501 break;
4502 case 0x48 ... 0x4f: /* dec Gv */
4503 ot = dflag ? OT_LONG : OT_WORD;
4504 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4505 break;
4506 case 0xf6: /* GRP3 */
4507 case 0xf7:
4508 if ((b & 1) == 0)
4509 ot = OT_BYTE;
4510 else
4511 ot = dflag + OT_WORD;
4512
4513 modrm = cpu_ldub_code(env, s->pc++);
4514 mod = (modrm >> 6) & 3;
4515 rm = (modrm & 7) | REX_B(s);
4516 op = (modrm >> 3) & 7;
4517 if (mod != 3) {
4518 if (op == 0)
4519 s->rip_offset = insn_const_size(ot);
4520 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4521 gen_op_ld_T0_A0(ot + s->mem_index);
4522 } else {
4523 gen_op_mov_TN_reg(ot, 0, rm);
4524 }
4525
4526 switch(op) {
4527 case 0: /* test */
4528 val = insn_get(env, s, ot);
4529 gen_op_movl_T1_im(val);
4530 gen_op_testl_T0_T1_cc();
4531 set_cc_op(s, CC_OP_LOGICB + ot);
4532 break;
4533 case 2: /* not */
4534 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4535 if (mod != 3) {
4536 gen_op_st_T0_A0(ot + s->mem_index);
4537 } else {
4538 gen_op_mov_reg_T0(ot, rm);
4539 }
4540 break;
4541 case 3: /* neg */
4542 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4543 if (mod != 3) {
4544 gen_op_st_T0_A0(ot + s->mem_index);
4545 } else {
4546 gen_op_mov_reg_T0(ot, rm);
4547 }
4548 gen_op_update_neg_cc();
4549 set_cc_op(s, CC_OP_SUBB + ot);
4550 break;
4551 case 4: /* mul */
4552 switch(ot) {
4553 case OT_BYTE:
4554 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4555 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4556 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4557 /* XXX: use 32 bit mul which could be faster */
4558 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4559 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4560 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4561 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4562 set_cc_op(s, CC_OP_MULB);
4563 break;
4564 case OT_WORD:
4565 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4566 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4567 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4568 /* XXX: use 32 bit mul which could be faster */
4569 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4570 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4571 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4572 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4573 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4574 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4575 set_cc_op(s, CC_OP_MULW);
4576 break;
4577 default:
4578 case OT_LONG:
4579 #ifdef TARGET_X86_64
4580 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4581 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4582 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
4583 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4584 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4585 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4586 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4587 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4588 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4589 #else
4590 {
4591 TCGv_i64 t0, t1;
4592 t0 = tcg_temp_new_i64();
4593 t1 = tcg_temp_new_i64();
4594 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4595 tcg_gen_extu_i32_i64(t0, cpu_T[0]);
4596 tcg_gen_extu_i32_i64(t1, cpu_T[1]);
4597 tcg_gen_mul_i64(t0, t0, t1);
4598 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4599 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4600 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4601 tcg_gen_shri_i64(t0, t0, 32);
4602 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4603 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4604 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4605 }
4606 #endif
4607 set_cc_op(s, CC_OP_MULL);
4608 break;
4609 #ifdef TARGET_X86_64
4610 case OT_QUAD:
4611 gen_helper_mulq_EAX_T0(cpu_env, cpu_T[0]);
4612 set_cc_op(s, CC_OP_MULQ);
4613 break;
4614 #endif
4615 }
4616 break;
4617 case 5: /* imul */
4618 switch(ot) {
4619 case OT_BYTE:
4620 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4621 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4622 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4623 /* XXX: use 32 bit mul which could be faster */
4624 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4625 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4626 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4627 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4628 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4629 set_cc_op(s, CC_OP_MULB);
4630 break;
4631 case OT_WORD:
4632 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4633 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4634 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4635 /* XXX: use 32 bit mul which could be faster */
4636 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4637 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4638 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4639 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4640 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4641 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4642 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4643 set_cc_op(s, CC_OP_MULW);
4644 break;
4645 default:
4646 case OT_LONG:
4647 #ifdef TARGET_X86_64
4648 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4649 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4650 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4651 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4652 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4653 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4654 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4655 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4656 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4657 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4658 #else
4659 {
4660 TCGv_i64 t0, t1;
4661 t0 = tcg_temp_new_i64();
4662 t1 = tcg_temp_new_i64();
4663 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4664 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4665 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4666 tcg_gen_mul_i64(t0, t0, t1);
4667 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4668 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4669 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4670 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4671 tcg_gen_shri_i64(t0, t0, 32);
4672 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4673 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4674 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4675 }
4676 #endif
4677 set_cc_op(s, CC_OP_MULL);
4678 break;
4679 #ifdef TARGET_X86_64
4680 case OT_QUAD:
4681 gen_helper_imulq_EAX_T0(cpu_env, cpu_T[0]);
4682 set_cc_op(s, CC_OP_MULQ);
4683 break;
4684 #endif
4685 }
4686 break;
4687 case 6: /* div */
4688 switch(ot) {
4689 case OT_BYTE:
4690 gen_jmp_im(pc_start - s->cs_base);
4691 gen_helper_divb_AL(cpu_env, cpu_T[0]);
4692 break;
4693 case OT_WORD:
4694 gen_jmp_im(pc_start - s->cs_base);
4695 gen_helper_divw_AX(cpu_env, cpu_T[0]);
4696 break;
4697 default:
4698 case OT_LONG:
4699 gen_jmp_im(pc_start - s->cs_base);
4700 gen_helper_divl_EAX(cpu_env, cpu_T[0]);
4701 break;
4702 #ifdef TARGET_X86_64
4703 case OT_QUAD:
4704 gen_jmp_im(pc_start - s->cs_base);
4705 gen_helper_divq_EAX(cpu_env, cpu_T[0]);
4706 break;
4707 #endif
4708 }
4709 break;
4710 case 7: /* idiv */
4711 switch(ot) {
4712 case OT_BYTE:
4713 gen_jmp_im(pc_start - s->cs_base);
4714 gen_helper_idivb_AL(cpu_env, cpu_T[0]);
4715 break;
4716 case OT_WORD:
4717 gen_jmp_im(pc_start - s->cs_base);
4718 gen_helper_idivw_AX(cpu_env, cpu_T[0]);
4719 break;
4720 default:
4721 case OT_LONG:
4722 gen_jmp_im(pc_start - s->cs_base);
4723 gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
4724 break;
4725 #ifdef TARGET_X86_64
4726 case OT_QUAD:
4727 gen_jmp_im(pc_start - s->cs_base);
4728 gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
4729 break;
4730 #endif
4731 }
4732 break;
4733 default:
4734 goto illegal_op;
4735 }
4736 break;
4737
4738 case 0xfe: /* GRP4 */
4739 case 0xff: /* GRP5 */
4740 if ((b & 1) == 0)
4741 ot = OT_BYTE;
4742 else
4743 ot = dflag + OT_WORD;
4744
4745 modrm = cpu_ldub_code(env, s->pc++);
4746 mod = (modrm >> 6) & 3;
4747 rm = (modrm & 7) | REX_B(s);
4748 op = (modrm >> 3) & 7;
4749 if (op >= 2 && b == 0xfe) {
4750 goto illegal_op;
4751 }
4752 if (CODE64(s)) {
4753 if (op == 2 || op == 4) {
4754 /* operand size for jumps is 64 bit */
4755 ot = OT_QUAD;
4756 } else if (op == 3 || op == 5) {
4757 ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
4758 } else if (op == 6) {
4759 /* default push size is 64 bit */
4760 ot = dflag ? OT_QUAD : OT_WORD;
4761 }
4762 }
4763 if (mod != 3) {
4764 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4765 if (op >= 2 && op != 3 && op != 5)
4766 gen_op_ld_T0_A0(ot + s->mem_index);
4767 } else {
4768 gen_op_mov_TN_reg(ot, 0, rm);
4769 }
4770
4771 switch(op) {
4772 case 0: /* inc Ev */
4773 if (mod != 3)
4774 opreg = OR_TMP0;
4775 else
4776 opreg = rm;
4777 gen_inc(s, ot, opreg, 1);
4778 break;
4779 case 1: /* dec Ev */
4780 if (mod != 3)
4781 opreg = OR_TMP0;
4782 else
4783 opreg = rm;
4784 gen_inc(s, ot, opreg, -1);
4785 break;
4786 case 2: /* call Ev */
4787 /* XXX: optimize if memory (no 'and' is necessary) */
4788 if (s->dflag == 0)
4789 gen_op_andl_T0_ffff();
4790 next_eip = s->pc - s->cs_base;
4791 gen_movtl_T1_im(next_eip);
4792 gen_push_T1(s);
4793 gen_op_jmp_T0();
4794 gen_eob(s);
4795 break;
4796 case 3: /* lcall Ev */
4797 gen_op_ld_T1_A0(ot + s->mem_index);
4798 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4799 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4800 do_lcall:
4801 if (s->pe && !s->vm86) {
4802 gen_update_cc_op(s);
4803 gen_jmp_im(pc_start - s->cs_base);
4804 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4805 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4806 tcg_const_i32(dflag),
4807 tcg_const_i32(s->pc - pc_start));
4808 } else {
4809 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4810 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
4811 tcg_const_i32(dflag),
4812 tcg_const_i32(s->pc - s->cs_base));
4813 }
4814 gen_eob(s);
4815 break;
4816 case 4: /* jmp Ev */
4817 if (s->dflag == 0)
4818 gen_op_andl_T0_ffff();
4819 gen_op_jmp_T0();
4820 gen_eob(s);
4821 break;
4822 case 5: /* ljmp Ev */
4823 gen_op_ld_T1_A0(ot + s->mem_index);
4824 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4825 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4826 do_ljmp:
4827 if (s->pe && !s->vm86) {
4828 gen_update_cc_op(s);
4829 gen_jmp_im(pc_start - s->cs_base);
4830 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4831 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4832 tcg_const_i32(s->pc - pc_start));
4833 } else {
4834 gen_op_movl_seg_T0_vm(R_CS);
4835 gen_op_movl_T0_T1();
4836 gen_op_jmp_T0();
4837 }
4838 gen_eob(s);
4839 break;
4840 case 6: /* push Ev */
4841 gen_push_T0(s);
4842 break;
4843 default:
4844 goto illegal_op;
4845 }
4846 break;
4847
4848 case 0x84: /* test Ev, Gv */
4849 case 0x85:
4850 if ((b & 1) == 0)
4851 ot = OT_BYTE;
4852 else
4853 ot = dflag + OT_WORD;
4854
4855 modrm = cpu_ldub_code(env, s->pc++);
4856 reg = ((modrm >> 3) & 7) | rex_r;
4857
4858 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4859 gen_op_mov_TN_reg(ot, 1, reg);
4860 gen_op_testl_T0_T1_cc();
4861 set_cc_op(s, CC_OP_LOGICB + ot);
4862 break;
4863
4864 case 0xa8: /* test eAX, Iv */
4865 case 0xa9:
4866 if ((b & 1) == 0)
4867 ot = OT_BYTE;
4868 else
4869 ot = dflag + OT_WORD;
4870 val = insn_get(env, s, ot);
4871
4872 gen_op_mov_TN_reg(ot, 0, OR_EAX);
4873 gen_op_movl_T1_im(val);
4874 gen_op_testl_T0_T1_cc();
4875 set_cc_op(s, CC_OP_LOGICB + ot);
4876 break;
4877
4878 case 0x98: /* CWDE/CBW */
4879 #ifdef TARGET_X86_64
4880 if (dflag == 2) {
4881 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4882 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4883 gen_op_mov_reg_T0(OT_QUAD, R_EAX);
4884 } else
4885 #endif
4886 if (dflag == 1) {
4887 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4888 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4889 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4890 } else {
4891 gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
4892 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4893 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4894 }
4895 break;
4896 case 0x99: /* CDQ/CWD */
4897 #ifdef TARGET_X86_64
4898 if (dflag == 2) {
4899 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
4900 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
4901 gen_op_mov_reg_T0(OT_QUAD, R_EDX);
4902 } else
4903 #endif
4904 if (dflag == 1) {
4905 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4906 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4907 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
4908 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4909 } else {
4910 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4911 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4912 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
4913 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4914 }
4915 break;
4916 case 0x1af: /* imul Gv, Ev */
4917 case 0x69: /* imul Gv, Ev, I */
4918 case 0x6b:
4919 ot = dflag + OT_WORD;
4920 modrm = cpu_ldub_code(env, s->pc++);
4921 reg = ((modrm >> 3) & 7) | rex_r;
4922 if (b == 0x69)
4923 s->rip_offset = insn_const_size(ot);
4924 else if (b == 0x6b)
4925 s->rip_offset = 1;
4926 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4927 if (b == 0x69) {
4928 val = insn_get(env, s, ot);
4929 gen_op_movl_T1_im(val);
4930 } else if (b == 0x6b) {
4931 val = (int8_t)insn_get(env, s, OT_BYTE);
4932 gen_op_movl_T1_im(val);
4933 } else {
4934 gen_op_mov_TN_reg(ot, 1, reg);
4935 }
4936
4937 #ifdef TARGET_X86_64
4938 if (ot == OT_QUAD) {
4939 gen_helper_imulq_T0_T1(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
4940 } else
4941 #endif
4942 if (ot == OT_LONG) {
4943 #ifdef TARGET_X86_64
4944 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4945 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4946 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4947 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4948 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4949 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4950 #else
4951 {
4952 TCGv_i64 t0, t1;
4953 t0 = tcg_temp_new_i64();
4954 t1 = tcg_temp_new_i64();
4955 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4956 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4957 tcg_gen_mul_i64(t0, t0, t1);
4958 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4959 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4960 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4961 tcg_gen_shri_i64(t0, t0, 32);
4962 tcg_gen_trunc_i64_i32(cpu_T[1], t0);
4963 tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
4964 }
4965 #endif
4966 } else {
4967 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4968 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4969 /* XXX: use 32 bit mul which could be faster */
4970 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4971 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4972 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4973 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4974 }
4975 gen_op_mov_reg_T0(ot, reg);
4976 set_cc_op(s, CC_OP_MULB + ot);
4977 break;
4978 case 0x1c0:
4979 case 0x1c1: /* xadd Ev, Gv */
4980 if ((b & 1) == 0)
4981 ot = OT_BYTE;
4982 else
4983 ot = dflag + OT_WORD;
4984 modrm = cpu_ldub_code(env, s->pc++);
4985 reg = ((modrm >> 3) & 7) | rex_r;
4986 mod = (modrm >> 6) & 3;
4987 if (mod == 3) {
4988 rm = (modrm & 7) | REX_B(s);
4989 gen_op_mov_TN_reg(ot, 0, reg);
4990 gen_op_mov_TN_reg(ot, 1, rm);
4991 gen_op_addl_T0_T1();
4992 gen_op_mov_reg_T1(ot, reg);
4993 gen_op_mov_reg_T0(ot, rm);
4994 } else {
4995 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4996 gen_op_mov_TN_reg(ot, 0, reg);
4997 gen_op_ld_T1_A0(ot + s->mem_index);
4998 gen_op_addl_T0_T1();
4999 gen_op_st_T0_A0(ot + s->mem_index);
5000 gen_op_mov_reg_T1(ot, reg);
5001 }
5002 gen_op_update2_cc();
5003 set_cc_op(s, CC_OP_ADDB + ot);
5004 break;
5005 case 0x1b0:
5006 case 0x1b1: /* cmpxchg Ev, Gv */
5007 {
5008 int label1, label2;
5009 TCGv t0, t1, t2, a0;
5010
5011 if ((b & 1) == 0)
5012 ot = OT_BYTE;
5013 else
5014 ot = dflag + OT_WORD;
5015 modrm = cpu_ldub_code(env, s->pc++);
5016 reg = ((modrm >> 3) & 7) | rex_r;
5017 mod = (modrm >> 6) & 3;
5018 t0 = tcg_temp_local_new();
5019 t1 = tcg_temp_local_new();
5020 t2 = tcg_temp_local_new();
5021 a0 = tcg_temp_local_new();
5022 gen_op_mov_v_reg(ot, t1, reg);
5023 if (mod == 3) {
5024 rm = (modrm & 7) | REX_B(s);
5025 gen_op_mov_v_reg(ot, t0, rm);
5026 } else {
5027 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5028 tcg_gen_mov_tl(a0, cpu_A0);
5029 gen_op_ld_v(ot + s->mem_index, t0, a0);
5030 rm = 0; /* avoid warning */
5031 }
5032 label1 = gen_new_label();
5033 tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5034 gen_extu(ot, t0);
5035 gen_extu(ot, t2);
5036 tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
5037 label2 = gen_new_label();
5038 if (mod == 3) {
5039 gen_op_mov_reg_v(ot, R_EAX, t0);
5040 tcg_gen_br(label2);
5041 gen_set_label(label1);
5042 gen_op_mov_reg_v(ot, rm, t1);
5043 } else {
5044 /* perform no-op store cycle like physical cpu; must be
5045 before changing accumulator to ensure idempotency if
5046 the store faults and the instruction is restarted */
5047 gen_op_st_v(ot + s->mem_index, t0, a0);
5048 gen_op_mov_reg_v(ot, R_EAX, t0);
5049 tcg_gen_br(label2);
5050 gen_set_label(label1);
5051 gen_op_st_v(ot + s->mem_index, t1, a0);
5052 }
5053 gen_set_label(label2);
5054 tcg_gen_mov_tl(cpu_cc_src, t0);
5055 tcg_gen_mov_tl(cpu_cc_srcT, t2);
5056 tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
5057 set_cc_op(s, CC_OP_SUBB + ot);
5058 tcg_temp_free(t0);
5059 tcg_temp_free(t1);
5060 tcg_temp_free(t2);
5061 tcg_temp_free(a0);
5062 }
5063 break;
5064 case 0x1c7: /* cmpxchg8b */
5065 modrm = cpu_ldub_code(env, s->pc++);
5066 mod = (modrm >> 6) & 3;
5067 if ((mod == 3) || ((modrm & 0x38) != 0x8))
5068 goto illegal_op;
5069 #ifdef TARGET_X86_64
5070 if (dflag == 2) {
5071 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5072 goto illegal_op;
5073 gen_jmp_im(pc_start - s->cs_base);
5074 gen_update_cc_op(s);
5075 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5076 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
5077 } else
5078 #endif
5079 {
5080 if (!(s->cpuid_features & CPUID_CX8))
5081 goto illegal_op;
5082 gen_jmp_im(pc_start - s->cs_base);
5083 gen_update_cc_op(s);
5084 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5085 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
5086 }
5087 set_cc_op(s, CC_OP_EFLAGS);
5088 break;
5089
5090 /**************************/
5091 /* push/pop */
5092 case 0x50 ... 0x57: /* push */
5093 gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
5094 gen_push_T0(s);
5095 break;
5096 case 0x58 ... 0x5f: /* pop */
5097 if (CODE64(s)) {
5098 ot = dflag ? OT_QUAD : OT_WORD;
5099 } else {
5100 ot = dflag + OT_WORD;
5101 }
5102 gen_pop_T0(s);
5103 /* NOTE: order is important for pop %sp */
5104 gen_pop_update(s);
5105 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
5106 break;
5107 case 0x60: /* pusha */
5108 if (CODE64(s))
5109 goto illegal_op;
5110 gen_pusha(s);
5111 break;
5112 case 0x61: /* popa */
5113 if (CODE64(s))
5114 goto illegal_op;
5115 gen_popa(s);
5116 break;
5117 case 0x68: /* push Iv */
5118 case 0x6a:
5119 if (CODE64(s)) {
5120 ot = dflag ? OT_QUAD : OT_WORD;
5121 } else {
5122 ot = dflag + OT_WORD;
5123 }
5124 if (b == 0x68)
5125 val = insn_get(env, s, ot);
5126 else
5127 val = (int8_t)insn_get(env, s, OT_BYTE);
5128 gen_op_movl_T0_im(val);
5129 gen_push_T0(s);
5130 break;
5131 case 0x8f: /* pop Ev */
5132 if (CODE64(s)) {
5133 ot = dflag ? OT_QUAD : OT_WORD;
5134 } else {
5135 ot = dflag + OT_WORD;
5136 }
5137 modrm = cpu_ldub_code(env, s->pc++);
5138 mod = (modrm >> 6) & 3;
5139 gen_pop_T0(s);
5140 if (mod == 3) {
5141 /* NOTE: order is important for pop %sp */
5142 gen_pop_update(s);
5143 rm = (modrm & 7) | REX_B(s);
5144 gen_op_mov_reg_T0(ot, rm);
5145 } else {
5146 /* NOTE: order is important too for MMU exceptions */
5147 s->popl_esp_hack = 1 << ot;
5148 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5149 s->popl_esp_hack = 0;
5150 gen_pop_update(s);
5151 }
5152 break;
5153 case 0xc8: /* enter */
5154 {
5155 int level;
5156 val = cpu_lduw_code(env, s->pc);
5157 s->pc += 2;
5158 level = cpu_ldub_code(env, s->pc++);
5159 gen_enter(s, val, level);
5160 }
5161 break;
5162 case 0xc9: /* leave */
5163 /* XXX: exception not precise (ESP is updated before potential exception) */
5164 if (CODE64(s)) {
5165 gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
5166 gen_op_mov_reg_T0(OT_QUAD, R_ESP);
5167 } else if (s->ss32) {
5168 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
5169 gen_op_mov_reg_T0(OT_LONG, R_ESP);
5170 } else {
5171 gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
5172 gen_op_mov_reg_T0(OT_WORD, R_ESP);
5173 }
5174 gen_pop_T0(s);
5175 if (CODE64(s)) {
5176 ot = dflag ? OT_QUAD : OT_WORD;
5177 } else {
5178 ot = dflag + OT_WORD;
5179 }
5180 gen_op_mov_reg_T0(ot, R_EBP);
5181 gen_pop_update(s);
5182 break;
5183 case 0x06: /* push es */
5184 case 0x0e: /* push cs */
5185 case 0x16: /* push ss */
5186 case 0x1e: /* push ds */
5187 if (CODE64(s))
5188 goto illegal_op;
5189 gen_op_movl_T0_seg(b >> 3);
5190 gen_push_T0(s);
5191 break;
5192 case 0x1a0: /* push fs */
5193 case 0x1a8: /* push gs */
5194 gen_op_movl_T0_seg((b >> 3) & 7);
5195 gen_push_T0(s);
5196 break;
5197 case 0x07: /* pop es */
5198 case 0x17: /* pop ss */
5199 case 0x1f: /* pop ds */
5200 if (CODE64(s))
5201 goto illegal_op;
5202 reg = b >> 3;
5203 gen_pop_T0(s);
5204 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5205 gen_pop_update(s);
5206 if (reg == R_SS) {
5207 /* if reg == SS, inhibit interrupts/trace. */
5208 /* If several instructions disable interrupts, only the
5209 _first_ does it */
5210 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5211 gen_helper_set_inhibit_irq(cpu_env);
5212 s->tf = 0;
5213 }
5214 if (s->is_jmp) {
5215 gen_jmp_im(s->pc - s->cs_base);
5216 gen_eob(s);
5217 }
5218 break;
5219 case 0x1a1: /* pop fs */
5220 case 0x1a9: /* pop gs */
5221 gen_pop_T0(s);
5222 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5223 gen_pop_update(s);
5224 if (s->is_jmp) {
5225 gen_jmp_im(s->pc - s->cs_base);
5226 gen_eob(s);
5227 }
5228 break;
5229
5230 /**************************/
5231 /* mov */
5232 case 0x88:
5233 case 0x89: /* mov Gv, Ev */
5234 if ((b & 1) == 0)
5235 ot = OT_BYTE;
5236 else
5237 ot = dflag + OT_WORD;
5238 modrm = cpu_ldub_code(env, s->pc++);
5239 reg = ((modrm >> 3) & 7) | rex_r;
5240
5241 /* generate a generic store */
5242 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
5243 break;
5244 case 0xc6:
5245 case 0xc7: /* mov Ev, Iv */
5246 if ((b & 1) == 0)
5247 ot = OT_BYTE;
5248 else
5249 ot = dflag + OT_WORD;
5250 modrm = cpu_ldub_code(env, s->pc++);
5251 mod = (modrm >> 6) & 3;
5252 if (mod != 3) {
5253 s->rip_offset = insn_const_size(ot);
5254 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5255 }
5256 val = insn_get(env, s, ot);
5257 gen_op_movl_T0_im(val);
5258 if (mod != 3)
5259 gen_op_st_T0_A0(ot + s->mem_index);
5260 else
5261 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
5262 break;
5263 case 0x8a:
5264 case 0x8b: /* mov Ev, Gv */
5265 if ((b & 1) == 0)
5266 ot = OT_BYTE;
5267 else
5268 ot = OT_WORD + dflag;
5269 modrm = cpu_ldub_code(env, s->pc++);
5270 reg = ((modrm >> 3) & 7) | rex_r;
5271
5272 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5273 gen_op_mov_reg_T0(ot, reg);
5274 break;
5275 case 0x8e: /* mov seg, Gv */
5276 modrm = cpu_ldub_code(env, s->pc++);
5277 reg = (modrm >> 3) & 7;
5278 if (reg >= 6 || reg == R_CS)
5279 goto illegal_op;
5280 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
5281 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5282 if (reg == R_SS) {
5283 /* if reg == SS, inhibit interrupts/trace */
5284 /* If several instructions disable interrupts, only the
5285 _first_ does it */
5286 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5287 gen_helper_set_inhibit_irq(cpu_env);
5288 s->tf = 0;
5289 }
5290 if (s->is_jmp) {
5291 gen_jmp_im(s->pc - s->cs_base);
5292 gen_eob(s);
5293 }
5294 break;
5295 case 0x8c: /* mov Gv, seg */
5296 modrm = cpu_ldub_code(env, s->pc++);
5297 reg = (modrm >> 3) & 7;
5298 mod = (modrm >> 6) & 3;
5299 if (reg >= 6)
5300 goto illegal_op;
5301 gen_op_movl_T0_seg(reg);
5302 if (mod == 3)
5303 ot = OT_WORD + dflag;
5304 else
5305 ot = OT_WORD;
5306 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5307 break;
5308
5309 case 0x1b6: /* movzbS Gv, Eb */
5310 case 0x1b7: /* movzwS Gv, Eb */
5311 case 0x1be: /* movsbS Gv, Eb */
5312 case 0x1bf: /* movswS Gv, Eb */
5313 {
5314 int d_ot;
5315 /* d_ot is the size of destination */
5316 d_ot = dflag + OT_WORD;
5317 /* ot is the size of source */
5318 ot = (b & 1) + OT_BYTE;
5319 modrm = cpu_ldub_code(env, s->pc++);
5320 reg = ((modrm >> 3) & 7) | rex_r;
5321 mod = (modrm >> 6) & 3;
5322 rm = (modrm & 7) | REX_B(s);
5323
5324 if (mod == 3) {
5325 gen_op_mov_TN_reg(ot, 0, rm);
5326 switch(ot | (b & 8)) {
5327 case OT_BYTE:
5328 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5329 break;
5330 case OT_BYTE | 8:
5331 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5332 break;
5333 case OT_WORD:
5334 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5335 break;
5336 default:
5337 case OT_WORD | 8:
5338 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5339 break;
5340 }
5341 gen_op_mov_reg_T0(d_ot, reg);
5342 } else {
5343 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5344 if (b & 8) {
5345 gen_op_lds_T0_A0(ot + s->mem_index);
5346 } else {
5347 gen_op_ldu_T0_A0(ot + s->mem_index);
5348 }
5349 gen_op_mov_reg_T0(d_ot, reg);
5350 }
5351 }
5352 break;
5353
5354 case 0x8d: /* lea */
5355 ot = dflag + OT_WORD;
5356 modrm = cpu_ldub_code(env, s->pc++);
5357 mod = (modrm >> 6) & 3;
5358 if (mod == 3)
5359 goto illegal_op;
5360 reg = ((modrm >> 3) & 7) | rex_r;
5361 /* we must ensure that no segment is added */
5362 s->override = -1;
5363 val = s->addseg;
5364 s->addseg = 0;
5365 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5366 s->addseg = val;
5367 gen_op_mov_reg_A0(ot - OT_WORD, reg);
5368 break;
5369
5370 case 0xa0: /* mov EAX, Ov */
5371 case 0xa1:
5372 case 0xa2: /* mov Ov, EAX */
5373 case 0xa3:
5374 {
5375 target_ulong offset_addr;
5376
5377 if ((b & 1) == 0)
5378 ot = OT_BYTE;
5379 else
5380 ot = dflag + OT_WORD;
5381 #ifdef TARGET_X86_64
5382 if (s->aflag == 2) {
5383 offset_addr = cpu_ldq_code(env, s->pc);
5384 s->pc += 8;
5385 gen_op_movq_A0_im(offset_addr);
5386 } else
5387 #endif
5388 {
5389 if (s->aflag) {
5390 offset_addr = insn_get(env, s, OT_LONG);
5391 } else {
5392 offset_addr = insn_get(env, s, OT_WORD);
5393 }
5394 gen_op_movl_A0_im(offset_addr);
5395 }
5396 gen_add_A0_ds_seg(s);
5397 if ((b & 2) == 0) {
5398 gen_op_ld_T0_A0(ot + s->mem_index);
5399 gen_op_mov_reg_T0(ot, R_EAX);
5400 } else {
5401 gen_op_mov_TN_reg(ot, 0, R_EAX);
5402 gen_op_st_T0_A0(ot + s->mem_index);
5403 }
5404 }
5405 break;
5406 case 0xd7: /* xlat */
5407 #ifdef TARGET_X86_64
5408 if (s->aflag == 2) {
5409 gen_op_movq_A0_reg(R_EBX);
5410 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5411 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5412 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5413 } else
5414 #endif
5415 {
5416 gen_op_movl_A0_reg(R_EBX);
5417 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5418 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5419 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5420 if (s->aflag == 0)
5421 gen_op_andl_A0_ffff();
5422 else
5423 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
5424 }
5425 gen_add_A0_ds_seg(s);
5426 gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
5427 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
5428 break;
5429 case 0xb0 ... 0xb7: /* mov R, Ib */
5430 val = insn_get(env, s, OT_BYTE);
5431 gen_op_movl_T0_im(val);
5432 gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
5433 break;
5434 case 0xb8 ... 0xbf: /* mov R, Iv */
5435 #ifdef TARGET_X86_64
5436 if (dflag == 2) {
5437 uint64_t tmp;
5438 /* 64 bit case */
5439 tmp = cpu_ldq_code(env, s->pc);
5440 s->pc += 8;
5441 reg = (b & 7) | REX_B(s);
5442 gen_movtl_T0_im(tmp);
5443 gen_op_mov_reg_T0(OT_QUAD, reg);
5444 } else
5445 #endif
5446 {
5447 ot = dflag ? OT_LONG : OT_WORD;
5448 val = insn_get(env, s, ot);
5449 reg = (b & 7) | REX_B(s);
5450 gen_op_movl_T0_im(val);
5451 gen_op_mov_reg_T0(ot, reg);
5452 }
5453 break;
5454
5455 case 0x91 ... 0x97: /* xchg R, EAX */
5456 do_xchg_reg_eax:
5457 ot = dflag + OT_WORD;
5458 reg = (b & 7) | REX_B(s);
5459 rm = R_EAX;
5460 goto do_xchg_reg;
5461 case 0x86:
5462 case 0x87: /* xchg Ev, Gv */
5463 if ((b & 1) == 0)
5464 ot = OT_BYTE;
5465 else
5466 ot = dflag + OT_WORD;
5467 modrm = cpu_ldub_code(env, s->pc++);
5468 reg = ((modrm >> 3) & 7) | rex_r;
5469 mod = (modrm >> 6) & 3;
5470 if (mod == 3) {
5471 rm = (modrm & 7) | REX_B(s);
5472 do_xchg_reg:
5473 gen_op_mov_TN_reg(ot, 0, reg);
5474 gen_op_mov_TN_reg(ot, 1, rm);
5475 gen_op_mov_reg_T0(ot, rm);
5476 gen_op_mov_reg_T1(ot, reg);
5477 } else {
5478 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5479 gen_op_mov_TN_reg(ot, 0, reg);
5480 /* for xchg, lock is implicit */
5481 if (!(prefixes & PREFIX_LOCK))
5482 gen_helper_lock();
5483 gen_op_ld_T1_A0(ot + s->mem_index);
5484 gen_op_st_T0_A0(ot + s->mem_index);
5485 if (!(prefixes & PREFIX_LOCK))
5486 gen_helper_unlock();
5487 gen_op_mov_reg_T1(ot, reg);
5488 }
5489 break;
5490 case 0xc4: /* les Gv */
5491 if (CODE64(s))
5492 goto illegal_op;
5493 op = R_ES;
5494 goto do_lxx;
5495 case 0xc5: /* lds Gv */
5496 if (CODE64(s))
5497 goto illegal_op;
5498 op = R_DS;
5499 goto do_lxx;
5500 case 0x1b2: /* lss Gv */
5501 op = R_SS;
5502 goto do_lxx;
5503 case 0x1b4: /* lfs Gv */
5504 op = R_FS;
5505 goto do_lxx;
5506 case 0x1b5: /* lgs Gv */
5507 op = R_GS;
5508 do_lxx:
5509 ot = dflag ? OT_LONG : OT_WORD;
5510 modrm = cpu_ldub_code(env, s->pc++);
5511 reg = ((modrm >> 3) & 7) | rex_r;
5512 mod = (modrm >> 6) & 3;
5513 if (mod == 3)
5514 goto illegal_op;
5515 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5516 gen_op_ld_T1_A0(ot + s->mem_index);
5517 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
5518 /* load the segment first to handle exceptions properly */
5519 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
5520 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5521 /* then put the data */
5522 gen_op_mov_reg_T1(ot, reg);
5523 if (s->is_jmp) {
5524 gen_jmp_im(s->pc - s->cs_base);
5525 gen_eob(s);
5526 }
5527 break;
5528
5529 /************************/
5530 /* shifts */
5531 case 0xc0:
5532 case 0xc1:
5533 /* shift Ev,Ib */
5534 shift = 2;
5535 grp2:
5536 {
5537 if ((b & 1) == 0)
5538 ot = OT_BYTE;
5539 else
5540 ot = dflag + OT_WORD;
5541
5542 modrm = cpu_ldub_code(env, s->pc++);
5543 mod = (modrm >> 6) & 3;
5544 op = (modrm >> 3) & 7;
5545
5546 if (mod != 3) {
5547 if (shift == 2) {
5548 s->rip_offset = 1;
5549 }
5550 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5551 opreg = OR_TMP0;
5552 } else {
5553 opreg = (modrm & 7) | REX_B(s);
5554 }
5555
5556 /* simpler op */
5557 if (shift == 0) {
5558 gen_shift(s, op, ot, opreg, OR_ECX);
5559 } else {
5560 if (shift == 2) {
5561 shift = cpu_ldub_code(env, s->pc++);
5562 }
5563 gen_shifti(s, op, ot, opreg, shift);
5564 }
5565 }
5566 break;
5567 case 0xd0:
5568 case 0xd1:
5569 /* shift Ev,1 */
5570 shift = 1;
5571 goto grp2;
5572 case 0xd2:
5573 case 0xd3:
5574 /* shift Ev,cl */
5575 shift = 0;
5576 goto grp2;
5577
5578 case 0x1a4: /* shld imm */
5579 op = 0;
5580 shift = 1;
5581 goto do_shiftd;
5582 case 0x1a5: /* shld cl */
5583 op = 0;
5584 shift = 0;
5585 goto do_shiftd;
5586 case 0x1ac: /* shrd imm */
5587 op = 1;
5588 shift = 1;
5589 goto do_shiftd;
5590 case 0x1ad: /* shrd cl */
5591 op = 1;
5592 shift = 0;
5593 do_shiftd:
5594 ot = dflag + OT_WORD;
5595 modrm = cpu_ldub_code(env, s->pc++);
5596 mod = (modrm >> 6) & 3;
5597 rm = (modrm & 7) | REX_B(s);
5598 reg = ((modrm >> 3) & 7) | rex_r;
5599 if (mod != 3) {
5600 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5601 opreg = OR_TMP0;
5602 } else {
5603 opreg = rm;
5604 }
5605 gen_op_mov_TN_reg(ot, 1, reg);
5606
5607 if (shift) {
5608 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5609 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5610 tcg_temp_free(imm);
5611 } else {
5612 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
5613 }
5614 break;
5615
5616 /************************/
5617 /* floats */
5618 case 0xd8 ... 0xdf:
5619 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5620 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5621 /* XXX: what to do if illegal op ? */
5622 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5623 break;
5624 }
5625 modrm = cpu_ldub_code(env, s->pc++);
5626 mod = (modrm >> 6) & 3;
5627 rm = modrm & 7;
5628 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
5629 if (mod != 3) {
5630 /* memory op */
5631 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5632 switch(op) {
5633 case 0x00 ... 0x07: /* fxxxs */
5634 case 0x10 ... 0x17: /* fixxxl */
5635 case 0x20 ... 0x27: /* fxxxl */
5636 case 0x30 ... 0x37: /* fixxx */
5637 {
5638 int op1;
5639 op1 = op & 7;
5640
5641 switch(op >> 4) {
5642 case 0:
5643 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5644 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5645 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
5646 break;
5647 case 1:
5648 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5649 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5650 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5651 break;
5652 case 2:
5653 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5654 (s->mem_index >> 2) - 1);
5655 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
5656 break;
5657 case 3:
5658 default:
5659 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5660 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5661 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5662 break;
5663 }
5664
5665 gen_helper_fp_arith_ST0_FT0(op1);
5666 if (op1 == 3) {
5667 /* fcomp needs pop */
5668 gen_helper_fpop(cpu_env);
5669 }
5670 }
5671 break;
5672 case 0x08: /* flds */
5673 case 0x0a: /* fsts */
5674 case 0x0b: /* fstps */
5675 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5676 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5677 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5678 switch(op & 7) {
5679 case 0:
5680 switch(op >> 4) {
5681 case 0:
5682 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5683 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5684 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
5685 break;
5686 case 1:
5687 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5688 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5689 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5690 break;
5691 case 2:
5692 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5693 (s->mem_index >> 2) - 1);
5694 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
5695 break;
5696 case 3:
5697 default:
5698 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5699 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5700 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5701 break;
5702 }
5703 break;
5704 case 1:
5705 /* XXX: the corresponding CPUID bit must be tested ! */
5706 switch(op >> 4) {
5707 case 1:
5708 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
5709 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5710 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5711 break;
5712 case 2:
5713 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
5714 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5715 (s->mem_index >> 2) - 1);
5716 break;
5717 case 3:
5718 default:
5719 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
5720 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5721 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5722 break;
5723 }
5724 gen_helper_fpop(cpu_env);
5725 break;
5726 default:
5727 switch(op >> 4) {
5728 case 0:
5729 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
5730 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5731 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5732 break;
5733 case 1:
5734 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
5735 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5736 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5737 break;
5738 case 2:
5739 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
5740 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5741 (s->mem_index >> 2) - 1);
5742 break;
5743 case 3:
5744 default:
5745 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
5746 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5747 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5748 break;
5749 }
5750 if ((op & 7) == 3)
5751 gen_helper_fpop(cpu_env);
5752 break;
5753 }
5754 break;
5755 case 0x0c: /* fldenv mem */
5756 gen_update_cc_op(s);
5757 gen_jmp_im(pc_start - s->cs_base);
5758 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5759 break;
5760 case 0x0d: /* fldcw mem */
5761 gen_op_ld_T0_A0(OT_WORD + s->mem_index);
5762 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5763 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
5764 break;
5765 case 0x0e: /* fnstenv mem */
5766 gen_update_cc_op(s);
5767 gen_jmp_im(pc_start - s->cs_base);
5768 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5769 break;
5770 case 0x0f: /* fnstcw mem */
5771 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
5772 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5773 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5774 break;
5775 case 0x1d: /* fldt mem */
5776 gen_update_cc_op(s);
5777 gen_jmp_im(pc_start - s->cs_base);
5778 gen_helper_fldt_ST0(cpu_env, cpu_A0);
5779 break;
5780 case 0x1f: /* fstpt mem */
5781 gen_update_cc_op(s);
5782 gen_jmp_im(pc_start - s->cs_base);
5783 gen_helper_fstt_ST0(cpu_env, cpu_A0);
5784 gen_helper_fpop(cpu_env);
5785 break;
5786 case 0x2c: /* frstor mem */
5787 gen_update_cc_op(s);
5788 gen_jmp_im(pc_start - s->cs_base);
5789 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5790 break;
5791 case 0x2e: /* fnsave mem */
5792 gen_update_cc_op(s);
5793 gen_jmp_im(pc_start - s->cs_base);
5794 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5795 break;
5796 case 0x2f: /* fnstsw mem */
5797 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
5798 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5799 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5800 break;
5801 case 0x3c: /* fbld */
5802 gen_update_cc_op(s);
5803 gen_jmp_im(pc_start - s->cs_base);
5804 gen_helper_fbld_ST0(cpu_env, cpu_A0);
5805 break;
5806 case 0x3e: /* fbstp */
5807 gen_update_cc_op(s);
5808 gen_jmp_im(pc_start - s->cs_base);
5809 gen_helper_fbst_ST0(cpu_env, cpu_A0);
5810 gen_helper_fpop(cpu_env);
5811 break;
5812 case 0x3d: /* fildll */
5813 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5814 (s->mem_index >> 2) - 1);
5815 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
5816 break;
5817 case 0x3f: /* fistpll */
5818 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
5819 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5820 (s->mem_index >> 2) - 1);
5821 gen_helper_fpop(cpu_env);
5822 break;
5823 default:
5824 goto illegal_op;
5825 }
5826 } else {
5827 /* register float ops */
5828 opreg = rm;
5829
5830 switch(op) {
5831 case 0x08: /* fld sti */
5832 gen_helper_fpush(cpu_env);
5833 gen_helper_fmov_ST0_STN(cpu_env,
5834 tcg_const_i32((opreg + 1) & 7));
5835 break;
5836 case 0x09: /* fxchg sti */
5837 case 0x29: /* fxchg4 sti, undocumented op */
5838 case 0x39: /* fxchg7 sti, undocumented op */
5839 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
5840 break;
5841 case 0x0a: /* grp d9/2 */
5842 switch(rm) {
5843 case 0: /* fnop */
5844 /* check exceptions (FreeBSD FPU probe) */
5845 gen_update_cc_op(s);
5846 gen_jmp_im(pc_start - s->cs_base);
5847 gen_helper_fwait(cpu_env);
5848 break;
5849 default:
5850 goto illegal_op;
5851 }
5852 break;
5853 case 0x0c: /* grp d9/4 */
5854 switch(rm) {
5855 case 0: /* fchs */
5856 gen_helper_fchs_ST0(cpu_env);
5857 break;
5858 case 1: /* fabs */
5859 gen_helper_fabs_ST0(cpu_env);
5860 break;
5861 case 4: /* ftst */
5862 gen_helper_fldz_FT0(cpu_env);
5863 gen_helper_fcom_ST0_FT0(cpu_env);
5864 break;
5865 case 5: /* fxam */
5866 gen_helper_fxam_ST0(cpu_env);
5867 break;
5868 default:
5869 goto illegal_op;
5870 }
5871 break;
5872 case 0x0d: /* grp d9/5 */
5873 {
5874 switch(rm) {
5875 case 0:
5876 gen_helper_fpush(cpu_env);
5877 gen_helper_fld1_ST0(cpu_env);
5878 break;
5879 case 1:
5880 gen_helper_fpush(cpu_env);
5881 gen_helper_fldl2t_ST0(cpu_env);
5882 break;
5883 case 2:
5884 gen_helper_fpush(cpu_env);
5885 gen_helper_fldl2e_ST0(cpu_env);
5886 break;
5887 case 3:
5888 gen_helper_fpush(cpu_env);
5889 gen_helper_fldpi_ST0(cpu_env);
5890 break;
5891 case 4:
5892 gen_helper_fpush(cpu_env);
5893 gen_helper_fldlg2_ST0(cpu_env);
5894 break;
5895 case 5:
5896 gen_helper_fpush(cpu_env);
5897 gen_helper_fldln2_ST0(cpu_env);
5898 break;
5899 case 6:
5900 gen_helper_fpush(cpu_env);
5901 gen_helper_fldz_ST0(cpu_env);
5902 break;
5903 default:
5904 goto illegal_op;
5905 }
5906 }
5907 break;
5908 case 0x0e: /* grp d9/6 */
5909 switch(rm) {
5910 case 0: /* f2xm1 */
5911 gen_helper_f2xm1(cpu_env);
5912 break;
5913 case 1: /* fyl2x */
5914 gen_helper_fyl2x(cpu_env);
5915 break;
5916 case 2: /* fptan */
5917 gen_helper_fptan(cpu_env);
5918 break;
5919 case 3: /* fpatan */
5920 gen_helper_fpatan(cpu_env);
5921 break;
5922 case 4: /* fxtract */
5923 gen_helper_fxtract(cpu_env);
5924 break;
5925 case 5: /* fprem1 */
5926 gen_helper_fprem1(cpu_env);
5927 break;
5928 case 6: /* fdecstp */
5929 gen_helper_fdecstp(cpu_env);
5930 break;
5931 default:
5932 case 7: /* fincstp */
5933 gen_helper_fincstp(cpu_env);
5934 break;
5935 }
5936 break;
5937 case 0x0f: /* grp d9/7 */
5938 switch(rm) {
5939 case 0: /* fprem */
5940 gen_helper_fprem(cpu_env);
5941 break;
5942 case 1: /* fyl2xp1 */
5943 gen_helper_fyl2xp1(cpu_env);
5944 break;
5945 case 2: /* fsqrt */
5946 gen_helper_fsqrt(cpu_env);
5947 break;
5948 case 3: /* fsincos */
5949 gen_helper_fsincos(cpu_env);
5950 break;
5951 case 5: /* fscale */
5952 gen_helper_fscale(cpu_env);
5953 break;
5954 case 4: /* frndint */
5955 gen_helper_frndint(cpu_env);
5956 break;
5957 case 6: /* fsin */
5958 gen_helper_fsin(cpu_env);
5959 break;
5960 default:
5961 case 7: /* fcos */
5962 gen_helper_fcos(cpu_env);
5963 break;
5964 }
5965 break;
5966 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5967 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5968 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5969 {
5970 int op1;
5971
5972 op1 = op & 7;
5973 if (op >= 0x20) {
5974 gen_helper_fp_arith_STN_ST0(op1, opreg);
5975 if (op >= 0x30)
5976 gen_helper_fpop(cpu_env);
5977 } else {
5978 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5979 gen_helper_fp_arith_ST0_FT0(op1);
5980 }
5981 }
5982 break;
5983 case 0x02: /* fcom */
5984 case 0x22: /* fcom2, undocumented op */
5985 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5986 gen_helper_fcom_ST0_FT0(cpu_env);
5987 break;
5988 case 0x03: /* fcomp */
5989 case 0x23: /* fcomp3, undocumented op */
5990 case 0x32: /* fcomp5, undocumented op */
5991 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5992 gen_helper_fcom_ST0_FT0(cpu_env);
5993 gen_helper_fpop(cpu_env);
5994 break;
5995 case 0x15: /* da/5 */
5996 switch(rm) {
5997 case 1: /* fucompp */
5998 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
5999 gen_helper_fucom_ST0_FT0(cpu_env);
6000 gen_helper_fpop(cpu_env);
6001 gen_helper_fpop(cpu_env);
6002 break;
6003 default:
6004 goto illegal_op;
6005 }
6006 break;
6007 case 0x1c:
6008 switch(rm) {
6009 case 0: /* feni (287 only, just do nop here) */
6010 break;
6011 case 1: /* fdisi (287 only, just do nop here) */
6012 break;
6013 case 2: /* fclex */
6014 gen_helper_fclex(cpu_env);
6015 break;
6016 case 3: /* fninit */
6017 gen_helper_fninit(cpu_env);
6018 break;
6019 case 4: /* fsetpm (287 only, just do nop here) */
6020 break;
6021 default:
6022 goto illegal_op;
6023 }
6024 break;
6025 case 0x1d: /* fucomi */
6026 gen_update_cc_op(s);
6027 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6028 gen_helper_fucomi_ST0_FT0(cpu_env);
6029 set_cc_op(s, CC_OP_EFLAGS);
6030 break;
6031 case 0x1e: /* fcomi */
6032 gen_update_cc_op(s);
6033 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6034 gen_helper_fcomi_ST0_FT0(cpu_env);
6035 set_cc_op(s, CC_OP_EFLAGS);
6036 break;
6037 case 0x28: /* ffree sti */
6038 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6039 break;
6040 case 0x2a: /* fst sti */
6041 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6042 break;
6043 case 0x2b: /* fstp sti */
6044 case 0x0b: /* fstp1 sti, undocumented op */
6045 case 0x3a: /* fstp8 sti, undocumented op */
6046 case 0x3b: /* fstp9 sti, undocumented op */
6047 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6048 gen_helper_fpop(cpu_env);
6049 break;
6050 case 0x2c: /* fucom st(i) */
6051 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6052 gen_helper_fucom_ST0_FT0(cpu_env);
6053 break;
6054 case 0x2d: /* fucomp st(i) */
6055 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6056 gen_helper_fucom_ST0_FT0(cpu_env);
6057 gen_helper_fpop(cpu_env);
6058 break;
6059 case 0x33: /* de/3 */
6060 switch(rm) {
6061 case 1: /* fcompp */
6062 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6063 gen_helper_fcom_ST0_FT0(cpu_env);
6064 gen_helper_fpop(cpu_env);
6065 gen_helper_fpop(cpu_env);
6066 break;
6067 default:
6068 goto illegal_op;
6069 }
6070 break;
6071 case 0x38: /* ffreep sti, undocumented op */
6072 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6073 gen_helper_fpop(cpu_env);
6074 break;
6075 case 0x3c: /* df/4 */
6076 switch(rm) {
6077 case 0:
6078 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
6079 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6080 gen_op_mov_reg_T0(OT_WORD, R_EAX);
6081 break;
6082 default:
6083 goto illegal_op;
6084 }
6085 break;
6086 case 0x3d: /* fucomip */
6087 gen_update_cc_op(s);
6088 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6089 gen_helper_fucomi_ST0_FT0(cpu_env);
6090 gen_helper_fpop(cpu_env);
6091 set_cc_op(s, CC_OP_EFLAGS);
6092 break;
6093 case 0x3e: /* fcomip */
6094 gen_update_cc_op(s);
6095 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6096 gen_helper_fcomi_ST0_FT0(cpu_env);
6097 gen_helper_fpop(cpu_env);
6098 set_cc_op(s, CC_OP_EFLAGS);
6099 break;
6100 case 0x10 ... 0x13: /* fcmovxx */
6101 case 0x18 ... 0x1b:
6102 {
6103 int op1, l1;
6104 static const uint8_t fcmov_cc[8] = {
6105 (JCC_B << 1),
6106 (JCC_Z << 1),
6107 (JCC_BE << 1),
6108 (JCC_P << 1),
6109 };
6110 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
6111 l1 = gen_new_label();
6112 gen_jcc1_noeob(s, op1, l1);
6113 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
6114 gen_set_label(l1);
6115 }
6116 break;
6117 default:
6118 goto illegal_op;
6119 }
6120 }
6121 break;
6122 /************************/
6123 /* string ops */
6124
6125 case 0xa4: /* movsS */
6126 case 0xa5:
6127 if ((b & 1) == 0)
6128 ot = OT_BYTE;
6129 else
6130 ot = dflag + OT_WORD;
6131
6132 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6133 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6134 } else {
6135 gen_movs(s, ot);
6136 }
6137 break;
6138
6139 case 0xaa: /* stosS */
6140 case 0xab:
6141 if ((b & 1) == 0)
6142 ot = OT_BYTE;
6143 else
6144 ot = dflag + OT_WORD;
6145
6146 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6147 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6148 } else {
6149 gen_stos(s, ot);
6150 }
6151 break;
6152 case 0xac: /* lodsS */
6153 case 0xad:
6154 if ((b & 1) == 0)
6155 ot = OT_BYTE;
6156 else
6157 ot = dflag + OT_WORD;
6158 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6159 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6160 } else {
6161 gen_lods(s, ot);
6162 }
6163 break;
6164 case 0xae: /* scasS */
6165 case 0xaf:
6166 if ((b & 1) == 0)
6167 ot = OT_BYTE;
6168 else
6169 ot = dflag + OT_WORD;
6170 if (prefixes & PREFIX_REPNZ) {
6171 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6172 } else if (prefixes & PREFIX_REPZ) {
6173 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6174 } else {
6175 gen_scas(s, ot);
6176 }
6177 break;
6178
6179 case 0xa6: /* cmpsS */
6180 case 0xa7:
6181 if ((b & 1) == 0)
6182 ot = OT_BYTE;
6183 else
6184 ot = dflag + OT_WORD;
6185 if (prefixes & PREFIX_REPNZ) {
6186 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6187 } else if (prefixes & PREFIX_REPZ) {
6188 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6189 } else {
6190 gen_cmps(s, ot);
6191 }
6192 break;
6193 case 0x6c: /* insS */
6194 case 0x6d:
6195 if ((b & 1) == 0)
6196 ot = OT_BYTE;
6197 else
6198 ot = dflag ? OT_LONG : OT_WORD;
6199 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6200 gen_op_andl_T0_ffff();
6201 gen_check_io(s, ot, pc_start - s->cs_base,
6202 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6203 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6204 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6205 } else {
6206 gen_ins(s, ot);
6207 if (use_icount) {
6208 gen_jmp(s, s->pc - s->cs_base);
6209 }
6210 }
6211 break;
6212 case 0x6e: /* outsS */
6213 case 0x6f:
6214 if ((b & 1) == 0)
6215 ot = OT_BYTE;
6216 else
6217 ot = dflag ? OT_LONG : OT_WORD;
6218 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6219 gen_op_andl_T0_ffff();
6220 gen_check_io(s, ot, pc_start - s->cs_base,
6221 svm_is_rep(prefixes) | 4);
6222 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6223 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6224 } else {
6225 gen_outs(s, ot);
6226 if (use_icount) {
6227 gen_jmp(s, s->pc - s->cs_base);
6228 }
6229 }
6230 break;
6231
6232 /************************/
6233 /* port I/O */
6234
6235 case 0xe4:
6236 case 0xe5:
6237 if ((b & 1) == 0)
6238 ot = OT_BYTE;
6239 else
6240 ot = dflag ? OT_LONG : OT_WORD;
6241 val = cpu_ldub_code(env, s->pc++);
6242 gen_op_movl_T0_im(val);
6243 gen_check_io(s, ot, pc_start - s->cs_base,
6244 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6245 if (use_icount)
6246 gen_io_start();
6247 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6248 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6249 gen_op_mov_reg_T1(ot, R_EAX);
6250 if (use_icount) {
6251 gen_io_end();
6252 gen_jmp(s, s->pc - s->cs_base);
6253 }
6254 break;
6255 case 0xe6:
6256 case 0xe7:
6257 if ((b & 1) == 0)
6258 ot = OT_BYTE;
6259 else
6260 ot = dflag ? OT_LONG : OT_WORD;
6261 val = cpu_ldub_code(env, s->pc++);
6262 gen_op_movl_T0_im(val);
6263 gen_check_io(s, ot, pc_start - s->cs_base,
6264 svm_is_rep(prefixes));
6265 gen_op_mov_TN_reg(ot, 1, R_EAX);
6266
6267 if (use_icount)
6268 gen_io_start();
6269 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6270 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6271 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6272 if (use_icount) {
6273 gen_io_end();
6274 gen_jmp(s, s->pc - s->cs_base);
6275 }
6276 break;
6277 case 0xec:
6278 case 0xed:
6279 if ((b & 1) == 0)
6280 ot = OT_BYTE;
6281 else
6282 ot = dflag ? OT_LONG : OT_WORD;
6283 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6284 gen_op_andl_T0_ffff();
6285 gen_check_io(s, ot, pc_start - s->cs_base,
6286 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6287 if (use_icount)
6288 gen_io_start();
6289 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6290 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6291 gen_op_mov_reg_T1(ot, R_EAX);
6292 if (use_icount) {
6293 gen_io_end();
6294 gen_jmp(s, s->pc - s->cs_base);
6295 }
6296 break;
6297 case 0xee:
6298 case 0xef:
6299 if ((b & 1) == 0)
6300 ot = OT_BYTE;
6301 else
6302 ot = dflag ? OT_LONG : OT_WORD;
6303 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6304 gen_op_andl_T0_ffff();
6305 gen_check_io(s, ot, pc_start - s->cs_base,
6306 svm_is_rep(prefixes));
6307 gen_op_mov_TN_reg(ot, 1, R_EAX);
6308
6309 if (use_icount)
6310 gen_io_start();
6311 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6312 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6313 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6314 if (use_icount) {
6315 gen_io_end();
6316 gen_jmp(s, s->pc - s->cs_base);
6317 }
6318 break;
6319
6320 /************************/
6321 /* control */
6322 case 0xc2: /* ret im */
6323 val = cpu_ldsw_code(env, s->pc);
6324 s->pc += 2;
6325 gen_pop_T0(s);
6326 if (CODE64(s) && s->dflag)
6327 s->dflag = 2;
6328 gen_stack_update(s, val + (2 << s->dflag));
6329 if (s->dflag == 0)
6330 gen_op_andl_T0_ffff();
6331 gen_op_jmp_T0();
6332 gen_eob(s);
6333 break;
6334 case 0xc3: /* ret */
6335 gen_pop_T0(s);
6336 gen_pop_update(s);
6337 if (s->dflag == 0)
6338 gen_op_andl_T0_ffff();
6339 gen_op_jmp_T0();
6340 gen_eob(s);
6341 break;
6342 case 0xca: /* lret im */
6343 val = cpu_ldsw_code(env, s->pc);
6344 s->pc += 2;
6345 do_lret:
6346 if (s->pe && !s->vm86) {
6347 gen_update_cc_op(s);
6348 gen_jmp_im(pc_start - s->cs_base);
6349 gen_helper_lret_protected(cpu_env, tcg_const_i32(s->dflag),
6350 tcg_const_i32(val));
6351 } else {
6352 gen_stack_A0(s);
6353 /* pop offset */
6354 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6355 if (s->dflag == 0)
6356 gen_op_andl_T0_ffff();
6357 /* NOTE: keeping EIP updated is not a problem in case of
6358 exception */
6359 gen_op_jmp_T0();
6360 /* pop selector */
6361 gen_op_addl_A0_im(2 << s->dflag);
6362 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6363 gen_op_movl_seg_T0_vm(R_CS);
6364 /* add stack offset */
6365 gen_stack_update(s, val + (4 << s->dflag));
6366 }
6367 gen_eob(s);
6368 break;
6369 case 0xcb: /* lret */
6370 val = 0;
6371 goto do_lret;
6372 case 0xcf: /* iret */
6373 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6374 if (!s->pe) {
6375 /* real mode */
6376 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
6377 set_cc_op(s, CC_OP_EFLAGS);
6378 } else if (s->vm86) {
6379 if (s->iopl != 3) {
6380 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6381 } else {
6382 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
6383 set_cc_op(s, CC_OP_EFLAGS);
6384 }
6385 } else {
6386 gen_update_cc_op(s);
6387 gen_jmp_im(pc_start - s->cs_base);
6388 gen_helper_iret_protected(cpu_env, tcg_const_i32(s->dflag),
6389 tcg_const_i32(s->pc - s->cs_base));
6390 set_cc_op(s, CC_OP_EFLAGS);
6391 }
6392 gen_eob(s);
6393 break;
6394 case 0xe8: /* call im */
6395 {
6396 if (dflag)
6397 tval = (int32_t)insn_get(env, s, OT_LONG);
6398 else
6399 tval = (int16_t)insn_get(env, s, OT_WORD);
6400 next_eip = s->pc - s->cs_base;
6401 tval += next_eip;
6402 if (s->dflag == 0)
6403 tval &= 0xffff;
6404 else if(!CODE64(s))
6405 tval &= 0xffffffff;
6406 gen_movtl_T0_im(next_eip);
6407 gen_push_T0(s);
6408 gen_jmp(s, tval);
6409 }
6410 break;
6411 case 0x9a: /* lcall im */
6412 {
6413 unsigned int selector, offset;
6414
6415 if (CODE64(s))
6416 goto illegal_op;
6417 ot = dflag ? OT_LONG : OT_WORD;
6418 offset = insn_get(env, s, ot);
6419 selector = insn_get(env, s, OT_WORD);
6420
6421 gen_op_movl_T0_im(selector);
6422 gen_op_movl_T1_imu(offset);
6423 }
6424 goto do_lcall;
6425 case 0xe9: /* jmp im */
6426 if (dflag)
6427 tval = (int32_t)insn_get(env, s, OT_LONG);
6428 else
6429 tval = (int16_t)insn_get(env, s, OT_WORD);
6430 tval += s->pc - s->cs_base;
6431 if (s->dflag == 0)
6432 tval &= 0xffff;
6433 else if(!CODE64(s))
6434 tval &= 0xffffffff;
6435 gen_jmp(s, tval);
6436 break;
6437 case 0xea: /* ljmp im */
6438 {
6439 unsigned int selector, offset;
6440
6441 if (CODE64(s))
6442 goto illegal_op;
6443 ot = dflag ? OT_LONG : OT_WORD;
6444 offset = insn_get(env, s, ot);
6445 selector = insn_get(env, s, OT_WORD);
6446
6447 gen_op_movl_T0_im(selector);
6448 gen_op_movl_T1_imu(offset);
6449 }
6450 goto do_ljmp;
6451 case 0xeb: /* jmp Jb */
6452 tval = (int8_t)insn_get(env, s, OT_BYTE);
6453 tval += s->pc - s->cs_base;
6454 if (s->dflag == 0)
6455 tval &= 0xffff;
6456 gen_jmp(s, tval);
6457 break;
6458 case 0x70 ... 0x7f: /* jcc Jb */
6459 tval = (int8_t)insn_get(env, s, OT_BYTE);
6460 goto do_jcc;
6461 case 0x180 ... 0x18f: /* jcc Jv */
6462 if (dflag) {
6463 tval = (int32_t)insn_get(env, s, OT_LONG);
6464 } else {
6465 tval = (int16_t)insn_get(env, s, OT_WORD);
6466 }
6467 do_jcc:
6468 next_eip = s->pc - s->cs_base;
6469 tval += next_eip;
6470 if (s->dflag == 0)
6471 tval &= 0xffff;
6472 gen_jcc(s, b, tval, next_eip);
6473 break;
6474
6475 case 0x190 ... 0x19f: /* setcc Gv */
6476 modrm = cpu_ldub_code(env, s->pc++);
6477 gen_setcc1(s, b, cpu_T[0]);
6478 gen_ldst_modrm(env, s, modrm, OT_BYTE, OR_TMP0, 1);
6479 break;
6480 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6481 ot = dflag + OT_WORD;
6482 modrm = cpu_ldub_code(env, s->pc++);
6483 reg = ((modrm >> 3) & 7) | rex_r;
6484 gen_cmovcc1(env, s, ot, b, modrm, reg);
6485 break;
6486
6487 /************************/
6488 /* flags */
6489 case 0x9c: /* pushf */
6490 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6491 if (s->vm86 && s->iopl != 3) {
6492 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6493 } else {
6494 gen_update_cc_op(s);
6495 gen_helper_read_eflags(cpu_T[0], cpu_env);
6496 gen_push_T0(s);
6497 }
6498 break;
6499 case 0x9d: /* popf */
6500 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6501 if (s->vm86 && s->iopl != 3) {
6502 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6503 } else {
6504 gen_pop_T0(s);
6505 if (s->cpl == 0) {
6506 if (s->dflag) {
6507 gen_helper_write_eflags(cpu_env, cpu_T[0],
6508 tcg_const_i32((TF_MASK | AC_MASK |
6509 ID_MASK | NT_MASK |
6510 IF_MASK |
6511 IOPL_MASK)));
6512 } else {
6513 gen_helper_write_eflags(cpu_env, cpu_T[0],
6514 tcg_const_i32((TF_MASK | AC_MASK |
6515 ID_MASK | NT_MASK |
6516 IF_MASK | IOPL_MASK)
6517 & 0xffff));
6518 }
6519 } else {
6520 if (s->cpl <= s->iopl) {
6521 if (s->dflag) {
6522 gen_helper_write_eflags(cpu_env, cpu_T[0],
6523 tcg_const_i32((TF_MASK |
6524 AC_MASK |
6525 ID_MASK |
6526 NT_MASK |
6527 IF_MASK)));
6528 } else {
6529 gen_helper_write_eflags(cpu_env, cpu_T[0],
6530 tcg_const_i32((TF_MASK |
6531 AC_MASK |
6532 ID_MASK |
6533 NT_MASK |
6534 IF_MASK)
6535 & 0xffff));
6536 }
6537 } else {
6538 if (s->dflag) {
6539 gen_helper_write_eflags(cpu_env, cpu_T[0],
6540 tcg_const_i32((TF_MASK | AC_MASK |
6541 ID_MASK | NT_MASK)));
6542 } else {
6543 gen_helper_write_eflags(cpu_env, cpu_T[0],
6544 tcg_const_i32((TF_MASK | AC_MASK |
6545 ID_MASK | NT_MASK)
6546 & 0xffff));
6547 }
6548 }
6549 }
6550 gen_pop_update(s);
6551 set_cc_op(s, CC_OP_EFLAGS);
6552 /* abort translation because TF/AC flag may change */
6553 gen_jmp_im(s->pc - s->cs_base);
6554 gen_eob(s);
6555 }
6556 break;
6557 case 0x9e: /* sahf */
6558 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6559 goto illegal_op;
6560 gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
6561 gen_compute_eflags(s);
6562 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6563 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6564 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6565 break;
6566 case 0x9f: /* lahf */
6567 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6568 goto illegal_op;
6569 gen_compute_eflags(s);
6570 /* Note: gen_compute_eflags() only gives the condition codes */
6571 tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
6572 gen_op_mov_reg_T0(OT_BYTE, R_AH);
6573 break;
6574 case 0xf5: /* cmc */
6575 gen_compute_eflags(s);
6576 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6577 break;
6578 case 0xf8: /* clc */
6579 gen_compute_eflags(s);
6580 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6581 break;
6582 case 0xf9: /* stc */
6583 gen_compute_eflags(s);
6584 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6585 break;
6586 case 0xfc: /* cld */
6587 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6588 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6589 break;
6590 case 0xfd: /* std */
6591 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6592 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6593 break;
6594
6595 /************************/
6596 /* bit operations */
6597 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6598 ot = dflag + OT_WORD;
6599 modrm = cpu_ldub_code(env, s->pc++);
6600 op = (modrm >> 3) & 7;
6601 mod = (modrm >> 6) & 3;
6602 rm = (modrm & 7) | REX_B(s);
6603 if (mod != 3) {
6604 s->rip_offset = 1;
6605 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
6606 gen_op_ld_T0_A0(ot + s->mem_index);
6607 } else {
6608 gen_op_mov_TN_reg(ot, 0, rm);
6609 }
6610 /* load shift */
6611 val = cpu_ldub_code(env, s->pc++);
6612 gen_op_movl_T1_im(val);
6613 if (op < 4)
6614 goto illegal_op;
6615 op -= 4;
6616 goto bt_op;
6617 case 0x1a3: /* bt Gv, Ev */
6618 op = 0;
6619 goto do_btx;
6620 case 0x1ab: /* bts */
6621 op = 1;
6622 goto do_btx;
6623 case 0x1b3: /* btr */
6624 op = 2;
6625 goto do_btx;
6626 case 0x1bb: /* btc */
6627 op = 3;
6628 do_btx:
6629 ot = dflag + OT_WORD;
6630 modrm = cpu_ldub_code(env, s->pc++);
6631 reg = ((modrm >> 3) & 7) | rex_r;
6632 mod = (modrm >> 6) & 3;
6633 rm = (modrm & 7) | REX_B(s);
6634 gen_op_mov_TN_reg(OT_LONG, 1, reg);
6635 if (mod != 3) {
6636 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
6637 /* specific case: we need to add a displacement */
6638 gen_exts(ot, cpu_T[1]);
6639 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6640 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6641 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6642 gen_op_ld_T0_A0(ot + s->mem_index);
6643 } else {
6644 gen_op_mov_TN_reg(ot, 0, rm);
6645 }
6646 bt_op:
6647 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6648 switch(op) {
6649 case 0:
6650 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
6651 tcg_gen_movi_tl(cpu_cc_dst, 0);
6652 break;
6653 case 1:
6654 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6655 tcg_gen_movi_tl(cpu_tmp0, 1);
6656 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6657 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6658 break;
6659 case 2:
6660 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6661 tcg_gen_movi_tl(cpu_tmp0, 1);
6662 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6663 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6664 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6665 break;
6666 default:
6667 case 3:
6668 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6669 tcg_gen_movi_tl(cpu_tmp0, 1);
6670 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6671 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6672 break;
6673 }
6674 set_cc_op(s, CC_OP_SARB + ot);
6675 if (op != 0) {
6676 if (mod != 3)
6677 gen_op_st_T0_A0(ot + s->mem_index);
6678 else
6679 gen_op_mov_reg_T0(ot, rm);
6680 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6681 tcg_gen_movi_tl(cpu_cc_dst, 0);
6682 }
6683 break;
6684 case 0x1bc: /* bsf */
6685 case 0x1bd: /* bsr */
6686 {
6687 int label1;
6688 TCGv t0;
6689
6690 ot = dflag + OT_WORD;
6691 modrm = cpu_ldub_code(env, s->pc++);
6692 reg = ((modrm >> 3) & 7) | rex_r;
6693 gen_ldst_modrm(env, s,modrm, ot, OR_TMP0, 0);
6694 gen_extu(ot, cpu_T[0]);
6695 t0 = tcg_temp_local_new();
6696 tcg_gen_mov_tl(t0, cpu_T[0]);
6697 if ((b & 1) && (prefixes & PREFIX_REPZ) &&
6698 (s->cpuid_ext3_features & CPUID_EXT3_ABM)) {
6699 switch(ot) {
6700 case OT_WORD: gen_helper_lzcnt(cpu_T[0], t0,
6701 tcg_const_i32(16)); break;
6702 case OT_LONG: gen_helper_lzcnt(cpu_T[0], t0,
6703 tcg_const_i32(32)); break;
6704 case OT_QUAD: gen_helper_lzcnt(cpu_T[0], t0,
6705 tcg_const_i32(64)); break;
6706 }
6707 gen_op_mov_reg_T0(ot, reg);
6708 } else {
6709 label1 = gen_new_label();
6710 tcg_gen_movi_tl(cpu_cc_dst, 0);
6711 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
6712 if (b & 1) {
6713 gen_helper_bsr(cpu_T[0], t0);
6714 } else {
6715 gen_helper_bsf(cpu_T[0], t0);
6716 }
6717 gen_op_mov_reg_T0(ot, reg);
6718 tcg_gen_movi_tl(cpu_cc_dst, 1);
6719 gen_set_label(label1);
6720 set_cc_op(s, CC_OP_LOGICB + ot);
6721 }
6722 tcg_temp_free(t0);
6723 }
6724 break;
6725 /************************/
6726 /* bcd */
6727 case 0x27: /* daa */
6728 if (CODE64(s))
6729 goto illegal_op;
6730 gen_update_cc_op(s);
6731 gen_helper_daa(cpu_env);
6732 set_cc_op(s, CC_OP_EFLAGS);
6733 break;
6734 case 0x2f: /* das */
6735 if (CODE64(s))
6736 goto illegal_op;
6737 gen_update_cc_op(s);
6738 gen_helper_das(cpu_env);
6739 set_cc_op(s, CC_OP_EFLAGS);
6740 break;
6741 case 0x37: /* aaa */
6742 if (CODE64(s))
6743 goto illegal_op;
6744 gen_update_cc_op(s);
6745 gen_helper_aaa(cpu_env);
6746 set_cc_op(s, CC_OP_EFLAGS);
6747 break;
6748 case 0x3f: /* aas */
6749 if (CODE64(s))
6750 goto illegal_op;
6751 gen_update_cc_op(s);
6752 gen_helper_aas(cpu_env);
6753 set_cc_op(s, CC_OP_EFLAGS);
6754 break;
6755 case 0xd4: /* aam */
6756 if (CODE64(s))
6757 goto illegal_op;
6758 val = cpu_ldub_code(env, s->pc++);
6759 if (val == 0) {
6760 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6761 } else {
6762 gen_helper_aam(cpu_env, tcg_const_i32(val));
6763 set_cc_op(s, CC_OP_LOGICB);
6764 }
6765 break;
6766 case 0xd5: /* aad */
6767 if (CODE64(s))
6768 goto illegal_op;
6769 val = cpu_ldub_code(env, s->pc++);
6770 gen_helper_aad(cpu_env, tcg_const_i32(val));
6771 set_cc_op(s, CC_OP_LOGICB);
6772 break;
6773 /************************/
6774 /* misc */
6775 case 0x90: /* nop */
6776 /* XXX: correct lock test for all insn */
6777 if (prefixes & PREFIX_LOCK) {
6778 goto illegal_op;
6779 }
6780 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6781 if (REX_B(s)) {
6782 goto do_xchg_reg_eax;
6783 }
6784 if (prefixes & PREFIX_REPZ) {
6785 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
6786 }
6787 break;
6788 case 0x9b: /* fwait */
6789 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6790 (HF_MP_MASK | HF_TS_MASK)) {
6791 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6792 } else {
6793 gen_update_cc_op(s);
6794 gen_jmp_im(pc_start - s->cs_base);
6795 gen_helper_fwait(cpu_env);
6796 }
6797 break;
6798 case 0xcc: /* int3 */
6799 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6800 break;
6801 case 0xcd: /* int N */
6802 val = cpu_ldub_code(env, s->pc++);
6803 if (s->vm86 && s->iopl != 3) {
6804 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6805 } else {
6806 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6807 }
6808 break;
6809 case 0xce: /* into */
6810 if (CODE64(s))
6811 goto illegal_op;
6812 gen_update_cc_op(s);
6813 gen_jmp_im(pc_start - s->cs_base);
6814 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
6815 break;
6816 #ifdef WANT_ICEBP
6817 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6818 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6819 #if 1
6820 gen_debug(s, pc_start - s->cs_base);
6821 #else
6822 /* start debug */
6823 tb_flush(env);
6824 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6825 #endif
6826 break;
6827 #endif
6828 case 0xfa: /* cli */
6829 if (!s->vm86) {
6830 if (s->cpl <= s->iopl) {
6831 gen_helper_cli(cpu_env);
6832 } else {
6833 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6834 }
6835 } else {
6836 if (s->iopl == 3) {
6837 gen_helper_cli(cpu_env);
6838 } else {
6839 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6840 }
6841 }
6842 break;
6843 case 0xfb: /* sti */
6844 if (!s->vm86) {
6845 if (s->cpl <= s->iopl) {
6846 gen_sti:
6847 gen_helper_sti(cpu_env);
6848 /* interruptions are enabled only the first insn after sti */
6849 /* If several instructions disable interrupts, only the
6850 _first_ does it */
6851 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
6852 gen_helper_set_inhibit_irq(cpu_env);
6853 /* give a chance to handle pending irqs */
6854 gen_jmp_im(s->pc - s->cs_base);
6855 gen_eob(s);
6856 } else {
6857 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6858 }
6859 } else {
6860 if (s->iopl == 3) {
6861 goto gen_sti;
6862 } else {
6863 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6864 }
6865 }
6866 break;
6867 case 0x62: /* bound */
6868 if (CODE64(s))
6869 goto illegal_op;
6870 ot = dflag ? OT_LONG : OT_WORD;
6871 modrm = cpu_ldub_code(env, s->pc++);
6872 reg = (modrm >> 3) & 7;
6873 mod = (modrm >> 6) & 3;
6874 if (mod == 3)
6875 goto illegal_op;
6876 gen_op_mov_TN_reg(ot, 0, reg);
6877 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
6878 gen_jmp_im(pc_start - s->cs_base);
6879 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6880 if (ot == OT_WORD) {
6881 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
6882 } else {
6883 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
6884 }
6885 break;
6886 case 0x1c8 ... 0x1cf: /* bswap reg */
6887 reg = (b & 7) | REX_B(s);
6888 #ifdef TARGET_X86_64
6889 if (dflag == 2) {
6890 gen_op_mov_TN_reg(OT_QUAD, 0, reg);
6891 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
6892 gen_op_mov_reg_T0(OT_QUAD, reg);
6893 } else
6894 #endif
6895 {
6896 gen_op_mov_TN_reg(OT_LONG, 0, reg);
6897 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
6898 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
6899 gen_op_mov_reg_T0(OT_LONG, reg);
6900 }
6901 break;
6902 case 0xd6: /* salc */
6903 if (CODE64(s))
6904 goto illegal_op;
6905 gen_compute_eflags_c(s, cpu_T[0]);
6906 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
6907 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
6908 break;
6909 case 0xe0: /* loopnz */
6910 case 0xe1: /* loopz */
6911 case 0xe2: /* loop */
6912 case 0xe3: /* jecxz */
6913 {
6914 int l1, l2, l3;
6915
6916 tval = (int8_t)insn_get(env, s, OT_BYTE);
6917 next_eip = s->pc - s->cs_base;
6918 tval += next_eip;
6919 if (s->dflag == 0)
6920 tval &= 0xffff;
6921
6922 l1 = gen_new_label();
6923 l2 = gen_new_label();
6924 l3 = gen_new_label();
6925 b &= 3;
6926 switch(b) {
6927 case 0: /* loopnz */
6928 case 1: /* loopz */
6929 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6930 gen_op_jz_ecx(s->aflag, l3);
6931 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
6932 break;
6933 case 2: /* loop */
6934 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6935 gen_op_jnz_ecx(s->aflag, l1);
6936 break;
6937 default:
6938 case 3: /* jcxz */
6939 gen_op_jz_ecx(s->aflag, l1);
6940 break;
6941 }
6942
6943 gen_set_label(l3);
6944 gen_jmp_im(next_eip);
6945 tcg_gen_br(l2);
6946
6947 gen_set_label(l1);
6948 gen_jmp_im(tval);
6949 gen_set_label(l2);
6950 gen_eob(s);
6951 }
6952 break;
6953 case 0x130: /* wrmsr */
6954 case 0x132: /* rdmsr */
6955 if (s->cpl != 0) {
6956 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6957 } else {
6958 gen_update_cc_op(s);
6959 gen_jmp_im(pc_start - s->cs_base);
6960 if (b & 2) {
6961 gen_helper_rdmsr(cpu_env);
6962 } else {
6963 gen_helper_wrmsr(cpu_env);
6964 }
6965 }
6966 break;
6967 case 0x131: /* rdtsc */
6968 gen_update_cc_op(s);
6969 gen_jmp_im(pc_start - s->cs_base);
6970 if (use_icount)
6971 gen_io_start();
6972 gen_helper_rdtsc(cpu_env);
6973 if (use_icount) {
6974 gen_io_end();
6975 gen_jmp(s, s->pc - s->cs_base);
6976 }
6977 break;
6978 case 0x133: /* rdpmc */
6979 gen_update_cc_op(s);
6980 gen_jmp_im(pc_start - s->cs_base);
6981 gen_helper_rdpmc(cpu_env);
6982 break;
6983 case 0x134: /* sysenter */
6984 /* For Intel SYSENTER is valid on 64-bit */
6985 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
6986 goto illegal_op;
6987 if (!s->pe) {
6988 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6989 } else {
6990 gen_update_cc_op(s);
6991 gen_jmp_im(pc_start - s->cs_base);
6992 gen_helper_sysenter(cpu_env);
6993 gen_eob(s);
6994 }
6995 break;
6996 case 0x135: /* sysexit */
6997 /* For Intel SYSEXIT is valid on 64-bit */
6998 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
6999 goto illegal_op;
7000 if (!s->pe) {
7001 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7002 } else {
7003 gen_update_cc_op(s);
7004 gen_jmp_im(pc_start - s->cs_base);
7005 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag));
7006 gen_eob(s);
7007 }
7008 break;
7009 #ifdef TARGET_X86_64
7010 case 0x105: /* syscall */
7011 /* XXX: is it usable in real mode ? */
7012 gen_update_cc_op(s);
7013 gen_jmp_im(pc_start - s->cs_base);
7014 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
7015 gen_eob(s);
7016 break;
7017 case 0x107: /* sysret */
7018 if (!s->pe) {
7019 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7020 } else {
7021 gen_update_cc_op(s);
7022 gen_jmp_im(pc_start - s->cs_base);
7023 gen_helper_sysret(cpu_env, tcg_const_i32(s->dflag));
7024 /* condition codes are modified only in long mode */
7025 if (s->lma) {
7026 set_cc_op(s, CC_OP_EFLAGS);
7027 }
7028 gen_eob(s);
7029 }
7030 break;
7031 #endif
7032 case 0x1a2: /* cpuid */
7033 gen_update_cc_op(s);
7034 gen_jmp_im(pc_start - s->cs_base);
7035 gen_helper_cpuid(cpu_env);
7036 break;
7037 case 0xf4: /* hlt */
7038 if (s->cpl != 0) {
7039 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7040 } else {
7041 gen_update_cc_op(s);
7042 gen_jmp_im(pc_start - s->cs_base);
7043 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
7044 s->is_jmp = DISAS_TB_JUMP;
7045 }
7046 break;
7047 case 0x100:
7048 modrm = cpu_ldub_code(env, s->pc++);
7049 mod = (modrm >> 6) & 3;
7050 op = (modrm >> 3) & 7;
7051 switch(op) {
7052 case 0: /* sldt */
7053 if (!s->pe || s->vm86)
7054 goto illegal_op;
7055 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7056 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
7057 ot = OT_WORD;
7058 if (mod == 3)
7059 ot += s->dflag;
7060 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7061 break;
7062 case 2: /* lldt */
7063 if (!s->pe || s->vm86)
7064 goto illegal_op;
7065 if (s->cpl != 0) {
7066 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7067 } else {
7068 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7069 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7070 gen_jmp_im(pc_start - s->cs_base);
7071 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7072 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
7073 }
7074 break;
7075 case 1: /* str */
7076 if (!s->pe || s->vm86)
7077 goto illegal_op;
7078 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7079 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
7080 ot = OT_WORD;
7081 if (mod == 3)
7082 ot += s->dflag;
7083 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7084 break;
7085 case 3: /* ltr */
7086 if (!s->pe || s->vm86)
7087 goto illegal_op;
7088 if (s->cpl != 0) {
7089 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7090 } else {
7091 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7092 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7093 gen_jmp_im(pc_start - s->cs_base);
7094 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7095 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
7096 }
7097 break;
7098 case 4: /* verr */
7099 case 5: /* verw */
7100 if (!s->pe || s->vm86)
7101 goto illegal_op;
7102 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7103 gen_update_cc_op(s);
7104 if (op == 4) {
7105 gen_helper_verr(cpu_env, cpu_T[0]);
7106 } else {
7107 gen_helper_verw(cpu_env, cpu_T[0]);
7108 }
7109 set_cc_op(s, CC_OP_EFLAGS);
7110 break;
7111 default:
7112 goto illegal_op;
7113 }
7114 break;
7115 case 0x101:
7116 modrm = cpu_ldub_code(env, s->pc++);
7117 mod = (modrm >> 6) & 3;
7118 op = (modrm >> 3) & 7;
7119 rm = modrm & 7;
7120 switch(op) {
7121 case 0: /* sgdt */
7122 if (mod == 3)
7123 goto illegal_op;
7124 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7125 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7126 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
7127 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7128 gen_add_A0_im(s, 2);
7129 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
7130 if (!s->dflag)
7131 gen_op_andl_T0_im(0xffffff);
7132 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7133 break;
7134 case 1:
7135 if (mod == 3) {
7136 switch (rm) {
7137 case 0: /* monitor */
7138 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7139 s->cpl != 0)
7140 goto illegal_op;
7141 gen_update_cc_op(s);
7142 gen_jmp_im(pc_start - s->cs_base);
7143 #ifdef TARGET_X86_64
7144 if (s->aflag == 2) {
7145 gen_op_movq_A0_reg(R_EAX);
7146 } else
7147 #endif
7148 {
7149 gen_op_movl_A0_reg(R_EAX);
7150 if (s->aflag == 0)
7151 gen_op_andl_A0_ffff();
7152 }
7153 gen_add_A0_ds_seg(s);
7154 gen_helper_monitor(cpu_env, cpu_A0);
7155 break;
7156 case 1: /* mwait */
7157 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7158 s->cpl != 0)
7159 goto illegal_op;
7160 gen_update_cc_op(s);
7161 gen_jmp_im(pc_start - s->cs_base);
7162 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
7163 gen_eob(s);
7164 break;
7165 case 2: /* clac */
7166 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7167 s->cpl != 0) {
7168 goto illegal_op;
7169 }
7170 gen_helper_clac(cpu_env);
7171 gen_jmp_im(s->pc - s->cs_base);
7172 gen_eob(s);
7173 break;
7174 case 3: /* stac */
7175 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7176 s->cpl != 0) {
7177 goto illegal_op;
7178 }
7179 gen_helper_stac(cpu_env);
7180 gen_jmp_im(s->pc - s->cs_base);
7181 gen_eob(s);
7182 break;
7183 default:
7184 goto illegal_op;
7185 }
7186 } else { /* sidt */
7187 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7188 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7189 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7190 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7191 gen_add_A0_im(s, 2);
7192 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7193 if (!s->dflag)
7194 gen_op_andl_T0_im(0xffffff);
7195 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7196 }
7197 break;
7198 case 2: /* lgdt */
7199 case 3: /* lidt */
7200 if (mod == 3) {
7201 gen_update_cc_op(s);
7202 gen_jmp_im(pc_start - s->cs_base);
7203 switch(rm) {
7204 case 0: /* VMRUN */
7205 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7206 goto illegal_op;
7207 if (s->cpl != 0) {
7208 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7209 break;
7210 } else {
7211 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag),
7212 tcg_const_i32(s->pc - pc_start));
7213 tcg_gen_exit_tb(0);
7214 s->is_jmp = DISAS_TB_JUMP;
7215 }
7216 break;
7217 case 1: /* VMMCALL */
7218 if (!(s->flags & HF_SVME_MASK))
7219 goto illegal_op;
7220 gen_helper_vmmcall(cpu_env);
7221 break;
7222 case 2: /* VMLOAD */
7223 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7224 goto illegal_op;
7225 if (s->cpl != 0) {
7226 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7227 break;
7228 } else {
7229 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag));
7230 }
7231 break;
7232 case 3: /* VMSAVE */
7233 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7234 goto illegal_op;
7235 if (s->cpl != 0) {
7236 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7237 break;
7238 } else {
7239 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag));
7240 }
7241 break;
7242 case 4: /* STGI */
7243 if ((!(s->flags & HF_SVME_MASK) &&
7244 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7245 !s->pe)
7246 goto illegal_op;
7247 if (s->cpl != 0) {
7248 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7249 break;
7250 } else {
7251 gen_helper_stgi(cpu_env);
7252 }
7253 break;
7254 case 5: /* CLGI */
7255 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7256 goto illegal_op;
7257 if (s->cpl != 0) {
7258 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7259 break;
7260 } else {
7261 gen_helper_clgi(cpu_env);
7262 }
7263 break;
7264 case 6: /* SKINIT */
7265 if ((!(s->flags & HF_SVME_MASK) &&
7266 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7267 !s->pe)
7268 goto illegal_op;
7269 gen_helper_skinit(cpu_env);
7270 break;
7271 case 7: /* INVLPGA */
7272 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7273 goto illegal_op;
7274 if (s->cpl != 0) {
7275 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7276 break;
7277 } else {
7278 gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag));
7279 }
7280 break;
7281 default:
7282 goto illegal_op;
7283 }
7284 } else if (s->cpl != 0) {
7285 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7286 } else {
7287 gen_svm_check_intercept(s, pc_start,
7288 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7289 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7290 gen_op_ld_T1_A0(OT_WORD + s->mem_index);
7291 gen_add_A0_im(s, 2);
7292 gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7293 if (!s->dflag)
7294 gen_op_andl_T0_im(0xffffff);
7295 if (op == 2) {
7296 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7297 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7298 } else {
7299 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7300 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7301 }
7302 }
7303 break;
7304 case 4: /* smsw */
7305 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
7306 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7307 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7308 #else
7309 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7310 #endif
7311 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 1);
7312 break;
7313 case 6: /* lmsw */
7314 if (s->cpl != 0) {
7315 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7316 } else {
7317 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7318 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7319 gen_helper_lmsw(cpu_env, cpu_T[0]);
7320 gen_jmp_im(s->pc - s->cs_base);
7321 gen_eob(s);
7322 }
7323 break;
7324 case 7:
7325 if (mod != 3) { /* invlpg */
7326 if (s->cpl != 0) {
7327 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7328 } else {
7329 gen_update_cc_op(s);
7330 gen_jmp_im(pc_start - s->cs_base);
7331 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7332 gen_helper_invlpg(cpu_env, cpu_A0);
7333 gen_jmp_im(s->pc - s->cs_base);
7334 gen_eob(s);
7335 }
7336 } else {
7337 switch (rm) {
7338 case 0: /* swapgs */
7339 #ifdef TARGET_X86_64
7340 if (CODE64(s)) {
7341 if (s->cpl != 0) {
7342 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7343 } else {
7344 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7345 offsetof(CPUX86State,segs[R_GS].base));
7346 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7347 offsetof(CPUX86State,kernelgsbase));
7348 tcg_gen_st_tl(cpu_T[1], cpu_env,
7349 offsetof(CPUX86State,segs[R_GS].base));
7350 tcg_gen_st_tl(cpu_T[0], cpu_env,
7351 offsetof(CPUX86State,kernelgsbase));
7352 }
7353 } else
7354 #endif
7355 {
7356 goto illegal_op;
7357 }
7358 break;
7359 case 1: /* rdtscp */
7360 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7361 goto illegal_op;
7362 gen_update_cc_op(s);
7363 gen_jmp_im(pc_start - s->cs_base);
7364 if (use_icount)
7365 gen_io_start();
7366 gen_helper_rdtscp(cpu_env);
7367 if (use_icount) {
7368 gen_io_end();
7369 gen_jmp(s, s->pc - s->cs_base);
7370 }
7371 break;
7372 default:
7373 goto illegal_op;
7374 }
7375 }
7376 break;
7377 default:
7378 goto illegal_op;
7379 }
7380 break;
7381 case 0x108: /* invd */
7382 case 0x109: /* wbinvd */
7383 if (s->cpl != 0) {
7384 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7385 } else {
7386 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7387 /* nothing to do */
7388 }
7389 break;
7390 case 0x63: /* arpl or movslS (x86_64) */
7391 #ifdef TARGET_X86_64
7392 if (CODE64(s)) {
7393 int d_ot;
7394 /* d_ot is the size of destination */
7395 d_ot = dflag + OT_WORD;
7396
7397 modrm = cpu_ldub_code(env, s->pc++);
7398 reg = ((modrm >> 3) & 7) | rex_r;
7399 mod = (modrm >> 6) & 3;
7400 rm = (modrm & 7) | REX_B(s);
7401
7402 if (mod == 3) {
7403 gen_op_mov_TN_reg(OT_LONG, 0, rm);
7404 /* sign extend */
7405 if (d_ot == OT_QUAD)
7406 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7407 gen_op_mov_reg_T0(d_ot, reg);
7408 } else {
7409 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7410 if (d_ot == OT_QUAD) {
7411 gen_op_lds_T0_A0(OT_LONG + s->mem_index);
7412 } else {
7413 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7414 }
7415 gen_op_mov_reg_T0(d_ot, reg);
7416 }
7417 } else
7418 #endif
7419 {
7420 int label1;
7421 TCGv t0, t1, t2, a0;
7422
7423 if (!s->pe || s->vm86)
7424 goto illegal_op;
7425 t0 = tcg_temp_local_new();
7426 t1 = tcg_temp_local_new();
7427 t2 = tcg_temp_local_new();
7428 ot = OT_WORD;
7429 modrm = cpu_ldub_code(env, s->pc++);
7430 reg = (modrm >> 3) & 7;
7431 mod = (modrm >> 6) & 3;
7432 rm = modrm & 7;
7433 if (mod != 3) {
7434 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7435 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
7436 a0 = tcg_temp_local_new();
7437 tcg_gen_mov_tl(a0, cpu_A0);
7438 } else {
7439 gen_op_mov_v_reg(ot, t0, rm);
7440 TCGV_UNUSED(a0);
7441 }
7442 gen_op_mov_v_reg(ot, t1, reg);
7443 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7444 tcg_gen_andi_tl(t1, t1, 3);
7445 tcg_gen_movi_tl(t2, 0);
7446 label1 = gen_new_label();
7447 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7448 tcg_gen_andi_tl(t0, t0, ~3);
7449 tcg_gen_or_tl(t0, t0, t1);
7450 tcg_gen_movi_tl(t2, CC_Z);
7451 gen_set_label(label1);
7452 if (mod != 3) {
7453 gen_op_st_v(ot + s->mem_index, t0, a0);
7454 tcg_temp_free(a0);
7455 } else {
7456 gen_op_mov_reg_v(ot, rm, t0);
7457 }
7458 gen_compute_eflags(s);
7459 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7460 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7461 tcg_temp_free(t0);
7462 tcg_temp_free(t1);
7463 tcg_temp_free(t2);
7464 }
7465 break;
7466 case 0x102: /* lar */
7467 case 0x103: /* lsl */
7468 {
7469 int label1;
7470 TCGv t0;
7471 if (!s->pe || s->vm86)
7472 goto illegal_op;
7473 ot = dflag ? OT_LONG : OT_WORD;
7474 modrm = cpu_ldub_code(env, s->pc++);
7475 reg = ((modrm >> 3) & 7) | rex_r;
7476 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7477 t0 = tcg_temp_local_new();
7478 gen_update_cc_op(s);
7479 if (b == 0x102) {
7480 gen_helper_lar(t0, cpu_env, cpu_T[0]);
7481 } else {
7482 gen_helper_lsl(t0, cpu_env, cpu_T[0]);
7483 }
7484 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7485 label1 = gen_new_label();
7486 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7487 gen_op_mov_reg_v(ot, reg, t0);
7488 gen_set_label(label1);
7489 set_cc_op(s, CC_OP_EFLAGS);
7490 tcg_temp_free(t0);
7491 }
7492 break;
7493 case 0x118:
7494 modrm = cpu_ldub_code(env, s->pc++);
7495 mod = (modrm >> 6) & 3;
7496 op = (modrm >> 3) & 7;
7497 switch(op) {
7498 case 0: /* prefetchnta */
7499 case 1: /* prefetchnt0 */
7500 case 2: /* prefetchnt0 */
7501 case 3: /* prefetchnt0 */
7502 if (mod == 3)
7503 goto illegal_op;
7504 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7505 /* nothing more to do */
7506 break;
7507 default: /* nop (multi byte) */
7508 gen_nop_modrm(env, s, modrm);
7509 break;
7510 }
7511 break;
7512 case 0x119 ... 0x11f: /* nop (multi byte) */
7513 modrm = cpu_ldub_code(env, s->pc++);
7514 gen_nop_modrm(env, s, modrm);
7515 break;
7516 case 0x120: /* mov reg, crN */
7517 case 0x122: /* mov crN, reg */
7518 if (s->cpl != 0) {
7519 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7520 } else {
7521 modrm = cpu_ldub_code(env, s->pc++);
7522 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7523 * AMD documentation (24594.pdf) and testing of
7524 * intel 386 and 486 processors all show that the mod bits
7525 * are assumed to be 1's, regardless of actual values.
7526 */
7527 rm = (modrm & 7) | REX_B(s);
7528 reg = ((modrm >> 3) & 7) | rex_r;
7529 if (CODE64(s))
7530 ot = OT_QUAD;
7531 else
7532 ot = OT_LONG;
7533 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7534 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7535 reg = 8;
7536 }
7537 switch(reg) {
7538 case 0:
7539 case 2:
7540 case 3:
7541 case 4:
7542 case 8:
7543 gen_update_cc_op(s);
7544 gen_jmp_im(pc_start - s->cs_base);
7545 if (b & 2) {
7546 gen_op_mov_TN_reg(ot, 0, rm);
7547 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7548 cpu_T[0]);
7549 gen_jmp_im(s->pc - s->cs_base);
7550 gen_eob(s);
7551 } else {
7552 gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
7553 gen_op_mov_reg_T0(ot, rm);
7554 }
7555 break;
7556 default:
7557 goto illegal_op;
7558 }
7559 }
7560 break;
7561 case 0x121: /* mov reg, drN */
7562 case 0x123: /* mov drN, reg */
7563 if (s->cpl != 0) {
7564 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7565 } else {
7566 modrm = cpu_ldub_code(env, s->pc++);
7567 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7568 * AMD documentation (24594.pdf) and testing of
7569 * intel 386 and 486 processors all show that the mod bits
7570 * are assumed to be 1's, regardless of actual values.
7571 */
7572 rm = (modrm & 7) | REX_B(s);
7573 reg = ((modrm >> 3) & 7) | rex_r;
7574 if (CODE64(s))
7575 ot = OT_QUAD;
7576 else
7577 ot = OT_LONG;
7578 /* XXX: do it dynamically with CR4.DE bit */
7579 if (reg == 4 || reg == 5 || reg >= 8)
7580 goto illegal_op;
7581 if (b & 2) {
7582 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7583 gen_op_mov_TN_reg(ot, 0, rm);
7584 gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
7585 gen_jmp_im(s->pc - s->cs_base);
7586 gen_eob(s);
7587 } else {
7588 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7589 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
7590 gen_op_mov_reg_T0(ot, rm);
7591 }
7592 }
7593 break;
7594 case 0x106: /* clts */
7595 if (s->cpl != 0) {
7596 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7597 } else {
7598 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7599 gen_helper_clts(cpu_env);
7600 /* abort block because static cpu state changed */
7601 gen_jmp_im(s->pc - s->cs_base);
7602 gen_eob(s);
7603 }
7604 break;
7605 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7606 case 0x1c3: /* MOVNTI reg, mem */
7607 if (!(s->cpuid_features & CPUID_SSE2))
7608 goto illegal_op;
7609 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
7610 modrm = cpu_ldub_code(env, s->pc++);
7611 mod = (modrm >> 6) & 3;
7612 if (mod == 3)
7613 goto illegal_op;
7614 reg = ((modrm >> 3) & 7) | rex_r;
7615 /* generate a generic store */
7616 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
7617 break;
7618 case 0x1ae:
7619 modrm = cpu_ldub_code(env, s->pc++);
7620 mod = (modrm >> 6) & 3;
7621 op = (modrm >> 3) & 7;
7622 switch(op) {
7623 case 0: /* fxsave */
7624 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7625 (s->prefix & PREFIX_LOCK))
7626 goto illegal_op;
7627 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7628 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7629 break;
7630 }
7631 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7632 gen_update_cc_op(s);
7633 gen_jmp_im(pc_start - s->cs_base);
7634 gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32((s->dflag == 2)));
7635 break;
7636 case 1: /* fxrstor */
7637 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7638 (s->prefix & PREFIX_LOCK))
7639 goto illegal_op;
7640 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7641 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7642 break;
7643 }
7644 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7645 gen_update_cc_op(s);
7646 gen_jmp_im(pc_start - s->cs_base);
7647 gen_helper_fxrstor(cpu_env, cpu_A0,
7648 tcg_const_i32((s->dflag == 2)));
7649 break;
7650 case 2: /* ldmxcsr */
7651 case 3: /* stmxcsr */
7652 if (s->flags & HF_TS_MASK) {
7653 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7654 break;
7655 }
7656 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7657 mod == 3)
7658 goto illegal_op;
7659 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7660 if (op == 2) {
7661 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7662 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7663 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
7664 } else {
7665 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7666 gen_op_st_T0_A0(OT_LONG + s->mem_index);
7667 }
7668 break;
7669 case 5: /* lfence */
7670 case 6: /* mfence */
7671 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
7672 goto illegal_op;
7673 break;
7674 case 7: /* sfence / clflush */
7675 if ((modrm & 0xc7) == 0xc0) {
7676 /* sfence */
7677 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7678 if (!(s->cpuid_features & CPUID_SSE))
7679 goto illegal_op;
7680 } else {
7681 /* clflush */
7682 if (!(s->cpuid_features & CPUID_CLFLUSH))
7683 goto illegal_op;
7684 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7685 }
7686 break;
7687 default:
7688 goto illegal_op;
7689 }
7690 break;
7691 case 0x10d: /* 3DNow! prefetch(w) */
7692 modrm = cpu_ldub_code(env, s->pc++);
7693 mod = (modrm >> 6) & 3;
7694 if (mod == 3)
7695 goto illegal_op;
7696 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7697 /* ignore for now */
7698 break;
7699 case 0x1aa: /* rsm */
7700 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
7701 if (!(s->flags & HF_SMM_MASK))
7702 goto illegal_op;
7703 gen_update_cc_op(s);
7704 gen_jmp_im(s->pc - s->cs_base);
7705 gen_helper_rsm(cpu_env);
7706 gen_eob(s);
7707 break;
7708 case 0x1b8: /* SSE4.2 popcnt */
7709 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7710 PREFIX_REPZ)
7711 goto illegal_op;
7712 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7713 goto illegal_op;
7714
7715 modrm = cpu_ldub_code(env, s->pc++);
7716 reg = ((modrm >> 3) & 7) | rex_r;
7717
7718 if (s->prefix & PREFIX_DATA)
7719 ot = OT_WORD;
7720 else if (s->dflag != 2)
7721 ot = OT_LONG;
7722 else
7723 ot = OT_QUAD;
7724
7725 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
7726 gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
7727 gen_op_mov_reg_T0(ot, reg);
7728
7729 set_cc_op(s, CC_OP_EFLAGS);
7730 break;
7731 case 0x10e ... 0x10f:
7732 /* 3DNow! instructions, ignore prefixes */
7733 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
7734 case 0x110 ... 0x117:
7735 case 0x128 ... 0x12f:
7736 case 0x138 ... 0x13a:
7737 case 0x150 ... 0x179:
7738 case 0x17c ... 0x17f:
7739 case 0x1c2:
7740 case 0x1c4 ... 0x1c6:
7741 case 0x1d0 ... 0x1fe:
7742 gen_sse(env, s, b, pc_start, rex_r);
7743 break;
7744 default:
7745 goto illegal_op;
7746 }
7747 /* lock generation */
7748 if (s->prefix & PREFIX_LOCK)
7749 gen_helper_unlock();
7750 return s->pc;
7751 illegal_op:
7752 if (s->prefix & PREFIX_LOCK)
7753 gen_helper_unlock();
7754 /* XXX: ensure that no lock was generated */
7755 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7756 return s->pc;
7757 }
7758
7759 void optimize_flags_init(void)
7760 {
7761 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7762 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
7763 offsetof(CPUX86State, cc_op), "cc_op");
7764 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
7765 "cc_dst");
7766 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
7767 "cc_src");
7768
7769 #ifdef TARGET_X86_64
7770 cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
7771 offsetof(CPUX86State, regs[R_EAX]), "rax");
7772 cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
7773 offsetof(CPUX86State, regs[R_ECX]), "rcx");
7774 cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
7775 offsetof(CPUX86State, regs[R_EDX]), "rdx");
7776 cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
7777 offsetof(CPUX86State, regs[R_EBX]), "rbx");
7778 cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
7779 offsetof(CPUX86State, regs[R_ESP]), "rsp");
7780 cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
7781 offsetof(CPUX86State, regs[R_EBP]), "rbp");
7782 cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
7783 offsetof(CPUX86State, regs[R_ESI]), "rsi");
7784 cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
7785 offsetof(CPUX86State, regs[R_EDI]), "rdi");
7786 cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
7787 offsetof(CPUX86State, regs[8]), "r8");
7788 cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
7789 offsetof(CPUX86State, regs[9]), "r9");
7790 cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
7791 offsetof(CPUX86State, regs[10]), "r10");
7792 cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
7793 offsetof(CPUX86State, regs[11]), "r11");
7794 cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
7795 offsetof(CPUX86State, regs[12]), "r12");
7796 cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
7797 offsetof(CPUX86State, regs[13]), "r13");
7798 cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
7799 offsetof(CPUX86State, regs[14]), "r14");
7800 cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
7801 offsetof(CPUX86State, regs[15]), "r15");
7802 #else
7803 cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
7804 offsetof(CPUX86State, regs[R_EAX]), "eax");
7805 cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
7806 offsetof(CPUX86State, regs[R_ECX]), "ecx");
7807 cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
7808 offsetof(CPUX86State, regs[R_EDX]), "edx");
7809 cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
7810 offsetof(CPUX86State, regs[R_EBX]), "ebx");
7811 cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
7812 offsetof(CPUX86State, regs[R_ESP]), "esp");
7813 cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
7814 offsetof(CPUX86State, regs[R_EBP]), "ebp");
7815 cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
7816 offsetof(CPUX86State, regs[R_ESI]), "esi");
7817 cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
7818 offsetof(CPUX86State, regs[R_EDI]), "edi");
7819 #endif
7820
7821 /* register helpers */
7822 #define GEN_HELPER 2
7823 #include "helper.h"
7824 }
7825
7826 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7827 basic block 'tb'. If search_pc is TRUE, also generate PC
7828 information for each intermediate instruction. */
7829 static inline void gen_intermediate_code_internal(CPUX86State *env,
7830 TranslationBlock *tb,
7831 int search_pc)
7832 {
7833 DisasContext dc1, *dc = &dc1;
7834 target_ulong pc_ptr;
7835 uint16_t *gen_opc_end;
7836 CPUBreakpoint *bp;
7837 int j, lj;
7838 uint64_t flags;
7839 target_ulong pc_start;
7840 target_ulong cs_base;
7841 int num_insns;
7842 int max_insns;
7843
7844 /* generate intermediate code */
7845 pc_start = tb->pc;
7846 cs_base = tb->cs_base;
7847 flags = tb->flags;
7848
7849 dc->pe = (flags >> HF_PE_SHIFT) & 1;
7850 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7851 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7852 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7853 dc->f_st = 0;
7854 dc->vm86 = (flags >> VM_SHIFT) & 1;
7855 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7856 dc->iopl = (flags >> IOPL_SHIFT) & 3;
7857 dc->tf = (flags >> TF_SHIFT) & 1;
7858 dc->singlestep_enabled = env->singlestep_enabled;
7859 dc->cc_op = CC_OP_DYNAMIC;
7860 dc->cc_op_dirty = false;
7861 dc->cs_base = cs_base;
7862 dc->tb = tb;
7863 dc->popl_esp_hack = 0;
7864 /* select memory access functions */
7865 dc->mem_index = 0;
7866 if (flags & HF_SOFTMMU_MASK) {
7867 dc->mem_index = (cpu_mmu_index(env) + 1) << 2;
7868 }
7869 dc->cpuid_features = env->cpuid_features;
7870 dc->cpuid_ext_features = env->cpuid_ext_features;
7871 dc->cpuid_ext2_features = env->cpuid_ext2_features;
7872 dc->cpuid_ext3_features = env->cpuid_ext3_features;
7873 dc->cpuid_7_0_ebx_features = env->cpuid_7_0_ebx_features;
7874 #ifdef TARGET_X86_64
7875 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7876 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7877 #endif
7878 dc->flags = flags;
7879 dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
7880 (flags & HF_INHIBIT_IRQ_MASK)
7881 #ifndef CONFIG_SOFTMMU
7882 || (flags & HF_SOFTMMU_MASK)
7883 #endif
7884 );
7885 #if 0
7886 /* check addseg logic */
7887 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
7888 printf("ERROR addseg\n");
7889 #endif
7890
7891 cpu_T[0] = tcg_temp_new();
7892 cpu_T[1] = tcg_temp_new();
7893 cpu_A0 = tcg_temp_new();
7894
7895 cpu_tmp0 = tcg_temp_new();
7896 cpu_tmp1_i64 = tcg_temp_new_i64();
7897 cpu_tmp2_i32 = tcg_temp_new_i32();
7898 cpu_tmp3_i32 = tcg_temp_new_i32();
7899 cpu_tmp4 = tcg_temp_new();
7900 cpu_tmp5 = tcg_temp_new();
7901 cpu_ptr0 = tcg_temp_new_ptr();
7902 cpu_ptr1 = tcg_temp_new_ptr();
7903 cpu_cc_srcT = tcg_temp_local_new();
7904
7905 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
7906
7907 dc->is_jmp = DISAS_NEXT;
7908 pc_ptr = pc_start;
7909 lj = -1;
7910 num_insns = 0;
7911 max_insns = tb->cflags & CF_COUNT_MASK;
7912 if (max_insns == 0)
7913 max_insns = CF_COUNT_MASK;
7914
7915 gen_icount_start();
7916 for(;;) {
7917 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
7918 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
7919 if (bp->pc == pc_ptr &&
7920 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
7921 gen_debug(dc, pc_ptr - dc->cs_base);
7922 break;
7923 }
7924 }
7925 }
7926 if (search_pc) {
7927 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
7928 if (lj < j) {
7929 lj++;
7930 while (lj < j)
7931 tcg_ctx.gen_opc_instr_start[lj++] = 0;
7932 }
7933 tcg_ctx.gen_opc_pc[lj] = pc_ptr;
7934 gen_opc_cc_op[lj] = dc->cc_op;
7935 tcg_ctx.gen_opc_instr_start[lj] = 1;
7936 tcg_ctx.gen_opc_icount[lj] = num_insns;
7937 }
7938 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
7939 gen_io_start();
7940
7941 pc_ptr = disas_insn(env, dc, pc_ptr);
7942 num_insns++;
7943 /* stop translation if indicated */
7944 if (dc->is_jmp)
7945 break;
7946 /* if single step mode, we generate only one instruction and
7947 generate an exception */
7948 /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7949 the flag and abort the translation to give the irqs a
7950 change to be happen */
7951 if (dc->tf || dc->singlestep_enabled ||
7952 (flags & HF_INHIBIT_IRQ_MASK)) {
7953 gen_jmp_im(pc_ptr - dc->cs_base);
7954 gen_eob(dc);
7955 break;
7956 }
7957 /* if too long translation, stop generation too */
7958 if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
7959 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
7960 num_insns >= max_insns) {
7961 gen_jmp_im(pc_ptr - dc->cs_base);
7962 gen_eob(dc);
7963 break;
7964 }
7965 if (singlestep) {
7966 gen_jmp_im(pc_ptr - dc->cs_base);
7967 gen_eob(dc);
7968 break;
7969 }
7970 }
7971 if (tb->cflags & CF_LAST_IO)
7972 gen_io_end();
7973 gen_icount_end(tb, num_insns);
7974 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
7975 /* we don't forget to fill the last values */
7976 if (search_pc) {
7977 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
7978 lj++;
7979 while (lj <= j)
7980 tcg_ctx.gen_opc_instr_start[lj++] = 0;
7981 }
7982
7983 #ifdef DEBUG_DISAS
7984 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
7985 int disas_flags;
7986 qemu_log("----------------\n");
7987 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7988 #ifdef TARGET_X86_64
7989 if (dc->code64)
7990 disas_flags = 2;
7991 else
7992 #endif
7993 disas_flags = !dc->code32;
7994 log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
7995 qemu_log("\n");
7996 }
7997 #endif
7998
7999 if (!search_pc) {
8000 tb->size = pc_ptr - pc_start;
8001 tb->icount = num_insns;
8002 }
8003 }
8004
8005 void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
8006 {
8007 gen_intermediate_code_internal(env, tb, 0);
8008 }
8009
8010 void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
8011 {
8012 gen_intermediate_code_internal(env, tb, 1);
8013 }
8014
8015 void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
8016 {
8017 int cc_op;
8018 #ifdef DEBUG_DISAS
8019 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
8020 int i;
8021 qemu_log("RESTORE:\n");
8022 for(i = 0;i <= pc_pos; i++) {
8023 if (tcg_ctx.gen_opc_instr_start[i]) {
8024 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
8025 tcg_ctx.gen_opc_pc[i]);
8026 }
8027 }
8028 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
8029 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
8030 (uint32_t)tb->cs_base);
8031 }
8032 #endif
8033 env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
8034 cc_op = gen_opc_cc_op[pc_pos];
8035 if (cc_op != CC_OP_DYNAMIC)
8036 env->cc_op = cc_op;
8037 }