1 /*
2 * i386 translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdarg.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <inttypes.h>
24 #include <signal.h>
25
26 #include "qemu/host-utils.h"
27 #include "cpu.h"
28 #include "disas/disas.h"
29 #include "tcg-op.h"
30
31 #include "helper.h"
32 #define GEN_HELPER 1
33 #include "helper.h"
34
35 #define PREFIX_REPZ 0x01
36 #define PREFIX_REPNZ 0x02
37 #define PREFIX_LOCK 0x04
38 #define PREFIX_DATA 0x08
39 #define PREFIX_ADR 0x10
40
41 #ifdef TARGET_X86_64
42 #define CODE64(s) ((s)->code64)
43 #define REX_X(s) ((s)->rex_x)
44 #define REX_B(s) ((s)->rex_b)
45 #else
46 #define CODE64(s) 0
47 #define REX_X(s) 0
48 #define REX_B(s) 0
49 #endif
50
51 #ifdef TARGET_X86_64
52 # define ctztl ctz64
53 # define clztl clz64
54 #else
55 # define ctztl ctz32
56 # define clztl clz32
57 #endif
58
59 //#define MACRO_TEST 1
60
61 /* global register indexes */
62 static TCGv_ptr cpu_env;
63 static TCGv cpu_A0;
64 static TCGv cpu_cc_src, cpu_cc_dst, cpu_cc_srcT;
65 static TCGv_i32 cpu_cc_op;
66 static TCGv cpu_regs[CPU_NB_REGS];
67 /* local temps */
68 static TCGv cpu_T[2];
69 /* local register indexes (only used inside old micro ops) */
70 static TCGv cpu_tmp0, cpu_tmp4;
71 static TCGv_ptr cpu_ptr0, cpu_ptr1;
72 static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
73 static TCGv_i64 cpu_tmp1_i64;
74 static TCGv cpu_tmp5;
75
76 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
77
78 #include "exec/gen-icount.h"
79
80 #ifdef TARGET_X86_64
81 static int x86_64_hregs;
82 #endif
83
84 typedef struct DisasContext {
85 /* current insn context */
86 int override; /* -1 if no override */
87 int prefix;
88 int aflag, dflag;
89 target_ulong pc; /* pc = eip + cs_base */
90     int is_jmp; /* 1 means jump (stop translation), 2 means CPU
91 static state change (stop translation) */
92 /* current block context */
93 target_ulong cs_base; /* base of CS segment */
94 int pe; /* protected mode */
95 int code32; /* 32 bit code segment */
96 #ifdef TARGET_X86_64
97 int lma; /* long mode active */
98 int code64; /* 64 bit code segment */
99 int rex_x, rex_b;
100 #endif
101 int ss32; /* 32 bit stack segment */
102 CCOp cc_op; /* current CC operation */
103 bool cc_op_dirty;
104     int addseg; /* non zero if any of DS/ES/SS has a non zero base */
105 int f_st; /* currently unused */
106 int vm86; /* vm86 mode */
107 int cpl;
108 int iopl;
109 int tf; /* TF cpu flag */
110 int singlestep_enabled; /* "hardware" single step enabled */
111 int jmp_opt; /* use direct block chaining for direct jumps */
112 int mem_index; /* select memory access functions */
113 uint64_t flags; /* all execution flags */
114 struct TranslationBlock *tb;
115 int popl_esp_hack; /* for correct popl with esp base handling */
116 int rip_offset; /* only used in x86_64, but left for simplicity */
117 int cpuid_features;
118 int cpuid_ext_features;
119 int cpuid_ext2_features;
120 int cpuid_ext3_features;
121 int cpuid_7_0_ebx_features;
122 } DisasContext;
123
124 static void gen_eob(DisasContext *s);
125 static void gen_jmp(DisasContext *s, target_ulong eip);
126 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
127 static void gen_op(DisasContext *s1, int op, int ot, int d);
128
129 /* i386 arith/logic operations */
130 enum {
131 OP_ADDL,
132 OP_ORL,
133 OP_ADCL,
134 OP_SBBL,
135 OP_ANDL,
136 OP_SUBL,
137 OP_XORL,
138 OP_CMPL,
139 };
140
141 /* i386 shift ops */
142 enum {
143 OP_ROL,
144 OP_ROR,
145 OP_RCL,
146 OP_RCR,
147 OP_SHL,
148 OP_SHR,
149 OP_SHL1, /* undocumented */
150 OP_SAR = 7,
151 };
152
153 enum {
154 JCC_O,
155 JCC_B,
156 JCC_Z,
157 JCC_BE,
158 JCC_S,
159 JCC_P,
160 JCC_L,
161 JCC_LE,
162 };
163
164 /* operand size */
165 enum {
166 OT_BYTE = 0,
167 OT_WORD,
168 OT_LONG,
169 OT_QUAD,
170 };
171
172 enum {
173 /* I386 int registers */
174 OR_EAX, /* MUST be even numbered */
175 OR_ECX,
176 OR_EDX,
177 OR_EBX,
178 OR_ESP,
179 OR_EBP,
180 OR_ESI,
181 OR_EDI,
182
183 OR_TMP0 = 16, /* temporary operand register */
184 OR_TMP1,
185 OR_A0, /* temporary register used when doing address evaluation */
186 };
187
188 enum {
189 USES_CC_DST = 1,
190 USES_CC_SRC = 2,
191 USES_CC_SRCT = 4,
192 };
193
194 /* Bit set if the global variable is live after setting CC_OP to X. */
195 static const uint8_t cc_op_live[CC_OP_NB] = {
196 [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC,
197 [CC_OP_EFLAGS] = USES_CC_SRC,
198 [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
199 [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
200 [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC,
201 [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
202 [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC,
203 [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
204 [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
205 [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
206 [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
207 [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
208 };
209
210 static void set_cc_op(DisasContext *s, CCOp op)
211 {
212 int dead;
213
214 if (s->cc_op == op) {
215 return;
216 }
217
218 /* Discard CC computation that will no longer be used. */
219 dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
220 if (dead & USES_CC_DST) {
221 tcg_gen_discard_tl(cpu_cc_dst);
222 }
223 if (dead & USES_CC_SRC) {
224 tcg_gen_discard_tl(cpu_cc_src);
225 }
226 if (dead & USES_CC_SRCT) {
227 tcg_gen_discard_tl(cpu_cc_srcT);
228 }
229
230 s->cc_op = op;
231 /* The DYNAMIC setting is translator only, and should never be
232 stored. Thus we always consider it clean. */
233 s->cc_op_dirty = (op != CC_OP_DYNAMIC);
234 }
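/* For example, switching from CC_OP_SUBB (cc_dst, cc_src and cc_srcT all
   live) to CC_OP_LOGICB (only cc_dst live) lets set_cc_op() discard
   cc_src and cc_srcT, so TCG does not have to keep those values alive
   across the transition (see cc_op_live[] above). */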
235
236 static void gen_update_cc_op(DisasContext *s)
237 {
238 if (s->cc_op_dirty) {
239 tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
240 s->cc_op_dirty = false;
241 }
242 }
243
244 static inline void gen_op_movl_T0_0(void)
245 {
246 tcg_gen_movi_tl(cpu_T[0], 0);
247 }
248
249 static inline void gen_op_movl_T0_im(int32_t val)
250 {
251 tcg_gen_movi_tl(cpu_T[0], val);
252 }
253
254 static inline void gen_op_movl_T0_imu(uint32_t val)
255 {
256 tcg_gen_movi_tl(cpu_T[0], val);
257 }
258
259 static inline void gen_op_movl_T1_im(int32_t val)
260 {
261 tcg_gen_movi_tl(cpu_T[1], val);
262 }
263
264 static inline void gen_op_movl_T1_imu(uint32_t val)
265 {
266 tcg_gen_movi_tl(cpu_T[1], val);
267 }
268
269 static inline void gen_op_movl_A0_im(uint32_t val)
270 {
271 tcg_gen_movi_tl(cpu_A0, val);
272 }
273
274 #ifdef TARGET_X86_64
275 static inline void gen_op_movq_A0_im(int64_t val)
276 {
277 tcg_gen_movi_tl(cpu_A0, val);
278 }
279 #endif
280
281 static inline void gen_movtl_T0_im(target_ulong val)
282 {
283 tcg_gen_movi_tl(cpu_T[0], val);
284 }
285
286 static inline void gen_movtl_T1_im(target_ulong val)
287 {
288 tcg_gen_movi_tl(cpu_T[1], val);
289 }
290
291 static inline void gen_op_andl_T0_ffff(void)
292 {
293 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
294 }
295
296 static inline void gen_op_andl_T0_im(uint32_t val)
297 {
298 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
299 }
300
301 static inline void gen_op_movl_T0_T1(void)
302 {
303 tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
304 }
305
306 static inline void gen_op_andl_A0_ffff(void)
307 {
308 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
309 }
310
311 #ifdef TARGET_X86_64
312
313 #define NB_OP_SIZES 4
314
315 #else /* !TARGET_X86_64 */
316
317 #define NB_OP_SIZES 3
318
319 #endif /* !TARGET_X86_64 */
320
321 #if defined(HOST_WORDS_BIGENDIAN)
322 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
323 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
324 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
325 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
326 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
327 #else
328 #define REG_B_OFFSET 0
329 #define REG_H_OFFSET 1
330 #define REG_W_OFFSET 0
331 #define REG_L_OFFSET 0
332 #define REG_LH_OFFSET 4
333 #endif
334
335 /* In instruction encodings for byte register accesses the
336 * register number usually indicates "low 8 bits of register N";
337 * however there are some special cases where N 4..7 indicates
338  * [AH, CH, DH, BH], i.e. "bits 15..8 of register N-4". Return
339 * true for this special case, false otherwise.
340 */
341 static inline bool byte_reg_is_xH(int reg)
342 {
343 if (reg < 4) {
344 return false;
345 }
346 #ifdef TARGET_X86_64
347 if (reg >= 8 || x86_64_hregs) {
348 return false;
349 }
350 #endif
351 return true;
352 }
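/* For example, with x86_64_hregs set (i.e. after a REX prefix in 64 bit
   mode), register number 4 encodes SPL rather than AH, so byte_reg_is_xH(4)
   returns false and the access targets bits 0..7 of ESP/RSP instead of
   bits 8..15 of EAX/RAX. */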
353
354 static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
355 {
356 switch(ot) {
357 case OT_BYTE:
358 if (!byte_reg_is_xH(reg)) {
359 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
360 } else {
361 tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
362 }
363 break;
364 case OT_WORD:
365 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
366 break;
367 default: /* XXX this shouldn't be reached; abort? */
368 case OT_LONG:
369 /* For x86_64, this sets the higher half of register to zero.
370 For i386, this is equivalent to a mov. */
371 tcg_gen_ext32u_tl(cpu_regs[reg], t0);
372 break;
373 #ifdef TARGET_X86_64
374 case OT_QUAD:
375 tcg_gen_mov_tl(cpu_regs[reg], t0);
376 break;
377 #endif
378 }
379 }
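/* Illustration: an 8 bit write to AL only replaces bits 0..7 of
   cpu_regs[R_EAX] (deposit at offset 0), a write to AH replaces bits 8..15
   (deposit at offset 8), and a 16 bit write keeps the upper bits of the
   register intact.  Only the 32/64 bit cases replace the whole register,
   the 32 bit one zero-extending on x86_64 as the architecture requires. */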
380
381 static inline void gen_op_mov_reg_T0(int ot, int reg)
382 {
383 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
384 }
385
386 static inline void gen_op_mov_reg_T1(int ot, int reg)
387 {
388 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
389 }
390
391 static inline void gen_op_mov_reg_A0(int size, int reg)
392 {
393 switch(size) {
394 case OT_BYTE:
395 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
396 break;
397 default: /* XXX this shouldn't be reached; abort? */
398 case OT_WORD:
399 /* For x86_64, this sets the higher half of register to zero.
400 For i386, this is equivalent to a mov. */
401 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
402 break;
403 #ifdef TARGET_X86_64
404 case OT_LONG:
405 tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
406 break;
407 #endif
408 }
409 }
410
411 static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
412 {
413 if (ot == OT_BYTE && byte_reg_is_xH(reg)) {
414 tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
415 tcg_gen_ext8u_tl(t0, t0);
416 } else {
417 tcg_gen_mov_tl(t0, cpu_regs[reg]);
418 }
419 }
420
421 static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
422 {
423 gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
424 }
425
426 static inline void gen_op_movl_A0_reg(int reg)
427 {
428 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
429 }
430
431 static inline void gen_op_addl_A0_im(int32_t val)
432 {
433 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
434 #ifdef TARGET_X86_64
435 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
436 #endif
437 }
438
439 #ifdef TARGET_X86_64
440 static inline void gen_op_addq_A0_im(int64_t val)
441 {
442 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
443 }
444 #endif
445
446 static void gen_add_A0_im(DisasContext *s, int val)
447 {
448 #ifdef TARGET_X86_64
449 if (CODE64(s))
450 gen_op_addq_A0_im(val);
451 else
452 #endif
453 gen_op_addl_A0_im(val);
454 }
455
456 static inline void gen_op_addl_T0_T1(void)
457 {
458 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
459 }
460
461 static inline void gen_op_jmp_T0(void)
462 {
463 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
464 }
465
466 static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
467 {
468 switch(size) {
469 case OT_BYTE:
470 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
471 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
472 break;
473 case OT_WORD:
474 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
475 /* For x86_64, this sets the higher half of register to zero.
476 For i386, this is equivalent to a nop. */
477 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
478 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
479 break;
480 #ifdef TARGET_X86_64
481 case OT_LONG:
482 tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
483 break;
484 #endif
485 }
486 }
487
488 static inline void gen_op_add_reg_T0(int size, int reg)
489 {
490 switch(size) {
491 case OT_BYTE:
492 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
493 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
494 break;
495 case OT_WORD:
496 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
497 /* For x86_64, this sets the higher half of register to zero.
498 For i386, this is equivalent to a nop. */
499 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
500 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
501 break;
502 #ifdef TARGET_X86_64
503 case OT_LONG:
504 tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
505 break;
506 #endif
507 }
508 }
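/* Note: the "size" argument of gen_op_add_reg_im() and gen_op_add_reg_T0()
   is the address size selector passed as s->aflag (0 = 16 bit, 1 = 32 bit,
   2 = 64 bit), not an OT_* operand size; the OT_* labels in the switches
   are only reused for their numeric values, which is why the "OT_BYTE"
   case performs a 16 bit deposit.  gen_op_mov_reg_A0() above appears to
   follow the same convention. */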
509
510 static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
511 {
512 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
513 if (shift != 0)
514 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
515 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
516 /* For x86_64, this sets the higher half of register to zero.
517 For i386, this is equivalent to a nop. */
518 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
519 }
520
521 static inline void gen_op_movl_A0_seg(int reg)
522 {
523 tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
524 }
525
526 static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
527 {
528 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
529 #ifdef TARGET_X86_64
530 if (CODE64(s)) {
531 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
532 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
533 } else {
534 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
535 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
536 }
537 #else
538 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
539 #endif
540 }
541
542 #ifdef TARGET_X86_64
543 static inline void gen_op_movq_A0_seg(int reg)
544 {
545 tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
546 }
547
548 static inline void gen_op_addq_A0_seg(int reg)
549 {
550 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
551 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
552 }
553
554 static inline void gen_op_movq_A0_reg(int reg)
555 {
556 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
557 }
558
559 static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
560 {
561 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
562 if (shift != 0)
563 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
564 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
565 }
566 #endif
567
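/* The "idx" argument of the load/store helpers below packs two fields:
   the low two bits are the OT_* operand size and the remaining bits hold
   the memory index, pre-biased so that (idx >> 2) - 1 recovers it.
   Callers simply pass "ot + s->mem_index", with s->mem_index already in
   that shifted form. */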
568 static inline void gen_op_lds_T0_A0(int idx)
569 {
570 int mem_index = (idx >> 2) - 1;
571 switch(idx & 3) {
572 case OT_BYTE:
573 tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
574 break;
575 case OT_WORD:
576 tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
577 break;
578 default:
579 case OT_LONG:
580 tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
581 break;
582 }
583 }
584
585 static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
586 {
587 int mem_index = (idx >> 2) - 1;
588 switch(idx & 3) {
589 case OT_BYTE:
590 tcg_gen_qemu_ld8u(t0, a0, mem_index);
591 break;
592 case OT_WORD:
593 tcg_gen_qemu_ld16u(t0, a0, mem_index);
594 break;
595 case OT_LONG:
596 tcg_gen_qemu_ld32u(t0, a0, mem_index);
597 break;
598 default:
599 case OT_QUAD:
600 /* Should never happen on 32-bit targets. */
601 #ifdef TARGET_X86_64
602 tcg_gen_qemu_ld64(t0, a0, mem_index);
603 #endif
604 break;
605 }
606 }
607
608 /* XXX: always use ldu or lds */
609 static inline void gen_op_ld_T0_A0(int idx)
610 {
611 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
612 }
613
614 static inline void gen_op_ldu_T0_A0(int idx)
615 {
616 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
617 }
618
619 static inline void gen_op_ld_T1_A0(int idx)
620 {
621 gen_op_ld_v(idx, cpu_T[1], cpu_A0);
622 }
623
624 static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
625 {
626 int mem_index = (idx >> 2) - 1;
627 switch(idx & 3) {
628 case OT_BYTE:
629 tcg_gen_qemu_st8(t0, a0, mem_index);
630 break;
631 case OT_WORD:
632 tcg_gen_qemu_st16(t0, a0, mem_index);
633 break;
634 case OT_LONG:
635 tcg_gen_qemu_st32(t0, a0, mem_index);
636 break;
637 default:
638 case OT_QUAD:
639 /* Should never happen on 32-bit targets. */
640 #ifdef TARGET_X86_64
641 tcg_gen_qemu_st64(t0, a0, mem_index);
642 #endif
643 break;
644 }
645 }
646
647 static inline void gen_op_st_T0_A0(int idx)
648 {
649 gen_op_st_v(idx, cpu_T[0], cpu_A0);
650 }
651
652 static inline void gen_op_st_T1_A0(int idx)
653 {
654 gen_op_st_v(idx, cpu_T[1], cpu_A0);
655 }
656
657 static inline void gen_jmp_im(target_ulong pc)
658 {
659 tcg_gen_movi_tl(cpu_tmp0, pc);
660 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
661 }
662
663 static inline void gen_string_movl_A0_ESI(DisasContext *s)
664 {
665 int override;
666
667 override = s->override;
668 #ifdef TARGET_X86_64
669 if (s->aflag == 2) {
670 if (override >= 0) {
671 gen_op_movq_A0_seg(override);
672 gen_op_addq_A0_reg_sN(0, R_ESI);
673 } else {
674 gen_op_movq_A0_reg(R_ESI);
675 }
676 } else
677 #endif
678 if (s->aflag) {
679 /* 32 bit address */
680 if (s->addseg && override < 0)
681 override = R_DS;
682 if (override >= 0) {
683 gen_op_movl_A0_seg(override);
684 gen_op_addl_A0_reg_sN(0, R_ESI);
685 } else {
686 gen_op_movl_A0_reg(R_ESI);
687 }
688 } else {
689         /* 16 bit address, always override */
690 if (override < 0)
691 override = R_DS;
692 gen_op_movl_A0_reg(R_ESI);
693 gen_op_andl_A0_ffff();
694 gen_op_addl_A0_seg(s, override);
695 }
696 }
697
698 static inline void gen_string_movl_A0_EDI(DisasContext *s)
699 {
700 #ifdef TARGET_X86_64
701 if (s->aflag == 2) {
702 gen_op_movq_A0_reg(R_EDI);
703 } else
704 #endif
705 if (s->aflag) {
706 if (s->addseg) {
707 gen_op_movl_A0_seg(R_ES);
708 gen_op_addl_A0_reg_sN(0, R_EDI);
709 } else {
710 gen_op_movl_A0_reg(R_EDI);
711 }
712 } else {
713 gen_op_movl_A0_reg(R_EDI);
714 gen_op_andl_A0_ffff();
715 gen_op_addl_A0_seg(s, R_ES);
716 }
717 }
718
719 static inline void gen_op_movl_T0_Dshift(int ot)
720 {
721 tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
722 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
723 };
724
725 static TCGv gen_ext_tl(TCGv dst, TCGv src, int size, bool sign)
726 {
727 switch (size) {
728 case OT_BYTE:
729 if (sign) {
730 tcg_gen_ext8s_tl(dst, src);
731 } else {
732 tcg_gen_ext8u_tl(dst, src);
733 }
734 return dst;
735 case OT_WORD:
736 if (sign) {
737 tcg_gen_ext16s_tl(dst, src);
738 } else {
739 tcg_gen_ext16u_tl(dst, src);
740 }
741 return dst;
742 #ifdef TARGET_X86_64
743 case OT_LONG:
744 if (sign) {
745 tcg_gen_ext32s_tl(dst, src);
746 } else {
747 tcg_gen_ext32u_tl(dst, src);
748 }
749 return dst;
750 #endif
751 default:
752 return src;
753 }
754 }
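/* Note: gen_ext_tl() returns "dst" when it emits an extension, but returns
   "src" unchanged when the requested size already covers the whole
   target_ulong (OT_LONG without TARGET_X86_64, or the default/OT_QUAD
   case).  Callers such as gen_prepare_eflags_c() compare the result with
   TCGV_EQUAL() to detect this aliasing and avoid clobbering the source. */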
755
756 static void gen_extu(int ot, TCGv reg)
757 {
758 gen_ext_tl(reg, reg, ot, false);
759 }
760
761 static void gen_exts(int ot, TCGv reg)
762 {
763 gen_ext_tl(reg, reg, ot, true);
764 }
765
766 static inline void gen_op_jnz_ecx(int size, int label1)
767 {
768 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
769 gen_extu(size + 1, cpu_tmp0);
770 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
771 }
772
773 static inline void gen_op_jz_ecx(int size, int label1)
774 {
775 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
776 gen_extu(size + 1, cpu_tmp0);
777 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
778 }
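/* As with gen_op_add_reg_im()/gen_op_add_reg_T0(), the "size" argument of
   gen_op_jnz_ecx()/gen_op_jz_ecx() is s->aflag (0 = 16 bit, 1 = 32 bit,
   2 = 64 bit addressing); "size + 1" converts it to the matching OT_*
   width before masking ECX. */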
779
780 static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
781 {
782 switch (ot) {
783 case OT_BYTE:
784 gen_helper_inb(v, n);
785 break;
786 case OT_WORD:
787 gen_helper_inw(v, n);
788 break;
789 case OT_LONG:
790 gen_helper_inl(v, n);
791 break;
792 }
793 }
794
795 static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
796 {
797 switch (ot) {
798 case OT_BYTE:
799 gen_helper_outb(v, n);
800 break;
801 case OT_WORD:
802 gen_helper_outw(v, n);
803 break;
804 case OT_LONG:
805 gen_helper_outl(v, n);
806 break;
807 }
808 }
809
810 static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
811 uint32_t svm_flags)
812 {
813 int state_saved;
814 target_ulong next_eip;
815
816 state_saved = 0;
817 if (s->pe && (s->cpl > s->iopl || s->vm86)) {
818 gen_update_cc_op(s);
819 gen_jmp_im(cur_eip);
820 state_saved = 1;
821 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
822 switch (ot) {
823 case OT_BYTE:
824 gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
825 break;
826 case OT_WORD:
827 gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
828 break;
829 case OT_LONG:
830 gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
831 break;
832 }
833 }
834 if(s->flags & HF_SVMI_MASK) {
835 if (!state_saved) {
836 gen_update_cc_op(s);
837 gen_jmp_im(cur_eip);
838 }
839 svm_flags |= (1 << (4 + ot));
840 next_eip = s->pc - s->cs_base;
841 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
842 gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
843 tcg_const_i32(svm_flags),
844 tcg_const_i32(next_eip - cur_eip));
845 }
846 }
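/* Two independent checks can be emitted here: an I/O permission check
   (check_iob/iow/iol, presumably against the TSS I/O bitmap) when the
   guest is in protected mode with CPL > IOPL or in vm86 mode, and an SVM
   I/O intercept check when SVM intercepts are active (HF_SVMI_MASK).
   Both require EIP and cc_op to be written back first so the helper can
   raise an exception at the faulting instruction. */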
847
848 static inline void gen_movs(DisasContext *s, int ot)
849 {
850 gen_string_movl_A0_ESI(s);
851 gen_op_ld_T0_A0(ot + s->mem_index);
852 gen_string_movl_A0_EDI(s);
853 gen_op_st_T0_A0(ot + s->mem_index);
854 gen_op_movl_T0_Dshift(ot);
855 gen_op_add_reg_T0(s->aflag, R_ESI);
856 gen_op_add_reg_T0(s->aflag, R_EDI);
857 }
858
859 static void gen_op_update1_cc(void)
860 {
861 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
862 }
863
864 static void gen_op_update2_cc(void)
865 {
866 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
867 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
868 }
869
870 static inline void gen_op_testl_T0_T1_cc(void)
871 {
872 tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
873 }
874
875 static void gen_op_update_neg_cc(void)
876 {
877 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
878 tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
879 tcg_gen_movi_tl(cpu_cc_srcT, 0);
880 }
881
882 /* compute all eflags to cc_src */
883 static void gen_compute_eflags(DisasContext *s)
884 {
885 TCGv zero, dst, src1;
886 int live, dead;
887
888 if (s->cc_op == CC_OP_EFLAGS) {
889 return;
890 }
891
892 TCGV_UNUSED(zero);
893 dst = cpu_cc_dst;
894 src1 = cpu_cc_src;
895
896 /* Take care to not read values that are not live. */
897 live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
898 dead = live ^ (USES_CC_DST | USES_CC_SRC);
899 if (dead) {
900 zero = tcg_const_tl(0);
901 if (dead & USES_CC_DST) {
902 dst = zero;
903 }
904 if (dead & USES_CC_SRC) {
905 src1 = zero;
906 }
907 }
908
909 gen_update_cc_op(s);
910 gen_helper_cc_compute_all(cpu_cc_src, dst, src1, cpu_cc_op);
911 set_cc_op(s, CC_OP_EFLAGS);
912
913 if (dead) {
914 tcg_temp_free(zero);
915 }
916 }
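/* After gen_compute_eflags() the arithmetic flags (CF/PF/AF/ZF/SF/OF) live
   in cc_src at their architectural EFLAGS bit positions and cc_op is
   CC_OP_EFLAGS, so an individual flag can be tested by masking cc_src with
   CC_C, CC_Z, CC_S, ... as the gen_prepare_eflags_* helpers below do. */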
917
918 typedef struct CCPrepare {
919 TCGCond cond;
920 TCGv reg;
921 TCGv reg2;
922 target_ulong imm;
923 target_ulong mask;
924 bool use_reg2;
925 bool no_setcond;
926 } CCPrepare;
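/* A CCPrepare describes how to evaluate a condition without forcing a full
   flags computation: the condition holds when "(reg & mask) cond reg2"
   (if use_reg2) or "(reg & mask) cond imm" otherwise, with mask == -1
   meaning no masking is needed.  When no_setcond is set, reg already
   contains the 0/1 value of the flag, so gen_setcc1() only has to copy or
   invert it. */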
927
928 /* compute eflags.C to reg */
929 static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
930 {
931 TCGv t0, t1;
932 int size, shift;
933
934 switch (s->cc_op) {
935 case CC_OP_SUBB ... CC_OP_SUBQ:
936 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
937 size = s->cc_op - CC_OP_SUBB;
938 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
939 /* If no temporary was used, be careful not to alias t1 and t0. */
940 t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
941 tcg_gen_mov_tl(t0, cpu_cc_srcT);
942 gen_extu(size, t0);
943 goto add_sub;
944
945 case CC_OP_ADDB ... CC_OP_ADDQ:
946 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
947 size = s->cc_op - CC_OP_ADDB;
948 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
949 t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
950 add_sub:
951 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
952 .reg2 = t1, .mask = -1, .use_reg2 = true };
953
954 case CC_OP_SBBB ... CC_OP_SBBQ:
955 /* (DATA_TYPE)(CC_DST + CC_SRC + 1) <= (DATA_TYPE)CC_SRC */
956 size = s->cc_op - CC_OP_SBBB;
957 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
958 if (TCGV_EQUAL(t1, reg) && TCGV_EQUAL(reg, cpu_cc_src)) {
959 tcg_gen_mov_tl(cpu_tmp0, cpu_cc_src);
960 t1 = cpu_tmp0;
961 }
962
963 tcg_gen_add_tl(reg, cpu_cc_dst, cpu_cc_src);
964 tcg_gen_addi_tl(reg, reg, 1);
965 gen_extu(size, reg);
966 t0 = reg;
967 goto adc_sbb;
968
969 case CC_OP_ADCB ... CC_OP_ADCQ:
970 /* (DATA_TYPE)CC_DST <= (DATA_TYPE)CC_SRC */
971 size = s->cc_op - CC_OP_ADCB;
972 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
973 t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
974 adc_sbb:
975 return (CCPrepare) { .cond = TCG_COND_LEU, .reg = t0,
976 .reg2 = t1, .mask = -1, .use_reg2 = true };
977
978 case CC_OP_LOGICB ... CC_OP_LOGICQ:
979 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
980
981 case CC_OP_INCB ... CC_OP_INCQ:
982 case CC_OP_DECB ... CC_OP_DECQ:
983 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
984 .mask = -1, .no_setcond = true };
985
986 case CC_OP_SHLB ... CC_OP_SHLQ:
987 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
988 size = s->cc_op - CC_OP_SHLB;
989 shift = (8 << size) - 1;
990 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
991 .mask = (target_ulong)1 << shift };
992
993 case CC_OP_MULB ... CC_OP_MULQ:
994 return (CCPrepare) { .cond = TCG_COND_NE,
995 .reg = cpu_cc_src, .mask = -1 };
996
997 case CC_OP_EFLAGS:
998 case CC_OP_SARB ... CC_OP_SARQ:
999 /* CC_SRC & 1 */
1000 return (CCPrepare) { .cond = TCG_COND_NE,
1001 .reg = cpu_cc_src, .mask = CC_C };
1002
1003 default:
1004 /* The need to compute only C from CC_OP_DYNAMIC is important
1005 in efficiently implementing e.g. INC at the start of a TB. */
1006 gen_update_cc_op(s);
1007 gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src, cpu_cc_op);
1008 return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1009 .mask = -1, .no_setcond = true };
1010 }
1011 }
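/* Example: after a 32 bit SUB or CMP (CC_OP_SUBL), the carry is simply
   "(uint32_t)CC_SRCT < (uint32_t)CC_SRC", i.e. the saved left operand
   compared against the subtrahend, so CF can be tested without calling the
   cc_compute_c helper. */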
1012
1013 /* compute eflags.P to reg */
1014 static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
1015 {
1016 gen_compute_eflags(s);
1017 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1018 .mask = CC_P };
1019 }
1020
1021 /* compute eflags.S to reg */
1022 static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
1023 {
1024 switch (s->cc_op) {
1025 case CC_OP_DYNAMIC:
1026 gen_compute_eflags(s);
1027 /* FALLTHRU */
1028 case CC_OP_EFLAGS:
1029 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1030 .mask = CC_S };
1031 default:
1032 {
1033 int size = (s->cc_op - CC_OP_ADDB) & 3;
1034 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
1035 return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
1036 }
1037 }
1038 }
1039
1040 /* compute eflags.O to reg */
1041 static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
1042 {
1043 gen_compute_eflags(s);
1044 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1045 .mask = CC_O };
1046 }
1047
1048 /* compute eflags.Z to reg */
1049 static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
1050 {
1051 switch (s->cc_op) {
1052 case CC_OP_DYNAMIC:
1053 gen_compute_eflags(s);
1054 /* FALLTHRU */
1055 case CC_OP_EFLAGS:
1056 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1057 .mask = CC_Z };
1058 default:
1059 {
1060 int size = (s->cc_op - CC_OP_ADDB) & 3;
1061 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
1062 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
1063 }
1064 }
1065 }
1066
1067 /* prepare the condition test for jump opcode value 'b' (consumed by gen_setcc1
1068    and the gen_jcc1 helpers). In the fast case, T0 is guaranteed not to be used. */
1069 static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
1070 {
1071 int inv, jcc_op, size, cond;
1072 CCPrepare cc;
1073 TCGv t0;
1074
1075 inv = b & 1;
1076 jcc_op = (b >> 1) & 7;
1077
1078 switch (s->cc_op) {
1079 case CC_OP_SUBB ... CC_OP_SUBQ:
1080 /* We optimize relational operators for the cmp/jcc case. */
1081 size = s->cc_op - CC_OP_SUBB;
1082 switch (jcc_op) {
1083 case JCC_BE:
1084 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
1085 gen_extu(size, cpu_tmp4);
1086 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
1087 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
1088 .reg2 = t0, .mask = -1, .use_reg2 = true };
1089 break;
1090
1091 case JCC_L:
1092 cond = TCG_COND_LT;
1093 goto fast_jcc_l;
1094 case JCC_LE:
1095 cond = TCG_COND_LE;
1096 fast_jcc_l:
1097 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
1098 gen_exts(size, cpu_tmp4);
1099 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
1100 cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
1101 .reg2 = t0, .mask = -1, .use_reg2 = true };
1102 break;
1103
1104 default:
1105 goto slow_jcc;
1106 }
1107 break;
1108
1109 default:
1110 slow_jcc:
1111 /* This actually generates good code for JC, JZ and JS. */
1112 switch (jcc_op) {
1113 case JCC_O:
1114 cc = gen_prepare_eflags_o(s, reg);
1115 break;
1116 case JCC_B:
1117 cc = gen_prepare_eflags_c(s, reg);
1118 break;
1119 case JCC_Z:
1120 cc = gen_prepare_eflags_z(s, reg);
1121 break;
1122 case JCC_BE:
1123 gen_compute_eflags(s);
1124 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1125 .mask = CC_Z | CC_C };
1126 break;
1127 case JCC_S:
1128 cc = gen_prepare_eflags_s(s, reg);
1129 break;
1130 case JCC_P:
1131 cc = gen_prepare_eflags_p(s, reg);
1132 break;
1133 case JCC_L:
1134 gen_compute_eflags(s);
1135 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1136 reg = cpu_tmp0;
1137 }
1138 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1139 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1140 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1141 .mask = CC_S };
1142 break;
1143 default:
1144 case JCC_LE:
1145 gen_compute_eflags(s);
1146 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1147 reg = cpu_tmp0;
1148 }
1149 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1150 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1151 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1152 .mask = CC_S | CC_Z };
1153 break;
1154 }
1155 break;
1156 }
1157
1158 if (inv) {
1159 cc.cond = tcg_invert_cond(cc.cond);
1160 }
1161 return cc;
1162 }
1163
1164 static void gen_setcc1(DisasContext *s, int b, TCGv reg)
1165 {
1166 CCPrepare cc = gen_prepare_cc(s, b, reg);
1167
1168 if (cc.no_setcond) {
1169 if (cc.cond == TCG_COND_EQ) {
1170 tcg_gen_xori_tl(reg, cc.reg, 1);
1171 } else {
1172 tcg_gen_mov_tl(reg, cc.reg);
1173 }
1174 return;
1175 }
1176
1177 if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
1178 cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
1179 tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
1180 tcg_gen_andi_tl(reg, reg, 1);
1181 return;
1182 }
1183 if (cc.mask != -1) {
1184 tcg_gen_andi_tl(reg, cc.reg, cc.mask);
1185 cc.reg = reg;
1186 }
1187 if (cc.use_reg2) {
1188 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1189 } else {
1190 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1191 }
1192 }
1193
1194 static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1195 {
1196 gen_setcc1(s, JCC_B << 1, reg);
1197 }
1198
1199 /* generate a conditional jump to label 'l1' according to jump opcode
1200    value 'b'. In the fast case, T0 is guaranteed not to be used. */
1201 static inline void gen_jcc1_noeob(DisasContext *s, int b, int l1)
1202 {
1203 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
1204
1205 if (cc.mask != -1) {
1206 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1207 cc.reg = cpu_T[0];
1208 }
1209 if (cc.use_reg2) {
1210 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1211 } else {
1212 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1213 }
1214 }
1215
1216 /* Generate a conditional jump to label 'l1' according to jump opcode
1217    value 'b'. In the fast case, T0 is guaranteed not to be used.
1218 A translation block must end soon. */
1219 static inline void gen_jcc1(DisasContext *s, int b, int l1)
1220 {
1221 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
1222
1223 gen_update_cc_op(s);
1224 if (cc.mask != -1) {
1225 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1226 cc.reg = cpu_T[0];
1227 }
1228 set_cc_op(s, CC_OP_DYNAMIC);
1229 if (cc.use_reg2) {
1230 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1231 } else {
1232 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1233 }
1234 }
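/* Unlike gen_jcc1_noeob(), this variant is used when the translation block
   is about to end: cc_op is written back with gen_update_cc_op() before the
   branch and the translator falls back to CC_OP_DYNAMIC, since after the
   conditional branch a single static cc_op value can no longer describe
   both successor paths. */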
1235
1236 /* XXX: does not work with gdbstub "ice" single step - not a
1237 serious problem */
1238 static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
1239 {
1240 int l1, l2;
1241
1242 l1 = gen_new_label();
1243 l2 = gen_new_label();
1244 gen_op_jnz_ecx(s->aflag, l1);
1245 gen_set_label(l2);
1246 gen_jmp_tb(s, next_eip, 1);
1247 gen_set_label(l1);
1248 return l2;
1249 }
1250
1251 static inline void gen_stos(DisasContext *s, int ot)
1252 {
1253 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1254 gen_string_movl_A0_EDI(s);
1255 gen_op_st_T0_A0(ot + s->mem_index);
1256 gen_op_movl_T0_Dshift(ot);
1257 gen_op_add_reg_T0(s->aflag, R_EDI);
1258 }
1259
1260 static inline void gen_lods(DisasContext *s, int ot)
1261 {
1262 gen_string_movl_A0_ESI(s);
1263 gen_op_ld_T0_A0(ot + s->mem_index);
1264 gen_op_mov_reg_T0(ot, R_EAX);
1265 gen_op_movl_T0_Dshift(ot);
1266 gen_op_add_reg_T0(s->aflag, R_ESI);
1267 }
1268
1269 static inline void gen_scas(DisasContext *s, int ot)
1270 {
1271 gen_string_movl_A0_EDI(s);
1272 gen_op_ld_T1_A0(ot + s->mem_index);
1273 gen_op(s, OP_CMPL, ot, R_EAX);
1274 gen_op_movl_T0_Dshift(ot);
1275 gen_op_add_reg_T0(s->aflag, R_EDI);
1276 }
1277
1278 static inline void gen_cmps(DisasContext *s, int ot)
1279 {
1280 gen_string_movl_A0_EDI(s);
1281 gen_op_ld_T1_A0(ot + s->mem_index);
1282 gen_string_movl_A0_ESI(s);
1283 gen_op(s, OP_CMPL, ot, OR_TMP0);
1284 gen_op_movl_T0_Dshift(ot);
1285 gen_op_add_reg_T0(s->aflag, R_ESI);
1286 gen_op_add_reg_T0(s->aflag, R_EDI);
1287 }
1288
1289 static inline void gen_ins(DisasContext *s, int ot)
1290 {
1291 if (use_icount)
1292 gen_io_start();
1293 gen_string_movl_A0_EDI(s);
1294 /* Note: we must do this dummy write first to be restartable in
1295 case of page fault. */
1296 gen_op_movl_T0_0();
1297 gen_op_st_T0_A0(ot + s->mem_index);
1298 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1299 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1300 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1301 gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
1302 gen_op_st_T0_A0(ot + s->mem_index);
1303 gen_op_movl_T0_Dshift(ot);
1304 gen_op_add_reg_T0(s->aflag, R_EDI);
1305 if (use_icount)
1306 gen_io_end();
1307 }
1308
1309 static inline void gen_outs(DisasContext *s, int ot)
1310 {
1311 if (use_icount)
1312 gen_io_start();
1313 gen_string_movl_A0_ESI(s);
1314 gen_op_ld_T0_A0(ot + s->mem_index);
1315
1316 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1317 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1318 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1319 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
1320 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
1321
1322 gen_op_movl_T0_Dshift(ot);
1323 gen_op_add_reg_T0(s->aflag, R_ESI);
1324 if (use_icount)
1325 gen_io_end();
1326 }
1327
1328 /* same method as Valgrind: we generate jumps to the current or next
1329 instruction */
1330 #define GEN_REPZ(op) \
1331 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1332 target_ulong cur_eip, target_ulong next_eip) \
1333 { \
1334 int l2;\
1335 gen_update_cc_op(s); \
1336 l2 = gen_jz_ecx_string(s, next_eip); \
1337 gen_ ## op(s, ot); \
1338 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1339 /* a loop would cause two single step exceptions if ECX = 1 \
1340 before rep string_insn */ \
1341 if (!s->jmp_opt) \
1342 gen_op_jz_ecx(s->aflag, l2); \
1343 gen_jmp(s, cur_eip); \
1344 }
1345
1346 #define GEN_REPZ2(op) \
1347 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1348 target_ulong cur_eip, \
1349 target_ulong next_eip, \
1350 int nz) \
1351 { \
1352 int l2;\
1353 gen_update_cc_op(s); \
1354 l2 = gen_jz_ecx_string(s, next_eip); \
1355 gen_ ## op(s, ot); \
1356 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1357 gen_update_cc_op(s); \
1358 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
1359 if (!s->jmp_opt) \
1360 gen_op_jz_ecx(s->aflag, l2); \
1361 gen_jmp(s, cur_eip); \
1362 }
1363
1364 GEN_REPZ(movs)
1365 GEN_REPZ(stos)
1366 GEN_REPZ(lods)
1367 GEN_REPZ(ins)
1368 GEN_REPZ(outs)
1369 GEN_REPZ2(scas)
1370 GEN_REPZ2(cmps)
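/* The expanded gen_repz_* helpers thus emit one string iteration per
   translation block: ECX is tested on entry (exiting to next_eip when it is
   zero), the body runs once, ECX is decremented, and control jumps back to
   the current instruction at cur_eip; for the REPZ/REPNZ forms (scas/cmps)
   an additional ZF test after the body can also terminate the loop. */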
1371
1372 static void gen_helper_fp_arith_ST0_FT0(int op)
1373 {
1374 switch (op) {
1375 case 0:
1376 gen_helper_fadd_ST0_FT0(cpu_env);
1377 break;
1378 case 1:
1379 gen_helper_fmul_ST0_FT0(cpu_env);
1380 break;
1381 case 2:
1382 gen_helper_fcom_ST0_FT0(cpu_env);
1383 break;
1384 case 3:
1385 gen_helper_fcom_ST0_FT0(cpu_env);
1386 break;
1387 case 4:
1388 gen_helper_fsub_ST0_FT0(cpu_env);
1389 break;
1390 case 5:
1391 gen_helper_fsubr_ST0_FT0(cpu_env);
1392 break;
1393 case 6:
1394 gen_helper_fdiv_ST0_FT0(cpu_env);
1395 break;
1396 case 7:
1397 gen_helper_fdivr_ST0_FT0(cpu_env);
1398 break;
1399 }
1400 }
1401
1402 /* NOTE the exception in "r" op ordering: for the STN,ST0 forms, ops 4/5 map to
        fsubr/fsub and ops 6/7 to fdivr/fdiv, reversed with respect to
        gen_helper_fp_arith_ST0_FT0 above. */
1403 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1404 {
1405 TCGv_i32 tmp = tcg_const_i32(opreg);
1406 switch (op) {
1407 case 0:
1408 gen_helper_fadd_STN_ST0(cpu_env, tmp);
1409 break;
1410 case 1:
1411 gen_helper_fmul_STN_ST0(cpu_env, tmp);
1412 break;
1413 case 4:
1414 gen_helper_fsubr_STN_ST0(cpu_env, tmp);
1415 break;
1416 case 5:
1417 gen_helper_fsub_STN_ST0(cpu_env, tmp);
1418 break;
1419 case 6:
1420 gen_helper_fdivr_STN_ST0(cpu_env, tmp);
1421 break;
1422 case 7:
1423 gen_helper_fdiv_STN_ST0(cpu_env, tmp);
1424 break;
1425 }
1426 }
1427
1428 /* if d == OR_TMP0, it means memory operand (address in A0) */
1429 static void gen_op(DisasContext *s1, int op, int ot, int d)
1430 {
1431 if (d != OR_TMP0) {
1432 gen_op_mov_TN_reg(ot, 0, d);
1433 } else {
1434 gen_op_ld_T0_A0(ot + s1->mem_index);
1435 }
1436 switch(op) {
1437 case OP_ADCL:
1438 gen_compute_eflags_c(s1, cpu_tmp4);
1439 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1440 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1441 if (d != OR_TMP0)
1442 gen_op_mov_reg_T0(ot, d);
1443 else
1444 gen_op_st_T0_A0(ot + s1->mem_index);
1445 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1446 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1447 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1448 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1449 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
1450 set_cc_op(s1, CC_OP_DYNAMIC);
1451 break;
1452 case OP_SBBL:
1453 /*
1454 * No need to store cpu_cc_srcT, because it is used only
1455 * when the cc_op is known.
1456 */
1457 gen_compute_eflags_c(s1, cpu_tmp4);
1458 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1459 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1460 if (d != OR_TMP0)
1461 gen_op_mov_reg_T0(ot, d);
1462 else
1463 gen_op_st_T0_A0(ot + s1->mem_index);
1464 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1465 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1466 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1467 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1468 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot);
1469 set_cc_op(s1, CC_OP_DYNAMIC);
1470 break;
1471 case OP_ADDL:
1472 gen_op_addl_T0_T1();
1473 if (d != OR_TMP0)
1474 gen_op_mov_reg_T0(ot, d);
1475 else
1476 gen_op_st_T0_A0(ot + s1->mem_index);
1477 gen_op_update2_cc();
1478 set_cc_op(s1, CC_OP_ADDB + ot);
1479 break;
1480 case OP_SUBL:
1481 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
1482 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1483 if (d != OR_TMP0)
1484 gen_op_mov_reg_T0(ot, d);
1485 else
1486 gen_op_st_T0_A0(ot + s1->mem_index);
1487 gen_op_update2_cc();
1488 set_cc_op(s1, CC_OP_SUBB + ot);
1489 break;
1490 default:
1491 case OP_ANDL:
1492 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1493 if (d != OR_TMP0)
1494 gen_op_mov_reg_T0(ot, d);
1495 else
1496 gen_op_st_T0_A0(ot + s1->mem_index);
1497 gen_op_update1_cc();
1498 set_cc_op(s1, CC_OP_LOGICB + ot);
1499 break;
1500 case OP_ORL:
1501 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1502 if (d != OR_TMP0)
1503 gen_op_mov_reg_T0(ot, d);
1504 else
1505 gen_op_st_T0_A0(ot + s1->mem_index);
1506 gen_op_update1_cc();
1507 set_cc_op(s1, CC_OP_LOGICB + ot);
1508 break;
1509 case OP_XORL:
1510 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1511 if (d != OR_TMP0)
1512 gen_op_mov_reg_T0(ot, d);
1513 else
1514 gen_op_st_T0_A0(ot + s1->mem_index);
1515 gen_op_update1_cc();
1516 set_cc_op(s1, CC_OP_LOGICB + ot);
1517 break;
1518 case OP_CMPL:
1519 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1520 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
1521 tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
1522 set_cc_op(s1, CC_OP_SUBB + ot);
1523 break;
1524 }
1525 }
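/* For ADC and SBB the resulting cc_op depends on the run-time carry, so it
   is computed dynamically: cpu_cc_op is set to CC_OP_ADDB/CC_OP_SUBB + ot
   plus (carry << 2), which selects the corresponding ADC/SBB variant when
   the carry was set (each CC_OP family has four size variants), and the
   translator state is marked CC_OP_DYNAMIC. */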
1526
1527 /* if d == OR_TMP0, it means memory operand (address in A0) */
1528 static void gen_inc(DisasContext *s1, int ot, int d, int c)
1529 {
1530 if (d != OR_TMP0)
1531 gen_op_mov_TN_reg(ot, 0, d);
1532 else
1533 gen_op_ld_T0_A0(ot + s1->mem_index);
1534 gen_compute_eflags_c(s1, cpu_cc_src);
1535 if (c > 0) {
1536 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
1537 set_cc_op(s1, CC_OP_INCB + ot);
1538 } else {
1539 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
1540 set_cc_op(s1, CC_OP_DECB + ot);
1541 }
1542 if (d != OR_TMP0)
1543 gen_op_mov_reg_T0(ot, d);
1544 else
1545 gen_op_st_T0_A0(ot + s1->mem_index);
1546 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1547 }
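/* INC and DEC leave CF unchanged, which is why the incoming carry is saved
   into cpu_cc_src before the operation: the INC/DEC cc_op modes keep both
   cc_dst (the result) and cc_src (the saved CF) live, and
   gen_prepare_eflags_c() reads CF straight back out of cc_src for them. */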
1548
1549 static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
1550 int is_right, int is_arith)
1551 {
1552 target_ulong mask;
1553 int shift_label;
1554 TCGv t0, t1, t2;
1555
1556 if (ot == OT_QUAD) {
1557 mask = 0x3f;
1558 } else {
1559 mask = 0x1f;
1560 }
1561
1562 /* load */
1563 if (op1 == OR_TMP0) {
1564 gen_op_ld_T0_A0(ot + s->mem_index);
1565 } else {
1566 gen_op_mov_TN_reg(ot, 0, op1);
1567 }
1568
1569 t0 = tcg_temp_local_new();
1570 t1 = tcg_temp_local_new();
1571 t2 = tcg_temp_local_new();
1572
1573 tcg_gen_andi_tl(t2, cpu_T[1], mask);
1574
1575 if (is_right) {
1576 if (is_arith) {
1577 gen_exts(ot, cpu_T[0]);
1578 tcg_gen_mov_tl(t0, cpu_T[0]);
1579 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], t2);
1580 } else {
1581 gen_extu(ot, cpu_T[0]);
1582 tcg_gen_mov_tl(t0, cpu_T[0]);
1583 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], t2);
1584 }
1585 } else {
1586 tcg_gen_mov_tl(t0, cpu_T[0]);
1587 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], t2);
1588 }
1589
1590 /* store */
1591 if (op1 == OR_TMP0) {
1592 gen_op_st_T0_A0(ot + s->mem_index);
1593 } else {
1594 gen_op_mov_reg_T0(ot, op1);
1595 }
1596
1597 /* Update eflags data because we cannot predict flags afterward. */
1598 gen_update_cc_op(s);
1599 set_cc_op(s, CC_OP_DYNAMIC);
1600
1601 tcg_gen_mov_tl(t1, cpu_T[0]);
1602
1603 shift_label = gen_new_label();
1604 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, shift_label);
1605
1606 tcg_gen_addi_tl(t2, t2, -1);
1607 tcg_gen_mov_tl(cpu_cc_dst, t1);
1608
1609 if (is_right) {
1610 if (is_arith) {
1611 tcg_gen_sar_tl(cpu_cc_src, t0, t2);
1612 } else {
1613 tcg_gen_shr_tl(cpu_cc_src, t0, t2);
1614 }
1615 } else {
1616 tcg_gen_shl_tl(cpu_cc_src, t0, t2);
1617 }
1618
1619 if (is_right) {
1620 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
1621 } else {
1622 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
1623 }
1624
1625 gen_set_label(shift_label);
1626
1627 tcg_temp_free(t0);
1628 tcg_temp_free(t1);
1629 tcg_temp_free(t2);
1630 }
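/* A shift by zero leaves the flags unchanged and the count is only known at
   run time, so the code above writes back the old cc_op (gen_update_cc_op)
   and switches to CC_OP_DYNAMIC, then updates cc_src/cc_dst and cpu_cc_op
   only inside the "count != 0" branch. */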
1631
1632 static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
1633 int is_right, int is_arith)
1634 {
1635 int mask;
1636
1637 if (ot == OT_QUAD)
1638 mask = 0x3f;
1639 else
1640 mask = 0x1f;
1641
1642 /* load */
1643 if (op1 == OR_TMP0)
1644 gen_op_ld_T0_A0(ot + s->mem_index);
1645 else
1646 gen_op_mov_TN_reg(ot, 0, op1);
1647
1648 op2 &= mask;
1649 if (op2 != 0) {
1650 if (is_right) {
1651 if (is_arith) {
1652 gen_exts(ot, cpu_T[0]);
1653 tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1654 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
1655 } else {
1656 gen_extu(ot, cpu_T[0]);
1657 tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1658 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
1659 }
1660 } else {
1661 tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1662 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
1663 }
1664 }
1665
1666 /* store */
1667 if (op1 == OR_TMP0)
1668 gen_op_st_T0_A0(ot + s->mem_index);
1669 else
1670 gen_op_mov_reg_T0(ot, op1);
1671
1672     /* update eflags if the shift count is non zero */
1673 if (op2 != 0) {
1674 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
1675 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1676 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1677 }
1678 }
1679
1680 static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
1681 {
1682 if (arg2 >= 0)
1683 tcg_gen_shli_tl(ret, arg1, arg2);
1684 else
1685 tcg_gen_shri_tl(ret, arg1, -arg2);
1686 }
1687
1688 static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
1689 int is_right)
1690 {
1691 target_ulong mask;
1692 int label1, label2, data_bits;
1693 TCGv t0, t1, t2, a0;
1694
1695 /* XXX: inefficient, but we must use local temps */
1696 t0 = tcg_temp_local_new();
1697 t1 = tcg_temp_local_new();
1698 t2 = tcg_temp_local_new();
1699 a0 = tcg_temp_local_new();
1700
1701 if (ot == OT_QUAD)
1702 mask = 0x3f;
1703 else
1704 mask = 0x1f;
1705
1706 /* load */
1707 if (op1 == OR_TMP0) {
1708 tcg_gen_mov_tl(a0, cpu_A0);
1709 gen_op_ld_v(ot + s->mem_index, t0, a0);
1710 } else {
1711 gen_op_mov_v_reg(ot, t0, op1);
1712 }
1713
1714 tcg_gen_mov_tl(t1, cpu_T[1]);
1715
1716 tcg_gen_andi_tl(t1, t1, mask);
1717
1718 /* Must test zero case to avoid using undefined behaviour in TCG
1719 shifts. */
1720 label1 = gen_new_label();
1721 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);
1722
1723 if (ot <= OT_WORD)
1724 tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
1725 else
1726 tcg_gen_mov_tl(cpu_tmp0, t1);
1727
1728 gen_extu(ot, t0);
1729 tcg_gen_mov_tl(t2, t0);
1730
1731 data_bits = 8 << ot;
1732 /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
1733 fix TCG definition) */
1734 if (is_right) {
1735 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
1736 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1737 tcg_gen_shl_tl(t0, t0, cpu_tmp0);
1738 } else {
1739 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
1740 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1741 tcg_gen_shr_tl(t0, t0, cpu_tmp0);
1742 }
1743 tcg_gen_or_tl(t0, t0, cpu_tmp4);
1744
1745 gen_set_label(label1);
1746 /* store */
1747 if (op1 == OR_TMP0) {
1748 gen_op_st_v(ot + s->mem_index, t0, a0);
1749 } else {
1750 gen_op_mov_reg_v(ot, op1, t0);
1751 }
1752
1753     /* update eflags. It is needed most of the time anyway, so compute it unconditionally. */
1754 gen_compute_eflags(s);
1755 assert(s->cc_op == CC_OP_EFLAGS);
1756
1757 label2 = gen_new_label();
1758 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2);
1759
1760 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1761 tcg_gen_xor_tl(cpu_tmp0, t2, t0);
1762 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1763 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1764 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1765 if (is_right) {
1766 tcg_gen_shri_tl(t0, t0, data_bits - 1);
1767 }
1768 tcg_gen_andi_tl(t0, t0, CC_C);
1769 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1770
1771 gen_set_label(label2);
1772
1773 tcg_temp_free(t0);
1774 tcg_temp_free(t1);
1775 tcg_temp_free(t2);
1776 tcg_temp_free(a0);
1777 }
1778
1779 static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
1780 int is_right)
1781 {
1782 int mask;
1783 int data_bits;
1784 TCGv t0, t1, a0;
1785
1786 /* XXX: inefficient, but we must use local temps */
1787 t0 = tcg_temp_local_new();
1788 t1 = tcg_temp_local_new();
1789 a0 = tcg_temp_local_new();
1790
1791 if (ot == OT_QUAD)
1792 mask = 0x3f;
1793 else
1794 mask = 0x1f;
1795
1796 /* load */
1797 if (op1 == OR_TMP0) {
1798 tcg_gen_mov_tl(a0, cpu_A0);
1799 gen_op_ld_v(ot + s->mem_index, t0, a0);
1800 } else {
1801 gen_op_mov_v_reg(ot, t0, op1);
1802 }
1803
1804 gen_extu(ot, t0);
1805 tcg_gen_mov_tl(t1, t0);
1806
1807 op2 &= mask;
1808 data_bits = 8 << ot;
1809 if (op2 != 0) {
1810 int shift = op2 & ((1 << (3 + ot)) - 1);
1811 if (is_right) {
1812 tcg_gen_shri_tl(cpu_tmp4, t0, shift);
1813 tcg_gen_shli_tl(t0, t0, data_bits - shift);
1814 }
1815 else {
1816 tcg_gen_shli_tl(cpu_tmp4, t0, shift);
1817 tcg_gen_shri_tl(t0, t0, data_bits - shift);
1818 }
1819 tcg_gen_or_tl(t0, t0, cpu_tmp4);
1820 }
1821
1822 /* store */
1823 if (op1 == OR_TMP0) {
1824 gen_op_st_v(ot + s->mem_index, t0, a0);
1825 } else {
1826 gen_op_mov_reg_v(ot, op1, t0);
1827 }
1828
1829 if (op2 != 0) {
1830 /* update eflags */
1831 gen_compute_eflags(s);
1832 assert(s->cc_op == CC_OP_EFLAGS);
1833
1834 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1835 tcg_gen_xor_tl(cpu_tmp0, t1, t0);
1836 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1837 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1838 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1839 if (is_right) {
1840 tcg_gen_shri_tl(t0, t0, data_bits - 1);
1841 }
1842 tcg_gen_andi_tl(t0, t0, CC_C);
1843 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1844 }
1845
1846 tcg_temp_free(t0);
1847 tcg_temp_free(t1);
1848 tcg_temp_free(a0);
1849 }
1850
1851 /* XXX: add faster immediate = 1 case */
1852 static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
1853 int is_right)
1854 {
1855 gen_compute_eflags(s);
1856 assert(s->cc_op == CC_OP_EFLAGS);
1857
1858 /* load */
1859 if (op1 == OR_TMP0)
1860 gen_op_ld_T0_A0(ot + s->mem_index);
1861 else
1862 gen_op_mov_TN_reg(ot, 0, op1);
1863
1864 if (is_right) {
1865 switch (ot) {
1866 case OT_BYTE:
1867 gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1868 break;
1869 case OT_WORD:
1870 gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1871 break;
1872 case OT_LONG:
1873 gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1874 break;
1875 #ifdef TARGET_X86_64
1876 case OT_QUAD:
1877 gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1878 break;
1879 #endif
1880 }
1881 } else {
1882 switch (ot) {
1883 case OT_BYTE:
1884 gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1885 break;
1886 case OT_WORD:
1887 gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1888 break;
1889 case OT_LONG:
1890 gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1891 break;
1892 #ifdef TARGET_X86_64
1893 case OT_QUAD:
1894 gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1895 break;
1896 #endif
1897 }
1898 }
1899 /* store */
1900 if (op1 == OR_TMP0)
1901 gen_op_st_T0_A0(ot + s->mem_index);
1902 else
1903 gen_op_mov_reg_T0(ot, op1);
1904 }
1905
1906 /* XXX: add faster immediate case */
1907 static void gen_shiftd_rm_T1(DisasContext *s, int ot, int op1,
1908 int is_right, TCGv count)
1909 {
1910 int label1, label2, data_bits;
1911 target_ulong mask;
1912 TCGv t0, t1, t2, a0;
1913
1914 t0 = tcg_temp_local_new();
1915 t1 = tcg_temp_local_new();
1916 t2 = tcg_temp_local_new();
1917 a0 = tcg_temp_local_new();
1918
1919 if (ot == OT_QUAD)
1920 mask = 0x3f;
1921 else
1922 mask = 0x1f;
1923
1924 /* load */
1925 if (op1 == OR_TMP0) {
1926 tcg_gen_mov_tl(a0, cpu_A0);
1927 gen_op_ld_v(ot + s->mem_index, t0, a0);
1928 } else {
1929 gen_op_mov_v_reg(ot, t0, op1);
1930 }
1931
1932 tcg_gen_andi_tl(t2, count, mask);
1933 tcg_gen_mov_tl(t1, cpu_T[1]);
1934
1935 /* Must test zero case to avoid using undefined behaviour in TCG
1936 shifts. */
1937 label1 = gen_new_label();
1938 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
1939
1940 tcg_gen_addi_tl(cpu_tmp5, t2, -1);
1941 if (ot == OT_WORD) {
1942 /* Note: we implement the Intel behaviour for shift count > 16 */
1943 if (is_right) {
1944 tcg_gen_andi_tl(t0, t0, 0xffff);
1945 tcg_gen_shli_tl(cpu_tmp0, t1, 16);
1946 tcg_gen_or_tl(t0, t0, cpu_tmp0);
1947 tcg_gen_ext32u_tl(t0, t0);
1948
1949 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
1950
1951             /* only needed if count > 16, but testing for that would complicate the code */
1952 tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
1953 tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);
1954
1955 tcg_gen_shr_tl(t0, t0, t2);
1956
1957 tcg_gen_or_tl(t0, t0, cpu_tmp0);
1958 } else {
1959 /* XXX: not optimal */
1960 tcg_gen_andi_tl(t0, t0, 0xffff);
1961 tcg_gen_shli_tl(t1, t1, 16);
1962 tcg_gen_or_tl(t1, t1, t0);
1963 tcg_gen_ext32u_tl(t1, t1);
1964
1965 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
1966 tcg_gen_subfi_tl(cpu_tmp0, 32, cpu_tmp5);
1967 tcg_gen_shr_tl(cpu_tmp5, t1, cpu_tmp0);
1968 tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp5);
1969
1970 tcg_gen_shl_tl(t0, t0, t2);
1971 tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
1972 tcg_gen_shr_tl(t1, t1, cpu_tmp5);
1973 tcg_gen_or_tl(t0, t0, t1);
1974 }
1975 } else {
1976 data_bits = 8 << ot;
1977 if (is_right) {
1978 if (ot == OT_LONG)
1979 tcg_gen_ext32u_tl(t0, t0);
1980
1981 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
1982
1983 tcg_gen_shr_tl(t0, t0, t2);
1984 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
1985 tcg_gen_shl_tl(t1, t1, cpu_tmp5);
1986 tcg_gen_or_tl(t0, t0, t1);
1987
1988 } else {
1989 if (ot == OT_LONG)
1990 tcg_gen_ext32u_tl(t1, t1);
1991
1992 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
1993
1994 tcg_gen_shl_tl(t0, t0, t2);
1995 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
1996 tcg_gen_shr_tl(t1, t1, cpu_tmp5);
1997 tcg_gen_or_tl(t0, t0, t1);
1998 }
1999 }
2000 tcg_gen_mov_tl(t1, cpu_tmp4);
2001
2002 gen_set_label(label1);
2003 /* store */
2004 if (op1 == OR_TMP0) {
2005 gen_op_st_v(ot + s->mem_index, t0, a0);
2006 } else {
2007 gen_op_mov_reg_v(ot, op1, t0);
2008 }
2009
2010 /* Update eflags data because we cannot predict flags afterward. */
2011 gen_update_cc_op(s);
2012 set_cc_op(s, CC_OP_DYNAMIC);
2013
2014 label2 = gen_new_label();
2015 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2);
2016
2017 tcg_gen_mov_tl(cpu_cc_src, t1);
2018 tcg_gen_mov_tl(cpu_cc_dst, t0);
2019 if (is_right) {
2020 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
2021 } else {
2022 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
2023 }
2024 gen_set_label(label2);
2025
2026 tcg_temp_free(t0);
2027 tcg_temp_free(t1);
2028 tcg_temp_free(t2);
2029 tcg_temp_free(a0);
2030 }
2031
2032 static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
2033 {
2034 if (s != OR_TMP1)
2035 gen_op_mov_TN_reg(ot, 1, s);
2036 switch(op) {
2037 case OP_ROL:
2038 gen_rot_rm_T1(s1, ot, d, 0);
2039 break;
2040 case OP_ROR:
2041 gen_rot_rm_T1(s1, ot, d, 1);
2042 break;
2043 case OP_SHL:
2044 case OP_SHL1:
2045 gen_shift_rm_T1(s1, ot, d, 0, 0);
2046 break;
2047 case OP_SHR:
2048 gen_shift_rm_T1(s1, ot, d, 1, 0);
2049 break;
2050 case OP_SAR:
2051 gen_shift_rm_T1(s1, ot, d, 1, 1);
2052 break;
2053 case OP_RCL:
2054 gen_rotc_rm_T1(s1, ot, d, 0);
2055 break;
2056 case OP_RCR:
2057 gen_rotc_rm_T1(s1, ot, d, 1);
2058 break;
2059 }
2060 }
2061
2062 static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
2063 {
2064 switch(op) {
2065 case OP_ROL:
2066 gen_rot_rm_im(s1, ot, d, c, 0);
2067 break;
2068 case OP_ROR:
2069 gen_rot_rm_im(s1, ot, d, c, 1);
2070 break;
2071 case OP_SHL:
2072 case OP_SHL1:
2073 gen_shift_rm_im(s1, ot, d, c, 0, 0);
2074 break;
2075 case OP_SHR:
2076 gen_shift_rm_im(s1, ot, d, c, 1, 0);
2077 break;
2078 case OP_SAR:
2079 gen_shift_rm_im(s1, ot, d, c, 1, 1);
2080 break;
2081 default:
2082 /* currently not optimized */
2083 gen_op_movl_T1_im(c);
2084 gen_shift(s1, op, ot, d, OR_TMP1);
2085 break;
2086 }
2087 }
2088
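/* Effective address computation from the ModRM byte: mod (bits 7..6)
   selects the displacement form and rm (bits 2..0) the base register.
   With 32/64 bit addressing, rm == 4 means a SIB byte follows
   (scale/index/base), and mod == 0 with base == 5 selects a 32 bit
   displacement with no base register (RIP-relative in 64 bit code when no
   SIB byte is present).  The resulting address is left in cpu_A0, with a
   segment base added when required. */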
2089 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm,
2090 int *reg_ptr, int *offset_ptr)
2091 {
2092 target_long disp;
2093 int havesib;
2094 int base;
2095 int index;
2096 int scale;
2097 int opreg;
2098 int mod, rm, code, override, must_add_seg;
2099
2100 override = s->override;
2101 must_add_seg = s->addseg;
2102 if (override >= 0)
2103 must_add_seg = 1;
2104 mod = (modrm >> 6) & 3;
2105 rm = modrm & 7;
2106
2107 if (s->aflag) {
2108
2109 havesib = 0;
2110 base = rm;
2111 index = 0;
2112 scale = 0;
2113
2114 if (base == 4) {
2115 havesib = 1;
2116 code = cpu_ldub_code(env, s->pc++);
2117 scale = (code >> 6) & 3;
2118 index = ((code >> 3) & 7) | REX_X(s);
2119 base = (code & 7);
2120 }
2121 base |= REX_B(s);
2122
2123 switch (mod) {
2124 case 0:
2125 if ((base & 7) == 5) {
2126 base = -1;
2127 disp = (int32_t)cpu_ldl_code(env, s->pc);
2128 s->pc += 4;
2129 if (CODE64(s) && !havesib) {
2130 disp += s->pc + s->rip_offset;
2131 }
2132 } else {
2133 disp = 0;
2134 }
2135 break;
2136 case 1:
2137 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2138 break;
2139 default:
2140 case 2:
2141 disp = (int32_t)cpu_ldl_code(env, s->pc);
2142 s->pc += 4;
2143 break;
2144 }
2145
2146 if (base >= 0) {
2147 /* for correct popl handling with esp */
2148 if (base == 4 && s->popl_esp_hack)
2149 disp += s->popl_esp_hack;
2150 #ifdef TARGET_X86_64
2151 if (s->aflag == 2) {
2152 gen_op_movq_A0_reg(base);
2153 if (disp != 0) {
2154 gen_op_addq_A0_im(disp);
2155 }
2156 } else
2157 #endif
2158 {
2159 gen_op_movl_A0_reg(base);
2160 if (disp != 0)
2161 gen_op_addl_A0_im(disp);
2162 }
2163 } else {
2164 #ifdef TARGET_X86_64
2165 if (s->aflag == 2) {
2166 gen_op_movq_A0_im(disp);
2167 } else
2168 #endif
2169 {
2170 gen_op_movl_A0_im(disp);
2171 }
2172 }
2173 /* index == 4 means no index */
2174 if (havesib && (index != 4)) {
2175 #ifdef TARGET_X86_64
2176 if (s->aflag == 2) {
2177 gen_op_addq_A0_reg_sN(scale, index);
2178 } else
2179 #endif
2180 {
2181 gen_op_addl_A0_reg_sN(scale, index);
2182 }
2183 }
2184 if (must_add_seg) {
2185 if (override < 0) {
2186 if (base == R_EBP || base == R_ESP)
2187 override = R_SS;
2188 else
2189 override = R_DS;
2190 }
2191 #ifdef TARGET_X86_64
2192 if (s->aflag == 2) {
2193 gen_op_addq_A0_seg(override);
2194 } else
2195 #endif
2196 {
2197 gen_op_addl_A0_seg(s, override);
2198 }
2199 }
2200 } else {
2201 switch (mod) {
2202 case 0:
2203 if (rm == 6) {
2204 disp = cpu_lduw_code(env, s->pc);
2205 s->pc += 2;
2206 gen_op_movl_A0_im(disp);
2207 rm = 0; /* avoid SS override */
2208 goto no_rm;
2209 } else {
2210 disp = 0;
2211 }
2212 break;
2213 case 1:
2214 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2215 break;
2216 default:
2217 case 2:
2218 disp = cpu_lduw_code(env, s->pc);
2219 s->pc += 2;
2220 break;
2221 }
2222 switch(rm) {
2223 case 0:
2224 gen_op_movl_A0_reg(R_EBX);
2225 gen_op_addl_A0_reg_sN(0, R_ESI);
2226 break;
2227 case 1:
2228 gen_op_movl_A0_reg(R_EBX);
2229 gen_op_addl_A0_reg_sN(0, R_EDI);
2230 break;
2231 case 2:
2232 gen_op_movl_A0_reg(R_EBP);
2233 gen_op_addl_A0_reg_sN(0, R_ESI);
2234 break;
2235 case 3:
2236 gen_op_movl_A0_reg(R_EBP);
2237 gen_op_addl_A0_reg_sN(0, R_EDI);
2238 break;
2239 case 4:
2240 gen_op_movl_A0_reg(R_ESI);
2241 break;
2242 case 5:
2243 gen_op_movl_A0_reg(R_EDI);
2244 break;
2245 case 6:
2246 gen_op_movl_A0_reg(R_EBP);
2247 break;
2248 default:
2249 case 7:
2250 gen_op_movl_A0_reg(R_EBX);
2251 break;
2252 }
2253 if (disp != 0)
2254 gen_op_addl_A0_im(disp);
2255 gen_op_andl_A0_ffff();
2256 no_rm:
2257 if (must_add_seg) {
2258 if (override < 0) {
2259 if (rm == 2 || rm == 3 || rm == 6)
2260 override = R_SS;
2261 else
2262 override = R_DS;
2263 }
2264 gen_op_addl_A0_seg(s, override);
2265 }
2266 }
2267
2268 opreg = OR_A0;
2269 disp = 0;
2270 *reg_ptr = opreg;
2271 *offset_ptr = disp;
2272 }
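
/*
 * Editor's illustrative sketch -- not part of the original translate.c.
 * The 16-bit branch of gen_lea_modrm() above implements the classic ModRM
 * base-register pairs plus the mod == 0, rm == 6 "pure disp16" special case.
 * A plain-C model of the same effective-address computation (segment bases
 * left out) could look like this:
 */
static uint32_t ea16_sketch(CPUX86State *env, int mod, int rm, int disp)
{
    /* base registers selected by the low three ModRM bits */
    static const int base1[8] = { R_EBX, R_EBX, R_EBP, R_EBP,
                                  R_ESI, R_EDI, R_EBP, R_EBX };
    static const int base2[8] = { R_ESI, R_EDI, R_ESI, R_EDI,
                                  -1, -1, -1, -1 };
    uint32_t ea;

    if (mod == 0 && rm == 6) {
        return disp & 0xffff;        /* disp16 only, no base register */
    }
    ea = env->regs[base1[rm]];
    if (base2[rm] >= 0) {
        ea += env->regs[base2[rm]];
    }
    /* 16-bit wrap, cf. gen_op_andl_A0_ffff() above */
    return (ea + disp) & 0xffff;
}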
2273
2274 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2275 {
2276 int mod, rm, base, code;
2277
2278 mod = (modrm >> 6) & 3;
2279 if (mod == 3)
2280 return;
2281 rm = modrm & 7;
2282
2283 if (s->aflag) {
2284
2285 base = rm;
2286
2287 if (base == 4) {
2288 code = cpu_ldub_code(env, s->pc++);
2289 base = (code & 7);
2290 }
2291
2292 switch (mod) {
2293 case 0:
2294 if (base == 5) {
2295 s->pc += 4;
2296 }
2297 break;
2298 case 1:
2299 s->pc++;
2300 break;
2301 default:
2302 case 2:
2303 s->pc += 4;
2304 break;
2305 }
2306 } else {
2307 switch (mod) {
2308 case 0:
2309 if (rm == 6) {
2310 s->pc += 2;
2311 }
2312 break;
2313 case 1:
2314 s->pc++;
2315 break;
2316 default:
2317 case 2:
2318 s->pc += 2;
2319 break;
2320 }
2321 }
2322 }
2323
2324 /* used for LEA and MOV AX, mem */
2325 static void gen_add_A0_ds_seg(DisasContext *s)
2326 {
2327 int override, must_add_seg;
2328 must_add_seg = s->addseg;
2329 override = R_DS;
2330 if (s->override >= 0) {
2331 override = s->override;
2332 must_add_seg = 1;
2333 }
2334 if (must_add_seg) {
2335 #ifdef TARGET_X86_64
2336 if (CODE64(s)) {
2337 gen_op_addq_A0_seg(override);
2338 } else
2339 #endif
2340 {
2341 gen_op_addl_A0_seg(s, override);
2342 }
2343 }
2344 }
2345
2346 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2347 OR_TMP0 */
2348 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2349 int ot, int reg, int is_store)
2350 {
2351 int mod, rm, opreg, disp;
2352
2353 mod = (modrm >> 6) & 3;
2354 rm = (modrm & 7) | REX_B(s);
2355 if (mod == 3) {
2356 if (is_store) {
2357 if (reg != OR_TMP0)
2358 gen_op_mov_TN_reg(ot, 0, reg);
2359 gen_op_mov_reg_T0(ot, rm);
2360 } else {
2361 gen_op_mov_TN_reg(ot, 0, rm);
2362 if (reg != OR_TMP0)
2363 gen_op_mov_reg_T0(ot, reg);
2364 }
2365 } else {
2366 gen_lea_modrm(env, s, modrm, &opreg, &disp);
2367 if (is_store) {
2368 if (reg != OR_TMP0)
2369 gen_op_mov_TN_reg(ot, 0, reg);
2370 gen_op_st_T0_A0(ot + s->mem_index);
2371 } else {
2372 gen_op_ld_T0_A0(ot + s->mem_index);
2373 if (reg != OR_TMP0)
2374 gen_op_mov_reg_T0(ot, reg);
2375 }
2376 }
2377 }
2378
2379 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, int ot)
2380 {
2381 uint32_t ret;
2382
2383 switch(ot) {
2384 case OT_BYTE:
2385 ret = cpu_ldub_code(env, s->pc);
2386 s->pc++;
2387 break;
2388 case OT_WORD:
2389 ret = cpu_lduw_code(env, s->pc);
2390 s->pc += 2;
2391 break;
2392 default:
2393 case OT_LONG:
2394 ret = cpu_ldl_code(env, s->pc);
2395 s->pc += 4;
2396 break;
2397 }
2398 return ret;
2399 }
2400
2401 static inline int insn_const_size(unsigned int ot)
2402 {
2403 if (ot <= OT_LONG)
2404 return 1 << ot;
2405 else
2406 return 4;
2407 }
2408
2409 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2410 {
2411 TranslationBlock *tb;
2412 target_ulong pc;
2413
2414 pc = s->cs_base + eip;
2415 tb = s->tb;
2416 /* NOTE: we handle the case where the TB spans two pages here */
2417 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2418 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2419 /* jump to same page: we can use a direct jump */
2420 tcg_gen_goto_tb(tb_num);
2421 gen_jmp_im(eip);
2422 tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
2423 } else {
2424 /* jump to another page: currently not optimized */
2425 gen_jmp_im(eip);
2426 gen_eob(s);
2427 }
2428 }
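
/*
 * Editor's illustrative sketch -- not part of the original translate.c.
 * gen_goto_tb() may only chain directly when the target stays on a page
 * this TB is already known to cover: either the page of the TB start or
 * the page of the last byte decoded so far.  The test above, as plain C:
 */
static inline int can_chain_directly(target_ulong target_pc,
                                     target_ulong tb_pc,
                                     target_ulong cur_pc)
{
    return (target_pc & TARGET_PAGE_MASK) == (tb_pc & TARGET_PAGE_MASK) ||
           (target_pc & TARGET_PAGE_MASK) == ((cur_pc - 1) & TARGET_PAGE_MASK);
}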
2429
2430 static inline void gen_jcc(DisasContext *s, int b,
2431 target_ulong val, target_ulong next_eip)
2432 {
2433 int l1, l2;
2434
2435 if (s->jmp_opt) {
2436 l1 = gen_new_label();
2437 gen_jcc1(s, b, l1);
2438
2439 gen_goto_tb(s, 0, next_eip);
2440
2441 gen_set_label(l1);
2442 gen_goto_tb(s, 1, val);
2443 s->is_jmp = DISAS_TB_JUMP;
2444 } else {
2445 l1 = gen_new_label();
2446 l2 = gen_new_label();
2447 gen_jcc1(s, b, l1);
2448
2449 gen_jmp_im(next_eip);
2450 tcg_gen_br(l2);
2451
2452 gen_set_label(l1);
2453 gen_jmp_im(val);
2454 gen_set_label(l2);
2455 gen_eob(s);
2456 }
2457 }
2458
2459 static void gen_cmovcc1(CPUX86State *env, DisasContext *s, int ot, int b,
2460 int modrm, int reg)
2461 {
2462 CCPrepare cc;
2463
2464 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2465
2466 cc = gen_prepare_cc(s, b, cpu_T[1]);
2467 if (cc.mask != -1) {
2468 TCGv t0 = tcg_temp_new();
2469 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2470 cc.reg = t0;
2471 }
2472 if (!cc.use_reg2) {
2473 cc.reg2 = tcg_const_tl(cc.imm);
2474 }
2475
2476 tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
2477 cpu_T[0], cpu_regs[reg]);
2478 gen_op_mov_reg_T0(ot, reg);
2479
2480 if (cc.mask != -1) {
2481 tcg_temp_free(cc.reg);
2482 }
2483 if (!cc.use_reg2) {
2484 tcg_temp_free(cc.reg2);
2485 }
2486 }
2487
2488 static inline void gen_op_movl_T0_seg(int seg_reg)
2489 {
2490 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2491 offsetof(CPUX86State,segs[seg_reg].selector));
2492 }
2493
2494 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2495 {
2496 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2497 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2498 offsetof(CPUX86State,segs[seg_reg].selector));
2499 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2500 tcg_gen_st_tl(cpu_T[0], cpu_env,
2501 offsetof(CPUX86State,segs[seg_reg].base));
2502 }
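
/*
 * Editor's illustrative sketch -- not part of the original translate.c.
 * In real/vm86 mode a segment load only latches the selector and derives
 * the base as selector * 16, which is exactly what the two stores in
 * gen_op_movl_seg_T0_vm() generate:
 */
static void load_seg_vm_sketch(CPUX86State *env, int seg, uint16_t selector)
{
    env->segs[seg].selector = selector;
    env->segs[seg].base = (uint32_t)selector << 4;
}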
2503
2504 /* move T0 to seg_reg and stop the translation if the CPU state may
2505 change. Never call this function with seg_reg == R_CS */
2506 static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2507 {
2508 if (s->pe && !s->vm86) {
2509 /* XXX: optimize by finding processor state dynamically */
2510 gen_update_cc_op(s);
2511 gen_jmp_im(cur_eip);
2512 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2513 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
2514 /* abort translation because the addseg value may change or
2515 because ss32 may change. For R_SS, translation must always
2516 stop as a special handling must be done to disable hardware
2517 interrupts for the next instruction */
2518 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2519 s->is_jmp = DISAS_TB_JUMP;
2520 } else {
2521 gen_op_movl_seg_T0_vm(seg_reg);
2522 if (seg_reg == R_SS)
2523 s->is_jmp = DISAS_TB_JUMP;
2524 }
2525 }
2526
2527 static inline int svm_is_rep(int prefixes)
2528 {
2529 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2530 }
2531
2532 static inline void
2533 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2534 uint32_t type, uint64_t param)
2535 {
2536 /* no SVM activated; fast case */
2537 if (likely(!(s->flags & HF_SVMI_MASK)))
2538 return;
2539 gen_update_cc_op(s);
2540 gen_jmp_im(pc_start - s->cs_base);
2541 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
2542 tcg_const_i64(param));
2543 }
2544
2545 static inline void
2546 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2547 {
2548 gen_svm_check_intercept_param(s, pc_start, type, 0);
2549 }
2550
2551 static inline void gen_stack_update(DisasContext *s, int addend)
2552 {
2553 #ifdef TARGET_X86_64
2554 if (CODE64(s)) {
2555 gen_op_add_reg_im(2, R_ESP, addend);
2556 } else
2557 #endif
2558 if (s->ss32) {
2559 gen_op_add_reg_im(1, R_ESP, addend);
2560 } else {
2561 gen_op_add_reg_im(0, R_ESP, addend);
2562 }
2563 }
2564
2565 /* generate a push. It depends on ss32, addseg and dflag */
2566 static void gen_push_T0(DisasContext *s)
2567 {
2568 #ifdef TARGET_X86_64
2569 if (CODE64(s)) {
2570 gen_op_movq_A0_reg(R_ESP);
2571 if (s->dflag) {
2572 gen_op_addq_A0_im(-8);
2573 gen_op_st_T0_A0(OT_QUAD + s->mem_index);
2574 } else {
2575 gen_op_addq_A0_im(-2);
2576 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2577 }
2578 gen_op_mov_reg_A0(2, R_ESP);
2579 } else
2580 #endif
2581 {
2582 gen_op_movl_A0_reg(R_ESP);
2583 if (!s->dflag)
2584 gen_op_addl_A0_im(-2);
2585 else
2586 gen_op_addl_A0_im(-4);
2587 if (s->ss32) {
2588 if (s->addseg) {
2589 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2590 gen_op_addl_A0_seg(s, R_SS);
2591 }
2592 } else {
2593 gen_op_andl_A0_ffff();
2594 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2595 gen_op_addl_A0_seg(s, R_SS);
2596 }
2597 gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
2598 if (s->ss32 && !s->addseg)
2599 gen_op_mov_reg_A0(1, R_ESP);
2600 else
2601 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
2602 }
2603 }
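
/*
 * Editor's illustrative sketch -- not part of the original translate.c.
 * Outside long mode a push stores 2 << dflag bytes and does its stack
 * arithmetic in 16 or 32 bits depending on ss32.  A plain-C model of the
 * slot address computed above, assuming the SS base is applied (the
 * addseg case):
 */
static target_ulong push_addr_sketch(CPUX86State *env, int dflag, int ss32,
                                     int *size)
{
    target_ulong esp;

    *size = 2 << dflag;                  /* 2- or 4-byte store */
    esp = env->regs[R_ESP] - *size;
    if (!ss32) {
        esp &= 0xffff;                   /* 16-bit stack pointer wraps */
    }
    return env->segs[R_SS].base + esp;   /* linear address of the new slot */
}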
2604
2605 /* generate a push. It depends on ss32, addseg and dflag */
2606 /* slower version for T1, only used for call Ev */
2607 static void gen_push_T1(DisasContext *s)
2608 {
2609 #ifdef TARGET_X86_64
2610 if (CODE64(s)) {
2611 gen_op_movq_A0_reg(R_ESP);
2612 if (s->dflag) {
2613 gen_op_addq_A0_im(-8);
2614 gen_op_st_T1_A0(OT_QUAD + s->mem_index);
2615 } else {
2616 gen_op_addq_A0_im(-2);
2617 gen_op_st_T1_A0(OT_WORD + s->mem_index); /* push the T1 value, not T0 */
2618 }
2619 gen_op_mov_reg_A0(2, R_ESP);
2620 } else
2621 #endif
2622 {
2623 gen_op_movl_A0_reg(R_ESP);
2624 if (!s->dflag)
2625 gen_op_addl_A0_im(-2);
2626 else
2627 gen_op_addl_A0_im(-4);
2628 if (s->ss32) {
2629 if (s->addseg) {
2630 gen_op_addl_A0_seg(s, R_SS);
2631 }
2632 } else {
2633 gen_op_andl_A0_ffff();
2634 gen_op_addl_A0_seg(s, R_SS);
2635 }
2636 gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
2637
2638 if (s->ss32 && !s->addseg)
2639 gen_op_mov_reg_A0(1, R_ESP);
2640 else
2641 gen_stack_update(s, (-2) << s->dflag);
2642 }
2643 }
2644
2645 /* two step pop is necessary for precise exceptions */
2646 static void gen_pop_T0(DisasContext *s)
2647 {
2648 #ifdef TARGET_X86_64
2649 if (CODE64(s)) {
2650 gen_op_movq_A0_reg(R_ESP);
2651 gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
2652 } else
2653 #endif
2654 {
2655 gen_op_movl_A0_reg(R_ESP);
2656 if (s->ss32) {
2657 if (s->addseg)
2658 gen_op_addl_A0_seg(s, R_SS);
2659 } else {
2660 gen_op_andl_A0_ffff();
2661 gen_op_addl_A0_seg(s, R_SS);
2662 }
2663 gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
2664 }
2665 }
2666
2667 static void gen_pop_update(DisasContext *s)
2668 {
2669 #ifdef TARGET_X86_64
2670 if (CODE64(s) && s->dflag) {
2671 gen_stack_update(s, 8);
2672 } else
2673 #endif
2674 {
2675 gen_stack_update(s, 2 << s->dflag);
2676 }
2677 }
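
/*
 * Editor's illustrative sketch -- not part of the original translate.c.
 * POP is split into gen_pop_T0() (the load) and gen_pop_update() (the ESP
 * adjustment) so that a faulting load leaves ESP untouched and the
 * instruction can be restarted precisely.  Assuming a flat 32-bit stack and
 * a hypothetical load_long() standing in for the generated memory load:
 */
static uint32_t pop_long_sketch(CPUX86State *env,
                                uint32_t (*load_long)(target_ulong addr))
{
    target_ulong addr = env->segs[R_SS].base + env->regs[R_ESP];
    uint32_t val = load_long(addr);   /* step 1: may fault, ESP unchanged */
    env->regs[R_ESP] += 4;            /* step 2: commit only after the load */
    return val;
}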
2678
2679 static void gen_stack_A0(DisasContext *s)
2680 {
2681 gen_op_movl_A0_reg(R_ESP);
2682 if (!s->ss32)
2683 gen_op_andl_A0_ffff();
2684 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2685 if (s->addseg)
2686 gen_op_addl_A0_seg(s, R_SS);
2687 }
2688
2689 /* NOTE: wrap-around with a 16-bit stack is not fully handled */
2690 static void gen_pusha(DisasContext *s)
2691 {
2692 int i;
2693 gen_op_movl_A0_reg(R_ESP);
2694 gen_op_addl_A0_im(-16 << s->dflag);
2695 if (!s->ss32)
2696 gen_op_andl_A0_ffff();
2697 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2698 if (s->addseg)
2699 gen_op_addl_A0_seg(s, R_SS);
2700 for (i = 0; i < 8; i++) {
2701 gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
2702 gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
2703 gen_op_addl_A0_im(2 << s->dflag);
2704 }
2705 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2706 }
2707
2708 /* NOTE: wrap-around with a 16-bit stack is not fully handled */
2709 static void gen_popa(DisasContext *s)
2710 {
2711 int i;
2712 gen_op_movl_A0_reg(R_ESP);
2713 if (!s->ss32)
2714 gen_op_andl_A0_ffff();
2715 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2716 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2717 if (s->addseg)
2718 gen_op_addl_A0_seg(s, R_SS);
2719 for (i = 0; i < 8; i++) {
2720 /* ESP is not reloaded */
2721 if (i != 3) {
2722 gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
2723 gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
2724 }
2725 gen_op_addl_A0_im(2 << s->dflag);
2726 }
2727 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2728 }
2729
2730 static void gen_enter(DisasContext *s, int esp_addend, int level)
2731 {
2732 int ot, opsize;
2733
2734 level &= 0x1f;
2735 #ifdef TARGET_X86_64
2736 if (CODE64(s)) {
2737 ot = s->dflag ? OT_QUAD : OT_WORD;
2738 opsize = 1 << ot;
2739
2740 gen_op_movl_A0_reg(R_ESP);
2741 gen_op_addq_A0_im(-opsize);
2742 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2743
2744 /* push bp */
2745 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2746 gen_op_st_T0_A0(ot + s->mem_index);
2747 if (level) {
2748 /* XXX: must save state */
2749 gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
2750 tcg_const_i32((ot == OT_QUAD)),
2751 cpu_T[1]);
2752 }
2753 gen_op_mov_reg_T1(ot, R_EBP);
2754 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2755 gen_op_mov_reg_T1(OT_QUAD, R_ESP);
2756 } else
2757 #endif
2758 {
2759 ot = s->dflag + OT_WORD;
2760 opsize = 2 << s->dflag;
2761
2762 gen_op_movl_A0_reg(R_ESP);
2763 gen_op_addl_A0_im(-opsize);
2764 if (!s->ss32)
2765 gen_op_andl_A0_ffff();
2766 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2767 if (s->addseg)
2768 gen_op_addl_A0_seg(s, R_SS);
2769 /* push bp */
2770 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2771 gen_op_st_T0_A0(ot + s->mem_index);
2772 if (level) {
2773 /* XXX: must save state */
2774 gen_helper_enter_level(cpu_env, tcg_const_i32(level),
2775 tcg_const_i32(s->dflag),
2776 cpu_T[1]);
2777 }
2778 gen_op_mov_reg_T1(ot, R_EBP);
2779 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2780 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2781 }
2782 }
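
/*
 * Editor's illustrative sketch -- not part of the original translate.c.
 * For the common level == 0 case the sequence generated above reduces to
 * "push EBP; mov EBP, ESP; sub ESP, imm16".  Assuming a flat 32-bit stack
 * and a hypothetical store_long() standing in for the generated store:
 */
static void enter0_sketch(CPUX86State *env, uint16_t frame_size,
                          void (*store_long)(target_ulong addr, uint32_t val))
{
    uint32_t esp = env->regs[R_ESP] - 4;                      /* slot for old EBP */
    store_long(env->segs[R_SS].base + esp, env->regs[R_EBP]); /* push EBP */
    env->regs[R_EBP] = esp;                                   /* new frame pointer */
    env->regs[R_ESP] = esp - frame_size;                      /* allocate locals */
}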
2783
2784 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2785 {
2786 gen_update_cc_op(s);
2787 gen_jmp_im(cur_eip);
2788 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
2789 s->is_jmp = DISAS_TB_JUMP;
2790 }
2791
2792 /* an interrupt is different from an exception because of the
2793 privilege checks */
2794 static void gen_interrupt(DisasContext *s, int intno,
2795 target_ulong cur_eip, target_ulong next_eip)
2796 {
2797 gen_update_cc_op(s);
2798 gen_jmp_im(cur_eip);
2799 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
2800 tcg_const_i32(next_eip - cur_eip));
2801 s->is_jmp = DISAS_TB_JUMP;
2802 }
2803
2804 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2805 {
2806 gen_update_cc_op(s);
2807 gen_jmp_im(cur_eip);
2808 gen_helper_debug(cpu_env);
2809 s->is_jmp = DISAS_TB_JUMP;
2810 }
2811
2812 /* generate a generic end of block. A trace exception is also generated
2813 if needed */
2814 static void gen_eob(DisasContext *s)
2815 {
2816 gen_update_cc_op(s);
2817 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2818 gen_helper_reset_inhibit_irq(cpu_env);
2819 }
2820 if (s->tb->flags & HF_RF_MASK) {
2821 gen_helper_reset_rf(cpu_env);
2822 }
2823 if (s->singlestep_enabled) {
2824 gen_helper_debug(cpu_env);
2825 } else if (s->tf) {
2826 gen_helper_single_step(cpu_env);
2827 } else {
2828 tcg_gen_exit_tb(0);
2829 }
2830 s->is_jmp = DISAS_TB_JUMP;
2831 }
2832
2833 /* generate a jump to eip. No segment change may happen before this,
2834 because the next block may be reached by direct chaining */
2835 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2836 {
2837 gen_update_cc_op(s);
2838 set_cc_op(s, CC_OP_DYNAMIC);
2839 if (s->jmp_opt) {
2840 gen_goto_tb(s, tb_num, eip);
2841 s->is_jmp = DISAS_TB_JUMP;
2842 } else {
2843 gen_jmp_im(eip);
2844 gen_eob(s);
2845 }
2846 }
2847
2848 static void gen_jmp(DisasContext *s, target_ulong eip)
2849 {
2850 gen_jmp_tb(s, eip, 0);
2851 }
2852
2853 static inline void gen_ldq_env_A0(int idx, int offset)
2854 {
2855 int mem_index = (idx >> 2) - 1;
2856 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2857 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2858 }
2859
2860 static inline void gen_stq_env_A0(int idx, int offset)
2861 {
2862 int mem_index = (idx >> 2) - 1;
2863 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2864 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2865 }
2866
2867 static inline void gen_ldo_env_A0(int idx, int offset)
2868 {
2869 int mem_index = (idx >> 2) - 1;
2870 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2871 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2872 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2873 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2874 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2875 }
2876
2877 static inline void gen_sto_env_A0(int idx, int offset)
2878 {
2879 int mem_index = (idx >> 2) - 1;
2880 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2881 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2882 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2883 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2884 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2885 }
2886
2887 static inline void gen_op_movo(int d_offset, int s_offset)
2888 {
2889 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2890 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2891 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2892 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
2893 }
2894
2895 static inline void gen_op_movq(int d_offset, int s_offset)
2896 {
2897 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2898 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2899 }
2900
2901 static inline void gen_op_movl(int d_offset, int s_offset)
2902 {
2903 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2904 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2905 }
2906
2907 static inline void gen_op_movq_env_0(int d_offset)
2908 {
2909 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2910 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2911 }
2912
2913 typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2914 typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2915 typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2916 typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2917 typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2918 typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2919 TCGv_i32 val);
2920 typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
2921 typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2922 TCGv val);
2923
2924 #define SSE_SPECIAL ((void *)1)
2925 #define SSE_DUMMY ((void *)2)
2926
2927 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2928 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2929 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
2930
2931 static const SSEFunc_0_epp sse_op_table1[256][4] = {
2932 /* 3DNow! extensions */
2933 [0x0e] = { SSE_DUMMY }, /* femms */
2934 [0x0f] = { SSE_DUMMY }, /* pf... */
2935 /* pure SSE operations */
2936 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2937 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2938 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2939 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2940 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2941 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2942 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2943 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2944
2945 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2946 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2947 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2948 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2949 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
2950 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
2951 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2952 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2953 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2954 [0x51] = SSE_FOP(sqrt),
2955 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2956 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2957 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2958 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2959 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2960 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2961 [0x58] = SSE_FOP(add),
2962 [0x59] = SSE_FOP(mul),
2963 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2964 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2965 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2966 [0x5c] = SSE_FOP(sub),
2967 [0x5d] = SSE_FOP(min),
2968 [0x5e] = SSE_FOP(div),
2969 [0x5f] = SSE_FOP(max),
2970
2971 [0xc2] = SSE_FOP(cmpeq),
2972 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2973 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
2974
2975 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2976 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2977
2978 /* MMX ops and their SSE extensions */
2979 [0x60] = MMX_OP2(punpcklbw),
2980 [0x61] = MMX_OP2(punpcklwd),
2981 [0x62] = MMX_OP2(punpckldq),
2982 [0x63] = MMX_OP2(packsswb),
2983 [0x64] = MMX_OP2(pcmpgtb),
2984 [0x65] = MMX_OP2(pcmpgtw),
2985 [0x66] = MMX_OP2(pcmpgtl),
2986 [0x67] = MMX_OP2(packuswb),
2987 [0x68] = MMX_OP2(punpckhbw),
2988 [0x69] = MMX_OP2(punpckhwd),
2989 [0x6a] = MMX_OP2(punpckhdq),
2990 [0x6b] = MMX_OP2(packssdw),
2991 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2992 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2993 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2994 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2995 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2996 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2997 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2998 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2999 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
3000 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
3001 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
3002 [0x74] = MMX_OP2(pcmpeqb),
3003 [0x75] = MMX_OP2(pcmpeqw),
3004 [0x76] = MMX_OP2(pcmpeql),
3005 [0x77] = { SSE_DUMMY }, /* emms */
3006 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
3007 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
3008 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
3009 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
3010 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
3011 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
3012 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
3013 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
3014 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
3015 [0xd1] = MMX_OP2(psrlw),
3016 [0xd2] = MMX_OP2(psrld),
3017 [0xd3] = MMX_OP2(psrlq),
3018 [0xd4] = MMX_OP2(paddq),
3019 [0xd5] = MMX_OP2(pmullw),
3020 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
3021 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
3022 [0xd8] = MMX_OP2(psubusb),
3023 [0xd9] = MMX_OP2(psubusw),
3024 [0xda] = MMX_OP2(pminub),
3025 [0xdb] = MMX_OP2(pand),
3026 [0xdc] = MMX_OP2(paddusb),
3027 [0xdd] = MMX_OP2(paddusw),
3028 [0xde] = MMX_OP2(pmaxub),
3029 [0xdf] = MMX_OP2(pandn),
3030 [0xe0] = MMX_OP2(pavgb),
3031 [0xe1] = MMX_OP2(psraw),
3032 [0xe2] = MMX_OP2(psrad),
3033 [0xe3] = MMX_OP2(pavgw),
3034 [0xe4] = MMX_OP2(pmulhuw),
3035 [0xe5] = MMX_OP2(pmulhw),
3036 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
3037 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
3038 [0xe8] = MMX_OP2(psubsb),
3039 [0xe9] = MMX_OP2(psubsw),
3040 [0xea] = MMX_OP2(pminsw),
3041 [0xeb] = MMX_OP2(por),
3042 [0xec] = MMX_OP2(paddsb),
3043 [0xed] = MMX_OP2(paddsw),
3044 [0xee] = MMX_OP2(pmaxsw),
3045 [0xef] = MMX_OP2(pxor),
3046 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
3047 [0xf1] = MMX_OP2(psllw),
3048 [0xf2] = MMX_OP2(pslld),
3049 [0xf3] = MMX_OP2(psllq),
3050 [0xf4] = MMX_OP2(pmuludq),
3051 [0xf5] = MMX_OP2(pmaddwd),
3052 [0xf6] = MMX_OP2(psadbw),
3053 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
3054 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
3055 [0xf8] = MMX_OP2(psubb),
3056 [0xf9] = MMX_OP2(psubw),
3057 [0xfa] = MMX_OP2(psubl),
3058 [0xfb] = MMX_OP2(psubq),
3059 [0xfc] = MMX_OP2(paddb),
3060 [0xfd] = MMX_OP2(paddw),
3061 [0xfe] = MMX_OP2(paddl),
3062 };
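
/*
 * Editor's illustrative sketch -- not part of the original translate.c.
 * The second index into sse_op_table1[] is the mandatory-prefix column
 * computed in gen_sse(): column 0 is the unprefixed (packed single) form,
 * 0x66 selects column 1 (packed/scalar double), 0xf3 column 2 (scalar
 * single) and 0xf2 column 3 (scalar double).  So row 0x58 resolves to
 * addps/addpd/addss/addsd.  As a standalone helper:
 */
static int sse_prefix_column_sketch(int prefix)
{
    if (prefix & PREFIX_DATA) {
        return 1;                /* 0x66 */
    } else if (prefix & PREFIX_REPZ) {
        return 2;                /* 0xf3 */
    } else if (prefix & PREFIX_REPNZ) {
        return 3;                /* 0xf2 */
    }
    return 0;                    /* no prefix */
}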
3063
3064 static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
3065 [0 + 2] = MMX_OP2(psrlw),
3066 [0 + 4] = MMX_OP2(psraw),
3067 [0 + 6] = MMX_OP2(psllw),
3068 [8 + 2] = MMX_OP2(psrld),
3069 [8 + 4] = MMX_OP2(psrad),
3070 [8 + 6] = MMX_OP2(pslld),
3071 [16 + 2] = MMX_OP2(psrlq),
3072 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
3073 [16 + 6] = MMX_OP2(psllq),
3074 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
3075 };
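
/*
 * Editor's illustrative sketch -- not part of the original translate.c.
 * For the 0x71/0x72/0x73 shift-by-immediate groups, gen_sse() indexes
 * sse_op_table2[] by packing the opcode group (word/dword/qword shifts) and
 * the ModRM /reg field, e.g. "psrld xmm, imm8" is opcode 0x72 with /2,
 * giving row 8 + 2 above:
 */
static int shift_group_index_sketch(int b, int modrm)
{
    /* 0x71 -> rows 0..7, 0x72 -> rows 8..15, 0x73 -> rows 16..23 */
    return ((b - 1) & 3) * 8 + ((modrm >> 3) & 7);
}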
3076
3077 static const SSEFunc_0_epi sse_op_table3ai[] = {
3078 gen_helper_cvtsi2ss,
3079 gen_helper_cvtsi2sd
3080 };
3081
3082 #ifdef TARGET_X86_64
3083 static const SSEFunc_0_epl sse_op_table3aq[] = {
3084 gen_helper_cvtsq2ss,
3085 gen_helper_cvtsq2sd
3086 };
3087 #endif
3088
3089 static const SSEFunc_i_ep sse_op_table3bi[] = {
3090 gen_helper_cvttss2si,
3091 gen_helper_cvtss2si,
3092 gen_helper_cvttsd2si,
3093 gen_helper_cvtsd2si
3094 };
3095
3096 #ifdef TARGET_X86_64
3097 static const SSEFunc_l_ep sse_op_table3bq[] = {
3098 gen_helper_cvttss2sq,
3099 gen_helper_cvtss2sq,
3100 gen_helper_cvttsd2sq,
3101 gen_helper_cvtsd2sq
3102 };
3103 #endif
3104
3105 static const SSEFunc_0_epp sse_op_table4[8][4] = {
3106 SSE_FOP(cmpeq),
3107 SSE_FOP(cmplt),
3108 SSE_FOP(cmple),
3109 SSE_FOP(cmpunord),
3110 SSE_FOP(cmpneq),
3111 SSE_FOP(cmpnlt),
3112 SSE_FOP(cmpnle),
3113 SSE_FOP(cmpord),
3114 };
3115
3116 static const SSEFunc_0_epp sse_op_table5[256] = {
3117 [0x0c] = gen_helper_pi2fw,
3118 [0x0d] = gen_helper_pi2fd,
3119 [0x1c] = gen_helper_pf2iw,
3120 [0x1d] = gen_helper_pf2id,
3121 [0x8a] = gen_helper_pfnacc,
3122 [0x8e] = gen_helper_pfpnacc,
3123 [0x90] = gen_helper_pfcmpge,
3124 [0x94] = gen_helper_pfmin,
3125 [0x96] = gen_helper_pfrcp,
3126 [0x97] = gen_helper_pfrsqrt,
3127 [0x9a] = gen_helper_pfsub,
3128 [0x9e] = gen_helper_pfadd,
3129 [0xa0] = gen_helper_pfcmpgt,
3130 [0xa4] = gen_helper_pfmax,
3131 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
3132 [0xa7] = gen_helper_movq, /* pfrsqit1 */
3133 [0xaa] = gen_helper_pfsubr,
3134 [0xae] = gen_helper_pfacc,
3135 [0xb0] = gen_helper_pfcmpeq,
3136 [0xb4] = gen_helper_pfmul,
3137 [0xb6] = gen_helper_movq, /* pfrcpit2 */
3138 [0xb7] = gen_helper_pmulhrw_mmx,
3139 [0xbb] = gen_helper_pswapd,
3140 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
3141 };
3142
3143 struct SSEOpHelper_epp {
3144 SSEFunc_0_epp op[2];
3145 uint32_t ext_mask;
3146 };
3147
3148 struct SSEOpHelper_eppi {
3149 SSEFunc_0_eppi op[2];
3150 uint32_t ext_mask;
3151 };
3152
3153 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
3154 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3155 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
3156 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
3157
3158 static const struct SSEOpHelper_epp sse_op_table6[256] = {
3159 [0x00] = SSSE3_OP(pshufb),
3160 [0x01] = SSSE3_OP(phaddw),
3161 [0x02] = SSSE3_OP(phaddd),
3162 [0x03] = SSSE3_OP(phaddsw),
3163 [0x04] = SSSE3_OP(pmaddubsw),
3164 [0x05] = SSSE3_OP(phsubw),
3165 [0x06] = SSSE3_OP(phsubd),
3166 [0x07] = SSSE3_OP(phsubsw),
3167 [0x08] = SSSE3_OP(psignb),
3168 [0x09] = SSSE3_OP(psignw),
3169 [0x0a] = SSSE3_OP(psignd),
3170 [0x0b] = SSSE3_OP(pmulhrsw),
3171 [0x10] = SSE41_OP(pblendvb),
3172 [0x14] = SSE41_OP(blendvps),
3173 [0x15] = SSE41_OP(blendvpd),
3174 [0x17] = SSE41_OP(ptest),
3175 [0x1c] = SSSE3_OP(pabsb),
3176 [0x1d] = SSSE3_OP(pabsw),
3177 [0x1e] = SSSE3_OP(pabsd),
3178 [0x20] = SSE41_OP(pmovsxbw),
3179 [0x21] = SSE41_OP(pmovsxbd),
3180 [0x22] = SSE41_OP(pmovsxbq),
3181 [0x23] = SSE41_OP(pmovsxwd),
3182 [0x24] = SSE41_OP(pmovsxwq),
3183 [0x25] = SSE41_OP(pmovsxdq),
3184 [0x28] = SSE41_OP(pmuldq),
3185 [0x29] = SSE41_OP(pcmpeqq),
3186 [0x2a] = SSE41_SPECIAL, /* movntdqa */
3187 [0x2b] = SSE41_OP(packusdw),
3188 [0x30] = SSE41_OP(pmovzxbw),
3189 [0x31] = SSE41_OP(pmovzxbd),
3190 [0x32] = SSE41_OP(pmovzxbq),
3191 [0x33] = SSE41_OP(pmovzxwd),
3192 [0x34] = SSE41_OP(pmovzxwq),
3193 [0x35] = SSE41_OP(pmovzxdq),
3194 [0x37] = SSE42_OP(pcmpgtq),
3195 [0x38] = SSE41_OP(pminsb),
3196 [0x39] = SSE41_OP(pminsd),
3197 [0x3a] = SSE41_OP(pminuw),
3198 [0x3b] = SSE41_OP(pminud),
3199 [0x3c] = SSE41_OP(pmaxsb),
3200 [0x3d] = SSE41_OP(pmaxsd),
3201 [0x3e] = SSE41_OP(pmaxuw),
3202 [0x3f] = SSE41_OP(pmaxud),
3203 [0x40] = SSE41_OP(pmulld),
3204 [0x41] = SSE41_OP(phminposuw),
3205 };
3206
3207 static const struct SSEOpHelper_eppi sse_op_table7[256] = {
3208 [0x08] = SSE41_OP(roundps),
3209 [0x09] = SSE41_OP(roundpd),
3210 [0x0a] = SSE41_OP(roundss),
3211 [0x0b] = SSE41_OP(roundsd),
3212 [0x0c] = SSE41_OP(blendps),
3213 [0x0d] = SSE41_OP(blendpd),
3214 [0x0e] = SSE41_OP(pblendw),
3215 [0x0f] = SSSE3_OP(palignr),
3216 [0x14] = SSE41_SPECIAL, /* pextrb */
3217 [0x15] = SSE41_SPECIAL, /* pextrw */
3218 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
3219 [0x17] = SSE41_SPECIAL, /* extractps */
3220 [0x20] = SSE41_SPECIAL, /* pinsrb */
3221 [0x21] = SSE41_SPECIAL, /* insertps */
3222 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
3223 [0x40] = SSE41_OP(dpps),
3224 [0x41] = SSE41_OP(dppd),
3225 [0x42] = SSE41_OP(mpsadbw),
3226 [0x60] = SSE42_OP(pcmpestrm),
3227 [0x61] = SSE42_OP(pcmpestri),
3228 [0x62] = SSE42_OP(pcmpistrm),
3229 [0x63] = SSE42_OP(pcmpistri),
3230 };
3231
3232 static void gen_sse(CPUX86State *env, DisasContext *s, int b,
3233 target_ulong pc_start, int rex_r)
3234 {
3235 int b1, op1_offset, op2_offset, is_xmm, val, ot;
3236 int modrm, mod, rm, reg, reg_addr, offset_addr;
3237 SSEFunc_0_epp sse_fn_epp;
3238 SSEFunc_0_eppi sse_fn_eppi;
3239 SSEFunc_0_ppi sse_fn_ppi;
3240 SSEFunc_0_eppt sse_fn_eppt;
3241
3242 b &= 0xff;
3243 if (s->prefix & PREFIX_DATA)
3244 b1 = 1;
3245 else if (s->prefix & PREFIX_REPZ)
3246 b1 = 2;
3247 else if (s->prefix & PREFIX_REPNZ)
3248 b1 = 3;
3249 else
3250 b1 = 0;
3251 sse_fn_epp = sse_op_table1[b][b1];
3252 if (!sse_fn_epp) {
3253 goto illegal_op;
3254 }
3255 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3256 is_xmm = 1;
3257 } else {
3258 if (b1 == 0) {
3259 /* MMX case */
3260 is_xmm = 0;
3261 } else {
3262 is_xmm = 1;
3263 }
3264 }
3265 /* simple MMX/SSE operation */
3266 if (s->flags & HF_TS_MASK) {
3267 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3268 return;
3269 }
3270 if (s->flags & HF_EM_MASK) {
3271 illegal_op:
3272 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3273 return;
3274 }
3275 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3276 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3277 goto illegal_op;
3278 if (b == 0x0e) {
3279 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3280 goto illegal_op;
3281 /* femms */
3282 gen_helper_emms(cpu_env);
3283 return;
3284 }
3285 if (b == 0x77) {
3286 /* emms */
3287 gen_helper_emms(cpu_env);
3288 return;
3289 }
3290 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3291 the static cpu state) */
3292 if (!is_xmm) {
3293 gen_helper_enter_mmx(cpu_env);
3294 }
3295
3296 modrm = cpu_ldub_code(env, s->pc++);
3297 reg = ((modrm >> 3) & 7);
3298 if (is_xmm)
3299 reg |= rex_r;
3300 mod = (modrm >> 6) & 3;
3301 if (sse_fn_epp == SSE_SPECIAL) {
3302 b |= (b1 << 8);
3303 switch(b) {
3304 case 0x0e7: /* movntq */
3305 if (mod == 3)
3306 goto illegal_op;
3307 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3308 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3309 break;
3310 case 0x1e7: /* movntdq */
3311 case 0x02b: /* movntps */
3312 case 0x12b: /* movntpd */
3313 if (mod == 3)
3314 goto illegal_op;
3315 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3316 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3317 break;
3318 case 0x3f0: /* lddqu */
3319 if (mod == 3)
3320 goto illegal_op;
3321 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3322 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3323 break;
3324 case 0x22b: /* movntss */
3325 case 0x32b: /* movntsd */
3326 if (mod == 3)
3327 goto illegal_op;
3328 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3329 if (b1 & 1) {
3330 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
3331 xmm_regs[reg]));
3332 } else {
3333 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3334 xmm_regs[reg].XMM_L(0)));
3335 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3336 }
3337 break;
3338 case 0x6e: /* movd mm, ea */
3339 #ifdef TARGET_X86_64
3340 if (s->dflag == 2) {
3341 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
3342 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3343 } else
3344 #endif
3345 {
3346 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
3347 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3348 offsetof(CPUX86State,fpregs[reg].mmx));
3349 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3350 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3351 }
3352 break;
3353 case 0x16e: /* movd xmm, ea */
3354 #ifdef TARGET_X86_64
3355 if (s->dflag == 2) {
3356 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
3357 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3358 offsetof(CPUX86State,xmm_regs[reg]));
3359 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3360 } else
3361 #endif
3362 {
3363 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
3364 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3365 offsetof(CPUX86State,xmm_regs[reg]));
3366 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3367 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3368 }
3369 break;
3370 case 0x6f: /* movq mm, ea */
3371 if (mod != 3) {
3372 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3373 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3374 } else {
3375 rm = (modrm & 7);
3376 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3377 offsetof(CPUX86State,fpregs[rm].mmx));
3378 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3379 offsetof(CPUX86State,fpregs[reg].mmx));
3380 }
3381 break;
3382 case 0x010: /* movups */
3383 case 0x110: /* movupd */
3384 case 0x028: /* movaps */
3385 case 0x128: /* movapd */
3386 case 0x16f: /* movdqa xmm, ea */
3387 case 0x26f: /* movdqu xmm, ea */
3388 if (mod != 3) {
3389 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3390 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3391 } else {
3392 rm = (modrm & 7) | REX_B(s);
3393 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3394 offsetof(CPUX86State,xmm_regs[rm]));
3395 }
3396 break;
3397 case 0x210: /* movss xmm, ea */
3398 if (mod != 3) {
3399 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3400 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3401 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3402 gen_op_movl_T0_0();
3403 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3404 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3405 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3406 } else {
3407 rm = (modrm & 7) | REX_B(s);
3408 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3409 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3410 }
3411 break;
3412 case 0x310: /* movsd xmm, ea */
3413 if (mod != 3) {
3414 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3415 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3416 gen_op_movl_T0_0();
3417 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3418 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3419 } else {
3420 rm = (modrm & 7) | REX_B(s);
3421 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3422 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3423 }
3424 break;
3425 case 0x012: /* movlps */
3426 case 0x112: /* movlpd */
3427 if (mod != 3) {
3428 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3429 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3430 } else {
3431 /* movhlps */
3432 rm = (modrm & 7) | REX_B(s);
3433 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3434 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3435 }
3436 break;
3437 case 0x212: /* movsldup */
3438 if (mod != 3) {
3439 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3440 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3441 } else {
3442 rm = (modrm & 7) | REX_B(s);
3443 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3444 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3445 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3446 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3447 }
3448 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3449 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3450 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3451 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3452 break;
3453 case 0x312: /* movddup */
3454 if (mod != 3) {
3455 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3456 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3457 } else {
3458 rm = (modrm & 7) | REX_B(s);
3459 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3460 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3461 }
3462 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3463 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3464 break;
3465 case 0x016: /* movhps */
3466 case 0x116: /* movhpd */
3467 if (mod != 3) {
3468 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3469 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3470 } else {
3471 /* movlhps */
3472 rm = (modrm & 7) | REX_B(s);
3473 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3474 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3475 }
3476 break;
3477 case 0x216: /* movshdup */
3478 if (mod != 3) {
3479 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3480 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3481 } else {
3482 rm = (modrm & 7) | REX_B(s);
3483 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3484 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3485 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3486 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3487 }
3488 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3489 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3490 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3491 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3492 break;
3493 case 0x178:
3494 case 0x378:
3495 {
3496 int bit_index, field_length;
3497
3498 if (b1 == 1 && reg != 0)
3499 goto illegal_op;
3500 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3501 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
3502 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3503 offsetof(CPUX86State,xmm_regs[reg]));
3504 if (b1 == 1)
3505 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3506 tcg_const_i32(bit_index),
3507 tcg_const_i32(field_length));
3508 else
3509 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3510 tcg_const_i32(bit_index),
3511 tcg_const_i32(field_length));
3512 }
3513 break;
3514 case 0x7e: /* movd ea, mm */
3515 #ifdef TARGET_X86_64
3516 if (s->dflag == 2) {
3517 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3518 offsetof(CPUX86State,fpregs[reg].mmx));
3519 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
3520 } else
3521 #endif
3522 {
3523 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3524 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3525 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
3526 }
3527 break;
3528 case 0x17e: /* movd ea, xmm */
3529 #ifdef TARGET_X86_64
3530 if (s->dflag == 2) {
3531 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3532 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3533 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
3534 } else
3535 #endif
3536 {
3537 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3538 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3539 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
3540 }
3541 break;
3542 case 0x27e: /* movq xmm, ea */
3543 if (mod != 3) {
3544 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3545 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3546 } else {
3547 rm = (modrm & 7) | REX_B(s);
3548 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3549 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3550 }
3551 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3552 break;
3553 case 0x7f: /* movq ea, mm */
3554 if (mod != 3) {
3555 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3556 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3557 } else {
3558 rm = (modrm & 7);
3559 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3560 offsetof(CPUX86State,fpregs[reg].mmx));
3561 }
3562 break;
3563 case 0x011: /* movups */
3564 case 0x111: /* movupd */
3565 case 0x029: /* movaps */
3566 case 0x129: /* movapd */
3567 case 0x17f: /* movdqa ea, xmm */
3568 case 0x27f: /* movdqu ea, xmm */
3569 if (mod != 3) {
3570 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3571 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3572 } else {
3573 rm = (modrm & 7) | REX_B(s);
3574 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3575 offsetof(CPUX86State,xmm_regs[reg]));
3576 }
3577 break;
3578 case 0x211: /* movss ea, xmm */
3579 if (mod != 3) {
3580 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3581 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3582 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3583 } else {
3584 rm = (modrm & 7) | REX_B(s);
3585 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3586 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3587 }
3588 break;
3589 case 0x311: /* movsd ea, xmm */
3590 if (mod != 3) {
3591 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3592 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3593 } else {
3594 rm = (modrm & 7) | REX_B(s);
3595 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3596 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3597 }
3598 break;
3599 case 0x013: /* movlps */
3600 case 0x113: /* movlpd */
3601 if (mod != 3) {
3602 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3603 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3604 } else {
3605 goto illegal_op;
3606 }
3607 break;
3608 case 0x017: /* movhps */
3609 case 0x117: /* movhpd */
3610 if (mod != 3) {
3611 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3612 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3613 } else {
3614 goto illegal_op;
3615 }
3616 break;
3617 case 0x71: /* shift mm, im */
3618 case 0x72:
3619 case 0x73:
3620 case 0x171: /* shift xmm, im */
3621 case 0x172:
3622 case 0x173:
3623 if (b1 >= 2) {
3624 goto illegal_op;
3625 }
3626 val = cpu_ldub_code(env, s->pc++);
3627 if (is_xmm) {
3628 gen_op_movl_T0_im(val);
3629 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3630 gen_op_movl_T0_0();
3631 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3632 op1_offset = offsetof(CPUX86State,xmm_t0);
3633 } else {
3634 gen_op_movl_T0_im(val);
3635 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3636 gen_op_movl_T0_0();
3637 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3638 op1_offset = offsetof(CPUX86State,mmx_t0);
3639 }
3640 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3641 ((modrm >> 3) & 7)][b1];
3642 if (!sse_fn_epp) {
3643 goto illegal_op;
3644 }
3645 if (is_xmm) {
3646 rm = (modrm & 7) | REX_B(s);
3647 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3648 } else {
3649 rm = (modrm & 7);
3650 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3651 }
3652 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3653 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3654 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3655 break;
3656 case 0x050: /* movmskps */
3657 rm = (modrm & 7) | REX_B(s);
3658 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3659 offsetof(CPUX86State,xmm_regs[rm]));
3660 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3661 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3662 gen_op_mov_reg_T0(OT_LONG, reg);
3663 break;
3664 case 0x150: /* movmskpd */
3665 rm = (modrm & 7) | REX_B(s);
3666 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3667 offsetof(CPUX86State,xmm_regs[rm]));
3668 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3669 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3670 gen_op_mov_reg_T0(OT_LONG, reg);
3671 break;
3672 case 0x02a: /* cvtpi2ps */
3673 case 0x12a: /* cvtpi2pd */
3674 gen_helper_enter_mmx(cpu_env);
3675 if (mod != 3) {
3676 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3677 op2_offset = offsetof(CPUX86State,mmx_t0);
3678 gen_ldq_env_A0(s->mem_index, op2_offset);
3679 } else {
3680 rm = (modrm & 7);
3681 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3682 }
3683 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3684 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3685 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3686 switch(b >> 8) {
3687 case 0x0:
3688 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
3689 break;
3690 default:
3691 case 0x1:
3692 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
3693 break;
3694 }
3695 break;
3696 case 0x22a: /* cvtsi2ss */
3697 case 0x32a: /* cvtsi2sd */
3698 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3699 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3700 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3701 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3702 if (ot == OT_LONG) {
3703 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
3704 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3705 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
3706 } else {
3707 #ifdef TARGET_X86_64
3708 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3709 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
3710 #else
3711 goto illegal_op;
3712 #endif
3713 }
3714 break;
3715 case 0x02c: /* cvttps2pi */
3716 case 0x12c: /* cvttpd2pi */
3717 case 0x02d: /* cvtps2pi */
3718 case 0x12d: /* cvtpd2pi */
3719 gen_helper_enter_mmx(cpu_env);
3720 if (mod != 3) {
3721 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3722 op2_offset = offsetof(CPUX86State,xmm_t0);
3723 gen_ldo_env_A0(s->mem_index, op2_offset);
3724 } else {
3725 rm = (modrm & 7) | REX_B(s);
3726 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3727 }
3728 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3729 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3730 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3731 switch(b) {
3732 case 0x02c:
3733 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3734 break;
3735 case 0x12c:
3736 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3737 break;
3738 case 0x02d:
3739 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3740 break;
3741 case 0x12d:
3742 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3743 break;
3744 }
3745 break;
3746 case 0x22c: /* cvttss2si */
3747 case 0x32c: /* cvttsd2si */
3748 case 0x22d: /* cvtss2si */
3749 case 0x32d: /* cvtsd2si */
3750 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3751 if (mod != 3) {
3752 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3753 if ((b >> 8) & 1) {
3754 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
3755 } else {
3756 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3757 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3758 }
3759 op2_offset = offsetof(CPUX86State,xmm_t0);
3760 } else {
3761 rm = (modrm & 7) | REX_B(s);
3762 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3763 }
3764 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3765 if (ot == OT_LONG) {
3766 SSEFunc_i_ep sse_fn_i_ep =
3767 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
3768 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3769 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3770 } else {
3771 #ifdef TARGET_X86_64
3772 SSEFunc_l_ep sse_fn_l_ep =
3773 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
3774 sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
3775 #else
3776 goto illegal_op;
3777 #endif
3778 }
3779 gen_op_mov_reg_T0(ot, reg);
3780 break;
3781 case 0xc4: /* pinsrw */
3782 case 0x1c4:
3783 s->rip_offset = 1;
3784 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
3785 val = cpu_ldub_code(env, s->pc++);
3786 if (b1) {
3787 val &= 7;
3788 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3789 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3790 } else {
3791 val &= 3;
3792 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3793 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3794 }
3795 break;
3796 case 0xc5: /* pextrw */
3797 case 0x1c5:
3798 if (mod != 3)
3799 goto illegal_op;
3800 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3801 val = cpu_ldub_code(env, s->pc++);
3802 if (b1) {
3803 val &= 7;
3804 rm = (modrm & 7) | REX_B(s);
3805 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3806 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3807 } else {
3808 val &= 3;
3809 rm = (modrm & 7);
3810 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3811 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3812 }
3813 reg = ((modrm >> 3) & 7) | rex_r;
3814 gen_op_mov_reg_T0(ot, reg);
3815 break;
3816 case 0x1d6: /* movq ea, xmm */
3817 if (mod != 3) {
3818 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3819 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3820 } else {
3821 rm = (modrm & 7) | REX_B(s);
3822 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3823 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3824 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3825 }
3826 break;
3827 case 0x2d6: /* movq2dq */
3828 gen_helper_enter_mmx(cpu_env);
3829 rm = (modrm & 7);
3830 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3831 offsetof(CPUX86State,fpregs[rm].mmx));
3832 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3833 break;
3834 case 0x3d6: /* movdq2q */
3835 gen_helper_enter_mmx(cpu_env);
3836 rm = (modrm & 7) | REX_B(s);
3837 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3838 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3839 break;
3840 case 0xd7: /* pmovmskb */
3841 case 0x1d7:
3842 if (mod != 3)
3843 goto illegal_op;
3844 if (b1) {
3845 rm = (modrm & 7) | REX_B(s);
3846 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3847 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3848 } else {
3849 rm = (modrm & 7);
3850 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3851 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3852 }
3853 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3854 reg = ((modrm >> 3) & 7) | rex_r;
3855 gen_op_mov_reg_T0(OT_LONG, reg);
3856 break;
3857 case 0x138:
3858 if (s->prefix & PREFIX_REPNZ)
3859 goto crc32;
3860 case 0x038:
3861 b = modrm;
3862 modrm = cpu_ldub_code(env, s->pc++);
3863 rm = modrm & 7;
3864 reg = ((modrm >> 3) & 7) | rex_r;
3865 mod = (modrm >> 6) & 3;
3866 if (b1 >= 2) {
3867 goto illegal_op;
3868 }
3869
3870 sse_fn_epp = sse_op_table6[b].op[b1];
3871 if (!sse_fn_epp) {
3872 goto illegal_op;
3873 }
3874 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3875 goto illegal_op;
3876
3877 if (b1) {
3878 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3879 if (mod == 3) {
3880 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3881 } else {
3882 op2_offset = offsetof(CPUX86State,xmm_t0);
3883 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3884 switch (b) {
3885 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3886 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3887 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3888 gen_ldq_env_A0(s->mem_index, op2_offset +
3889 offsetof(XMMReg, XMM_Q(0)));
3890 break;
3891 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3892 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3893 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3894 (s->mem_index >> 2) - 1);
3895 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3896 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3897 offsetof(XMMReg, XMM_L(0)));
3898 break;
3899 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3900 tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
3901 (s->mem_index >> 2) - 1);
3902 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3903 offsetof(XMMReg, XMM_W(0)));
3904 break;
3905 case 0x2a: /* movntdqa */
3906 gen_ldo_env_A0(s->mem_index, op1_offset);
3907 return;
3908 default:
3909 gen_ldo_env_A0(s->mem_index, op2_offset);
3910 }
3911 }
3912 } else {
3913 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3914 if (mod == 3) {
3915 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3916 } else {
3917 op2_offset = offsetof(CPUX86State,mmx_t0);
3918 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3919 gen_ldq_env_A0(s->mem_index, op2_offset);
3920 }
3921 }
3922 if (sse_fn_epp == SSE_SPECIAL) {
3923 goto illegal_op;
3924 }
3925
3926 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3927 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3928 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3929
3930 if (b == 0x17) {
3931 set_cc_op(s, CC_OP_EFLAGS);
3932 }
3933 break;
3934 case 0x338: /* crc32 */
3935 crc32:
3936 b = modrm;
3937 modrm = cpu_ldub_code(env, s->pc++);
3938 reg = ((modrm >> 3) & 7) | rex_r;
3939
3940 if (b != 0xf0 && b != 0xf1)
3941 goto illegal_op;
3942 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
3943 goto illegal_op;
3944
3945 if (b == 0xf0)
3946 ot = OT_BYTE;
3947 else if (b == 0xf1 && s->dflag != 2)
3948 if (s->prefix & PREFIX_DATA)
3949 ot = OT_WORD;
3950 else
3951 ot = OT_LONG;
3952 else
3953 ot = OT_QUAD;
3954
3955 gen_op_mov_TN_reg(OT_LONG, 0, reg);
3956 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3957 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3958 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3959 cpu_T[0], tcg_const_i32(8 << ot));
3960
3961 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3962 gen_op_mov_reg_T0(ot, reg);
3963 break;
3964 case 0x03a:
3965 case 0x13a:
3966 b = modrm;
3967 modrm = cpu_ldub_code(env, s->pc++);
3968 rm = modrm & 7;
3969 reg = ((modrm >> 3) & 7) | rex_r;
3970 mod = (modrm >> 6) & 3;
3971 if (b1 >= 2) {
3972 goto illegal_op;
3973 }
3974
3975 sse_fn_eppi = sse_op_table7[b].op[b1];
3976 if (!sse_fn_eppi) {
3977 goto illegal_op;
3978 }
3979 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
3980 goto illegal_op;
3981
3982 if (sse_fn_eppi == SSE_SPECIAL) {
3983 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3984 rm = (modrm & 7) | REX_B(s);
3985 if (mod != 3)
3986 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3987 reg = ((modrm >> 3) & 7) | rex_r;
3988 val = cpu_ldub_code(env, s->pc++);
3989 switch (b) {
3990 case 0x14: /* pextrb */
3991 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3992 xmm_regs[reg].XMM_B(val & 15)));
3993 if (mod == 3)
3994 gen_op_mov_reg_T0(ot, rm);
3995 else
3996 tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
3997 (s->mem_index >> 2) - 1);
3998 break;
3999 case 0x15: /* pextrw */
4000 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4001 xmm_regs[reg].XMM_W(val & 7)));
4002 if (mod == 3)
4003 gen_op_mov_reg_T0(ot, rm);
4004 else
4005 tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
4006 (s->mem_index >> 2) - 1);
4007 break;
4008 case 0x16:
4009 if (ot == OT_LONG) { /* pextrd */
4010 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4011 offsetof(CPUX86State,
4012 xmm_regs[reg].XMM_L(val & 3)));
4013 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4014 if (mod == 3)
4015 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4016 else
4017 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
4018 (s->mem_index >> 2) - 1);
4019 } else { /* pextrq */
4020 #ifdef TARGET_X86_64
4021 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4022 offsetof(CPUX86State,
4023 xmm_regs[reg].XMM_Q(val & 1)));
4024 if (mod == 3)
4025 gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
4026 else
4027 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
4028 (s->mem_index >> 2) - 1);
4029 #else
4030 goto illegal_op;
4031 #endif
4032 }
4033 break;
4034 case 0x17: /* extractps */
4035 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4036 xmm_regs[reg].XMM_L(val & 3)));
4037 if (mod == 3)
4038 gen_op_mov_reg_T0(ot, rm);
4039 else
4040 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
4041 (s->mem_index >> 2) - 1);
4042 break;
4043 case 0x20: /* pinsrb */
4044 if (mod == 3)
4045 gen_op_mov_TN_reg(OT_LONG, 0, rm);
4046 else
4047 tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0,
4048 (s->mem_index >> 2) - 1);
4049 tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State,
4050 xmm_regs[reg].XMM_B(val & 15)));
4051 break;
4052 case 0x21: /* insertps */
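     /* insertps immediate: bits 7:6 select the source element
        (register form only), bits 5:4 the destination element,
        bits 3:0 a zero mask applied to the destination. */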
4053 if (mod == 3) {
4054 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4055 offsetof(CPUX86State,xmm_regs[rm]
4056 .XMM_L((val >> 6) & 3)));
4057 } else {
4058 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
4059 (s->mem_index >> 2) - 1);
4060 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
4061 }
4062 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4063 offsetof(CPUX86State,xmm_regs[reg]
4064 .XMM_L((val >> 4) & 3)));
4065 if ((val >> 0) & 1)
4066 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4067 cpu_env, offsetof(CPUX86State,
4068 xmm_regs[reg].XMM_L(0)));
4069 if ((val >> 1) & 1)
4070 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4071 cpu_env, offsetof(CPUX86State,
4072 xmm_regs[reg].XMM_L(1)));
4073 if ((val >> 2) & 1)
4074 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4075 cpu_env, offsetof(CPUX86State,
4076 xmm_regs[reg].XMM_L(2)));
4077 if ((val >> 3) & 1)
4078 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4079 cpu_env, offsetof(CPUX86State,
4080 xmm_regs[reg].XMM_L(3)));
4081 break;
4082 case 0x22:
4083 if (ot == OT_LONG) { /* pinsrd */
4084 if (mod == 3)
4085 gen_op_mov_v_reg(ot, cpu_tmp0, rm);
4086 else
4087 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
4088 (s->mem_index >> 2) - 1);
4089 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
4090 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4091 offsetof(CPUX86State,
4092 xmm_regs[reg].XMM_L(val & 3)));
4093 } else { /* pinsrq */
4094 #ifdef TARGET_X86_64
4095 if (mod == 3)
4096 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4097 else
4098 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
4099 (s->mem_index >> 2) - 1);
4100 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4101 offsetof(CPUX86State,
4102 xmm_regs[reg].XMM_Q(val & 1)));
4103 #else
4104 goto illegal_op;
4105 #endif
4106 }
4107 break;
4108 }
4109 return;
4110 }
4111
4112 if (b1) {
4113 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4114 if (mod == 3) {
4115 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4116 } else {
4117 op2_offset = offsetof(CPUX86State,xmm_t0);
4118 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4119 gen_ldo_env_A0(s->mem_index, op2_offset);
4120 }
4121 } else {
4122 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4123 if (mod == 3) {
4124 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4125 } else {
4126 op2_offset = offsetof(CPUX86State,mmx_t0);
4127 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4128 gen_ldq_env_A0(s->mem_index, op2_offset);
4129 }
4130 }
4131 val = cpu_ldub_code(env, s->pc++);
4132
4133 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
4134 set_cc_op(s, CC_OP_EFLAGS);
4135
4136 if (s->dflag == 2)
4137 /* The helper must use the full 64-bit gp registers */
4138 val |= 1 << 8;
4139 }
4140
4141 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4142 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4143 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4144 break;
4145 default:
4146 goto illegal_op;
4147 }
4148 } else {
4149 /* generic MMX or SSE operation */
4150 switch(b) {
4151 case 0x70: /* pshufx insn */
4152 case 0xc6: /* pshufx insn */
4153 case 0xc2: /* compare insns */
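     /* these opcodes carry a trailing immediate byte; rip_offset lets
        RIP-relative address computation account for the byte that is
        still to be fetched after the displacement */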
4154 s->rip_offset = 1;
4155 break;
4156 default:
4157 break;
4158 }
4159 if (is_xmm) {
4160 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4161 if (mod != 3) {
4162 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4163 op2_offset = offsetof(CPUX86State,xmm_t0);
4164 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
4165 b == 0xc2)) {
4166 /* special case for scalar SSE instructions: only the low 32 or 64 bits are loaded */
4167 if (b1 == 2) {
4168 /* 32 bit access */
4169 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
4170 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
4171 } else {
4172 /* 64 bit access */
4173 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
4174 }
4175 } else {
4176 gen_ldo_env_A0(s->mem_index, op2_offset);
4177 }
4178 } else {
4179 rm = (modrm & 7) | REX_B(s);
4180 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4181 }
4182 } else {
4183 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4184 if (mod != 3) {
4185 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4186 op2_offset = offsetof(CPUX86State,mmx_t0);
4187 gen_ldq_env_A0(s->mem_index, op2_offset);
4188 } else {
4189 rm = (modrm & 7);
4190 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4191 }
4192 }
4193 switch(b) {
4194 case 0x0f: /* 3DNow! data insns */
4195 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4196 goto illegal_op;
4197 val = cpu_ldub_code(env, s->pc++);
4198 sse_fn_epp = sse_op_table5[val];
4199 if (!sse_fn_epp) {
4200 goto illegal_op;
4201 }
4202 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4203 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4204 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4205 break;
4206 case 0x70: /* pshufx insn */
4207 case 0xc6: /* pshufx insn */
4208 val = cpu_ldub_code(env, s->pc++);
4209 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4210 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4211 /* XXX: introduce a new table? */
4212 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
4213 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4214 break;
4215 case 0xc2:
4216 /* compare insns */
4217 val = cpu_ldub_code(env, s->pc++);
4218 if (val >= 8)
4219 goto illegal_op;
4220 sse_fn_epp = sse_op_table4[val][b1];
4221
4222 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4223 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4224 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4225 break;
4226 case 0xf7:
4227 /* maskmov : we must prepare A0 */
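     /* maskmovq/maskmovdqu store through rDI with a DS segment base
        by default, so A0 is built from EDI/RDI here instead of from
        the modrm byte */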
4228 if (mod != 3)
4229 goto illegal_op;
4230 #ifdef TARGET_X86_64
4231 if (s->aflag == 2) {
4232 gen_op_movq_A0_reg(R_EDI);
4233 } else
4234 #endif
4235 {
4236 gen_op_movl_A0_reg(R_EDI);
4237 if (s->aflag == 0)
4238 gen_op_andl_A0_ffff();
4239 }
4240 gen_add_A0_ds_seg(s);
4241
4242 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4243 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4244 /* XXX: introduce a new table? */
4245 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4246 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
4247 break;
4248 default:
4249 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4250 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4251 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4252 break;
4253 }
4254 if (b == 0x2e || b == 0x2f) {
4255 set_cc_op(s, CC_OP_EFLAGS);
4256 }
4257 }
4258 }
4259
4260 /* convert one instruction. s->is_jmp is set if the translation must
4261 be stopped. Return the next pc value */
4262 static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4263 target_ulong pc_start)
4264 {
4265 int b, prefixes, aflag, dflag;
4266 int shift, ot;
4267 int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
4268 target_ulong next_eip, tval;
4269 int rex_w, rex_r;
4270
4271 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4272 tcg_gen_debug_insn_start(pc_start);
4273 }
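    /* aflag/dflag encode the address/operand size: 0 = 16 bit,
       1 = 32 bit, 2 = 64 bit. They start from the code segment
       default and are adjusted by the 0x66/0x67 prefixes and
       REX.W below. */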
4274 s->pc = pc_start;
4275 prefixes = 0;
4276 aflag = s->code32;
4277 dflag = s->code32;
4278 s->override = -1;
4279 rex_w = -1;
4280 rex_r = 0;
4281 #ifdef TARGET_X86_64
4282 s->rex_x = 0;
4283 s->rex_b = 0;
4284 x86_64_hregs = 0;
4285 #endif
4286 s->rip_offset = 0; /* for relative ip address */
4287 next_byte:
4288 b = cpu_ldub_code(env, s->pc);
4289 s->pc++;
4290 /* check prefixes */
4291 #ifdef TARGET_X86_64
4292 if (CODE64(s)) {
4293 switch (b) {
4294 case 0xf3:
4295 prefixes |= PREFIX_REPZ;
4296 goto next_byte;
4297 case 0xf2:
4298 prefixes |= PREFIX_REPNZ;
4299 goto next_byte;
4300 case 0xf0:
4301 prefixes |= PREFIX_LOCK;
4302 goto next_byte;
4303 case 0x2e:
4304 s->override = R_CS;
4305 goto next_byte;
4306 case 0x36:
4307 s->override = R_SS;
4308 goto next_byte;
4309 case 0x3e:
4310 s->override = R_DS;
4311 goto next_byte;
4312 case 0x26:
4313 s->override = R_ES;
4314 goto next_byte;
4315 case 0x64:
4316 s->override = R_FS;
4317 goto next_byte;
4318 case 0x65:
4319 s->override = R_GS;
4320 goto next_byte;
4321 case 0x66:
4322 prefixes |= PREFIX_DATA;
4323 goto next_byte;
4324 case 0x67:
4325 prefixes |= PREFIX_ADR;
4326 goto next_byte;
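         /* REX layout: bit 3 = W (64-bit operand size), bit 2 = R,
            bit 1 = X, bit 0 = B. R, X and B are shifted so they can
            be OR'ed directly on top of the 3-bit reg, index and base
            fields. */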
4327 case 0x40 ... 0x4f:
4328 /* REX prefix */
4329 rex_w = (b >> 3) & 1;
4330 rex_r = (b & 0x4) << 1;
4331 s->rex_x = (b & 0x2) << 2;
4332 REX_B(s) = (b & 0x1) << 3;
4333 x86_64_hregs = 1; /* select uniform byte register addressing */
4334 goto next_byte;
4335 }
4336 if (rex_w == 1) {
4337 /* 0x66 is ignored if rex.w is set */
4338 dflag = 2;
4339 } else {
4340 if (prefixes & PREFIX_DATA)
4341 dflag ^= 1;
4342 }
4343 if (!(prefixes & PREFIX_ADR))
4344 aflag = 2;
4345 } else
4346 #endif
4347 {
4348 switch (b) {
4349 case 0xf3:
4350 prefixes |= PREFIX_REPZ;
4351 goto next_byte;
4352 case 0xf2:
4353 prefixes |= PREFIX_REPNZ;
4354 goto next_byte;
4355 case 0xf0:
4356 prefixes |= PREFIX_LOCK;
4357 goto next_byte;
4358 case 0x2e:
4359 s->override = R_CS;
4360 goto next_byte;
4361 case 0x36:
4362 s->override = R_SS;
4363 goto next_byte;
4364 case 0x3e:
4365 s->override = R_DS;
4366 goto next_byte;
4367 case 0x26:
4368 s->override = R_ES;
4369 goto next_byte;
4370 case 0x64:
4371 s->override = R_FS;
4372 goto next_byte;
4373 case 0x65:
4374 s->override = R_GS;
4375 goto next_byte;
4376 case 0x66:
4377 prefixes |= PREFIX_DATA;
4378 goto next_byte;
4379 case 0x67:
4380 prefixes |= PREFIX_ADR;
4381 goto next_byte;
4382 }
4383 if (prefixes & PREFIX_DATA)
4384 dflag ^= 1;
4385 if (prefixes & PREFIX_ADR)
4386 aflag ^= 1;
4387 }
4388
4389 s->prefix = prefixes;
4390 s->aflag = aflag;
4391 s->dflag = dflag;
4392
4393 /* lock generation */
4394 if (prefixes & PREFIX_LOCK)
4395 gen_helper_lock();
4396
4397 /* now check op code */
4398 reswitch:
4399 switch(b) {
4400 case 0x0f:
4401 /**************************/
4402 /* extended op code */
4403 b = cpu_ldub_code(env, s->pc++) | 0x100;
4404 goto reswitch;
4405
4406 /**************************/
4407 /* arith & logic */
4408 case 0x00 ... 0x05:
4409 case 0x08 ... 0x0d:
4410 case 0x10 ... 0x15:
4411 case 0x18 ... 0x1d:
4412 case 0x20 ... 0x25:
4413 case 0x28 ... 0x2d:
4414 case 0x30 ... 0x35:
4415 case 0x38 ... 0x3d:
4416 {
4417 int op, f, val;
4418 op = (b >> 3) & 7;
4419 f = (b >> 1) & 3;
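         /* op (opcode bits 5:3) selects the ALU operation
            (add/or/adc/sbb/and/sub/xor/cmp); f (bits 2:1) selects the
            form: 0 = Ev,Gv  1 = Gv,Ev  2 = AL/eAX,Iv. */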
4420
4421 if ((b & 1) == 0)
4422 ot = OT_BYTE;
4423 else
4424 ot = dflag + OT_WORD;
4425
4426 switch(f) {
4427 case 0: /* OP Ev, Gv */
4428 modrm = cpu_ldub_code(env, s->pc++);
4429 reg = ((modrm >> 3) & 7) | rex_r;
4430 mod = (modrm >> 6) & 3;
4431 rm = (modrm & 7) | REX_B(s);
4432 if (mod != 3) {
4433 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4434 opreg = OR_TMP0;
4435 } else if (op == OP_XORL && rm == reg) {
4436 xor_zero:
4437 /* xor reg, reg optimisation */
4438 gen_op_movl_T0_0();
4439 set_cc_op(s, CC_OP_LOGICB + ot);
4440 gen_op_mov_reg_T0(ot, reg);
4441 gen_op_update1_cc();
4442 break;
4443 } else {
4444 opreg = rm;
4445 }
4446 gen_op_mov_TN_reg(ot, 1, reg);
4447 gen_op(s, op, ot, opreg);
4448 break;
4449 case 1: /* OP Gv, Ev */
4450 modrm = cpu_ldub_code(env, s->pc++);
4451 mod = (modrm >> 6) & 3;
4452 reg = ((modrm >> 3) & 7) | rex_r;
4453 rm = (modrm & 7) | REX_B(s);
4454 if (mod != 3) {
4455 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4456 gen_op_ld_T1_A0(ot + s->mem_index);
4457 } else if (op == OP_XORL && rm == reg) {
4458 goto xor_zero;
4459 } else {
4460 gen_op_mov_TN_reg(ot, 1, rm);
4461 }
4462 gen_op(s, op, ot, reg);
4463 break;
4464 case 2: /* OP A, Iv */
4465 val = insn_get(env, s, ot);
4466 gen_op_movl_T1_im(val);
4467 gen_op(s, op, ot, OR_EAX);
4468 break;
4469 }
4470 }
4471 break;
4472
4473 case 0x82:
4474 if (CODE64(s))
4475 goto illegal_op;
4476 case 0x80: /* GRP1 */
4477 case 0x81:
4478 case 0x83:
4479 {
4480 int val;
4481
4482 if ((b & 1) == 0)
4483 ot = OT_BYTE;
4484 else
4485 ot = dflag + OT_WORD;
4486
4487 modrm = cpu_ldub_code(env, s->pc++);
4488 mod = (modrm >> 6) & 3;
4489 rm = (modrm & 7) | REX_B(s);
4490 op = (modrm >> 3) & 7;
4491
4492 if (mod != 3) {
4493 if (b == 0x83)
4494 s->rip_offset = 1;
4495 else
4496 s->rip_offset = insn_const_size(ot);
4497 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4498 opreg = OR_TMP0;
4499 } else {
4500 opreg = rm;
4501 }
4502
4503 switch(b) {
4504 default:
4505 case 0x80:
4506 case 0x81:
4507 case 0x82:
4508 val = insn_get(env, s, ot);
4509 break;
4510 case 0x83:
4511 val = (int8_t)insn_get(env, s, OT_BYTE);
4512 break;
4513 }
4514 gen_op_movl_T1_im(val);
4515 gen_op(s, op, ot, opreg);
4516 }
4517 break;
4518
4519 /**************************/
4520 /* inc, dec, and other misc arith */
4521 case 0x40 ... 0x47: /* inc Gv */
4522 ot = dflag ? OT_LONG : OT_WORD;
4523 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4524 break;
4525 case 0x48 ... 0x4f: /* dec Gv */
4526 ot = dflag ? OT_LONG : OT_WORD;
4527 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4528 break;
4529 case 0xf6: /* GRP3 */
4530 case 0xf7:
4531 if ((b & 1) == 0)
4532 ot = OT_BYTE;
4533 else
4534 ot = dflag + OT_WORD;
4535
4536 modrm = cpu_ldub_code(env, s->pc++);
4537 mod = (modrm >> 6) & 3;
4538 rm = (modrm & 7) | REX_B(s);
4539 op = (modrm >> 3) & 7;
4540 if (mod != 3) {
4541 if (op == 0)
4542 s->rip_offset = insn_const_size(ot);
4543 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4544 gen_op_ld_T0_A0(ot + s->mem_index);
4545 } else {
4546 gen_op_mov_TN_reg(ot, 0, rm);
4547 }
4548
4549 switch(op) {
4550 case 0: /* test */
4551 val = insn_get(env, s, ot);
4552 gen_op_movl_T1_im(val);
4553 gen_op_testl_T0_T1_cc();
4554 set_cc_op(s, CC_OP_LOGICB + ot);
4555 break;
4556 case 2: /* not */
4557 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4558 if (mod != 3) {
4559 gen_op_st_T0_A0(ot + s->mem_index);
4560 } else {
4561 gen_op_mov_reg_T0(ot, rm);
4562 }
4563 break;
4564 case 3: /* neg */
4565 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4566 if (mod != 3) {
4567 gen_op_st_T0_A0(ot + s->mem_index);
4568 } else {
4569 gen_op_mov_reg_T0(ot, rm);
4570 }
4571 gen_op_update_neg_cc();
4572 set_cc_op(s, CC_OP_SUBB + ot);
4573 break;
4574 case 4: /* mul */
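         /* unsigned multiply: cc_dst holds the result and cc_src its
            high part, so the lazy flag code can derive CF/OF from
            cc_src being non-zero. */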
4575 switch(ot) {
4576 case OT_BYTE:
4577 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4578 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4579 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4580 /* XXX: use 32 bit mul which could be faster */
4581 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4582 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4583 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4584 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4585 set_cc_op(s, CC_OP_MULB);
4586 break;
4587 case OT_WORD:
4588 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4589 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4590 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4591 /* XXX: use 32 bit mul which could be faster */
4592 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4593 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4594 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4595 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4596 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4597 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4598 set_cc_op(s, CC_OP_MULW);
4599 break;
4600 default:
4601 case OT_LONG:
4602 #ifdef TARGET_X86_64
4603 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4604 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4605 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
4606 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4607 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4608 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4609 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4610 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4611 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4612 #else
4613 {
4614 TCGv_i64 t0, t1;
4615 t0 = tcg_temp_new_i64();
4616 t1 = tcg_temp_new_i64();
4617 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4618 tcg_gen_extu_i32_i64(t0, cpu_T[0]);
4619 tcg_gen_extu_i32_i64(t1, cpu_T[1]);
4620 tcg_gen_mul_i64(t0, t0, t1);
4621 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4622 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4623 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4624 tcg_gen_shri_i64(t0, t0, 32);
4625 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4626 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4627 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4628 }
4629 #endif
4630 set_cc_op(s, CC_OP_MULL);
4631 break;
4632 #ifdef TARGET_X86_64
4633 case OT_QUAD:
4634 gen_helper_mulq_EAX_T0(cpu_env, cpu_T[0]);
4635 set_cc_op(s, CC_OP_MULQ);
4636 break;
4637 #endif
4638 }
4639 break;
4640 case 5: /* imul */
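         /* signed multiply: cc_src is the difference between the full
            result and the sign-extended low half, i.e. non-zero
            exactly when the result does not fit the destination, which
            is what CF/OF report. */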
4641 switch(ot) {
4642 case OT_BYTE:
4643 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4644 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4645 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4646 /* XXX: use 32 bit mul which could be faster */
4647 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4648 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4649 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4650 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4651 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4652 set_cc_op(s, CC_OP_MULB);
4653 break;
4654 case OT_WORD:
4655 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4656 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4657 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4658 /* XXX: use 32 bit mul which could be faster */
4659 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4660 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4661 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4662 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4663 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4664 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4665 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4666 set_cc_op(s, CC_OP_MULW);
4667 break;
4668 default:
4669 case OT_LONG:
4670 #ifdef TARGET_X86_64
4671 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4672 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4673 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4674 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4675 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4676 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4677 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4678 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4679 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4680 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4681 #else
4682 {
4683 TCGv_i64 t0, t1;
4684 t0 = tcg_temp_new_i64();
4685 t1 = tcg_temp_new_i64();
4686 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4687 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4688 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4689 tcg_gen_mul_i64(t0, t0, t1);
4690 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4691 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4692 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4693 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4694 tcg_gen_shri_i64(t0, t0, 32);
4695 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4696 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4697 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4698 }
4699 #endif
4700 set_cc_op(s, CC_OP_MULL);
4701 break;
4702 #ifdef TARGET_X86_64
4703 case OT_QUAD:
4704 gen_helper_imulq_EAX_T0(cpu_env, cpu_T[0]);
4705 set_cc_op(s, CC_OP_MULQ);
4706 break;
4707 #endif
4708 }
4709 break;
4710 case 6: /* div */
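         /* the division helpers can raise #DE, so synchronize EIP to
            the start of the instruction before calling them */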
4711 switch(ot) {
4712 case OT_BYTE:
4713 gen_jmp_im(pc_start - s->cs_base);
4714 gen_helper_divb_AL(cpu_env, cpu_T[0]);
4715 break;
4716 case OT_WORD:
4717 gen_jmp_im(pc_start - s->cs_base);
4718 gen_helper_divw_AX(cpu_env, cpu_T[0]);
4719 break;
4720 default:
4721 case OT_LONG:
4722 gen_jmp_im(pc_start - s->cs_base);
4723 gen_helper_divl_EAX(cpu_env, cpu_T[0]);
4724 break;
4725 #ifdef TARGET_X86_64
4726 case OT_QUAD:
4727 gen_jmp_im(pc_start - s->cs_base);
4728 gen_helper_divq_EAX(cpu_env, cpu_T[0]);
4729 break;
4730 #endif
4731 }
4732 break;
4733 case 7: /* idiv */
4734 switch(ot) {
4735 case OT_BYTE:
4736 gen_jmp_im(pc_start - s->cs_base);
4737 gen_helper_idivb_AL(cpu_env, cpu_T[0]);
4738 break;
4739 case OT_WORD:
4740 gen_jmp_im(pc_start - s->cs_base);
4741 gen_helper_idivw_AX(cpu_env, cpu_T[0]);
4742 break;
4743 default:
4744 case OT_LONG:
4745 gen_jmp_im(pc_start - s->cs_base);
4746 gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
4747 break;
4748 #ifdef TARGET_X86_64
4749 case OT_QUAD:
4750 gen_jmp_im(pc_start - s->cs_base);
4751 gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
4752 break;
4753 #endif
4754 }
4755 break;
4756 default:
4757 goto illegal_op;
4758 }
4759 break;
4760
4761 case 0xfe: /* GRP4 */
4762 case 0xff: /* GRP5 */
4763 if ((b & 1) == 0)
4764 ot = OT_BYTE;
4765 else
4766 ot = dflag + OT_WORD;
4767
4768 modrm = cpu_ldub_code(env, s->pc++);
4769 mod = (modrm >> 6) & 3;
4770 rm = (modrm & 7) | REX_B(s);
4771 op = (modrm >> 3) & 7;
4772 if (op >= 2 && b == 0xfe) {
4773 goto illegal_op;
4774 }
4775 if (CODE64(s)) {
4776 if (op == 2 || op == 4) {
4777 /* operand size for jumps is 64 bit */
4778 ot = OT_QUAD;
4779 } else if (op == 3 || op == 5) {
4780 ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
4781 } else if (op == 6) {
4782 /* default push size is 64 bit */
4783 ot = dflag ? OT_QUAD : OT_WORD;
4784 }
4785 }
4786 if (mod != 3) {
4787 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4788 if (op >= 2 && op != 3 && op != 5)
4789 gen_op_ld_T0_A0(ot + s->mem_index);
4790 } else {
4791 gen_op_mov_TN_reg(ot, 0, rm);
4792 }
4793
4794 switch(op) {
4795 case 0: /* inc Ev */
4796 if (mod != 3)
4797 opreg = OR_TMP0;
4798 else
4799 opreg = rm;
4800 gen_inc(s, ot, opreg, 1);
4801 break;
4802 case 1: /* dec Ev */
4803 if (mod != 3)
4804 opreg = OR_TMP0;
4805 else
4806 opreg = rm;
4807 gen_inc(s, ot, opreg, -1);
4808 break;
4809 case 2: /* call Ev */
4810 /* XXX: optimize if memory (no 'and' is necessary) */
4811 if (s->dflag == 0)
4812 gen_op_andl_T0_ffff();
4813 next_eip = s->pc - s->cs_base;
4814 gen_movtl_T1_im(next_eip);
4815 gen_push_T1(s);
4816 gen_op_jmp_T0();
4817 gen_eob(s);
4818 break;
4819 case 3: /* lcall Ev */
4820 gen_op_ld_T1_A0(ot + s->mem_index);
4821 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4822 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4823 do_lcall:
4824 if (s->pe && !s->vm86) {
4825 gen_update_cc_op(s);
4826 gen_jmp_im(pc_start - s->cs_base);
4827 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4828 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4829 tcg_const_i32(dflag),
4830 tcg_const_i32(s->pc - pc_start));
4831 } else {
4832 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4833 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
4834 tcg_const_i32(dflag),
4835 tcg_const_i32(s->pc - s->cs_base));
4836 }
4837 gen_eob(s);
4838 break;
4839 case 4: /* jmp Ev */
4840 if (s->dflag == 0)
4841 gen_op_andl_T0_ffff();
4842 gen_op_jmp_T0();
4843 gen_eob(s);
4844 break;
4845 case 5: /* ljmp Ev */
4846 gen_op_ld_T1_A0(ot + s->mem_index);
4847 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4848 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4849 do_ljmp:
4850 if (s->pe && !s->vm86) {
4851 gen_update_cc_op(s);
4852 gen_jmp_im(pc_start - s->cs_base);
4853 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4854 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4855 tcg_const_i32(s->pc - pc_start));
4856 } else {
4857 gen_op_movl_seg_T0_vm(R_CS);
4858 gen_op_movl_T0_T1();
4859 gen_op_jmp_T0();
4860 }
4861 gen_eob(s);
4862 break;
4863 case 6: /* push Ev */
4864 gen_push_T0(s);
4865 break;
4866 default:
4867 goto illegal_op;
4868 }
4869 break;
4870
4871 case 0x84: /* test Ev, Gv */
4872 case 0x85:
4873 if ((b & 1) == 0)
4874 ot = OT_BYTE;
4875 else
4876 ot = dflag + OT_WORD;
4877
4878 modrm = cpu_ldub_code(env, s->pc++);
4879 reg = ((modrm >> 3) & 7) | rex_r;
4880
4881 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4882 gen_op_mov_TN_reg(ot, 1, reg);
4883 gen_op_testl_T0_T1_cc();
4884 set_cc_op(s, CC_OP_LOGICB + ot);
4885 break;
4886
4887 case 0xa8: /* test eAX, Iv */
4888 case 0xa9:
4889 if ((b & 1) == 0)
4890 ot = OT_BYTE;
4891 else
4892 ot = dflag + OT_WORD;
4893 val = insn_get(env, s, ot);
4894
4895 gen_op_mov_TN_reg(ot, 0, OR_EAX);
4896 gen_op_movl_T1_im(val);
4897 gen_op_testl_T0_T1_cc();
4898 set_cc_op(s, CC_OP_LOGICB + ot);
4899 break;
4900
4901 case 0x98: /* CWDE/CBW */
4902 #ifdef TARGET_X86_64
4903 if (dflag == 2) {
4904 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4905 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4906 gen_op_mov_reg_T0(OT_QUAD, R_EAX);
4907 } else
4908 #endif
4909 if (dflag == 1) {
4910 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4911 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4912 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4913 } else {
4914 gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
4915 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4916 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4917 }
4918 break;
4919 case 0x99: /* CDQ/CWD */
4920 #ifdef TARGET_X86_64
4921 if (dflag == 2) {
4922 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
4923 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
4924 gen_op_mov_reg_T0(OT_QUAD, R_EDX);
4925 } else
4926 #endif
4927 if (dflag == 1) {
4928 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4929 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4930 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
4931 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4932 } else {
4933 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4934 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4935 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
4936 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4937 }
4938 break;
4939 case 0x1af: /* imul Gv, Ev */
4940 case 0x69: /* imul Gv, Ev, I */
4941 case 0x6b:
4942 ot = dflag + OT_WORD;
4943 modrm = cpu_ldub_code(env, s->pc++);
4944 reg = ((modrm >> 3) & 7) | rex_r;
4945 if (b == 0x69)
4946 s->rip_offset = insn_const_size(ot);
4947 else if (b == 0x6b)
4948 s->rip_offset = 1;
4949 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4950 if (b == 0x69) {
4951 val = insn_get(env, s, ot);
4952 gen_op_movl_T1_im(val);
4953 } else if (b == 0x6b) {
4954 val = (int8_t)insn_get(env, s, OT_BYTE);
4955 gen_op_movl_T1_im(val);
4956 } else {
4957 gen_op_mov_TN_reg(ot, 1, reg);
4958 }
4959
4960 #ifdef TARGET_X86_64
4961 if (ot == OT_QUAD) {
4962 gen_helper_imulq_T0_T1(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
4963 } else
4964 #endif
4965 if (ot == OT_LONG) {
4966 #ifdef TARGET_X86_64
4967 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4968 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4969 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4970 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4971 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4972 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4973 #else
4974 {
4975 TCGv_i64 t0, t1;
4976 t0 = tcg_temp_new_i64();
4977 t1 = tcg_temp_new_i64();
4978 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4979 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4980 tcg_gen_mul_i64(t0, t0, t1);
4981 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4982 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4983 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4984 tcg_gen_shri_i64(t0, t0, 32);
4985 tcg_gen_trunc_i64_i32(cpu_T[1], t0);
4986 tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
4987 }
4988 #endif
4989 } else {
4990 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4991 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4992 /* XXX: use 32 bit mul which could be faster */
4993 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4994 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4995 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4996 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4997 }
4998 gen_op_mov_reg_T0(ot, reg);
4999 set_cc_op(s, CC_OP_MULB + ot);
5000 break;
5001 case 0x1c0:
5002 case 0x1c1: /* xadd Ev, Gv */
5003 if ((b & 1) == 0)
5004 ot = OT_BYTE;
5005 else
5006 ot = dflag + OT_WORD;
5007 modrm = cpu_ldub_code(env, s->pc++);
5008 reg = ((modrm >> 3) & 7) | rex_r;
5009 mod = (modrm >> 6) & 3;
5010 if (mod == 3) {
5011 rm = (modrm & 7) | REX_B(s);
5012 gen_op_mov_TN_reg(ot, 0, reg);
5013 gen_op_mov_TN_reg(ot, 1, rm);
5014 gen_op_addl_T0_T1();
5015 gen_op_mov_reg_T1(ot, reg);
5016 gen_op_mov_reg_T0(ot, rm);
5017 } else {
5018 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5019 gen_op_mov_TN_reg(ot, 0, reg);
5020 gen_op_ld_T1_A0(ot + s->mem_index);
5021 gen_op_addl_T0_T1();
5022 gen_op_st_T0_A0(ot + s->mem_index);
5023 gen_op_mov_reg_T1(ot, reg);
5024 }
5025 gen_op_update2_cc();
5026 set_cc_op(s, CC_OP_ADDB + ot);
5027 break;
5028 case 0x1b0:
5029 case 0x1b1: /* cmpxchg Ev, Gv */
5030 {
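     /* cmpxchg: compare EAX/AX/AL with the destination; if equal,
        store the source register into the destination, otherwise load
        the destination into the accumulator. Flags are set as for a
        SUB of the two compared values. */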
5031 int label1, label2;
5032 TCGv t0, t1, t2, a0;
5033
5034 if ((b & 1) == 0)
5035 ot = OT_BYTE;
5036 else
5037 ot = dflag + OT_WORD;
5038 modrm = cpu_ldub_code(env, s->pc++);
5039 reg = ((modrm >> 3) & 7) | rex_r;
5040 mod = (modrm >> 6) & 3;
5041 t0 = tcg_temp_local_new();
5042 t1 = tcg_temp_local_new();
5043 t2 = tcg_temp_local_new();
5044 a0 = tcg_temp_local_new();
5045 gen_op_mov_v_reg(ot, t1, reg);
5046 if (mod == 3) {
5047 rm = (modrm & 7) | REX_B(s);
5048 gen_op_mov_v_reg(ot, t0, rm);
5049 } else {
5050 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5051 tcg_gen_mov_tl(a0, cpu_A0);
5052 gen_op_ld_v(ot + s->mem_index, t0, a0);
5053 rm = 0; /* avoid warning */
5054 }
5055 label1 = gen_new_label();
5056 tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5057 gen_extu(ot, t0);
5058 gen_extu(ot, t2);
5059 tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
5060 label2 = gen_new_label();
5061 if (mod == 3) {
5062 gen_op_mov_reg_v(ot, R_EAX, t0);
5063 tcg_gen_br(label2);
5064 gen_set_label(label1);
5065 gen_op_mov_reg_v(ot, rm, t1);
5066 } else {
5067 /* perform no-op store cycle like physical cpu; must be
5068 before changing accumulator to ensure idempotency if
5069 the store faults and the instruction is restarted */
5070 gen_op_st_v(ot + s->mem_index, t0, a0);
5071 gen_op_mov_reg_v(ot, R_EAX, t0);
5072 tcg_gen_br(label2);
5073 gen_set_label(label1);
5074 gen_op_st_v(ot + s->mem_index, t1, a0);
5075 }
5076 gen_set_label(label2);
5077 tcg_gen_mov_tl(cpu_cc_src, t0);
5078 tcg_gen_mov_tl(cpu_cc_srcT, t2);
5079 tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
5080 set_cc_op(s, CC_OP_SUBB + ot);
5081 tcg_temp_free(t0);
5082 tcg_temp_free(t1);
5083 tcg_temp_free(t2);
5084 tcg_temp_free(a0);
5085 }
5086 break;
5087 case 0x1c7: /* cmpxchg8b */
5088 modrm = cpu_ldub_code(env, s->pc++);
5089 mod = (modrm >> 6) & 3;
5090 if ((mod == 3) || ((modrm & 0x38) != 0x8))
5091 goto illegal_op;
5092 #ifdef TARGET_X86_64
5093 if (dflag == 2) {
5094 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5095 goto illegal_op;
5096 gen_jmp_im(pc_start - s->cs_base);
5097 gen_update_cc_op(s);
5098 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5099 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
5100 } else
5101 #endif
5102 {
5103 if (!(s->cpuid_features & CPUID_CX8))
5104 goto illegal_op;
5105 gen_jmp_im(pc_start - s->cs_base);
5106 gen_update_cc_op(s);
5107 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5108 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
5109 }
5110 set_cc_op(s, CC_OP_EFLAGS);
5111 break;
5112
5113 /**************************/
5114 /* push/pop */
5115 case 0x50 ... 0x57: /* push */
5116 gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
5117 gen_push_T0(s);
5118 break;
5119 case 0x58 ... 0x5f: /* pop */
5120 if (CODE64(s)) {
5121 ot = dflag ? OT_QUAD : OT_WORD;
5122 } else {
5123 ot = dflag + OT_WORD;
5124 }
5125 gen_pop_T0(s);
5126 /* NOTE: order is important for pop %sp */
5127 gen_pop_update(s);
5128 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
5129 break;
5130 case 0x60: /* pusha */
5131 if (CODE64(s))
5132 goto illegal_op;
5133 gen_pusha(s);
5134 break;
5135 case 0x61: /* popa */
5136 if (CODE64(s))
5137 goto illegal_op;
5138 gen_popa(s);
5139 break;
5140 case 0x68: /* push Iv */
5141 case 0x6a:
5142 if (CODE64(s)) {
5143 ot = dflag ? OT_QUAD : OT_WORD;
5144 } else {
5145 ot = dflag + OT_WORD;
5146 }
5147 if (b == 0x68)
5148 val = insn_get(env, s, ot);
5149 else
5150 val = (int8_t)insn_get(env, s, OT_BYTE);
5151 gen_op_movl_T0_im(val);
5152 gen_push_T0(s);
5153 break;
5154 case 0x8f: /* pop Ev */
5155 if (CODE64(s)) {
5156 ot = dflag ? OT_QUAD : OT_WORD;
5157 } else {
5158 ot = dflag + OT_WORD;
5159 }
5160 modrm = cpu_ldub_code(env, s->pc++);
5161 mod = (modrm >> 6) & 3;
5162 gen_pop_T0(s);
5163 if (mod == 3) {
5164 /* NOTE: order is important for pop %sp */
5165 gen_pop_update(s);
5166 rm = (modrm & 7) | REX_B(s);
5167 gen_op_mov_reg_T0(ot, rm);
5168 } else {
5169 /* NOTE: order is important too for MMU exceptions */
5170 s->popl_esp_hack = 1 << ot;
5171 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5172 s->popl_esp_hack = 0;
5173 gen_pop_update(s);
5174 }
5175 break;
5176 case 0xc8: /* enter */
5177 {
5178 int level;
5179 val = cpu_lduw_code(env, s->pc);
5180 s->pc += 2;
5181 level = cpu_ldub_code(env, s->pc++);
5182 gen_enter(s, val, level);
5183 }
5184 break;
5185 case 0xc9: /* leave */
5186 /* XXX: exception not precise (ESP is updated before potential exception) */
5187 if (CODE64(s)) {
5188 gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
5189 gen_op_mov_reg_T0(OT_QUAD, R_ESP);
5190 } else if (s->ss32) {
5191 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
5192 gen_op_mov_reg_T0(OT_LONG, R_ESP);
5193 } else {
5194 gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
5195 gen_op_mov_reg_T0(OT_WORD, R_ESP);
5196 }
5197 gen_pop_T0(s);
5198 if (CODE64(s)) {
5199 ot = dflag ? OT_QUAD : OT_WORD;
5200 } else {
5201 ot = dflag + OT_WORD;
5202 }
5203 gen_op_mov_reg_T0(ot, R_EBP);
5204 gen_pop_update(s);
5205 break;
5206 case 0x06: /* push es */
5207 case 0x0e: /* push cs */
5208 case 0x16: /* push ss */
5209 case 0x1e: /* push ds */
5210 if (CODE64(s))
5211 goto illegal_op;
5212 gen_op_movl_T0_seg(b >> 3);
5213 gen_push_T0(s);
5214 break;
5215 case 0x1a0: /* push fs */
5216 case 0x1a8: /* push gs */
5217 gen_op_movl_T0_seg((b >> 3) & 7);
5218 gen_push_T0(s);
5219 break;
5220 case 0x07: /* pop es */
5221 case 0x17: /* pop ss */
5222 case 0x1f: /* pop ds */
5223 if (CODE64(s))
5224 goto illegal_op;
5225 reg = b >> 3;
5226 gen_pop_T0(s);
5227 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5228 gen_pop_update(s);
5229 if (reg == R_SS) {
5230 /* if reg == SS, inhibit interrupts/trace. */
5231 /* If several instructions disable interrupts, only the
5232 _first_ does it */
5233 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5234 gen_helper_set_inhibit_irq(cpu_env);
5235 s->tf = 0;
5236 }
5237 if (s->is_jmp) {
5238 gen_jmp_im(s->pc - s->cs_base);
5239 gen_eob(s);
5240 }
5241 break;
5242 case 0x1a1: /* pop fs */
5243 case 0x1a9: /* pop gs */
5244 gen_pop_T0(s);
5245 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5246 gen_pop_update(s);
5247 if (s->is_jmp) {
5248 gen_jmp_im(s->pc - s->cs_base);
5249 gen_eob(s);
5250 }
5251 break;
5252
5253 /**************************/
5254 /* mov */
5255 case 0x88:
5256 case 0x89: /* mov Gv, Ev */
5257 if ((b & 1) == 0)
5258 ot = OT_BYTE;
5259 else
5260 ot = dflag + OT_WORD;
5261 modrm = cpu_ldub_code(env, s->pc++);
5262 reg = ((modrm >> 3) & 7) | rex_r;
5263
5264 /* generate a generic store */
5265 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
5266 break;
5267 case 0xc6:
5268 case 0xc7: /* mov Ev, Iv */
5269 if ((b & 1) == 0)
5270 ot = OT_BYTE;
5271 else
5272 ot = dflag + OT_WORD;
5273 modrm = cpu_ldub_code(env, s->pc++);
5274 mod = (modrm >> 6) & 3;
5275 if (mod != 3) {
5276 s->rip_offset = insn_const_size(ot);
5277 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5278 }
5279 val = insn_get(env, s, ot);
5280 gen_op_movl_T0_im(val);
5281 if (mod != 3)
5282 gen_op_st_T0_A0(ot + s->mem_index);
5283 else
5284 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
5285 break;
5286 case 0x8a:
5287 case 0x8b: /* mov Ev, Gv */
5288 if ((b & 1) == 0)
5289 ot = OT_BYTE;
5290 else
5291 ot = OT_WORD + dflag;
5292 modrm = cpu_ldub_code(env, s->pc++);
5293 reg = ((modrm >> 3) & 7) | rex_r;
5294
5295 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5296 gen_op_mov_reg_T0(ot, reg);
5297 break;
5298 case 0x8e: /* mov seg, Gv */
5299 modrm = cpu_ldub_code(env, s->pc++);
5300 reg = (modrm >> 3) & 7;
5301 if (reg >= 6 || reg == R_CS)
5302 goto illegal_op;
5303 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
5304 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5305 if (reg == R_SS) {
5306 /* if reg == SS, inhibit interrupts/trace */
5307 /* If several instructions disable interrupts, only the
5308 _first_ does it */
5309 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5310 gen_helper_set_inhibit_irq(cpu_env);
5311 s->tf = 0;
5312 }
5313 if (s->is_jmp) {
5314 gen_jmp_im(s->pc - s->cs_base);
5315 gen_eob(s);
5316 }
5317 break;
5318 case 0x8c: /* mov Gv, seg */
5319 modrm = cpu_ldub_code(env, s->pc++);
5320 reg = (modrm >> 3) & 7;
5321 mod = (modrm >> 6) & 3;
5322 if (reg >= 6)
5323 goto illegal_op;
5324 gen_op_movl_T0_seg(reg);
5325 if (mod == 3)
5326 ot = OT_WORD + dflag;
5327 else
5328 ot = OT_WORD;
5329 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5330 break;
5331
5332 case 0x1b6: /* movzbS Gv, Eb */
5333 case 0x1b7: /* movzwS Gv, Eb */
5334 case 0x1be: /* movsbS Gv, Eb */
5335 case 0x1bf: /* movswS Gv, Eb */
5336 {
5337 int d_ot;
5338 /* d_ot is the size of the destination */
5339 d_ot = dflag + OT_WORD;
5340 /* ot is the size of the source */
5341 ot = (b & 1) + OT_BYTE;
5342 modrm = cpu_ldub_code(env, s->pc++);
5343 reg = ((modrm >> 3) & 7) | rex_r;
5344 mod = (modrm >> 6) & 3;
5345 rm = (modrm & 7) | REX_B(s);
5346
5347 if (mod == 3) {
5348 gen_op_mov_TN_reg(ot, 0, rm);
5349 switch(ot | (b & 8)) {
5350 case OT_BYTE:
5351 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5352 break;
5353 case OT_BYTE | 8:
5354 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5355 break;
5356 case OT_WORD:
5357 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5358 break;
5359 default:
5360 case OT_WORD | 8:
5361 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5362 break;
5363 }
5364 gen_op_mov_reg_T0(d_ot, reg);
5365 } else {
5366 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5367 if (b & 8) {
5368 gen_op_lds_T0_A0(ot + s->mem_index);
5369 } else {
5370 gen_op_ldu_T0_A0(ot + s->mem_index);
5371 }
5372 gen_op_mov_reg_T0(d_ot, reg);
5373 }
5374 }
5375 break;
5376
5377 case 0x8d: /* lea */
5378 ot = dflag + OT_WORD;
5379 modrm = cpu_ldub_code(env, s->pc++);
5380 mod = (modrm >> 6) & 3;
5381 if (mod == 3)
5382 goto illegal_op;
5383 reg = ((modrm >> 3) & 7) | rex_r;
5384 /* we must ensure that no segment is added */
5385 s->override = -1;
5386 val = s->addseg;
5387 s->addseg = 0;
5388 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5389 s->addseg = val;
5390 gen_op_mov_reg_A0(ot - OT_WORD, reg);
5391 break;
5392
5393 case 0xa0: /* mov EAX, Ov */
5394 case 0xa1:
5395 case 0xa2: /* mov Ov, EAX */
5396 case 0xa3:
5397 {
5398 target_ulong offset_addr;
5399
5400 if ((b & 1) == 0)
5401 ot = OT_BYTE;
5402 else
5403 ot = dflag + OT_WORD;
5404 #ifdef TARGET_X86_64
5405 if (s->aflag == 2) {
5406 offset_addr = cpu_ldq_code(env, s->pc);
5407 s->pc += 8;
5408 gen_op_movq_A0_im(offset_addr);
5409 } else
5410 #endif
5411 {
5412 if (s->aflag) {
5413 offset_addr = insn_get(env, s, OT_LONG);
5414 } else {
5415 offset_addr = insn_get(env, s, OT_WORD);
5416 }
5417 gen_op_movl_A0_im(offset_addr);
5418 }
5419 gen_add_A0_ds_seg(s);
5420 if ((b & 2) == 0) {
5421 gen_op_ld_T0_A0(ot + s->mem_index);
5422 gen_op_mov_reg_T0(ot, R_EAX);
5423 } else {
5424 gen_op_mov_TN_reg(ot, 0, R_EAX);
5425 gen_op_st_T0_A0(ot + s->mem_index);
5426 }
5427 }
5428 break;
5429 case 0xd7: /* xlat */
5430 #ifdef TARGET_X86_64
5431 if (s->aflag == 2) {
5432 gen_op_movq_A0_reg(R_EBX);
5433 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5434 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5435 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5436 } else
5437 #endif
5438 {
5439 gen_op_movl_A0_reg(R_EBX);
5440 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5441 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5442 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5443 if (s->aflag == 0)
5444 gen_op_andl_A0_ffff();
5445 else
5446 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
5447 }
5448 gen_add_A0_ds_seg(s);
5449 gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
5450 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
5451 break;
5452 case 0xb0 ... 0xb7: /* mov R, Ib */
5453 val = insn_get(env, s, OT_BYTE);
5454 gen_op_movl_T0_im(val);
5455 gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
5456 break;
5457 case 0xb8 ... 0xbf: /* mov R, Iv */
5458 #ifdef TARGET_X86_64
5459 if (dflag == 2) {
5460 uint64_t tmp;
5461 /* 64 bit case */
5462 tmp = cpu_ldq_code(env, s->pc);
5463 s->pc += 8;
5464 reg = (b & 7) | REX_B(s);
5465 gen_movtl_T0_im(tmp);
5466 gen_op_mov_reg_T0(OT_QUAD, reg);
5467 } else
5468 #endif
5469 {
5470 ot = dflag ? OT_LONG : OT_WORD;
5471 val = insn_get(env, s, ot);
5472 reg = (b & 7) | REX_B(s);
5473 gen_op_movl_T0_im(val);
5474 gen_op_mov_reg_T0(ot, reg);
5475 }
5476 break;
5477
5478 case 0x91 ... 0x97: /* xchg R, EAX */
5479 do_xchg_reg_eax:
5480 ot = dflag + OT_WORD;
5481 reg = (b & 7) | REX_B(s);
5482 rm = R_EAX;
5483 goto do_xchg_reg;
5484 case 0x86:
5485 case 0x87: /* xchg Ev, Gv */
5486 if ((b & 1) == 0)
5487 ot = OT_BYTE;
5488 else
5489 ot = dflag + OT_WORD;
5490 modrm = cpu_ldub_code(env, s->pc++);
5491 reg = ((modrm >> 3) & 7) | rex_r;
5492 mod = (modrm >> 6) & 3;
5493 if (mod == 3) {
5494 rm = (modrm & 7) | REX_B(s);
5495 do_xchg_reg:
5496 gen_op_mov_TN_reg(ot, 0, reg);
5497 gen_op_mov_TN_reg(ot, 1, rm);
5498 gen_op_mov_reg_T0(ot, rm);
5499 gen_op_mov_reg_T1(ot, reg);
5500 } else {
5501 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5502 gen_op_mov_TN_reg(ot, 0, reg);
5503 /* for xchg, lock is implicit */
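         /* a LOCK prefix has already been handled by the generic lock
            generation above, so only take the lock here when the
            prefix was absent */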
5504 if (!(prefixes & PREFIX_LOCK))
5505 gen_helper_lock();
5506 gen_op_ld_T1_A0(ot + s->mem_index);
5507 gen_op_st_T0_A0(ot + s->mem_index);
5508 if (!(prefixes & PREFIX_LOCK))
5509 gen_helper_unlock();
5510 gen_op_mov_reg_T1(ot, reg);
5511 }
5512 break;
5513 case 0xc4: /* les Gv */
5514 if (CODE64(s))
5515 goto illegal_op;
5516 op = R_ES;
5517 goto do_lxx;
5518 case 0xc5: /* lds Gv */
5519 if (CODE64(s))
5520 goto illegal_op;
5521 op = R_DS;
5522 goto do_lxx;
5523 case 0x1b2: /* lss Gv */
5524 op = R_SS;
5525 goto do_lxx;
5526 case 0x1b4: /* lfs Gv */
5527 op = R_FS;
5528 goto do_lxx;
5529 case 0x1b5: /* lgs Gv */
5530 op = R_GS;
5531 do_lxx:
5532 ot = dflag ? OT_LONG : OT_WORD;
5533 modrm = cpu_ldub_code(env, s->pc++);
5534 reg = ((modrm >> 3) & 7) | rex_r;
5535 mod = (modrm >> 6) & 3;
5536 if (mod == 3)
5537 goto illegal_op;
5538 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5539 gen_op_ld_T1_A0(ot + s->mem_index);
5540 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
5541 /* load the segment first to handle exceptions properly */
5542 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
5543 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5544 /* then put the data */
5545 gen_op_mov_reg_T1(ot, reg);
5546 if (s->is_jmp) {
5547 gen_jmp_im(s->pc - s->cs_base);
5548 gen_eob(s);
5549 }
5550 break;
5551
5552 /************************/
5553 /* shifts */
5554 case 0xc0:
5555 case 0xc1:
5556 /* shift Ev,Ib */
5557 shift = 2;
5558 grp2:
5559 {
5560 if ((b & 1) == 0)
5561 ot = OT_BYTE;
5562 else
5563 ot = dflag + OT_WORD;
5564
5565 modrm = cpu_ldub_code(env, s->pc++);
5566 mod = (modrm >> 6) & 3;
5567 op = (modrm >> 3) & 7;
5568
5569 if (mod != 3) {
5570 if (shift == 2) {
5571 s->rip_offset = 1;
5572 }
5573 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5574 opreg = OR_TMP0;
5575 } else {
5576 opreg = (modrm & 7) | REX_B(s);
5577 }
5578
5579 /* simpler op */
5580 if (shift == 0) {
5581 gen_shift(s, op, ot, opreg, OR_ECX);
5582 } else {
5583 if (shift == 2) {
5584 shift = cpu_ldub_code(env, s->pc++);
5585 }
5586 gen_shifti(s, op, ot, opreg, shift);
5587 }
5588 }
5589 break;
5590 case 0xd0:
5591 case 0xd1:
5592 /* shift Ev,1 */
5593 shift = 1;
5594 goto grp2;
5595 case 0xd2:
5596 case 0xd3:
5597 /* shift Ev,cl */
5598 shift = 0;
5599 goto grp2;
5600
5601 case 0x1a4: /* shld imm */
5602 op = 0;
5603 shift = 1;
5604 goto do_shiftd;
5605 case 0x1a5: /* shld cl */
5606 op = 0;
5607 shift = 0;
5608 goto do_shiftd;
5609 case 0x1ac: /* shrd imm */
5610 op = 1;
5611 shift = 1;
5612 goto do_shiftd;
5613 case 0x1ad: /* shrd cl */
5614 op = 1;
5615 shift = 0;
5616 do_shiftd:
5617 ot = dflag + OT_WORD;
5618 modrm = cpu_ldub_code(env, s->pc++);
5619 mod = (modrm >> 6) & 3;
5620 rm = (modrm & 7) | REX_B(s);
5621 reg = ((modrm >> 3) & 7) | rex_r;
5622 if (mod != 3) {
5623 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5624 opreg = OR_TMP0;
5625 } else {
5626 opreg = rm;
5627 }
5628 gen_op_mov_TN_reg(ot, 1, reg);
5629
5630 if (shift) {
5631 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5632 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5633 tcg_temp_free(imm);
5634 } else {
5635 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
5636 }
5637 break;
5638
5639 /************************/
5640 /* floats */
5641 case 0xd8 ... 0xdf:
5642 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5643 /* if CR0.EM or CR0.TS is set, generate an FPU exception */
5644 /* XXX: what to do if illegal op? */
5645 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5646 break;
5647 }
5648 modrm = cpu_ldub_code(env, s->pc++);
5649 mod = (modrm >> 6) & 3;
5650 rm = modrm & 7;
5651 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
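     /* fold the low 3 bits of the escape opcode (0xd8-0xdf) and the
        modrm reg field into one 6-bit index; the memory and register
        forms below both switch on it */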
5652 if (mod != 3) {
5653 /* memory op */
5654 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5655 switch(op) {
5656 case 0x00 ... 0x07: /* fxxxs */
5657 case 0x10 ... 0x17: /* fixxxl */
5658 case 0x20 ... 0x27: /* fxxxl */
5659 case 0x30 ... 0x37: /* fixxx */
5660 {
5661 int op1;
5662 op1 = op & 7;
5663
5664 switch(op >> 4) {
5665 case 0:
5666 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5667 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5668 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
5669 break;
5670 case 1:
5671 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5672 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5673 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5674 break;
5675 case 2:
5676 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5677 (s->mem_index >> 2) - 1);
5678 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
5679 break;
5680 case 3:
5681 default:
5682 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5683 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5684 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5685 break;
5686 }
5687
5688 gen_helper_fp_arith_ST0_FT0(op1);
5689 if (op1 == 3) {
5690 /* fcomp needs pop */
5691 gen_helper_fpop(cpu_env);
5692 }
5693 }
5694 break;
5695 case 0x08: /* flds */
5696 case 0x0a: /* fsts */
5697 case 0x0b: /* fstps */
5698 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5699 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5700 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5701 switch(op & 7) {
5702 case 0:
5703 switch(op >> 4) {
5704 case 0:
5705 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5706 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5707 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
5708 break;
5709 case 1:
5710 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5711 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5712 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5713 break;
5714 case 2:
5715 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5716 (s->mem_index >> 2) - 1);
5717 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
5718 break;
5719 case 3:
5720 default:
5721 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5722 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5723 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5724 break;
5725 }
5726 break;
5727 case 1:
5728 /* XXX: the corresponding CPUID bit must be tested ! */
5729 switch(op >> 4) {
5730 case 1:
5731 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
5732 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5733 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5734 break;
5735 case 2:
5736 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
5737 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5738 (s->mem_index >> 2) - 1);
5739 break;
5740 case 3:
5741 default:
5742 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
5743 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5744 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5745 break;
5746 }
5747 gen_helper_fpop(cpu_env);
5748 break;
5749 default:
5750 switch(op >> 4) {
5751 case 0:
5752 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
5753 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5754 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5755 break;
5756 case 1:
5757 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
5758 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5759 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5760 break;
5761 case 2:
5762 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
5763 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5764 (s->mem_index >> 2) - 1);
5765 break;
5766 case 3:
5767 default:
5768 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
5769 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5770 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5771 break;
5772 }
5773 if ((op & 7) == 3)
5774 gen_helper_fpop(cpu_env);
5775 break;
5776 }
5777 break;
5778 case 0x0c: /* fldenv mem */
5779 gen_update_cc_op(s);
5780 gen_jmp_im(pc_start - s->cs_base);
5781 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5782 break;
5783 case 0x0d: /* fldcw mem */
5784 gen_op_ld_T0_A0(OT_WORD + s->mem_index);
5785 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5786 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
5787 break;
5788 case 0x0e: /* fnstenv mem */
5789 gen_update_cc_op(s);
5790 gen_jmp_im(pc_start - s->cs_base);
5791 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5792 break;
5793 case 0x0f: /* fnstcw mem */
5794 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
5795 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5796 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5797 break;
5798 case 0x1d: /* fldt mem */
5799 gen_update_cc_op(s);
5800 gen_jmp_im(pc_start - s->cs_base);
5801 gen_helper_fldt_ST0(cpu_env, cpu_A0);
5802 break;
5803 case 0x1f: /* fstpt mem */
5804 gen_update_cc_op(s);
5805 gen_jmp_im(pc_start - s->cs_base);
5806 gen_helper_fstt_ST0(cpu_env, cpu_A0);
5807 gen_helper_fpop(cpu_env);
5808 break;
5809 case 0x2c: /* frstor mem */
5810 gen_update_cc_op(s);
5811 gen_jmp_im(pc_start - s->cs_base);
5812 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5813 break;
5814 case 0x2e: /* fnsave mem */
5815 gen_update_cc_op(s);
5816 gen_jmp_im(pc_start - s->cs_base);
5817 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5818 break;
5819 case 0x2f: /* fnstsw mem */
5820 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
5821 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5822 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5823 break;
5824 case 0x3c: /* fbld */
5825 gen_update_cc_op(s);
5826 gen_jmp_im(pc_start - s->cs_base);
5827 gen_helper_fbld_ST0(cpu_env, cpu_A0);
5828 break;
5829 case 0x3e: /* fbstp */
5830 gen_update_cc_op(s);
5831 gen_jmp_im(pc_start - s->cs_base);
5832 gen_helper_fbst_ST0(cpu_env, cpu_A0);
5833 gen_helper_fpop(cpu_env);
5834 break;
5835 case 0x3d: /* fildll */
5836 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5837 (s->mem_index >> 2) - 1);
5838 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
5839 break;
5840 case 0x3f: /* fistpll */
5841 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
5842 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5843 (s->mem_index >> 2) - 1);
5844 gen_helper_fpop(cpu_env);
5845 break;
5846 default:
5847 goto illegal_op;
5848 }
5849 } else {
5850 /* register float ops */
5851 opreg = rm;
5852
5853 switch(op) {
5854 case 0x08: /* fld sti */
5855 gen_helper_fpush(cpu_env);
5856 gen_helper_fmov_ST0_STN(cpu_env,
5857 tcg_const_i32((opreg + 1) & 7));
5858 break;
5859 case 0x09: /* fxchg sti */
5860 case 0x29: /* fxchg4 sti, undocumented op */
5861 case 0x39: /* fxchg7 sti, undocumented op */
5862 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
5863 break;
5864 case 0x0a: /* grp d9/2 */
5865 switch(rm) {
5866 case 0: /* fnop */
5867 /* check exceptions (FreeBSD FPU probe) */
5868 gen_update_cc_op(s);
5869 gen_jmp_im(pc_start - s->cs_base);
5870 gen_helper_fwait(cpu_env);
5871 break;
5872 default:
5873 goto illegal_op;
5874 }
5875 break;
5876 case 0x0c: /* grp d9/4 */
5877 switch(rm) {
5878 case 0: /* fchs */
5879 gen_helper_fchs_ST0(cpu_env);
5880 break;
5881 case 1: /* fabs */
5882 gen_helper_fabs_ST0(cpu_env);
5883 break;
5884 case 4: /* ftst */
5885 gen_helper_fldz_FT0(cpu_env);
5886 gen_helper_fcom_ST0_FT0(cpu_env);
5887 break;
5888 case 5: /* fxam */
5889 gen_helper_fxam_ST0(cpu_env);
5890 break;
5891 default:
5892 goto illegal_op;
5893 }
5894 break;
5895 case 0x0d: /* grp d9/5 */
5896 {
5897 switch(rm) {
5898 case 0:
5899 gen_helper_fpush(cpu_env);
5900 gen_helper_fld1_ST0(cpu_env);
5901 break;
5902 case 1:
5903 gen_helper_fpush(cpu_env);
5904 gen_helper_fldl2t_ST0(cpu_env);
5905 break;
5906 case 2:
5907 gen_helper_fpush(cpu_env);
5908 gen_helper_fldl2e_ST0(cpu_env);
5909 break;
5910 case 3:
5911 gen_helper_fpush(cpu_env);
5912 gen_helper_fldpi_ST0(cpu_env);
5913 break;
5914 case 4:
5915 gen_helper_fpush(cpu_env);
5916 gen_helper_fldlg2_ST0(cpu_env);
5917 break;
5918 case 5:
5919 gen_helper_fpush(cpu_env);
5920 gen_helper_fldln2_ST0(cpu_env);
5921 break;
5922 case 6:
5923 gen_helper_fpush(cpu_env);
5924 gen_helper_fldz_ST0(cpu_env);
5925 break;
5926 default:
5927 goto illegal_op;
5928 }
5929 }
5930 break;
5931 case 0x0e: /* grp d9/6 */
5932 switch(rm) {
5933 case 0: /* f2xm1 */
5934 gen_helper_f2xm1(cpu_env);
5935 break;
5936 case 1: /* fyl2x */
5937 gen_helper_fyl2x(cpu_env);
5938 break;
5939 case 2: /* fptan */
5940 gen_helper_fptan(cpu_env);
5941 break;
5942 case 3: /* fpatan */
5943 gen_helper_fpatan(cpu_env);
5944 break;
5945 case 4: /* fxtract */
5946 gen_helper_fxtract(cpu_env);
5947 break;
5948 case 5: /* fprem1 */
5949 gen_helper_fprem1(cpu_env);
5950 break;
5951 case 6: /* fdecstp */
5952 gen_helper_fdecstp(cpu_env);
5953 break;
5954 default:
5955 case 7: /* fincstp */
5956 gen_helper_fincstp(cpu_env);
5957 break;
5958 }
5959 break;
5960 case 0x0f: /* grp d9/7 */
5961 switch(rm) {
5962 case 0: /* fprem */
5963 gen_helper_fprem(cpu_env);
5964 break;
5965 case 1: /* fyl2xp1 */
5966 gen_helper_fyl2xp1(cpu_env);
5967 break;
5968 case 2: /* fsqrt */
5969 gen_helper_fsqrt(cpu_env);
5970 break;
5971 case 3: /* fsincos */
5972 gen_helper_fsincos(cpu_env);
5973 break;
5974 case 5: /* fscale */
5975 gen_helper_fscale(cpu_env);
5976 break;
5977 case 4: /* frndint */
5978 gen_helper_frndint(cpu_env);
5979 break;
5980 case 6: /* fsin */
5981 gen_helper_fsin(cpu_env);
5982 break;
5983 default:
5984 case 7: /* fcos */
5985 gen_helper_fcos(cpu_env);
5986 break;
5987 }
5988 break;
5989 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5990 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5991 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5992 {
5993 int op1;
5994
5995 op1 = op & 7;
5996 if (op >= 0x20) {
5997 gen_helper_fp_arith_STN_ST0(op1, opreg);
5998 if (op >= 0x30)
5999 gen_helper_fpop(cpu_env);
6000 } else {
6001 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6002 gen_helper_fp_arith_ST0_FT0(op1);
6003 }
6004 }
6005 break;
6006 case 0x02: /* fcom */
6007 case 0x22: /* fcom2, undocumented op */
6008 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6009 gen_helper_fcom_ST0_FT0(cpu_env);
6010 break;
6011 case 0x03: /* fcomp */
6012 case 0x23: /* fcomp3, undocumented op */
6013 case 0x32: /* fcomp5, undocumented op */
6014 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6015 gen_helper_fcom_ST0_FT0(cpu_env);
6016 gen_helper_fpop(cpu_env);
6017 break;
6018 case 0x15: /* da/5 */
6019 switch(rm) {
6020 case 1: /* fucompp */
6021 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6022 gen_helper_fucom_ST0_FT0(cpu_env);
6023 gen_helper_fpop(cpu_env);
6024 gen_helper_fpop(cpu_env);
6025 break;
6026 default:
6027 goto illegal_op;
6028 }
6029 break;
6030 case 0x1c:
6031 switch(rm) {
6032 case 0: /* feni (287 only, just do nop here) */
6033 break;
6034 case 1: /* fdisi (287 only, just do nop here) */
6035 break;
6036 case 2: /* fclex */
6037 gen_helper_fclex(cpu_env);
6038 break;
6039 case 3: /* fninit */
6040 gen_helper_fninit(cpu_env);
6041 break;
6042 case 4: /* fsetpm (287 only, just do nop here) */
6043 break;
6044 default:
6045 goto illegal_op;
6046 }
6047 break;
6048 case 0x1d: /* fucomi */
6049 gen_update_cc_op(s);
6050 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6051 gen_helper_fucomi_ST0_FT0(cpu_env);
6052 set_cc_op(s, CC_OP_EFLAGS);
6053 break;
6054 case 0x1e: /* fcomi */
6055 gen_update_cc_op(s);
6056 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6057 gen_helper_fcomi_ST0_FT0(cpu_env);
6058 set_cc_op(s, CC_OP_EFLAGS);
6059 break;
6060 case 0x28: /* ffree sti */
6061 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6062 break;
6063 case 0x2a: /* fst sti */
6064 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6065 break;
6066 case 0x2b: /* fstp sti */
6067 case 0x0b: /* fstp1 sti, undocumented op */
6068 case 0x3a: /* fstp8 sti, undocumented op */
6069 case 0x3b: /* fstp9 sti, undocumented op */
6070 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6071 gen_helper_fpop(cpu_env);
6072 break;
6073 case 0x2c: /* fucom st(i) */
6074 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6075 gen_helper_fucom_ST0_FT0(cpu_env);
6076 break;
6077 case 0x2d: /* fucomp st(i) */
6078 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6079 gen_helper_fucom_ST0_FT0(cpu_env);
6080 gen_helper_fpop(cpu_env);
6081 break;
6082 case 0x33: /* de/3 */
6083 switch(rm) {
6084 case 1: /* fcompp */
6085 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6086 gen_helper_fcom_ST0_FT0(cpu_env);
6087 gen_helper_fpop(cpu_env);
6088 gen_helper_fpop(cpu_env);
6089 break;
6090 default:
6091 goto illegal_op;
6092 }
6093 break;
6094 case 0x38: /* ffreep sti, undocumented op */
6095 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6096 gen_helper_fpop(cpu_env);
6097 break;
6098 case 0x3c: /* df/4 */
6099 switch(rm) {
6100 case 0: /* fnstsw ax */
6101 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
6102 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6103 gen_op_mov_reg_T0(OT_WORD, R_EAX);
6104 break;
6105 default:
6106 goto illegal_op;
6107 }
6108 break;
6109 case 0x3d: /* fucomip */
6110 gen_update_cc_op(s);
6111 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6112 gen_helper_fucomi_ST0_FT0(cpu_env);
6113 gen_helper_fpop(cpu_env);
6114 set_cc_op(s, CC_OP_EFLAGS);
6115 break;
6116 case 0x3e: /* fcomip */
6117 gen_update_cc_op(s);
6118 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6119 gen_helper_fcomi_ST0_FT0(cpu_env);
6120 gen_helper_fpop(cpu_env);
6121 set_cc_op(s, CC_OP_EFLAGS);
6122 break;
6123 case 0x10 ... 0x13: /* fcmovxx */
6124 case 0x18 ... 0x1b:
6125 {
6126 int op1, l1;
6127 static const uint8_t fcmov_cc[8] = {
6128 (JCC_B << 1),
6129 (JCC_Z << 1),
6130 (JCC_BE << 1),
6131 (JCC_P << 1),
6132 };
6133 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
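/* bit 3 of the opcode selects the negated forms (fcmovnb etc.); op1 is the */
/* inverse condition, so the branch skips the move unless the condition holds */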
6134 l1 = gen_new_label();
6135 gen_jcc1_noeob(s, op1, l1);
6136 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
6137 gen_set_label(l1);
6138 }
6139 break;
6140 default:
6141 goto illegal_op;
6142 }
6143 }
6144 break;
6145 /************************/
6146 /* string ops */
6147
6148 case 0xa4: /* movsS */
6149 case 0xa5:
6150 if ((b & 1) == 0)
6151 ot = OT_BYTE;
6152 else
6153 ot = dflag + OT_WORD;
6154
6155 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6156 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6157 } else {
6158 gen_movs(s, ot);
6159 }
6160 break;
6161
6162 case 0xaa: /* stosS */
6163 case 0xab:
6164 if ((b & 1) == 0)
6165 ot = OT_BYTE;
6166 else
6167 ot = dflag + OT_WORD;
6168
6169 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6170 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6171 } else {
6172 gen_stos(s, ot);
6173 }
6174 break;
6175 case 0xac: /* lodsS */
6176 case 0xad:
6177 if ((b & 1) == 0)
6178 ot = OT_BYTE;
6179 else
6180 ot = dflag + OT_WORD;
6181 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6182 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6183 } else {
6184 gen_lods(s, ot);
6185 }
6186 break;
6187 case 0xae: /* scasS */
6188 case 0xaf:
6189 if ((b & 1) == 0)
6190 ot = OT_BYTE;
6191 else
6192 ot = dflag + OT_WORD;
6193 if (prefixes & PREFIX_REPNZ) {
6194 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6195 } else if (prefixes & PREFIX_REPZ) {
6196 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6197 } else {
6198 gen_scas(s, ot);
6199 }
6200 break;
6201
6202 case 0xa6: /* cmpsS */
6203 case 0xa7:
6204 if ((b & 1) == 0)
6205 ot = OT_BYTE;
6206 else
6207 ot = dflag + OT_WORD;
6208 if (prefixes & PREFIX_REPNZ) {
6209 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6210 } else if (prefixes & PREFIX_REPZ) {
6211 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6212 } else {
6213 gen_cmps(s, ot);
6214 }
6215 break;
6216 case 0x6c: /* insS */
6217 case 0x6d:
6218 if ((b & 1) == 0)
6219 ot = OT_BYTE;
6220 else
6221 ot = dflag ? OT_LONG : OT_WORD;
6222 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6223 gen_op_andl_T0_ffff();
6224 gen_check_io(s, ot, pc_start - s->cs_base,
6225 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6226 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6227 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6228 } else {
6229 gen_ins(s, ot);
6230 if (use_icount) {
6231 gen_jmp(s, s->pc - s->cs_base);
6232 }
6233 }
6234 break;
6235 case 0x6e: /* outsS */
6236 case 0x6f:
6237 if ((b & 1) == 0)
6238 ot = OT_BYTE;
6239 else
6240 ot = dflag ? OT_LONG : OT_WORD;
6241 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6242 gen_op_andl_T0_ffff();
6243 gen_check_io(s, ot, pc_start - s->cs_base,
6244 svm_is_rep(prefixes) | 4);
6245 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6246 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6247 } else {
6248 gen_outs(s, ot);
6249 if (use_icount) {
6250 gen_jmp(s, s->pc - s->cs_base);
6251 }
6252 }
6253 break;
6254
6255 /************************/
6256 /* port I/O */
6257
6258 case 0xe4:
6259 case 0xe5:
6260 if ((b & 1) == 0)
6261 ot = OT_BYTE;
6262 else
6263 ot = dflag ? OT_LONG : OT_WORD;
6264 val = cpu_ldub_code(env, s->pc++);
6265 gen_op_movl_T0_im(val);
6266 gen_check_io(s, ot, pc_start - s->cs_base,
6267 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
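/* with icount enabled, bracket the I/O access with gen_io_start/gen_io_end */
/* and end the TB so the instruction count stays exact */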
6268 if (use_icount)
6269 gen_io_start();
6270 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6271 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6272 gen_op_mov_reg_T1(ot, R_EAX);
6273 if (use_icount) {
6274 gen_io_end();
6275 gen_jmp(s, s->pc - s->cs_base);
6276 }
6277 break;
6278 case 0xe6:
6279 case 0xe7:
6280 if ((b & 1) == 0)
6281 ot = OT_BYTE;
6282 else
6283 ot = dflag ? OT_LONG : OT_WORD;
6284 val = cpu_ldub_code(env, s->pc++);
6285 gen_op_movl_T0_im(val);
6286 gen_check_io(s, ot, pc_start - s->cs_base,
6287 svm_is_rep(prefixes));
6288 gen_op_mov_TN_reg(ot, 1, R_EAX);
6289
6290 if (use_icount)
6291 gen_io_start();
6292 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6293 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6294 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6295 if (use_icount) {
6296 gen_io_end();
6297 gen_jmp(s, s->pc - s->cs_base);
6298 }
6299 break;
6300 case 0xec:
6301 case 0xed:
6302 if ((b & 1) == 0)
6303 ot = OT_BYTE;
6304 else
6305 ot = dflag ? OT_LONG : OT_WORD;
6306 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6307 gen_op_andl_T0_ffff();
6308 gen_check_io(s, ot, pc_start - s->cs_base,
6309 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6310 if (use_icount)
6311 gen_io_start();
6312 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6313 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6314 gen_op_mov_reg_T1(ot, R_EAX);
6315 if (use_icount) {
6316 gen_io_end();
6317 gen_jmp(s, s->pc - s->cs_base);
6318 }
6319 break;
6320 case 0xee:
6321 case 0xef:
6322 if ((b & 1) == 0)
6323 ot = OT_BYTE;
6324 else
6325 ot = dflag ? OT_LONG : OT_WORD;
6326 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6327 gen_op_andl_T0_ffff();
6328 gen_check_io(s, ot, pc_start - s->cs_base,
6329 svm_is_rep(prefixes));
6330 gen_op_mov_TN_reg(ot, 1, R_EAX);
6331
6332 if (use_icount)
6333 gen_io_start();
6334 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6335 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6336 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6337 if (use_icount) {
6338 gen_io_end();
6339 gen_jmp(s, s->pc - s->cs_base);
6340 }
6341 break;
6342
6343 /************************/
6344 /* control */
6345 case 0xc2: /* ret im */
6346 val = cpu_ldsw_code(env, s->pc);
6347 s->pc += 2;
6348 gen_pop_T0(s);
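/* in 64-bit mode the operand size of a near ret defaults to 64 bits */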
6349 if (CODE64(s) && s->dflag)
6350 s->dflag = 2;
6351 gen_stack_update(s, val + (2 << s->dflag));
6352 if (s->dflag == 0)
6353 gen_op_andl_T0_ffff();
6354 gen_op_jmp_T0();
6355 gen_eob(s);
6356 break;
6357 case 0xc3: /* ret */
6358 gen_pop_T0(s);
6359 gen_pop_update(s);
6360 if (s->dflag == 0)
6361 gen_op_andl_T0_ffff();
6362 gen_op_jmp_T0();
6363 gen_eob(s);
6364 break;
6365 case 0xca: /* lret im */
6366 val = cpu_ldsw_code(env, s->pc);
6367 s->pc += 2;
6368 do_lret:
6369 if (s->pe && !s->vm86) {
6370 gen_update_cc_op(s);
6371 gen_jmp_im(pc_start - s->cs_base);
6372 gen_helper_lret_protected(cpu_env, tcg_const_i32(s->dflag),
6373 tcg_const_i32(val));
6374 } else {
6375 gen_stack_A0(s);
6376 /* pop offset */
6377 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6378 if (s->dflag == 0)
6379 gen_op_andl_T0_ffff();
6380 /* NOTE: keeping EIP updated is not a problem in case of
6381 exception */
6382 gen_op_jmp_T0();
6383 /* pop selector */
6384 gen_op_addl_A0_im(2 << s->dflag);
6385 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6386 gen_op_movl_seg_T0_vm(R_CS);
6387 /* add stack offset */
6388 gen_stack_update(s, val + (4 << s->dflag));
6389 }
6390 gen_eob(s);
6391 break;
6392 case 0xcb: /* lret */
6393 val = 0;
6394 goto do_lret;
6395 case 0xcf: /* iret */
6396 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6397 if (!s->pe) {
6398 /* real mode */
6399 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
6400 set_cc_op(s, CC_OP_EFLAGS);
6401 } else if (s->vm86) {
6402 if (s->iopl != 3) {
6403 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6404 } else {
6405 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
6406 set_cc_op(s, CC_OP_EFLAGS);
6407 }
6408 } else {
6409 gen_update_cc_op(s);
6410 gen_jmp_im(pc_start - s->cs_base);
6411 gen_helper_iret_protected(cpu_env, tcg_const_i32(s->dflag),
6412 tcg_const_i32(s->pc - s->cs_base));
6413 set_cc_op(s, CC_OP_EFLAGS);
6414 }
6415 gen_eob(s);
6416 break;
6417 case 0xe8: /* call im */
6418 {
6419 if (dflag)
6420 tval = (int32_t)insn_get(env, s, OT_LONG);
6421 else
6422 tval = (int16_t)insn_get(env, s, OT_WORD);
6423 next_eip = s->pc - s->cs_base;
6424 tval += next_eip;
6425 if (s->dflag == 0)
6426 tval &= 0xffff;
6427 else if(!CODE64(s))
6428 tval &= 0xffffffff;
6429 gen_movtl_T0_im(next_eip);
6430 gen_push_T0(s);
6431 gen_jmp(s, tval);
6432 }
6433 break;
6434 case 0x9a: /* lcall im */
6435 {
6436 unsigned int selector, offset;
6437
6438 if (CODE64(s))
6439 goto illegal_op;
6440 ot = dflag ? OT_LONG : OT_WORD;
6441 offset = insn_get(env, s, ot);
6442 selector = insn_get(env, s, OT_WORD);
6443
6444 gen_op_movl_T0_im(selector);
6445 gen_op_movl_T1_imu(offset);
6446 }
6447 goto do_lcall;
6448 case 0xe9: /* jmp im */
6449 if (dflag)
6450 tval = (int32_t)insn_get(env, s, OT_LONG);
6451 else
6452 tval = (int16_t)insn_get(env, s, OT_WORD);
6453 tval += s->pc - s->cs_base;
6454 if (s->dflag == 0)
6455 tval &= 0xffff;
6456 else if(!CODE64(s))
6457 tval &= 0xffffffff;
6458 gen_jmp(s, tval);
6459 break;
6460 case 0xea: /* ljmp im */
6461 {
6462 unsigned int selector, offset;
6463
6464 if (CODE64(s))
6465 goto illegal_op;
6466 ot = dflag ? OT_LONG : OT_WORD;
6467 offset = insn_get(env, s, ot);
6468 selector = insn_get(env, s, OT_WORD);
6469
6470 gen_op_movl_T0_im(selector);
6471 gen_op_movl_T1_imu(offset);
6472 }
6473 goto do_ljmp;
6474 case 0xeb: /* jmp Jb */
6475 tval = (int8_t)insn_get(env, s, OT_BYTE);
6476 tval += s->pc - s->cs_base;
6477 if (s->dflag == 0)
6478 tval &= 0xffff;
6479 gen_jmp(s, tval);
6480 break;
6481 case 0x70 ... 0x7f: /* jcc Jb */
6482 tval = (int8_t)insn_get(env, s, OT_BYTE);
6483 goto do_jcc;
6484 case 0x180 ... 0x18f: /* jcc Jv */
6485 if (dflag) {
6486 tval = (int32_t)insn_get(env, s, OT_LONG);
6487 } else {
6488 tval = (int16_t)insn_get(env, s, OT_WORD);
6489 }
6490 do_jcc:
6491 next_eip = s->pc - s->cs_base;
6492 tval += next_eip;
6493 if (s->dflag == 0)
6494 tval &= 0xffff;
6495 gen_jcc(s, b, tval, next_eip);
6496 break;
6497
6498 case 0x190 ... 0x19f: /* setcc Gv */
6499 modrm = cpu_ldub_code(env, s->pc++);
6500 gen_setcc1(s, b, cpu_T[0]);
6501 gen_ldst_modrm(env, s, modrm, OT_BYTE, OR_TMP0, 1);
6502 break;
6503 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6504 ot = dflag + OT_WORD;
6505 modrm = cpu_ldub_code(env, s->pc++);
6506 reg = ((modrm >> 3) & 7) | rex_r;
6507 gen_cmovcc1(env, s, ot, b, modrm, reg);
6508 break;
6509
6510 /************************/
6511 /* flags */
6512 case 0x9c: /* pushf */
6513 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6514 if (s->vm86 && s->iopl != 3) {
6515 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6516 } else {
6517 gen_update_cc_op(s);
6518 gen_helper_read_eflags(cpu_T[0], cpu_env);
6519 gen_push_T0(s);
6520 }
6521 break;
6522 case 0x9d: /* popf */
6523 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6524 if (s->vm86 && s->iopl != 3) {
6525 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6526 } else {
6527 gen_pop_T0(s);
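/* which eflags bits may change depends on privilege: CPL 0 may update */
/* IOPL and IF, CPL <= IOPL may update IF only, otherwise neither */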
6528 if (s->cpl == 0) {
6529 if (s->dflag) {
6530 gen_helper_write_eflags(cpu_env, cpu_T[0],
6531 tcg_const_i32((TF_MASK | AC_MASK |
6532 ID_MASK | NT_MASK |
6533 IF_MASK |
6534 IOPL_MASK)));
6535 } else {
6536 gen_helper_write_eflags(cpu_env, cpu_T[0],
6537 tcg_const_i32((TF_MASK | AC_MASK |
6538 ID_MASK | NT_MASK |
6539 IF_MASK | IOPL_MASK)
6540 & 0xffff));
6541 }
6542 } else {
6543 if (s->cpl <= s->iopl) {
6544 if (s->dflag) {
6545 gen_helper_write_eflags(cpu_env, cpu_T[0],
6546 tcg_const_i32((TF_MASK |
6547 AC_MASK |
6548 ID_MASK |
6549 NT_MASK |
6550 IF_MASK)));
6551 } else {
6552 gen_helper_write_eflags(cpu_env, cpu_T[0],
6553 tcg_const_i32((TF_MASK |
6554 AC_MASK |
6555 ID_MASK |
6556 NT_MASK |
6557 IF_MASK)
6558 & 0xffff));
6559 }
6560 } else {
6561 if (s->dflag) {
6562 gen_helper_write_eflags(cpu_env, cpu_T[0],
6563 tcg_const_i32((TF_MASK | AC_MASK |
6564 ID_MASK | NT_MASK)));
6565 } else {
6566 gen_helper_write_eflags(cpu_env, cpu_T[0],
6567 tcg_const_i32((TF_MASK | AC_MASK |
6568 ID_MASK | NT_MASK)
6569 & 0xffff));
6570 }
6571 }
6572 }
6573 gen_pop_update(s);
6574 set_cc_op(s, CC_OP_EFLAGS);
6575 /* abort translation because TF/AC flag may change */
6576 gen_jmp_im(s->pc - s->cs_base);
6577 gen_eob(s);
6578 }
6579 break;
6580 case 0x9e: /* sahf */
6581 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6582 goto illegal_op;
6583 gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
6584 gen_compute_eflags(s);
6585 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6586 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6587 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6588 break;
6589 case 0x9f: /* lahf */
6590 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6591 goto illegal_op;
6592 gen_compute_eflags(s);
6593 /* Note: gen_compute_eflags() only gives the condition codes */
6594 tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02); /* eflags bit 1 is always set */
6595 gen_op_mov_reg_T0(OT_BYTE, R_AH);
6596 break;
6597 case 0xf5: /* cmc */
6598 gen_compute_eflags(s);
6599 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6600 break;
6601 case 0xf8: /* clc */
6602 gen_compute_eflags(s);
6603 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6604 break;
6605 case 0xf9: /* stc */
6606 gen_compute_eflags(s);
6607 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6608 break;
6609 case 0xfc: /* cld */
6610 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6611 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6612 break;
6613 case 0xfd: /* std */
6614 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6615 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6616 break;
6617
6618 /************************/
6619 /* bit operations */
6620 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6621 ot = dflag + OT_WORD;
6622 modrm = cpu_ldub_code(env, s->pc++);
6623 op = (modrm >> 3) & 7;
6624 mod = (modrm >> 6) & 3;
6625 rm = (modrm & 7) | REX_B(s);
6626 if (mod != 3) {
6627 s->rip_offset = 1;
6628 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
6629 gen_op_ld_T0_A0(ot + s->mem_index);
6630 } else {
6631 gen_op_mov_TN_reg(ot, 0, rm);
6632 }
6633 /* load shift */
6634 val = cpu_ldub_code(env, s->pc++);
6635 gen_op_movl_T1_im(val);
6636 if (op < 4)
6637 goto illegal_op;
6638 op -= 4;
6639 goto bt_op;
6640 case 0x1a3: /* bt Gv, Ev */
6641 op = 0;
6642 goto do_btx;
6643 case 0x1ab: /* bts */
6644 op = 1;
6645 goto do_btx;
6646 case 0x1b3: /* btr */
6647 op = 2;
6648 goto do_btx;
6649 case 0x1bb: /* btc */
6650 op = 3;
6651 do_btx:
6652 ot = dflag + OT_WORD;
6653 modrm = cpu_ldub_code(env, s->pc++);
6654 reg = ((modrm >> 3) & 7) | rex_r;
6655 mod = (modrm >> 6) & 3;
6656 rm = (modrm & 7) | REX_B(s);
6657 gen_op_mov_TN_reg(OT_LONG, 1, reg);
6658 if (mod != 3) {
6659 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
6660 /* memory operand: the signed bit index also selects the addressed word, so add (bit index / operand bits) * operand size to A0 */
6661 gen_exts(ot, cpu_T[1]);
6662 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6663 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6664 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6665 gen_op_ld_T0_A0(ot + s->mem_index);
6666 } else {
6667 gen_op_mov_TN_reg(ot, 0, rm);
6668 }
6669 bt_op:
6670 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6671 switch(op) {
6672 case 0:
6673 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
6674 tcg_gen_movi_tl(cpu_cc_dst, 0);
6675 break;
6676 case 1:
6677 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6678 tcg_gen_movi_tl(cpu_tmp0, 1);
6679 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6680 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6681 break;
6682 case 2:
6683 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6684 tcg_gen_movi_tl(cpu_tmp0, 1);
6685 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6686 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6687 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6688 break;
6689 default:
6690 case 3:
6691 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6692 tcg_gen_movi_tl(cpu_tmp0, 1);
6693 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6694 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6695 break;
6696 }
6697 set_cc_op(s, CC_OP_SARB + ot);
6698 if (op != 0) {
6699 if (mod != 3)
6700 gen_op_st_T0_A0(ot + s->mem_index);
6701 else
6702 gen_op_mov_reg_T0(ot, rm);
6703 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6704 tcg_gen_movi_tl(cpu_cc_dst, 0);
6705 }
6706 break;
6707 case 0x1bc: /* bsf */
6708 case 0x1bd: /* bsr */
6709 {
6710 int label1;
6711 TCGv t0;
6712
6713 ot = dflag + OT_WORD;
6714 modrm = cpu_ldub_code(env, s->pc++);
6715 reg = ((modrm >> 3) & 7) | rex_r;
6716 gen_ldst_modrm(env, s,modrm, ot, OR_TMP0, 0);
6717 gen_extu(ot, cpu_T[0]);
6718 t0 = tcg_temp_local_new();
6719 tcg_gen_mov_tl(t0, cpu_T[0]);
6720 if ((b & 1) && (prefixes & PREFIX_REPZ) &&
6721 (s->cpuid_ext3_features & CPUID_EXT3_ABM)) {
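/* a repz prefix on bsr with ABM support decodes as lzcnt */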
6722 switch(ot) {
6723 case OT_WORD: gen_helper_lzcnt(cpu_T[0], t0,
6724 tcg_const_i32(16)); break;
6725 case OT_LONG: gen_helper_lzcnt(cpu_T[0], t0,
6726 tcg_const_i32(32)); break;
6727 case OT_QUAD: gen_helper_lzcnt(cpu_T[0], t0,
6728 tcg_const_i32(64)); break;
6729 }
6730 gen_op_mov_reg_T0(ot, reg);
6731 } else {
6732 label1 = gen_new_label();
6733 tcg_gen_movi_tl(cpu_cc_dst, 0);
6734 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
6735 if (b & 1) {
6736 gen_helper_bsr(cpu_T[0], t0);
6737 } else {
6738 gen_helper_bsf(cpu_T[0], t0);
6739 }
6740 gen_op_mov_reg_T0(ot, reg);
6741 tcg_gen_movi_tl(cpu_cc_dst, 1);
6742 gen_set_label(label1);
6743 set_cc_op(s, CC_OP_LOGICB + ot);
6744 }
6745 tcg_temp_free(t0);
6746 }
6747 break;
6748 /************************/
6749 /* bcd */
6750 case 0x27: /* daa */
6751 if (CODE64(s))
6752 goto illegal_op;
6753 gen_update_cc_op(s);
6754 gen_helper_daa(cpu_env);
6755 set_cc_op(s, CC_OP_EFLAGS);
6756 break;
6757 case 0x2f: /* das */
6758 if (CODE64(s))
6759 goto illegal_op;
6760 gen_update_cc_op(s);
6761 gen_helper_das(cpu_env);
6762 set_cc_op(s, CC_OP_EFLAGS);
6763 break;
6764 case 0x37: /* aaa */
6765 if (CODE64(s))
6766 goto illegal_op;
6767 gen_update_cc_op(s);
6768 gen_helper_aaa(cpu_env);
6769 set_cc_op(s, CC_OP_EFLAGS);
6770 break;
6771 case 0x3f: /* aas */
6772 if (CODE64(s))
6773 goto illegal_op;
6774 gen_update_cc_op(s);
6775 gen_helper_aas(cpu_env);
6776 set_cc_op(s, CC_OP_EFLAGS);
6777 break;
6778 case 0xd4: /* aam */
6779 if (CODE64(s))
6780 goto illegal_op;
6781 val = cpu_ldub_code(env, s->pc++);
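/* aam divides AL by the immediate, so an immediate of zero raises #DE */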
6782 if (val == 0) {
6783 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6784 } else {
6785 gen_helper_aam(cpu_env, tcg_const_i32(val));
6786 set_cc_op(s, CC_OP_LOGICB);
6787 }
6788 break;
6789 case 0xd5: /* aad */
6790 if (CODE64(s))
6791 goto illegal_op;
6792 val = cpu_ldub_code(env, s->pc++);
6793 gen_helper_aad(cpu_env, tcg_const_i32(val));
6794 set_cc_op(s, CC_OP_LOGICB);
6795 break;
6796 /************************/
6797 /* misc */
6798 case 0x90: /* nop */
6799 /* XXX: correct lock test for all insn */
6800 if (prefixes & PREFIX_LOCK) {
6801 goto illegal_op;
6802 }
6803 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6804 if (REX_B(s)) {
6805 goto do_xchg_reg_eax;
6806 }
6807 if (prefixes & PREFIX_REPZ) {
6808 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
6809 }
6810 break;
6811 case 0x9b: /* fwait */
6812 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6813 (HF_MP_MASK | HF_TS_MASK)) {
6814 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6815 } else {
6816 gen_update_cc_op(s);
6817 gen_jmp_im(pc_start - s->cs_base);
6818 gen_helper_fwait(cpu_env);
6819 }
6820 break;
6821 case 0xcc: /* int3 */
6822 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6823 break;
6824 case 0xcd: /* int N */
6825 val = cpu_ldub_code(env, s->pc++);
6826 if (s->vm86 && s->iopl != 3) {
6827 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6828 } else {
6829 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6830 }
6831 break;
6832 case 0xce: /* into */
6833 if (CODE64(s))
6834 goto illegal_op;
6835 gen_update_cc_op(s);
6836 gen_jmp_im(pc_start - s->cs_base);
6837 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
6838 break;
6839 #ifdef WANT_ICEBP
6840 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6841 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6842 #if 1
6843 gen_debug(s, pc_start - s->cs_base);
6844 #else
6845 /* start debug */
6846 tb_flush(env);
6847 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6848 #endif
6849 break;
6850 #endif
6851 case 0xfa: /* cli */
6852 if (!s->vm86) {
6853 if (s->cpl <= s->iopl) {
6854 gen_helper_cli(cpu_env);
6855 } else {
6856 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6857 }
6858 } else {
6859 if (s->iopl == 3) {
6860 gen_helper_cli(cpu_env);
6861 } else {
6862 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6863 }
6864 }
6865 break;
6866 case 0xfb: /* sti */
6867 if (!s->vm86) {
6868 if (s->cpl <= s->iopl) {
6869 gen_sti:
6870 gen_helper_sti(cpu_env);
6871 /* interrupts are enabled only after the instruction following sti */
6872 /* if several consecutive instructions inhibit interrupts, only the
6873 first one needs to set the inhibit flag */
6874 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
6875 gen_helper_set_inhibit_irq(cpu_env);
6876 /* give a chance to handle pending irqs */
6877 gen_jmp_im(s->pc - s->cs_base);
6878 gen_eob(s);
6879 } else {
6880 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6881 }
6882 } else {
6883 if (s->iopl == 3) {
6884 goto gen_sti;
6885 } else {
6886 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6887 }
6888 }
6889 break;
6890 case 0x62: /* bound */
6891 if (CODE64(s))
6892 goto illegal_op;
6893 ot = dflag ? OT_LONG : OT_WORD;
6894 modrm = cpu_ldub_code(env, s->pc++);
6895 reg = (modrm >> 3) & 7;
6896 mod = (modrm >> 6) & 3;
6897 if (mod == 3)
6898 goto illegal_op;
6899 gen_op_mov_TN_reg(ot, 0, reg);
6900 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
6901 gen_jmp_im(pc_start - s->cs_base);
6902 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6903 if (ot == OT_WORD) {
6904 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
6905 } else {
6906 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
6907 }
6908 break;
6909 case 0x1c8 ... 0x1cf: /* bswap reg */
6910 reg = (b & 7) | REX_B(s);
6911 #ifdef TARGET_X86_64
6912 if (dflag == 2) {
6913 gen_op_mov_TN_reg(OT_QUAD, 0, reg);
6914 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
6915 gen_op_mov_reg_T0(OT_QUAD, reg);
6916 } else
6917 #endif
6918 {
6919 gen_op_mov_TN_reg(OT_LONG, 0, reg);
6920 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
6921 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
6922 gen_op_mov_reg_T0(OT_LONG, reg);
6923 }
6924 break;
6925 case 0xd6: /* salc */
6926 if (CODE64(s))
6927 goto illegal_op;
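/* salc: set AL to 0xff if CF is set, to 0 otherwise */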
6928 gen_compute_eflags_c(s, cpu_T[0]);
6929 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
6930 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
6931 break;
6932 case 0xe0: /* loopnz */
6933 case 0xe1: /* loopz */
6934 case 0xe2: /* loop */
6935 case 0xe3: /* jecxz */
6936 {
6937 int l1, l2, l3;
6938
6939 tval = (int8_t)insn_get(env, s, OT_BYTE);
6940 next_eip = s->pc - s->cs_base;
6941 tval += next_eip;
6942 if (s->dflag == 0)
6943 tval &= 0xffff;
6944
6945 l1 = gen_new_label();
6946 l2 = gen_new_label();
6947 l3 = gen_new_label();
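/* l1: branch taken, l3: branch not taken, l2: end of both paths */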
6948 b &= 3;
6949 switch(b) {
6950 case 0: /* loopnz */
6951 case 1: /* loopz */
6952 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6953 gen_op_jz_ecx(s->aflag, l3);
6954 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
6955 break;
6956 case 2: /* loop */
6957 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6958 gen_op_jnz_ecx(s->aflag, l1);
6959 break;
6960 default:
6961 case 3: /* jcxz */
6962 gen_op_jz_ecx(s->aflag, l1);
6963 break;
6964 }
6965
6966 gen_set_label(l3);
6967 gen_jmp_im(next_eip);
6968 tcg_gen_br(l2);
6969
6970 gen_set_label(l1);
6971 gen_jmp_im(tval);
6972 gen_set_label(l2);
6973 gen_eob(s);
6974 }
6975 break;
6976 case 0x130: /* wrmsr */
6977 case 0x132: /* rdmsr */
6978 if (s->cpl != 0) {
6979 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6980 } else {
6981 gen_update_cc_op(s);
6982 gen_jmp_im(pc_start - s->cs_base);
6983 if (b & 2) {
6984 gen_helper_rdmsr(cpu_env);
6985 } else {
6986 gen_helper_wrmsr(cpu_env);
6987 }
6988 }
6989 break;
6990 case 0x131: /* rdtsc */
6991 gen_update_cc_op(s);
6992 gen_jmp_im(pc_start - s->cs_base);
6993 if (use_icount)
6994 gen_io_start();
6995 gen_helper_rdtsc(cpu_env);
6996 if (use_icount) {
6997 gen_io_end();
6998 gen_jmp(s, s->pc - s->cs_base);
6999 }
7000 break;
7001 case 0x133: /* rdpmc */
7002 gen_update_cc_op(s);
7003 gen_jmp_im(pc_start - s->cs_base);
7004 gen_helper_rdpmc(cpu_env);
7005 break;
7006 case 0x134: /* sysenter */
7007 /* SYSENTER is valid in 64-bit mode only on Intel CPUs */
7008 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7009 goto illegal_op;
7010 if (!s->pe) {
7011 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7012 } else {
7013 gen_update_cc_op(s);
7014 gen_jmp_im(pc_start - s->cs_base);
7015 gen_helper_sysenter(cpu_env);
7016 gen_eob(s);
7017 }
7018 break;
7019 case 0x135: /* sysexit */
7020 /* SYSEXIT is valid in 64-bit mode only on Intel CPUs */
7021 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7022 goto illegal_op;
7023 if (!s->pe) {
7024 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7025 } else {
7026 gen_update_cc_op(s);
7027 gen_jmp_im(pc_start - s->cs_base);
7028 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag));
7029 gen_eob(s);
7030 }
7031 break;
7032 #ifdef TARGET_X86_64
7033 case 0x105: /* syscall */
7034 /* XXX: is it usable in real mode ? */
7035 gen_update_cc_op(s);
7036 gen_jmp_im(pc_start - s->cs_base);
7037 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
7038 gen_eob(s);
7039 break;
7040 case 0x107: /* sysret */
7041 if (!s->pe) {
7042 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7043 } else {
7044 gen_update_cc_op(s);
7045 gen_jmp_im(pc_start - s->cs_base);
7046 gen_helper_sysret(cpu_env, tcg_const_i32(s->dflag));
7047 /* condition codes are modified only in long mode */
7048 if (s->lma) {
7049 set_cc_op(s, CC_OP_EFLAGS);
7050 }
7051 gen_eob(s);
7052 }
7053 break;
7054 #endif
7055 case 0x1a2: /* cpuid */
7056 gen_update_cc_op(s);
7057 gen_jmp_im(pc_start - s->cs_base);
7058 gen_helper_cpuid(cpu_env);
7059 break;
7060 case 0xf4: /* hlt */
7061 if (s->cpl != 0) {
7062 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7063 } else {
7064 gen_update_cc_op(s);
7065 gen_jmp_im(pc_start - s->cs_base);
7066 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
7067 s->is_jmp = DISAS_TB_JUMP;
7068 }
7069 break;
7070 case 0x100:
7071 modrm = cpu_ldub_code(env, s->pc++);
7072 mod = (modrm >> 6) & 3;
7073 op = (modrm >> 3) & 7;
7074 switch(op) {
7075 case 0: /* sldt */
7076 if (!s->pe || s->vm86)
7077 goto illegal_op;
7078 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7079 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
7080 ot = OT_WORD;
7081 if (mod == 3)
7082 ot += s->dflag;
7083 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7084 break;
7085 case 2: /* lldt */
7086 if (!s->pe || s->vm86)
7087 goto illegal_op;
7088 if (s->cpl != 0) {
7089 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7090 } else {
7091 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7092 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7093 gen_jmp_im(pc_start - s->cs_base);
7094 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7095 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
7096 }
7097 break;
7098 case 1: /* str */
7099 if (!s->pe || s->vm86)
7100 goto illegal_op;
7101 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7102 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
7103 ot = OT_WORD;
7104 if (mod == 3)
7105 ot += s->dflag;
7106 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7107 break;
7108 case 3: /* ltr */
7109 if (!s->pe || s->vm86)
7110 goto illegal_op;
7111 if (s->cpl != 0) {
7112 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7113 } else {
7114 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7115 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7116 gen_jmp_im(pc_start - s->cs_base);
7117 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7118 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
7119 }
7120 break;
7121 case 4: /* verr */
7122 case 5: /* verw */
7123 if (!s->pe || s->vm86)
7124 goto illegal_op;
7125 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7126 gen_update_cc_op(s);
7127 if (op == 4) {
7128 gen_helper_verr(cpu_env, cpu_T[0]);
7129 } else {
7130 gen_helper_verw(cpu_env, cpu_T[0]);
7131 }
7132 set_cc_op(s, CC_OP_EFLAGS);
7133 break;
7134 default:
7135 goto illegal_op;
7136 }
7137 break;
7138 case 0x101:
7139 modrm = cpu_ldub_code(env, s->pc++);
7140 mod = (modrm >> 6) & 3;
7141 op = (modrm >> 3) & 7;
7142 rm = modrm & 7;
7143 switch(op) {
7144 case 0: /* sgdt */
7145 if (mod == 3)
7146 goto illegal_op;
7147 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7148 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7149 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
7150 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7151 gen_add_A0_im(s, 2);
7152 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
7153 if (!s->dflag)
7154 gen_op_andl_T0_im(0xffffff);
7155 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7156 break;
7157 case 1:
7158 if (mod == 3) {
7159 switch (rm) {
7160 case 0: /* monitor */
7161 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7162 s->cpl != 0)
7163 goto illegal_op;
7164 gen_update_cc_op(s);
7165 gen_jmp_im(pc_start - s->cs_base);
7166 #ifdef TARGET_X86_64
7167 if (s->aflag == 2) {
7168 gen_op_movq_A0_reg(R_EAX);
7169 } else
7170 #endif
7171 {
7172 gen_op_movl_A0_reg(R_EAX);
7173 if (s->aflag == 0)
7174 gen_op_andl_A0_ffff();
7175 }
7176 gen_add_A0_ds_seg(s);
7177 gen_helper_monitor(cpu_env, cpu_A0);
7178 break;
7179 case 1: /* mwait */
7180 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7181 s->cpl != 0)
7182 goto illegal_op;
7183 gen_update_cc_op(s);
7184 gen_jmp_im(pc_start - s->cs_base);
7185 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
7186 gen_eob(s);
7187 break;
7188 case 2: /* clac */
7189 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7190 s->cpl != 0) {
7191 goto illegal_op;
7192 }
7193 gen_helper_clac(cpu_env);
7194 gen_jmp_im(s->pc - s->cs_base);
7195 gen_eob(s);
7196 break;
7197 case 3: /* stac */
7198 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7199 s->cpl != 0) {
7200 goto illegal_op;
7201 }
7202 gen_helper_stac(cpu_env);
7203 gen_jmp_im(s->pc - s->cs_base);
7204 gen_eob(s);
7205 break;
7206 default:
7207 goto illegal_op;
7208 }
7209 } else { /* sidt */
7210 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7211 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7212 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7213 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7214 gen_add_A0_im(s, 2);
7215 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7216 if (!s->dflag)
7217 gen_op_andl_T0_im(0xffffff);
7218 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7219 }
7220 break;
7221 case 2: /* lgdt */
7222 case 3: /* lidt */
7223 if (mod == 3) {
7224 gen_update_cc_op(s);
7225 gen_jmp_im(pc_start - s->cs_base);
7226 switch(rm) {
7227 case 0: /* VMRUN */
7228 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7229 goto illegal_op;
7230 if (s->cpl != 0) {
7231 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7232 break;
7233 } else {
7234 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag),
7235 tcg_const_i32(s->pc - pc_start));
7236 tcg_gen_exit_tb(0);
7237 s->is_jmp = DISAS_TB_JUMP;
7238 }
7239 break;
7240 case 1: /* VMMCALL */
7241 if (!(s->flags & HF_SVME_MASK))
7242 goto illegal_op;
7243 gen_helper_vmmcall(cpu_env);
7244 break;
7245 case 2: /* VMLOAD */
7246 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7247 goto illegal_op;
7248 if (s->cpl != 0) {
7249 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7250 break;
7251 } else {
7252 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag));
7253 }
7254 break;
7255 case 3: /* VMSAVE */
7256 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7257 goto illegal_op;
7258 if (s->cpl != 0) {
7259 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7260 break;
7261 } else {
7262 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag));
7263 }
7264 break;
7265 case 4: /* STGI */
7266 if ((!(s->flags & HF_SVME_MASK) &&
7267 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7268 !s->pe)
7269 goto illegal_op;
7270 if (s->cpl != 0) {
7271 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7272 break;
7273 } else {
7274 gen_helper_stgi(cpu_env);
7275 }
7276 break;
7277 case 5: /* CLGI */
7278 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7279 goto illegal_op;
7280 if (s->cpl != 0) {
7281 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7282 break;
7283 } else {
7284 gen_helper_clgi(cpu_env);
7285 }
7286 break;
7287 case 6: /* SKINIT */
7288 if ((!(s->flags & HF_SVME_MASK) &&
7289 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7290 !s->pe)
7291 goto illegal_op;
7292 gen_helper_skinit(cpu_env);
7293 break;
7294 case 7: /* INVLPGA */
7295 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7296 goto illegal_op;
7297 if (s->cpl != 0) {
7298 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7299 break;
7300 } else {
7301 gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag));
7302 }
7303 break;
7304 default:
7305 goto illegal_op;
7306 }
7307 } else if (s->cpl != 0) {
7308 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7309 } else {
7310 gen_svm_check_intercept(s, pc_start,
7311 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7312 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7313 gen_op_ld_T1_A0(OT_WORD + s->mem_index);
7314 gen_add_A0_im(s, 2);
7315 gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7316 if (!s->dflag)
7317 gen_op_andl_T0_im(0xffffff);
7318 if (op == 2) {
7319 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7320 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7321 } else {
7322 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7323 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7324 }
7325 }
7326 break;
7327 case 4: /* smsw */
7328 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
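/* smsw reads the low 32 bits of CR0; on 64-bit big-endian hosts they are */
/* stored in the second half of the target_ulong field */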
7329 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7330 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7331 #else
7332 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7333 #endif
7334 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 1);
7335 break;
7336 case 6: /* lmsw */
7337 if (s->cpl != 0) {
7338 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7339 } else {
7340 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7341 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7342 gen_helper_lmsw(cpu_env, cpu_T[0]);
7343 gen_jmp_im(s->pc - s->cs_base);
7344 gen_eob(s);
7345 }
7346 break;
7347 case 7:
7348 if (mod != 3) { /* invlpg */
7349 if (s->cpl != 0) {
7350 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7351 } else {
7352 gen_update_cc_op(s);
7353 gen_jmp_im(pc_start - s->cs_base);
7354 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7355 gen_helper_invlpg(cpu_env, cpu_A0);
7356 gen_jmp_im(s->pc - s->cs_base);
7357 gen_eob(s);
7358 }
7359 } else {
7360 switch (rm) {
7361 case 0: /* swapgs */
7362 #ifdef TARGET_X86_64
7363 if (CODE64(s)) {
7364 if (s->cpl != 0) {
7365 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7366 } else {
7367 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7368 offsetof(CPUX86State,segs[R_GS].base));
7369 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7370 offsetof(CPUX86State,kernelgsbase));
7371 tcg_gen_st_tl(cpu_T[1], cpu_env,
7372 offsetof(CPUX86State,segs[R_GS].base));
7373 tcg_gen_st_tl(cpu_T[0], cpu_env,
7374 offsetof(CPUX86State,kernelgsbase));
7375 }
7376 } else
7377 #endif
7378 {
7379 goto illegal_op;
7380 }
7381 break;
7382 case 1: /* rdtscp */
7383 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7384 goto illegal_op;
7385 gen_update_cc_op(s);
7386 gen_jmp_im(pc_start - s->cs_base);
7387 if (use_icount)
7388 gen_io_start();
7389 gen_helper_rdtscp(cpu_env);
7390 if (use_icount) {
7391 gen_io_end();
7392 gen_jmp(s, s->pc - s->cs_base);
7393 }
7394 break;
7395 default:
7396 goto illegal_op;
7397 }
7398 }
7399 break;
7400 default:
7401 goto illegal_op;
7402 }
7403 break;
7404 case 0x108: /* invd */
7405 case 0x109: /* wbinvd */
7406 if (s->cpl != 0) {
7407 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7408 } else {
7409 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7410 /* nothing to do */
7411 }
7412 break;
7413 case 0x63: /* arpl or movslS (x86_64) */
7414 #ifdef TARGET_X86_64
7415 if (CODE64(s)) {
7416 int d_ot;
7417 /* d_ot is the size of the destination */
7418 d_ot = dflag + OT_WORD;
7419
7420 modrm = cpu_ldub_code(env, s->pc++);
7421 reg = ((modrm >> 3) & 7) | rex_r;
7422 mod = (modrm >> 6) & 3;
7423 rm = (modrm & 7) | REX_B(s);
7424
7425 if (mod == 3) {
7426 gen_op_mov_TN_reg(OT_LONG, 0, rm);
7427 /* sign extend */
7428 if (d_ot == OT_QUAD)
7429 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7430 gen_op_mov_reg_T0(d_ot, reg);
7431 } else {
7432 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7433 if (d_ot == OT_QUAD) {
7434 gen_op_lds_T0_A0(OT_LONG + s->mem_index);
7435 } else {
7436 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7437 }
7438 gen_op_mov_reg_T0(d_ot, reg);
7439 }
7440 } else
7441 #endif
7442 {
7443 int label1;
7444 TCGv t0, t1, t2, a0;
7445
7446 if (!s->pe || s->vm86)
7447 goto illegal_op;
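/* arpl: if the RPL field of the destination selector is lower than the */
/* source's, raise it to the source RPL and set ZF, otherwise clear ZF */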
7448 t0 = tcg_temp_local_new();
7449 t1 = tcg_temp_local_new();
7450 t2 = tcg_temp_local_new();
7451 ot = OT_WORD;
7452 modrm = cpu_ldub_code(env, s->pc++);
7453 reg = (modrm >> 3) & 7;
7454 mod = (modrm >> 6) & 3;
7455 rm = modrm & 7;
7456 if (mod != 3) {
7457 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7458 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
7459 a0 = tcg_temp_local_new();
7460 tcg_gen_mov_tl(a0, cpu_A0);
7461 } else {
7462 gen_op_mov_v_reg(ot, t0, rm);
7463 TCGV_UNUSED(a0);
7464 }
7465 gen_op_mov_v_reg(ot, t1, reg);
7466 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7467 tcg_gen_andi_tl(t1, t1, 3);
7468 tcg_gen_movi_tl(t2, 0);
7469 label1 = gen_new_label();
7470 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7471 tcg_gen_andi_tl(t0, t0, ~3);
7472 tcg_gen_or_tl(t0, t0, t1);
7473 tcg_gen_movi_tl(t2, CC_Z);
7474 gen_set_label(label1);
7475 if (mod != 3) {
7476 gen_op_st_v(ot + s->mem_index, t0, a0);
7477 tcg_temp_free(a0);
7478 } else {
7479 gen_op_mov_reg_v(ot, rm, t0);
7480 }
7481 gen_compute_eflags(s);
7482 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7483 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7484 tcg_temp_free(t0);
7485 tcg_temp_free(t1);
7486 tcg_temp_free(t2);
7487 }
7488 break;
7489 case 0x102: /* lar */
7490 case 0x103: /* lsl */
7491 {
7492 int label1;
7493 TCGv t0;
7494 if (!s->pe || s->vm86)
7495 goto illegal_op;
7496 ot = dflag ? OT_LONG : OT_WORD;
7497 modrm = cpu_ldub_code(env, s->pc++);
7498 reg = ((modrm >> 3) & 7) | rex_r;
7499 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7500 t0 = tcg_temp_local_new();
7501 gen_update_cc_op(s);
7502 if (b == 0x102) {
7503 gen_helper_lar(t0, cpu_env, cpu_T[0]);
7504 } else {
7505 gen_helper_lsl(t0, cpu_env, cpu_T[0]);
7506 }
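/* the helpers set ZF in cc_src on success; write the result back only then */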
7507 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7508 label1 = gen_new_label();
7509 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7510 gen_op_mov_reg_v(ot, reg, t0);
7511 gen_set_label(label1);
7512 set_cc_op(s, CC_OP_EFLAGS);
7513 tcg_temp_free(t0);
7514 }
7515 break;
7516 case 0x118:
7517 modrm = cpu_ldub_code(env, s->pc++);
7518 mod = (modrm >> 6) & 3;
7519 op = (modrm >> 3) & 7;
7520 switch(op) {
7521 case 0: /* prefetchnta */
7522 case 1: /* prefetcht0 */
7523 case 2: /* prefetcht1 */
7524 case 3: /* prefetcht2 */
7525 if (mod == 3)
7526 goto illegal_op;
7527 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7528 /* nothing more to do */
7529 break;
7530 default: /* nop (multi byte) */
7531 gen_nop_modrm(env, s, modrm);
7532 break;
7533 }
7534 break;
7535 case 0x119 ... 0x11f: /* nop (multi byte) */
7536 modrm = cpu_ldub_code(env, s->pc++);
7537 gen_nop_modrm(env, s, modrm);
7538 break;
7539 case 0x120: /* mov reg, crN */
7540 case 0x122: /* mov crN, reg */
7541 if (s->cpl != 0) {
7542 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7543 } else {
7544 modrm = cpu_ldub_code(env, s->pc++);
7545 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7546 * AMD documentation (24594.pdf) and testing of
7547 * Intel 386 and 486 processors all show that the mod bits
7548 * are assumed to be 1's, regardless of actual values.
7549 */
7550 rm = (modrm & 7) | REX_B(s);
7551 reg = ((modrm >> 3) & 7) | rex_r;
7552 if (CODE64(s))
7553 ot = OT_QUAD;
7554 else
7555 ot = OT_LONG;
7556 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7557 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7558 reg = 8;
7559 }
7560 switch(reg) {
7561 case 0:
7562 case 2:
7563 case 3:
7564 case 4:
7565 case 8:
7566 gen_update_cc_op(s);
7567 gen_jmp_im(pc_start - s->cs_base);
7568 if (b & 2) {
7569 gen_op_mov_TN_reg(ot, 0, rm);
7570 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7571 cpu_T[0]);
7572 gen_jmp_im(s->pc - s->cs_base);
7573 gen_eob(s);
7574 } else {
7575 gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
7576 gen_op_mov_reg_T0(ot, rm);
7577 }
7578 break;
7579 default:
7580 goto illegal_op;
7581 }
7582 }
7583 break;
7584 case 0x121: /* mov reg, drN */
7585 case 0x123: /* mov drN, reg */
7586 if (s->cpl != 0) {
7587 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7588 } else {
7589 modrm = cpu_ldub_code(env, s->pc++);
7590 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7591 * AMD documentation (24594.pdf) and testing of
7592 * Intel 386 and 486 processors all show that the mod bits
7593 * are assumed to be 1's, regardless of actual values.
7594 */
7595 rm = (modrm & 7) | REX_B(s);
7596 reg = ((modrm >> 3) & 7) | rex_r;
7597 if (CODE64(s))
7598 ot = OT_QUAD;
7599 else
7600 ot = OT_LONG;
7601 /* XXX: do it dynamically with CR4.DE bit */
7602 if (reg == 4 || reg == 5 || reg >= 8)
7603 goto illegal_op;
7604 if (b & 2) {
7605 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7606 gen_op_mov_TN_reg(ot, 0, rm);
7607 gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
7608 gen_jmp_im(s->pc - s->cs_base);
7609 gen_eob(s);
7610 } else {
7611 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7612 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
7613 gen_op_mov_reg_T0(ot, rm);
7614 }
7615 }
7616 break;
7617 case 0x106: /* clts */
7618 if (s->cpl != 0) {
7619 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7620 } else {
7621 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7622 gen_helper_clts(cpu_env);
7623 /* abort block because static cpu state changed */
7624 gen_jmp_im(s->pc - s->cs_base);
7625 gen_eob(s);
7626 }
7627 break;
7628 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7629 case 0x1c3: /* MOVNTI reg, mem */
7630 if (!(s->cpuid_features & CPUID_SSE2))
7631 goto illegal_op;
7632 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
7633 modrm = cpu_ldub_code(env, s->pc++);
7634 mod = (modrm >> 6) & 3;
7635 if (mod == 3)
7636 goto illegal_op;
7637 reg = ((modrm >> 3) & 7) | rex_r;
7638 /* generate a generic store */
7639 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
7640 break;
7641 case 0x1ae:
7642 modrm = cpu_ldub_code(env, s->pc++);
7643 mod = (modrm >> 6) & 3;
7644 op = (modrm >> 3) & 7;
7645 switch(op) {
7646 case 0: /* fxsave */
7647 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7648 (s->prefix & PREFIX_LOCK))
7649 goto illegal_op;
7650 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7651 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7652 break;
7653 }
7654 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7655 gen_update_cc_op(s);
7656 gen_jmp_im(pc_start - s->cs_base);
7657 gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32((s->dflag == 2)));
7658 break;
7659 case 1: /* fxrstor */
7660 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7661 (s->prefix & PREFIX_LOCK))
7662 goto illegal_op;
7663 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7664 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7665 break;
7666 }
7667 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7668 gen_update_cc_op(s);
7669 gen_jmp_im(pc_start - s->cs_base);
7670 gen_helper_fxrstor(cpu_env, cpu_A0,
7671 tcg_const_i32((s->dflag == 2)));
7672 break;
7673 case 2: /* ldmxcsr */
7674 case 3: /* stmxcsr */
7675 if (s->flags & HF_TS_MASK) {
7676 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7677 break;
7678 }
7679 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7680 mod == 3)
7681 goto illegal_op;
7682 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7683 if (op == 2) {
7684 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7685 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7686 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
7687 } else {
7688 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7689 gen_op_st_T0_A0(OT_LONG + s->mem_index);
7690 }
7691 break;
7692 case 5: /* lfence */
7693 case 6: /* mfence */
7694 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
7695 goto illegal_op;
7696 break;
7697 case 7: /* sfence / clflush */
7698 if ((modrm & 0xc7) == 0xc0) {
7699 /* sfence */
7700 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7701 if (!(s->cpuid_features & CPUID_SSE))
7702 goto illegal_op;
7703 } else {
7704 /* clflush */
7705 if (!(s->cpuid_features & CPUID_CLFLUSH))
7706 goto illegal_op;
7707 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7708 }
7709 break;
7710 default:
7711 goto illegal_op;
7712 }
7713 break;
7714 case 0x10d: /* 3DNow! prefetch(w) */
7715 modrm = cpu_ldub_code(env, s->pc++);
7716 mod = (modrm >> 6) & 3;
7717 if (mod == 3)
7718 goto illegal_op;
7719 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7720 /* ignore for now */
7721 break;
7722 case 0x1aa: /* rsm */
7723 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
7724 if (!(s->flags & HF_SMM_MASK))
7725 goto illegal_op;
7726 gen_update_cc_op(s);
7727 gen_jmp_im(s->pc - s->cs_base);
7728 gen_helper_rsm(cpu_env);
7729 gen_eob(s);
7730 break;
7731 case 0x1b8: /* SSE4.2 popcnt */
7732 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7733 PREFIX_REPZ)
7734 goto illegal_op;
7735 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7736 goto illegal_op;
7737
7738 modrm = cpu_ldub_code(env, s->pc++);
7739 reg = ((modrm >> 3) & 7) | rex_r;
7740
7741 if (s->prefix & PREFIX_DATA)
7742 ot = OT_WORD;
7743 else if (s->dflag != 2)
7744 ot = OT_LONG;
7745 else
7746 ot = OT_QUAD;
7747
7748 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
7749 gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
7750 gen_op_mov_reg_T0(ot, reg);
7751
7752 set_cc_op(s, CC_OP_EFLAGS);
7753 break;
7754 case 0x10e ... 0x10f:
7755 /* 3DNow! instructions, ignore prefixes */
7756 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
7757 case 0x110 ... 0x117:
7758 case 0x128 ... 0x12f:
7759 case 0x138 ... 0x13a:
7760 case 0x150 ... 0x179:
7761 case 0x17c ... 0x17f:
7762 case 0x1c2:
7763 case 0x1c4 ... 0x1c6:
7764 case 0x1d0 ... 0x1fe:
7765 gen_sse(env, s, b, pc_start, rex_r);
7766 break;
7767 default:
7768 goto illegal_op;
7769 }
7770 /* lock generation */
7771 if (s->prefix & PREFIX_LOCK)
7772 gen_helper_unlock();
7773 return s->pc;
7774 illegal_op:
7775 if (s->prefix & PREFIX_LOCK)
7776 gen_helper_unlock();
7777 /* XXX: ensure that no lock was generated */
7778 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7779 return s->pc;
7780 }
7781
7782 void optimize_flags_init(void)
7783 {
7784 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7785 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
7786 offsetof(CPUX86State, cc_op), "cc_op");
7787 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
7788 "cc_dst");
7789 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
7790 "cc_src");
7791
7792 #ifdef TARGET_X86_64
7793 cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
7794 offsetof(CPUX86State, regs[R_EAX]), "rax");
7795 cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
7796 offsetof(CPUX86State, regs[R_ECX]), "rcx");
7797 cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
7798 offsetof(CPUX86State, regs[R_EDX]), "rdx");
7799 cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
7800 offsetof(CPUX86State, regs[R_EBX]), "rbx");
7801 cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
7802 offsetof(CPUX86State, regs[R_ESP]), "rsp");
7803 cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
7804 offsetof(CPUX86State, regs[R_EBP]), "rbp");
7805 cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
7806 offsetof(CPUX86State, regs[R_ESI]), "rsi");
7807 cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
7808 offsetof(CPUX86State, regs[R_EDI]), "rdi");
7809 cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
7810 offsetof(CPUX86State, regs[8]), "r8");
7811 cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
7812 offsetof(CPUX86State, regs[9]), "r9");
7813 cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
7814 offsetof(CPUX86State, regs[10]), "r10");
7815 cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
7816 offsetof(CPUX86State, regs[11]), "r11");
7817 cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
7818 offsetof(CPUX86State, regs[12]), "r12");
7819 cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
7820 offsetof(CPUX86State, regs[13]), "r13");
7821 cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
7822 offsetof(CPUX86State, regs[14]), "r14");
7823 cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
7824 offsetof(CPUX86State, regs[15]), "r15");
7825 #else
7826 cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
7827 offsetof(CPUX86State, regs[R_EAX]), "eax");
7828 cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
7829 offsetof(CPUX86State, regs[R_ECX]), "ecx");
7830 cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
7831 offsetof(CPUX86State, regs[R_EDX]), "edx");
7832 cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
7833 offsetof(CPUX86State, regs[R_EBX]), "ebx");
7834 cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
7835 offsetof(CPUX86State, regs[R_ESP]), "esp");
7836 cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
7837 offsetof(CPUX86State, regs[R_EBP]), "ebp");
7838 cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
7839 offsetof(CPUX86State, regs[R_ESI]), "esi");
7840 cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
7841 offsetof(CPUX86State, regs[R_EDI]), "edi");
7842 #endif
7843
7844 /* register helpers */
7845 #define GEN_HELPER 2
7846 #include "helper.h"
7847 }
7848
7849 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7850 basic block 'tb'. If search_pc is TRUE, also generate PC
7851 information for each intermediate instruction. */
7852 static inline void gen_intermediate_code_internal(CPUX86State *env,
7853 TranslationBlock *tb,
7854 int search_pc)
7855 {
7856 DisasContext dc1, *dc = &dc1;
7857 target_ulong pc_ptr;
7858 uint16_t *gen_opc_end;
7859 CPUBreakpoint *bp;
7860 int j, lj;
7861 uint64_t flags;
7862 target_ulong pc_start;
7863 target_ulong cs_base;
7864 int num_insns;
7865 int max_insns;
7866
7867 /* generate intermediate code */
7868 pc_start = tb->pc;
7869 cs_base = tb->cs_base;
7870 flags = tb->flags;
7871
7872 dc->pe = (flags >> HF_PE_SHIFT) & 1;
7873 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7874 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7875 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7876 dc->f_st = 0;
7877 dc->vm86 = (flags >> VM_SHIFT) & 1;
7878 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7879 dc->iopl = (flags >> IOPL_SHIFT) & 3;
7880 dc->tf = (flags >> TF_SHIFT) & 1;
7881 dc->singlestep_enabled = env->singlestep_enabled;
7882 dc->cc_op = CC_OP_DYNAMIC;
7883 dc->cc_op_dirty = false;
7884 dc->cs_base = cs_base;
7885 dc->tb = tb;
7886 dc->popl_esp_hack = 0;
7887 /* select memory access functions */
7888 dc->mem_index = 0;
7889 if (flags & HF_SOFTMMU_MASK) {
7890 dc->mem_index = (cpu_mmu_index(env) + 1) << 2;
7891 }
7892 dc->cpuid_features = env->cpuid_features;
7893 dc->cpuid_ext_features = env->cpuid_ext_features;
7894 dc->cpuid_ext2_features = env->cpuid_ext2_features;
7895 dc->cpuid_ext3_features = env->cpuid_ext3_features;
7896 dc->cpuid_7_0_ebx_features = env->cpuid_7_0_ebx_features;
7897 #ifdef TARGET_X86_64
7898 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7899 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7900 #endif
7901 dc->flags = flags;
7902 dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
7903 (flags & HF_INHIBIT_IRQ_MASK)
7904 #ifndef CONFIG_SOFTMMU
7905 || (flags & HF_SOFTMMU_MASK)
7906 #endif
7907 );
7908 #if 0
7909 /* check addseg logic */
7910 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
7911 printf("ERROR addseg\n");
7912 #endif
7913
7914 cpu_T[0] = tcg_temp_new();
7915 cpu_T[1] = tcg_temp_new();
7916 cpu_A0 = tcg_temp_new();
7917
7918 cpu_tmp0 = tcg_temp_new();
7919 cpu_tmp1_i64 = tcg_temp_new_i64();
7920 cpu_tmp2_i32 = tcg_temp_new_i32();
7921 cpu_tmp3_i32 = tcg_temp_new_i32();
7922 cpu_tmp4 = tcg_temp_new();
7923 cpu_tmp5 = tcg_temp_new();
7924 cpu_ptr0 = tcg_temp_new_ptr();
7925 cpu_ptr1 = tcg_temp_new_ptr();
7926 cpu_cc_srcT = tcg_temp_local_new();
7927
7928 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
7929
7930 dc->is_jmp = DISAS_NEXT;
7931 pc_ptr = pc_start;
7932 lj = -1;
7933 num_insns = 0;
7934 max_insns = tb->cflags & CF_COUNT_MASK;
7935 if (max_insns == 0)
7936 max_insns = CF_COUNT_MASK;
7937
7938 gen_icount_start();
7939 for(;;) {
7940 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
7941 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
7942 if (bp->pc == pc_ptr &&
7943 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
7944 gen_debug(dc, pc_ptr - dc->cs_base);
7945 break;
7946 }
7947 }
7948 }
7949 if (search_pc) {
7950 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
7951 if (lj < j) {
7952 lj++;
7953 while (lj < j)
7954 tcg_ctx.gen_opc_instr_start[lj++] = 0;
7955 }
7956 tcg_ctx.gen_opc_pc[lj] = pc_ptr;
7957 gen_opc_cc_op[lj] = dc->cc_op;
7958 tcg_ctx.gen_opc_instr_start[lj] = 1;
7959 tcg_ctx.gen_opc_icount[lj] = num_insns;
7960 }
7961 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
7962 gen_io_start();
7963
7964 pc_ptr = disas_insn(env, dc, pc_ptr);
7965 num_insns++;
7966 /* stop translation if indicated */
7967 if (dc->is_jmp)
7968 break;
7969 /* in single step mode, we generate only one instruction and then
7970 raise an exception */
7971 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7972 the flag and abort the translation to give the irqs a
7973 chance to happen */
7974 if (dc->tf || dc->singlestep_enabled ||
7975 (flags & HF_INHIBIT_IRQ_MASK)) {
7976 gen_jmp_im(pc_ptr - dc->cs_base);
7977 gen_eob(dc);
7978 break;
7979 }
7980 /* if the translation gets too long, also stop generating */
7981 if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
7982 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
7983 num_insns >= max_insns) {
7984 gen_jmp_im(pc_ptr - dc->cs_base);
7985 gen_eob(dc);
7986 break;
7987 }
7988 if (singlestep) {
7989 gen_jmp_im(pc_ptr - dc->cs_base);
7990 gen_eob(dc);
7991 break;
7992 }
7993 }
7994 if (tb->cflags & CF_LAST_IO)
7995 gen_io_end();
7996 gen_icount_end(tb, num_insns);
7997 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
7998 /* don't forget to fill in the last values */
7999 if (search_pc) {
8000 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
8001 lj++;
8002 while (lj <= j)
8003 tcg_ctx.gen_opc_instr_start[lj++] = 0;
8004 }
8005
8006 #ifdef DEBUG_DISAS
8007 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
8008 int disas_flags;
8009 qemu_log("----------------\n");
8010 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8011 #ifdef TARGET_X86_64
8012 if (dc->code64)
8013 disas_flags = 2;
8014 else
8015 #endif
8016 disas_flags = !dc->code32;
8017 log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
8018 qemu_log("\n");
8019 }
8020 #endif
8021
8022 if (!search_pc) {
8023 tb->size = pc_ptr - pc_start;
8024 tb->icount = num_insns;
8025 }
8026 }
8027
8028 void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
8029 {
8030 gen_intermediate_code_internal(env, tb, 0);
8031 }
8032
8033 void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
8034 {
8035 gen_intermediate_code_internal(env, tb, 1);
8036 }
8037
8038 void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
8039 {
8040 int cc_op;
8041 #ifdef DEBUG_DISAS
8042 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
8043 int i;
8044 qemu_log("RESTORE:\n");
8045 for(i = 0;i <= pc_pos; i++) {
8046 if (tcg_ctx.gen_opc_instr_start[i]) {
8047 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
8048 tcg_ctx.gen_opc_pc[i]);
8049 }
8050 }
8051 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
8052 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
8053 (uint32_t)tb->cs_base);
8054 }
8055 #endif
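/* restore eip and the lazy cc_op state recorded for this opcode index */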
8056 env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
8057 cc_op = gen_opc_cc_op[pc_pos];
8058 if (cc_op != CC_OP_DYNAMIC)
8059 env->cc_op = cc_op;
8060 }