/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "helper.h"
#include "tcg-op.h"

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10

#ifdef TARGET_X86_64
#define X86_64_ONLY(x) x
#define X86_64_DEF(x...) x
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
/* XXX: gcc generates push/pop in some opcodes, so we cannot use them */
#if 1
#define BUGGY_64(x) NULL
#endif
#else
#define X86_64_ONLY(x) NULL
#define X86_64_DEF(x...)
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif

//#define MACRO_TEST   1

/* global register indexes */
static TCGv cpu_env, cpu_A0, cpu_cc_op, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp;
/* local temps */
static TCGv cpu_T[2], cpu_T3;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp1_i64, cpu_tmp2_i32, cpu_tmp3_i32, cpu_tmp4, cpu_ptr0, cpu_ptr1;
static TCGv cpu_tmp5, cpu_tmp6;

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif

typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    int aflag, dflag;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
#endif
    int ss32;   /* 32 bit stack segment */
    int cc_op;  /* current CC operation */
    int addseg; /* non zero if any of DS, ES or SS has a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
} DisasContext;

static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

/* operand size */
enum {
    OT_BYTE = 0,
    OT_WORD,
    OT_LONG,
    OT_QUAD,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

static inline void gen_op_movl_T0_0(void)
{
    tcg_gen_movi_tl(cpu_T[0], 0);
}

static inline void gen_op_movl_T0_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T0_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T1_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_T1_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_A0_im(uint32_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_im(int64_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}
#endif

static inline void gen_movtl_T0_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_movtl_T1_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_andl_T0_ffff(void)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
}

static inline void gen_op_andl_T0_im(uint32_t val)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
}

static inline void gen_op_movl_T0_T1(void)
{
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
}

static inline void gen_op_andl_A0_ffff(void)
{
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

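/* Store t0 into register 'reg' with operand size 'ot'. For byte operands,
   registers 4..7 without a REX prefix denote the legacy high-byte registers
   AH, CH, DH and BH (the high byte of regs 0..3, hence REG_H_OFFSET); with
   a REX prefix present (x86_64_hregs) they denote SPL, BPL, SIL and DIL
   instead, which is what the reg < 4 / reg >= 8 test below distinguishes. */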
static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
{
    switch(ot) {
    case OT_BYTE:
        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
            tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_B_OFFSET);
        } else {
            tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET);
        }
        break;
    case OT_WORD:
        tcg_gen_st16_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
        break;
#ifdef TARGET_X86_64
    case OT_LONG:
        tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
        /* high part of register set to zero */
        tcg_gen_movi_tl(cpu_tmp0, 0);
        tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
        break;
    default:
    case OT_QUAD:
        tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#else
    default:
    case OT_LONG:
        tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
        break;
#endif
    }
}

static inline void gen_op_mov_reg_T0(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);
}

static inline void gen_op_mov_reg_T1(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[1]);
}

static inline void gen_op_mov_reg_A0(int size, int reg)
{
    switch(size) {
    case 0:
        tcg_gen_st16_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
        break;
#ifdef TARGET_X86_64
    case 1:
        tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
        /* high part of register set to zero */
        tcg_gen_movi_tl(cpu_tmp0, 0);
        tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
        break;
    default:
    case 2:
        tcg_gen_st_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#else
    default:
    case 1:
        tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
        break;
#endif
    }
}

static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
{
    switch(ot) {
    case OT_BYTE:
        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
            goto std_case;
        } else {
            tcg_gen_ld8u_tl(t0, cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET);
        }
        break;
    default:
    std_case:
        tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
    }
}

static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
{
    gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
}

static inline void gen_op_movl_A0_reg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_im(int32_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_addq_A0_im(int64_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif

static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
#endif
        gen_op_addl_A0_im(val);
}

static inline void gen_op_addl_T0_T1(void)
{
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
}

static inline void gen_op_jmp_T0(void)
{
    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, eip));
}

static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
{
    switch(size) {
    case 0:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
        tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
        break;
    case 1:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
#ifdef TARGET_X86_64
        tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
#endif
        tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
        tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#endif
    }
}

static inline void gen_op_add_reg_T0(int size, int reg)
{
    switch(size) {
    case 0:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
        tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
        break;
    case 1:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
#ifdef TARGET_X86_64
        tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
#endif
        tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
        tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#endif
    }
}

static inline void gen_op_set_cc_op(int32_t val)
{
    tcg_gen_movi_i32(cpu_cc_op, val);
}

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

static inline void gen_op_movl_A0_seg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base));
}

static inline void gen_op_addq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}

static inline void gen_op_movq_A0_reg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]));
}

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
#endif

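/* In the memory access helpers below, 'idx' packs the operand size in its
   low 2 bits (OT_BYTE..OT_QUAD) and the memory index selecting the softmmu
   access functions in the remaining bits; callers pass ot + s->mem_index. */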
static inline void gen_op_lds_T0_A0(int idx)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
        break;
    default:
    case 2:
        tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
        break;
    }
}

static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_ld8u(t0, a0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_ld16u(t0, a0, mem_index);
        break;
    case 2:
        tcg_gen_qemu_ld32u(t0, a0, mem_index);
        break;
    default:
    case 3:
        tcg_gen_qemu_ld64(t0, a0, mem_index);
        break;
    }
}

/* XXX: always use ldu or lds */
static inline void gen_op_ld_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ldu_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ld_T1_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[1], cpu_A0);
}

static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_st8(t0, a0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_st16(t0, a0, mem_index);
        break;
    case 2:
        tcg_gen_qemu_st32(t0, a0, mem_index);
        break;
    default:
    case 3:
        tcg_gen_qemu_st64(t0, a0, mem_index);
        break;
    }
}

static inline void gen_op_st_T0_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_st_T1_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[1], cpu_A0);
}

static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, eip));
}

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    int override;

    override = s->override;
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        if (override >= 0) {
            gen_op_movq_A0_seg(override);
            gen_op_addq_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movq_A0_reg(R_ESI);
        }
    } else
#endif
    if (s->aflag) {
        /* 32 bit address */
        if (s->addseg && override < 0)
            override = R_DS;
        if (override >= 0) {
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movl_A0_reg(R_ESI);
        }
    } else {
        /* 16 bit address, always override */
        if (override < 0)
            override = R_DS;
        gen_op_movl_A0_reg(R_ESI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(override);
    }
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        gen_op_movq_A0_reg(R_EDI);
    } else
#endif
    if (s->aflag) {
        if (s->addseg) {
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
        } else {
            gen_op_movl_A0_reg(R_EDI);
        }
    } else {
        gen_op_movl_A0_reg(R_EDI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(R_ES);
    }
}

static inline void gen_op_movl_T0_Dshift(int ot)
{
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
}

static void gen_extu(int ot, TCGv reg)
{
    switch(ot) {
    case OT_BYTE:
        tcg_gen_ext8u_tl(reg, reg);
        break;
    case OT_WORD:
        tcg_gen_ext16u_tl(reg, reg);
        break;
    case OT_LONG:
        tcg_gen_ext32u_tl(reg, reg);
        break;
    default:
        break;
    }
}

static void gen_exts(int ot, TCGv reg)
{
    switch(ot) {
    case OT_BYTE:
        tcg_gen_ext8s_tl(reg, reg);
        break;
    case OT_WORD:
        tcg_gen_ext16s_tl(reg, reg);
        break;
    case OT_LONG:
        tcg_gen_ext32s_tl(reg, reg);
        break;
    default:
        break;
    }
}

static inline void gen_op_jnz_ecx(int size, int label1)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(int size, int label1)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}

static void *helper_in_func[3] = {
    helper_inb,
    helper_inw,
    helper_inl,
};

static void *helper_out_func[3] = {
    helper_outb,
    helper_outw,
    helper_outl,
};

static void *gen_check_io_func[3] = {
    helper_check_iob,
    helper_check_iow,
    helper_check_iol,
};

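/* I/O permission check: in protected mode, when CPL > IOPL or in vm86 mode,
   the port in T0 must be validated against the TSS I/O permission bitmap
   through the check_io helpers; under SVM the IOIO intercept is tested as
   well, so the CPU state is saved first in case either check raises. */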
static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    int state_saved;
    target_ulong next_eip;

    state_saved = 0;
    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(cur_eip);
        state_saved = 1;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        tcg_gen_helper_0_1(gen_check_io_func[ot],
                           cpu_tmp2_i32);
    }
    if(s->flags & (1ULL << INTERCEPT_IOIO_PROT)) {
        if (!state_saved) {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(cur_eip);
            state_saved = 1;
        }
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        tcg_gen_helper_0_3(helper_svm_check_io,
                           cpu_tmp2_i32,
                           tcg_const_i32(svm_flags),
                           tcg_const_i32(next_eip - cur_eip));
    }
}

static inline void gen_movs(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;
    }
}

static void gen_op_update1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static inline void gen_op_cmpl_T0_T1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

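/* Lazy flag evaluation dispatches through cc_table indexed by cc_op: each
   CCTable entry holds a compute_all and a compute_c function pointer, so
   the shifts below (by 3 on 32-bit hosts, by 4 on 64-bit hosts) scale the
   cc_op index to the 8- or 16-byte entry size before the indirect call. */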
/* compute eflags.C to reg */
static void gen_compute_eflags_c(TCGv reg)
{
#if TCG_TARGET_REG_BITS == 32
    tcg_gen_shli_i32(cpu_tmp2_i32, cpu_cc_op, 3);
    tcg_gen_addi_i32(cpu_tmp2_i32, cpu_tmp2_i32,
                     (long)cc_table + offsetof(CCTable, compute_c));
    tcg_gen_ld_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0);
    tcg_gen_call(&tcg_ctx, cpu_tmp2_i32, TCG_CALL_PURE,
                 1, &cpu_tmp2_i32, 0, NULL);
#else
    tcg_gen_extu_i32_tl(cpu_tmp1_i64, cpu_cc_op);
    tcg_gen_shli_i64(cpu_tmp1_i64, cpu_tmp1_i64, 4);
    tcg_gen_addi_i64(cpu_tmp1_i64, cpu_tmp1_i64,
                     (long)cc_table + offsetof(CCTable, compute_c));
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_tmp1_i64, 0);
    tcg_gen_call(&tcg_ctx, cpu_tmp1_i64, TCG_CALL_PURE,
                 1, &cpu_tmp2_i32, 0, NULL);
#endif
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}

/* compute all eflags to reg */
static void gen_compute_eflags(TCGv reg)
{
#if TCG_TARGET_REG_BITS == 32
    tcg_gen_shli_i32(cpu_tmp2_i32, cpu_cc_op, 3);
    tcg_gen_addi_i32(cpu_tmp2_i32, cpu_tmp2_i32,
                     (long)cc_table + offsetof(CCTable, compute_all));
    tcg_gen_ld_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0);
    tcg_gen_call(&tcg_ctx, cpu_tmp2_i32, TCG_CALL_PURE,
                 1, &cpu_tmp2_i32, 0, NULL);
#else
    tcg_gen_extu_i32_tl(cpu_tmp1_i64, cpu_cc_op);
    tcg_gen_shli_i64(cpu_tmp1_i64, cpu_tmp1_i64, 4);
    tcg_gen_addi_i64(cpu_tmp1_i64, cpu_tmp1_i64,
                     (long)cc_table + offsetof(CCTable, compute_all));
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_tmp1_i64, 0);
    tcg_gen_call(&tcg_ctx, cpu_tmp1_i64, TCG_CALL_PURE,
                 1, &cpu_tmp2_i32, 0, NULL);
#endif
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}

static inline void gen_setcc_slow_T0(DisasContext *s, int jcc_op)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    switch(jcc_op) {
    case JCC_O:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 11);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_B:
        gen_compute_eflags_c(cpu_T[0]);
        break;
    case JCC_Z:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 6);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_BE:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_S:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 7);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_P:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 2);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_L:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    default:
    case JCC_LE:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    }
}

/* return true if setcc_slow is not needed (WARNING: must be kept in
   sync with gen_jcc1) */
static int is_fast_jcc_case(DisasContext *s, int b)
{
    int jcc_op;
    jcc_op = (b >> 1) & 7;
    switch(s->cc_op) {
        /* we optimize the cmp/jcc case */
    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
    case CC_OP_SUBQ:
        if (jcc_op == JCC_O || jcc_op == JCC_P)
            goto slow_jcc;
        break;

        /* some jumps are easy to compute */
    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
    case CC_OP_ADDQ:

    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:

    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:

    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:

    case CC_OP_SHLB:
    case CC_OP_SHLW:
    case CC_OP_SHLL:
    case CC_OP_SHLQ:
        if (jcc_op != JCC_Z && jcc_op != JCC_S)
            goto slow_jcc;
        break;
    default:
    slow_jcc:
        return 0;
    }
    return 1;
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
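/* In the CC_OP_SUB* fast path, cc_dst holds the subtraction result and
   cc_src the subtrahend, so the original first operand can be recovered as
   cc_dst + cc_src; most conditions then reduce to a single TCG brcond
   without materializing EFLAGS. */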
static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
{
    int inv, jcc_op, size, cond;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch(cc_op) {
        /* we optimize the cmp/jcc case */
    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
    case CC_OP_SUBQ:

        size = cc_op - CC_OP_SUBB;
        switch(jcc_op) {
        case JCC_Z:
        fast_jcc_z:
            switch(size) {
            case 0:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xff);
                t0 = cpu_tmp0;
                break;
            case 1:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffff);
                t0 = cpu_tmp0;
                break;
#ifdef TARGET_X86_64
            case 2:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffffffff);
                t0 = cpu_tmp0;
                break;
#endif
            default:
                t0 = cpu_cc_dst;
                break;
            }
            tcg_gen_brcondi_tl(inv ? TCG_COND_NE : TCG_COND_EQ, t0, 0, l1);
            break;
        case JCC_S:
        fast_jcc_s:
            switch(size) {
            case 0:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
            case 1:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
#ifdef TARGET_X86_64
            case 2:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
#endif
            default:
                tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
                                   0, l1);
                break;
            }
            break;

        case JCC_B:
            cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
            goto fast_jcc_b;
        case JCC_BE:
            cond = inv ? TCG_COND_GTU : TCG_COND_LEU;
        fast_jcc_b:
            tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
            switch(size) {
            case 0:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xff);
                break;
            case 1:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xffff);
                break;
#ifdef TARGET_X86_64
            case 2:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffffffff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xffffffff);
                break;
#endif
            default:
                t0 = cpu_cc_src;
                break;
            }
            tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
            break;

        case JCC_L:
            cond = inv ? TCG_COND_GE : TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = inv ? TCG_COND_GT : TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
            switch(size) {
            case 0:
                t0 = cpu_tmp0;
                tcg_gen_ext8s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext8s_tl(t0, cpu_cc_src);
                break;
            case 1:
                t0 = cpu_tmp0;
                tcg_gen_ext16s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext16s_tl(t0, cpu_cc_src);
                break;
#ifdef TARGET_X86_64
            case 2:
                t0 = cpu_tmp0;
                tcg_gen_ext32s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext32s_tl(t0, cpu_cc_src);
                break;
#endif
            default:
                t0 = cpu_cc_src;
                break;
            }
            tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
            break;

        default:
            goto slow_jcc;
        }
        break;

        /* some jumps are easy to compute */
    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
    case CC_OP_ADDQ:

    case CC_OP_ADCB:
    case CC_OP_ADCW:
    case CC_OP_ADCL:
    case CC_OP_ADCQ:

    case CC_OP_SBBB:
    case CC_OP_SBBW:
    case CC_OP_SBBL:
    case CC_OP_SBBQ:

    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:

    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:

    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:

    case CC_OP_SHLB:
    case CC_OP_SHLW:
    case CC_OP_SHLL:
    case CC_OP_SHLQ:

    case CC_OP_SARB:
    case CC_OP_SARW:
    case CC_OP_SARL:
    case CC_OP_SARQ:
        switch(jcc_op) {
        case JCC_Z:
            size = (cc_op - CC_OP_ADDB) & 3;
            goto fast_jcc_z;
        case JCC_S:
            size = (cc_op - CC_OP_ADDB) & 3;
            goto fast_jcc_s;
        default:
            goto slow_jcc;
        }
        break;
    default:
    slow_jcc:
        gen_setcc_slow_T0(s, jcc_op);
        tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
                           cpu_T[0], 0, l1);
        break;
    }
}

/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}

static inline void gen_stos(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_lods(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_op_mov_reg_T0(ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

static inline void gen_scas(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op_cmpl_T0_T1_cc();
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_cmps(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op_cmpl_T0_T1_cc();
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_ins(DisasContext *s, int ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    gen_op_movl_T0_0();
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_helper_1_1(helper_in_func[ot], cpu_T[0], cpu_tmp2_i32);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_outs(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);

    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
    tcg_gen_helper_0_2(helper_out_func[ot], cpu_tmp2_i32, cpu_tmp3_i32);

    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

/* same method as Valgrind: we generate jumps to the current or next
   instruction */
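/* Each REP iteration is thus translated as: if ECX == 0 jump to the next
   instruction, execute one string operation, decrement ECX, then jump back
   to the current instruction so the ECX test runs again (and pending
   exceptions or interrupts can be taken between iterations). */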
#define GEN_REPZ(op) \
static inline void gen_repz_ ## op(DisasContext *s, int ot, \
                                   target_ulong cur_eip, target_ulong next_eip) \
{ \
    int l2; \
    gen_update_cc_op(s); \
    l2 = gen_jz_ecx_string(s, next_eip); \
    gen_ ## op(s, ot); \
    gen_op_add_reg_im(s->aflag, R_ECX, -1); \
    /* a loop would cause two single step exceptions if ECX == 1 \
       before the rep string instruction */ \
    if (!s->jmp_opt) \
        gen_op_jz_ecx(s->aflag, l2); \
    gen_jmp(s, cur_eip); \
}

#define GEN_REPZ2(op) \
static inline void gen_repz_ ## op(DisasContext *s, int ot, \
                                   target_ulong cur_eip, \
                                   target_ulong next_eip, \
                                   int nz) \
{ \
    int l2; \
    gen_update_cc_op(s); \
    l2 = gen_jz_ecx_string(s, next_eip); \
    gen_ ## op(s, ot); \
    gen_op_add_reg_im(s->aflag, R_ECX, -1); \
    gen_op_set_cc_op(CC_OP_SUBB + ot); \
    gen_jcc1(s, CC_OP_SUBB + ot, (JCC_Z << 1) | (nz ^ 1), l2); \
    if (!s->jmp_opt) \
        gen_op_jz_ecx(s->aflag, l2); \
    gen_jmp(s, cur_eip); \
}

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)

static void *helper_fp_arith_ST0_FT0[8] = {
    helper_fadd_ST0_FT0,
    helper_fmul_ST0_FT0,
    helper_fcom_ST0_FT0,
    helper_fcom_ST0_FT0,
    helper_fsub_ST0_FT0,
    helper_fsubr_ST0_FT0,
    helper_fdiv_ST0_FT0,
    helper_fdivr_ST0_FT0,
};

/* NOTE the exception in "r" op ordering */
static void *helper_fp_arith_STN_ST0[8] = {
    helper_fadd_STN_ST0,
    helper_fmul_STN_ST0,
    NULL,
    NULL,
    helper_fsubr_STN_ST0,
    helper_fsub_STN_ST0,
    helper_fdivr_STN_ST0,
    helper_fdiv_STN_ST0,
};

/* if d == OR_TMP0, it means memory operand (address in A0) */
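/* Arithmetic ops record their operands in cpu_cc_src/cpu_cc_dst and set
   cc_op to the matching CC_OP_* constant, so EFLAGS can be recomputed
   lazily, only when a later instruction actually needs the flags. */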
static void gen_op(DisasContext *s1, int op, int ot, int d)
{
    if (d != OR_TMP0) {
        gen_op_mov_TN_reg(ot, 0, d);
    } else {
        gen_op_ld_T0_A0(ot + s1->mem_index);
    }
    switch(op) {
    case OP_ADCL:
        if (s1->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s1->cc_op);
        gen_compute_eflags_c(cpu_tmp4);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
        s1->cc_op = CC_OP_DYNAMIC;
        break;
    case OP_SBBL:
        if (s1->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s1->cc_op);
        gen_compute_eflags_c(cpu_tmp4);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot);
        s1->cc_op = CC_OP_DYNAMIC;
        break;
    case OP_ADDL:
        gen_op_addl_T0_T1();
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update2_cc();
        s1->cc_op = CC_OP_ADDB + ot;
        break;
    case OP_SUBL:
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update2_cc();
        s1->cc_op = CC_OP_SUBB + ot;
        break;
    default:
    case OP_ANDL:
        tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_ORL:
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_XORL:
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_CMPL:
        gen_op_cmpl_T0_T1_cc();
        s1->cc_op = CC_OP_SUBB + ot;
        break;
    }
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, int ot, int d, int c)
{
    if (d != OR_TMP0)
        gen_op_mov_TN_reg(ot, 0, d);
    else
        gen_op_ld_T0_A0(ot + s1->mem_index);
    if (s1->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s1->cc_op);
    if (c > 0) {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
        s1->cc_op = CC_OP_INCB + ot;
    } else {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
        s1->cc_op = CC_OP_DECB + ot;
    }
    if (d != OR_TMP0)
        gen_op_mov_reg_T0(ot, d);
    else
        gen_op_st_T0_A0(ot + s1->mem_index);
    gen_compute_eflags_c(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

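/* x86 masks shift counts to 5 bits (6 bits for quad-word operands), and a
   masked count of zero must leave EFLAGS untouched; hence the flag state
   below is only committed behind a run-time "count != 0" branch. */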
static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask;
    int shift_label;
    TCGv t0, t1;

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);

    tcg_gen_addi_tl(cpu_tmp5, cpu_T[1], -1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, cpu_T[0]);
            tcg_gen_sar_tl(cpu_T3, cpu_T[0], cpu_tmp5);
            tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else {
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shr_tl(cpu_T3, cpu_T[0], cpu_tmp5);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        }
    } else {
        tcg_gen_shl_tl(cpu_T3, cpu_T[0], cpu_tmp5);
        tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    }

    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags if non zero shift */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    /* XXX: inefficient */
    t0 = tcg_temp_local_new(TCG_TYPE_TL);
    t1 = tcg_temp_local_new(TCG_TYPE_TL);

    tcg_gen_mov_tl(t0, cpu_T[0]);
    tcg_gen_mov_tl(t1, cpu_T3);

    shift_label = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[1], 0, shift_label);

    tcg_gen_mov_tl(cpu_cc_src, t1);
    tcg_gen_mov_tl(cpu_cc_dst, t0);
    if (is_right)
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
    else
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);

    gen_set_label(shift_label);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
}

static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask;

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, cpu_T[0]);
                tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], op2 - 1);
                tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                gen_extu(ot, cpu_T[0]);
                tcg_gen_shri_tl(cpu_tmp0, cpu_T[0], op2 - 1);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
            }
        } else {
            tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], op2 - 1);
            tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
        }
    }

    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp0);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        if (is_right)
            s->cc_op = CC_OP_SARB + ot;
        else
            s->cc_op = CC_OP_SHLB + ot;
    }
}

static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
{
    if (arg2 >= 0)
        tcg_gen_shli_tl(ret, arg1, arg2);
    else
        tcg_gen_shri_tl(ret, arg1, -arg2);
}

/* XXX: add faster immediate case */
static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
                          int is_right)
{
    target_ulong mask;
    int label1, label2, data_bits;
    TCGv t0, t1, t2, a0;

    /* XXX: inefficient, but we must use local temps */
    t0 = tcg_temp_local_new(TCG_TYPE_TL);
    t1 = tcg_temp_local_new(TCG_TYPE_TL);
    t2 = tcg_temp_local_new(TCG_TYPE_TL);
    a0 = tcg_temp_local_new(TCG_TYPE_TL);

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    tcg_gen_mov_tl(t1, cpu_T[1]);

    tcg_gen_andi_tl(t1, t1, mask);

    /* Must test zero case to avoid using undefined behaviour in TCG
       shifts. */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);

    if (ot <= OT_WORD)
        tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
    else
        tcg_gen_mov_tl(cpu_tmp0, t1);

    gen_extu(ot, t0);
    tcg_gen_mov_tl(t2, t0);

    data_bits = 8 << ot;
    /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
       fix TCG definition) */
    if (is_right) {
        tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
        tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(data_bits), cpu_tmp0);
        tcg_gen_shl_tl(t0, t0, cpu_tmp0);
    } else {
        tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
        tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(data_bits), cpu_tmp0);
        tcg_gen_shr_tl(t0, t0, cpu_tmp0);
    }
    tcg_gen_or_tl(t0, t0, cpu_tmp4);

    gen_set_label(label1);
    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    /* update eflags */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2);

    gen_compute_eflags(cpu_cc_src);
    tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
    tcg_gen_xor_tl(cpu_tmp0, t2, t0);
    tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
    tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
    if (is_right) {
        tcg_gen_shri_tl(t0, t0, data_bits - 1);
    }
    tcg_gen_andi_tl(t0, t0, CC_C);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_discard_tl(cpu_cc_dst);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);

    gen_set_label(label2);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(a0);
}

static void *helper_rotc[8] = {
    helper_rclb,
    helper_rclw,
    helper_rcll,
    X86_64_ONLY(helper_rclq),
    helper_rcrb,
    helper_rcrw,
    helper_rcrl,
    X86_64_ONLY(helper_rcrq),
};

/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
                           int is_right)
{
    int label1;

    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    tcg_gen_helper_1_2(helper_rotc[ot + (is_right * 4)],
                       cpu_T[0], cpu_T[0], cpu_T[1]);
    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cc_tmp, -1, label1);

    tcg_gen_mov_tl(cpu_cc_src, cpu_cc_tmp);
    tcg_gen_discard_tl(cpu_cc_dst);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);

    gen_set_label(label1);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
}

/* XXX: add faster immediate case */
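/* Double-width shifts (SHLD/SHRD): the destination operand is loaded from
   op1, cpu_T[1] supplies the bits shifted in, and cpu_T3 holds the count. */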
static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
                                int is_right)
{
    int label1, label2, data_bits;
    target_ulong mask;
    TCGv t0, t1, t2, a0;

    t0 = tcg_temp_local_new(TCG_TYPE_TL);
    t1 = tcg_temp_local_new(TCG_TYPE_TL);
    t2 = tcg_temp_local_new(TCG_TYPE_TL);
    a0 = tcg_temp_local_new(TCG_TYPE_TL);

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    tcg_gen_andi_tl(cpu_T3, cpu_T3, mask);

    tcg_gen_mov_tl(t1, cpu_T[1]);
    tcg_gen_mov_tl(t2, cpu_T3);

    /* Must test zero case to avoid using undefined behaviour in TCG
       shifts. */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);

    tcg_gen_addi_tl(cpu_tmp5, t2, -1);
    if (ot == OT_WORD) {
        /* Note: we implement the Intel behaviour for shift count > 16 */
        if (is_right) {
            tcg_gen_andi_tl(t0, t0, 0xffff);
            tcg_gen_shli_tl(cpu_tmp0, t1, 16);
            tcg_gen_or_tl(t0, t0, cpu_tmp0);
            tcg_gen_ext32u_tl(t0, t0);

            tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);

            /* only needed if count > 16, but a test would complicate the code */
            tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(32), t2);
            tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);

            tcg_gen_shr_tl(t0, t0, t2);

            tcg_gen_or_tl(t0, t0, cpu_tmp0);
        } else {
            /* XXX: not optimal */
            tcg_gen_andi_tl(t0, t0, 0xffff);
            tcg_gen_shli_tl(t1, t1, 16);
            tcg_gen_or_tl(t1, t1, t0);
            tcg_gen_ext32u_tl(t1, t1);

            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
            tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(32), cpu_tmp5);
            tcg_gen_shr_tl(cpu_tmp6, t1, cpu_tmp0);
            tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp6);

            tcg_gen_shl_tl(t0, t0, t2);
            tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(32), t2);
            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);
        }
    } else {
        data_bits = 8 << ot;
        if (is_right) {
            if (ot == OT_LONG)
                tcg_gen_ext32u_tl(t0, t0);

            tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);

            tcg_gen_shr_tl(t0, t0, t2);
            tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), t2);
            tcg_gen_shl_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);

        } else {
            if (ot == OT_LONG)
                tcg_gen_ext32u_tl(t1, t1);

            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);

            tcg_gen_shl_tl(t0, t0, t2);
            tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), t2);
            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);
        }
    }
    tcg_gen_mov_tl(t1, cpu_tmp4);

    gen_set_label(label1);
    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    /* update eflags */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2);

    tcg_gen_mov_tl(cpu_cc_src, t1);
    tcg_gen_mov_tl(cpu_cc_dst, t0);
    if (is_right) {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
    } else {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
    }
    gen_set_label(label2);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(a0);
}

static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
{
    if (s != OR_TMP1)
        gen_op_mov_TN_reg(ot, 1, s);
    switch(op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}

static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
{
    switch(op) {
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        gen_op_movl_T1_im(c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}

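/* Decode the ModRM byte (and a following SIB byte, if any) and leave the
   computed effective address in A0. As an encoding example, modrm 0x44
   (mod=01, rm=100) announces a SIB byte followed by an 8-bit displacement. */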
static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr)
{
    target_long disp;
    int havesib;
    int base;
    int index;
    int scale;
    int opreg;
    int mod, rm, code, override, must_add_seg;

    override = s->override;
    must_add_seg = s->addseg;
    if (override >= 0)
        must_add_seg = 1;
    mod = (modrm >> 6) & 3;
    rm = modrm & 7;

    if (s->aflag) {

        havesib = 0;
        base = rm;
        index = 0;
        scale = 0;

        if (base == 4) {
            havesib = 1;
            code = ldub_code(s->pc++);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            base = (code & 7);
        }
        base |= REX_B(s);

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)ldl_code(s->pc);
                s->pc += 4;
                if (CODE64(s) && !havesib) {
                    disp += s->pc + s->rip_offset;
                }
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)ldub_code(s->pc++);
            break;
        default:
        case 2:
            disp = ldl_code(s->pc);
            s->pc += 4;
            break;
        }

        if (base >= 0) {
            /* for correct popl handling with esp */
            if (base == 4 && s->popl_esp_hack)
                disp += s->popl_esp_hack;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_reg(base);
                if (disp != 0) {
                    gen_op_addq_A0_im(disp);
                }
            } else
#endif
            {
                gen_op_movl_A0_reg(base);
                if (disp != 0)
                    gen_op_addl_A0_im(disp);
            }
        } else {
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_im(disp);
            } else
#endif
            {
                gen_op_movl_A0_im(disp);
            }
        }
        /* XXX: index == 4 is always invalid */
        if (havesib && (index != 4 || scale != 0)) {
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_addq_A0_reg_sN(scale, index);
            } else
#endif
            {
                gen_op_addl_A0_reg_sN(scale, index);
            }
        }
        if (must_add_seg) {
            if (override < 0) {
                if (base == R_EBP || base == R_ESP)
                    override = R_SS;
                else
                    override = R_DS;
            }
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_addq_A0_seg(override);
            } else
#endif
            {
                gen_op_addl_A0_seg(override);
            }
        }
    } else {
        switch (mod) {
        case 0:
            if (rm == 6) {
                disp = lduw_code(s->pc);
                s->pc += 2;
                gen_op_movl_A0_im(disp);
                rm = 0; /* avoid SS override */
                goto no_rm;
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)ldub_code(s->pc++);
            break;
        default:
        case 2:
            disp = lduw_code(s->pc);
            s->pc += 2;
            break;
        }
        switch(rm) {
        case 0:
            gen_op_movl_A0_reg(R_EBX);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            break;
        case 1:
            gen_op_movl_A0_reg(R_EBX);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            break;
        case 2:
            gen_op_movl_A0_reg(R_EBP);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            break;
        case 3:
            gen_op_movl_A0_reg(R_EBP);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            break;
        case 4:
            gen_op_movl_A0_reg(R_ESI);
            break;
        case 5:
            gen_op_movl_A0_reg(R_EDI);
            break;
        case 6:
            gen_op_movl_A0_reg(R_EBP);
            break;
        default:
        case 7:
            gen_op_movl_A0_reg(R_EBX);
            break;
        }
        if (disp != 0)
            gen_op_addl_A0_im(disp);
        gen_op_andl_A0_ffff();
    no_rm:
        if (must_add_seg) {
            if (override < 0) {
                if (rm == 2 || rm == 3 || rm == 6)
                    override = R_SS;
                else
                    override = R_DS;
            }
            gen_op_addl_A0_seg(override);
        }
    }

    opreg = OR_A0;
    disp = 0;
    *reg_ptr = opreg;
    *offset_ptr = disp;
}

static void gen_nop_modrm(DisasContext *s, int modrm)
{
    int mod, rm, base, code;

    mod = (modrm >> 6) & 3;
    if (mod == 3)
        return;
    rm = modrm & 7;

    if (s->aflag) {

        base = rm;

        if (base == 4) {
            code = ldub_code(s->pc++);
            base = (code & 7);
        }

        switch (mod) {
        case 0:
            if (base == 5) {
                s->pc += 4;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 4;
            break;
        }
    } else {
        switch (mod) {
        case 0:
            if (rm == 6) {
                s->pc += 2;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 2;
            break;
        }
    }
}

/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    int override, must_add_seg;
    must_add_seg = s->addseg;
    override = R_DS;
    if (s->override >= 0) {
        override = s->override;
        must_add_seg = 1;
    } else {
        override = R_DS;
    }
    if (must_add_seg) {
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            gen_op_addq_A0_seg(override);
        } else
#endif
        {
            gen_op_addl_A0_seg(override);
        }
    }
}

/* generate modrm memory load or store of 'reg'. TMP0 is used if reg !=
   OR_TMP0 */
static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
{
    int mod, rm, opreg, disp;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0(ot, reg);
        }
    } else {
        gen_lea_modrm(s, modrm, &opreg, &disp);
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_st_T0_A0(ot + s->mem_index);
        } else {
            gen_op_ld_T0_A0(ot + s->mem_index);
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0(ot, reg);
        }
    }
}

static inline uint32_t insn_get(DisasContext *s, int ot)
{
    uint32_t ret;

    switch(ot) {
    case OT_BYTE:
        ret = ldub_code(s->pc);
        s->pc++;
        break;
    case OT_WORD:
        ret = lduw_code(s->pc);
        s->pc += 2;
        break;
    default:
    case OT_LONG:
        ret = ldl_code(s->pc);
        s->pc += 4;
        break;
    }
    return ret;
}

static inline int insn_const_size(unsigned int ot)
{
    if (ot <= OT_LONG)
        return 1 << ot;
    else
        return 4;
}

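/* Direct block chaining: if the target eip stays within the page(s) already
   covered by this TB, the jump is emitted as a patchable goto_tb/exit_tb
   pair; otherwise the TB ends and the main loop looks up the target. */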
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
{
    TranslationBlock *tb;
    target_ulong pc;

    pc = s->cs_base + eip;
    tb = s->tb;
    /* NOTE: we handle the case where the TB spans two pages here */
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        gen_jmp_im(eip);
        tcg_gen_exit_tb((long)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        gen_jmp_im(eip);
        gen_eob(s);
    }
}

static inline void gen_jcc(DisasContext *s, int b,
                           target_ulong val, target_ulong next_eip)
{
    int l1, l2, cc_op;

    cc_op = s->cc_op;
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;
    }
    if (s->jmp_opt) {
        l1 = gen_new_label();
        gen_jcc1(s, cc_op, b, l1);

        gen_goto_tb(s, 0, next_eip);

        gen_set_label(l1);
        gen_goto_tb(s, 1, val);
        s->is_jmp = 3;
    } else {

        l1 = gen_new_label();
        l2 = gen_new_label();
        gen_jcc1(s, cc_op, b, l1);

        gen_jmp_im(next_eip);
        tcg_gen_br(l2);

        gen_set_label(l1);
        gen_jmp_im(val);
        gen_set_label(l2);
        gen_eob(s);
    }
}

static void gen_setcc(DisasContext *s, int b)
{
    int inv, jcc_op, l1;
    TCGv t0;

    if (is_fast_jcc_case(s, b)) {
        /* nominal case: we use a jump */
        /* XXX: make it faster by adding new instructions in TCG */
        t0 = tcg_temp_local_new(TCG_TYPE_TL);
        tcg_gen_movi_tl(t0, 0);
        l1 = gen_new_label();
        gen_jcc1(s, s->cc_op, b ^ 1, l1);
        tcg_gen_movi_tl(t0, 1);
        gen_set_label(l1);
        tcg_gen_mov_tl(cpu_T[0], t0);
        tcg_temp_free(t0);
    } else {
2268 /* slow case: it is more efficient not to generate a jump,
2269 although it is questionable whether this optimization is
2270 worthwhile */
2271 inv = b & 1;
2272 jcc_op = (b >> 1) & 7;
2273 gen_setcc_slow_T0(s, jcc_op);
2274 if (inv) {
2275 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1);
2276 }
2277 }
2278}
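/* For reference: condition codes come in even/odd pairs with bit 0 acting
 as the negation, which is why the slow path above computes the even
 condition (jcc_op) and then xors with 1; e.g. setne (b = 5) is the setz
 value inverted. */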
2279
2280static inline void gen_op_movl_T0_seg(int seg_reg)
2281{
2282 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2283 offsetof(CPUX86State,segs[seg_reg].selector));
2284}
2285
2286static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2287{
2288 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2289 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2290 offsetof(CPUX86State,segs[seg_reg].selector));
2291 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2292 tcg_gen_st_tl(cpu_T[0], cpu_env,
2293 offsetof(CPUX86State,segs[seg_reg].base));
2294}
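/* Illustration (not part of the translator): in real/vm86 mode the segment
 base is simply selector << 4, which is what the store above computes;
 e.g. selector 0x1234 -> base 0x12340. Hypothetical helper name. */
#if 0
static uint32_t real_mode_seg_base(uint16_t selector)
{
    return (uint32_t)selector << 4;
}
#endif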
2295
2296/* move T0 to seg_reg and determine whether the CPU state may change. Never
2297 call this function with seg_reg == R_CS */
2298static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2299{
2300 if (s->pe && !s->vm86) {
2301 /* XXX: optimize by finding processor state dynamically */
2302 if (s->cc_op != CC_OP_DYNAMIC)
2303 gen_op_set_cc_op(s->cc_op);
2304 gen_jmp_im(cur_eip);
2305 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2306 tcg_gen_helper_0_2(helper_load_seg, tcg_const_i32(seg_reg), cpu_tmp2_i32);
2307 /* abort translation because the addseg value may change or
2308 because ss32 may change. For R_SS, translation must always
2309 stop, as special handling is needed to disable hardware
2310 interrupts for the next instruction */
2311 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2312 s->is_jmp = 3;
2313 } else {
2314 gen_op_movl_seg_T0_vm(seg_reg);
2315 if (seg_reg == R_SS)
2316 s->is_jmp = 3;
2317 }
2318}
2319
2320static inline int svm_is_rep(int prefixes)
2321{
2322 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2323}
2324
2325static inline int
2326gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2327 uint32_t type, uint64_t param)
2328{
2329 if(!(s->flags & (INTERCEPT_SVM_MASK)))
2330 /* no SVM activated */
2331 return 0;
2332 switch(type) {
2333 /* CRx and DRx reads/writes */
2334 case SVM_EXIT_READ_CR0 ... SVM_EXIT_EXCP_BASE - 1:
2335 if (s->cc_op != CC_OP_DYNAMIC) {
2336 gen_op_set_cc_op(s->cc_op);
2337 }
2338 gen_jmp_im(pc_start - s->cs_base);
2339 tcg_gen_helper_0_2(helper_svm_check_intercept_param,
2340 tcg_const_i32(type), tcg_const_i64(param));
2341 /* this is a special case: we do not know whether the interception
2342 occurs, so we assume there was none */
2343 return 0;
2344 case SVM_EXIT_MSR:
2345 if(s->flags & (1ULL << INTERCEPT_MSR_PROT)) {
2346 if (s->cc_op != CC_OP_DYNAMIC) {
2347 gen_op_set_cc_op(s->cc_op);
2348 }
2349 gen_jmp_im(pc_start - s->cs_base);
2350 tcg_gen_helper_0_2(helper_svm_check_intercept_param,
2351 tcg_const_i32(type), tcg_const_i64(param));
2352 /* this is a special case: we do not know whether the interception
2353 occurs, so we assume there was none */
2354 return 0;
2355 }
2356 break;
2357 default:
2358 if(s->flags & (1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR))) {
2359 if (s->cc_op != CC_OP_DYNAMIC) {
2360 gen_op_set_cc_op(s->cc_op);
2361 }
2362 gen_jmp_im(pc_start - s->cs_base);
2363 tcg_gen_helper_0_2(helper_vmexit,
2364 tcg_const_i32(type), tcg_const_i64(param));
2365 /* we can optimize this one: the TB need not
2366 run past the vmexit */
2367 gen_eob(s);
2368 return 1;
2369 }
2370 }
2371 return 0;
2372}
2373
2374static inline int
2375gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2376{
2377 return gen_svm_check_intercept_param(s, pc_start, type, 0);
2378}
2379
2380static inline void gen_stack_update(DisasContext *s, int addend)
2381{
2382#ifdef TARGET_X86_64
2383 if (CODE64(s)) {
2384 gen_op_add_reg_im(2, R_ESP, addend);
2385 } else
2386#endif
2387 if (s->ss32) {
2388 gen_op_add_reg_im(1, R_ESP, addend);
2389 } else {
2390 gen_op_add_reg_im(0, R_ESP, addend);
2391 }
2392}
2393
2394/* generate a push. It depends on ss32, addseg and dflag */
2395static void gen_push_T0(DisasContext *s)
2396{
2397#ifdef TARGET_X86_64
2398 if (CODE64(s)) {
2399 gen_op_movq_A0_reg(R_ESP);
2400 if (s->dflag) {
2401 gen_op_addq_A0_im(-8);
2402 gen_op_st_T0_A0(OT_QUAD + s->mem_index);
2403 } else {
2404 gen_op_addq_A0_im(-2);
2405 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2406 }
2407 gen_op_mov_reg_A0(2, R_ESP);
2408 } else
2409#endif
2410 {
2411 gen_op_movl_A0_reg(R_ESP);
2412 if (!s->dflag)
2413 gen_op_addl_A0_im(-2);
2414 else
2415 gen_op_addl_A0_im(-4);
2416 if (s->ss32) {
2417 if (s->addseg) {
2418 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2419 gen_op_addl_A0_seg(R_SS);
2420 }
2421 } else {
2422 gen_op_andl_A0_ffff();
2423 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2424 gen_op_addl_A0_seg(R_SS);
2425 }
2426 gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
2427 if (s->ss32 && !s->addseg)
2428 gen_op_mov_reg_A0(1, R_ESP);
2429 else
2430 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
2431 }
2432}
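/* Illustration (not part of the translator): the net effect of the push
 generated above, ignoring segmentation and 16-bit SP wrap. A sketch;
 push_model() and its arguments are hypothetical. */
#if 0
static void push_model(uint8_t *stack, target_ulong *sp,
                       target_ulong val, int size /* 2, 4 or 8 */)
{
    int i;
    *sp -= size;                     /* decrement first ... */
    for (i = 0; i < size; i++)       /* ... then store little-endian */
        stack[*sp + i] = val >> (8 * i);
}
#endif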
2433
2434/* generate a push. It depends on ss32, addseg and dflag */
2435/* slower version for T1, only used for call Ev */
2436static void gen_push_T1(DisasContext *s)
2437{
2438#ifdef TARGET_X86_64
2439 if (CODE64(s)) {
2440 gen_op_movq_A0_reg(R_ESP);
2441 if (s->dflag) {
2442 gen_op_addq_A0_im(-8);
2443 gen_op_st_T1_A0(OT_QUAD + s->mem_index);
2444 } else {
2445 gen_op_addq_A0_im(-2);
2446 gen_op_st_T1_A0(OT_WORD + s->mem_index);
2447 }
2448 gen_op_mov_reg_A0(2, R_ESP);
2449 } else
2450#endif
2451 {
2452 gen_op_movl_A0_reg(R_ESP);
2453 if (!s->dflag)
2454 gen_op_addl_A0_im(-2);
2455 else
2456 gen_op_addl_A0_im(-4);
2457 if (s->ss32) {
2458 if (s->addseg) {
2459 gen_op_addl_A0_seg(R_SS);
2460 }
2461 } else {
2462 gen_op_andl_A0_ffff();
2463 gen_op_addl_A0_seg(R_SS);
2464 }
2465 gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
2466
2467 if (s->ss32 && !s->addseg)
2468 gen_op_mov_reg_A0(1, R_ESP);
2469 else
2470 gen_stack_update(s, (-2) << s->dflag);
2471 }
2472}
2473
2474/* two step pop is necessary for precise exceptions */
2475static void gen_pop_T0(DisasContext *s)
2476{
2477#ifdef TARGET_X86_64
2478 if (CODE64(s)) {
2479 gen_op_movq_A0_reg(R_ESP);
2480 gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
2481 } else
2482#endif
2483 {
2484 gen_op_movl_A0_reg(R_ESP);
2485 if (s->ss32) {
2486 if (s->addseg)
2487 gen_op_addl_A0_seg(R_SS);
2488 } else {
2489 gen_op_andl_A0_ffff();
2490 gen_op_addl_A0_seg(R_SS);
2491 }
2492 gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
2493 }
2494}
2495
2496static void gen_pop_update(DisasContext *s)
2497{
2498#ifdef TARGET_X86_64
2499 if (CODE64(s) && s->dflag) {
2500 gen_stack_update(s, 8);
2501 } else
2502#endif
2503 {
2504 gen_stack_update(s, 2 << s->dflag);
2505 }
2506}
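/* Illustration (not part of the translator): why pop is split into
 gen_pop_T0() (load) and gen_pop_update() (ESP adjustment) above. A
 sketch; pop_model() is a hypothetical name. */
#if 0
static target_ulong pop_model(const uint8_t *stack, target_ulong sp,
                              target_ulong *val, int size)
{
    int i;
    *val = 0;
    for (i = 0; i < size; i++)       /* this load may raise a page fault */
        *val |= (target_ulong)stack[sp + i] << (8 * i);
    /* ESP is only moved once the load has succeeded, so a faulting pop
       restarts with the original stack pointer (precise exceptions) */
    return sp + size;
}
#endif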
2507
2508static void gen_stack_A0(DisasContext *s)
2509{
2510 gen_op_movl_A0_reg(R_ESP);
2511 if (!s->ss32)
2512 gen_op_andl_A0_ffff();
2513 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2514 if (s->addseg)
2515 gen_op_addl_A0_seg(R_SS);
2516}
2517
2518/* NOTE: wrap-around in 16-bit mode is not fully handled */
2519static void gen_pusha(DisasContext *s)
2520{
2521 int i;
2522 gen_op_movl_A0_reg(R_ESP);
2523 gen_op_addl_A0_im(-16 << s->dflag);
2524 if (!s->ss32)
2525 gen_op_andl_A0_ffff();
2526 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2527 if (s->addseg)
2528 gen_op_addl_A0_seg(R_SS);
2529 for(i = 0;i < 8; i++) {
2530 gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
2531 gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
2532 gen_op_addl_A0_im(2 << s->dflag);
2533 }
2534 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2535}
2536
2537/* NOTE: wrap-around in 16-bit mode is not fully handled */
2538static void gen_popa(DisasContext *s)
2539{
2540 int i;
2541 gen_op_movl_A0_reg(R_ESP);
2542 if (!s->ss32)
2543 gen_op_andl_A0_ffff();
2544 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2545 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2546 if (s->addseg)
2547 gen_op_addl_A0_seg(R_SS);
2548 for(i = 0;i < 8; i++) {
2549 /* ESP is not reloaded */
2550 if (i != 3) {
2551 gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
2552 gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
2553 }
2554 gen_op_addl_A0_im(2 << s->dflag);
2555 }
2556 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2557}
2558
2559static void gen_enter(DisasContext *s, int esp_addend, int level)
2560{
2561 int ot, opsize;
2562
2563 level &= 0x1f;
2564#ifdef TARGET_X86_64
2565 if (CODE64(s)) {
2566 ot = s->dflag ? OT_QUAD : OT_WORD;
2567 opsize = 1 << ot;
2568
2569 gen_op_movl_A0_reg(R_ESP);
2570 gen_op_addq_A0_im(-opsize);
2571 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2572
2573 /* push bp */
2574 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2575 gen_op_st_T0_A0(ot + s->mem_index);
2576 if (level) {
2577 /* XXX: must save state */
2578 tcg_gen_helper_0_3(helper_enter64_level,
2579 tcg_const_i32(level),
2580 tcg_const_i32((ot == OT_QUAD)),
2581 cpu_T[1]);
2582 }
2583 gen_op_mov_reg_T1(ot, R_EBP);
2584 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2585 gen_op_mov_reg_T1(OT_QUAD, R_ESP);
2586 } else
2587#endif
2588 {
2589 ot = s->dflag + OT_WORD;
2590 opsize = 2 << s->dflag;
2591
2592 gen_op_movl_A0_reg(R_ESP);
2593 gen_op_addl_A0_im(-opsize);
2594 if (!s->ss32)
2595 gen_op_andl_A0_ffff();
2596 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2597 if (s->addseg)
2598 gen_op_addl_A0_seg(R_SS);
2599 /* push bp */
2600 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2601 gen_op_st_T0_A0(ot + s->mem_index);
2602 if (level) {
2603 /* XXX: must save state */
2604 tcg_gen_helper_0_3(helper_enter_level,
2605 tcg_const_i32(level),
2606 tcg_const_i32(s->dflag),
2607 cpu_T[1]);
2608 }
2609 gen_op_mov_reg_T1(ot, R_EBP);
2610 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2611 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2612 }
2613}
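/* Illustration (not part of the translator): for level == 0 the code above
 matches the classic prologue "push ebp; mov ebp, esp; sub esp, N". A
 sketch with hypothetical names; the level != 0 nesting path goes through
 helper_enter_level()/helper_enter64_level(). */
#if 0
static void enter_model(target_ulong *esp, target_ulong *ebp,
                        int esp_addend, int opsize)
{
    *esp -= opsize;        /* push ebp (the store itself is omitted here) */
    *ebp = *esp;           /* mov ebp, esp */
    *esp -= esp_addend;    /* sub esp, N */
}
#endif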
2614
2615static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2616{
2617 if (s->cc_op != CC_OP_DYNAMIC)
2618 gen_op_set_cc_op(s->cc_op);
2619 gen_jmp_im(cur_eip);
2620 tcg_gen_helper_0_1(helper_raise_exception, tcg_const_i32(trapno));
2621 s->is_jmp = 3;
2622}
2623
2624/* an interrupt is different from an exception because of the
2625 privilege checks */
2626static void gen_interrupt(DisasContext *s, int intno,
2627 target_ulong cur_eip, target_ulong next_eip)
2628{
2629 if (s->cc_op != CC_OP_DYNAMIC)
2630 gen_op_set_cc_op(s->cc_op);
2631 gen_jmp_im(cur_eip);
2632 tcg_gen_helper_0_2(helper_raise_interrupt,
2633 tcg_const_i32(intno),
2634 tcg_const_i32(next_eip - cur_eip));
2635 s->is_jmp = 3;
2636}
2637
2638static void gen_debug(DisasContext *s, target_ulong cur_eip)
2639{
2640 if (s->cc_op != CC_OP_DYNAMIC)
2641 gen_op_set_cc_op(s->cc_op);
2642 gen_jmp_im(cur_eip);
2643 tcg_gen_helper_0_0(helper_debug);
2644 s->is_jmp = 3;
2645}
2646
2647/* generate a generic end of block. A trace exception is also generated
2648 if needed */
2649static void gen_eob(DisasContext *s)
2650{
2651 if (s->cc_op != CC_OP_DYNAMIC)
2652 gen_op_set_cc_op(s->cc_op);
2653 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2654 tcg_gen_helper_0_0(helper_reset_inhibit_irq);
2655 }
2656 if (s->singlestep_enabled) {
2657 tcg_gen_helper_0_0(helper_debug);
2658 } else if (s->tf) {
2659 tcg_gen_helper_0_0(helper_single_step);
2660 } else {
2661 tcg_gen_exit_tb(0);
2662 }
2663 s->is_jmp = 3;
2664}
2665
2666/* generate a jump to eip. No segment change may happen before this, as a
2667 direct jump to the next block may occur */
2668static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2669{
2670 if (s->jmp_opt) {
2671 if (s->cc_op != CC_OP_DYNAMIC) {
2672 gen_op_set_cc_op(s->cc_op);
2673 s->cc_op = CC_OP_DYNAMIC;
2674 }
2675 gen_goto_tb(s, tb_num, eip);
2676 s->is_jmp = 3;
2677 } else {
2678 gen_jmp_im(eip);
2679 gen_eob(s);
2680 }
2681}
2682
2683static void gen_jmp(DisasContext *s, target_ulong eip)
2684{
2685 gen_jmp_tb(s, eip, 0);
2686}
2687
2688static inline void gen_ldq_env_A0(int idx, int offset)
2689{
2690 int mem_index = (idx >> 2) - 1;
2691 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2692 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2693}
2694
2695static inline void gen_stq_env_A0(int idx, int offset)
2696{
2697 int mem_index = (idx >> 2) - 1;
2698 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2699 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2700}
2701
2702static inline void gen_ldo_env_A0(int idx, int offset)
2703{
2704 int mem_index = (idx >> 2) - 1;
2705 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2706 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2707 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2708 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2709 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2710}
2711
2712static inline void gen_sto_env_A0(int idx, int offset)
2713{
2714 int mem_index = (idx >> 2) - 1;
2715 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2716 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2717 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2718 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2719 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2720}
2721
2722static inline void gen_op_movo(int d_offset, int s_offset)
2723{
2724 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2725 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2726 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2727 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
2728}
2729
2730static inline void gen_op_movq(int d_offset, int s_offset)
2731{
2732 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2733 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2734}
2735
2736static inline void gen_op_movl(int d_offset, int s_offset)
2737{
2738 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2739 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2740}
2741
2742static inline void gen_op_movq_env_0(int d_offset)
2743{
2744 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2745 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2746}
2747
2748#define SSE_SPECIAL ((void *)1)
2749#define SSE_DUMMY ((void *)2)
2750
2751#define MMX_OP2(x) { helper_ ## x ## _mmx, helper_ ## x ## _xmm }
2752#define SSE_FOP(x) { helper_ ## x ## ps, helper_ ## x ## pd, \
2753 helper_ ## x ## ss, helper_ ## x ## sd, }
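/* For reference, the rows these macros paste together, e.g.:
 MMX_OP2(paddb) -> { helper_paddb_mmx, helper_paddb_xmm }
 SSE_FOP(add) -> { helper_addps, helper_addpd, helper_addss, helper_addsd }
 matching the prefix columns none/0x66/0xF3/0xF2 used to index the tables. */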
2754
2755static void *sse_op_table1[256][4] = {
2756 /* 3DNow! extensions */
2757 [0x0e] = { SSE_DUMMY }, /* femms */
2758 [0x0f] = { SSE_DUMMY }, /* pf... */
2759 /* pure SSE operations */
2760 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2761 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2762 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2763 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2764 [0x14] = { helper_punpckldq_xmm, helper_punpcklqdq_xmm },
2765 [0x15] = { helper_punpckhdq_xmm, helper_punpckhqdq_xmm },
2766 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2767 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2768
2769 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2770 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2771 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2772 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd */
2773 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
2774 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
2775 [0x2e] = { helper_ucomiss, helper_ucomisd },
2776 [0x2f] = { helper_comiss, helper_comisd },
2777 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2778 [0x51] = SSE_FOP(sqrt),
2779 [0x52] = { helper_rsqrtps, NULL, helper_rsqrtss, NULL },
2780 [0x53] = { helper_rcpps, NULL, helper_rcpss, NULL },
2781 [0x54] = { helper_pand_xmm, helper_pand_xmm }, /* andps, andpd */
2782 [0x55] = { helper_pandn_xmm, helper_pandn_xmm }, /* andnps, andnpd */
2783 [0x56] = { helper_por_xmm, helper_por_xmm }, /* orps, orpd */
2784 [0x57] = { helper_pxor_xmm, helper_pxor_xmm }, /* xorps, xorpd */
2785 [0x58] = SSE_FOP(add),
2786 [0x59] = SSE_FOP(mul),
2787 [0x5a] = { helper_cvtps2pd, helper_cvtpd2ps,
2788 helper_cvtss2sd, helper_cvtsd2ss },
2789 [0x5b] = { helper_cvtdq2ps, helper_cvtps2dq, helper_cvttps2dq },
2790 [0x5c] = SSE_FOP(sub),
2791 [0x5d] = SSE_FOP(min),
2792 [0x5e] = SSE_FOP(div),
2793 [0x5f] = SSE_FOP(max),
2794
2795 [0xc2] = SSE_FOP(cmpeq),
2796 [0xc6] = { helper_shufps, helper_shufpd },
2797
2798 /* MMX ops and their SSE extensions */
2799 [0x60] = MMX_OP2(punpcklbw),
2800 [0x61] = MMX_OP2(punpcklwd),
2801 [0x62] = MMX_OP2(punpckldq),
2802 [0x63] = MMX_OP2(packsswb),
2803 [0x64] = MMX_OP2(pcmpgtb),
2804 [0x65] = MMX_OP2(pcmpgtw),
2805 [0x66] = MMX_OP2(pcmpgtl),
2806 [0x67] = MMX_OP2(packuswb),
2807 [0x68] = MMX_OP2(punpckhbw),
2808 [0x69] = MMX_OP2(punpckhwd),
2809 [0x6a] = MMX_OP2(punpckhdq),
2810 [0x6b] = MMX_OP2(packssdw),
2811 [0x6c] = { NULL, helper_punpcklqdq_xmm },
2812 [0x6d] = { NULL, helper_punpckhqdq_xmm },
2813 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2814 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2815 [0x70] = { helper_pshufw_mmx,
2816 helper_pshufd_xmm,
2817 helper_pshufhw_xmm,
2818 helper_pshuflw_xmm },
2819 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2820 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2821 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2822 [0x74] = MMX_OP2(pcmpeqb),
2823 [0x75] = MMX_OP2(pcmpeqw),
2824 [0x76] = MMX_OP2(pcmpeql),
2825 [0x77] = { SSE_DUMMY }, /* emms */
2826 [0x7c] = { NULL, helper_haddpd, NULL, helper_haddps },
2827 [0x7d] = { NULL, helper_hsubpd, NULL, helper_hsubps },
2828 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2829 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2830 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2831 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2832 [0xd0] = { NULL, helper_addsubpd, NULL, helper_addsubps },
2833 [0xd1] = MMX_OP2(psrlw),
2834 [0xd2] = MMX_OP2(psrld),
2835 [0xd3] = MMX_OP2(psrlq),
2836 [0xd4] = MMX_OP2(paddq),
2837 [0xd5] = MMX_OP2(pmullw),
2838 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movq2dq, movdq2q */
2839 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2840 [0xd8] = MMX_OP2(psubusb),
2841 [0xd9] = MMX_OP2(psubusw),
2842 [0xda] = MMX_OP2(pminub),
2843 [0xdb] = MMX_OP2(pand),
2844 [0xdc] = MMX_OP2(paddusb),
2845 [0xdd] = MMX_OP2(paddusw),
2846 [0xde] = MMX_OP2(pmaxub),
2847 [0xdf] = MMX_OP2(pandn),
2848 [0xe0] = MMX_OP2(pavgb),
2849 [0xe1] = MMX_OP2(psraw),
2850 [0xe2] = MMX_OP2(psrad),
2851 [0xe3] = MMX_OP2(pavgw),
2852 [0xe4] = MMX_OP2(pmulhuw),
2853 [0xe5] = MMX_OP2(pmulhw),
2854 [0xe6] = { NULL, helper_cvttpd2dq, helper_cvtdq2pd, helper_cvtpd2dq },
2855 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2856 [0xe8] = MMX_OP2(psubsb),
2857 [0xe9] = MMX_OP2(psubsw),
2858 [0xea] = MMX_OP2(pminsw),
2859 [0xeb] = MMX_OP2(por),
2860 [0xec] = MMX_OP2(paddsb),
2861 [0xed] = MMX_OP2(paddsw),
2862 [0xee] = MMX_OP2(pmaxsw),
2863 [0xef] = MMX_OP2(pxor),
2864 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2865 [0xf1] = MMX_OP2(psllw),
2866 [0xf2] = MMX_OP2(pslld),
2867 [0xf3] = MMX_OP2(psllq),
2868 [0xf4] = MMX_OP2(pmuludq),
2869 [0xf5] = MMX_OP2(pmaddwd),
2870 [0xf6] = MMX_OP2(psadbw),
2871 [0xf7] = MMX_OP2(maskmov),
2872 [0xf8] = MMX_OP2(psubb),
2873 [0xf9] = MMX_OP2(psubw),
2874 [0xfa] = MMX_OP2(psubl),
2875 [0xfb] = MMX_OP2(psubq),
2876 [0xfc] = MMX_OP2(paddb),
2877 [0xfd] = MMX_OP2(paddw),
2878 [0xfe] = MMX_OP2(paddl),
2879};
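/* For reference: sse_op_table1 is indexed by the second opcode byte and the
 mandatory prefix, so for 0F 58 the four columns resolve to helper_addps
 (no prefix), helper_addpd (0x66), helper_addss (0xF3) and helper_addsd
 (0xF2); see the b1 computation in gen_sse() below. */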
2880
2881static void *sse_op_table2[3 * 8][2] = {
2882 [0 + 2] = MMX_OP2(psrlw),
2883 [0 + 4] = MMX_OP2(psraw),
2884 [0 + 6] = MMX_OP2(psllw),
2885 [8 + 2] = MMX_OP2(psrld),
2886 [8 + 4] = MMX_OP2(psrad),
2887 [8 + 6] = MMX_OP2(pslld),
2888 [16 + 2] = MMX_OP2(psrlq),
2889 [16 + 3] = { NULL, helper_psrldq_xmm },
2890 [16 + 6] = MMX_OP2(psllq),
2891 [16 + 7] = { NULL, helper_pslldq_xmm },
2892};
2893
2894static void *sse_op_table3[4 * 3] = {
2895 helper_cvtsi2ss,
2896 helper_cvtsi2sd,
2897 X86_64_ONLY(helper_cvtsq2ss),
2898 X86_64_ONLY(helper_cvtsq2sd),
2899
2900 helper_cvttss2si,
2901 helper_cvttsd2si,
2902 X86_64_ONLY(helper_cvttss2sq),
2903 X86_64_ONLY(helper_cvttsd2sq),
2904
2905 helper_cvtss2si,
2906 helper_cvtsd2si,
2907 X86_64_ONLY(helper_cvtss2sq),
2908 X86_64_ONLY(helper_cvtsd2sq),
2909};
2910
2911static void *sse_op_table4[8][4] = {
2912 SSE_FOP(cmpeq),
2913 SSE_FOP(cmplt),
2914 SSE_FOP(cmple),
2915 SSE_FOP(cmpunord),
2916 SSE_FOP(cmpneq),
2917 SSE_FOP(cmpnlt),
2918 SSE_FOP(cmpnle),
2919 SSE_FOP(cmpord),
2920};
2921
2922static void *sse_op_table5[256] = {
2923 [0x0c] = helper_pi2fw,
2924 [0x0d] = helper_pi2fd,
2925 [0x1c] = helper_pf2iw,
2926 [0x1d] = helper_pf2id,
2927 [0x8a] = helper_pfnacc,
2928 [0x8e] = helper_pfpnacc,
2929 [0x90] = helper_pfcmpge,
2930 [0x94] = helper_pfmin,
2931 [0x96] = helper_pfrcp,
2932 [0x97] = helper_pfrsqrt,
2933 [0x9a] = helper_pfsub,
2934 [0x9e] = helper_pfadd,
2935 [0xa0] = helper_pfcmpgt,
2936 [0xa4] = helper_pfmax,
2937 [0xa6] = helper_movq, /* pfrcpit1; no need to actually increase precision */
2938 [0xa7] = helper_movq, /* pfrsqit1 */
2939 [0xaa] = helper_pfsubr,
2940 [0xae] = helper_pfacc,
2941 [0xb0] = helper_pfcmpeq,
2942 [0xb4] = helper_pfmul,
2943 [0xb6] = helper_movq, /* pfrcpit2 */
2944 [0xb7] = helper_pmulhrw_mmx,
2945 [0xbb] = helper_pswapd,
2946 [0xbf] = helper_pavgb_mmx /* pavgusb */
2947};
2948
2949static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
2950{
2951 int b1, op1_offset, op2_offset, is_xmm, val, ot;
2952 int modrm, mod, rm, reg, reg_addr, offset_addr;
2953 void *sse_op2;
2954
2955 b &= 0xff;
2956 if (s->prefix & PREFIX_DATA)
2957 b1 = 1;
2958 else if (s->prefix & PREFIX_REPZ)
2959 b1 = 2;
2960 else if (s->prefix & PREFIX_REPNZ)
2961 b1 = 3;
2962 else
2963 b1 = 0;
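 /* b1 selects the table column for the mandatory prefix: 0 = none
    (ps/MMX), 1 = 0x66 (pd/integer SSE2), 2 = 0xF3 (ss), 3 = 0xF2 (sd);
    e.g. F3 0F 51 resolves to helper_sqrtss. */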
2964 sse_op2 = sse_op_table1[b][b1];
2965 if (!sse_op2)
2966 goto illegal_op;
2967 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
2968 is_xmm = 1;
2969 } else {
2970 if (b1 == 0) {
2971 /* MMX case */
2972 is_xmm = 0;
2973 } else {
2974 is_xmm = 1;
2975 }
2976 }
2977 /* simple MMX/SSE operation */
2978 if (s->flags & HF_TS_MASK) {
2979 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
2980 return;
2981 }
2982 if (s->flags & HF_EM_MASK) {
2983 illegal_op:
2984 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
2985 return;
2986 }
2987 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
2988 goto illegal_op;
2989 if (b == 0x0e) {
2990 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
2991 goto illegal_op;
2992 /* femms */
2993 tcg_gen_helper_0_0(helper_emms);
2994 return;
2995 }
2996 if (b == 0x77) {
2997 /* emms */
2998 tcg_gen_helper_0_0(helper_emms);
2999 return;
3000 }
3001 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3002 the static cpu state) */
3003 if (!is_xmm) {
3004 tcg_gen_helper_0_0(helper_enter_mmx);
3005 }
3006
3007 modrm = ldub_code(s->pc++);
3008 reg = ((modrm >> 3) & 7);
3009 if (is_xmm)
3010 reg |= rex_r;
3011 mod = (modrm >> 6) & 3;
3012 if (sse_op2 == SSE_SPECIAL) {
3013 b |= (b1 << 8);
3014 switch(b) {
3015 case 0x0e7: /* movntq */
3016 if (mod == 3)
3017 goto illegal_op;
3018 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3019 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3020 break;
3021 case 0x1e7: /* movntdq */
3022 case 0x02b: /* movntps */
3023 case 0x12b: /* movntpd */
3024 if (mod == 3)
3025 goto illegal_op;
3026 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3027 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3028 break;
3029 case 0x3f0: /* lddqu is a load, not a store */
 if (mod == 3)
 goto illegal_op;
 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
 break;
3030 case 0x6e: /* movd mm, ea */
3031#ifdef TARGET_X86_64
3032 if (s->dflag == 2) {
3033 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3034 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3035 } else
3036#endif
3037 {
3038 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3039 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3040 offsetof(CPUX86State,fpregs[reg].mmx));
3041 tcg_gen_helper_0_2(helper_movl_mm_T0_mmx, cpu_ptr0, cpu_T[0]);
3042 }
3043 break;
3044 case 0x16e: /* movd xmm, ea */
3045#ifdef TARGET_X86_64
3046 if (s->dflag == 2) {
3047 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3048 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3049 offsetof(CPUX86State,xmm_regs[reg]));
3050 tcg_gen_helper_0_2(helper_movq_mm_T0_xmm, cpu_ptr0, cpu_T[0]);
3051 } else
3052#endif
3053 {
3054 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3055 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3056 offsetof(CPUX86State,xmm_regs[reg]));
3057 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3058 tcg_gen_helper_0_2(helper_movl_mm_T0_xmm, cpu_ptr0, cpu_tmp2_i32);
3059 }
3060 break;
3061 case 0x6f: /* movq mm, ea */
3062 if (mod != 3) {
3063 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3064 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3065 } else {
3066 rm = (modrm & 7);
3067 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3068 offsetof(CPUX86State,fpregs[rm].mmx));
3069 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3070 offsetof(CPUX86State,fpregs[reg].mmx));
3071 }
3072 break;
3073 case 0x010: /* movups */
3074 case 0x110: /* movupd */
3075 case 0x028: /* movaps */
3076 case 0x128: /* movapd */
3077 case 0x16f: /* movdqa xmm, ea */
3078 case 0x26f: /* movdqu xmm, ea */
3079 if (mod != 3) {
3080 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3081 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3082 } else {
3083 rm = (modrm & 7) | REX_B(s);
3084 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3085 offsetof(CPUX86State,xmm_regs[rm]));
3086 }
3087 break;
3088 case 0x210: /* movss xmm, ea */
3089 if (mod != 3) {
3090 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3091 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3092 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3093 gen_op_movl_T0_0();
3094 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3095 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3096 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3097 } else {
3098 rm = (modrm & 7) | REX_B(s);
3099 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3100 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3101 }
3102 break;
3103 case 0x310: /* movsd xmm, ea */
3104 if (mod != 3) {
3105 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3106 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3107 gen_op_movl_T0_0();
3108 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3109 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3110 } else {
3111 rm = (modrm & 7) | REX_B(s);
3112 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3113 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3114 }
3115 break;
3116 case 0x012: /* movlps */
3117 case 0x112: /* movlpd */
3118 if (mod != 3) {
3119 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3120 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3121 } else {
3122 /* movhlps */
3123 rm = (modrm & 7) | REX_B(s);
3124 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3125 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3126 }
3127 break;
3128 case 0x212: /* movsldup */
3129 if (mod != 3) {
3130 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3131 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3132 } else {
3133 rm = (modrm & 7) | REX_B(s);
3134 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3135 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3136 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3137 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3138 }
3139 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3140 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3141 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3142 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3143 break;
3144 case 0x312: /* movddup */
3145 if (mod != 3) {
3146 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3147 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3148 } else {
3149 rm = (modrm & 7) | REX_B(s);
3150 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3151 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3152 }
3153 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3154 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3155 break;
3156 case 0x016: /* movhps */
3157 case 0x116: /* movhpd */
3158 if (mod != 3) {
3159 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3160 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3161 } else {
3162 /* movlhps */
3163 rm = (modrm & 7) | REX_B(s);
3164 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3165 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3166 }
3167 break;
3168 case 0x216: /* movshdup */
3169 if (mod != 3) {
3170 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3171 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3172 } else {
3173 rm = (modrm & 7) | REX_B(s);
3174 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3175 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3176 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3177 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3178 }
3179 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3180 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3181 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3182 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3183 break;
3184 case 0x7e: /* movd ea, mm */
3185#ifdef TARGET_X86_64
3186 if (s->dflag == 2) {
3187 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3188 offsetof(CPUX86State,fpregs[reg].mmx));
3189 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3190 } else
3191#endif
3192 {
3193 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3194 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3195 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3196 }
3197 break;
3198 case 0x17e: /* movd ea, xmm */
3199#ifdef TARGET_X86_64
3200 if (s->dflag == 2) {
3201 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3202 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3203 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3204 } else
3205#endif
3206 {
3207 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3208 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3209 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3210 }
3211 break;
3212 case 0x27e: /* movq xmm, ea */
3213 if (mod != 3) {
3214 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3215 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3216 } else {
3217 rm = (modrm & 7) | REX_B(s);
3218 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3219 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3220 }
3221 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3222 break;
3223 case 0x7f: /* movq ea, mm */
3224 if (mod != 3) {
3225 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3226 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3227 } else {
3228 rm = (modrm & 7);
3229 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3230 offsetof(CPUX86State,fpregs[reg].mmx));
3231 }
3232 break;
3233 case 0x011: /* movups */
3234 case 0x111: /* movupd */
3235 case 0x029: /* movaps */
3236 case 0x129: /* movapd */
3237 case 0x17f: /* movdqa ea, xmm */
3238 case 0x27f: /* movdqu ea, xmm */
3239 if (mod != 3) {
3240 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3241 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3242 } else {
3243 rm = (modrm & 7) | REX_B(s);
3244 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3245 offsetof(CPUX86State,xmm_regs[reg]));
3246 }
3247 break;
3248 case 0x211: /* movss ea, xmm */
3249 if (mod != 3) {
3250 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3251 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3252 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3253 } else {
3254 rm = (modrm & 7) | REX_B(s);
3255 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3256 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3257 }
3258 break;
3259 case 0x311: /* movsd ea, xmm */
3260 if (mod != 3) {
3261 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3262 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3263 } else {
3264 rm = (modrm & 7) | REX_B(s);
3265 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3266 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3267 }
3268 break;
3269 case 0x013: /* movlps */
3270 case 0x113: /* movlpd */
3271 if (mod != 3) {
3272 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3273 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3274 } else {
3275 goto illegal_op;
3276 }
3277 break;
3278 case 0x017: /* movhps */
3279 case 0x117: /* movhpd */
3280 if (mod != 3) {
3281 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3282 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3283 } else {
3284 goto illegal_op;
3285 }
3286 break;
3287 case 0x71: /* shift mm, im */
3288 case 0x72:
3289 case 0x73:
3290 case 0x171: /* shift xmm, im */
3291 case 0x172:
3292 case 0x173:
3293 val = ldub_code(s->pc++);
3294 if (is_xmm) {
3295 gen_op_movl_T0_im(val);
3296 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3297 gen_op_movl_T0_0();
3298 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3299 op1_offset = offsetof(CPUX86State,xmm_t0);
3300 } else {
3301 gen_op_movl_T0_im(val);
3302 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3303 gen_op_movl_T0_0();
3304 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3305 op1_offset = offsetof(CPUX86State,mmx_t0);
3306 }
3307 sse_op2 = sse_op_table2[((b - 1) & 3) * 8 + (((modrm >> 3)) & 7)][b1];
3308 if (!sse_op2)
3309 goto illegal_op;
3310 if (is_xmm) {
3311 rm = (modrm & 7) | REX_B(s);
3312 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3313 } else {
3314 rm = (modrm & 7);
3315 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3316 }
3317 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3318 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3319 tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_ptr1);
3320 break;
3321 case 0x050: /* movmskps */
3322 rm = (modrm & 7) | REX_B(s);
3323 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3324 offsetof(CPUX86State,xmm_regs[rm]));
3325 tcg_gen_helper_1_1(helper_movmskps, cpu_tmp2_i32, cpu_ptr0);
3326 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3327 gen_op_mov_reg_T0(OT_LONG, reg);
3328 break;
3329 case 0x150: /* movmskpd */
3330 rm = (modrm & 7) | REX_B(s);
3331 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3332 offsetof(CPUX86State,xmm_regs[rm]));
3333 tcg_gen_helper_1_1(helper_movmskpd, cpu_tmp2_i32, cpu_ptr0);
3334 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3335 gen_op_mov_reg_T0(OT_LONG, reg);
3336 break;
3337 case 0x02a: /* cvtpi2ps */
3338 case 0x12a: /* cvtpi2pd */
3339 tcg_gen_helper_0_0(helper_enter_mmx);
3340 if (mod != 3) {
3341 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3342 op2_offset = offsetof(CPUX86State,mmx_t0);
3343 gen_ldq_env_A0(s->mem_index, op2_offset);
3344 } else {
3345 rm = (modrm & 7);
3346 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3347 }
3348 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3349 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3350 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3351 switch(b >> 8) {
3352 case 0x0:
3353 tcg_gen_helper_0_2(helper_cvtpi2ps, cpu_ptr0, cpu_ptr1);
3354 break;
3355 default:
3356 case 0x1:
3357 tcg_gen_helper_0_2(helper_cvtpi2pd, cpu_ptr0, cpu_ptr1);
3358 break;
3359 }
3360 break;
3361 case 0x22a: /* cvtsi2ss */
3362 case 0x32a: /* cvtsi2sd */
3363 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3364 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
3365 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3366 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3367 sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2)];
3368 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3369 tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_tmp2_i32);
3370 break;
3371 case 0x02c: /* cvttps2pi */
3372 case 0x12c: /* cvttpd2pi */
3373 case 0x02d: /* cvtps2pi */
3374 case 0x12d: /* cvtpd2pi */
3375 tcg_gen_helper_0_0(helper_enter_mmx);
3376 if (mod != 3) {
3377 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3378 op2_offset = offsetof(CPUX86State,xmm_t0);
3379 gen_ldo_env_A0(s->mem_index, op2_offset);
3380 } else {
3381 rm = (modrm & 7) | REX_B(s);
3382 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3383 }
3384 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3385 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3386 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3387 switch(b) {
3388 case 0x02c:
3389 tcg_gen_helper_0_2(helper_cvttps2pi, cpu_ptr0, cpu_ptr1);
3390 break;
3391 case 0x12c:
3392 tcg_gen_helper_0_2(helper_cvttpd2pi, cpu_ptr0, cpu_ptr1);
3393 break;
3394 case 0x02d:
3395 tcg_gen_helper_0_2(helper_cvtps2pi, cpu_ptr0, cpu_ptr1);
3396 break;
3397 case 0x12d:
3398 tcg_gen_helper_0_2(helper_cvtpd2pi, cpu_ptr0, cpu_ptr1);
3399 break;
3400 }
3401 break;
3402 case 0x22c: /* cvttss2si */
3403 case 0x32c: /* cvttsd2si */
3404 case 0x22d: /* cvtss2si */
3405 case 0x32d: /* cvtsd2si */
3406 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3407 if (mod != 3) {
3408 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3409 if ((b >> 8) & 1) {
3410 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
3411 } else {
3412 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3413 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3414 }
3415 op2_offset = offsetof(CPUX86State,xmm_t0);
3416 } else {
3417 rm = (modrm & 7) | REX_B(s);
3418 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3419 }
3420 sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2) + 4 +
3421 (b & 1) * 4];
3422 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3423 if (ot == OT_LONG) {
3424 tcg_gen_helper_1_1(sse_op2, cpu_tmp2_i32, cpu_ptr0);
3425 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3426 } else {
3427 tcg_gen_helper_1_1(sse_op2, cpu_T[0], cpu_ptr0);
3428 }
3429 gen_op_mov_reg_T0(ot, reg);
3430 break;
3431 case 0xc4: /* pinsrw */
3432 case 0x1c4:
3433 s->rip_offset = 1;
3434 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
3435 val = ldub_code(s->pc++);
3436 if (b1) {
3437 val &= 7;
3438 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3439 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3440 } else {
3441 val &= 3;
3442 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3443 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3444 }
3445 break;
3446 case 0xc5: /* pextrw */
3447 case 0x1c5:
3448 if (mod != 3)
3449 goto illegal_op;
3450 val = ldub_code(s->pc++);
3451 if (b1) {
3452 val &= 7;
3453 rm = (modrm & 7) | REX_B(s);
3454 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3455 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3456 } else {
3457 val &= 3;
3458 rm = (modrm & 7);
3459 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3460 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3461 }
3462 reg = ((modrm >> 3) & 7) | rex_r;
3463 gen_op_mov_reg_T0(OT_LONG, reg);
3464 break;
3465 case 0x1d6: /* movq ea, xmm */
3466 if (mod != 3) {
3467 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3468 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3469 } else {
3470 rm = (modrm & 7) | REX_B(s);
3471 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3472 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3473 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3474 }
3475 break;
3476 case 0x2d6: /* movq2dq */
3477 tcg_gen_helper_0_0(helper_enter_mmx);
3478 rm = (modrm & 7);
3479 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3480 offsetof(CPUX86State,fpregs[rm].mmx));
3481 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3482 break;
3483 case 0x3d6: /* movdq2q */
3484 tcg_gen_helper_0_0(helper_enter_mmx);
3485 rm = (modrm & 7) | REX_B(s);
3486 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3487 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3488 break;
3489 case 0xd7: /* pmovmskb */
3490 case 0x1d7:
3491 if (mod != 3)
3492 goto illegal_op;
3493 if (b1) {
3494 rm = (modrm & 7) | REX_B(s);
3495 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3496 tcg_gen_helper_1_1(helper_pmovmskb_xmm, cpu_tmp2_i32, cpu_ptr0);
3497 } else {
3498 rm = (modrm & 7);
3499 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3500 tcg_gen_helper_1_1(helper_pmovmskb_mmx, cpu_tmp2_i32, cpu_ptr0);
3501 }
3502 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3503 reg = ((modrm >> 3) & 7) | rex_r;
3504 gen_op_mov_reg_T0(OT_LONG, reg);
3505 break;
3506 default:
3507 goto illegal_op;
3508 }
3509 } else {
3510 /* generic MMX or SSE operation */
3511 switch(b) {
3512 case 0x70: /* pshufx insn */
3513 case 0xc6: /* pshufx insn */
3514 case 0xc2: /* compare insns */
3515 s->rip_offset = 1;
3516 break;
3517 default:
3518 break;
3519 }
3520 if (is_xmm) {
3521 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3522 if (mod != 3) {
3523 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3524 op2_offset = offsetof(CPUX86State,xmm_t0);
3525 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
3526 b == 0xc2)) {
3527 /* special case for scalar SSE instructions: only 32/64 bits are loaded */
3528 if (b1 == 2) {
3529 /* 32 bit access */
3530 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3531 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3532 } else {
3533 /* 64 bit access */
3534 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
3535 }
3536 } else {
3537 gen_ldo_env_A0(s->mem_index, op2_offset);
3538 }
3539 } else {
3540 rm = (modrm & 7) | REX_B(s);
3541 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3542 }
3543 } else {
3544 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3545 if (mod != 3) {
3546 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3547 op2_offset = offsetof(CPUX86State,mmx_t0);
3548 gen_ldq_env_A0(s->mem_index, op2_offset);
3549 } else {
3550 rm = (modrm & 7);
3551 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3552 }
3553 }
3554 switch(b) {
3555 case 0x0f: /* 3DNow! data insns */
3556 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3557 goto illegal_op;
3558 val = ldub_code(s->pc++);
3559 sse_op2 = sse_op_table5[val];
3560 if (!sse_op2)
3561 goto illegal_op;
3562 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3563 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3564 tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_ptr1);
3565 break;
3566 case 0x70: /* pshufx insn */
3567 case 0xc6: /* pshufx insn */
3568 val = ldub_code(s->pc++);
3569 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3570 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3571 tcg_gen_helper_0_3(sse_op2, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
3572 break;
3573 case 0xc2:
3574 /* compare insns */
3575 val = ldub_code(s->pc++);
3576 if (val >= 8)
3577 goto illegal_op;
3578 sse_op2 = sse_op_table4[val][b1];
3579 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3580 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3581 tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_ptr1);
3582 break;
3583 case 0xf7:
3584 /* maskmov: we must prepare A0 for the implicit DS:[EDI] operand */
3585 if (mod != 3)
3586 goto illegal_op;
3587#ifdef TARGET_X86_64
3588 if (s->aflag == 2) {
3589 gen_op_movq_A0_reg(R_EDI);
3590 } else
3591#endif
3592 {
3593 gen_op_movl_A0_reg(R_EDI);
3594 if (s->aflag == 0)
3595 gen_op_andl_A0_ffff();
3596 }
3597 gen_add_A0_ds_seg(s);
3598
3599 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3600 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3601 tcg_gen_helper_0_3(sse_op2, cpu_ptr0, cpu_ptr1, cpu_A0);
3602 break;
3603 default:
3604 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3605 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3606 tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_ptr1);
3607 break;
3608 }
3609 if (b == 0x2e || b == 0x2f) {
3610 s->cc_op = CC_OP_EFLAGS;
3611 }
3612 }
3613}
3614
3615/* convert one instruction. s->is_jmp is set if the translation must
3616 be stopped. Return the next pc value */
3617static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
3618{
3619 int b, prefixes, aflag, dflag;
3620 int shift, ot;
3621 int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
3622 target_ulong next_eip, tval;
3623 int rex_w, rex_r;
3624
3625 if (unlikely(loglevel & CPU_LOG_TB_OP))
3626 tcg_gen_debug_insn_start(pc_start);
3627 s->pc = pc_start;
3628 prefixes = 0;
3629 aflag = s->code32;
3630 dflag = s->code32;
3631 s->override = -1;
3632 rex_w = -1;
3633 rex_r = 0;
3634#ifdef TARGET_X86_64
3635 s->rex_x = 0;
3636 s->rex_b = 0;
3637 x86_64_hregs = 0;
3638#endif
3639 s->rip_offset = 0; /* for relative ip address */
3640 next_byte:
3641 b = ldub_code(s->pc);
3642 s->pc++;
3643 /* check prefixes */
3644#ifdef TARGET_X86_64
3645 if (CODE64(s)) {
3646 switch (b) {
3647 case 0xf3:
3648 prefixes |= PREFIX_REPZ;
3649 goto next_byte;
3650 case 0xf2:
3651 prefixes |= PREFIX_REPNZ;
3652 goto next_byte;
3653 case 0xf0:
3654 prefixes |= PREFIX_LOCK;
3655 goto next_byte;
3656 case 0x2e:
3657 s->override = R_CS;
3658 goto next_byte;
3659 case 0x36:
3660 s->override = R_SS;
3661 goto next_byte;
3662 case 0x3e:
3663 s->override = R_DS;
3664 goto next_byte;
3665 case 0x26:
3666 s->override = R_ES;
3667 goto next_byte;
3668 case 0x64:
3669 s->override = R_FS;
3670 goto next_byte;
3671 case 0x65:
3672 s->override = R_GS;
3673 goto next_byte;
3674 case 0x66:
3675 prefixes |= PREFIX_DATA;
3676 goto next_byte;
3677 case 0x67:
3678 prefixes |= PREFIX_ADR;
3679 goto next_byte;
3680 case 0x40 ... 0x4f:
3681 /* REX prefix */
3682 rex_w = (b >> 3) & 1;
3683 rex_r = (b & 0x4) << 1;
3684 s->rex_x = (b & 0x2) << 2;
3685 REX_B(s) = (b & 0x1) << 3;
3686 x86_64_hregs = 1; /* select uniform byte register addressing */
3687 goto next_byte;
3688 }
3689 if (rex_w == 1) {
3690 /* 0x66 is ignored if rex.w is set */
3691 dflag = 2;
3692 } else {
3693 if (prefixes & PREFIX_DATA)
3694 dflag ^= 1;
3695 }
3696 if (!(prefixes & PREFIX_ADR))
3697 aflag = 2;
3698 } else
3699#endif
3700 {
3701 switch (b) {
3702 case 0xf3:
3703 prefixes |= PREFIX_REPZ;
3704 goto next_byte;
3705 case 0xf2:
3706 prefixes |= PREFIX_REPNZ;
3707 goto next_byte;
3708 case 0xf0:
3709 prefixes |= PREFIX_LOCK;
3710 goto next_byte;
3711 case 0x2e:
3712 s->override = R_CS;
3713 goto next_byte;
3714 case 0x36:
3715 s->override = R_SS;
3716 goto next_byte;
3717 case 0x3e:
3718 s->override = R_DS;
3719 goto next_byte;
3720 case 0x26:
3721 s->override = R_ES;
3722 goto next_byte;
3723 case 0x64:
3724 s->override = R_FS;
3725 goto next_byte;
3726 case 0x65:
3727 s->override = R_GS;
3728 goto next_byte;
3729 case 0x66:
3730 prefixes |= PREFIX_DATA;
3731 goto next_byte;
3732 case 0x67:
3733 prefixes |= PREFIX_ADR;
3734 goto next_byte;
3735 }
3736 if (prefixes & PREFIX_DATA)
3737 dflag ^= 1;
3738 if (prefixes & PREFIX_ADR)
3739 aflag ^= 1;
3740 }
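 /* Example: in a 32-bit code segment a single 0x66 prefix toggles dflag
    to 0, turning "inc eax" (0x40) into "inc ax" (0x66 0x40); 0x67
    similarly flips the address size. In 64-bit mode REX.W overrides
    0x66, as handled above. */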
3741
3742 s->prefix = prefixes;
3743 s->aflag = aflag;
3744 s->dflag = dflag;
3745
3746 /* lock generation */
3747 if (prefixes & PREFIX_LOCK)
3748 tcg_gen_helper_0_0(helper_lock);
3749
3750 /* now check op code */
3751 reswitch:
3752 switch(b) {
3753 case 0x0f:
3754 /**************************/
3755 /* extended op code */
3756 b = ldub_code(s->pc++) | 0x100;
3757 goto reswitch;
3758
3759 /**************************/
3760 /* arith & logic */
3761 case 0x00 ... 0x05:
3762 case 0x08 ... 0x0d:
3763 case 0x10 ... 0x15:
3764 case 0x18 ... 0x1d:
3765 case 0x20 ... 0x25:
3766 case 0x28 ... 0x2d:
3767 case 0x30 ... 0x35:
3768 case 0x38 ... 0x3d:
3769 {
3770 int op, f, val;
3771 op = (b >> 3) & 7;
3772 f = (b >> 1) & 3;
3773
3774 if ((b & 1) == 0)
3775 ot = OT_BYTE;
3776 else
3777 ot = dflag + OT_WORD;
3778
3779 switch(f) {
3780 case 0: /* OP Ev, Gv */
3781 modrm = ldub_code(s->pc++);
3782 reg = ((modrm >> 3) & 7) | rex_r;
3783 mod = (modrm >> 6) & 3;
3784 rm = (modrm & 7) | REX_B(s);
3785 if (mod != 3) {
3786 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3787 opreg = OR_TMP0;
3788 } else if (op == OP_XORL && rm == reg) {
3789 xor_zero:
3790 /* xor reg, reg optimisation */
3791 gen_op_movl_T0_0();
3792 s->cc_op = CC_OP_LOGICB + ot;
3793 gen_op_mov_reg_T0(ot, reg);
3794 gen_op_update1_cc();
3795 break;
3796 } else {
3797 opreg = rm;
3798 }
3799 gen_op_mov_TN_reg(ot, 1, reg);
3800 gen_op(s, op, ot, opreg);
3801 break;
3802 case 1: /* OP Gv, Ev */
3803 modrm = ldub_code(s->pc++);
3804 mod = (modrm >> 6) & 3;
3805 reg = ((modrm >> 3) & 7) | rex_r;
3806 rm = (modrm & 7) | REX_B(s);
3807 if (mod != 3) {
3808 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3809 gen_op_ld_T1_A0(ot + s->mem_index);
3810 } else if (op == OP_XORL && rm == reg) {
3811 goto xor_zero;
3812 } else {
3813 gen_op_mov_TN_reg(ot, 1, rm);
3814 }
3815 gen_op(s, op, ot, reg);
3816 break;
3817 case 2: /* OP A, Iv */
3818 val = insn_get(s, ot);
3819 gen_op_movl_T1_im(val);
3820 gen_op(s, op, ot, OR_EAX);
3821 break;
3822 }
3823 }
3824 break;
3825
3826 case 0x80: /* GRP1 */
3827 case 0x81:
3828 case 0x82:
3829 case 0x83:
3830 {
3831 int val;
3832
3833 if ((b & 1) == 0)
3834 ot = OT_BYTE;
3835 else
3836 ot = dflag + OT_WORD;
3837
3838 modrm = ldub_code(s->pc++);
3839 mod = (modrm >> 6) & 3;
3840 rm = (modrm & 7) | REX_B(s);
3841 op = (modrm >> 3) & 7;
3842
3843 if (mod != 3) {
3844 if (b == 0x83)
3845 s->rip_offset = 1;
3846 else
3847 s->rip_offset = insn_const_size(ot);
3848 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3849 opreg = OR_TMP0;
3850 } else {
3851 opreg = rm;
3852 }
3853
3854 switch(b) {
3855 default:
3856 case 0x80:
3857 case 0x81:
3858 case 0x82:
3859 val = insn_get(s, ot);
3860 break;
3861 case 0x83:
3862 val = (int8_t)insn_get(s, OT_BYTE);
3863 break;
3864 }
3865 gen_op_movl_T1_im(val);
3866 gen_op(s, op, ot, opreg);
3867 }
3868 break;
3869
3870 /**************************/
3871 /* inc, dec, and other misc arith */
3872 case 0x40 ... 0x47: /* inc Gv */
3873 ot = dflag ? OT_LONG : OT_WORD;
3874 gen_inc(s, ot, OR_EAX + (b & 7), 1);
3875 break;
3876 case 0x48 ... 0x4f: /* dec Gv */
3877 ot = dflag ? OT_LONG : OT_WORD;
3878 gen_inc(s, ot, OR_EAX + (b & 7), -1);
3879 break;
3880 case 0xf6: /* GRP3 */
3881 case 0xf7:
3882 if ((b & 1) == 0)
3883 ot = OT_BYTE;
3884 else
3885 ot = dflag + OT_WORD;
3886
3887 modrm = ldub_code(s->pc++);
3888 mod = (modrm >> 6) & 3;
3889 rm = (modrm & 7) | REX_B(s);
3890 op = (modrm >> 3) & 7;
3891 if (mod != 3) {
3892 if (op == 0)
3893 s->rip_offset = insn_const_size(ot);
3894 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3895 gen_op_ld_T0_A0(ot + s->mem_index);
3896 } else {
3897 gen_op_mov_TN_reg(ot, 0, rm);
3898 }
3899
3900 switch(op) {
3901 case 0: /* test */
3902 val = insn_get(s, ot);
3903 gen_op_movl_T1_im(val);
3904 gen_op_testl_T0_T1_cc();
3905 s->cc_op = CC_OP_LOGICB + ot;
3906 break;
3907 case 2: /* not */
3908 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
3909 if (mod != 3) {
3910 gen_op_st_T0_A0(ot + s->mem_index);
3911 } else {
3912 gen_op_mov_reg_T0(ot, rm);
3913 }
3914 break;
3915 case 3: /* neg */
3916 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
3917 if (mod != 3) {
3918 gen_op_st_T0_A0(ot + s->mem_index);
3919 } else {
3920 gen_op_mov_reg_T0(ot, rm);
3921 }
3922 gen_op_update_neg_cc();
3923 s->cc_op = CC_OP_SUBB + ot;
3924 break;
3925 case 4: /* mul */
3926 switch(ot) {
3927 case OT_BYTE:
3928 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
3929 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
3930 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
3931 /* XXX: use 32 bit mul which could be faster */
3932 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3933 gen_op_mov_reg_T0(OT_WORD, R_EAX);
3934 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
3935 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
3936 s->cc_op = CC_OP_MULB;
3937 break;
3938 case OT_WORD:
3939 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
3940 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
3941 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
3942 /* XXX: use 32 bit mul which could be faster */
3943 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3944 gen_op_mov_reg_T0(OT_WORD, R_EAX);
3945 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
3946 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
3947 gen_op_mov_reg_T0(OT_WORD, R_EDX);
3948 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
3949 s->cc_op = CC_OP_MULW;
3950 break;
3951 default:
3952 case OT_LONG:
3953#ifdef TARGET_X86_64
3954 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
3955 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
3956 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
3957 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3958 gen_op_mov_reg_T0(OT_LONG, R_EAX);
3959 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
3960 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
3961 gen_op_mov_reg_T0(OT_LONG, R_EDX);
3962 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
3963#else
3964 {
3965 TCGv t0, t1;
3966 t0 = tcg_temp_new(TCG_TYPE_I64);
3967 t1 = tcg_temp_new(TCG_TYPE_I64);
3968 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
3969 tcg_gen_extu_i32_i64(t0, cpu_T[0]);
3970 tcg_gen_extu_i32_i64(t1, cpu_T[1]);
3971 tcg_gen_mul_i64(t0, t0, t1);
3972 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
3973 gen_op_mov_reg_T0(OT_LONG, R_EAX);
3974 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
3975 tcg_gen_shri_i64(t0, t0, 32);
3976 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
3977 gen_op_mov_reg_T0(OT_LONG, R_EDX);
3978 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
3979 }
3980#endif
3981 s->cc_op = CC_OP_MULL;
3982 break;
3983#ifdef TARGET_X86_64
3984 case OT_QUAD:
3985 tcg_gen_helper_0_1(helper_mulq_EAX_T0, cpu_T[0]);
3986 s->cc_op = CC_OP_MULQ;
3987 break;
3988#endif
3989 }
3990 break;
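 /* Illustration (not generated code): the architectural effect modelled
    above for the 32-bit case; eax/edx/src are hypothetical stand-ins
    for the run-time register values. */
#if 0
 {
     uint64_t prod = (uint64_t)eax * (uint64_t)src;
     eax = (uint32_t)prod;           /* low half */
     edx = (uint32_t)(prod >> 32);   /* high half */
     /* CF = OF = (edx != 0) for MUL */
 }
#endif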
3991 case 5: /* imul */
3992 switch(ot) {
3993 case OT_BYTE:
3994 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
3995 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
3996 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
3997 /* XXX: use 32 bit mul which could be faster */
3998 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3999 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4000 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4001 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4002 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4003 s->cc_op = CC_OP_MULB;
4004 break;
4005 case OT_WORD:
4006 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4007 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4008 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4009 /* XXX: use 32 bit mul which could be faster */
4010 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4011 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4012 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4013 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4014 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4015 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4016 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4017 s->cc_op = CC_OP_MULW;
4018 break;
4019 default:
4020 case OT_LONG:
4021#ifdef TARGET_X86_64
4022 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4023 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4024 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4025 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4026 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4027 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4028 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4029 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4030 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4031 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4032#else
4033 {
4034 TCGv t0, t1;
4035 t0 = tcg_temp_new(TCG_TYPE_I64);
4036 t1 = tcg_temp_new(TCG_TYPE_I64);
4037 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4038 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4039 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4040 tcg_gen_mul_i64(t0, t0, t1);
4041 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4042 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4043 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4044 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4045 tcg_gen_shri_i64(t0, t0, 32);
4046 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4047 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4048 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4049 }
4050#endif
4051 s->cc_op = CC_OP_MULL;
4052 break;
4053#ifdef TARGET_X86_64
4054 case OT_QUAD:
4055 tcg_gen_helper_0_1(helper_imulq_EAX_T0, cpu_T[0]);
4056 s->cc_op = CC_OP_MULQ;
4057 break;
4058#endif
4059 }
4060 break;
4061 case 6: /* div */
4062 switch(ot) {
4063 case OT_BYTE:
4064 gen_jmp_im(pc_start - s->cs_base);
4065 tcg_gen_helper_0_1(helper_divb_AL, cpu_T[0]);
4066 break;
4067 case OT_WORD:
4068 gen_jmp_im(pc_start - s->cs_base);
4069 tcg_gen_helper_0_1(helper_divw_AX, cpu_T[0]);
4070 break;
4071 default:
4072 case OT_LONG:
4073 gen_jmp_im(pc_start - s->cs_base);
4074 tcg_gen_helper_0_1(helper_divl_EAX, cpu_T[0]);
4075 break;
4076#ifdef TARGET_X86_64
4077 case OT_QUAD:
4078 gen_jmp_im(pc_start - s->cs_base);
4079 tcg_gen_helper_0_1(helper_divq_EAX, cpu_T[0]);
4080 break;
4081#endif
4082 }
4083 break;
4084 case 7: /* idiv */
4085 switch(ot) {
4086 case OT_BYTE:
4087 gen_jmp_im(pc_start - s->cs_base);
4088 tcg_gen_helper_0_1(helper_idivb_AL, cpu_T[0]);
4089 break;
4090 case OT_WORD:
4091 gen_jmp_im(pc_start - s->cs_base);
4092 tcg_gen_helper_0_1(helper_idivw_AX, cpu_T[0]);
4093 break;
4094 default:
4095 case OT_LONG:
4096 gen_jmp_im(pc_start - s->cs_base);
4097 tcg_gen_helper_0_1(helper_idivl_EAX, cpu_T[0]);
4098 break;
4099#ifdef TARGET_X86_64
4100 case OT_QUAD:
4101 gen_jmp_im(pc_start - s->cs_base);
4102 tcg_gen_helper_0_1(helper_idivq_EAX, cpu_T[0]);
4103 break;
4104#endif
4105 }
4106 break;
4107 default:
4108 goto illegal_op;
4109 }
4110 break;
4111
4112 case 0xfe: /* GRP4 */
4113 case 0xff: /* GRP5 */
4114 if ((b & 1) == 0)
4115 ot = OT_BYTE;
4116 else
4117 ot = dflag + OT_WORD;
4118
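        /* The reg field of the modrm byte selects the operation:
           0=inc, 1=dec, 2=call, 3=lcall, 4=jmp, 5=ljmp, 6=push;
           7 is undefined.  GRP4 (0xfe) only allows inc/dec. */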
4119 modrm = ldub_code(s->pc++);
4120 mod = (modrm >> 6) & 3;
4121 rm = (modrm & 7) | REX_B(s);
4122 op = (modrm >> 3) & 7;
4123 if (op >= 2 && b == 0xfe) {
4124 goto illegal_op;
4125 }
4126 if (CODE64(s)) {
4127 if (op == 2 || op == 4) {
4128                /* operand size for (near) calls and jumps is 64 bit */
4129 ot = OT_QUAD;
4130 } else if (op == 3 || op == 5) {
4131                /* for far calls and far jumps, the operand is 16 or 32 bit,
4132                   even in long mode */
4133 ot = dflag ? OT_LONG : OT_WORD;
4134 } else if (op == 6) {
4135 /* default push size is 64 bit */
4136 ot = dflag ? OT_QUAD : OT_WORD;
4137 }
4138 }
4139 if (mod != 3) {
4140 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4141 if (op >= 2 && op != 3 && op != 5)
4142 gen_op_ld_T0_A0(ot + s->mem_index);
4143 } else {
4144 gen_op_mov_TN_reg(ot, 0, rm);
4145 }
4146
4147 switch(op) {
4148 case 0: /* inc Ev */
4149 if (mod != 3)
4150 opreg = OR_TMP0;
4151 else
4152 opreg = rm;
4153 gen_inc(s, ot, opreg, 1);
4154 break;
4155 case 1: /* dec Ev */
4156 if (mod != 3)
4157 opreg = OR_TMP0;
4158 else
4159 opreg = rm;
4160 gen_inc(s, ot, opreg, -1);
4161 break;
4162 case 2: /* call Ev */
4163            /* XXX: optimize the memory operand case (the 'and' is not necessary) */
4164 if (s->dflag == 0)
4165 gen_op_andl_T0_ffff();
4166 next_eip = s->pc - s->cs_base;
4167 gen_movtl_T1_im(next_eip);
4168 gen_push_T1(s);
4169 gen_op_jmp_T0();
4170 gen_eob(s);
4171 break;
4172 case 3: /* lcall Ev */
4173 gen_op_ld_T1_A0(ot + s->mem_index);
4174 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4175 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4176 do_lcall:
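            /* A protected-mode far call may change privilege level or
               go through a call/task gate, so it is done entirely in a
               helper; the real/vm86-mode helper simply pushes CS:IP
               and loads the new CS:IP. */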
4177 if (s->pe && !s->vm86) {
4178 if (s->cc_op != CC_OP_DYNAMIC)
4179 gen_op_set_cc_op(s->cc_op);
4180 gen_jmp_im(pc_start - s->cs_base);
4181 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4182 tcg_gen_helper_0_4(helper_lcall_protected,
4183 cpu_tmp2_i32, cpu_T[1],
4184 tcg_const_i32(dflag),
4185 tcg_const_i32(s->pc - pc_start));
4186 } else {
4187 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4188 tcg_gen_helper_0_4(helper_lcall_real,
4189 cpu_tmp2_i32, cpu_T[1],
4190 tcg_const_i32(dflag),
4191 tcg_const_i32(s->pc - s->cs_base));
4192 }
4193 gen_eob(s);
4194 break;
4195 case 4: /* jmp Ev */
4196 if (s->dflag == 0)
4197 gen_op_andl_T0_ffff();
4198 gen_op_jmp_T0();
4199 gen_eob(s);
4200 break;
4201 case 5: /* ljmp Ev */
4202 gen_op_ld_T1_A0(ot + s->mem_index);
4203 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4204 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4205 do_ljmp:
4206 if (s->pe && !s->vm86) {
4207 if (s->cc_op != CC_OP_DYNAMIC)
4208 gen_op_set_cc_op(s->cc_op);
4209 gen_jmp_im(pc_start - s->cs_base);
4210 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4211 tcg_gen_helper_0_3(helper_ljmp_protected,
4212 cpu_tmp2_i32,
4213 cpu_T[1],
4214 tcg_const_i32(s->pc - pc_start));
4215 } else {
4216 gen_op_movl_seg_T0_vm(R_CS);
4217 gen_op_movl_T0_T1();
4218 gen_op_jmp_T0();
4219 }
4220 gen_eob(s);
4221 break;
4222 case 6: /* push Ev */
4223 gen_push_T0(s);
4224 break;
4225 default:
4226 goto illegal_op;
4227 }
4228 break;
4229
4230 case 0x84: /* test Ev, Gv */
4231 case 0x85:
4232 if ((b & 1) == 0)
4233 ot = OT_BYTE;
4234 else
4235 ot = dflag + OT_WORD;
4236
4237 modrm = ldub_code(s->pc++);
4238 mod = (modrm >> 6) & 3;
4239 rm = (modrm & 7) | REX_B(s);
4240 reg = ((modrm >> 3) & 7) | rex_r;
4241
4242 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4243 gen_op_mov_TN_reg(ot, 1, reg);
4244 gen_op_testl_T0_T1_cc();
4245 s->cc_op = CC_OP_LOGICB + ot;
4246 break;
4247
4248 case 0xa8: /* test eAX, Iv */
4249 case 0xa9:
4250 if ((b & 1) == 0)
4251 ot = OT_BYTE;
4252 else
4253 ot = dflag + OT_WORD;
4254 val = insn_get(s, ot);
4255
4256 gen_op_mov_TN_reg(ot, 0, OR_EAX);
4257 gen_op_movl_T1_im(val);
4258 gen_op_testl_T0_T1_cc();
4259 s->cc_op = CC_OP_LOGICB + ot;
4260 break;
4261
4262 case 0x98: /* CWDE/CBW */
4263#ifdef TARGET_X86_64
4264 if (dflag == 2) {
4265 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4266 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4267 gen_op_mov_reg_T0(OT_QUAD, R_EAX);
4268 } else
4269#endif
4270 if (dflag == 1) {
4271 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4272 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4273 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4274 } else {
4275 gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
4276 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4277 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4278 }
4279 break;
4280 case 0x99: /* CDQ/CWD */
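        /* CWD/CDQ/CQO: fill (R/E)DX with the sign of the accumulator,
           obtained with an arithmetic right shift by width-1 bits. */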
4281#ifdef TARGET_X86_64
4282 if (dflag == 2) {
4283 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
4284 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
4285 gen_op_mov_reg_T0(OT_QUAD, R_EDX);
4286 } else
4287#endif
4288 if (dflag == 1) {
4289 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4290 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4291 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
4292 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4293 } else {
4294 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4295 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4296 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
4297 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4298 }
4299 break;
4300 case 0x1af: /* imul Gv, Ev */
4301 case 0x69: /* imul Gv, Ev, I */
4302 case 0x6b:
4303 ot = dflag + OT_WORD;
4304 modrm = ldub_code(s->pc++);
4305 reg = ((modrm >> 3) & 7) | rex_r;
4306 if (b == 0x69)
4307 s->rip_offset = insn_const_size(ot);
4308 else if (b == 0x6b)
4309 s->rip_offset = 1;
4310 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4311 if (b == 0x69) {
4312 val = insn_get(s, ot);
4313 gen_op_movl_T1_im(val);
4314 } else if (b == 0x6b) {
4315 val = (int8_t)insn_get(s, OT_BYTE);
4316 gen_op_movl_T1_im(val);
4317 } else {
4318 gen_op_mov_TN_reg(ot, 1, reg);
4319 }
4320
4321#ifdef TARGET_X86_64
4322 if (ot == OT_QUAD) {
4323 tcg_gen_helper_1_2(helper_imulq_T0_T1, cpu_T[0], cpu_T[0], cpu_T[1]);
4324 } else
4325#endif
4326 if (ot == OT_LONG) {
4327#ifdef TARGET_X86_64
4328 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4329 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4330 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4331 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4332 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4333 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4334#else
4335 {
4336 TCGv t0, t1;
4337 t0 = tcg_temp_new(TCG_TYPE_I64);
4338 t1 = tcg_temp_new(TCG_TYPE_I64);
4339 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4340 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4341 tcg_gen_mul_i64(t0, t0, t1);
4342 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4343 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4344 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4345 tcg_gen_shri_i64(t0, t0, 32);
4346 tcg_gen_trunc_i64_i32(cpu_T[1], t0);
4347 tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
4348 }
4349#endif
4350 } else {
4351 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4352 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4353 /* XXX: use 32 bit mul which could be faster */
4354 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4355 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4356 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4357 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4358 }
4359 gen_op_mov_reg_T0(ot, reg);
4360 s->cc_op = CC_OP_MULB + ot;
4361 break;
4362 case 0x1c0:
4363 case 0x1c1: /* xadd Ev, Gv */
4364 if ((b & 1) == 0)
4365 ot = OT_BYTE;
4366 else
4367 ot = dflag + OT_WORD;
4368 modrm = ldub_code(s->pc++);
4369 reg = ((modrm >> 3) & 7) | rex_r;
4370 mod = (modrm >> 6) & 3;
4371 if (mod == 3) {
4372 rm = (modrm & 7) | REX_B(s);
4373 gen_op_mov_TN_reg(ot, 0, reg);
4374 gen_op_mov_TN_reg(ot, 1, rm);
4375 gen_op_addl_T0_T1();
4376 gen_op_mov_reg_T1(ot, reg);
4377 gen_op_mov_reg_T0(ot, rm);
4378 } else {
4379 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4380 gen_op_mov_TN_reg(ot, 0, reg);
4381 gen_op_ld_T1_A0(ot + s->mem_index);
4382 gen_op_addl_T0_T1();
4383 gen_op_st_T0_A0(ot + s->mem_index);
4384 gen_op_mov_reg_T1(ot, reg);
4385 }
4386 gen_op_update2_cc();
4387 s->cc_op = CC_OP_ADDB + ot;
4388 break;
4389 case 0x1b0:
4390 case 0x1b1: /* cmpxchg Ev, Gv */
4391 {
4392 int label1, label2;
4393 TCGv t0, t1, t2, a0;
4394
4395 if ((b & 1) == 0)
4396 ot = OT_BYTE;
4397 else
4398 ot = dflag + OT_WORD;
4399 modrm = ldub_code(s->pc++);
4400 reg = ((modrm >> 3) & 7) | rex_r;
4401 mod = (modrm >> 6) & 3;
4402 t0 = tcg_temp_local_new(TCG_TYPE_TL);
4403 t1 = tcg_temp_local_new(TCG_TYPE_TL);
4404 t2 = tcg_temp_local_new(TCG_TYPE_TL);
4405 a0 = tcg_temp_local_new(TCG_TYPE_TL);
4406 gen_op_mov_v_reg(ot, t1, reg);
4407 if (mod == 3) {
4408 rm = (modrm & 7) | REX_B(s);
4409 gen_op_mov_v_reg(ot, t0, rm);
4410 } else {
4411 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4412 tcg_gen_mov_tl(a0, cpu_A0);
4413 gen_op_ld_v(ot + s->mem_index, t0, a0);
4414 rm = 0; /* avoid warning */
4415 }
4416 label1 = gen_new_label();
4417 tcg_gen_ld_tl(t2, cpu_env, offsetof(CPUState, regs[R_EAX]));
4418 tcg_gen_sub_tl(t2, t2, t0);
4419 gen_extu(ot, t2);
4420 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
4421 if (mod == 3) {
4422 label2 = gen_new_label();
4423 gen_op_mov_reg_v(ot, R_EAX, t0);
4424 tcg_gen_br(label2);
4425 gen_set_label(label1);
4426 gen_op_mov_reg_v(ot, rm, t1);
4427 gen_set_label(label2);
4428 } else {
4429 tcg_gen_mov_tl(t1, t0);
4430 gen_op_mov_reg_v(ot, R_EAX, t0);
4431 gen_set_label(label1);
4432                    /* always store: the memory operand is written back even when the comparison fails (with the unchanged old value), as on real hardware */
4433 gen_op_st_v(ot + s->mem_index, t1, a0);
4434 }
4435 tcg_gen_mov_tl(cpu_cc_src, t0);
4436 tcg_gen_mov_tl(cpu_cc_dst, t2);
4437 s->cc_op = CC_OP_SUBB + ot;
4438 tcg_temp_free(t0);
4439 tcg_temp_free(t1);
4440 tcg_temp_free(t2);
4441 tcg_temp_free(a0);
4442 }
4443 break;
4444 case 0x1c7: /* cmpxchg8b */
4445 modrm = ldub_code(s->pc++);
4446 mod = (modrm >> 6) & 3;
4447 if ((mod == 3) || ((modrm & 0x38) != 0x8))
4448 goto illegal_op;
4449#ifdef TARGET_X86_64
4450 if (dflag == 2) {
4451 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
4452 goto illegal_op;
4453 gen_jmp_im(pc_start - s->cs_base);
4454 if (s->cc_op != CC_OP_DYNAMIC)
4455 gen_op_set_cc_op(s->cc_op);
4456 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4457 tcg_gen_helper_0_1(helper_cmpxchg16b, cpu_A0);
4458 } else
4459#endif
4460 {
4461 if (!(s->cpuid_features & CPUID_CX8))
4462 goto illegal_op;
4463 gen_jmp_im(pc_start - s->cs_base);
4464 if (s->cc_op != CC_OP_DYNAMIC)
4465 gen_op_set_cc_op(s->cc_op);
4466 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4467 tcg_gen_helper_0_1(helper_cmpxchg8b, cpu_A0);
4468 }
4469 s->cc_op = CC_OP_EFLAGS;
4470 break;
4471
4472 /**************************/
4473 /* push/pop */
4474 case 0x50 ... 0x57: /* push */
4475 gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
4476 gen_push_T0(s);
4477 break;
4478 case 0x58 ... 0x5f: /* pop */
4479 if (CODE64(s)) {
4480 ot = dflag ? OT_QUAD : OT_WORD;
4481 } else {
4482 ot = dflag + OT_WORD;
4483 }
4484 gen_pop_T0(s);
4485 /* NOTE: order is important for pop %sp */
4486 gen_pop_update(s);
4487 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
4488 break;
4489 case 0x60: /* pusha */
4490 if (CODE64(s))
4491 goto illegal_op;
4492 gen_pusha(s);
4493 break;
4494 case 0x61: /* popa */
4495 if (CODE64(s))
4496 goto illegal_op;
4497 gen_popa(s);
4498 break;
4499 case 0x68: /* push Iv */
4500 case 0x6a:
4501 if (CODE64(s)) {
4502 ot = dflag ? OT_QUAD : OT_WORD;
4503 } else {
4504 ot = dflag + OT_WORD;
4505 }
4506 if (b == 0x68)
4507 val = insn_get(s, ot);
4508 else
4509 val = (int8_t)insn_get(s, OT_BYTE);
4510 gen_op_movl_T0_im(val);
4511 gen_push_T0(s);
4512 break;
4513 case 0x8f: /* pop Ev */
4514 if (CODE64(s)) {
4515 ot = dflag ? OT_QUAD : OT_WORD;
4516 } else {
4517 ot = dflag + OT_WORD;
4518 }
4519 modrm = ldub_code(s->pc++);
4520 mod = (modrm >> 6) & 3;
4521 gen_pop_T0(s);
4522 if (mod == 3) {
4523 /* NOTE: order is important for pop %sp */
4524 gen_pop_update(s);
4525 rm = (modrm & 7) | REX_B(s);
4526 gen_op_mov_reg_T0(ot, rm);
4527 } else {
4528 /* NOTE: order is important too for MMU exceptions */
4529 s->popl_esp_hack = 1 << ot;
4530 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
4531 s->popl_esp_hack = 0;
4532 gen_pop_update(s);
4533 }
4534 break;
4535 case 0xc8: /* enter */
4536 {
4537 int level;
4538 val = lduw_code(s->pc);
4539 s->pc += 2;
4540 level = ldub_code(s->pc++);
4541 gen_enter(s, val, level);
4542 }
4543 break;
4544 case 0xc9: /* leave */
4545 /* XXX: exception not precise (ESP is updated before potential exception) */
4546 if (CODE64(s)) {
4547 gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
4548 gen_op_mov_reg_T0(OT_QUAD, R_ESP);
4549 } else if (s->ss32) {
4550 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
4551 gen_op_mov_reg_T0(OT_LONG, R_ESP);
4552 } else {
4553 gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
4554 gen_op_mov_reg_T0(OT_WORD, R_ESP);
4555 }
4556 gen_pop_T0(s);
4557 if (CODE64(s)) {
4558 ot = dflag ? OT_QUAD : OT_WORD;
4559 } else {
4560 ot = dflag + OT_WORD;
4561 }
4562 gen_op_mov_reg_T0(ot, R_EBP);
4563 gen_pop_update(s);
4564 break;
4565 case 0x06: /* push es */
4566 case 0x0e: /* push cs */
4567 case 0x16: /* push ss */
4568 case 0x1e: /* push ds */
4569 if (CODE64(s))
4570 goto illegal_op;
4571 gen_op_movl_T0_seg(b >> 3);
4572 gen_push_T0(s);
4573 break;
4574 case 0x1a0: /* push fs */
4575 case 0x1a8: /* push gs */
4576 gen_op_movl_T0_seg((b >> 3) & 7);
4577 gen_push_T0(s);
4578 break;
4579 case 0x07: /* pop es */
4580 case 0x17: /* pop ss */
4581 case 0x1f: /* pop ds */
4582 if (CODE64(s))
4583 goto illegal_op;
4584 reg = b >> 3;
4585 gen_pop_T0(s);
4586 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
4587 gen_pop_update(s);
4588 if (reg == R_SS) {
4589 /* if reg == SS, inhibit interrupts/trace. */
4590 /* If several instructions disable interrupts, only the
4591 _first_ does it */
4592 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
4593 tcg_gen_helper_0_0(helper_set_inhibit_irq);
4594 s->tf = 0;
4595 }
4596 if (s->is_jmp) {
4597 gen_jmp_im(s->pc - s->cs_base);
4598 gen_eob(s);
4599 }
4600 break;
4601 case 0x1a1: /* pop fs */
4602 case 0x1a9: /* pop gs */
4603 gen_pop_T0(s);
4604 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
4605 gen_pop_update(s);
4606 if (s->is_jmp) {
4607 gen_jmp_im(s->pc - s->cs_base);
4608 gen_eob(s);
4609 }
4610 break;
4611
4612 /**************************/
4613 /* mov */
4614 case 0x88:
4615 case 0x89: /* mov Gv, Ev */
4616 if ((b & 1) == 0)
4617 ot = OT_BYTE;
4618 else
4619 ot = dflag + OT_WORD;
4620 modrm = ldub_code(s->pc++);
4621 reg = ((modrm >> 3) & 7) | rex_r;
4622
4623 /* generate a generic store */
4624 gen_ldst_modrm(s, modrm, ot, reg, 1);
4625 break;
4626 case 0xc6:
4627 case 0xc7: /* mov Ev, Iv */
4628 if ((b & 1) == 0)
4629 ot = OT_BYTE;
4630 else
4631 ot = dflag + OT_WORD;
4632 modrm = ldub_code(s->pc++);
4633 mod = (modrm >> 6) & 3;
4634 if (mod != 3) {
4635 s->rip_offset = insn_const_size(ot);
4636 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4637 }
4638 val = insn_get(s, ot);
4639 gen_op_movl_T0_im(val);
4640 if (mod != 3)
4641 gen_op_st_T0_A0(ot + s->mem_index);
4642 else
4643 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
4644 break;
4645 case 0x8a:
4646 case 0x8b: /* mov Ev, Gv */
4647 if ((b & 1) == 0)
4648 ot = OT_BYTE;
4649 else
4650 ot = OT_WORD + dflag;
4651 modrm = ldub_code(s->pc++);
4652 reg = ((modrm >> 3) & 7) | rex_r;
4653
4654 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4655 gen_op_mov_reg_T0(ot, reg);
4656 break;
4657 case 0x8e: /* mov seg, Gv */
4658 modrm = ldub_code(s->pc++);
4659 reg = (modrm >> 3) & 7;
4660 if (reg >= 6 || reg == R_CS)
4661 goto illegal_op;
4662 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
4663 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
4664 if (reg == R_SS) {
4665 /* if reg == SS, inhibit interrupts/trace */
4666 /* If several instructions disable interrupts, only the
4667 _first_ does it */
4668 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
4669 tcg_gen_helper_0_0(helper_set_inhibit_irq);
4670 s->tf = 0;
4671 }
4672 if (s->is_jmp) {
4673 gen_jmp_im(s->pc - s->cs_base);
4674 gen_eob(s);
4675 }
4676 break;
4677 case 0x8c: /* mov Gv, seg */
4678 modrm = ldub_code(s->pc++);
4679 reg = (modrm >> 3) & 7;
4680 mod = (modrm >> 6) & 3;
4681 if (reg >= 6)
4682 goto illegal_op;
4683 gen_op_movl_T0_seg(reg);
4684 if (mod == 3)
4685 ot = OT_WORD + dflag;
4686 else
4687 ot = OT_WORD;
4688 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
4689 break;
4690
4691 case 0x1b6: /* movzbS Gv, Eb */
4692 case 0x1b7: /* movzwS Gv, Eb */
4693 case 0x1be: /* movsbS Gv, Eb */
4694 case 0x1bf: /* movswS Gv, Eb */
4695 {
4696 int d_ot;
4697            /* d_ot is the size of the destination */
4698 d_ot = dflag + OT_WORD;
4699            /* ot is the size of the source */
4700 ot = (b & 1) + OT_BYTE;
4701 modrm = ldub_code(s->pc++);
4702 reg = ((modrm >> 3) & 7) | rex_r;
4703 mod = (modrm >> 6) & 3;
4704 rm = (modrm & 7) | REX_B(s);
4705
4706 if (mod == 3) {
4707 gen_op_mov_TN_reg(ot, 0, rm);
4708 switch(ot | (b & 8)) {
4709 case OT_BYTE:
4710 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4711 break;
4712 case OT_BYTE | 8:
4713 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4714 break;
4715 case OT_WORD:
4716 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4717 break;
4718 default:
4719 case OT_WORD | 8:
4720 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4721 break;
4722 }
4723 gen_op_mov_reg_T0(d_ot, reg);
4724 } else {
4725 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4726 if (b & 8) {
4727 gen_op_lds_T0_A0(ot + s->mem_index);
4728 } else {
4729 gen_op_ldu_T0_A0(ot + s->mem_index);
4730 }
4731 gen_op_mov_reg_T0(d_ot, reg);
4732 }
4733 }
4734 break;
4735
4736 case 0x8d: /* lea */
4737 ot = dflag + OT_WORD;
4738 modrm = ldub_code(s->pc++);
4739 mod = (modrm >> 6) & 3;
4740 if (mod == 3)
4741 goto illegal_op;
4742 reg = ((modrm >> 3) & 7) | rex_r;
4743        /* LEA yields the effective address only, so we must ensure that no segment base is added */
4744 s->override = -1;
4745 val = s->addseg;
4746 s->addseg = 0;
4747 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4748 s->addseg = val;
4749 gen_op_mov_reg_A0(ot - OT_WORD, reg);
4750 break;
4751
4752 case 0xa0: /* mov EAX, Ov */
4753 case 0xa1:
4754 case 0xa2: /* mov Ov, EAX */
4755 case 0xa3:
4756 {
4757 target_ulong offset_addr;
4758
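            /* moffs forms: the address is an inline immediate of
               address-size width (8, 4 or 2 bytes) with no modrm byte;
               only the DS segment (or an override) is added. */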
4759 if ((b & 1) == 0)
4760 ot = OT_BYTE;
4761 else
4762 ot = dflag + OT_WORD;
4763#ifdef TARGET_X86_64
4764 if (s->aflag == 2) {
4765 offset_addr = ldq_code(s->pc);
4766 s->pc += 8;
4767 gen_op_movq_A0_im(offset_addr);
4768 } else
4769#endif
4770 {
4771 if (s->aflag) {
4772 offset_addr = insn_get(s, OT_LONG);
4773 } else {
4774 offset_addr = insn_get(s, OT_WORD);
4775 }
4776 gen_op_movl_A0_im(offset_addr);
4777 }
4778 gen_add_A0_ds_seg(s);
4779 if ((b & 2) == 0) {
4780 gen_op_ld_T0_A0(ot + s->mem_index);
4781 gen_op_mov_reg_T0(ot, R_EAX);
4782 } else {
4783 gen_op_mov_TN_reg(ot, 0, R_EAX);
4784 gen_op_st_T0_A0(ot + s->mem_index);
4785 }
4786 }
4787 break;
4788 case 0xd7: /* xlat */
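        /* XLAT: AL = [seg:(E/R)BX + zero-extended AL], where seg
           defaults to DS and honours segment overrides. */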
4789#ifdef TARGET_X86_64
4790 if (s->aflag == 2) {
4791 gen_op_movq_A0_reg(R_EBX);
4792 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
4793 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
4794 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
4795 } else
4796#endif
4797 {
4798 gen_op_movl_A0_reg(R_EBX);
4799 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4800 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
4801 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
4802 if (s->aflag == 0)
4803 gen_op_andl_A0_ffff();
4804 else
4805 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
4806 }
4807 gen_add_A0_ds_seg(s);
4808 gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
4809 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
4810 break;
4811 case 0xb0 ... 0xb7: /* mov R, Ib */
4812 val = insn_get(s, OT_BYTE);
4813 gen_op_movl_T0_im(val);
4814 gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
4815 break;
4816 case 0xb8 ... 0xbf: /* mov R, Iv */
4817#ifdef TARGET_X86_64
4818 if (dflag == 2) {
4819 uint64_t tmp;
4820 /* 64 bit case */
4821 tmp = ldq_code(s->pc);
4822 s->pc += 8;
4823 reg = (b & 7) | REX_B(s);
4824 gen_movtl_T0_im(tmp);
4825 gen_op_mov_reg_T0(OT_QUAD, reg);
4826 } else
4827#endif
4828 {
4829 ot = dflag ? OT_LONG : OT_WORD;
4830 val = insn_get(s, ot);
4831 reg = (b & 7) | REX_B(s);
4832 gen_op_movl_T0_im(val);
4833 gen_op_mov_reg_T0(ot, reg);
4834 }
4835 break;
4836
4837 case 0x91 ... 0x97: /* xchg R, EAX */
4838 ot = dflag + OT_WORD;
4839 reg = (b & 7) | REX_B(s);
4840 rm = R_EAX;
4841 goto do_xchg_reg;
4842 case 0x86:
4843 case 0x87: /* xchg Ev, Gv */
4844 if ((b & 1) == 0)
4845 ot = OT_BYTE;
4846 else
4847 ot = dflag + OT_WORD;
4848 modrm = ldub_code(s->pc++);
4849 reg = ((modrm >> 3) & 7) | rex_r;
4850 mod = (modrm >> 6) & 3;
4851 if (mod == 3) {
4852 rm = (modrm & 7) | REX_B(s);
4853 do_xchg_reg:
4854 gen_op_mov_TN_reg(ot, 0, reg);
4855 gen_op_mov_TN_reg(ot, 1, rm);
4856 gen_op_mov_reg_T0(ot, rm);
4857 gen_op_mov_reg_T1(ot, reg);
4858 } else {
4859 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4860 gen_op_mov_TN_reg(ot, 0, reg);
4861 /* for xchg, lock is implicit */
4862 if (!(prefixes & PREFIX_LOCK))
4863 tcg_gen_helper_0_0(helper_lock);
4864 gen_op_ld_T1_A0(ot + s->mem_index);
4865 gen_op_st_T0_A0(ot + s->mem_index);
4866 if (!(prefixes & PREFIX_LOCK))
4867 tcg_gen_helper_0_0(helper_unlock);
4868 gen_op_mov_reg_T1(ot, reg);
4869 }
4870 break;
4871 case 0xc4: /* les Gv */
4872 if (CODE64(s))
4873 goto illegal_op;
4874 op = R_ES;
4875 goto do_lxx;
4876 case 0xc5: /* lds Gv */
4877 if (CODE64(s))
4878 goto illegal_op;
4879 op = R_DS;
4880 goto do_lxx;
4881 case 0x1b2: /* lss Gv */
4882 op = R_SS;
4883 goto do_lxx;
4884 case 0x1b4: /* lfs Gv */
4885 op = R_FS;
4886 goto do_lxx;
4887 case 0x1b5: /* lgs Gv */
4888 op = R_GS;
4889 do_lxx:
4890 ot = dflag ? OT_LONG : OT_WORD;
4891 modrm = ldub_code(s->pc++);
4892 reg = ((modrm >> 3) & 7) | rex_r;
4893 mod = (modrm >> 6) & 3;
4894 if (mod == 3)
4895 goto illegal_op;
4896 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4897 gen_op_ld_T1_A0(ot + s->mem_index);
4898 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4899 /* load the segment first to handle exceptions properly */
4900 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4901 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
4902 /* then put the data */
4903 gen_op_mov_reg_T1(ot, reg);
4904 if (s->is_jmp) {
4905 gen_jmp_im(s->pc - s->cs_base);
4906 gen_eob(s);
4907 }
4908 break;
4909
4910 /************************/
4911 /* shifts */
4912 case 0xc0:
4913 case 0xc1:
4914 /* shift Ev,Ib */
4915 shift = 2;
4916 grp2:
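        /* Shared GRP2 decode: shift==0 takes the count from CL,
           shift==1 uses a count of 1, shift==2 reads an immediate
           count byte below.  The reg field of modrm selects the
           operation (0=rol 1=ror 2=rcl 3=rcr 4=shl 5=shr 6=sal 7=sar). */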
4917 {
4918 if ((b & 1) == 0)
4919 ot = OT_BYTE;
4920 else
4921 ot = dflag + OT_WORD;
4922
4923 modrm = ldub_code(s->pc++);
4924 mod = (modrm >> 6) & 3;
4925 op = (modrm >> 3) & 7;
4926
4927 if (mod != 3) {
4928 if (shift == 2) {
4929 s->rip_offset = 1;
4930 }
4931 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4932 opreg = OR_TMP0;
4933 } else {
4934 opreg = (modrm & 7) | REX_B(s);
4935 }
4936
4937 /* simpler op */
4938 if (shift == 0) {
4939 gen_shift(s, op, ot, opreg, OR_ECX);
4940 } else {
4941 if (shift == 2) {
4942 shift = ldub_code(s->pc++);
4943 }
4944 gen_shifti(s, op, ot, opreg, shift);
4945 }
4946 }
4947 break;
4948 case 0xd0:
4949 case 0xd1:
4950 /* shift Ev,1 */
4951 shift = 1;
4952 goto grp2;
4953 case 0xd2:
4954 case 0xd3:
4955 /* shift Ev,cl */
4956 shift = 0;
4957 goto grp2;
4958
4959 case 0x1a4: /* shld imm */
4960 op = 0;
4961 shift = 1;
4962 goto do_shiftd;
4963 case 0x1a5: /* shld cl */
4964 op = 0;
4965 shift = 0;
4966 goto do_shiftd;
4967 case 0x1ac: /* shrd imm */
4968 op = 1;
4969 shift = 1;
4970 goto do_shiftd;
4971 case 0x1ad: /* shrd cl */
4972 op = 1;
4973 shift = 0;
4974 do_shiftd:
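        /* Double-precision shifts: op==0 is SHLD, op==1 is SHRD;
           shift!=0 means an immediate count byte, shift==0 takes the
           count from CL.  Either way the count ends up in cpu_T3. */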
4975 ot = dflag + OT_WORD;
4976 modrm = ldub_code(s->pc++);
4977 mod = (modrm >> 6) & 3;
4978 rm = (modrm & 7) | REX_B(s);
4979 reg = ((modrm >> 3) & 7) | rex_r;
4980 if (mod != 3) {
4981 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4982 opreg = OR_TMP0;
4983 } else {
4984 opreg = rm;
4985 }
4986 gen_op_mov_TN_reg(ot, 1, reg);
4987
4988 if (shift) {
4989 val = ldub_code(s->pc++);
4990 tcg_gen_movi_tl(cpu_T3, val);
4991 } else {
4992 tcg_gen_ld_tl(cpu_T3, cpu_env, offsetof(CPUState, regs[R_ECX]));
4993 }
4994 gen_shiftd_rm_T1_T3(s, ot, opreg, op);
4995 break;
4996
4997 /************************/
4998 /* floats */
4999 case 0xd8 ... 0xdf:
5000 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5001 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5002            /* XXX: what to do on an illegal op? */
5003 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5004 break;
5005 }
5006 modrm = ldub_code(s->pc++);
5007 mod = (modrm >> 6) & 3;
5008 rm = modrm & 7;
5009 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
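        /* Combine the low 3 opcode bits (0xd8..0xdf) with the reg
           field of the modrm byte into a 6-bit FPU op index, used for
           dispatching both the memory and the register forms below. */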
5010 if (mod != 3) {
5011 /* memory op */
5012 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5013 switch(op) {
5014 case 0x00 ... 0x07: /* fxxxs */
5015 case 0x10 ... 0x17: /* fixxxl */
5016 case 0x20 ... 0x27: /* fxxxl */
5017 case 0x30 ... 0x37: /* fixxx */
5018 {
5019 int op1;
5020 op1 = op & 7;
5021
5022 switch(op >> 4) {
5023 case 0:
5024 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5025 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5026 tcg_gen_helper_0_1(helper_flds_FT0, cpu_tmp2_i32);
5027 break;
5028 case 1:
5029 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5030 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5031 tcg_gen_helper_0_1(helper_fildl_FT0, cpu_tmp2_i32);
5032 break;
5033 case 2:
5034 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5035 (s->mem_index >> 2) - 1);
5036 tcg_gen_helper_0_1(helper_fldl_FT0, cpu_tmp1_i64);
5037 break;
5038 case 3:
5039 default:
5040 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5041 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5042 tcg_gen_helper_0_1(helper_fildl_FT0, cpu_tmp2_i32);
5043 break;
5044 }
5045
5046 tcg_gen_helper_0_0(helper_fp_arith_ST0_FT0[op1]);
5047 if (op1 == 3) {
5048 /* fcomp needs pop */
5049 tcg_gen_helper_0_0(helper_fpop);
5050 }
5051 }
5052 break;
5053 case 0x08: /* flds */
5054 case 0x0a: /* fsts */
5055 case 0x0b: /* fstps */
5056 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5057 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5058 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5059 switch(op & 7) {
5060 case 0:
5061 switch(op >> 4) {
5062 case 0:
5063 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5064 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5065 tcg_gen_helper_0_1(helper_flds_ST0, cpu_tmp2_i32);
5066 break;
5067 case 1:
5068 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5069 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5070 tcg_gen_helper_0_1(helper_fildl_ST0, cpu_tmp2_i32);
5071 break;
5072 case 2:
5073 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5074 (s->mem_index >> 2) - 1);
5075 tcg_gen_helper_0_1(helper_fldl_ST0, cpu_tmp1_i64);
5076 break;
5077 case 3:
5078 default:
5079 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5080 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5081 tcg_gen_helper_0_1(helper_fildl_ST0, cpu_tmp2_i32);
5082 break;
5083 }
5084 break;
5085 case 1:
5086                    /* XXX: the corresponding CPUID bit (SSE3, for the fisttp insns) must be tested! */
5087 switch(op >> 4) {
5088 case 1:
5089 tcg_gen_helper_1_0(helper_fisttl_ST0, cpu_tmp2_i32);
5090 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5091 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5092 break;
5093 case 2:
5094 tcg_gen_helper_1_0(helper_fisttll_ST0, cpu_tmp1_i64);
5095 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5096 (s->mem_index >> 2) - 1);
5097 break;
5098 case 3:
5099 default:
5100 tcg_gen_helper_1_0(helper_fistt_ST0, cpu_tmp2_i32);
5101 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5102 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5103 break;
5104 }
5105 tcg_gen_helper_0_0(helper_fpop);
5106 break;
5107 default:
5108 switch(op >> 4) {
5109 case 0:
5110 tcg_gen_helper_1_0(helper_fsts_ST0, cpu_tmp2_i32);
5111 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5112 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5113 break;
5114 case 1:
5115 tcg_gen_helper_1_0(helper_fistl_ST0, cpu_tmp2_i32);
5116 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5117 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5118 break;
5119 case 2:
5120 tcg_gen_helper_1_0(helper_fstl_ST0, cpu_tmp1_i64);
5121 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5122 (s->mem_index >> 2) - 1);
5123 break;
5124 case 3:
5125 default:
5126 tcg_gen_helper_1_0(helper_fist_ST0, cpu_tmp2_i32);
5127 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5128 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5129 break;
5130 }
5131 if ((op & 7) == 3)
5132 tcg_gen_helper_0_0(helper_fpop);
5133 break;
5134 }
5135 break;
5136 case 0x0c: /* fldenv mem */
5137 if (s->cc_op != CC_OP_DYNAMIC)
5138 gen_op_set_cc_op(s->cc_op);
5139 gen_jmp_im(pc_start - s->cs_base);
5140 tcg_gen_helper_0_2(helper_fldenv,
5141 cpu_A0, tcg_const_i32(s->dflag));
5142 break;
5143 case 0x0d: /* fldcw mem */
5144 gen_op_ld_T0_A0(OT_WORD + s->mem_index);
5145 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5146 tcg_gen_helper_0_1(helper_fldcw, cpu_tmp2_i32);
5147 break;
5148 case 0x0e: /* fnstenv mem */
5149 if (s->cc_op != CC_OP_DYNAMIC)
5150 gen_op_set_cc_op(s->cc_op);
5151 gen_jmp_im(pc_start - s->cs_base);
5152 tcg_gen_helper_0_2(helper_fstenv,
5153 cpu_A0, tcg_const_i32(s->dflag));
5154 break;
5155 case 0x0f: /* fnstcw mem */
5156 tcg_gen_helper_1_0(helper_fnstcw, cpu_tmp2_i32);
5157 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5158 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5159 break;
5160 case 0x1d: /* fldt mem */
5161 if (s->cc_op != CC_OP_DYNAMIC)
5162 gen_op_set_cc_op(s->cc_op);
5163 gen_jmp_im(pc_start - s->cs_base);
5164 tcg_gen_helper_0_1(helper_fldt_ST0, cpu_A0);
5165 break;
5166 case 0x1f: /* fstpt mem */
5167 if (s->cc_op != CC_OP_DYNAMIC)
5168 gen_op_set_cc_op(s->cc_op);
5169 gen_jmp_im(pc_start - s->cs_base);
5170 tcg_gen_helper_0_1(helper_fstt_ST0, cpu_A0);
5171 tcg_gen_helper_0_0(helper_fpop);
5172 break;
5173 case 0x2c: /* frstor mem */
5174 if (s->cc_op != CC_OP_DYNAMIC)
5175 gen_op_set_cc_op(s->cc_op);
5176 gen_jmp_im(pc_start - s->cs_base);
5177 tcg_gen_helper_0_2(helper_frstor,
5178 cpu_A0, tcg_const_i32(s->dflag));
5179 break;
5180 case 0x2e: /* fnsave mem */
5181 if (s->cc_op != CC_OP_DYNAMIC)
5182 gen_op_set_cc_op(s->cc_op);
5183 gen_jmp_im(pc_start - s->cs_base);
5184 tcg_gen_helper_0_2(helper_fsave,
5185 cpu_A0, tcg_const_i32(s->dflag));
5186 break;
5187 case 0x2f: /* fnstsw mem */
5188 tcg_gen_helper_1_0(helper_fnstsw, cpu_tmp2_i32);
5189 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5190 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5191 break;
5192 case 0x3c: /* fbld */
5193 if (s->cc_op != CC_OP_DYNAMIC)
5194 gen_op_set_cc_op(s->cc_op);
5195 gen_jmp_im(pc_start - s->cs_base);
5196 tcg_gen_helper_0_1(helper_fbld_ST0, cpu_A0);
5197 break;
5198 case 0x3e: /* fbstp */
5199 if (s->cc_op != CC_OP_DYNAMIC)
5200 gen_op_set_cc_op(s->cc_op);
5201 gen_jmp_im(pc_start - s->cs_base);
5202 tcg_gen_helper_0_1(helper_fbst_ST0, cpu_A0);
5203 tcg_gen_helper_0_0(helper_fpop);
5204 break;
5205 case 0x3d: /* fildll */
5206 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5207 (s->mem_index >> 2) - 1);
5208 tcg_gen_helper_0_1(helper_fildll_ST0, cpu_tmp1_i64);
5209 break;
5210 case 0x3f: /* fistpll */
5211 tcg_gen_helper_1_0(helper_fistll_ST0, cpu_tmp1_i64);
5212 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5213 (s->mem_index >> 2) - 1);
5214 tcg_gen_helper_0_0(helper_fpop);
5215 break;
5216 default:
5217 goto illegal_op;
5218 }
5219 } else {
5220 /* register float ops */
5221 opreg = rm;
5222
5223 switch(op) {
5224 case 0x08: /* fld sti */
5225 tcg_gen_helper_0_0(helper_fpush);
5226 tcg_gen_helper_0_1(helper_fmov_ST0_STN, tcg_const_i32((opreg + 1) & 7));
5227 break;
5228 case 0x09: /* fxchg sti */
5229 case 0x29: /* fxchg4 sti, undocumented op */
5230 case 0x39: /* fxchg7 sti, undocumented op */
5231 tcg_gen_helper_0_1(helper_fxchg_ST0_STN, tcg_const_i32(opreg));
5232 break;
5233 case 0x0a: /* grp d9/2 */
5234 switch(rm) {
5235 case 0: /* fnop */
5236 /* check exceptions (FreeBSD FPU probe) */
5237 if (s->cc_op != CC_OP_DYNAMIC)
5238 gen_op_set_cc_op(s->cc_op);
5239 gen_jmp_im(pc_start - s->cs_base);
5240 tcg_gen_helper_0_0(helper_fwait);
5241 break;
5242 default:
5243 goto illegal_op;
5244 }
5245 break;
5246 case 0x0c: /* grp d9/4 */
5247 switch(rm) {
5248 case 0: /* fchs */
5249 tcg_gen_helper_0_0(helper_fchs_ST0);
5250 break;
5251 case 1: /* fabs */
5252 tcg_gen_helper_0_0(helper_fabs_ST0);
5253 break;
5254 case 4: /* ftst */
5255 tcg_gen_helper_0_0(helper_fldz_FT0);
5256 tcg_gen_helper_0_0(helper_fcom_ST0_FT0);
5257 break;
5258 case 5: /* fxam */
5259 tcg_gen_helper_0_0(helper_fxam_ST0);
5260 break;
5261 default:
5262 goto illegal_op;
5263 }
5264 break;
5265 case 0x0d: /* grp d9/5 */
5266 {
5267 switch(rm) {
5268 case 0:
5269 tcg_gen_helper_0_0(helper_fpush);
5270 tcg_gen_helper_0_0(helper_fld1_ST0);
5271 break;
5272 case 1:
5273 tcg_gen_helper_0_0(helper_fpush);
5274 tcg_gen_helper_0_0(helper_fldl2t_ST0);
5275 break;
5276 case 2:
5277 tcg_gen_helper_0_0(helper_fpush);
5278 tcg_gen_helper_0_0(helper_fldl2e_ST0);
5279 break;
5280 case 3:
5281 tcg_gen_helper_0_0(helper_fpush);
5282 tcg_gen_helper_0_0(helper_fldpi_ST0);
5283 break;
5284 case 4:
5285 tcg_gen_helper_0_0(helper_fpush);
5286 tcg_gen_helper_0_0(helper_fldlg2_ST0);
5287 break;
5288 case 5:
5289 tcg_gen_helper_0_0(helper_fpush);
5290 tcg_gen_helper_0_0(helper_fldln2_ST0);
5291 break;
5292 case 6:
5293 tcg_gen_helper_0_0(helper_fpush);
5294 tcg_gen_helper_0_0(helper_fldz_ST0);
5295 break;
5296 default:
5297 goto illegal_op;
5298 }
5299 }
5300 break;
5301 case 0x0e: /* grp d9/6 */
5302 switch(rm) {
5303 case 0: /* f2xm1 */
5304 tcg_gen_helper_0_0(helper_f2xm1);
5305 break;
5306 case 1: /* fyl2x */
5307 tcg_gen_helper_0_0(helper_fyl2x);
5308 break;
5309 case 2: /* fptan */
5310 tcg_gen_helper_0_0(helper_fptan);
5311 break;
5312 case 3: /* fpatan */
5313 tcg_gen_helper_0_0(helper_fpatan);
5314 break;
5315 case 4: /* fxtract */
5316 tcg_gen_helper_0_0(helper_fxtract);
5317 break;
5318 case 5: /* fprem1 */
5319 tcg_gen_helper_0_0(helper_fprem1);
5320 break;
5321 case 6: /* fdecstp */
5322 tcg_gen_helper_0_0(helper_fdecstp);
5323 break;
5324 default:
5325 case 7: /* fincstp */
5326 tcg_gen_helper_0_0(helper_fincstp);
5327 break;
5328 }
5329 break;
5330 case 0x0f: /* grp d9/7 */
5331 switch(rm) {
5332 case 0: /* fprem */
5333 tcg_gen_helper_0_0(helper_fprem);
5334 break;
5335 case 1: /* fyl2xp1 */
5336 tcg_gen_helper_0_0(helper_fyl2xp1);
5337 break;
5338 case 2: /* fsqrt */
5339 tcg_gen_helper_0_0(helper_fsqrt);
5340 break;
5341 case 3: /* fsincos */
5342 tcg_gen_helper_0_0(helper_fsincos);
5343 break;
5344 case 5: /* fscale */
5345 tcg_gen_helper_0_0(helper_fscale);
5346 break;
5347 case 4: /* frndint */
5348 tcg_gen_helper_0_0(helper_frndint);
5349 break;
5350 case 6: /* fsin */
5351 tcg_gen_helper_0_0(helper_fsin);
5352 break;
5353 default:
5354 case 7: /* fcos */
5355 tcg_gen_helper_0_0(helper_fcos);
5356 break;
5357 }
5358 break;
5359 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5360 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5361 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5362 {
5363 int op1;
5364
5365 op1 = op & 7;
5366 if (op >= 0x20) {
5367 tcg_gen_helper_0_1(helper_fp_arith_STN_ST0[op1], tcg_const_i32(opreg));
5368 if (op >= 0x30)
5369 tcg_gen_helper_0_0(helper_fpop);
5370 } else {
5371 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
5372 tcg_gen_helper_0_0(helper_fp_arith_ST0_FT0[op1]);
5373 }
5374 }
5375 break;
5376 case 0x02: /* fcom */
5377 case 0x22: /* fcom2, undocumented op */
5378 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
5379 tcg_gen_helper_0_0(helper_fcom_ST0_FT0);
5380 break;
5381 case 0x03: /* fcomp */
5382 case 0x23: /* fcomp3, undocumented op */
5383 case 0x32: /* fcomp5, undocumented op */
5384 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
5385 tcg_gen_helper_0_0(helper_fcom_ST0_FT0);
5386 tcg_gen_helper_0_0(helper_fpop);
5387 break;
5388 case 0x15: /* da/5 */
5389 switch(rm) {
5390 case 1: /* fucompp */
5391 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(1));
5392 tcg_gen_helper_0_0(helper_fucom_ST0_FT0);
5393 tcg_gen_helper_0_0(helper_fpop);
5394 tcg_gen_helper_0_0(helper_fpop);
5395 break;
5396 default:
5397 goto illegal_op;
5398 }
5399 break;
5400 case 0x1c:
5401 switch(rm) {
5402 case 0: /* feni (287 only, just do nop here) */
5403 break;
5404 case 1: /* fdisi (287 only, just do nop here) */
5405 break;
5406 case 2: /* fclex */
5407 tcg_gen_helper_0_0(helper_fclex);
5408 break;
5409 case 3: /* fninit */
5410 tcg_gen_helper_0_0(helper_fninit);
5411 break;
5412 case 4: /* fsetpm (287 only, just do nop here) */
5413 break;
5414 default:
5415 goto illegal_op;
5416 }
5417 break;
5418 case 0x1d: /* fucomi */
5419 if (s->cc_op != CC_OP_DYNAMIC)
5420 gen_op_set_cc_op(s->cc_op);
5421 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
5422 tcg_gen_helper_0_0(helper_fucomi_ST0_FT0);
5423 s->cc_op = CC_OP_EFLAGS;
5424 break;
5425 case 0x1e: /* fcomi */
5426 if (s->cc_op != CC_OP_DYNAMIC)
5427 gen_op_set_cc_op(s->cc_op);
5428 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
5429 tcg_gen_helper_0_0(helper_fcomi_ST0_FT0);
5430 s->cc_op = CC_OP_EFLAGS;
5431 break;
5432 case 0x28: /* ffree sti */
5433 tcg_gen_helper_0_1(helper_ffree_STN, tcg_const_i32(opreg));
5434 break;
5435 case 0x2a: /* fst sti */
5436 tcg_gen_helper_0_1(helper_fmov_STN_ST0, tcg_const_i32(opreg));
5437 break;
5438 case 0x2b: /* fstp sti */
5439 case 0x0b: /* fstp1 sti, undocumented op */
5440 case 0x3a: /* fstp8 sti, undocumented op */
5441 case 0x3b: /* fstp9 sti, undocumented op */
5442 tcg_gen_helper_0_1(helper_fmov_STN_ST0, tcg_const_i32(opreg));
5443 tcg_gen_helper_0_0(helper_fpop);
5444 break;
5445 case 0x2c: /* fucom st(i) */
5446 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
5447 tcg_gen_helper_0_0(helper_fucom_ST0_FT0);
5448 break;
5449 case 0x2d: /* fucomp st(i) */
5450 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
5451 tcg_gen_helper_0_0(helper_fucom_ST0_FT0);
5452 tcg_gen_helper_0_0(helper_fpop);
5453 break;
5454 case 0x33: /* de/3 */
5455 switch(rm) {
5456 case 1: /* fcompp */
5457 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(1));
5458 tcg_gen_helper_0_0(helper_fcom_ST0_FT0);
5459 tcg_gen_helper_0_0(helper_fpop);
5460 tcg_gen_helper_0_0(helper_fpop);
5461 break;
5462 default:
5463 goto illegal_op;
5464 }
5465 break;
5466 case 0x38: /* ffreep sti, undocumented op */
5467 tcg_gen_helper_0_1(helper_ffree_STN, tcg_const_i32(opreg));
5468 tcg_gen_helper_0_0(helper_fpop);
5469 break;
5470 case 0x3c: /* df/4 */
5471 switch(rm) {
5472 case 0:
5473 tcg_gen_helper_1_0(helper_fnstsw, cpu_tmp2_i32);
5474 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5475 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5476 break;
5477 default:
5478 goto illegal_op;
5479 }
5480 break;
5481 case 0x3d: /* fucomip */
5482 if (s->cc_op != CC_OP_DYNAMIC)
5483 gen_op_set_cc_op(s->cc_op);
5484 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
5485 tcg_gen_helper_0_0(helper_fucomi_ST0_FT0);
5486 tcg_gen_helper_0_0(helper_fpop);
5487 s->cc_op = CC_OP_EFLAGS;
5488 break;
5489 case 0x3e: /* fcomip */
5490 if (s->cc_op != CC_OP_DYNAMIC)
5491 gen_op_set_cc_op(s->cc_op);
5492 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
5493 tcg_gen_helper_0_0(helper_fcomi_ST0_FT0);
5494 tcg_gen_helper_0_0(helper_fpop);
5495 s->cc_op = CC_OP_EFLAGS;
5496 break;
5497 case 0x10 ... 0x13: /* fcmovxx */
5498 case 0x18 ... 0x1b:
5499 {
5500 int op1, l1;
5501                static const uint8_t fcmov_cc[4] = {
5502 (JCC_B << 1),
5503 (JCC_Z << 1),
5504 (JCC_BE << 1),
5505 (JCC_P << 1),
5506 };
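                /* op1 is the jcc encoding of the *inverse* of the
                   fcmov condition (bit 0 set selects the negated
                   form), so gen_jcc1 branches over the fmov exactly
                   when the move must not happen. */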
5507 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
5508 l1 = gen_new_label();
5509 gen_jcc1(s, s->cc_op, op1, l1);
5510 tcg_gen_helper_0_1(helper_fmov_ST0_STN, tcg_const_i32(opreg));
5511 gen_set_label(l1);
5512 }
5513 break;
5514 default:
5515 goto illegal_op;
5516 }
5517 }
5518 break;
5519 /************************/
5520 /* string ops */
5521
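        /* With a REP/REPZ/REPNZ prefix the gen_repz_* form is used:
           it checks (E)CX, emits one iteration and loops by jumping
           back to the instruction; otherwise a single iteration is
           generated inline. */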
5522 case 0xa4: /* movsS */
5523 case 0xa5:
5524 if ((b & 1) == 0)
5525 ot = OT_BYTE;
5526 else
5527 ot = dflag + OT_WORD;
5528
5529 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5530 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5531 } else {
5532 gen_movs(s, ot);
5533 }
5534 break;
5535
5536 case 0xaa: /* stosS */
5537 case 0xab:
5538 if ((b & 1) == 0)
5539 ot = OT_BYTE;
5540 else
5541 ot = dflag + OT_WORD;
5542
5543 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5544 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5545 } else {
5546 gen_stos(s, ot);
5547 }
5548 break;
5549 case 0xac: /* lodsS */
5550 case 0xad:
5551 if ((b & 1) == 0)
5552 ot = OT_BYTE;
5553 else
5554 ot = dflag + OT_WORD;
5555 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5556 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5557 } else {
5558 gen_lods(s, ot);
5559 }
5560 break;
5561 case 0xae: /* scasS */
5562 case 0xaf:
5563 if ((b & 1) == 0)
5564 ot = OT_BYTE;
5565 else
5566 ot = dflag + OT_WORD;
5567 if (prefixes & PREFIX_REPNZ) {
5568 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
5569 } else if (prefixes & PREFIX_REPZ) {
5570 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
5571 } else {
5572 gen_scas(s, ot);
5573 s->cc_op = CC_OP_SUBB + ot;
5574 }
5575 break;
5576
5577 case 0xa6: /* cmpsS */
5578 case 0xa7:
5579 if ((b & 1) == 0)
5580 ot = OT_BYTE;
5581 else
5582 ot = dflag + OT_WORD;
5583 if (prefixes & PREFIX_REPNZ) {
5584 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
5585 } else if (prefixes & PREFIX_REPZ) {
5586 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
5587 } else {
5588 gen_cmps(s, ot);
5589 s->cc_op = CC_OP_SUBB + ot;
5590 }
5591 break;
5592 case 0x6c: /* insS */
5593 case 0x6d:
5594 if ((b & 1) == 0)
5595 ot = OT_BYTE;
5596 else
5597 ot = dflag ? OT_LONG : OT_WORD;
5598 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
5599 gen_op_andl_T0_ffff();
5600 gen_check_io(s, ot, pc_start - s->cs_base,
5601 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
5602 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5603 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5604 } else {
5605 gen_ins(s, ot);
5606 }
5607 break;
5608 case 0x6e: /* outsS */
5609 case 0x6f:
5610 if ((b & 1) == 0)
5611 ot = OT_BYTE;
5612 else
5613 ot = dflag ? OT_LONG : OT_WORD;
5614 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
5615 gen_op_andl_T0_ffff();
5616 gen_check_io(s, ot, pc_start - s->cs_base,
5617 svm_is_rep(prefixes) | 4);
5618 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5619 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5620 } else {
5621 gen_outs(s, ot);
5622 }
5623 break;
5624
5625 /************************/
5626 /* port I/O */
5627
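        /* IN/OUT: the port is an immediate byte or DX masked to 16
           bits; gen_check_io performs the TSS I/O permission bitmap
           check and the SVM IOIO intercept before the access. */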
5628 case 0xe4:
5629 case 0xe5:
5630 if ((b & 1) == 0)
5631 ot = OT_BYTE;
5632 else
5633 ot = dflag ? OT_LONG : OT_WORD;
5634 val = ldub_code(s->pc++);
5635 gen_op_movl_T0_im(val);
5636 gen_check_io(s, ot, pc_start - s->cs_base,
5637 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
5638 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5639 tcg_gen_helper_1_1(helper_in_func[ot], cpu_T[1], cpu_tmp2_i32);
5640 gen_op_mov_reg_T1(ot, R_EAX);
5641 break;
5642 case 0xe6:
5643 case 0xe7:
5644 if ((b & 1) == 0)
5645 ot = OT_BYTE;
5646 else
5647 ot = dflag ? OT_LONG : OT_WORD;
5648 val = ldub_code(s->pc++);
5649 gen_op_movl_T0_im(val);
5650 gen_check_io(s, ot, pc_start - s->cs_base,
5651 svm_is_rep(prefixes));
5652 gen_op_mov_TN_reg(ot, 1, R_EAX);
5653
5654 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5655 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
5656 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5657 tcg_gen_helper_0_2(helper_out_func[ot], cpu_tmp2_i32, cpu_tmp3_i32);
5658 break;
5659 case 0xec:
5660 case 0xed:
5661 if ((b & 1) == 0)
5662 ot = OT_BYTE;
5663 else
5664 ot = dflag ? OT_LONG : OT_WORD;
5665 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
5666 gen_op_andl_T0_ffff();
5667 gen_check_io(s, ot, pc_start - s->cs_base,
5668 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
5669 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5670 tcg_gen_helper_1_1(helper_in_func[ot], cpu_T[1], cpu_tmp2_i32);
5671 gen_op_mov_reg_T1(ot, R_EAX);
5672 break;
5673 case 0xee:
5674 case 0xef:
5675 if ((b & 1) == 0)
5676 ot = OT_BYTE;
5677 else
5678 ot = dflag ? OT_LONG : OT_WORD;
5679 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
5680 gen_op_andl_T0_ffff();
5681 gen_check_io(s, ot, pc_start - s->cs_base,
5682 svm_is_rep(prefixes));
5683 gen_op_mov_TN_reg(ot, 1, R_EAX);
5684
5685 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5686 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
5687 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5688 tcg_gen_helper_0_2(helper_out_func[ot], cpu_tmp2_i32, cpu_tmp3_i32);
5689 break;
5690
5691 /************************/
5692 /* control */
5693 case 0xc2: /* ret im */
5694 val = ldsw_code(s->pc);
5695 s->pc += 2;
5696 gen_pop_T0(s);
5697 if (CODE64(s) && s->dflag)
5698 s->dflag = 2;
5699 gen_stack_update(s, val + (2 << s->dflag));
5700 if (s->dflag == 0)
5701 gen_op_andl_T0_ffff();
5702 gen_op_jmp_T0();
5703 gen_eob(s);
5704 break;
5705 case 0xc3: /* ret */
5706 gen_pop_T0(s);
5707 gen_pop_update(s);
5708 if (s->dflag == 0)
5709 gen_op_andl_T0_ffff();
5710 gen_op_jmp_T0();
5711 gen_eob(s);
5712 break;
5713 case 0xca: /* lret im */
5714 val = ldsw_code(s->pc);
5715 s->pc += 2;
5716 do_lret:
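        /* A protected-mode far return may change privilege level, so
           it is handled in a helper; in real/vm86 mode the offset and
           selector are popped directly and the stack is adjusted by
           the immediate. */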
5717 if (s->pe && !s->vm86) {
5718 if (s->cc_op != CC_OP_DYNAMIC)
5719 gen_op_set_cc_op(s->cc_op);
5720 gen_jmp_im(pc_start - s->cs_base);
5721 tcg_gen_helper_0_2(helper_lret_protected,
5722 tcg_const_i32(s->dflag),
5723 tcg_const_i32(val));
5724 } else {
5725 gen_stack_A0(s);
5726 /* pop offset */
5727 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
5728 if (s->dflag == 0)
5729 gen_op_andl_T0_ffff();
5730 /* NOTE: keeping EIP updated is not a problem in case of
5731 exception */
5732 gen_op_jmp_T0();
5733 /* pop selector */
5734 gen_op_addl_A0_im(2 << s->dflag);
5735 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
5736 gen_op_movl_seg_T0_vm(R_CS);
5737 /* add stack offset */
5738 gen_stack_update(s, val + (4 << s->dflag));
5739 }
5740 gen_eob(s);
5741 break;
5742 case 0xcb: /* lret */
5743 val = 0;
5744 goto do_lret;
5745 case 0xcf: /* iret */
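        /* IRET: real mode, and vm86 mode with IOPL==3, use the simple
           real-mode helper; protected mode needs the full helper,
           which also handles task returns and returns to vm86. */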
5746 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET))
5747 break;
5748 if (!s->pe) {
5749 /* real mode */
5750 tcg_gen_helper_0_1(helper_iret_real, tcg_const_i32(s->dflag));
5751 s->cc_op = CC_OP_EFLAGS;
5752 } else if (s->vm86) {
5753 if (s->iopl != 3) {
5754 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
5755 } else {
5756 tcg_gen_helper_0_1(helper_iret_real, tcg_const_i32(s->dflag));
5757 s->cc_op = CC_OP_EFLAGS;
5758 }
5759 } else {
5760 if (s->cc_op != CC_OP_DYNAMIC)
5761 gen_op_set_cc_op(s->cc_op);
5762 gen_jmp_im(pc_start - s->cs_base);
5763 tcg_gen_helper_0_2(helper_iret_protected,
5764 tcg_const_i32(s->dflag),
5765 tcg_const_i32(s->pc - s->cs_base));
5766 s->cc_op = CC_OP_EFLAGS;
5767 }
5768 gen_eob(s);
5769 break;
5770 case 0xe8: /* call im */
5771 {
5772 if (dflag)
5773 tval = (int32_t)insn_get(s, OT_LONG);
5774 else
5775 tval = (int16_t)insn_get(s, OT_WORD);
5776 next_eip = s->pc - s->cs_base;
5777 tval += next_eip;
5778 if (s->dflag == 0)
5779 tval &= 0xffff;
5780 gen_movtl_T0_im(next_eip);
5781 gen_push_T0(s);
5782 gen_jmp(s, tval);
5783 }
5784 break;
5785 case 0x9a: /* lcall im */
5786 {
5787 unsigned int selector, offset;
5788
5789 if (CODE64(s))
5790 goto illegal_op;
5791 ot = dflag ? OT_LONG : OT_WORD;
5792 offset = insn_get(s, ot);
5793 selector = insn_get(s, OT_WORD);
5794
5795 gen_op_movl_T0_im(selector);
5796 gen_op_movl_T1_imu(offset);
5797 }
5798 goto do_lcall;
5799 case 0xe9: /* jmp im */
5800 if (dflag)
5801 tval = (int32_t)insn_get(s, OT_LONG);
5802 else
5803 tval = (int16_t)insn_get(s, OT_WORD);
5804 tval += s->pc - s->cs_base;
5805 if (s->dflag == 0)
5806 tval &= 0xffff;
5807 gen_jmp(s, tval);
5808 break;
5809 case 0xea: /* ljmp im */
5810 {
5811 unsigned int selector, offset;
5812
5813 if (CODE64(s))
5814 goto illegal_op;
5815 ot = dflag ? OT_LONG : OT_WORD;
5816 offset = insn_get(s, ot);
5817 selector = insn_get(s, OT_WORD);
5818
5819 gen_op_movl_T0_im(selector);
5820 gen_op_movl_T1_imu(offset);
5821 }
5822 goto do_ljmp;
5823 case 0xeb: /* jmp Jb */
5824 tval = (int8_t)insn_get(s, OT_BYTE);
5825 tval += s->pc - s->cs_base;
5826 if (s->dflag == 0)
5827 tval &= 0xffff;
5828 gen_jmp(s, tval);
5829 break;
5830 case 0x70 ... 0x7f: /* jcc Jb */
5831 tval = (int8_t)insn_get(s, OT_BYTE);
5832 goto do_jcc;
5833 case 0x180 ... 0x18f: /* jcc Jv */
5834 if (dflag) {
5835 tval = (int32_t)insn_get(s, OT_LONG);
5836 } else {
5837 tval = (int16_t)insn_get(s, OT_WORD);
5838 }
5839 do_jcc:
5840 next_eip = s->pc - s->cs_base;
5841 tval += next_eip;
5842 if (s->dflag == 0)
5843 tval &= 0xffff;
5844 gen_jcc(s, b, tval, next_eip);
5845 break;
5846
5847 case 0x190 ... 0x19f: /* setcc Gv */
5848 modrm = ldub_code(s->pc++);
5849 gen_setcc(s, b);
5850 gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1);
5851 break;
5852 case 0x140 ... 0x14f: /* cmov Gv, Ev */
5853 {
5854 int l1;
5855 TCGv t0;
5856
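            /* CMOV is emitted as a conditional branch around an
               unconditional register write; the source operand is
               always read first, matching hardware, where the read
               (and any fault) happens even if the condition is false. */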
5857 ot = dflag + OT_WORD;
5858 modrm = ldub_code(s->pc++);
5859 reg = ((modrm >> 3) & 7) | rex_r;
5860 mod = (modrm >> 6) & 3;
5861 t0 = tcg_temp_local_new(TCG_TYPE_TL);
5862 if (mod != 3) {
5863 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5864 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
5865 } else {
5866 rm = (modrm & 7) | REX_B(s);
5867 gen_op_mov_v_reg(ot, t0, rm);
5868 }
5869#ifdef TARGET_X86_64
5870 if (ot == OT_LONG) {
5871            /* XXX: Intel-specific behaviour? */
5872 l1 = gen_new_label();
5873 gen_jcc1(s, s->cc_op, b ^ 1, l1);
5874 tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
5875 gen_set_label(l1);
5876 tcg_gen_movi_tl(cpu_tmp0, 0);
5877 tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
5878 } else
5879#endif
5880 {
5881 l1 = gen_new_label();
5882 gen_jcc1(s, s->cc_op, b ^ 1, l1);
5883 gen_op_mov_reg_v(ot, reg, t0);
5884 gen_set_label(l1);
5885 }
5886 tcg_temp_free(t0);
5887 }
5888 break;
5889
5890 /************************/
5891 /* flags */
5892 case 0x9c: /* pushf */
5893 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF))
5894 break;
5895 if (s->vm86 && s->iopl != 3) {
5896 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
5897 } else {
5898 if (s->cc_op != CC_OP_DYNAMIC)
5899 gen_op_set_cc_op(s->cc_op);
5900 tcg_gen_helper_1_0(helper_read_eflags, cpu_T[0]);
5901 gen_push_T0(s);
5902 }
5903 break;
5904 case 0x9d: /* popf */
5905 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF))
5906 break;
5907 if (s->vm86 && s->iopl != 3) {
5908 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
5909 } else {
5910 gen_pop_T0(s);
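                /* POPF may only modify a CPL/IOPL-dependent subset of
                   EFLAGS: at CPL 0 both IF and IOPL may change, at
                   CPL <= IOPL only IF, otherwise neither.  The 16-bit
                   form restricts the mask to the low word. */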
5911 if (s->cpl == 0) {
5912 if (s->dflag) {
5913 tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
5914 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK)));
5915 } else {
5916 tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
5917 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff));
5918 }
5919 } else {
5920 if (s->cpl <= s->iopl) {
5921 if (s->dflag) {
5922 tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
5923 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK)));
5924 } else {
5925 tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
5926 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff));
5927 }
5928 } else {
5929 if (s->dflag) {
5930 tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
5931 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK)));
5932 } else {
5933 tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
5934 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff));
5935 }
5936 }
5937 }
5938 gen_pop_update(s);
5939 s->cc_op = CC_OP_EFLAGS;
5940 /* abort translation because TF flag may change */
5941 gen_jmp_im(s->pc - s->cs_base);
5942 gen_eob(s);
5943 }
5944 break;
5945 case 0x9e: /* sahf */
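        /* SAHF loads SF/ZF/AF/PF/CF from AH while preserving OF, so
           OF is kept from the computed flags and the rest is replaced
           from AH.  In 64-bit mode LAHF/SAHF require the
           CPUID.80000001H LAHF_LM bit, as checked below. */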
5946 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
5947 goto illegal_op;
5948 gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
5949 if (s->cc_op != CC_OP_DYNAMIC)
5950 gen_op_set_cc_op(s->cc_op);
5951 gen_compute_eflags(cpu_cc_src);
5952 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
5953 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
5954 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
5955 s->cc_op = CC_OP_EFLAGS;
5956 break;
5957 case 0x9f: /* lahf */
5958 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
5959 goto illegal_op;
5960 if (s->cc_op != CC_OP_DYNAMIC)
5961 gen_op_set_cc_op(s->cc_op);
5962 gen_compute_eflags(cpu_T[0]);
5963 /* Note: gen_compute_eflags() only gives the condition codes */
5964 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02);
5965 gen_op_mov_reg_T0(OT_BYTE, R_AH);
5966 break;
5967 case 0xf5: /* cmc */
5968 if (s->cc_op != CC_OP_DYNAMIC)
5969 gen_op_set_cc_op(s->cc_op);
5970 gen_compute_eflags(cpu_cc_src);
5971 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
5972 s->cc_op = CC_OP_EFLAGS;
5973 break;
5974 case 0xf8: /* clc */
5975 if (s->cc_op != CC_OP_DYNAMIC)
5976 gen_op_set_cc_op(s->cc_op);
5977 gen_compute_eflags(cpu_cc_src);
5978 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
5979 s->cc_op = CC_OP_EFLAGS;
5980 break;
5981 case 0xf9: /* stc */
5982 if (s->cc_op != CC_OP_DYNAMIC)
5983 gen_op_set_cc_op(s->cc_op);
5984 gen_compute_eflags(cpu_cc_src);
5985 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
5986 s->cc_op = CC_OP_EFLAGS;
5987 break;
5988 case 0xfc: /* cld */
5989 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
5990 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
5991 break;
5992 case 0xfd: /* std */
5993 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
5994 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
5995 break;
5996
5997 /************************/
5998 /* bit operations */
5999 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6000 ot = dflag + OT_WORD;
6001 modrm = ldub_code(s->pc++);
6002 op = (modrm >> 3) & 7;
6003 mod = (modrm >> 6) & 3;
6004 rm = (modrm & 7) | REX_B(s);
6005 if (mod != 3) {
6006 s->rip_offset = 1;
6007 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6008 gen_op_ld_T0_A0(ot + s->mem_index);
6009 } else {
6010 gen_op_mov_TN_reg(ot, 0, rm);
6011 }
6012        /* load the immediate bit index (used below as a shift count) */
6013 val = ldub_code(s->pc++);
6014 gen_op_movl_T1_im(val);
6015 if (op < 4)
6016 goto illegal_op;
6017 op -= 4;
6018 goto bt_op;
6019 case 0x1a3: /* bt Gv, Ev */
6020 op = 0;
6021 goto do_btx;
6022 case 0x1ab: /* bts */
6023 op = 1;
6024 goto do_btx;
6025 case 0x1b3: /* btr */
6026 op = 2;
6027 goto do_btx;
6028 case 0x1bb: /* btc */
6029 op = 3;
6030 do_btx:
6031 ot = dflag + OT_WORD;
6032 modrm = ldub_code(s->pc++);
6033 reg = ((modrm >> 3) & 7) | rex_r;
6034 mod = (modrm >> 6) & 3;
6035 rm = (modrm & 7) | REX_B(s);
6036 gen_op_mov_TN_reg(OT_LONG, 1, reg);
6037 if (mod != 3) {
6038 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6039            /* specific case: a register bit index may reach outside the addressed operand, so add the corresponding displacement to A0 */
6040 gen_exts(ot, cpu_T[1]);
6041 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6042 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6043 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6044 gen_op_ld_T0_A0(ot + s->mem_index);
6045 } else {
6046 gen_op_mov_TN_reg(ot, 0, rm);
6047 }
6048 bt_op:
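        /* Common bt/bts/btr/btc tail: the bit index in cpu_T[1] is
           first reduced modulo the operand width, then the addressed
           bit is shifted down to bit 0 of cc_src so that CF falls out
           of the CC_OP_SAR* flag computation. */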
6049 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6050 switch(op) {
6051 case 0:
6052 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
6053 tcg_gen_movi_tl(cpu_cc_dst, 0);
6054 break;
6055 case 1:
6056 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6057 tcg_gen_movi_tl(cpu_tmp0, 1);
6058 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6059 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6060 break;
6061 case 2:
6062 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6063 tcg_gen_movi_tl(cpu_tmp0, 1);
6064 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6065 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6066 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6067 break;
6068 default:
6069 case 3:
6070 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6071 tcg_gen_movi_tl(cpu_tmp0, 1);
6072 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6073 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6074 break;
6075 }
6076 s->cc_op = CC_OP_SARB + ot;
6077 if (op != 0) {
6078 if (mod != 3)
6079 gen_op_st_T0_A0(ot + s->mem_index);
6080 else
6081 gen_op_mov_reg_T0(ot, rm);
6082 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6083 tcg_gen_movi_tl(cpu_cc_dst, 0);
6084 }
6085 break;
6086 case 0x1bc: /* bsf */
6087 case 0x1bd: /* bsr */
6088 {
6089 int label1;
6090 TCGv t0;
6091
6092 ot = dflag + OT_WORD;
6093 modrm = ldub_code(s->pc++);
6094 reg = ((modrm >> 3) & 7) | rex_r;
6095 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
6096 gen_extu(ot, cpu_T[0]);
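        /* if the source is zero, ZF must be set and the destination is
           left unmodified here (architecturally it is undefined): preset
           cc_dst to 0, which means ZF set under CC_OP_LOGIC, and branch
           around the helper call and the register writeback */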
6097 label1 = gen_new_label();
6098 tcg_gen_movi_tl(cpu_cc_dst, 0);
6099 t0 = tcg_temp_local_new(TCG_TYPE_TL);
6100 tcg_gen_mov_tl(t0, cpu_T[0]);
6101 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
6102 if (b & 1) {
6103 tcg_gen_helper_1_1(helper_bsr, cpu_T[0], t0);
6104 } else {
6105 tcg_gen_helper_1_1(helper_bsf, cpu_T[0], t0);
6106 }
6107 gen_op_mov_reg_T0(ot, reg);
6108 tcg_gen_movi_tl(cpu_cc_dst, 1);
6109 gen_set_label(label1);
6110 tcg_gen_discard_tl(cpu_cc_src);
6111 s->cc_op = CC_OP_LOGICB + ot;
6112 tcg_temp_free(t0);
6113 }
6114 break;
6115 /************************/
6116 /* bcd */
6117 case 0x27: /* daa */
6118 if (CODE64(s))
6119 goto illegal_op;
6120 if (s->cc_op != CC_OP_DYNAMIC)
6121 gen_op_set_cc_op(s->cc_op);
6122 tcg_gen_helper_0_0(helper_daa);
6123 s->cc_op = CC_OP_EFLAGS;
6124 break;
6125 case 0x2f: /* das */
6126 if (CODE64(s))
6127 goto illegal_op;
6128 if (s->cc_op != CC_OP_DYNAMIC)
6129 gen_op_set_cc_op(s->cc_op);
6130 tcg_gen_helper_0_0(helper_das);
6131 s->cc_op = CC_OP_EFLAGS;
6132 break;
6133 case 0x37: /* aaa */
6134 if (CODE64(s))
6135 goto illegal_op;
6136 if (s->cc_op != CC_OP_DYNAMIC)
6137 gen_op_set_cc_op(s->cc_op);
6138 tcg_gen_helper_0_0(helper_aaa);
6139 s->cc_op = CC_OP_EFLAGS;
6140 break;
6141 case 0x3f: /* aas */
6142 if (CODE64(s))
6143 goto illegal_op;
6144 if (s->cc_op != CC_OP_DYNAMIC)
6145 gen_op_set_cc_op(s->cc_op);
6146 tcg_gen_helper_0_0(helper_aas);
6147 s->cc_op = CC_OP_EFLAGS;
6148 break;
6149 case 0xd4: /* aam */
6150 if (CODE64(s))
6151 goto illegal_op;
6152 val = ldub_code(s->pc++);
6153 if (val == 0) {
6154 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6155 } else {
6156 tcg_gen_helper_0_1(helper_aam, tcg_const_i32(val));
6157 s->cc_op = CC_OP_LOGICB;
6158 }
6159 break;
6160 case 0xd5: /* aad */
6161 if (CODE64(s))
6162 goto illegal_op;
6163 val = ldub_code(s->pc++);
6164 tcg_gen_helper_0_1(helper_aad, tcg_const_i32(val));
6165 s->cc_op = CC_OP_LOGICB;
6166 break;
6167 /************************/
6168 /* misc */
6169 case 0x90: /* nop */
6170 /* XXX: xchg + rex handling */
6171 /* XXX: correct lock test for all insns */
6172 if (prefixes & PREFIX_LOCK)
6173 goto illegal_op;
6174 if (prefixes & PREFIX_REPZ) {
6175 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
6176 }
6177 break;
6178 case 0x9b: /* fwait */
6179 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6180 (HF_MP_MASK | HF_TS_MASK)) {
6181 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6182 } else {
6183 if (s->cc_op != CC_OP_DYNAMIC)
6184 gen_op_set_cc_op(s->cc_op);
6185 gen_jmp_im(pc_start - s->cs_base);
6186 tcg_gen_helper_0_0(helper_fwait);
6187 }
6188 break;
6189 case 0xcc: /* int3 */
6190 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_SWINT))
6191 break;
6192 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6193 break;
6194 case 0xcd: /* int N */
6195 val = ldub_code(s->pc++);
6196 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_SWINT))
6197 break;
6198 if (s->vm86 && s->iopl != 3) {
6199 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6200 } else {
6201 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6202 }
6203 break;
6204 case 0xce: /* into */
6205 if (CODE64(s))
6206 goto illegal_op;
6207 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_SWINT))
6208 break;
6209 if (s->cc_op != CC_OP_DYNAMIC)
6210 gen_op_set_cc_op(s->cc_op);
6211 gen_jmp_im(pc_start - s->cs_base);
6212 tcg_gen_helper_0_1(helper_into, tcg_const_i32(s->pc - pc_start));
6213 break;
6214 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6215 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP))
6216 break;
6217#if 1
6218 gen_debug(s, pc_start - s->cs_base);
6219#else
6220 /* start debug */
6221 tb_flush(cpu_single_env);
6222 cpu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6223#endif
6224 break;
6225 case 0xfa: /* cli */
6226 if (!s->vm86) {
6227 if (s->cpl <= s->iopl) {
6228 tcg_gen_helper_0_0(helper_cli);
6229 } else {
6230 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6231 }
6232 } else {
6233 if (s->iopl == 3) {
6234 tcg_gen_helper_0_0(helper_cli);
6235 } else {
6236 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6237 }
6238 }
6239 break;
6240 case 0xfb: /* sti */
6241 if (!s->vm86) {
6242 if (s->cpl <= s->iopl) {
6243 gen_sti:
6244 tcg_gen_helper_0_0(helper_sti);
6245 /* interrupts are re-enabled only after the insn following sti */
6246 /* if several consecutive insns would inhibit interrupts, only the
6247 _first_ one sets the inhibit flag */
6248 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
6249 tcg_gen_helper_0_0(helper_set_inhibit_irq);
6250 /* give a chance to handle pending irqs */
6251 gen_jmp_im(s->pc - s->cs_base);
6252 gen_eob(s);
6253 } else {
6254 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6255 }
6256 } else {
6257 if (s->iopl == 3) {
6258 goto gen_sti;
6259 } else {
6260 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6261 }
6262 }
6263 break;
6264 case 0x62: /* bound */
6265 if (CODE64(s))
6266 goto illegal_op;
6267 ot = dflag ? OT_LONG : OT_WORD;
6268 modrm = ldub_code(s->pc++);
6269 reg = (modrm >> 3) & 7;
6270 mod = (modrm >> 6) & 3;
6271 if (mod == 3)
6272 goto illegal_op;
6273 gen_op_mov_TN_reg(ot, 0, reg);
6274 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6275 gen_jmp_im(pc_start - s->cs_base);
6276 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6277 if (ot == OT_WORD)
6278 tcg_gen_helper_0_2(helper_boundw, cpu_A0, cpu_tmp2_i32);
6279 else
6280 tcg_gen_helper_0_2(helper_boundl, cpu_A0, cpu_tmp2_i32);
6281 break;
6282 case 0x1c8 ... 0x1cf: /* bswap reg */
6283 reg = (b & 7) | REX_B(s);
6284#ifdef TARGET_X86_64
6285 if (dflag == 2) {
6286 gen_op_mov_TN_reg(OT_QUAD, 0, reg);
6287 tcg_gen_bswap_i64(cpu_T[0], cpu_T[0]);
6288 gen_op_mov_reg_T0(OT_QUAD, reg);
6289 } else
6290 {
6291 TCGv tmp0;
6292 gen_op_mov_TN_reg(OT_LONG, 0, reg);
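            /* tcg_gen_bswap_i32() needs a true i32 operand, so on the
               64 bit target the low half is truncated out, swapped,
               then zero extended back into the target_ulong temporary */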
6293
6294 tmp0 = tcg_temp_new(TCG_TYPE_I32);
6295 tcg_gen_trunc_i64_i32(tmp0, cpu_T[0]);
6296 tcg_gen_bswap_i32(tmp0, tmp0);
6297 tcg_gen_extu_i32_i64(cpu_T[0], tmp0);
6298 gen_op_mov_reg_T0(OT_LONG, reg);
6299 }
6300#else
6301 {
6302 gen_op_mov_TN_reg(OT_LONG, 0, reg);
6303 tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]);
6304 gen_op_mov_reg_T0(OT_LONG, reg);
6305 }
6306#endif
6307 break;
6308 case 0xd6: /* salc */
6309 if (CODE64(s))
6310 goto illegal_op;
6311 if (s->cc_op != CC_OP_DYNAMIC)
6312 gen_op_set_cc_op(s->cc_op);
6313 gen_compute_eflags_c(cpu_T[0]);
6314 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
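        /* the carry computed above is 0 or 1; negating it yields
           0x00 or 0xff, which becomes the new AL */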
6315 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
6316 break;
6317 case 0xe0: /* loopnz */
6318 case 0xe1: /* loopz */
6319 case 0xe2: /* loop */
6320 case 0xe3: /* jecxz */
6321 {
6322 int l1, l2, l3;
6323
6324 tval = (int8_t)insn_get(s, OT_BYTE);
6325 next_eip = s->pc - s->cs_base;
6326 tval += next_eip;
6327 if (s->dflag == 0)
6328 tval &= 0xffff;
6329
6330 l1 = gen_new_label();
6331 l2 = gen_new_label();
6332 l3 = gen_new_label();
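            /* l1 is the branch-taken target, l3 the not-taken path
               (reached by fallthrough, or directly when loopz/loopnz
               sees ECX == 0), and l2 the join point before gen_eob() */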
6333 b &= 3;
6334 switch(b) {
6335 case 0: /* loopnz */
6336 case 1: /* loopz */
6337 if (s->cc_op != CC_OP_DYNAMIC)
6338 gen_op_set_cc_op(s->cc_op);
6339 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6340 gen_op_jz_ecx(s->aflag, l3);
6341 gen_compute_eflags(cpu_tmp0);
6342 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_Z);
6343 if (b == 0) {
6344 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1);
6345 } else {
6346 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, l1);
6347 }
6348 break;
6349 case 2: /* loop */
6350 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6351 gen_op_jnz_ecx(s->aflag, l1);
6352 break;
6353 default:
6354 case 3: /* jcxz */
6355 gen_op_jz_ecx(s->aflag, l1);
6356 break;
6357 }
6358
6359 gen_set_label(l3);
6360 gen_jmp_im(next_eip);
6361 tcg_gen_br(l2);
6362
6363 gen_set_label(l1);
6364 gen_jmp_im(tval);
6365 gen_set_label(l2);
6366 gen_eob(s);
6367 }
6368 break;
6369 case 0x130: /* wrmsr */
6370 case 0x132: /* rdmsr */
6371 if (s->cpl != 0) {
6372 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6373 } else {
6374 int retval = 0;
6375 if (b & 2) {
6376 retval = gen_svm_check_intercept_param(s, pc_start, SVM_EXIT_MSR, 0);
6377 tcg_gen_helper_0_0(helper_rdmsr);
6378 } else {
6379 retval = gen_svm_check_intercept_param(s, pc_start, SVM_EXIT_MSR, 1);
6380 tcg_gen_helper_0_0(helper_wrmsr);
6381 }
6382 if(retval)
6383 gen_eob(s);
6384 }
6385 break;
6386 case 0x131: /* rdtsc */
6387 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_RDTSC))
6388 break;
6389 gen_jmp_im(pc_start - s->cs_base);
6390 tcg_gen_helper_0_0(helper_rdtsc);
6391 break;
6392 case 0x133: /* rdpmc */
6393 gen_jmp_im(pc_start - s->cs_base);
6394 tcg_gen_helper_0_0(helper_rdpmc);
6395 break;
6396 case 0x134: /* sysenter */
6397 if (CODE64(s))
6398 goto illegal_op;
6399 if (!s->pe) {
6400 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6401 } else {
6402 if (s->cc_op != CC_OP_DYNAMIC) {
6403 gen_op_set_cc_op(s->cc_op);
6404 s->cc_op = CC_OP_DYNAMIC;
6405 }
6406 gen_jmp_im(pc_start - s->cs_base);
6407 tcg_gen_helper_0_0(helper_sysenter);
6408 gen_eob(s);
6409 }
6410 break;
6411 case 0x135: /* sysexit */
6412 if (CODE64(s))
6413 goto illegal_op;
6414 if (!s->pe) {
6415 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6416 } else {
6417 if (s->cc_op != CC_OP_DYNAMIC) {
6418 gen_op_set_cc_op(s->cc_op);
6419 s->cc_op = CC_OP_DYNAMIC;
6420 }
6421 gen_jmp_im(pc_start - s->cs_base);
6422 tcg_gen_helper_0_0(helper_sysexit);
6423 gen_eob(s);
6424 }
6425 break;
6426#ifdef TARGET_X86_64
6427 case 0x105: /* syscall */
6428 /* XXX: is it usable in real mode? */
6429 if (s->cc_op != CC_OP_DYNAMIC) {
6430 gen_op_set_cc_op(s->cc_op);
6431 s->cc_op = CC_OP_DYNAMIC;
6432 }
6433 gen_jmp_im(pc_start - s->cs_base);
6434 tcg_gen_helper_0_1(helper_syscall, tcg_const_i32(s->pc - pc_start));
6435 gen_eob(s);
6436 break;
6437 case 0x107: /* sysret */
6438 if (!s->pe) {
6439 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6440 } else {
6441 if (s->cc_op != CC_OP_DYNAMIC) {
6442 gen_op_set_cc_op(s->cc_op);
6443 s->cc_op = CC_OP_DYNAMIC;
6444 }
6445 gen_jmp_im(pc_start - s->cs_base);
6446 tcg_gen_helper_0_1(helper_sysret, tcg_const_i32(s->dflag));
6447 /* condition codes are modified only in long mode */
6448 if (s->lma)
6449 s->cc_op = CC_OP_EFLAGS;
6450 gen_eob(s);
6451 }
6452 break;
6453#endif
6454 case 0x1a2: /* cpuid */
6455 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_CPUID))
6456 break;
6457 tcg_gen_helper_0_0(helper_cpuid);
6458 break;
6459 case 0xf4: /* hlt */
6460 if (s->cpl != 0) {
6461 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6462 } else {
6463 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_HLT))
6464 break;
6465 if (s->cc_op != CC_OP_DYNAMIC)
6466 gen_op_set_cc_op(s->cc_op);
6467 gen_jmp_im(s->pc - s->cs_base);
6468 tcg_gen_helper_0_0(helper_hlt);
6469 s->is_jmp = 3;
6470 }
6471 break;
6472 case 0x100:
6473 modrm = ldub_code(s->pc++);
6474 mod = (modrm >> 6) & 3;
6475 op = (modrm >> 3) & 7;
6476 switch(op) {
6477 case 0: /* sldt */
6478 if (!s->pe || s->vm86)
6479 goto illegal_op;
6480 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ))
6481 break;
6482 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
6483 ot = OT_WORD;
6484 if (mod == 3)
6485 ot += s->dflag;
6486 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
6487 break;
6488 case 2: /* lldt */
6489 if (!s->pe || s->vm86)
6490 goto illegal_op;
6491 if (s->cpl != 0) {
6492 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6493 } else {
6494 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE))
6495 break;
6496 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
6497 gen_jmp_im(pc_start - s->cs_base);
6498 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6499 tcg_gen_helper_0_1(helper_lldt, cpu_tmp2_i32);
6500 }
6501 break;
6502 case 1: /* str */
6503 if (!s->pe || s->vm86)
6504 goto illegal_op;
6505 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ))
6506 break;
6507 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
6508 ot = OT_WORD;
6509 if (mod == 3)
6510 ot += s->dflag;
6511 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
6512 break;
6513 case 3: /* ltr */
6514 if (!s->pe || s->vm86)
6515 goto illegal_op;
6516 if (s->cpl != 0) {
6517 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6518 } else {
6519 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE))
6520 break;
6521 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
6522 gen_jmp_im(pc_start - s->cs_base);
6523 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6524 tcg_gen_helper_0_1(helper_ltr, cpu_tmp2_i32);
6525 }
6526 break;
6527 case 4: /* verr */
6528 case 5: /* verw */
6529 if (!s->pe || s->vm86)
6530 goto illegal_op;
6531 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
6532 if (s->cc_op != CC_OP_DYNAMIC)
6533 gen_op_set_cc_op(s->cc_op);
6534 if (op == 4)
6535 tcg_gen_helper_0_1(helper_verr, cpu_T[0]);
6536 else
6537 tcg_gen_helper_0_1(helper_verw, cpu_T[0]);
6538 s->cc_op = CC_OP_EFLAGS;
6539 break;
6540 default:
6541 goto illegal_op;
6542 }
6543 break;
6544 case 0x101:
6545 modrm = ldub_code(s->pc++);
6546 mod = (modrm >> 6) & 3;
6547 op = (modrm >> 3) & 7;
6548 rm = modrm & 7;
6549 switch(op) {
6550 case 0: /* sgdt */
6551 if (mod == 3)
6552 goto illegal_op;
6553 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ))
6554 break;
6555 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6556 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
6557 gen_op_st_T0_A0(OT_WORD + s->mem_index);
6558 gen_add_A0_im(s, 2);
6559 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
6560 if (!s->dflag)
6561 gen_op_andl_T0_im(0xffffff);
6562 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
6563 break;
6564 case 1:
6565 if (mod == 3) {
6566 switch (rm) {
6567 case 0: /* monitor */
6568 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
6569 s->cpl != 0)
6570 goto illegal_op;
6571 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_MONITOR))
6572 break;
6573 gen_jmp_im(pc_start - s->cs_base);
6574#ifdef TARGET_X86_64
6575 if (s->aflag == 2) {
6576 gen_op_movq_A0_reg(R_EAX);
6577 } else
6578#endif
6579 {
6580 gen_op_movl_A0_reg(R_EAX);
6581 if (s->aflag == 0)
6582 gen_op_andl_A0_ffff();
6583 }
6584 gen_add_A0_ds_seg(s);
6585 tcg_gen_helper_0_1(helper_monitor, cpu_A0);
6586 break;
6587 case 1: /* mwait */
6588 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
6589 s->cpl != 0)
6590 goto illegal_op;
6591 if (s->cc_op != CC_OP_DYNAMIC) {
6592 gen_op_set_cc_op(s->cc_op);
6593 s->cc_op = CC_OP_DYNAMIC;
6594 }
6595 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_MWAIT))
6596 break;
6597 gen_jmp_im(s->pc - s->cs_base);
6598 tcg_gen_helper_0_0(helper_mwait);
6599 gen_eob(s);
6600 break;
6601 default:
6602 goto illegal_op;
6603 }
6604 } else { /* sidt */
6605 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ))
6606 break;
6607 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6608 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
6609 gen_op_st_T0_A0(OT_WORD + s->mem_index);
6610 gen_add_A0_im(s, 2);
6611 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
6612 if (!s->dflag)
6613 gen_op_andl_T0_im(0xffffff);
6614 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
6615 }
6616 break;
6617 case 2: /* lgdt */
6618 case 3: /* lidt */
6619 if (mod == 3) {
6620 switch(rm) {
6621 case 0: /* VMRUN */
6622 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_VMRUN))
6623 break;
6624 if (s->cc_op != CC_OP_DYNAMIC)
6625 gen_op_set_cc_op(s->cc_op);
6626 gen_jmp_im(s->pc - s->cs_base);
6627 tcg_gen_helper_0_0(helper_vmrun);
6628 s->cc_op = CC_OP_EFLAGS;
6629 gen_eob(s);
6630 break;
6631 case 1: /* VMMCALL */
6632 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_VMMCALL))
6633 break;
6634 /* FIXME: cause #UD if hflags & SVM */
6635 tcg_gen_helper_0_0(helper_vmmcall);
6636 break;
6637 case 2: /* VMLOAD */
6638 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_VMLOAD))
6639 break;
6640 tcg_gen_helper_0_0(helper_vmload);
6641 break;
6642 case 3: /* VMSAVE */
6643 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_VMSAVE))
6644 break;
6645 tcg_gen_helper_0_0(helper_vmsave);
6646 break;
6647 case 4: /* STGI */
6648 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_STGI))
6649 break;
6650 tcg_gen_helper_0_0(helper_stgi);
6651 break;
6652 case 5: /* CLGI */
6653 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_CLGI))
6654 break;
6655 tcg_gen_helper_0_0(helper_clgi);
6656 break;
6657 case 6: /* SKINIT */
6658 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_SKINIT))
6659 break;
6660 tcg_gen_helper_0_0(helper_skinit);
6661 break;
6662 case 7: /* INVLPGA */
6663 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_INVLPGA))
6664 break;
6665 tcg_gen_helper_0_0(helper_invlpga);
6666 break;
6667 default:
6668 goto illegal_op;
6669 }
6670 } else if (s->cpl != 0) {
6671 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6672 } else {
6673 if (gen_svm_check_intercept(s, pc_start,
6674 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE))
6675 break;
6676 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6677 gen_op_ld_T1_A0(OT_WORD + s->mem_index);
6678 gen_add_A0_im(s, 2);
6679 gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
6680 if (!s->dflag)
6681 gen_op_andl_T0_im(0xffffff);
6682 if (op == 2) {
6683 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
6684 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
6685 } else {
6686 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
6687 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
6688 }
6689 }
6690 break;
6691 case 4: /* smsw */
6692 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0))
6693 break;
6694 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
6695 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1);
6696 break;
6697 case 6: /* lmsw */
6698 if (s->cpl != 0) {
6699 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6700 } else {
6701 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0))
6702 break;
6703 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
6704 tcg_gen_helper_0_1(helper_lmsw, cpu_T[0]);
6705 gen_jmp_im(s->pc - s->cs_base);
6706 gen_eob(s);
6707 }
6708 break;
6709 case 7: /* invlpg */
6710 if (s->cpl != 0) {
6711 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6712 } else {
6713 if (mod == 3) {
6714#ifdef TARGET_X86_64
6715 if (CODE64(s) && rm == 0) {
6716 /* swapgs */
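                    /* exchange GS.base with the KernelGSbase MSR value
                       shadowed in env->kernelgsbase */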
6717 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,segs[R_GS].base));
6718 tcg_gen_ld_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,kernelgsbase));
6719 tcg_gen_st_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,segs[R_GS].base));
6720 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,kernelgsbase));
6721 } else
6722#endif
6723 {
6724 goto illegal_op;
6725 }
6726 } else {
6727 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_INVLPG))
6728 break;
6729 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6730 tcg_gen_helper_0_1(helper_invlpg, cpu_A0);
6731 gen_jmp_im(s->pc - s->cs_base);
6732 gen_eob(s);
6733 }
6734 }
6735 break;
6736 default:
6737 goto illegal_op;
6738 }
6739 break;
6740 case 0x108: /* invd */
6741 case 0x109: /* wbinvd */
6742 if (s->cpl != 0) {
6743 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6744 } else {
6745 if (gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD))
6746 break;
6747 /* nothing to do */
6748 }
6749 break;
6750 case 0x63: /* arpl, or movslq in x86_64 long mode */
6751#ifdef TARGET_X86_64
6752 if (CODE64(s)) {
6753 int d_ot;
6754 /* d_ot is the size of the destination */
6755 d_ot = dflag + OT_WORD;
6756
6757 modrm = ldub_code(s->pc++);
6758 reg = ((modrm >> 3) & 7) | rex_r;
6759 mod = (modrm >> 6) & 3;
6760 rm = (modrm & 7) | REX_B(s);
6761
6762 if (mod == 3) {
6763 gen_op_mov_TN_reg(OT_LONG, 0, rm);
6764 /* sign extend */
6765 if (d_ot == OT_QUAD)
6766 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
6767 gen_op_mov_reg_T0(d_ot, reg);
6768 } else {
6769 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6770 if (d_ot == OT_QUAD) {
6771 gen_op_lds_T0_A0(OT_LONG + s->mem_index);
6772 } else {
6773 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
6774 }
6775 gen_op_mov_reg_T0(d_ot, reg);
6776 }
6777 } else
6778#endif
6779 {
6780 int label1;
6781 TCGv t0, t1, t2;
6782
6783 if (!s->pe || s->vm86)
6784 goto illegal_op;
6785 t0 = tcg_temp_local_new(TCG_TYPE_TL);
6786 t1 = tcg_temp_local_new(TCG_TYPE_TL);
6787 t2 = tcg_temp_local_new(TCG_TYPE_TL);
6788 ot = OT_WORD;
6789 modrm = ldub_code(s->pc++);
6790 reg = (modrm >> 3) & 7;
6791 mod = (modrm >> 6) & 3;
6792 rm = modrm & 7;
6793 if (mod != 3) {
6794 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6795 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
6796 } else {
6797 gen_op_mov_v_reg(ot, t0, rm);
6798 }
6799 gen_op_mov_v_reg(ot, t1, reg);
6800 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
6801 tcg_gen_andi_tl(t1, t1, 3);
6802 tcg_gen_movi_tl(t2, 0);
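            /* arpl: if the destination selector's RPL (its low 2 bits)
               is below the source's, raise it to the source's RPL and
               set ZF; t2 carries the CC_Z bit merged into EFLAGS below */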
6803 label1 = gen_new_label();
6804 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
6805 tcg_gen_andi_tl(t0, t0, ~3);
6806 tcg_gen_or_tl(t0, t0, t1);
6807 tcg_gen_movi_tl(t2, CC_Z);
6808 gen_set_label(label1);
6809 if (mod != 3) {
6810 gen_op_st_v(ot + s->mem_index, t0, cpu_A0);
6811 } else {
6812 gen_op_mov_reg_v(ot, rm, t0);
6813 }
6814 if (s->cc_op != CC_OP_DYNAMIC)
6815 gen_op_set_cc_op(s->cc_op);
6816 gen_compute_eflags(cpu_cc_src);
6817 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
6818 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
6819 s->cc_op = CC_OP_EFLAGS;
6820 tcg_temp_free(t0);
6821 tcg_temp_free(t1);
6822 tcg_temp_free(t2);
6823 }
6824 break;
6825 case 0x102: /* lar */
6826 case 0x103: /* lsl */
6827 {
6828 int label1;
6829 TCGv t0;
6830 if (!s->pe || s->vm86)
6831 goto illegal_op;
6832 ot = dflag ? OT_LONG : OT_WORD;
6833 modrm = ldub_code(s->pc++);
6834 reg = ((modrm >> 3) & 7) | rex_r;
6835 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
6836 t0 = tcg_temp_local_new(TCG_TYPE_TL);
6837 if (s->cc_op != CC_OP_DYNAMIC)
6838 gen_op_set_cc_op(s->cc_op);
6839 if (b == 0x102)
6840 tcg_gen_helper_1_1(helper_lar, t0, cpu_T[0]);
6841 else
6842 tcg_gen_helper_1_1(helper_lsl, t0, cpu_T[0]);
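            /* the helpers set CC_Z in cpu_cc_src on success and clear it
               on failure; the result is stored only when ZF is set */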
6843 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
6844 label1 = gen_new_label();
6845 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
6846 gen_op_mov_reg_v(ot, reg, t0);
6847 gen_set_label(label1);
6848 s->cc_op = CC_OP_EFLAGS;
6849 tcg_temp_free(t0);
6850 }
6851 break;
6852 case 0x118:
6853 modrm = ldub_code(s->pc++);
6854 mod = (modrm >> 6) & 3;
6855 op = (modrm >> 3) & 7;
6856 switch(op) {
6857 case 0: /* prefetchnta */
6858 case 1: /* prefetcht0 */
6859 case 2: /* prefetcht1 */
6860 case 3: /* prefetcht2 */
6861 if (mod == 3)
6862 goto illegal_op;
6863 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6864 /* nothing more to do */
6865 break;
6866 default: /* nop (multi-byte) */
6867 gen_nop_modrm(s, modrm);
6868 break;
6869 }
6870 break;
6871 case 0x119 ... 0x11f: /* nop (multi-byte) */
6872 modrm = ldub_code(s->pc++);
6873 gen_nop_modrm(s, modrm);
6874 break;
6875 case 0x120: /* mov reg, crN */
6876 case 0x122: /* mov crN, reg */
6877 if (s->cpl != 0) {
6878 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6879 } else {
6880 modrm = ldub_code(s->pc++);
6881 if ((modrm & 0xc0) != 0xc0)
6882 goto illegal_op;
6883 rm = (modrm & 7) | REX_B(s);
6884 reg = ((modrm >> 3) & 7) | rex_r;
6885 if (CODE64(s))
6886 ot = OT_QUAD;
6887 else
6888 ot = OT_LONG;
6889 switch(reg) {
6890 case 0:
6891 case 2:
6892 case 3:
6893 case 4:
6894 case 8:
6895 if (b & 2) {
6896 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0 + reg);
6897 gen_op_mov_TN_reg(ot, 0, rm);
6898 tcg_gen_helper_0_2(helper_movl_crN_T0,
6899 tcg_const_i32(reg), cpu_T[0]);
6900 gen_jmp_im(s->pc - s->cs_base);
6901 gen_eob(s);
6902 } else {
6903 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0 + reg);
6904#if !defined(CONFIG_USER_ONLY)
6905 if (reg == 8)
6906 tcg_gen_helper_1_0(helper_movtl_T0_cr8, cpu_T[0]);
6907 else
6908#endif
6909 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[reg]));
6910 gen_op_mov_reg_T0(ot, rm);
6911 }
6912 break;
6913 default:
6914 goto illegal_op;
6915 }
6916 }
6917 break;
6918 case 0x121: /* mov reg, drN */
6919 case 0x123: /* mov drN, reg */
6920 if (s->cpl != 0) {
6921 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6922 } else {
6923 modrm = ldub_code(s->pc++);
6924 if ((modrm & 0xc0) != 0xc0)
6925 goto illegal_op;
6926 rm = (modrm & 7) | REX_B(s);
6927 reg = ((modrm >> 3) & 7) | rex_r;
6928 if (CODE64(s))
6929 ot = OT_QUAD;
6930 else
6931 ot = OT_LONG;
6932 /* XXX: do it dynamically with CR4.DE bit */
6933 if (reg == 4 || reg == 5 || reg >= 8)
6934 goto illegal_op;
6935 if (b & 2) {
6936 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
6937 gen_op_mov_TN_reg(ot, 0, rm);
6938 tcg_gen_helper_0_2(helper_movl_drN_T0,
6939 tcg_const_i32(reg), cpu_T[0]);
6940 gen_jmp_im(s->pc - s->cs_base);
6941 gen_eob(s);
6942 } else {
6943 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
6944 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
6945 gen_op_mov_reg_T0(ot, rm);
6946 }
6947 }
6948 break;
6949 case 0x106: /* clts */
6950 if (s->cpl != 0) {
6951 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6952 } else {
6953 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
6954 tcg_gen_helper_0_0(helper_clts);
6955 /* abort block because static cpu state changed */
6956 gen_jmp_im(s->pc - s->cs_base);
6957 gen_eob(s);
6958 }
6959 break;
6960 /* MMX/3DNow!/SSE/SSE2/SSE3 support */
6961 case 0x1c3: /* MOVNTI reg, mem */
6962 if (!(s->cpuid_features & CPUID_SSE2))
6963 goto illegal_op;
6964 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
6965 modrm = ldub_code(s->pc++);
6966 mod = (modrm >> 6) & 3;
6967 if (mod == 3)
6968 goto illegal_op;
6969 reg = ((modrm >> 3) & 7) | rex_r;
6970 /* generate a generic store */
6971 gen_ldst_modrm(s, modrm, ot, reg, 1);
6972 break;
6973 case 0x1ae:
6974 modrm = ldub_code(s->pc++);
6975 mod = (modrm >> 6) & 3;
6976 op = (modrm >> 3) & 7;
6977 switch(op) {
6978 case 0: /* fxsave */
6979 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
6980 (s->flags & HF_EM_MASK))
6981 goto illegal_op;
6982 if (s->flags & HF_TS_MASK) {
6983 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6984 break;
6985 }
6986 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6987 if (s->cc_op != CC_OP_DYNAMIC)
6988 gen_op_set_cc_op(s->cc_op);
6989 gen_jmp_im(pc_start - s->cs_base);
6990 tcg_gen_helper_0_2(helper_fxsave,
6991 cpu_A0, tcg_const_i32((s->dflag == 2)));
6992 break;
6993 case 1: /* fxrstor */
6994 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
6995 (s->flags & HF_EM_MASK))
6996 goto illegal_op;
6997 if (s->flags & HF_TS_MASK) {
6998 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6999 break;
7000 }
7001 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7002 if (s->cc_op != CC_OP_DYNAMIC)
7003 gen_op_set_cc_op(s->cc_op);
7004 gen_jmp_im(pc_start - s->cs_base);
7005 tcg_gen_helper_0_2(helper_fxrstor,
7006 cpu_A0, tcg_const_i32((s->dflag == 2)));
7007 break;
7008 case 2: /* ldmxcsr */
7009 case 3: /* stmxcsr */
7010 if (s->flags & HF_TS_MASK) {
7011 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7012 break;
7013 }
7014 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7015 mod == 3)
7016 goto illegal_op;
7017 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7018 if (op == 2) {
7019 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7020 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7021 } else {
7022 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7023 gen_op_st_T0_A0(OT_LONG + s->mem_index);
7024 }
7025 break;
7026 case 5: /* lfence */
7027 case 6: /* mfence */
7028 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE))
7029 goto illegal_op;
7030 break;
7031 case 7: /* sfence / clflush */
7032 if ((modrm & 0xc7) == 0xc0) {
7033 /* sfence */
7034 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7035 if (!(s->cpuid_features & CPUID_SSE))
7036 goto illegal_op;
7037 } else {
7038 /* clflush */
7039 if (!(s->cpuid_features & CPUID_CLFLUSH))
7040 goto illegal_op;
7041 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7042 }
7043 break;
7044 default:
7045 goto illegal_op;
7046 }
7047 break;
7048 case 0x10d: /* 3DNow! prefetch(w) */
7049 modrm = ldub_code(s->pc++);
7050 mod = (modrm >> 6) & 3;
7051 if (mod == 3)
7052 goto illegal_op;
7053 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7054 /* ignore for now */
7055 break;
7056 case 0x1aa: /* rsm */
7057 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM))
7058 break;
7059 if (!(s->flags & HF_SMM_MASK))
7060 goto illegal_op;
7061 if (s->cc_op != CC_OP_DYNAMIC) {
7062 gen_op_set_cc_op(s->cc_op);
7063 s->cc_op = CC_OP_DYNAMIC;
7064 }
7065 gen_jmp_im(s->pc - s->cs_base);
7066 tcg_gen_helper_0_0(helper_rsm);
7067 gen_eob(s);
7068 break;
7069 case 0x10e ... 0x10f:
7070 /* 3DNow! instructions, ignore prefixes */
7071 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
7072 case 0x110 ... 0x117:
7073 case 0x128 ... 0x12f:
7074 case 0x150 ... 0x177:
7075 case 0x17c ... 0x17f:
7076 case 0x1c2:
7077 case 0x1c4 ... 0x1c6:
7078 case 0x1d0 ... 0x1fe:
7079 gen_sse(s, b, pc_start, rex_r);
7080 break;
7081 default:
7082 goto illegal_op;
7083 }
7084 /* lock generation */
7085 if (s->prefix & PREFIX_LOCK)
7086 tcg_gen_helper_0_0(helper_unlock);
7087 return s->pc;
7088 illegal_op:
7089 if (s->prefix & PREFIX_LOCK)
7090 tcg_gen_helper_0_0(helper_unlock);
7091 /* XXX: ensure that no lock was generated */
7092 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7093 return s->pc;
7094}
7095
7096void optimize_flags_init(void)
7097{
7098#if TCG_TARGET_REG_BITS == 32
7099 assert(sizeof(CCTable) == (1 << 3));
7100#else
7101 assert(sizeof(CCTable) == (1 << 4));
7102#endif
7103 cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
7104 cpu_cc_op = tcg_global_mem_new(TCG_TYPE_I32,
7105 TCG_AREG0, offsetof(CPUState, cc_op), "cc_op");
7106 cpu_cc_src = tcg_global_mem_new(TCG_TYPE_TL,
7107 TCG_AREG0, offsetof(CPUState, cc_src), "cc_src");
7108 cpu_cc_dst = tcg_global_mem_new(TCG_TYPE_TL,
7109 TCG_AREG0, offsetof(CPUState, cc_dst), "cc_dst");
7110 cpu_cc_tmp = tcg_global_mem_new(TCG_TYPE_TL,
7111 TCG_AREG0, offsetof(CPUState, cc_tmp), "cc_tmp");
7112
7113 /* register helpers */
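    /* helper.h is included once more with DEF_HELPER redefined, so that
       every helper declared there is registered with the TCG runtime */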
7114
7115#define DEF_HELPER(ret, name, params) tcg_register_helper(name, #name);
7116#include "helper.h"
7117}
7118
7119/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7120 basic block 'tb'. If search_pc is TRUE, also generate PC
7121 information for each intermediate instruction. */
7122static inline int gen_intermediate_code_internal(CPUState *env,
7123 TranslationBlock *tb,
7124 int search_pc)
7125{
7126 DisasContext dc1, *dc = &dc1;
7127 target_ulong pc_ptr;
7128 uint16_t *gen_opc_end;
7129 int j, lj, cflags;
7130 uint64_t flags;
7131 target_ulong pc_start;
7132 target_ulong cs_base;
7133
7134 /* generate intermediate code */
7135 pc_start = tb->pc;
7136 cs_base = tb->cs_base;
7137 flags = tb->flags;
7138 cflags = tb->cflags;
7139
7140 dc->pe = (flags >> HF_PE_SHIFT) & 1;
7141 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7142 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7143 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7144 dc->f_st = 0;
7145 dc->vm86 = (flags >> VM_SHIFT) & 1;
7146 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7147 dc->iopl = (flags >> IOPL_SHIFT) & 3;
7148 dc->tf = (flags >> TF_SHIFT) & 1;
7149 dc->singlestep_enabled = env->singlestep_enabled;
7150 dc->cc_op = CC_OP_DYNAMIC;
7151 dc->cs_base = cs_base;
7152 dc->tb = tb;
7153 dc->popl_esp_hack = 0;
7154 /* select memory access functions */
7155 dc->mem_index = 0;
7156 if (flags & HF_SOFTMMU_MASK) {
7157 if (dc->cpl == 3)
7158 dc->mem_index = 2 * 4;
7159 else
7160 dc->mem_index = 1 * 4;
7161 }
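    /* mem_index selects the softmmu access functions for the current
       privilege level (kernel vs user); without softmmu it stays 0 for
       direct host accesses. Each set spans the four operand sizes,
       hence the stride of 4. */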
7162 dc->cpuid_features = env->cpuid_features;
7163 dc->cpuid_ext_features = env->cpuid_ext_features;
7164 dc->cpuid_ext2_features = env->cpuid_ext2_features;
7165 dc->cpuid_ext3_features = env->cpuid_ext3_features;
7166#ifdef TARGET_X86_64
7167 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7168 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7169#endif
7170 dc->flags = flags;
7171 dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
7172 (flags & HF_INHIBIT_IRQ_MASK)
7173#ifndef CONFIG_SOFTMMU
7174 || (flags & HF_SOFTMMU_MASK)
7175#endif
7176 );
7177#if 0
7178 /* check addseg logic */
7179 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
7180 printf("ERROR addseg\n");
7181#endif
7182
7183 cpu_T[0] = tcg_temp_new(TCG_TYPE_TL);
7184 cpu_T[1] = tcg_temp_new(TCG_TYPE_TL);
7185 cpu_A0 = tcg_temp_new(TCG_TYPE_TL);
7186 cpu_T3 = tcg_temp_new(TCG_TYPE_TL);
7187
7188 cpu_tmp0 = tcg_temp_new(TCG_TYPE_TL);
7189 cpu_tmp1_i64 = tcg_temp_new(TCG_TYPE_I64);
7190 cpu_tmp2_i32 = tcg_temp_new(TCG_TYPE_I32);
7191 cpu_tmp3_i32 = tcg_temp_new(TCG_TYPE_I32);
7192 cpu_tmp4 = tcg_temp_new(TCG_TYPE_TL);
7193 cpu_tmp5 = tcg_temp_new(TCG_TYPE_TL);
7194 cpu_tmp6 = tcg_temp_new(TCG_TYPE_TL);
7195 cpu_ptr0 = tcg_temp_new(TCG_TYPE_PTR);
7196 cpu_ptr1 = tcg_temp_new(TCG_TYPE_PTR);
7197
7198 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
7199
7200 dc->is_jmp = DISAS_NEXT;
7201 pc_ptr = pc_start;
7202 lj = -1;
7203
7204 for(;;) {
7205 if (env->nb_breakpoints > 0) {
7206 for(j = 0; j < env->nb_breakpoints; j++) {
7207 if (env->breakpoints[j] == pc_ptr) {
7208 gen_debug(dc, pc_ptr - dc->cs_base);
7209 break;
7210 }
7211 }
7212 }
7213 if (search_pc) {
7214 j = gen_opc_ptr - gen_opc_buf;
7215 if (lj < j) {
7216 lj++;
7217 while (lj < j)
7218 gen_opc_instr_start[lj++] = 0;
7219 }
7220 gen_opc_pc[lj] = pc_ptr;
7221 gen_opc_cc_op[lj] = dc->cc_op;
7222 gen_opc_instr_start[lj] = 1;
7223 }
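        /* record, for each generated micro-op, the guest pc and cc_op
           state it belongs to, so gen_pc_load() can restore precise
           state when a fault occurs inside this tb */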
7224 pc_ptr = disas_insn(dc, pc_ptr);
7225 /* stop translation if indicated */
7226 if (dc->is_jmp)
7227 break;
7228 /* in single step mode, we generate only one instruction and
7229 generate an exception */
7230 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7231 the flag and abort the translation to give the irqs a
7232 chance to happen */
7233 if (dc->tf || dc->singlestep_enabled ||
7234 (flags & HF_INHIBIT_IRQ_MASK) ||
7235 (cflags & CF_SINGLE_INSN)) {
7236 gen_jmp_im(pc_ptr - dc->cs_base);
7237 gen_eob(dc);
7238 break;
7239 }
7240 /* stop generation if the translation grows too long */
7241 if (gen_opc_ptr >= gen_opc_end ||
7242 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32)) {
7243 gen_jmp_im(pc_ptr - dc->cs_base);
7244 gen_eob(dc);
7245 break;
7246 }
7247 }
7248 *gen_opc_ptr = INDEX_op_end;
7249 /* don't forget to fill in the last values */
7250 if (search_pc) {
7251 j = gen_opc_ptr - gen_opc_buf;
7252 lj++;
7253 while (lj <= j)
7254 gen_opc_instr_start[lj++] = 0;
7255 }
7256
7257#ifdef DEBUG_DISAS
7258 if (loglevel & CPU_LOG_TB_CPU) {
7259 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
7260 }
7261 if (loglevel & CPU_LOG_TB_IN_ASM) {
7262 int disas_flags;
7263 fprintf(logfile, "----------------\n");
7264 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
7265#ifdef TARGET_X86_64
7266 if (dc->code64)
7267 disas_flags = 2;
7268 else
7269#endif
7270 disas_flags = !dc->code32;
7271 target_disas(logfile, pc_start, pc_ptr - pc_start, disas_flags);
7272 fprintf(logfile, "\n");
7273 }
7274#endif
7275
7276 if (!search_pc)
7277 tb->size = pc_ptr - pc_start;
7278 return 0;
7279}
7280
7281int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
7282{
7283 return gen_intermediate_code_internal(env, tb, 0);
7284}
7285
7286int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
7287{
7288 return gen_intermediate_code_internal(env, tb, 1);
7289}
7290
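/* called after an exception occurred in a tb: restore the guest eip,
   and the cc_op when it was statically known, from the tables filled
   in by the search_pc pass above */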
7291void gen_pc_load(CPUState *env, TranslationBlock *tb,
7292 unsigned long searched_pc, int pc_pos, void *puc)
7293{
7294 int cc_op;
7295#ifdef DEBUG_DISAS
7296 if (loglevel & CPU_LOG_TB_OP) {
7297 int i;
7298 fprintf(logfile, "RESTORE:\n");
7299 for(i = 0;i <= pc_pos; i++) {
7300 if (gen_opc_instr_start[i]) {
7301 fprintf(logfile, "0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
7302 }
7303 }
7304 fprintf(logfile, "spc=0x%08lx pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
7305 searched_pc, pc_pos, gen_opc_pc[pc_pos] - tb->cs_base,
7306 (uint32_t)tb->cs_base);
7307 }
7308#endif
7309 env->eip = gen_opc_pc[pc_pos] - tb->cs_base;
7310 cc_op = gen_opc_cc_op[pc_pos];
7311 if (cc_op != CC_OP_DYNAMIC)
7312 env->cc_op = cc_op;
7313}