]> git.proxmox.com Git - qemu.git/blob - target-i386/translate.c
use debug_insn_start to have nicer debug traces
[qemu.git] / target-i386 / translate.c
1 /*
2 * i386 translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
26 #include <assert.h>
27
28 #include "cpu.h"
29 #include "exec-all.h"
30 #include "disas.h"
31 #include "helper.h"
32 #include "tcg-op.h"
33
34 #define PREFIX_REPZ 0x01
35 #define PREFIX_REPNZ 0x02
36 #define PREFIX_LOCK 0x04
37 #define PREFIX_DATA 0x08
38 #define PREFIX_ADR 0x10
39
40 #ifdef TARGET_X86_64
41 #define X86_64_ONLY(x) x
42 #define X86_64_DEF(x...) x
43 #define CODE64(s) ((s)->code64)
44 #define REX_X(s) ((s)->rex_x)
45 #define REX_B(s) ((s)->rex_b)
46 /* XXX: gcc generates push/pop in some opcodes, so we cannot use them */
47 #if 1
48 #define BUGGY_64(x) NULL
49 #endif
50 #else
51 #define X86_64_ONLY(x) NULL
52 #define X86_64_DEF(x...)
53 #define CODE64(s) 0
54 #define REX_X(s) 0
55 #define REX_B(s) 0
56 #endif
57
58 //#define MACRO_TEST 1
59
60 /* global register indexes */
61 static TCGv cpu_env, cpu_T[2], cpu_A0, cpu_cc_op, cpu_cc_src, cpu_cc_dst;
62 static TCGv cpu_T3;
63 /* local register indexes (only used inside old micro ops) */
64 static TCGv cpu_tmp0, cpu_tmp1_i64, cpu_tmp2_i32, cpu_tmp3_i32, cpu_tmp4, cpu_ptr0, cpu_ptr1;
65 static TCGv cpu_tmp5, cpu_tmp6;
66
67 #ifdef TARGET_X86_64
68 static int x86_64_hregs;
69 #endif
70
71 typedef struct DisasContext {
72 /* current insn context */
73 int override; /* -1 if no override */
74 int prefix;
75 int aflag, dflag;
76 target_ulong pc; /* pc = eip + cs_base */
77 int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
78 static state change (stop translation) */
79 /* current block context */
80 target_ulong cs_base; /* base of CS segment */
81 int pe; /* protected mode */
82 int code32; /* 32 bit code segment */
83 #ifdef TARGET_X86_64
84 int lma; /* long mode active */
85 int code64; /* 64 bit code segment */
86 int rex_x, rex_b;
87 #endif
88 int ss32; /* 32 bit stack segment */
89 int cc_op; /* current CC operation */
90 int addseg; /* non zero if either DS/ES/SS have a non zero base */
91 int f_st; /* currently unused */
92 int vm86; /* vm86 mode */
93 int cpl;
94 int iopl;
95 int tf; /* TF cpu flag */
96 int singlestep_enabled; /* "hardware" single step enabled */
97 int jmp_opt; /* use direct block chaining for direct jumps */
98 int mem_index; /* select memory access functions */
99 uint64_t flags; /* all execution flags */
100 struct TranslationBlock *tb;
101 int popl_esp_hack; /* for correct popl with esp base handling */
102 int rip_offset; /* only used in x86_64, but left for simplicity */
103 int cpuid_features;
104 int cpuid_ext_features;
105 int cpuid_ext2_features;
106 int cpuid_ext3_features;
107 } DisasContext;
108
109 static void gen_eob(DisasContext *s);
110 static void gen_jmp(DisasContext *s, target_ulong eip);
111 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
112
113 /* i386 arith/logic operations */
114 enum {
115 OP_ADDL,
116 OP_ORL,
117 OP_ADCL,
118 OP_SBBL,
119 OP_ANDL,
120 OP_SUBL,
121 OP_XORL,
122 OP_CMPL,
123 };
124
125 /* i386 shift ops */
126 enum {
127 OP_ROL,
128 OP_ROR,
129 OP_RCL,
130 OP_RCR,
131 OP_SHL,
132 OP_SHR,
133 OP_SHL1, /* undocumented */
134 OP_SAR = 7,
135 };
136
137 enum {
138 JCC_O,
139 JCC_B,
140 JCC_Z,
141 JCC_BE,
142 JCC_S,
143 JCC_P,
144 JCC_L,
145 JCC_LE,
146 };
147
148 /* operand size */
149 enum {
150 OT_BYTE = 0,
151 OT_WORD,
152 OT_LONG,
153 OT_QUAD,
154 };
155
156 enum {
157 /* I386 int registers */
158 OR_EAX, /* MUST be even numbered */
159 OR_ECX,
160 OR_EDX,
161 OR_EBX,
162 OR_ESP,
163 OR_EBP,
164 OR_ESI,
165 OR_EDI,
166
167 OR_TMP0 = 16, /* temporary operand register */
168 OR_TMP1,
169 OR_A0, /* temporary register used when doing address evaluation */
170 };
171
172 static inline void gen_op_movl_T0_0(void)
173 {
174 tcg_gen_movi_tl(cpu_T[0], 0);
175 }
176
177 static inline void gen_op_movl_T0_im(int32_t val)
178 {
179 tcg_gen_movi_tl(cpu_T[0], val);
180 }
181
182 static inline void gen_op_movl_T0_imu(uint32_t val)
183 {
184 tcg_gen_movi_tl(cpu_T[0], val);
185 }
186
187 static inline void gen_op_movl_T1_im(int32_t val)
188 {
189 tcg_gen_movi_tl(cpu_T[1], val);
190 }
191
192 static inline void gen_op_movl_T1_imu(uint32_t val)
193 {
194 tcg_gen_movi_tl(cpu_T[1], val);
195 }
196
197 static inline void gen_op_movl_A0_im(uint32_t val)
198 {
199 tcg_gen_movi_tl(cpu_A0, val);
200 }
201
202 #ifdef TARGET_X86_64
203 static inline void gen_op_movq_A0_im(int64_t val)
204 {
205 tcg_gen_movi_tl(cpu_A0, val);
206 }
207 #endif
208
209 static inline void gen_movtl_T0_im(target_ulong val)
210 {
211 tcg_gen_movi_tl(cpu_T[0], val);
212 }
213
214 static inline void gen_movtl_T1_im(target_ulong val)
215 {
216 tcg_gen_movi_tl(cpu_T[1], val);
217 }
218
219 static inline void gen_op_andl_T0_ffff(void)
220 {
221 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
222 }
223
224 static inline void gen_op_andl_T0_im(uint32_t val)
225 {
226 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
227 }
228
229 static inline void gen_op_movl_T0_T1(void)
230 {
231 tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
232 }
233
234 static inline void gen_op_andl_A0_ffff(void)
235 {
236 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
237 }
238
239 #ifdef TARGET_X86_64
240
241 #define NB_OP_SIZES 4
242
243 #else /* !TARGET_X86_64 */
244
245 #define NB_OP_SIZES 3
246
247 #endif /* !TARGET_X86_64 */
248
249 #if defined(WORDS_BIGENDIAN)
250 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
251 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
252 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
253 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
254 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
255 #else
256 #define REG_B_OFFSET 0
257 #define REG_H_OFFSET 1
258 #define REG_W_OFFSET 0
259 #define REG_L_OFFSET 0
260 #define REG_LH_OFFSET 4
261 #endif
262
263 static inline void gen_op_mov_reg_TN(int ot, int t_index, int reg)
264 {
265 switch(ot) {
266 case OT_BYTE:
267 if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
268 tcg_gen_st8_tl(cpu_T[t_index], cpu_env, offsetof(CPUState, regs[reg]) + REG_B_OFFSET);
269 } else {
270 tcg_gen_st8_tl(cpu_T[t_index], cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET);
271 }
272 break;
273 case OT_WORD:
274 tcg_gen_st16_tl(cpu_T[t_index], cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
275 break;
276 #ifdef TARGET_X86_64
277 case OT_LONG:
278 tcg_gen_st32_tl(cpu_T[t_index], cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
279 /* high part of register set to zero */
280 tcg_gen_movi_tl(cpu_tmp0, 0);
281 tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
282 break;
283 default:
284 case OT_QUAD:
285 tcg_gen_st_tl(cpu_T[t_index], cpu_env, offsetof(CPUState, regs[reg]));
286 break;
287 #else
288 default:
289 case OT_LONG:
290 tcg_gen_st32_tl(cpu_T[t_index], cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
291 break;
292 #endif
293 }
294 }
295
296 static inline void gen_op_mov_reg_T0(int ot, int reg)
297 {
298 gen_op_mov_reg_TN(ot, 0, reg);
299 }
300
301 static inline void gen_op_mov_reg_T1(int ot, int reg)
302 {
303 gen_op_mov_reg_TN(ot, 1, reg);
304 }
305
306 static inline void gen_op_mov_reg_A0(int size, int reg)
307 {
308 switch(size) {
309 case 0:
310 tcg_gen_st16_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
311 break;
312 #ifdef TARGET_X86_64
313 case 1:
314 tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
315 /* high part of register set to zero */
316 tcg_gen_movi_tl(cpu_tmp0, 0);
317 tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
318 break;
319 default:
320 case 2:
321 tcg_gen_st_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]));
322 break;
323 #else
324 default:
325 case 1:
326 tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
327 break;
328 #endif
329 }
330 }
331
332 static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
333 {
334 switch(ot) {
335 case OT_BYTE:
336 if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
337 goto std_case;
338 } else {
339 tcg_gen_ld8u_tl(cpu_T[t_index], cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET);
340 }
341 break;
342 default:
343 std_case:
344 tcg_gen_ld_tl(cpu_T[t_index], cpu_env, offsetof(CPUState, regs[reg]));
345 break;
346 }
347 }
348
349 static inline void gen_op_movl_A0_reg(int reg)
350 {
351 tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
352 }
353
354 static inline void gen_op_addl_A0_im(int32_t val)
355 {
356 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
357 #ifdef TARGET_X86_64
358 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
359 #endif
360 }
361
362 #ifdef TARGET_X86_64
363 static inline void gen_op_addq_A0_im(int64_t val)
364 {
365 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
366 }
367 #endif
368
369 static void gen_add_A0_im(DisasContext *s, int val)
370 {
371 #ifdef TARGET_X86_64
372 if (CODE64(s))
373 gen_op_addq_A0_im(val);
374 else
375 #endif
376 gen_op_addl_A0_im(val);
377 }
378
379 static inline void gen_op_addl_T0_T1(void)
380 {
381 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
382 }
383
384 static inline void gen_op_jmp_T0(void)
385 {
386 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, eip));
387 }
388
389 static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
390 {
391 switch(size) {
392 case 0:
393 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
394 tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
395 tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
396 break;
397 case 1:
398 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
399 tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
400 #ifdef TARGET_X86_64
401 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
402 #endif
403 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
404 break;
405 #ifdef TARGET_X86_64
406 case 2:
407 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
408 tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
409 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
410 break;
411 #endif
412 }
413 }
414
415 static inline void gen_op_add_reg_T0(int size, int reg)
416 {
417 switch(size) {
418 case 0:
419 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
420 tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
421 tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
422 break;
423 case 1:
424 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
425 tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
426 #ifdef TARGET_X86_64
427 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
428 #endif
429 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
430 break;
431 #ifdef TARGET_X86_64
432 case 2:
433 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
434 tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
435 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
436 break;
437 #endif
438 }
439 }
440
441 static inline void gen_op_set_cc_op(int32_t val)
442 {
443 tcg_gen_movi_i32(cpu_cc_op, val);
444 }
445
446 static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
447 {
448 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
449 if (shift != 0)
450 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
451 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
452 #ifdef TARGET_X86_64
453 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
454 #endif
455 }
456
457 static inline void gen_op_movl_A0_seg(int reg)
458 {
459 tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base) + REG_L_OFFSET);
460 }
461
462 static inline void gen_op_addl_A0_seg(int reg)
463 {
464 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
465 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
466 #ifdef TARGET_X86_64
467 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
468 #endif
469 }
470
471 #ifdef TARGET_X86_64
472 static inline void gen_op_movq_A0_seg(int reg)
473 {
474 tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base));
475 }
476
477 static inline void gen_op_addq_A0_seg(int reg)
478 {
479 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
480 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
481 }
482
483 static inline void gen_op_movq_A0_reg(int reg)
484 {
485 tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]));
486 }
487
488 static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
489 {
490 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
491 if (shift != 0)
492 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
493 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
494 }
495 #endif
496
497 static inline void gen_op_lds_T0_A0(int idx)
498 {
499 int mem_index = (idx >> 2) - 1;
500 switch(idx & 3) {
501 case 0:
502 tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
503 break;
504 case 1:
505 tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
506 break;
507 default:
508 case 2:
509 tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
510 break;
511 }
512 }
513
514 /* sign does not matter, except for lidt/lgdt call (TODO: fix it) */
515 static inline void gen_op_ld_T0_A0(int idx)
516 {
517 int mem_index = (idx >> 2) - 1;
518 switch(idx & 3) {
519 case 0:
520 tcg_gen_qemu_ld8u(cpu_T[0], cpu_A0, mem_index);
521 break;
522 case 1:
523 tcg_gen_qemu_ld16u(cpu_T[0], cpu_A0, mem_index);
524 break;
525 case 2:
526 tcg_gen_qemu_ld32u(cpu_T[0], cpu_A0, mem_index);
527 break;
528 default:
529 case 3:
530 tcg_gen_qemu_ld64(cpu_T[0], cpu_A0, mem_index);
531 break;
532 }
533 }
534
535 static inline void gen_op_ldu_T0_A0(int idx)
536 {
537 gen_op_ld_T0_A0(idx);
538 }
539
540 static inline void gen_op_ld_T1_A0(int idx)
541 {
542 int mem_index = (idx >> 2) - 1;
543 switch(idx & 3) {
544 case 0:
545 tcg_gen_qemu_ld8u(cpu_T[1], cpu_A0, mem_index);
546 break;
547 case 1:
548 tcg_gen_qemu_ld16u(cpu_T[1], cpu_A0, mem_index);
549 break;
550 case 2:
551 tcg_gen_qemu_ld32u(cpu_T[1], cpu_A0, mem_index);
552 break;
553 default:
554 case 3:
555 tcg_gen_qemu_ld64(cpu_T[1], cpu_A0, mem_index);
556 break;
557 }
558 }
559
560 static inline void gen_op_st_T0_A0(int idx)
561 {
562 int mem_index = (idx >> 2) - 1;
563 switch(idx & 3) {
564 case 0:
565 tcg_gen_qemu_st8(cpu_T[0], cpu_A0, mem_index);
566 break;
567 case 1:
568 tcg_gen_qemu_st16(cpu_T[0], cpu_A0, mem_index);
569 break;
570 case 2:
571 tcg_gen_qemu_st32(cpu_T[0], cpu_A0, mem_index);
572 break;
573 default:
574 case 3:
575 tcg_gen_qemu_st64(cpu_T[0], cpu_A0, mem_index);
576 break;
577 }
578 }
579
580 static inline void gen_op_st_T1_A0(int idx)
581 {
582 int mem_index = (idx >> 2) - 1;
583 switch(idx & 3) {
584 case 0:
585 tcg_gen_qemu_st8(cpu_T[1], cpu_A0, mem_index);
586 break;
587 case 1:
588 tcg_gen_qemu_st16(cpu_T[1], cpu_A0, mem_index);
589 break;
590 case 2:
591 tcg_gen_qemu_st32(cpu_T[1], cpu_A0, mem_index);
592 break;
593 default:
594 case 3:
595 tcg_gen_qemu_st64(cpu_T[1], cpu_A0, mem_index);
596 break;
597 }
598 }
599
600 static inline void gen_jmp_im(target_ulong pc)
601 {
602 tcg_gen_movi_tl(cpu_tmp0, pc);
603 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, eip));
604 }
605
606 static inline void gen_string_movl_A0_ESI(DisasContext *s)
607 {
608 int override;
609
610 override = s->override;
611 #ifdef TARGET_X86_64
612 if (s->aflag == 2) {
613 if (override >= 0) {
614 gen_op_movq_A0_seg(override);
615 gen_op_addq_A0_reg_sN(0, R_ESI);
616 } else {
617 gen_op_movq_A0_reg(R_ESI);
618 }
619 } else
620 #endif
621 if (s->aflag) {
622 /* 32 bit address */
623 if (s->addseg && override < 0)
624 override = R_DS;
625 if (override >= 0) {
626 gen_op_movl_A0_seg(override);
627 gen_op_addl_A0_reg_sN(0, R_ESI);
628 } else {
629 gen_op_movl_A0_reg(R_ESI);
630 }
631 } else {
632 /* 16 address, always override */
633 if (override < 0)
634 override = R_DS;
635 gen_op_movl_A0_reg(R_ESI);
636 gen_op_andl_A0_ffff();
637 gen_op_addl_A0_seg(override);
638 }
639 }
640
641 static inline void gen_string_movl_A0_EDI(DisasContext *s)
642 {
643 #ifdef TARGET_X86_64
644 if (s->aflag == 2) {
645 gen_op_movq_A0_reg(R_EDI);
646 } else
647 #endif
648 if (s->aflag) {
649 if (s->addseg) {
650 gen_op_movl_A0_seg(R_ES);
651 gen_op_addl_A0_reg_sN(0, R_EDI);
652 } else {
653 gen_op_movl_A0_reg(R_EDI);
654 }
655 } else {
656 gen_op_movl_A0_reg(R_EDI);
657 gen_op_andl_A0_ffff();
658 gen_op_addl_A0_seg(R_ES);
659 }
660 }
661
662 static inline void gen_op_movl_T0_Dshift(int ot)
663 {
664 tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
665 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
666 };
667
668 static void gen_extu(int ot, TCGv reg)
669 {
670 switch(ot) {
671 case OT_BYTE:
672 tcg_gen_ext8u_tl(reg, reg);
673 break;
674 case OT_WORD:
675 tcg_gen_ext16u_tl(reg, reg);
676 break;
677 case OT_LONG:
678 tcg_gen_ext32u_tl(reg, reg);
679 break;
680 default:
681 break;
682 }
683 }
684
685 static void gen_exts(int ot, TCGv reg)
686 {
687 switch(ot) {
688 case OT_BYTE:
689 tcg_gen_ext8s_tl(reg, reg);
690 break;
691 case OT_WORD:
692 tcg_gen_ext16s_tl(reg, reg);
693 break;
694 case OT_LONG:
695 tcg_gen_ext32s_tl(reg, reg);
696 break;
697 default:
698 break;
699 }
700 }
701
702 static inline void gen_op_jnz_ecx(int size, int label1)
703 {
704 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));
705 gen_extu(size + 1, cpu_tmp0);
706 tcg_gen_brcond_tl(TCG_COND_NE, cpu_tmp0, tcg_const_tl(0), label1);
707 }
708
709 static inline void gen_op_jz_ecx(int size, int label1)
710 {
711 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));
712 gen_extu(size + 1, cpu_tmp0);
713 tcg_gen_brcond_tl(TCG_COND_EQ, cpu_tmp0, tcg_const_tl(0), label1);
714 }
715
716 static void *helper_in_func[3] = {
717 helper_inb,
718 helper_inw,
719 helper_inl,
720 };
721
722 static void *helper_out_func[3] = {
723 helper_outb,
724 helper_outw,
725 helper_outl,
726 };
727
728 static void *gen_check_io_func[3] = {
729 helper_check_iob,
730 helper_check_iow,
731 helper_check_iol,
732 };
733
734 static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
735 uint32_t svm_flags)
736 {
737 int state_saved;
738 target_ulong next_eip;
739
740 state_saved = 0;
741 if (s->pe && (s->cpl > s->iopl || s->vm86)) {
742 if (s->cc_op != CC_OP_DYNAMIC)
743 gen_op_set_cc_op(s->cc_op);
744 gen_jmp_im(cur_eip);
745 state_saved = 1;
746 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
747 tcg_gen_helper_0_1(gen_check_io_func[ot],
748 cpu_tmp2_i32);
749 }
750 if(s->flags & (1ULL << INTERCEPT_IOIO_PROT)) {
751 if (!state_saved) {
752 if (s->cc_op != CC_OP_DYNAMIC)
753 gen_op_set_cc_op(s->cc_op);
754 gen_jmp_im(cur_eip);
755 state_saved = 1;
756 }
757 svm_flags |= (1 << (4 + ot));
758 next_eip = s->pc - s->cs_base;
759 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
760 tcg_gen_helper_0_3(helper_svm_check_io,
761 cpu_tmp2_i32,
762 tcg_const_i32(svm_flags),
763 tcg_const_i32(next_eip - cur_eip));
764 }
765 }
766
767 static inline void gen_movs(DisasContext *s, int ot)
768 {
769 gen_string_movl_A0_ESI(s);
770 gen_op_ld_T0_A0(ot + s->mem_index);
771 gen_string_movl_A0_EDI(s);
772 gen_op_st_T0_A0(ot + s->mem_index);
773 gen_op_movl_T0_Dshift(ot);
774 gen_op_add_reg_T0(s->aflag, R_ESI);
775 gen_op_add_reg_T0(s->aflag, R_EDI);
776 }
777
778 static inline void gen_update_cc_op(DisasContext *s)
779 {
780 if (s->cc_op != CC_OP_DYNAMIC) {
781 gen_op_set_cc_op(s->cc_op);
782 s->cc_op = CC_OP_DYNAMIC;
783 }
784 }
785
786 static void gen_op_update1_cc(void)
787 {
788 tcg_gen_discard_tl(cpu_cc_src);
789 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
790 }
791
792 static void gen_op_update2_cc(void)
793 {
794 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
795 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
796 }
797
798 static inline void gen_op_cmpl_T0_T1_cc(void)
799 {
800 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
801 tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
802 }
803
804 static inline void gen_op_testl_T0_T1_cc(void)
805 {
806 tcg_gen_discard_tl(cpu_cc_src);
807 tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
808 }
809
810 static void gen_op_update_neg_cc(void)
811 {
812 tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
813 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
814 }
815
816 /* compute eflags.C to reg */
817 static void gen_compute_eflags_c(TCGv reg)
818 {
819 #if TCG_TARGET_REG_BITS == 32
820 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_cc_op, 3);
821 tcg_gen_addi_i32(cpu_tmp2_i32, cpu_tmp2_i32,
822 (long)cc_table + offsetof(CCTable, compute_c));
823 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0);
824 tcg_gen_call(&tcg_ctx, cpu_tmp2_i32, TCG_CALL_PURE,
825 1, &cpu_tmp2_i32, 0, NULL);
826 #else
827 tcg_gen_extu_i32_tl(cpu_tmp1_i64, cpu_cc_op);
828 tcg_gen_shli_i64(cpu_tmp1_i64, cpu_tmp1_i64, 4);
829 tcg_gen_addi_i64(cpu_tmp1_i64, cpu_tmp1_i64,
830 (long)cc_table + offsetof(CCTable, compute_c));
831 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_tmp1_i64, 0);
832 tcg_gen_call(&tcg_ctx, cpu_tmp1_i64, TCG_CALL_PURE,
833 1, &cpu_tmp2_i32, 0, NULL);
834 #endif
835 tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
836 }
837
838 /* compute all eflags to cc_src */
839 static void gen_compute_eflags(TCGv reg)
840 {
841 #if TCG_TARGET_REG_BITS == 32
842 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_cc_op, 3);
843 tcg_gen_addi_i32(cpu_tmp2_i32, cpu_tmp2_i32,
844 (long)cc_table + offsetof(CCTable, compute_all));
845 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0);
846 tcg_gen_call(&tcg_ctx, cpu_tmp2_i32, TCG_CALL_PURE,
847 1, &cpu_tmp2_i32, 0, NULL);
848 #else
849 tcg_gen_extu_i32_tl(cpu_tmp1_i64, cpu_cc_op);
850 tcg_gen_shli_i64(cpu_tmp1_i64, cpu_tmp1_i64, 4);
851 tcg_gen_addi_i64(cpu_tmp1_i64, cpu_tmp1_i64,
852 (long)cc_table + offsetof(CCTable, compute_all));
853 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_tmp1_i64, 0);
854 tcg_gen_call(&tcg_ctx, cpu_tmp1_i64, TCG_CALL_PURE,
855 1, &cpu_tmp2_i32, 0, NULL);
856 #endif
857 tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
858 }
859
860 static inline void gen_setcc_slow_T0(int op)
861 {
862 switch(op) {
863 case JCC_O:
864 gen_compute_eflags(cpu_T[0]);
865 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 11);
866 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
867 break;
868 case JCC_B:
869 gen_compute_eflags_c(cpu_T[0]);
870 break;
871 case JCC_Z:
872 gen_compute_eflags(cpu_T[0]);
873 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 6);
874 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
875 break;
876 case JCC_BE:
877 gen_compute_eflags(cpu_tmp0);
878 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6);
879 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
880 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
881 break;
882 case JCC_S:
883 gen_compute_eflags(cpu_T[0]);
884 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 7);
885 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
886 break;
887 case JCC_P:
888 gen_compute_eflags(cpu_T[0]);
889 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 2);
890 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
891 break;
892 case JCC_L:
893 gen_compute_eflags(cpu_tmp0);
894 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
895 tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */
896 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
897 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
898 break;
899 default:
900 case JCC_LE:
901 gen_compute_eflags(cpu_tmp0);
902 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
903 tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */
904 tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */
905 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
906 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
907 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
908 break;
909 }
910 }
911
912 /* return true if setcc_slow is not needed (WARNING: must be kept in
913 sync with gen_jcc1) */
914 static int is_fast_jcc_case(DisasContext *s, int b)
915 {
916 int jcc_op;
917 jcc_op = (b >> 1) & 7;
918 switch(s->cc_op) {
919 /* we optimize the cmp/jcc case */
920 case CC_OP_SUBB:
921 case CC_OP_SUBW:
922 case CC_OP_SUBL:
923 case CC_OP_SUBQ:
924 if (jcc_op == JCC_O || jcc_op == JCC_P)
925 goto slow_jcc;
926 break;
927
928 /* some jumps are easy to compute */
929 case CC_OP_ADDB:
930 case CC_OP_ADDW:
931 case CC_OP_ADDL:
932 case CC_OP_ADDQ:
933
934 case CC_OP_LOGICB:
935 case CC_OP_LOGICW:
936 case CC_OP_LOGICL:
937 case CC_OP_LOGICQ:
938
939 case CC_OP_INCB:
940 case CC_OP_INCW:
941 case CC_OP_INCL:
942 case CC_OP_INCQ:
943
944 case CC_OP_DECB:
945 case CC_OP_DECW:
946 case CC_OP_DECL:
947 case CC_OP_DECQ:
948
949 case CC_OP_SHLB:
950 case CC_OP_SHLW:
951 case CC_OP_SHLL:
952 case CC_OP_SHLQ:
953 if (jcc_op != JCC_Z && jcc_op != JCC_S)
954 goto slow_jcc;
955 break;
956 default:
957 slow_jcc:
958 return 0;
959 }
960 return 1;
961 }
962
963 /* generate a conditional jump to label 'l1' according to jump opcode
964 value 'b'. In the fast case, T0 is guaranted not to be used. */
965 static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
966 {
967 int inv, jcc_op, size, cond;
968 TCGv t0;
969
970 inv = b & 1;
971 jcc_op = (b >> 1) & 7;
972
973 switch(cc_op) {
974 /* we optimize the cmp/jcc case */
975 case CC_OP_SUBB:
976 case CC_OP_SUBW:
977 case CC_OP_SUBL:
978 case CC_OP_SUBQ:
979
980 size = cc_op - CC_OP_SUBB;
981 switch(jcc_op) {
982 case JCC_Z:
983 fast_jcc_z:
984 switch(size) {
985 case 0:
986 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xff);
987 t0 = cpu_tmp0;
988 break;
989 case 1:
990 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffff);
991 t0 = cpu_tmp0;
992 break;
993 #ifdef TARGET_X86_64
994 case 2:
995 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffffffff);
996 t0 = cpu_tmp0;
997 break;
998 #endif
999 default:
1000 t0 = cpu_cc_dst;
1001 break;
1002 }
1003 tcg_gen_brcond_tl(inv ? TCG_COND_NE : TCG_COND_EQ, t0,
1004 tcg_const_tl(0), l1);
1005 break;
1006 case JCC_S:
1007 fast_jcc_s:
1008 switch(size) {
1009 case 0:
1010 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
1011 tcg_gen_brcond_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
1012 tcg_const_tl(0), l1);
1013 break;
1014 case 1:
1015 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
1016 tcg_gen_brcond_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
1017 tcg_const_tl(0), l1);
1018 break;
1019 #ifdef TARGET_X86_64
1020 case 2:
1021 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
1022 tcg_gen_brcond_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
1023 tcg_const_tl(0), l1);
1024 break;
1025 #endif
1026 default:
1027 tcg_gen_brcond_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
1028 tcg_const_tl(0), l1);
1029 break;
1030 }
1031 break;
1032
1033 case JCC_B:
1034 cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
1035 goto fast_jcc_b;
1036 case JCC_BE:
1037 cond = inv ? TCG_COND_GTU : TCG_COND_LEU;
1038 fast_jcc_b:
1039 tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
1040 switch(size) {
1041 case 0:
1042 t0 = cpu_tmp0;
1043 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xff);
1044 tcg_gen_andi_tl(t0, cpu_cc_src, 0xff);
1045 break;
1046 case 1:
1047 t0 = cpu_tmp0;
1048 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffff);
1049 tcg_gen_andi_tl(t0, cpu_cc_src, 0xffff);
1050 break;
1051 #ifdef TARGET_X86_64
1052 case 2:
1053 t0 = cpu_tmp0;
1054 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffffffff);
1055 tcg_gen_andi_tl(t0, cpu_cc_src, 0xffffffff);
1056 break;
1057 #endif
1058 default:
1059 t0 = cpu_cc_src;
1060 break;
1061 }
1062 tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
1063 break;
1064
1065 case JCC_L:
1066 cond = inv ? TCG_COND_GE : TCG_COND_LT;
1067 goto fast_jcc_l;
1068 case JCC_LE:
1069 cond = inv ? TCG_COND_GT : TCG_COND_LE;
1070 fast_jcc_l:
1071 tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
1072 switch(size) {
1073 case 0:
1074 t0 = cpu_tmp0;
1075 tcg_gen_ext8s_tl(cpu_tmp4, cpu_tmp4);
1076 tcg_gen_ext8s_tl(t0, cpu_cc_src);
1077 break;
1078 case 1:
1079 t0 = cpu_tmp0;
1080 tcg_gen_ext16s_tl(cpu_tmp4, cpu_tmp4);
1081 tcg_gen_ext16s_tl(t0, cpu_cc_src);
1082 break;
1083 #ifdef TARGET_X86_64
1084 case 2:
1085 t0 = cpu_tmp0;
1086 tcg_gen_ext32s_tl(cpu_tmp4, cpu_tmp4);
1087 tcg_gen_ext32s_tl(t0, cpu_cc_src);
1088 break;
1089 #endif
1090 default:
1091 t0 = cpu_cc_src;
1092 break;
1093 }
1094 tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
1095 break;
1096
1097 default:
1098 goto slow_jcc;
1099 }
1100 break;
1101
1102 /* some jumps are easy to compute */
1103 case CC_OP_ADDB:
1104 case CC_OP_ADDW:
1105 case CC_OP_ADDL:
1106 case CC_OP_ADDQ:
1107
1108 case CC_OP_ADCB:
1109 case CC_OP_ADCW:
1110 case CC_OP_ADCL:
1111 case CC_OP_ADCQ:
1112
1113 case CC_OP_SBBB:
1114 case CC_OP_SBBW:
1115 case CC_OP_SBBL:
1116 case CC_OP_SBBQ:
1117
1118 case CC_OP_LOGICB:
1119 case CC_OP_LOGICW:
1120 case CC_OP_LOGICL:
1121 case CC_OP_LOGICQ:
1122
1123 case CC_OP_INCB:
1124 case CC_OP_INCW:
1125 case CC_OP_INCL:
1126 case CC_OP_INCQ:
1127
1128 case CC_OP_DECB:
1129 case CC_OP_DECW:
1130 case CC_OP_DECL:
1131 case CC_OP_DECQ:
1132
1133 case CC_OP_SHLB:
1134 case CC_OP_SHLW:
1135 case CC_OP_SHLL:
1136 case CC_OP_SHLQ:
1137
1138 case CC_OP_SARB:
1139 case CC_OP_SARW:
1140 case CC_OP_SARL:
1141 case CC_OP_SARQ:
1142 switch(jcc_op) {
1143 case JCC_Z:
1144 size = (cc_op - CC_OP_ADDB) & 3;
1145 goto fast_jcc_z;
1146 case JCC_S:
1147 size = (cc_op - CC_OP_ADDB) & 3;
1148 goto fast_jcc_s;
1149 default:
1150 goto slow_jcc;
1151 }
1152 break;
1153 default:
1154 slow_jcc:
1155 gen_setcc_slow_T0(jcc_op);
1156 tcg_gen_brcond_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
1157 cpu_T[0], tcg_const_tl(0), l1);
1158 break;
1159 }
1160 }
1161
1162 /* XXX: does not work with gdbstub "ice" single step - not a
1163 serious problem */
1164 static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
1165 {
1166 int l1, l2;
1167
1168 l1 = gen_new_label();
1169 l2 = gen_new_label();
1170 gen_op_jnz_ecx(s->aflag, l1);
1171 gen_set_label(l2);
1172 gen_jmp_tb(s, next_eip, 1);
1173 gen_set_label(l1);
1174 return l2;
1175 }
1176
1177 static inline void gen_stos(DisasContext *s, int ot)
1178 {
1179 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1180 gen_string_movl_A0_EDI(s);
1181 gen_op_st_T0_A0(ot + s->mem_index);
1182 gen_op_movl_T0_Dshift(ot);
1183 gen_op_add_reg_T0(s->aflag, R_EDI);
1184 }
1185
1186 static inline void gen_lods(DisasContext *s, int ot)
1187 {
1188 gen_string_movl_A0_ESI(s);
1189 gen_op_ld_T0_A0(ot + s->mem_index);
1190 gen_op_mov_reg_T0(ot, R_EAX);
1191 gen_op_movl_T0_Dshift(ot);
1192 gen_op_add_reg_T0(s->aflag, R_ESI);
1193 }
1194
1195 static inline void gen_scas(DisasContext *s, int ot)
1196 {
1197 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1198 gen_string_movl_A0_EDI(s);
1199 gen_op_ld_T1_A0(ot + s->mem_index);
1200 gen_op_cmpl_T0_T1_cc();
1201 gen_op_movl_T0_Dshift(ot);
1202 gen_op_add_reg_T0(s->aflag, R_EDI);
1203 }
1204
1205 static inline void gen_cmps(DisasContext *s, int ot)
1206 {
1207 gen_string_movl_A0_ESI(s);
1208 gen_op_ld_T0_A0(ot + s->mem_index);
1209 gen_string_movl_A0_EDI(s);
1210 gen_op_ld_T1_A0(ot + s->mem_index);
1211 gen_op_cmpl_T0_T1_cc();
1212 gen_op_movl_T0_Dshift(ot);
1213 gen_op_add_reg_T0(s->aflag, R_ESI);
1214 gen_op_add_reg_T0(s->aflag, R_EDI);
1215 }
1216
1217 static inline void gen_ins(DisasContext *s, int ot)
1218 {
1219 gen_string_movl_A0_EDI(s);
1220 /* Note: we must do this dummy write first to be restartable in
1221 case of page fault. */
1222 gen_op_movl_T0_0();
1223 gen_op_st_T0_A0(ot + s->mem_index);
1224 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1225 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1226 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1227 tcg_gen_helper_1_1(helper_in_func[ot], cpu_T[0], cpu_tmp2_i32);
1228 gen_op_st_T0_A0(ot + s->mem_index);
1229 gen_op_movl_T0_Dshift(ot);
1230 gen_op_add_reg_T0(s->aflag, R_EDI);
1231 }
1232
1233 static inline void gen_outs(DisasContext *s, int ot)
1234 {
1235 gen_string_movl_A0_ESI(s);
1236 gen_op_ld_T0_A0(ot + s->mem_index);
1237
1238 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1239 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1240 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1241 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
1242 tcg_gen_helper_0_2(helper_out_func[ot], cpu_tmp2_i32, cpu_tmp3_i32);
1243
1244 gen_op_movl_T0_Dshift(ot);
1245 gen_op_add_reg_T0(s->aflag, R_ESI);
1246 }
1247
1248 /* same method as Valgrind : we generate jumps to current or next
1249 instruction */
1250 #define GEN_REPZ(op) \
1251 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1252 target_ulong cur_eip, target_ulong next_eip) \
1253 { \
1254 int l2;\
1255 gen_update_cc_op(s); \
1256 l2 = gen_jz_ecx_string(s, next_eip); \
1257 gen_ ## op(s, ot); \
1258 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1259 /* a loop would cause two single step exceptions if ECX = 1 \
1260 before rep string_insn */ \
1261 if (!s->jmp_opt) \
1262 gen_op_jz_ecx(s->aflag, l2); \
1263 gen_jmp(s, cur_eip); \
1264 }
1265
1266 #define GEN_REPZ2(op) \
1267 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1268 target_ulong cur_eip, \
1269 target_ulong next_eip, \
1270 int nz) \
1271 { \
1272 int l2;\
1273 gen_update_cc_op(s); \
1274 l2 = gen_jz_ecx_string(s, next_eip); \
1275 gen_ ## op(s, ot); \
1276 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1277 gen_op_set_cc_op(CC_OP_SUBB + ot); \
1278 gen_jcc1(s, CC_OP_SUBB + ot, (JCC_Z << 1) | (nz ^ 1), l2); \
1279 if (!s->jmp_opt) \
1280 gen_op_jz_ecx(s->aflag, l2); \
1281 gen_jmp(s, cur_eip); \
1282 }
1283
1284 GEN_REPZ(movs)
1285 GEN_REPZ(stos)
1286 GEN_REPZ(lods)
1287 GEN_REPZ(ins)
1288 GEN_REPZ(outs)
1289 GEN_REPZ2(scas)
1290 GEN_REPZ2(cmps)
1291
1292 static void *helper_fp_arith_ST0_FT0[8] = {
1293 helper_fadd_ST0_FT0,
1294 helper_fmul_ST0_FT0,
1295 helper_fcom_ST0_FT0,
1296 helper_fcom_ST0_FT0,
1297 helper_fsub_ST0_FT0,
1298 helper_fsubr_ST0_FT0,
1299 helper_fdiv_ST0_FT0,
1300 helper_fdivr_ST0_FT0,
1301 };
1302
1303 /* NOTE the exception in "r" op ordering */
1304 static void *helper_fp_arith_STN_ST0[8] = {
1305 helper_fadd_STN_ST0,
1306 helper_fmul_STN_ST0,
1307 NULL,
1308 NULL,
1309 helper_fsubr_STN_ST0,
1310 helper_fsub_STN_ST0,
1311 helper_fdivr_STN_ST0,
1312 helper_fdiv_STN_ST0,
1313 };
1314
1315 /* if d == OR_TMP0, it means memory operand (address in A0) */
1316 static void gen_op(DisasContext *s1, int op, int ot, int d)
1317 {
1318 if (d != OR_TMP0) {
1319 gen_op_mov_TN_reg(ot, 0, d);
1320 } else {
1321 gen_op_ld_T0_A0(ot + s1->mem_index);
1322 }
1323 switch(op) {
1324 case OP_ADCL:
1325 if (s1->cc_op != CC_OP_DYNAMIC)
1326 gen_op_set_cc_op(s1->cc_op);
1327 gen_compute_eflags_c(cpu_tmp4);
1328 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1329 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1330 if (d != OR_TMP0)
1331 gen_op_mov_reg_T0(ot, d);
1332 else
1333 gen_op_st_T0_A0(ot + s1->mem_index);
1334 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1335 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1336 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1337 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1338 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
1339 s1->cc_op = CC_OP_DYNAMIC;
1340 break;
1341 case OP_SBBL:
1342 if (s1->cc_op != CC_OP_DYNAMIC)
1343 gen_op_set_cc_op(s1->cc_op);
1344 gen_compute_eflags_c(cpu_tmp4);
1345 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1346 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1347 if (d != OR_TMP0)
1348 gen_op_mov_reg_T0(ot, d);
1349 else
1350 gen_op_st_T0_A0(ot + s1->mem_index);
1351 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1352 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1353 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1354 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1355 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot);
1356 s1->cc_op = CC_OP_DYNAMIC;
1357 break;
1358 case OP_ADDL:
1359 gen_op_addl_T0_T1();
1360 if (d != OR_TMP0)
1361 gen_op_mov_reg_T0(ot, d);
1362 else
1363 gen_op_st_T0_A0(ot + s1->mem_index);
1364 gen_op_update2_cc();
1365 s1->cc_op = CC_OP_ADDB + ot;
1366 break;
1367 case OP_SUBL:
1368 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1369 if (d != OR_TMP0)
1370 gen_op_mov_reg_T0(ot, d);
1371 else
1372 gen_op_st_T0_A0(ot + s1->mem_index);
1373 gen_op_update2_cc();
1374 s1->cc_op = CC_OP_SUBB + ot;
1375 break;
1376 default:
1377 case OP_ANDL:
1378 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1379 if (d != OR_TMP0)
1380 gen_op_mov_reg_T0(ot, d);
1381 else
1382 gen_op_st_T0_A0(ot + s1->mem_index);
1383 gen_op_update1_cc();
1384 s1->cc_op = CC_OP_LOGICB + ot;
1385 break;
1386 case OP_ORL:
1387 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1388 if (d != OR_TMP0)
1389 gen_op_mov_reg_T0(ot, d);
1390 else
1391 gen_op_st_T0_A0(ot + s1->mem_index);
1392 gen_op_update1_cc();
1393 s1->cc_op = CC_OP_LOGICB + ot;
1394 break;
1395 case OP_XORL:
1396 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1397 if (d != OR_TMP0)
1398 gen_op_mov_reg_T0(ot, d);
1399 else
1400 gen_op_st_T0_A0(ot + s1->mem_index);
1401 gen_op_update1_cc();
1402 s1->cc_op = CC_OP_LOGICB + ot;
1403 break;
1404 case OP_CMPL:
1405 gen_op_cmpl_T0_T1_cc();
1406 s1->cc_op = CC_OP_SUBB + ot;
1407 break;
1408 }
1409 }
1410
1411 /* if d == OR_TMP0, it means memory operand (address in A0) */
1412 static void gen_inc(DisasContext *s1, int ot, int d, int c)
1413 {
1414 if (d != OR_TMP0)
1415 gen_op_mov_TN_reg(ot, 0, d);
1416 else
1417 gen_op_ld_T0_A0(ot + s1->mem_index);
1418 if (s1->cc_op != CC_OP_DYNAMIC)
1419 gen_op_set_cc_op(s1->cc_op);
1420 if (c > 0) {
1421 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
1422 s1->cc_op = CC_OP_INCB + ot;
1423 } else {
1424 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
1425 s1->cc_op = CC_OP_DECB + ot;
1426 }
1427 if (d != OR_TMP0)
1428 gen_op_mov_reg_T0(ot, d);
1429 else
1430 gen_op_st_T0_A0(ot + s1->mem_index);
1431 gen_compute_eflags_c(cpu_cc_src);
1432 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1433 }
1434
1435 static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
1436 int is_right, int is_arith)
1437 {
1438 target_ulong mask;
1439 int shift_label;
1440
1441 if (ot == OT_QUAD)
1442 mask = 0x3f;
1443 else
1444 mask = 0x1f;
1445
1446 /* load */
1447 if (op1 == OR_TMP0)
1448 gen_op_ld_T0_A0(ot + s->mem_index);
1449 else
1450 gen_op_mov_TN_reg(ot, 0, op1);
1451
1452 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
1453
1454 tcg_gen_addi_tl(cpu_tmp5, cpu_T[1], -1);
1455
1456 if (is_right) {
1457 if (is_arith) {
1458 gen_exts(ot, cpu_T[0]);
1459 tcg_gen_sar_tl(cpu_T3, cpu_T[0], cpu_tmp5);
1460 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1461 } else {
1462 gen_extu(ot, cpu_T[0]);
1463 tcg_gen_shr_tl(cpu_T3, cpu_T[0], cpu_tmp5);
1464 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1465 }
1466 } else {
1467 tcg_gen_shl_tl(cpu_T3, cpu_T[0], cpu_tmp5);
1468 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1469 }
1470
1471 /* store */
1472 if (op1 == OR_TMP0)
1473 gen_op_st_T0_A0(ot + s->mem_index);
1474 else
1475 gen_op_mov_reg_T0(ot, op1);
1476
1477 /* update eflags if non zero shift */
1478 if (s->cc_op != CC_OP_DYNAMIC)
1479 gen_op_set_cc_op(s->cc_op);
1480
1481 shift_label = gen_new_label();
1482 tcg_gen_brcond_tl(TCG_COND_EQ, cpu_T[1], tcg_const_tl(0), shift_label);
1483
1484 tcg_gen_mov_tl(cpu_cc_src, cpu_T3);
1485 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1486 if (is_right)
1487 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
1488 else
1489 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
1490
1491 gen_set_label(shift_label);
1492 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1493 }
1494
1495 static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
1496 int is_right, int is_arith)
1497 {
1498 int mask;
1499
1500 if (ot == OT_QUAD)
1501 mask = 0x3f;
1502 else
1503 mask = 0x1f;
1504
1505 /* load */
1506 if (op1 == OR_TMP0)
1507 gen_op_ld_T0_A0(ot + s->mem_index);
1508 else
1509 gen_op_mov_TN_reg(ot, 0, op1);
1510
1511 op2 &= mask;
1512 if (op2 != 0) {
1513 if (is_right) {
1514 if (is_arith) {
1515 gen_exts(ot, cpu_T[0]);
1516 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], op2 - 1);
1517 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
1518 } else {
1519 gen_extu(ot, cpu_T[0]);
1520 tcg_gen_shri_tl(cpu_tmp0, cpu_T[0], op2 - 1);
1521 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
1522 }
1523 } else {
1524 tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], op2 - 1);
1525 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
1526 }
1527 }
1528
1529 /* store */
1530 if (op1 == OR_TMP0)
1531 gen_op_st_T0_A0(ot + s->mem_index);
1532 else
1533 gen_op_mov_reg_T0(ot, op1);
1534
1535 /* update eflags if non zero shift */
1536 if (op2 != 0) {
1537 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp0);
1538 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1539 if (is_right)
1540 s->cc_op = CC_OP_SARB + ot;
1541 else
1542 s->cc_op = CC_OP_SHLB + ot;
1543 }
1544 }
1545
1546 static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
1547 {
1548 if (arg2 >= 0)
1549 tcg_gen_shli_tl(ret, arg1, arg2);
1550 else
1551 tcg_gen_shri_tl(ret, arg1, -arg2);
1552 }
1553
1554 /* XXX: add faster immediate case */
1555 static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
1556 int is_right)
1557 {
1558 target_ulong mask;
1559 int label1, label2, data_bits;
1560
1561 if (ot == OT_QUAD)
1562 mask = 0x3f;
1563 else
1564 mask = 0x1f;
1565
1566 /* load */
1567 if (op1 == OR_TMP0)
1568 gen_op_ld_T0_A0(ot + s->mem_index);
1569 else
1570 gen_op_mov_TN_reg(ot, 0, op1);
1571
1572 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
1573
1574 /* Must test zero case to avoid using undefined behaviour in TCG
1575 shifts. */
1576 label1 = gen_new_label();
1577 tcg_gen_brcond_tl(TCG_COND_EQ, cpu_T[1], tcg_const_tl(0), label1);
1578
1579 if (ot <= OT_WORD)
1580 tcg_gen_andi_tl(cpu_tmp0, cpu_T[1], (1 << (3 + ot)) - 1);
1581 else
1582 tcg_gen_mov_tl(cpu_tmp0, cpu_T[1]);
1583
1584 gen_extu(ot, cpu_T[0]);
1585 tcg_gen_mov_tl(cpu_T3, cpu_T[0]);
1586
1587 data_bits = 8 << ot;
1588 /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
1589 fix TCG definition) */
1590 if (is_right) {
1591 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_tmp0);
1592 tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(data_bits), cpu_tmp0);
1593 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
1594 } else {
1595 tcg_gen_shl_tl(cpu_tmp4, cpu_T[0], cpu_tmp0);
1596 tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(data_bits), cpu_tmp0);
1597 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
1598 }
1599 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1600
1601 gen_set_label(label1);
1602 /* store */
1603 if (op1 == OR_TMP0)
1604 gen_op_st_T0_A0(ot + s->mem_index);
1605 else
1606 gen_op_mov_reg_T0(ot, op1);
1607
1608 /* update eflags */
1609 if (s->cc_op != CC_OP_DYNAMIC)
1610 gen_op_set_cc_op(s->cc_op);
1611
1612 label2 = gen_new_label();
1613 tcg_gen_brcond_tl(TCG_COND_EQ, cpu_T[1], tcg_const_tl(0), label2);
1614
1615 gen_compute_eflags(cpu_cc_src);
1616 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1617 tcg_gen_xor_tl(cpu_tmp0, cpu_T3, cpu_T[0]);
1618 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1619 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1620 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1621 if (is_right) {
1622 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], data_bits - 1);
1623 }
1624 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_C);
1625 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
1626
1627 tcg_gen_discard_tl(cpu_cc_dst);
1628 tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
1629
1630 gen_set_label(label2);
1631 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1632 }
1633
1634 static void *helper_rotc[8] = {
1635 helper_rclb,
1636 helper_rclw,
1637 helper_rcll,
1638 X86_64_ONLY(helper_rclq),
1639 helper_rcrb,
1640 helper_rcrw,
1641 helper_rcrl,
1642 X86_64_ONLY(helper_rcrq),
1643 };
1644
1645 /* XXX: add faster immediate = 1 case */
1646 static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
1647 int is_right)
1648 {
1649 int label1;
1650
1651 if (s->cc_op != CC_OP_DYNAMIC)
1652 gen_op_set_cc_op(s->cc_op);
1653
1654 /* load */
1655 if (op1 == OR_TMP0)
1656 gen_op_ld_T0_A0(ot + s->mem_index);
1657 else
1658 gen_op_mov_TN_reg(ot, 0, op1);
1659
1660 tcg_gen_helper_1_2(helper_rotc[ot + (is_right * 4)],
1661 cpu_T[0], cpu_T[0], cpu_T[1]);
1662 /* store */
1663 if (op1 == OR_TMP0)
1664 gen_op_st_T0_A0(ot + s->mem_index);
1665 else
1666 gen_op_mov_reg_T0(ot, op1);
1667
1668 /* update eflags */
1669 label1 = gen_new_label();
1670 tcg_gen_brcond_tl(TCG_COND_EQ, cpu_T3, tcg_const_tl(-1), label1);
1671
1672 tcg_gen_mov_tl(cpu_cc_src, cpu_T3);
1673 tcg_gen_discard_tl(cpu_cc_dst);
1674 tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
1675
1676 gen_set_label(label1);
1677 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1678 }
1679
1680 /* XXX: add faster immediate case */
1681 static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
1682 int is_right)
1683 {
1684 int label1, label2, data_bits;
1685 target_ulong mask;
1686
1687 if (ot == OT_QUAD)
1688 mask = 0x3f;
1689 else
1690 mask = 0x1f;
1691
1692 /* load */
1693 if (op1 == OR_TMP0)
1694 gen_op_ld_T0_A0(ot + s->mem_index);
1695 else
1696 gen_op_mov_TN_reg(ot, 0, op1);
1697
1698 tcg_gen_andi_tl(cpu_T3, cpu_T3, mask);
1699 /* Must test zero case to avoid using undefined behaviour in TCG
1700 shifts. */
1701 label1 = gen_new_label();
1702 tcg_gen_brcond_tl(TCG_COND_EQ, cpu_T3, tcg_const_tl(0), label1);
1703
1704 tcg_gen_addi_tl(cpu_tmp5, cpu_T3, -1);
1705 if (ot == OT_WORD) {
1706 /* Note: we implement the Intel behaviour for shift count > 16 */
1707 if (is_right) {
1708 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
1709 tcg_gen_shli_tl(cpu_tmp0, cpu_T[1], 16);
1710 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
1711 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
1712
1713 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_tmp5);
1714
1715 /* only needed if count > 16, but a test would complicate */
1716 tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(32), cpu_T3);
1717 tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp5);
1718
1719 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T3);
1720
1721 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
1722 } else {
1723 /* XXX: not optimal */
1724 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
1725 tcg_gen_shli_tl(cpu_T[1], cpu_T[1], 16);
1726 tcg_gen_or_tl(cpu_T[1], cpu_T[1], cpu_T[0]);
1727 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
1728
1729 tcg_gen_shl_tl(cpu_tmp4, cpu_T[0], cpu_tmp5);
1730 tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(32), cpu_tmp5);
1731 tcg_gen_shr_tl(cpu_tmp6, cpu_T[1], cpu_tmp0);
1732 tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp6);
1733
1734 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T3);
1735 tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(32), cpu_T3);
1736 tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp5);
1737 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1738 }
1739 } else {
1740 data_bits = 8 << ot;
1741 if (is_right) {
1742 if (ot == OT_LONG)
1743 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
1744
1745 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_tmp5);
1746
1747 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T3);
1748 tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), cpu_T3);
1749 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_tmp5);
1750 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1751
1752 } else {
1753 if (ot == OT_LONG)
1754 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
1755
1756 tcg_gen_shl_tl(cpu_tmp4, cpu_T[0], cpu_tmp5);
1757
1758 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T3);
1759 tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), cpu_T3);
1760 tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp5);
1761 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1762 }
1763 }
1764 tcg_gen_mov_tl(cpu_T[1], cpu_tmp4);
1765
1766 gen_set_label(label1);
1767 /* store */
1768 if (op1 == OR_TMP0)
1769 gen_op_st_T0_A0(ot + s->mem_index);
1770 else
1771 gen_op_mov_reg_T0(ot, op1);
1772
1773 /* update eflags */
1774 if (s->cc_op != CC_OP_DYNAMIC)
1775 gen_op_set_cc_op(s->cc_op);
1776
1777 label2 = gen_new_label();
1778 tcg_gen_brcond_tl(TCG_COND_EQ, cpu_T3, tcg_const_tl(0), label2);
1779
1780 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1781 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1782 if (is_right) {
1783 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
1784 } else {
1785 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
1786 }
1787 gen_set_label(label2);
1788 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1789 }
1790
1791 static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
1792 {
1793 if (s != OR_TMP1)
1794 gen_op_mov_TN_reg(ot, 1, s);
1795 switch(op) {
1796 case OP_ROL:
1797 gen_rot_rm_T1(s1, ot, d, 0);
1798 break;
1799 case OP_ROR:
1800 gen_rot_rm_T1(s1, ot, d, 1);
1801 break;
1802 case OP_SHL:
1803 case OP_SHL1:
1804 gen_shift_rm_T1(s1, ot, d, 0, 0);
1805 break;
1806 case OP_SHR:
1807 gen_shift_rm_T1(s1, ot, d, 1, 0);
1808 break;
1809 case OP_SAR:
1810 gen_shift_rm_T1(s1, ot, d, 1, 1);
1811 break;
1812 case OP_RCL:
1813 gen_rotc_rm_T1(s1, ot, d, 0);
1814 break;
1815 case OP_RCR:
1816 gen_rotc_rm_T1(s1, ot, d, 1);
1817 break;
1818 }
1819 }
1820
1821 static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
1822 {
1823 switch(op) {
1824 case OP_SHL:
1825 case OP_SHL1:
1826 gen_shift_rm_im(s1, ot, d, c, 0, 0);
1827 break;
1828 case OP_SHR:
1829 gen_shift_rm_im(s1, ot, d, c, 1, 0);
1830 break;
1831 case OP_SAR:
1832 gen_shift_rm_im(s1, ot, d, c, 1, 1);
1833 break;
1834 default:
1835 /* currently not optimized */
1836 gen_op_movl_T1_im(c);
1837 gen_shift(s1, op, ot, d, OR_TMP1);
1838 break;
1839 }
1840 }
1841
1842 static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr)
1843 {
1844 target_long disp;
1845 int havesib;
1846 int base;
1847 int index;
1848 int scale;
1849 int opreg;
1850 int mod, rm, code, override, must_add_seg;
1851
1852 override = s->override;
1853 must_add_seg = s->addseg;
1854 if (override >= 0)
1855 must_add_seg = 1;
1856 mod = (modrm >> 6) & 3;
1857 rm = modrm & 7;
1858
1859 if (s->aflag) {
1860
1861 havesib = 0;
1862 base = rm;
1863 index = 0;
1864 scale = 0;
1865
1866 if (base == 4) {
1867 havesib = 1;
1868 code = ldub_code(s->pc++);
1869 scale = (code >> 6) & 3;
1870 index = ((code >> 3) & 7) | REX_X(s);
1871 base = (code & 7);
1872 }
1873 base |= REX_B(s);
1874
1875 switch (mod) {
1876 case 0:
1877 if ((base & 7) == 5) {
1878 base = -1;
1879 disp = (int32_t)ldl_code(s->pc);
1880 s->pc += 4;
1881 if (CODE64(s) && !havesib) {
1882 disp += s->pc + s->rip_offset;
1883 }
1884 } else {
1885 disp = 0;
1886 }
1887 break;
1888 case 1:
1889 disp = (int8_t)ldub_code(s->pc++);
1890 break;
1891 default:
1892 case 2:
1893 disp = ldl_code(s->pc);
1894 s->pc += 4;
1895 break;
1896 }
1897
1898 if (base >= 0) {
1899 /* for correct popl handling with esp */
1900 if (base == 4 && s->popl_esp_hack)
1901 disp += s->popl_esp_hack;
1902 #ifdef TARGET_X86_64
1903 if (s->aflag == 2) {
1904 gen_op_movq_A0_reg(base);
1905 if (disp != 0) {
1906 gen_op_addq_A0_im(disp);
1907 }
1908 } else
1909 #endif
1910 {
1911 gen_op_movl_A0_reg(base);
1912 if (disp != 0)
1913 gen_op_addl_A0_im(disp);
1914 }
1915 } else {
1916 #ifdef TARGET_X86_64
1917 if (s->aflag == 2) {
1918 gen_op_movq_A0_im(disp);
1919 } else
1920 #endif
1921 {
1922 gen_op_movl_A0_im(disp);
1923 }
1924 }
1925 /* XXX: index == 4 is always invalid */
1926 if (havesib && (index != 4 || scale != 0)) {
1927 #ifdef TARGET_X86_64
1928 if (s->aflag == 2) {
1929 gen_op_addq_A0_reg_sN(scale, index);
1930 } else
1931 #endif
1932 {
1933 gen_op_addl_A0_reg_sN(scale, index);
1934 }
1935 }
1936 if (must_add_seg) {
1937 if (override < 0) {
1938 if (base == R_EBP || base == R_ESP)
1939 override = R_SS;
1940 else
1941 override = R_DS;
1942 }
1943 #ifdef TARGET_X86_64
1944 if (s->aflag == 2) {
1945 gen_op_addq_A0_seg(override);
1946 } else
1947 #endif
1948 {
1949 gen_op_addl_A0_seg(override);
1950 }
1951 }
1952 } else {
1953 switch (mod) {
1954 case 0:
1955 if (rm == 6) {
1956 disp = lduw_code(s->pc);
1957 s->pc += 2;
1958 gen_op_movl_A0_im(disp);
1959 rm = 0; /* avoid SS override */
1960 goto no_rm;
1961 } else {
1962 disp = 0;
1963 }
1964 break;
1965 case 1:
1966 disp = (int8_t)ldub_code(s->pc++);
1967 break;
1968 default:
1969 case 2:
1970 disp = lduw_code(s->pc);
1971 s->pc += 2;
1972 break;
1973 }
1974 switch(rm) {
1975 case 0:
1976 gen_op_movl_A0_reg(R_EBX);
1977 gen_op_addl_A0_reg_sN(0, R_ESI);
1978 break;
1979 case 1:
1980 gen_op_movl_A0_reg(R_EBX);
1981 gen_op_addl_A0_reg_sN(0, R_EDI);
1982 break;
1983 case 2:
1984 gen_op_movl_A0_reg(R_EBP);
1985 gen_op_addl_A0_reg_sN(0, R_ESI);
1986 break;
1987 case 3:
1988 gen_op_movl_A0_reg(R_EBP);
1989 gen_op_addl_A0_reg_sN(0, R_EDI);
1990 break;
1991 case 4:
1992 gen_op_movl_A0_reg(R_ESI);
1993 break;
1994 case 5:
1995 gen_op_movl_A0_reg(R_EDI);
1996 break;
1997 case 6:
1998 gen_op_movl_A0_reg(R_EBP);
1999 break;
2000 default:
2001 case 7:
2002 gen_op_movl_A0_reg(R_EBX);
2003 break;
2004 }
2005 if (disp != 0)
2006 gen_op_addl_A0_im(disp);
2007 gen_op_andl_A0_ffff();
2008 no_rm:
2009 if (must_add_seg) {
2010 if (override < 0) {
2011 if (rm == 2 || rm == 3 || rm == 6)
2012 override = R_SS;
2013 else
2014 override = R_DS;
2015 }
2016 gen_op_addl_A0_seg(override);
2017 }
2018 }
2019
2020 opreg = OR_A0;
2021 disp = 0;
2022 *reg_ptr = opreg;
2023 *offset_ptr = disp;
2024 }
2025
2026 static void gen_nop_modrm(DisasContext *s, int modrm)
2027 {
2028 int mod, rm, base, code;
2029
2030 mod = (modrm >> 6) & 3;
2031 if (mod == 3)
2032 return;
2033 rm = modrm & 7;
2034
2035 if (s->aflag) {
2036
2037 base = rm;
2038
2039 if (base == 4) {
2040 code = ldub_code(s->pc++);
2041 base = (code & 7);
2042 }
2043
2044 switch (mod) {
2045 case 0:
2046 if (base == 5) {
2047 s->pc += 4;
2048 }
2049 break;
2050 case 1:
2051 s->pc++;
2052 break;
2053 default:
2054 case 2:
2055 s->pc += 4;
2056 break;
2057 }
2058 } else {
2059 switch (mod) {
2060 case 0:
2061 if (rm == 6) {
2062 s->pc += 2;
2063 }
2064 break;
2065 case 1:
2066 s->pc++;
2067 break;
2068 default:
2069 case 2:
2070 s->pc += 2;
2071 break;
2072 }
2073 }
2074 }
2075
2076 /* used for LEA and MOV AX, mem */
2077 static void gen_add_A0_ds_seg(DisasContext *s)
2078 {
2079 int override, must_add_seg;
2080 must_add_seg = s->addseg;
2081 override = R_DS;
2082 if (s->override >= 0) {
2083 override = s->override;
2084 must_add_seg = 1;
2085 } else {
2086 override = R_DS;
2087 }
2088 if (must_add_seg) {
2089 #ifdef TARGET_X86_64
2090 if (CODE64(s)) {
2091 gen_op_addq_A0_seg(override);
2092 } else
2093 #endif
2094 {
2095 gen_op_addl_A0_seg(override);
2096 }
2097 }
2098 }
2099
2100 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg !=
2101 OR_TMP0 */
2102 static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
2103 {
2104 int mod, rm, opreg, disp;
2105
2106 mod = (modrm >> 6) & 3;
2107 rm = (modrm & 7) | REX_B(s);
2108 if (mod == 3) {
2109 if (is_store) {
2110 if (reg != OR_TMP0)
2111 gen_op_mov_TN_reg(ot, 0, reg);
2112 gen_op_mov_reg_T0(ot, rm);
2113 } else {
2114 gen_op_mov_TN_reg(ot, 0, rm);
2115 if (reg != OR_TMP0)
2116 gen_op_mov_reg_T0(ot, reg);
2117 }
2118 } else {
2119 gen_lea_modrm(s, modrm, &opreg, &disp);
2120 if (is_store) {
2121 if (reg != OR_TMP0)
2122 gen_op_mov_TN_reg(ot, 0, reg);
2123 gen_op_st_T0_A0(ot + s->mem_index);
2124 } else {
2125 gen_op_ld_T0_A0(ot + s->mem_index);
2126 if (reg != OR_TMP0)
2127 gen_op_mov_reg_T0(ot, reg);
2128 }
2129 }
2130 }
2131
2132 static inline uint32_t insn_get(DisasContext *s, int ot)
2133 {
2134 uint32_t ret;
2135
2136 switch(ot) {
2137 case OT_BYTE:
2138 ret = ldub_code(s->pc);
2139 s->pc++;
2140 break;
2141 case OT_WORD:
2142 ret = lduw_code(s->pc);
2143 s->pc += 2;
2144 break;
2145 default:
2146 case OT_LONG:
2147 ret = ldl_code(s->pc);
2148 s->pc += 4;
2149 break;
2150 }
2151 return ret;
2152 }
2153
2154 static inline int insn_const_size(unsigned int ot)
2155 {
2156 if (ot <= OT_LONG)
2157 return 1 << ot;
2158 else
2159 return 4;
2160 }
2161
2162 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2163 {
2164 TranslationBlock *tb;
2165 target_ulong pc;
2166
2167 pc = s->cs_base + eip;
2168 tb = s->tb;
2169 /* NOTE: we handle the case where the TB spans two pages here */
2170 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2171 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2172 /* jump to same page: we can use a direct jump */
2173 tcg_gen_goto_tb(tb_num);
2174 gen_jmp_im(eip);
2175 tcg_gen_exit_tb((long)tb + tb_num);
2176 } else {
2177 /* jump to another page: currently not optimized */
2178 gen_jmp_im(eip);
2179 gen_eob(s);
2180 }
2181 }
2182
2183 static inline void gen_jcc(DisasContext *s, int b,
2184 target_ulong val, target_ulong next_eip)
2185 {
2186 int l1, l2, cc_op;
2187
2188 cc_op = s->cc_op;
2189 if (s->cc_op != CC_OP_DYNAMIC) {
2190 gen_op_set_cc_op(s->cc_op);
2191 s->cc_op = CC_OP_DYNAMIC;
2192 }
2193 if (s->jmp_opt) {
2194 l1 = gen_new_label();
2195 gen_jcc1(s, cc_op, b, l1);
2196
2197 gen_goto_tb(s, 0, next_eip);
2198
2199 gen_set_label(l1);
2200 gen_goto_tb(s, 1, val);
2201 s->is_jmp = 3;
2202 } else {
2203
2204 l1 = gen_new_label();
2205 l2 = gen_new_label();
2206 gen_jcc1(s, cc_op, b, l1);
2207
2208 gen_jmp_im(next_eip);
2209 tcg_gen_br(l2);
2210
2211 gen_set_label(l1);
2212 gen_jmp_im(val);
2213 gen_set_label(l2);
2214 gen_eob(s);
2215 }
2216 }
2217
2218 static void gen_setcc(DisasContext *s, int b)
2219 {
2220 int inv, jcc_op, l1;
2221
2222 if (is_fast_jcc_case(s, b)) {
2223 /* nominal case: we use a jump */
2224 tcg_gen_movi_tl(cpu_T[0], 0);
2225 l1 = gen_new_label();
2226 gen_jcc1(s, s->cc_op, b ^ 1, l1);
2227 tcg_gen_movi_tl(cpu_T[0], 1);
2228 gen_set_label(l1);
2229 } else {
2230 /* slow case: it is more efficient not to generate a jump,
2231 although it is questionnable whether this optimization is
2232 worth to */
2233 inv = b & 1;
2234 jcc_op = (b >> 1) & 7;
2235 if (s->cc_op != CC_OP_DYNAMIC)
2236 gen_op_set_cc_op(s->cc_op);
2237 gen_setcc_slow_T0(jcc_op);
2238 if (inv) {
2239 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1);
2240 }
2241 }
2242 }
2243
2244 static inline void gen_op_movl_T0_seg(int seg_reg)
2245 {
2246 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2247 offsetof(CPUX86State,segs[seg_reg].selector));
2248 }
2249
2250 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2251 {
2252 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2253 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2254 offsetof(CPUX86State,segs[seg_reg].selector));
2255 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2256 tcg_gen_st_tl(cpu_T[0], cpu_env,
2257 offsetof(CPUX86State,segs[seg_reg].base));
2258 }
2259
2260 /* move T0 to seg_reg and compute if the CPU state may change. Never
2261 call this function with seg_reg == R_CS */
2262 static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2263 {
2264 if (s->pe && !s->vm86) {
2265 /* XXX: optimize by finding processor state dynamically */
2266 if (s->cc_op != CC_OP_DYNAMIC)
2267 gen_op_set_cc_op(s->cc_op);
2268 gen_jmp_im(cur_eip);
2269 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2270 tcg_gen_helper_0_2(helper_load_seg, tcg_const_i32(seg_reg), cpu_tmp2_i32);
2271 /* abort translation because the addseg value may change or
2272 because ss32 may change. For R_SS, translation must always
2273 stop as a special handling must be done to disable hardware
2274 interrupts for the next instruction */
2275 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2276 s->is_jmp = 3;
2277 } else {
2278 gen_op_movl_seg_T0_vm(seg_reg);
2279 if (seg_reg == R_SS)
2280 s->is_jmp = 3;
2281 }
2282 }
2283
2284 static inline int svm_is_rep(int prefixes)
2285 {
2286 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2287 }
2288
2289 static inline int
2290 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2291 uint32_t type, uint64_t param)
2292 {
2293 if(!(s->flags & (INTERCEPT_SVM_MASK)))
2294 /* no SVM activated */
2295 return 0;
2296 switch(type) {
2297 /* CRx and DRx reads/writes */
2298 case SVM_EXIT_READ_CR0 ... SVM_EXIT_EXCP_BASE - 1:
2299 if (s->cc_op != CC_OP_DYNAMIC) {
2300 gen_op_set_cc_op(s->cc_op);
2301 }
2302 gen_jmp_im(pc_start - s->cs_base);
2303 tcg_gen_helper_0_2(helper_svm_check_intercept_param,
2304 tcg_const_i32(type), tcg_const_i64(param));
2305 /* this is a special case as we do not know if the interception occurs
2306 so we assume there was none */
2307 return 0;
2308 case SVM_EXIT_MSR:
2309 if(s->flags & (1ULL << INTERCEPT_MSR_PROT)) {
2310 if (s->cc_op != CC_OP_DYNAMIC) {
2311 gen_op_set_cc_op(s->cc_op);
2312 }
2313 gen_jmp_im(pc_start - s->cs_base);
2314 tcg_gen_helper_0_2(helper_svm_check_intercept_param,
2315 tcg_const_i32(type), tcg_const_i64(param));
2316 /* this is a special case as we do not know if the interception occurs
2317 so we assume there was none */
2318 return 0;
2319 }
2320 break;
2321 default:
2322 if(s->flags & (1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR))) {
2323 if (s->cc_op != CC_OP_DYNAMIC) {
2324 gen_op_set_cc_op(s->cc_op);
2325 }
2326 gen_jmp_im(pc_start - s->cs_base);
2327 tcg_gen_helper_0_2(helper_vmexit,
2328 tcg_const_i32(type), tcg_const_i64(param));
2329 /* we can optimize this one so TBs don't get longer
2330 than up to vmexit */
2331 gen_eob(s);
2332 return 1;
2333 }
2334 }
2335 return 0;
2336 }
2337
2338 static inline int
2339 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2340 {
2341 return gen_svm_check_intercept_param(s, pc_start, type, 0);
2342 }
2343
2344 static inline void gen_stack_update(DisasContext *s, int addend)
2345 {
2346 #ifdef TARGET_X86_64
2347 if (CODE64(s)) {
2348 gen_op_add_reg_im(2, R_ESP, addend);
2349 } else
2350 #endif
2351 if (s->ss32) {
2352 gen_op_add_reg_im(1, R_ESP, addend);
2353 } else {
2354 gen_op_add_reg_im(0, R_ESP, addend);
2355 }
2356 }
2357
2358 /* generate a push. It depends on ss32, addseg and dflag */
2359 static void gen_push_T0(DisasContext *s)
2360 {
2361 #ifdef TARGET_X86_64
2362 if (CODE64(s)) {
2363 gen_op_movq_A0_reg(R_ESP);
2364 if (s->dflag) {
2365 gen_op_addq_A0_im(-8);
2366 gen_op_st_T0_A0(OT_QUAD + s->mem_index);
2367 } else {
2368 gen_op_addq_A0_im(-2);
2369 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2370 }
2371 gen_op_mov_reg_A0(2, R_ESP);
2372 } else
2373 #endif
2374 {
2375 gen_op_movl_A0_reg(R_ESP);
2376 if (!s->dflag)
2377 gen_op_addl_A0_im(-2);
2378 else
2379 gen_op_addl_A0_im(-4);
2380 if (s->ss32) {
2381 if (s->addseg) {
2382 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2383 gen_op_addl_A0_seg(R_SS);
2384 }
2385 } else {
2386 gen_op_andl_A0_ffff();
2387 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2388 gen_op_addl_A0_seg(R_SS);
2389 }
2390 gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
2391 if (s->ss32 && !s->addseg)
2392 gen_op_mov_reg_A0(1, R_ESP);
2393 else
2394 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
2395 }
2396 }
2397
2398 /* generate a push. It depends on ss32, addseg and dflag */
2399 /* slower version for T1, only used for call Ev */
2400 static void gen_push_T1(DisasContext *s)
2401 {
2402 #ifdef TARGET_X86_64
2403 if (CODE64(s)) {
2404 gen_op_movq_A0_reg(R_ESP);
2405 if (s->dflag) {
2406 gen_op_addq_A0_im(-8);
2407 gen_op_st_T1_A0(OT_QUAD + s->mem_index);
2408 } else {
2409 gen_op_addq_A0_im(-2);
2410 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2411 }
2412 gen_op_mov_reg_A0(2, R_ESP);
2413 } else
2414 #endif
2415 {
2416 gen_op_movl_A0_reg(R_ESP);
2417 if (!s->dflag)
2418 gen_op_addl_A0_im(-2);
2419 else
2420 gen_op_addl_A0_im(-4);
2421 if (s->ss32) {
2422 if (s->addseg) {
2423 gen_op_addl_A0_seg(R_SS);
2424 }
2425 } else {
2426 gen_op_andl_A0_ffff();
2427 gen_op_addl_A0_seg(R_SS);
2428 }
2429 gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
2430
2431 if (s->ss32 && !s->addseg)
2432 gen_op_mov_reg_A0(1, R_ESP);
2433 else
2434 gen_stack_update(s, (-2) << s->dflag);
2435 }
2436 }
2437
2438 /* two step pop is necessary for precise exceptions */
2439 static void gen_pop_T0(DisasContext *s)
2440 {
2441 #ifdef TARGET_X86_64
2442 if (CODE64(s)) {
2443 gen_op_movq_A0_reg(R_ESP);
2444 gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
2445 } else
2446 #endif
2447 {
2448 gen_op_movl_A0_reg(R_ESP);
2449 if (s->ss32) {
2450 if (s->addseg)
2451 gen_op_addl_A0_seg(R_SS);
2452 } else {
2453 gen_op_andl_A0_ffff();
2454 gen_op_addl_A0_seg(R_SS);
2455 }
2456 gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
2457 }
2458 }
2459
2460 static void gen_pop_update(DisasContext *s)
2461 {
2462 #ifdef TARGET_X86_64
2463 if (CODE64(s) && s->dflag) {
2464 gen_stack_update(s, 8);
2465 } else
2466 #endif
2467 {
2468 gen_stack_update(s, 2 << s->dflag);
2469 }
2470 }
2471
2472 static void gen_stack_A0(DisasContext *s)
2473 {
2474 gen_op_movl_A0_reg(R_ESP);
2475 if (!s->ss32)
2476 gen_op_andl_A0_ffff();
2477 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2478 if (s->addseg)
2479 gen_op_addl_A0_seg(R_SS);
2480 }
2481
2482 /* NOTE: wrap around in 16 bit not fully handled */
2483 static void gen_pusha(DisasContext *s)
2484 {
2485 int i;
2486 gen_op_movl_A0_reg(R_ESP);
2487 gen_op_addl_A0_im(-16 << s->dflag);
2488 if (!s->ss32)
2489 gen_op_andl_A0_ffff();
2490 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2491 if (s->addseg)
2492 gen_op_addl_A0_seg(R_SS);
2493 for(i = 0;i < 8; i++) {
2494 gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
2495 gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
2496 gen_op_addl_A0_im(2 << s->dflag);
2497 }
2498 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2499 }
2500
2501 /* NOTE: wrap around in 16 bit not fully handled */
2502 static void gen_popa(DisasContext *s)
2503 {
2504 int i;
2505 gen_op_movl_A0_reg(R_ESP);
2506 if (!s->ss32)
2507 gen_op_andl_A0_ffff();
2508 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2509 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2510 if (s->addseg)
2511 gen_op_addl_A0_seg(R_SS);
2512 for(i = 0;i < 8; i++) {
2513 /* ESP is not reloaded */
2514 if (i != 3) {
2515 gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
2516 gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
2517 }
2518 gen_op_addl_A0_im(2 << s->dflag);
2519 }
2520 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2521 }
2522
2523 static void gen_enter(DisasContext *s, int esp_addend, int level)
2524 {
2525 int ot, opsize;
2526
2527 level &= 0x1f;
2528 #ifdef TARGET_X86_64
2529 if (CODE64(s)) {
2530 ot = s->dflag ? OT_QUAD : OT_WORD;
2531 opsize = 1 << ot;
2532
2533 gen_op_movl_A0_reg(R_ESP);
2534 gen_op_addq_A0_im(-opsize);
2535 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2536
2537 /* push bp */
2538 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2539 gen_op_st_T0_A0(ot + s->mem_index);
2540 if (level) {
2541 /* XXX: must save state */
2542 tcg_gen_helper_0_3(helper_enter64_level,
2543 tcg_const_i32(level),
2544 tcg_const_i32((ot == OT_QUAD)),
2545 cpu_T[1]);
2546 }
2547 gen_op_mov_reg_T1(ot, R_EBP);
2548 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2549 gen_op_mov_reg_T1(OT_QUAD, R_ESP);
2550 } else
2551 #endif
2552 {
2553 ot = s->dflag + OT_WORD;
2554 opsize = 2 << s->dflag;
2555
2556 gen_op_movl_A0_reg(R_ESP);
2557 gen_op_addl_A0_im(-opsize);
2558 if (!s->ss32)
2559 gen_op_andl_A0_ffff();
2560 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2561 if (s->addseg)
2562 gen_op_addl_A0_seg(R_SS);
2563 /* push bp */
2564 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2565 gen_op_st_T0_A0(ot + s->mem_index);
2566 if (level) {
2567 /* XXX: must save state */
2568 tcg_gen_helper_0_3(helper_enter_level,
2569 tcg_const_i32(level),
2570 tcg_const_i32(s->dflag),
2571 cpu_T[1]);
2572 }
2573 gen_op_mov_reg_T1(ot, R_EBP);
2574 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2575 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2576 }
2577 }
2578
2579 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2580 {
2581 if (s->cc_op != CC_OP_DYNAMIC)
2582 gen_op_set_cc_op(s->cc_op);
2583 gen_jmp_im(cur_eip);
2584 tcg_gen_helper_0_1(helper_raise_exception, tcg_const_i32(trapno));
2585 s->is_jmp = 3;
2586 }
2587
2588 /* an interrupt is different from an exception because of the
2589 privilege checks */
2590 static void gen_interrupt(DisasContext *s, int intno,
2591 target_ulong cur_eip, target_ulong next_eip)
2592 {
2593 if (s->cc_op != CC_OP_DYNAMIC)
2594 gen_op_set_cc_op(s->cc_op);
2595 gen_jmp_im(cur_eip);
2596 tcg_gen_helper_0_2(helper_raise_interrupt,
2597 tcg_const_i32(intno),
2598 tcg_const_i32(next_eip - cur_eip));
2599 s->is_jmp = 3;
2600 }
2601
2602 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2603 {
2604 if (s->cc_op != CC_OP_DYNAMIC)
2605 gen_op_set_cc_op(s->cc_op);
2606 gen_jmp_im(cur_eip);
2607 tcg_gen_helper_0_0(helper_debug);
2608 s->is_jmp = 3;
2609 }
2610
2611 /* generate a generic end of block. Trace exception is also generated
2612 if needed */
2613 static void gen_eob(DisasContext *s)
2614 {
2615 if (s->cc_op != CC_OP_DYNAMIC)
2616 gen_op_set_cc_op(s->cc_op);
2617 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2618 tcg_gen_helper_0_0(helper_reset_inhibit_irq);
2619 }
2620 if (s->singlestep_enabled) {
2621 tcg_gen_helper_0_0(helper_debug);
2622 } else if (s->tf) {
2623 tcg_gen_helper_0_0(helper_single_step);
2624 } else {
2625 tcg_gen_exit_tb(0);
2626 }
2627 s->is_jmp = 3;
2628 }
2629
2630 /* generate a jump to eip. No segment change must happen before as a
2631 direct call to the next block may occur */
2632 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2633 {
2634 if (s->jmp_opt) {
2635 if (s->cc_op != CC_OP_DYNAMIC) {
2636 gen_op_set_cc_op(s->cc_op);
2637 s->cc_op = CC_OP_DYNAMIC;
2638 }
2639 gen_goto_tb(s, tb_num, eip);
2640 s->is_jmp = 3;
2641 } else {
2642 gen_jmp_im(eip);
2643 gen_eob(s);
2644 }
2645 }
2646
2647 static void gen_jmp(DisasContext *s, target_ulong eip)
2648 {
2649 gen_jmp_tb(s, eip, 0);
2650 }
2651
2652 static inline void gen_ldq_env_A0(int idx, int offset)
2653 {
2654 int mem_index = (idx >> 2) - 1;
2655 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2656 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2657 }
2658
2659 static inline void gen_stq_env_A0(int idx, int offset)
2660 {
2661 int mem_index = (idx >> 2) - 1;
2662 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2663 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2664 }
2665
2666 static inline void gen_ldo_env_A0(int idx, int offset)
2667 {
2668 int mem_index = (idx >> 2) - 1;
2669 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2670 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2671 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2672 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2673 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2674 }
2675
2676 static inline void gen_sto_env_A0(int idx, int offset)
2677 {
2678 int mem_index = (idx >> 2) - 1;
2679 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2680 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2681 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2682 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2683 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2684 }
2685
2686 static inline void gen_op_movo(int d_offset, int s_offset)
2687 {
2688 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2689 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2690 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2691 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
2692 }
2693
2694 static inline void gen_op_movq(int d_offset, int s_offset)
2695 {
2696 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2697 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2698 }
2699
2700 static inline void gen_op_movl(int d_offset, int s_offset)
2701 {
2702 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2703 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2704 }
2705
2706 static inline void gen_op_movq_env_0(int d_offset)
2707 {
2708 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2709 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2710 }
2711
2712 #define SSE_SPECIAL ((void *)1)
2713 #define SSE_DUMMY ((void *)2)
2714
2715 #define MMX_OP2(x) { helper_ ## x ## _mmx, helper_ ## x ## _xmm }
2716 #define SSE_FOP(x) { helper_ ## x ## ps, helper_ ## x ## pd, \
2717 helper_ ## x ## ss, helper_ ## x ## sd, }
2718
2719 static void *sse_op_table1[256][4] = {
2720 /* 3DNow! extensions */
2721 [0x0e] = { SSE_DUMMY }, /* femms */
2722 [0x0f] = { SSE_DUMMY }, /* pf... */
2723 /* pure SSE operations */
2724 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2725 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2726 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2727 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2728 [0x14] = { helper_punpckldq_xmm, helper_punpcklqdq_xmm },
2729 [0x15] = { helper_punpckhdq_xmm, helper_punpckhqdq_xmm },
2730 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2731 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2732
2733 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2734 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2735 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2736 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd */
2737 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2738 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
2739 [0x2e] = { helper_ucomiss, helper_ucomisd },
2740 [0x2f] = { helper_comiss, helper_comisd },
2741 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2742 [0x51] = SSE_FOP(sqrt),
2743 [0x52] = { helper_rsqrtps, NULL, helper_rsqrtss, NULL },
2744 [0x53] = { helper_rcpps, NULL, helper_rcpss, NULL },
2745 [0x54] = { helper_pand_xmm, helper_pand_xmm }, /* andps, andpd */
2746 [0x55] = { helper_pandn_xmm, helper_pandn_xmm }, /* andnps, andnpd */
2747 [0x56] = { helper_por_xmm, helper_por_xmm }, /* orps, orpd */
2748 [0x57] = { helper_pxor_xmm, helper_pxor_xmm }, /* xorps, xorpd */
2749 [0x58] = SSE_FOP(add),
2750 [0x59] = SSE_FOP(mul),
2751 [0x5a] = { helper_cvtps2pd, helper_cvtpd2ps,
2752 helper_cvtss2sd, helper_cvtsd2ss },
2753 [0x5b] = { helper_cvtdq2ps, helper_cvtps2dq, helper_cvttps2dq },
2754 [0x5c] = SSE_FOP(sub),
2755 [0x5d] = SSE_FOP(min),
2756 [0x5e] = SSE_FOP(div),
2757 [0x5f] = SSE_FOP(max),
2758
2759 [0xc2] = SSE_FOP(cmpeq),
2760 [0xc6] = { helper_shufps, helper_shufpd },
2761
2762 /* MMX ops and their SSE extensions */
2763 [0x60] = MMX_OP2(punpcklbw),
2764 [0x61] = MMX_OP2(punpcklwd),
2765 [0x62] = MMX_OP2(punpckldq),
2766 [0x63] = MMX_OP2(packsswb),
2767 [0x64] = MMX_OP2(pcmpgtb),
2768 [0x65] = MMX_OP2(pcmpgtw),
2769 [0x66] = MMX_OP2(pcmpgtl),
2770 [0x67] = MMX_OP2(packuswb),
2771 [0x68] = MMX_OP2(punpckhbw),
2772 [0x69] = MMX_OP2(punpckhwd),
2773 [0x6a] = MMX_OP2(punpckhdq),
2774 [0x6b] = MMX_OP2(packssdw),
2775 [0x6c] = { NULL, helper_punpcklqdq_xmm },
2776 [0x6d] = { NULL, helper_punpckhqdq_xmm },
2777 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2778 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movqdu */
2779 [0x70] = { helper_pshufw_mmx,
2780 helper_pshufd_xmm,
2781 helper_pshufhw_xmm,
2782 helper_pshuflw_xmm },
2783 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2784 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2785 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2786 [0x74] = MMX_OP2(pcmpeqb),
2787 [0x75] = MMX_OP2(pcmpeqw),
2788 [0x76] = MMX_OP2(pcmpeql),
2789 [0x77] = { SSE_DUMMY }, /* emms */
2790 [0x7c] = { NULL, helper_haddpd, NULL, helper_haddps },
2791 [0x7d] = { NULL, helper_hsubpd, NULL, helper_hsubps },
2792 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
2793 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2794 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2795 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2796 [0xd0] = { NULL, helper_addsubpd, NULL, helper_addsubps },
2797 [0xd1] = MMX_OP2(psrlw),
2798 [0xd2] = MMX_OP2(psrld),
2799 [0xd3] = MMX_OP2(psrlq),
2800 [0xd4] = MMX_OP2(paddq),
2801 [0xd5] = MMX_OP2(pmullw),
2802 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2803 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2804 [0xd8] = MMX_OP2(psubusb),
2805 [0xd9] = MMX_OP2(psubusw),
2806 [0xda] = MMX_OP2(pminub),
2807 [0xdb] = MMX_OP2(pand),
2808 [0xdc] = MMX_OP2(paddusb),
2809 [0xdd] = MMX_OP2(paddusw),
2810 [0xde] = MMX_OP2(pmaxub),
2811 [0xdf] = MMX_OP2(pandn),
2812 [0xe0] = MMX_OP2(pavgb),
2813 [0xe1] = MMX_OP2(psraw),
2814 [0xe2] = MMX_OP2(psrad),
2815 [0xe3] = MMX_OP2(pavgw),
2816 [0xe4] = MMX_OP2(pmulhuw),
2817 [0xe5] = MMX_OP2(pmulhw),
2818 [0xe6] = { NULL, helper_cvttpd2dq, helper_cvtdq2pd, helper_cvtpd2dq },
2819 [0xe7] = { SSE_SPECIAL , SSE_SPECIAL }, /* movntq, movntq */
2820 [0xe8] = MMX_OP2(psubsb),
2821 [0xe9] = MMX_OP2(psubsw),
2822 [0xea] = MMX_OP2(pminsw),
2823 [0xeb] = MMX_OP2(por),
2824 [0xec] = MMX_OP2(paddsb),
2825 [0xed] = MMX_OP2(paddsw),
2826 [0xee] = MMX_OP2(pmaxsw),
2827 [0xef] = MMX_OP2(pxor),
2828 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2829 [0xf1] = MMX_OP2(psllw),
2830 [0xf2] = MMX_OP2(pslld),
2831 [0xf3] = MMX_OP2(psllq),
2832 [0xf4] = MMX_OP2(pmuludq),
2833 [0xf5] = MMX_OP2(pmaddwd),
2834 [0xf6] = MMX_OP2(psadbw),
2835 [0xf7] = MMX_OP2(maskmov),
2836 [0xf8] = MMX_OP2(psubb),
2837 [0xf9] = MMX_OP2(psubw),
2838 [0xfa] = MMX_OP2(psubl),
2839 [0xfb] = MMX_OP2(psubq),
2840 [0xfc] = MMX_OP2(paddb),
2841 [0xfd] = MMX_OP2(paddw),
2842 [0xfe] = MMX_OP2(paddl),
2843 };
2844
2845 static void *sse_op_table2[3 * 8][2] = {
2846 [0 + 2] = MMX_OP2(psrlw),
2847 [0 + 4] = MMX_OP2(psraw),
2848 [0 + 6] = MMX_OP2(psllw),
2849 [8 + 2] = MMX_OP2(psrld),
2850 [8 + 4] = MMX_OP2(psrad),
2851 [8 + 6] = MMX_OP2(pslld),
2852 [16 + 2] = MMX_OP2(psrlq),
2853 [16 + 3] = { NULL, helper_psrldq_xmm },
2854 [16 + 6] = MMX_OP2(psllq),
2855 [16 + 7] = { NULL, helper_pslldq_xmm },
2856 };
2857
2858 static void *sse_op_table3[4 * 3] = {
2859 helper_cvtsi2ss,
2860 helper_cvtsi2sd,
2861 X86_64_ONLY(helper_cvtsq2ss),
2862 X86_64_ONLY(helper_cvtsq2sd),
2863
2864 helper_cvttss2si,
2865 helper_cvttsd2si,
2866 X86_64_ONLY(helper_cvttss2sq),
2867 X86_64_ONLY(helper_cvttsd2sq),
2868
2869 helper_cvtss2si,
2870 helper_cvtsd2si,
2871 X86_64_ONLY(helper_cvtss2sq),
2872 X86_64_ONLY(helper_cvtsd2sq),
2873 };
2874
2875 static void *sse_op_table4[8][4] = {
2876 SSE_FOP(cmpeq),
2877 SSE_FOP(cmplt),
2878 SSE_FOP(cmple),
2879 SSE_FOP(cmpunord),
2880 SSE_FOP(cmpneq),
2881 SSE_FOP(cmpnlt),
2882 SSE_FOP(cmpnle),
2883 SSE_FOP(cmpord),
2884 };
2885
2886 static void *sse_op_table5[256] = {
2887 [0x0c] = helper_pi2fw,
2888 [0x0d] = helper_pi2fd,
2889 [0x1c] = helper_pf2iw,
2890 [0x1d] = helper_pf2id,
2891 [0x8a] = helper_pfnacc,
2892 [0x8e] = helper_pfpnacc,
2893 [0x90] = helper_pfcmpge,
2894 [0x94] = helper_pfmin,
2895 [0x96] = helper_pfrcp,
2896 [0x97] = helper_pfrsqrt,
2897 [0x9a] = helper_pfsub,
2898 [0x9e] = helper_pfadd,
2899 [0xa0] = helper_pfcmpgt,
2900 [0xa4] = helper_pfmax,
2901 [0xa6] = helper_movq, /* pfrcpit1; no need to actually increase precision */
2902 [0xa7] = helper_movq, /* pfrsqit1 */
2903 [0xaa] = helper_pfsubr,
2904 [0xae] = helper_pfacc,
2905 [0xb0] = helper_pfcmpeq,
2906 [0xb4] = helper_pfmul,
2907 [0xb6] = helper_movq, /* pfrcpit2 */
2908 [0xb7] = helper_pmulhrw_mmx,
2909 [0xbb] = helper_pswapd,
2910 [0xbf] = helper_pavgb_mmx /* pavgusb */
2911 };
2912
2913 static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
2914 {
2915 int b1, op1_offset, op2_offset, is_xmm, val, ot;
2916 int modrm, mod, rm, reg, reg_addr, offset_addr;
2917 void *sse_op2;
2918
2919 b &= 0xff;
2920 if (s->prefix & PREFIX_DATA)
2921 b1 = 1;
2922 else if (s->prefix & PREFIX_REPZ)
2923 b1 = 2;
2924 else if (s->prefix & PREFIX_REPNZ)
2925 b1 = 3;
2926 else
2927 b1 = 0;
2928 sse_op2 = sse_op_table1[b][b1];
2929 if (!sse_op2)
2930 goto illegal_op;
2931 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
2932 is_xmm = 1;
2933 } else {
2934 if (b1 == 0) {
2935 /* MMX case */
2936 is_xmm = 0;
2937 } else {
2938 is_xmm = 1;
2939 }
2940 }
2941 /* simple MMX/SSE operation */
2942 if (s->flags & HF_TS_MASK) {
2943 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
2944 return;
2945 }
2946 if (s->flags & HF_EM_MASK) {
2947 illegal_op:
2948 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
2949 return;
2950 }
2951 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
2952 goto illegal_op;
2953 if (b == 0x0e) {
2954 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
2955 goto illegal_op;
2956 /* femms */
2957 tcg_gen_helper_0_0(helper_emms);
2958 return;
2959 }
2960 if (b == 0x77) {
2961 /* emms */
2962 tcg_gen_helper_0_0(helper_emms);
2963 return;
2964 }
2965 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
2966 the static cpu state) */
2967 if (!is_xmm) {
2968 tcg_gen_helper_0_0(helper_enter_mmx);
2969 }
2970
2971 modrm = ldub_code(s->pc++);
2972 reg = ((modrm >> 3) & 7);
2973 if (is_xmm)
2974 reg |= rex_r;
2975 mod = (modrm >> 6) & 3;
2976 if (sse_op2 == SSE_SPECIAL) {
2977 b |= (b1 << 8);
2978 switch(b) {
2979 case 0x0e7: /* movntq */
2980 if (mod == 3)
2981 goto illegal_op;
2982 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
2983 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
2984 break;
2985 case 0x1e7: /* movntdq */
2986 case 0x02b: /* movntps */
2987 case 0x12b: /* movntps */
2988 case 0x3f0: /* lddqu */
2989 if (mod == 3)
2990 goto illegal_op;
2991 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
2992 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
2993 break;
2994 case 0x6e: /* movd mm, ea */
2995 #ifdef TARGET_X86_64
2996 if (s->dflag == 2) {
2997 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
2998 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
2999 } else
3000 #endif
3001 {
3002 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3003 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3004 offsetof(CPUX86State,fpregs[reg].mmx));
3005 tcg_gen_helper_0_2(helper_movl_mm_T0_mmx, cpu_ptr0, cpu_T[0]);
3006 }
3007 break;
3008 case 0x16e: /* movd xmm, ea */
3009 #ifdef TARGET_X86_64
3010 if (s->dflag == 2) {
3011 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3012 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3013 offsetof(CPUX86State,xmm_regs[reg]));
3014 tcg_gen_helper_0_2(helper_movq_mm_T0_xmm, cpu_ptr0, cpu_T[0]);
3015 } else
3016 #endif
3017 {
3018 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3019 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3020 offsetof(CPUX86State,xmm_regs[reg]));
3021 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3022 tcg_gen_helper_0_2(helper_movl_mm_T0_xmm, cpu_ptr0, cpu_tmp2_i32);
3023 }
3024 break;
3025 case 0x6f: /* movq mm, ea */
3026 if (mod != 3) {
3027 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3028 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3029 } else {
3030 rm = (modrm & 7);
3031 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3032 offsetof(CPUX86State,fpregs[rm].mmx));
3033 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3034 offsetof(CPUX86State,fpregs[reg].mmx));
3035 }
3036 break;
3037 case 0x010: /* movups */
3038 case 0x110: /* movupd */
3039 case 0x028: /* movaps */
3040 case 0x128: /* movapd */
3041 case 0x16f: /* movdqa xmm, ea */
3042 case 0x26f: /* movdqu xmm, ea */
3043 if (mod != 3) {
3044 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3045 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3046 } else {
3047 rm = (modrm & 7) | REX_B(s);
3048 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3049 offsetof(CPUX86State,xmm_regs[rm]));
3050 }
3051 break;
3052 case 0x210: /* movss xmm, ea */
3053 if (mod != 3) {
3054 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3055 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3056 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3057 gen_op_movl_T0_0();
3058 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3059 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3060 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3061 } else {
3062 rm = (modrm & 7) | REX_B(s);
3063 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3064 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3065 }
3066 break;
3067 case 0x310: /* movsd xmm, ea */
3068 if (mod != 3) {
3069 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3070 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3071 gen_op_movl_T0_0();
3072 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3073 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3074 } else {
3075 rm = (modrm & 7) | REX_B(s);
3076 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3077 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3078 }
3079 break;
3080 case 0x012: /* movlps */
3081 case 0x112: /* movlpd */
3082 if (mod != 3) {
3083 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3084 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3085 } else {
3086 /* movhlps */
3087 rm = (modrm & 7) | REX_B(s);
3088 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3089 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3090 }
3091 break;
3092 case 0x212: /* movsldup */
3093 if (mod != 3) {
3094 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3095 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3096 } else {
3097 rm = (modrm & 7) | REX_B(s);
3098 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3099 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3100 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3101 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3102 }
3103 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3104 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3105 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3106 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3107 break;
3108 case 0x312: /* movddup */
3109 if (mod != 3) {
3110 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3111 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3112 } else {
3113 rm = (modrm & 7) | REX_B(s);
3114 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3115 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3116 }
3117 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3118 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3119 break;
3120 case 0x016: /* movhps */
3121 case 0x116: /* movhpd */
3122 if (mod != 3) {
3123 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3124 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3125 } else {
3126 /* movlhps */
3127 rm = (modrm & 7) | REX_B(s);
3128 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3129 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3130 }
3131 break;
3132 case 0x216: /* movshdup */
3133 if (mod != 3) {
3134 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3135 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3136 } else {
3137 rm = (modrm & 7) | REX_B(s);
3138 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3139 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3140 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3141 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3142 }
3143 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3144 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3145 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3146 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3147 break;
3148 case 0x7e: /* movd ea, mm */
3149 #ifdef TARGET_X86_64
3150 if (s->dflag == 2) {
3151 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3152 offsetof(CPUX86State,fpregs[reg].mmx));
3153 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3154 } else
3155 #endif
3156 {
3157 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3158 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3159 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3160 }
3161 break;
3162 case 0x17e: /* movd ea, xmm */
3163 #ifdef TARGET_X86_64
3164 if (s->dflag == 2) {
3165 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3166 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3167 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3168 } else
3169 #endif
3170 {
3171 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3172 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3173 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3174 }
3175 break;
3176 case 0x27e: /* movq xmm, ea */
3177 if (mod != 3) {
3178 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3179 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3180 } else {
3181 rm = (modrm & 7) | REX_B(s);
3182 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3183 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3184 }
3185 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3186 break;
3187 case 0x7f: /* movq ea, mm */
3188 if (mod != 3) {
3189 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3190 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3191 } else {
3192 rm = (modrm & 7);
3193 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3194 offsetof(CPUX86State,fpregs[reg].mmx));
3195 }
3196 break;
3197 case 0x011: /* movups */
3198 case 0x111: /* movupd */
3199 case 0x029: /* movaps */
3200 case 0x129: /* movapd */
3201 case 0x17f: /* movdqa ea, xmm */
3202 case 0x27f: /* movdqu ea, xmm */
3203 if (mod != 3) {
3204 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3205 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3206 } else {
3207 rm = (modrm & 7) | REX_B(s);
3208 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3209 offsetof(CPUX86State,xmm_regs[reg]));
3210 }
3211 break;
3212 case 0x211: /* movss ea, xmm */
3213 if (mod != 3) {
3214 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3215 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3216 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3217 } else {
3218 rm = (modrm & 7) | REX_B(s);
3219 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3220 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3221 }
3222 break;
3223 case 0x311: /* movsd ea, xmm */
3224 if (mod != 3) {
3225 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3226 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3227 } else {
3228 rm = (modrm & 7) | REX_B(s);
3229 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3230 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3231 }
3232 break;
3233 case 0x013: /* movlps */
3234 case 0x113: /* movlpd */
3235 if (mod != 3) {
3236 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3237 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3238 } else {
3239 goto illegal_op;
3240 }
3241 break;
3242 case 0x017: /* movhps */
3243 case 0x117: /* movhpd */
3244 if (mod != 3) {
3245 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3246 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3247 } else {
3248 goto illegal_op;
3249 }
3250 break;
3251 case 0x71: /* shift mm, im */
3252 case 0x72:
3253 case 0x73:
3254 case 0x171: /* shift xmm, im */
3255 case 0x172:
3256 case 0x173:
3257 val = ldub_code(s->pc++);
3258 if (is_xmm) {
3259 gen_op_movl_T0_im(val);
3260 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3261 gen_op_movl_T0_0();
3262 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3263 op1_offset = offsetof(CPUX86State,xmm_t0);
3264 } else {
3265 gen_op_movl_T0_im(val);
3266 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3267 gen_op_movl_T0_0();
3268 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3269 op1_offset = offsetof(CPUX86State,mmx_t0);
3270 }
3271 sse_op2 = sse_op_table2[((b - 1) & 3) * 8 + (((modrm >> 3)) & 7)][b1];
3272 if (!sse_op2)
3273 goto illegal_op;
3274 if (is_xmm) {
3275 rm = (modrm & 7) | REX_B(s);
3276 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3277 } else {
3278 rm = (modrm & 7);
3279 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3280 }
3281 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3282 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3283 tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_ptr1);
3284 break;
3285 case 0x050: /* movmskps */
3286 rm = (modrm & 7) | REX_B(s);
3287 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3288 offsetof(CPUX86State,xmm_regs[rm]));
3289 tcg_gen_helper_1_1(helper_movmskps, cpu_tmp2_i32, cpu_ptr0);
3290 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3291 gen_op_mov_reg_T0(OT_LONG, reg);
3292 break;
3293 case 0x150: /* movmskpd */
3294 rm = (modrm & 7) | REX_B(s);
3295 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3296 offsetof(CPUX86State,xmm_regs[rm]));
3297 tcg_gen_helper_1_1(helper_movmskpd, cpu_tmp2_i32, cpu_ptr0);
3298 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3299 gen_op_mov_reg_T0(OT_LONG, reg);
3300 break;
3301 case 0x02a: /* cvtpi2ps */
3302 case 0x12a: /* cvtpi2pd */
3303 tcg_gen_helper_0_0(helper_enter_mmx);
3304 if (mod != 3) {
3305 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3306 op2_offset = offsetof(CPUX86State,mmx_t0);
3307 gen_ldq_env_A0(s->mem_index, op2_offset);
3308 } else {
3309 rm = (modrm & 7);
3310 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3311 }
3312 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3313 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3314 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3315 switch(b >> 8) {
3316 case 0x0:
3317 tcg_gen_helper_0_2(helper_cvtpi2ps, cpu_ptr0, cpu_ptr1);
3318 break;
3319 default:
3320 case 0x1:
3321 tcg_gen_helper_0_2(helper_cvtpi2pd, cpu_ptr0, cpu_ptr1);
3322 break;
3323 }
3324 break;
3325 case 0x22a: /* cvtsi2ss */
3326 case 0x32a: /* cvtsi2sd */
3327 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3328 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
3329 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3330 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3331 sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2)];
3332 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3333 tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_tmp2_i32);
3334 break;
3335 case 0x02c: /* cvttps2pi */
3336 case 0x12c: /* cvttpd2pi */
3337 case 0x02d: /* cvtps2pi */
3338 case 0x12d: /* cvtpd2pi */
3339 tcg_gen_helper_0_0(helper_enter_mmx);
3340 if (mod != 3) {
3341 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3342 op2_offset = offsetof(CPUX86State,xmm_t0);
3343 gen_ldo_env_A0(s->mem_index, op2_offset);
3344 } else {
3345 rm = (modrm & 7) | REX_B(s);
3346 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3347 }
3348 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3349 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3350 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3351 switch(b) {
3352 case 0x02c:
3353 tcg_gen_helper_0_2(helper_cvttps2pi, cpu_ptr0, cpu_ptr1);
3354 break;
3355 case 0x12c:
3356 tcg_gen_helper_0_2(helper_cvttpd2pi, cpu_ptr0, cpu_ptr1);
3357 break;
3358 case 0x02d:
3359 tcg_gen_helper_0_2(helper_cvtps2pi, cpu_ptr0, cpu_ptr1);
3360 break;
3361 case 0x12d:
3362 tcg_gen_helper_0_2(helper_cvtpd2pi, cpu_ptr0, cpu_ptr1);
3363 break;
3364 }
3365 break;
3366 case 0x22c: /* cvttss2si */
3367 case 0x32c: /* cvttsd2si */
3368 case 0x22d: /* cvtss2si */
3369 case 0x32d: /* cvtsd2si */
3370 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3371 if (mod != 3) {
3372 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3373 if ((b >> 8) & 1) {
3374 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
3375 } else {
3376 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3377 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3378 }
3379 op2_offset = offsetof(CPUX86State,xmm_t0);
3380 } else {
3381 rm = (modrm & 7) | REX_B(s);
3382 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3383 }
3384 sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2) + 4 +
3385 (b & 1) * 4];
3386 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3387 if (ot == OT_LONG) {
3388 tcg_gen_helper_1_1(sse_op2, cpu_tmp2_i32, cpu_ptr0);
3389 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3390 } else {
3391 tcg_gen_helper_1_1(sse_op2, cpu_T[0], cpu_ptr0);
3392 }
3393 gen_op_mov_reg_T0(ot, reg);
3394 break;
3395 case 0xc4: /* pinsrw */
3396 case 0x1c4:
3397 s->rip_offset = 1;
3398 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
3399 val = ldub_code(s->pc++);
3400 if (b1) {
3401 val &= 7;
3402 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3403 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3404 } else {
3405 val &= 3;
3406 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3407 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3408 }
3409 break;
3410 case 0xc5: /* pextrw */
3411 case 0x1c5:
3412 if (mod != 3)
3413 goto illegal_op;
3414 val = ldub_code(s->pc++);
3415 if (b1) {
3416 val &= 7;
3417 rm = (modrm & 7) | REX_B(s);
3418 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3419 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3420 } else {
3421 val &= 3;
3422 rm = (modrm & 7);
3423 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3424 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3425 }
3426 reg = ((modrm >> 3) & 7) | rex_r;
3427 gen_op_mov_reg_T0(OT_LONG, reg);
3428 break;
3429 case 0x1d6: /* movq ea, xmm */
3430 if (mod != 3) {
3431 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3432 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3433 } else {
3434 rm = (modrm & 7) | REX_B(s);
3435 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3436 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3437 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3438 }
3439 break;
3440 case 0x2d6: /* movq2dq */
3441 tcg_gen_helper_0_0(helper_enter_mmx);
3442 rm = (modrm & 7);
3443 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3444 offsetof(CPUX86State,fpregs[rm].mmx));
3445 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3446 break;
3447 case 0x3d6: /* movdq2q */
3448 tcg_gen_helper_0_0(helper_enter_mmx);
3449 rm = (modrm & 7) | REX_B(s);
3450 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3451 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3452 break;
3453 case 0xd7: /* pmovmskb */
3454 case 0x1d7:
3455 if (mod != 3)
3456 goto illegal_op;
3457 if (b1) {
3458 rm = (modrm & 7) | REX_B(s);
3459 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3460 tcg_gen_helper_1_1(helper_pmovmskb_xmm, cpu_tmp2_i32, cpu_ptr0);
3461 } else {
3462 rm = (modrm & 7);
3463 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3464 tcg_gen_helper_1_1(helper_pmovmskb_mmx, cpu_tmp2_i32, cpu_ptr0);
3465 }
3466 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3467 reg = ((modrm >> 3) & 7) | rex_r;
3468 gen_op_mov_reg_T0(OT_LONG, reg);
3469 break;
3470 default:
3471 goto illegal_op;
3472 }
3473 } else {
3474 /* generic MMX or SSE operation */
3475 switch(b) {
3476 case 0x70: /* pshufx insn */
3477 case 0xc6: /* pshufx insn */
3478 case 0xc2: /* compare insns */
3479 s->rip_offset = 1;
3480 break;
3481 default:
3482 break;
3483 }
3484 if (is_xmm) {
3485 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3486 if (mod != 3) {
3487 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3488 op2_offset = offsetof(CPUX86State,xmm_t0);
3489 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
3490 b == 0xc2)) {
3491 /* specific case for SSE single instructions */
3492 if (b1 == 2) {
3493 /* 32 bit access */
3494 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3495 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3496 } else {
3497 /* 64 bit access */
3498 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
3499 }
3500 } else {
3501 gen_ldo_env_A0(s->mem_index, op2_offset);
3502 }
3503 } else {
3504 rm = (modrm & 7) | REX_B(s);
3505 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3506 }
3507 } else {
3508 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3509 if (mod != 3) {
3510 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3511 op2_offset = offsetof(CPUX86State,mmx_t0);
3512 gen_ldq_env_A0(s->mem_index, op2_offset);
3513 } else {
3514 rm = (modrm & 7);
3515 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3516 }
3517 }
3518 switch(b) {
3519 case 0x0f: /* 3DNow! data insns */
3520 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3521 goto illegal_op;
3522 val = ldub_code(s->pc++);
3523 sse_op2 = sse_op_table5[val];
3524 if (!sse_op2)
3525 goto illegal_op;
3526 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3527 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3528 tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_ptr1);
3529 break;
3530 case 0x70: /* pshufx insn */
3531 case 0xc6: /* pshufx insn */
3532 val = ldub_code(s->pc++);
3533 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3534 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3535 tcg_gen_helper_0_3(sse_op2, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
3536 break;
3537 case 0xc2:
3538 /* compare insns */
3539 val = ldub_code(s->pc++);
3540 if (val >= 8)
3541 goto illegal_op;
3542 sse_op2 = sse_op_table4[val][b1];
3543 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3544 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3545 tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_ptr1);
3546 break;
3547 case 0xf7:
3548 /* maskmov : we must prepare A0 */
3549 if (mod != 3)
3550 goto illegal_op;
3551 #ifdef TARGET_X86_64
3552 if (s->aflag == 2) {
3553 gen_op_movq_A0_reg(R_EDI);
3554 } else
3555 #endif
3556 {
3557 gen_op_movl_A0_reg(R_EDI);
3558 if (s->aflag == 0)
3559 gen_op_andl_A0_ffff();
3560 }
3561 gen_add_A0_ds_seg(s);
3562
3563 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3564 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3565 tcg_gen_helper_0_3(sse_op2, cpu_ptr0, cpu_ptr1, cpu_A0);
3566 break;
3567 default:
3568 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3569 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3570 tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_ptr1);
3571 break;
3572 }
3573 if (b == 0x2e || b == 0x2f) {
3574 s->cc_op = CC_OP_EFLAGS;
3575 }
3576 }
3577 }
3578
3579 /* convert one instruction. s->is_jmp is set if the translation must
3580 be stopped. Return the next pc value */
3581 static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
3582 {
3583 int b, prefixes, aflag, dflag;
3584 int shift, ot;
3585 int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
3586 target_ulong next_eip, tval;
3587 int rex_w, rex_r;
3588
3589 if (unlikely(loglevel & CPU_LOG_TB_OP))
3590 tcg_gen_debug_insn_start(pc_start);
3591 s->pc = pc_start;
3592 prefixes = 0;
3593 aflag = s->code32;
3594 dflag = s->code32;
3595 s->override = -1;
3596 rex_w = -1;
3597 rex_r = 0;
3598 #ifdef TARGET_X86_64
3599 s->rex_x = 0;
3600 s->rex_b = 0;
3601 x86_64_hregs = 0;
3602 #endif
3603 s->rip_offset = 0; /* for relative ip address */
3604 next_byte:
3605 b = ldub_code(s->pc);
3606 s->pc++;
3607 /* check prefixes */
3608 #ifdef TARGET_X86_64
3609 if (CODE64(s)) {
3610 switch (b) {
3611 case 0xf3:
3612 prefixes |= PREFIX_REPZ;
3613 goto next_byte;
3614 case 0xf2:
3615 prefixes |= PREFIX_REPNZ;
3616 goto next_byte;
3617 case 0xf0:
3618 prefixes |= PREFIX_LOCK;
3619 goto next_byte;
3620 case 0x2e:
3621 s->override = R_CS;
3622 goto next_byte;
3623 case 0x36:
3624 s->override = R_SS;
3625 goto next_byte;
3626 case 0x3e:
3627 s->override = R_DS;
3628 goto next_byte;
3629 case 0x26:
3630 s->override = R_ES;
3631 goto next_byte;
3632 case 0x64:
3633 s->override = R_FS;
3634 goto next_byte;
3635 case 0x65:
3636 s->override = R_GS;
3637 goto next_byte;
3638 case 0x66:
3639 prefixes |= PREFIX_DATA;
3640 goto next_byte;
3641 case 0x67:
3642 prefixes |= PREFIX_ADR;
3643 goto next_byte;
3644 case 0x40 ... 0x4f:
3645 /* REX prefix */
3646 rex_w = (b >> 3) & 1;
3647 rex_r = (b & 0x4) << 1;
3648 s->rex_x = (b & 0x2) << 2;
3649 REX_B(s) = (b & 0x1) << 3;
3650 x86_64_hregs = 1; /* select uniform byte register addressing */
3651 goto next_byte;
3652 }
3653 if (rex_w == 1) {
3654 /* 0x66 is ignored if rex.w is set */
3655 dflag = 2;
3656 } else {
3657 if (prefixes & PREFIX_DATA)
3658 dflag ^= 1;
3659 }
3660 if (!(prefixes & PREFIX_ADR))
3661 aflag = 2;
3662 } else
3663 #endif
3664 {
3665 switch (b) {
3666 case 0xf3:
3667 prefixes |= PREFIX_REPZ;
3668 goto next_byte;
3669 case 0xf2:
3670 prefixes |= PREFIX_REPNZ;
3671 goto next_byte;
3672 case 0xf0:
3673 prefixes |= PREFIX_LOCK;
3674 goto next_byte;
3675 case 0x2e:
3676 s->override = R_CS;
3677 goto next_byte;
3678 case 0x36:
3679 s->override = R_SS;
3680 goto next_byte;
3681 case 0x3e:
3682 s->override = R_DS;
3683 goto next_byte;
3684 case 0x26:
3685 s->override = R_ES;
3686 goto next_byte;
3687 case 0x64:
3688 s->override = R_FS;
3689 goto next_byte;
3690 case 0x65:
3691 s->override = R_GS;
3692 goto next_byte;
3693 case 0x66:
3694 prefixes |= PREFIX_DATA;
3695 goto next_byte;
3696 case 0x67:
3697 prefixes |= PREFIX_ADR;
3698 goto next_byte;
3699 }
3700 if (prefixes & PREFIX_DATA)
3701 dflag ^= 1;
3702 if (prefixes & PREFIX_ADR)
3703 aflag ^= 1;
3704 }
3705
3706 s->prefix = prefixes;
3707 s->aflag = aflag;
3708 s->dflag = dflag;
3709
3710 /* lock generation */
3711 if (prefixes & PREFIX_LOCK)
3712 tcg_gen_helper_0_0(helper_lock);
3713
3714 /* now check op code */
3715 reswitch:
3716 switch(b) {
3717 case 0x0f:
3718 /**************************/
3719 /* extended op code */
3720 b = ldub_code(s->pc++) | 0x100;
3721 goto reswitch;
3722
3723 /**************************/
3724 /* arith & logic */
3725 case 0x00 ... 0x05:
3726 case 0x08 ... 0x0d:
3727 case 0x10 ... 0x15:
3728 case 0x18 ... 0x1d:
3729 case 0x20 ... 0x25:
3730 case 0x28 ... 0x2d:
3731 case 0x30 ... 0x35:
3732 case 0x38 ... 0x3d:
3733 {
3734 int op, f, val;
3735 op = (b >> 3) & 7;
3736 f = (b >> 1) & 3;
3737
3738 if ((b & 1) == 0)
3739 ot = OT_BYTE;
3740 else
3741 ot = dflag + OT_WORD;
3742
3743 switch(f) {
3744 case 0: /* OP Ev, Gv */
3745 modrm = ldub_code(s->pc++);
3746 reg = ((modrm >> 3) & 7) | rex_r;
3747 mod = (modrm >> 6) & 3;
3748 rm = (modrm & 7) | REX_B(s);
3749 if (mod != 3) {
3750 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3751 opreg = OR_TMP0;
3752 } else if (op == OP_XORL && rm == reg) {
3753 xor_zero:
3754 /* xor reg, reg optimisation */
3755 gen_op_movl_T0_0();
3756 s->cc_op = CC_OP_LOGICB + ot;
3757 gen_op_mov_reg_T0(ot, reg);
3758 gen_op_update1_cc();
3759 break;
3760 } else {
3761 opreg = rm;
3762 }
3763 gen_op_mov_TN_reg(ot, 1, reg);
3764 gen_op(s, op, ot, opreg);
3765 break;
3766 case 1: /* OP Gv, Ev */
3767 modrm = ldub_code(s->pc++);
3768 mod = (modrm >> 6) & 3;
3769 reg = ((modrm >> 3) & 7) | rex_r;
3770 rm = (modrm & 7) | REX_B(s);
3771 if (mod != 3) {
3772 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3773 gen_op_ld_T1_A0(ot + s->mem_index);
3774 } else if (op == OP_XORL && rm == reg) {
3775 goto xor_zero;
3776 } else {
3777 gen_op_mov_TN_reg(ot, 1, rm);
3778 }
3779 gen_op(s, op, ot, reg);
3780 break;
3781 case 2: /* OP A, Iv */
3782 val = insn_get(s, ot);
3783 gen_op_movl_T1_im(val);
3784 gen_op(s, op, ot, OR_EAX);
3785 break;
3786 }
3787 }
3788 break;
3789
3790 case 0x80: /* GRP1 */
3791 case 0x81:
3792 case 0x82:
3793 case 0x83:
3794 {
3795 int val;
3796
3797 if ((b & 1) == 0)
3798 ot = OT_BYTE;
3799 else
3800 ot = dflag + OT_WORD;
3801
3802 modrm = ldub_code(s->pc++);
3803 mod = (modrm >> 6) & 3;
3804 rm = (modrm & 7) | REX_B(s);
3805 op = (modrm >> 3) & 7;
3806
3807 if (mod != 3) {
3808 if (b == 0x83)
3809 s->rip_offset = 1;
3810 else
3811 s->rip_offset = insn_const_size(ot);
3812 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3813 opreg = OR_TMP0;
3814 } else {
3815 opreg = rm;
3816 }
3817
3818 switch(b) {
3819 default:
3820 case 0x80:
3821 case 0x81:
3822 case 0x82:
3823 val = insn_get(s, ot);
3824 break;
3825 case 0x83:
3826 val = (int8_t)insn_get(s, OT_BYTE);
3827 break;
3828 }
3829 gen_op_movl_T1_im(val);
3830 gen_op(s, op, ot, opreg);
3831 }
3832 break;
3833
3834 /**************************/
3835 /* inc, dec, and other misc arith */
3836 case 0x40 ... 0x47: /* inc Gv */
3837 ot = dflag ? OT_LONG : OT_WORD;
3838 gen_inc(s, ot, OR_EAX + (b & 7), 1);
3839 break;
3840 case 0x48 ... 0x4f: /* dec Gv */
3841 ot = dflag ? OT_LONG : OT_WORD;
3842 gen_inc(s, ot, OR_EAX + (b & 7), -1);
3843 break;
3844 case 0xf6: /* GRP3 */
3845 case 0xf7:
3846 if ((b & 1) == 0)
3847 ot = OT_BYTE;
3848 else
3849 ot = dflag + OT_WORD;
3850
3851 modrm = ldub_code(s->pc++);
3852 mod = (modrm >> 6) & 3;
3853 rm = (modrm & 7) | REX_B(s);
3854 op = (modrm >> 3) & 7;
3855 if (mod != 3) {
3856 if (op == 0)
3857 s->rip_offset = insn_const_size(ot);
3858 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3859 gen_op_ld_T0_A0(ot + s->mem_index);
3860 } else {
3861 gen_op_mov_TN_reg(ot, 0, rm);
3862 }
3863
3864 switch(op) {
3865 case 0: /* test */
3866 val = insn_get(s, ot);
3867 gen_op_movl_T1_im(val);
3868 gen_op_testl_T0_T1_cc();
3869 s->cc_op = CC_OP_LOGICB + ot;
3870 break;
3871 case 2: /* not */
3872 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
3873 if (mod != 3) {
3874 gen_op_st_T0_A0(ot + s->mem_index);
3875 } else {
3876 gen_op_mov_reg_T0(ot, rm);
3877 }
3878 break;
3879 case 3: /* neg */
3880 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
3881 if (mod != 3) {
3882 gen_op_st_T0_A0(ot + s->mem_index);
3883 } else {
3884 gen_op_mov_reg_T0(ot, rm);
3885 }
3886 gen_op_update_neg_cc();
3887 s->cc_op = CC_OP_SUBB + ot;
3888 break;
3889 case 4: /* mul */
3890 switch(ot) {
3891 case OT_BYTE:
3892 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
3893 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
3894 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
3895 /* XXX: use 32 bit mul which could be faster */
3896 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3897 gen_op_mov_reg_T0(OT_WORD, R_EAX);
3898 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
3899 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
3900 s->cc_op = CC_OP_MULB;
3901 break;
3902 case OT_WORD:
3903 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
3904 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
3905 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
3906 /* XXX: use 32 bit mul which could be faster */
3907 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3908 gen_op_mov_reg_T0(OT_WORD, R_EAX);
3909 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
3910 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
3911 gen_op_mov_reg_T0(OT_WORD, R_EDX);
3912 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
3913 s->cc_op = CC_OP_MULW;
3914 break;
3915 default:
3916 case OT_LONG:
3917 #ifdef TARGET_X86_64
3918 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
3919 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
3920 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
3921 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3922 gen_op_mov_reg_T0(OT_LONG, R_EAX);
3923 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
3924 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
3925 gen_op_mov_reg_T0(OT_LONG, R_EDX);
3926 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
3927 #else
3928 {
3929 TCGv t0, t1;
3930 t0 = tcg_temp_new(TCG_TYPE_I64);
3931 t1 = tcg_temp_new(TCG_TYPE_I64);
3932 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
3933 tcg_gen_extu_i32_i64(t0, cpu_T[0]);
3934 tcg_gen_extu_i32_i64(t1, cpu_T[1]);
3935 tcg_gen_mul_i64(t0, t0, t1);
3936 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
3937 gen_op_mov_reg_T0(OT_LONG, R_EAX);
3938 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
3939 tcg_gen_shri_i64(t0, t0, 32);
3940 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
3941 gen_op_mov_reg_T0(OT_LONG, R_EDX);
3942 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
3943 }
3944 #endif
3945 s->cc_op = CC_OP_MULL;
3946 break;
3947 #ifdef TARGET_X86_64
3948 case OT_QUAD:
3949 tcg_gen_helper_0_1(helper_mulq_EAX_T0, cpu_T[0]);
3950 s->cc_op = CC_OP_MULQ;
3951 break;
3952 #endif
3953 }
3954 break;
3955 case 5: /* imul */
3956 switch(ot) {
3957 case OT_BYTE:
3958 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
3959 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
3960 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
3961 /* XXX: use 32 bit mul which could be faster */
3962 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3963 gen_op_mov_reg_T0(OT_WORD, R_EAX);
3964 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
3965 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
3966 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
3967 s->cc_op = CC_OP_MULB;
3968 break;
3969 case OT_WORD:
3970 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
3971 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
3972 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
3973 /* XXX: use 32 bit mul which could be faster */
3974 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3975 gen_op_mov_reg_T0(OT_WORD, R_EAX);
3976 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
3977 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
3978 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
3979 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
3980 gen_op_mov_reg_T0(OT_WORD, R_EDX);
3981 s->cc_op = CC_OP_MULW;
3982 break;
3983 default:
3984 case OT_LONG:
3985 #ifdef TARGET_X86_64
3986 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
3987 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
3988 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
3989 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3990 gen_op_mov_reg_T0(OT_LONG, R_EAX);
3991 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
3992 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
3993 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
3994 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
3995 gen_op_mov_reg_T0(OT_LONG, R_EDX);
3996 #else
3997 {
3998 TCGv t0, t1;
3999 t0 = tcg_temp_new(TCG_TYPE_I64);
4000 t1 = tcg_temp_new(TCG_TYPE_I64);
4001 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4002 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4003 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4004 tcg_gen_mul_i64(t0, t0, t1);
4005 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4006 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4007 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4008 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4009 tcg_gen_shri_i64(t0, t0, 32);
4010 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4011 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4012 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4013 }
4014 #endif
4015 s->cc_op = CC_OP_MULL;
4016 break;
4017 #ifdef TARGET_X86_64
4018 case OT_QUAD:
4019 tcg_gen_helper_0_1(helper_imulq_EAX_T0, cpu_T[0]);
4020 s->cc_op = CC_OP_MULQ;
4021 break;
4022 #endif
4023 }
4024 break;
4025 case 6: /* div */
4026 switch(ot) {
4027 case OT_BYTE:
4028 gen_jmp_im(pc_start - s->cs_base);
4029 tcg_gen_helper_0_1(helper_divb_AL, cpu_T[0]);
4030 break;
4031 case OT_WORD:
4032 gen_jmp_im(pc_start - s->cs_base);
4033 tcg_gen_helper_0_1(helper_divw_AX, cpu_T[0]);
4034 break;
4035 default:
4036 case OT_LONG:
4037 gen_jmp_im(pc_start - s->cs_base);
4038 tcg_gen_helper_0_1(helper_divl_EAX, cpu_T[0]);
4039 break;
4040 #ifdef TARGET_X86_64
4041 case OT_QUAD:
4042 gen_jmp_im(pc_start - s->cs_base);
4043 tcg_gen_helper_0_1(helper_divq_EAX, cpu_T[0]);
4044 break;
4045 #endif
4046 }
4047 break;
4048 case 7: /* idiv */
4049 switch(ot) {
4050 case OT_BYTE:
4051 gen_jmp_im(pc_start - s->cs_base);
4052 tcg_gen_helper_0_1(helper_idivb_AL, cpu_T[0]);
4053 break;
4054 case OT_WORD:
4055 gen_jmp_im(pc_start - s->cs_base);
4056 tcg_gen_helper_0_1(helper_idivw_AX, cpu_T[0]);
4057 break;
4058 default:
4059 case OT_LONG:
4060 gen_jmp_im(pc_start - s->cs_base);
4061 tcg_gen_helper_0_1(helper_idivl_EAX, cpu_T[0]);
4062 break;
4063 #ifdef TARGET_X86_64
4064 case OT_QUAD:
4065 gen_jmp_im(pc_start - s->cs_base);
4066 tcg_gen_helper_0_1(helper_idivq_EAX, cpu_T[0]);
4067 break;
4068 #endif
4069 }
4070 break;
4071 default:
4072 goto illegal_op;
4073 }
4074 break;
4075
4076 case 0xfe: /* GRP4 */
4077 case 0xff: /* GRP5 */
4078 if ((b & 1) == 0)
4079 ot = OT_BYTE;
4080 else
4081 ot = dflag + OT_WORD;
4082
4083 modrm = ldub_code(s->pc++);
4084 mod = (modrm >> 6) & 3;
4085 rm = (modrm & 7) | REX_B(s);
4086 op = (modrm >> 3) & 7;
4087 if (op >= 2 && b == 0xfe) {
4088 goto illegal_op;
4089 }
4090 if (CODE64(s)) {
4091 if (op == 2 || op == 4) {
4092 /* operand size for jumps is 64 bit */
4093 ot = OT_QUAD;
4094 } else if (op == 3 || op == 5) {
4095 /* for call calls, the operand is 16 or 32 bit, even
4096 in long mode */
4097 ot = dflag ? OT_LONG : OT_WORD;
4098 } else if (op == 6) {
4099 /* default push size is 64 bit */
4100 ot = dflag ? OT_QUAD : OT_WORD;
4101 }
4102 }
4103 if (mod != 3) {
4104 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4105 if (op >= 2 && op != 3 && op != 5)
4106 gen_op_ld_T0_A0(ot + s->mem_index);
4107 } else {
4108 gen_op_mov_TN_reg(ot, 0, rm);
4109 }
4110
4111 switch(op) {
4112 case 0: /* inc Ev */
4113 if (mod != 3)
4114 opreg = OR_TMP0;
4115 else
4116 opreg = rm;
4117 gen_inc(s, ot, opreg, 1);
4118 break;
4119 case 1: /* dec Ev */
4120 if (mod != 3)
4121 opreg = OR_TMP0;
4122 else
4123 opreg = rm;
4124 gen_inc(s, ot, opreg, -1);
4125 break;
4126 case 2: /* call Ev */
4127 /* XXX: optimize if memory (no 'and' is necessary) */
4128 if (s->dflag == 0)
4129 gen_op_andl_T0_ffff();
4130 next_eip = s->pc - s->cs_base;
4131 gen_movtl_T1_im(next_eip);
4132 gen_push_T1(s);
4133 gen_op_jmp_T0();
4134 gen_eob(s);
4135 break;
4136 case 3: /* lcall Ev */
4137 gen_op_ld_T1_A0(ot + s->mem_index);
4138 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4139 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4140 do_lcall:
4141 if (s->pe && !s->vm86) {
4142 if (s->cc_op != CC_OP_DYNAMIC)
4143 gen_op_set_cc_op(s->cc_op);
4144 gen_jmp_im(pc_start - s->cs_base);
4145 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4146 tcg_gen_helper_0_4(helper_lcall_protected,
4147 cpu_tmp2_i32, cpu_T[1],
4148 tcg_const_i32(dflag),
4149 tcg_const_i32(s->pc - pc_start));
4150 } else {
4151 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4152 tcg_gen_helper_0_4(helper_lcall_real,
4153 cpu_tmp2_i32, cpu_T[1],
4154 tcg_const_i32(dflag),
4155 tcg_const_i32(s->pc - s->cs_base));
4156 }
4157 gen_eob(s);
4158 break;
4159 case 4: /* jmp Ev */
4160 if (s->dflag == 0)
4161 gen_op_andl_T0_ffff();
4162 gen_op_jmp_T0();
4163 gen_eob(s);
4164 break;
4165 case 5: /* ljmp Ev */
4166 gen_op_ld_T1_A0(ot + s->mem_index);
4167 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4168 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4169 do_ljmp:
4170 if (s->pe && !s->vm86) {
4171 if (s->cc_op != CC_OP_DYNAMIC)
4172 gen_op_set_cc_op(s->cc_op);
4173 gen_jmp_im(pc_start - s->cs_base);
4174 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4175 tcg_gen_helper_0_3(helper_ljmp_protected,
4176 cpu_tmp2_i32,
4177 cpu_T[1],
4178 tcg_const_i32(s->pc - pc_start));
4179 } else {
4180 gen_op_movl_seg_T0_vm(R_CS);
4181 gen_op_movl_T0_T1();
4182 gen_op_jmp_T0();
4183 }
4184 gen_eob(s);
4185 break;
4186 case 6: /* push Ev */
4187 gen_push_T0(s);
4188 break;
4189 default:
4190 goto illegal_op;
4191 }
4192 break;
4193
4194 case 0x84: /* test Ev, Gv */
4195 case 0x85:
4196 if ((b & 1) == 0)
4197 ot = OT_BYTE;
4198 else
4199 ot = dflag + OT_WORD;
4200
4201 modrm = ldub_code(s->pc++);
4202 mod = (modrm >> 6) & 3;
4203 rm = (modrm & 7) | REX_B(s);
4204 reg = ((modrm >> 3) & 7) | rex_r;
4205
4206 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4207 gen_op_mov_TN_reg(ot, 1, reg);
4208 gen_op_testl_T0_T1_cc();
4209 s->cc_op = CC_OP_LOGICB + ot;
4210 break;
4211
4212 case 0xa8: /* test eAX, Iv */
4213 case 0xa9:
4214 if ((b & 1) == 0)
4215 ot = OT_BYTE;
4216 else
4217 ot = dflag + OT_WORD;
4218 val = insn_get(s, ot);
4219
4220 gen_op_mov_TN_reg(ot, 0, OR_EAX);
4221 gen_op_movl_T1_im(val);
4222 gen_op_testl_T0_T1_cc();
4223 s->cc_op = CC_OP_LOGICB + ot;
4224 break;
4225
4226 case 0x98: /* CWDE/CBW */
4227 #ifdef TARGET_X86_64
4228 if (dflag == 2) {
4229 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4230 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4231 gen_op_mov_reg_T0(OT_QUAD, R_EAX);
4232 } else
4233 #endif
4234 if (dflag == 1) {
4235 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4236 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4237 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4238 } else {
4239 gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
4240 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4241 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4242 }
4243 break;
4244 case 0x99: /* CDQ/CWD */
4245 #ifdef TARGET_X86_64
4246 if (dflag == 2) {
4247 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
4248 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
4249 gen_op_mov_reg_T0(OT_QUAD, R_EDX);
4250 } else
4251 #endif
4252 if (dflag == 1) {
4253 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4254 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4255 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
4256 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4257 } else {
4258 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4259 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4260 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
4261 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4262 }
4263 break;
4264 case 0x1af: /* imul Gv, Ev */
4265 case 0x69: /* imul Gv, Ev, I */
4266 case 0x6b:
4267 ot = dflag + OT_WORD;
4268 modrm = ldub_code(s->pc++);
4269 reg = ((modrm >> 3) & 7) | rex_r;
4270 if (b == 0x69)
4271 s->rip_offset = insn_const_size(ot);
4272 else if (b == 0x6b)
4273 s->rip_offset = 1;
4274 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4275 if (b == 0x69) {
4276 val = insn_get(s, ot);
4277 gen_op_movl_T1_im(val);
4278 } else if (b == 0x6b) {
4279 val = (int8_t)insn_get(s, OT_BYTE);
4280 gen_op_movl_T1_im(val);
4281 } else {
4282 gen_op_mov_TN_reg(ot, 1, reg);
4283 }
4284
4285 #ifdef TARGET_X86_64
4286 if (ot == OT_QUAD) {
4287 tcg_gen_helper_1_2(helper_imulq_T0_T1, cpu_T[0], cpu_T[0], cpu_T[1]);
4288 } else
4289 #endif
4290 if (ot == OT_LONG) {
4291 #ifdef TARGET_X86_64
4292 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4293 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4294 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4295 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4296 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4297 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4298 #else
4299 {
4300 TCGv t0, t1;
4301 t0 = tcg_temp_new(TCG_TYPE_I64);
4302 t1 = tcg_temp_new(TCG_TYPE_I64);
4303 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4304 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4305 tcg_gen_mul_i64(t0, t0, t1);
4306 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4307 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4308 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4309 tcg_gen_shri_i64(t0, t0, 32);
4310 tcg_gen_trunc_i64_i32(cpu_T[1], t0);
4311 tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
4312 }
4313 #endif
4314 } else {
4315 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4316 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4317 /* XXX: use 32 bit mul which could be faster */
4318 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4319 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4320 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4321 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4322 }
4323 gen_op_mov_reg_T0(ot, reg);
4324 s->cc_op = CC_OP_MULB + ot;
4325 break;
4326 case 0x1c0:
4327 case 0x1c1: /* xadd Ev, Gv */
4328 if ((b & 1) == 0)
4329 ot = OT_BYTE;
4330 else
4331 ot = dflag + OT_WORD;
4332 modrm = ldub_code(s->pc++);
4333 reg = ((modrm >> 3) & 7) | rex_r;
4334 mod = (modrm >> 6) & 3;
4335 if (mod == 3) {
4336 rm = (modrm & 7) | REX_B(s);
4337 gen_op_mov_TN_reg(ot, 0, reg);
4338 gen_op_mov_TN_reg(ot, 1, rm);
4339 gen_op_addl_T0_T1();
4340 gen_op_mov_reg_T1(ot, reg);
4341 gen_op_mov_reg_T0(ot, rm);
4342 } else {
4343 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4344 gen_op_mov_TN_reg(ot, 0, reg);
4345 gen_op_ld_T1_A0(ot + s->mem_index);
4346 gen_op_addl_T0_T1();
4347 gen_op_st_T0_A0(ot + s->mem_index);
4348 gen_op_mov_reg_T1(ot, reg);
4349 }
4350 gen_op_update2_cc();
4351 s->cc_op = CC_OP_ADDB + ot;
4352 break;
4353 case 0x1b0:
4354 case 0x1b1: /* cmpxchg Ev, Gv */
4355 {
4356 int label1, label2;
4357
4358 if ((b & 1) == 0)
4359 ot = OT_BYTE;
4360 else
4361 ot = dflag + OT_WORD;
4362 modrm = ldub_code(s->pc++);
4363 reg = ((modrm >> 3) & 7) | rex_r;
4364 mod = (modrm >> 6) & 3;
4365 gen_op_mov_TN_reg(ot, 1, reg);
4366 if (mod == 3) {
4367 rm = (modrm & 7) | REX_B(s);
4368 gen_op_mov_TN_reg(ot, 0, rm);
4369 } else {
4370 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4371 gen_op_ld_T0_A0(ot + s->mem_index);
4372 rm = 0; /* avoid warning */
4373 }
4374 label1 = gen_new_label();
4375 tcg_gen_ld_tl(cpu_T3, cpu_env, offsetof(CPUState, regs[R_EAX]));
4376 tcg_gen_sub_tl(cpu_T3, cpu_T3, cpu_T[0]);
4377 gen_extu(ot, cpu_T3);
4378 tcg_gen_brcond_tl(TCG_COND_EQ, cpu_T3, tcg_const_tl(0), label1);
4379 if (mod == 3) {
4380 label2 = gen_new_label();
4381 gen_op_mov_reg_T0(ot, R_EAX);
4382 tcg_gen_br(label2);
4383 gen_set_label(label1);
4384 gen_op_mov_reg_T1(ot, rm);
4385 gen_set_label(label2);
4386 } else {
4387 tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
4388 gen_op_mov_reg_T0(ot, R_EAX);
4389 gen_set_label(label1);
4390 /* always store */
4391 gen_op_st_T1_A0(ot + s->mem_index);
4392 }
4393 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4394 tcg_gen_mov_tl(cpu_cc_dst, cpu_T3);
4395 s->cc_op = CC_OP_SUBB + ot;
4396 }
4397 break;
4398 case 0x1c7: /* cmpxchg8b */
4399 modrm = ldub_code(s->pc++);
4400 mod = (modrm >> 6) & 3;
4401 if ((mod == 3) || ((modrm & 0x38) != 0x8))
4402 goto illegal_op;
4403 #ifdef TARGET_X86_64
4404 if (dflag == 2) {
4405 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
4406 goto illegal_op;
4407 gen_jmp_im(pc_start - s->cs_base);
4408 if (s->cc_op != CC_OP_DYNAMIC)
4409 gen_op_set_cc_op(s->cc_op);
4410 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4411 tcg_gen_helper_0_1(helper_cmpxchg16b, cpu_A0);
4412 } else
4413 #endif
4414 {
4415 if (!(s->cpuid_features & CPUID_CX8))
4416 goto illegal_op;
4417 gen_jmp_im(pc_start - s->cs_base);
4418 if (s->cc_op != CC_OP_DYNAMIC)
4419 gen_op_set_cc_op(s->cc_op);
4420 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4421 tcg_gen_helper_0_1(helper_cmpxchg8b, cpu_A0);
4422 }
4423 s->cc_op = CC_OP_EFLAGS;
4424 break;
4425
4426 /**************************/
4427 /* push/pop */
4428 case 0x50 ... 0x57: /* push */
4429 gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
4430 gen_push_T0(s);
4431 break;
4432 case 0x58 ... 0x5f: /* pop */
4433 if (CODE64(s)) {
4434 ot = dflag ? OT_QUAD : OT_WORD;
4435 } else {
4436 ot = dflag + OT_WORD;
4437 }
4438 gen_pop_T0(s);
4439 /* NOTE: order is important for pop %sp */
4440 gen_pop_update(s);
4441 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
4442 break;
4443 case 0x60: /* pusha */
4444 if (CODE64(s))
4445 goto illegal_op;
4446 gen_pusha(s);
4447 break;
4448 case 0x61: /* popa */
4449 if (CODE64(s))
4450 goto illegal_op;
4451 gen_popa(s);
4452 break;
4453 case 0x68: /* push Iv */
4454 case 0x6a:
4455 if (CODE64(s)) {
4456 ot = dflag ? OT_QUAD : OT_WORD;
4457 } else {
4458 ot = dflag + OT_WORD;
4459 }
4460 if (b == 0x68)
4461 val = insn_get(s, ot);
4462 else
4463 val = (int8_t)insn_get(s, OT_BYTE);
4464 gen_op_movl_T0_im(val);
4465 gen_push_T0(s);
4466 break;
4467 case 0x8f: /* pop Ev */
4468 if (CODE64(s)) {
4469 ot = dflag ? OT_QUAD : OT_WORD;
4470 } else {
4471 ot = dflag + OT_WORD;
4472 }
4473 modrm = ldub_code(s->pc++);
4474 mod = (modrm >> 6) & 3;
4475 gen_pop_T0(s);
4476 if (mod == 3) {
4477 /* NOTE: order is important for pop %sp */
4478 gen_pop_update(s);
4479 rm = (modrm & 7) | REX_B(s);
4480 gen_op_mov_reg_T0(ot, rm);
4481 } else {
4482 /* NOTE: order is important too for MMU exceptions */
4483 s->popl_esp_hack = 1 << ot;
4484 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
4485 s->popl_esp_hack = 0;
4486 gen_pop_update(s);
4487 }
4488 break;
4489 case 0xc8: /* enter */
4490 {
4491 int level;
4492 val = lduw_code(s->pc);
4493 s->pc += 2;
4494 level = ldub_code(s->pc++);
4495 gen_enter(s, val, level);
4496 }
4497 break;
4498 case 0xc9: /* leave */
4499 /* XXX: exception not precise (ESP is updated before potential exception) */
4500 if (CODE64(s)) {
4501 gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
4502 gen_op_mov_reg_T0(OT_QUAD, R_ESP);
4503 } else if (s->ss32) {
4504 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
4505 gen_op_mov_reg_T0(OT_LONG, R_ESP);
4506 } else {
4507 gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
4508 gen_op_mov_reg_T0(OT_WORD, R_ESP);
4509 }
4510 gen_pop_T0(s);
4511 if (CODE64(s)) {
4512 ot = dflag ? OT_QUAD : OT_WORD;
4513 } else {
4514 ot = dflag + OT_WORD;
4515 }
4516 gen_op_mov_reg_T0(ot, R_EBP);
4517 gen_pop_update(s);
4518 break;
4519 case 0x06: /* push es */
4520 case 0x0e: /* push cs */
4521 case 0x16: /* push ss */
4522 case 0x1e: /* push ds */
4523 if (CODE64(s))
4524 goto illegal_op;
4525 gen_op_movl_T0_seg(b >> 3);
4526 gen_push_T0(s);
4527 break;
4528 case 0x1a0: /* push fs */
4529 case 0x1a8: /* push gs */
4530 gen_op_movl_T0_seg((b >> 3) & 7);
4531 gen_push_T0(s);
4532 break;
4533 case 0x07: /* pop es */
4534 case 0x17: /* pop ss */
4535 case 0x1f: /* pop ds */
4536 if (CODE64(s))
4537 goto illegal_op;
4538 reg = b >> 3;
4539 gen_pop_T0(s);
4540 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
4541 gen_pop_update(s);
4542 if (reg == R_SS) {
4543 /* if reg == SS, inhibit interrupts/trace. */
4544 /* If several instructions disable interrupts, only the
4545 _first_ does it */
4546 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
4547 tcg_gen_helper_0_0(helper_set_inhibit_irq);
4548 s->tf = 0;
4549 }
4550 if (s->is_jmp) {
4551 gen_jmp_im(s->pc - s->cs_base);
4552 gen_eob(s);
4553 }
4554 break;
4555 case 0x1a1: /* pop fs */
4556 case 0x1a9: /* pop gs */
4557 gen_pop_T0(s);
4558 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
4559 gen_pop_update(s);
4560 if (s->is_jmp) {
4561 gen_jmp_im(s->pc - s->cs_base);
4562 gen_eob(s);
4563 }
4564 break;
4565
4566 /**************************/
4567 /* mov */
4568 case 0x88:
4569 case 0x89: /* mov Gv, Ev */
4570 if ((b & 1) == 0)
4571 ot = OT_BYTE;
4572 else
4573 ot = dflag + OT_WORD;
4574 modrm = ldub_code(s->pc++);
4575 reg = ((modrm >> 3) & 7) | rex_r;
4576
4577 /* generate a generic store */
4578 gen_ldst_modrm(s, modrm, ot, reg, 1);
4579 break;
4580 case 0xc6:
4581 case 0xc7: /* mov Ev, Iv */
4582 if ((b & 1) == 0)
4583 ot = OT_BYTE;
4584 else
4585 ot = dflag + OT_WORD;
4586 modrm = ldub_code(s->pc++);
4587 mod = (modrm >> 6) & 3;
4588 if (mod != 3) {
4589 s->rip_offset = insn_const_size(ot);
4590 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4591 }
4592 val = insn_get(s, ot);
4593 gen_op_movl_T0_im(val);
4594 if (mod != 3)
4595 gen_op_st_T0_A0(ot + s->mem_index);
4596 else
4597 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
4598 break;
4599 case 0x8a:
4600 case 0x8b: /* mov Ev, Gv */
4601 if ((b & 1) == 0)
4602 ot = OT_BYTE;
4603 else
4604 ot = OT_WORD + dflag;
4605 modrm = ldub_code(s->pc++);
4606 reg = ((modrm >> 3) & 7) | rex_r;
4607
4608 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4609 gen_op_mov_reg_T0(ot, reg);
4610 break;
4611 case 0x8e: /* mov seg, Gv */
4612 modrm = ldub_code(s->pc++);
4613 reg = (modrm >> 3) & 7;
4614 if (reg >= 6 || reg == R_CS)
4615 goto illegal_op;
4616 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
4617 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
4618 if (reg == R_SS) {
4619 /* if reg == SS, inhibit interrupts/trace */
4620 /* If several instructions disable interrupts, only the
4621 _first_ does it */
4622 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
4623 tcg_gen_helper_0_0(helper_set_inhibit_irq);
4624 s->tf = 0;
4625 }
4626 if (s->is_jmp) {
4627 gen_jmp_im(s->pc - s->cs_base);
4628 gen_eob(s);
4629 }
4630 break;
4631 case 0x8c: /* mov Gv, seg */
4632 modrm = ldub_code(s->pc++);
4633 reg = (modrm >> 3) & 7;
4634 mod = (modrm >> 6) & 3;
4635 if (reg >= 6)
4636 goto illegal_op;
4637 gen_op_movl_T0_seg(reg);
4638 if (mod == 3)
4639 ot = OT_WORD + dflag;
4640 else
4641 ot = OT_WORD;
4642 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
4643 break;
4644
4645 case 0x1b6: /* movzbS Gv, Eb */
4646 case 0x1b7: /* movzwS Gv, Eb */
4647 case 0x1be: /* movsbS Gv, Eb */
4648 case 0x1bf: /* movswS Gv, Eb */
4649 {
4650 int d_ot;
4651 /* d_ot is the size of destination */
4652 d_ot = dflag + OT_WORD;
4653 /* ot is the size of source */
4654 ot = (b & 1) + OT_BYTE;
4655 modrm = ldub_code(s->pc++);
4656 reg = ((modrm >> 3) & 7) | rex_r;
4657 mod = (modrm >> 6) & 3;
4658 rm = (modrm & 7) | REX_B(s);
4659
4660 if (mod == 3) {
4661 gen_op_mov_TN_reg(ot, 0, rm);
4662 switch(ot | (b & 8)) {
4663 case OT_BYTE:
4664 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4665 break;
4666 case OT_BYTE | 8:
4667 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4668 break;
4669 case OT_WORD:
4670 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4671 break;
4672 default:
4673 case OT_WORD | 8:
4674 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4675 break;
4676 }
4677 gen_op_mov_reg_T0(d_ot, reg);
4678 } else {
4679 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4680 if (b & 8) {
4681 gen_op_lds_T0_A0(ot + s->mem_index);
4682 } else {
4683 gen_op_ldu_T0_A0(ot + s->mem_index);
4684 }
4685 gen_op_mov_reg_T0(d_ot, reg);
4686 }
4687 }
4688 break;
4689
4690 case 0x8d: /* lea */
4691 ot = dflag + OT_WORD;
4692 modrm = ldub_code(s->pc++);
4693 mod = (modrm >> 6) & 3;
4694 if (mod == 3)
4695 goto illegal_op;
4696 reg = ((modrm >> 3) & 7) | rex_r;
4697 /* we must ensure that no segment is added */
4698 s->override = -1;
4699 val = s->addseg;
4700 s->addseg = 0;
4701 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4702 s->addseg = val;
4703 gen_op_mov_reg_A0(ot - OT_WORD, reg);
4704 break;
4705
4706 case 0xa0: /* mov EAX, Ov */
4707 case 0xa1:
4708 case 0xa2: /* mov Ov, EAX */
4709 case 0xa3:
4710 {
4711 target_ulong offset_addr;
4712
4713 if ((b & 1) == 0)
4714 ot = OT_BYTE;
4715 else
4716 ot = dflag + OT_WORD;
4717 #ifdef TARGET_X86_64
4718 if (s->aflag == 2) {
4719 offset_addr = ldq_code(s->pc);
4720 s->pc += 8;
4721 gen_op_movq_A0_im(offset_addr);
4722 } else
4723 #endif
4724 {
4725 if (s->aflag) {
4726 offset_addr = insn_get(s, OT_LONG);
4727 } else {
4728 offset_addr = insn_get(s, OT_WORD);
4729 }
4730 gen_op_movl_A0_im(offset_addr);
4731 }
4732 gen_add_A0_ds_seg(s);
4733 if ((b & 2) == 0) {
4734 gen_op_ld_T0_A0(ot + s->mem_index);
4735 gen_op_mov_reg_T0(ot, R_EAX);
4736 } else {
4737 gen_op_mov_TN_reg(ot, 0, R_EAX);
4738 gen_op_st_T0_A0(ot + s->mem_index);
4739 }
4740 }
4741 break;
4742 case 0xd7: /* xlat */
4743 #ifdef TARGET_X86_64
4744 if (s->aflag == 2) {
4745 gen_op_movq_A0_reg(R_EBX);
4746 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
4747 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
4748 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
4749 } else
4750 #endif
4751 {
4752 gen_op_movl_A0_reg(R_EBX);
4753 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4754 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
4755 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
4756 if (s->aflag == 0)
4757 gen_op_andl_A0_ffff();
4758 else
4759 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
4760 }
4761 gen_add_A0_ds_seg(s);
4762 gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
4763 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
4764 break;
4765 case 0xb0 ... 0xb7: /* mov R, Ib */
4766 val = insn_get(s, OT_BYTE);
4767 gen_op_movl_T0_im(val);
4768 gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
4769 break;
4770 case 0xb8 ... 0xbf: /* mov R, Iv */
4771 #ifdef TARGET_X86_64
4772 if (dflag == 2) {
4773 uint64_t tmp;
4774 /* 64 bit case */
4775 tmp = ldq_code(s->pc);
4776 s->pc += 8;
4777 reg = (b & 7) | REX_B(s);
4778 gen_movtl_T0_im(tmp);
4779 gen_op_mov_reg_T0(OT_QUAD, reg);
4780 } else
4781 #endif
4782 {
4783 ot = dflag ? OT_LONG : OT_WORD;
4784 val = insn_get(s, ot);
4785 reg = (b & 7) | REX_B(s);
4786 gen_op_movl_T0_im(val);
4787 gen_op_mov_reg_T0(ot, reg);
4788 }
4789 break;
4790
4791 case 0x91 ... 0x97: /* xchg R, EAX */
4792 ot = dflag + OT_WORD;
4793 reg = (b & 7) | REX_B(s);
4794 rm = R_EAX;
4795 goto do_xchg_reg;
4796 case 0x86:
4797 case 0x87: /* xchg Ev, Gv */
4798 if ((b & 1) == 0)
4799 ot = OT_BYTE;
4800 else
4801 ot = dflag + OT_WORD;
4802 modrm = ldub_code(s->pc++);
4803 reg = ((modrm >> 3) & 7) | rex_r;
4804 mod = (modrm >> 6) & 3;
4805 if (mod == 3) {
4806 rm = (modrm & 7) | REX_B(s);
4807 do_xchg_reg:
4808 gen_op_mov_TN_reg(ot, 0, reg);
4809 gen_op_mov_TN_reg(ot, 1, rm);
4810 gen_op_mov_reg_T0(ot, rm);
4811 gen_op_mov_reg_T1(ot, reg);
4812 } else {
4813 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4814 gen_op_mov_TN_reg(ot, 0, reg);
4815 /* for xchg, lock is implicit */
4816 if (!(prefixes & PREFIX_LOCK))
4817 tcg_gen_helper_0_0(helper_lock);
4818 gen_op_ld_T1_A0(ot + s->mem_index);
4819 gen_op_st_T0_A0(ot + s->mem_index);
4820 if (!(prefixes & PREFIX_LOCK))
4821 tcg_gen_helper_0_0(helper_unlock);
4822 gen_op_mov_reg_T1(ot, reg);
4823 }
4824 break;
4825 case 0xc4: /* les Gv */
4826 if (CODE64(s))
4827 goto illegal_op;
4828 op = R_ES;
4829 goto do_lxx;
4830 case 0xc5: /* lds Gv */
4831 if (CODE64(s))
4832 goto illegal_op;
4833 op = R_DS;
4834 goto do_lxx;
4835 case 0x1b2: /* lss Gv */
4836 op = R_SS;
4837 goto do_lxx;
4838 case 0x1b4: /* lfs Gv */
4839 op = R_FS;
4840 goto do_lxx;
4841 case 0x1b5: /* lgs Gv */
4842 op = R_GS;
4843 do_lxx:
4844 ot = dflag ? OT_LONG : OT_WORD;
4845 modrm = ldub_code(s->pc++);
4846 reg = ((modrm >> 3) & 7) | rex_r;
4847 mod = (modrm >> 6) & 3;
4848 if (mod == 3)
4849 goto illegal_op;
4850 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4851 gen_op_ld_T1_A0(ot + s->mem_index);
4852 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4853 /* load the segment first to handle exceptions properly */
4854 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4855 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
4856 /* then put the data */
4857 gen_op_mov_reg_T1(ot, reg);
4858 if (s->is_jmp) {
4859 gen_jmp_im(s->pc - s->cs_base);
4860 gen_eob(s);
4861 }
4862 break;
4863
4864 /************************/
4865 /* shifts */
4866 case 0xc0:
4867 case 0xc1:
4868 /* shift Ev,Ib */
4869 shift = 2;
4870 grp2:
4871 {
4872 if ((b & 1) == 0)
4873 ot = OT_BYTE;
4874 else
4875 ot = dflag + OT_WORD;
4876
4877 modrm = ldub_code(s->pc++);
4878 mod = (modrm >> 6) & 3;
4879 op = (modrm >> 3) & 7;
4880
4881 if (mod != 3) {
4882 if (shift == 2) {
4883 s->rip_offset = 1;
4884 }
4885 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4886 opreg = OR_TMP0;
4887 } else {
4888 opreg = (modrm & 7) | REX_B(s);
4889 }
4890
4891 /* simpler op */
4892 if (shift == 0) {
4893 gen_shift(s, op, ot, opreg, OR_ECX);
4894 } else {
4895 if (shift == 2) {
4896 shift = ldub_code(s->pc++);
4897 }
4898 gen_shifti(s, op, ot, opreg, shift);
4899 }
4900 }
4901 break;
4902 case 0xd0:
4903 case 0xd1:
4904 /* shift Ev,1 */
4905 shift = 1;
4906 goto grp2;
4907 case 0xd2:
4908 case 0xd3:
4909 /* shift Ev,cl */
4910 shift = 0;
4911 goto grp2;
4912
4913 case 0x1a4: /* shld imm */
4914 op = 0;
4915 shift = 1;
4916 goto do_shiftd;
4917 case 0x1a5: /* shld cl */
4918 op = 0;
4919 shift = 0;
4920 goto do_shiftd;
4921 case 0x1ac: /* shrd imm */
4922 op = 1;
4923 shift = 1;
4924 goto do_shiftd;
4925 case 0x1ad: /* shrd cl */
4926 op = 1;
4927 shift = 0;
4928 do_shiftd:
4929 ot = dflag + OT_WORD;
4930 modrm = ldub_code(s->pc++);
4931 mod = (modrm >> 6) & 3;
4932 rm = (modrm & 7) | REX_B(s);
4933 reg = ((modrm >> 3) & 7) | rex_r;
4934 if (mod != 3) {
4935 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4936 opreg = OR_TMP0;
4937 } else {
4938 opreg = rm;
4939 }
4940 gen_op_mov_TN_reg(ot, 1, reg);
4941
4942 if (shift) {
4943 val = ldub_code(s->pc++);
4944 tcg_gen_movi_tl(cpu_T3, val);
4945 } else {
4946 tcg_gen_ld_tl(cpu_T3, cpu_env, offsetof(CPUState, regs[R_ECX]));
4947 }
4948 gen_shiftd_rm_T1_T3(s, ot, opreg, op);
4949 break;
4950
4951 /************************/
4952 /* floats */
4953 case 0xd8 ... 0xdf:
4954 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
4955 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
4956 /* XXX: what to do if illegal op ? */
4957 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
4958 break;
4959 }
4960 modrm = ldub_code(s->pc++);
4961 mod = (modrm >> 6) & 3;
4962 rm = modrm & 7;
4963 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
4964 if (mod != 3) {
4965 /* memory op */
4966 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4967 switch(op) {
4968 case 0x00 ... 0x07: /* fxxxs */
4969 case 0x10 ... 0x17: /* fixxxl */
4970 case 0x20 ... 0x27: /* fxxxl */
4971 case 0x30 ... 0x37: /* fixxx */
4972 {
4973 int op1;
4974 op1 = op & 7;
4975
4976 switch(op >> 4) {
4977 case 0:
4978 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
4979 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4980 tcg_gen_helper_0_1(helper_flds_FT0, cpu_tmp2_i32);
4981 break;
4982 case 1:
4983 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
4984 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4985 tcg_gen_helper_0_1(helper_fildl_FT0, cpu_tmp2_i32);
4986 break;
4987 case 2:
4988 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
4989 (s->mem_index >> 2) - 1);
4990 tcg_gen_helper_0_1(helper_fldl_FT0, cpu_tmp1_i64);
4991 break;
4992 case 3:
4993 default:
4994 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
4995 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4996 tcg_gen_helper_0_1(helper_fildl_FT0, cpu_tmp2_i32);
4997 break;
4998 }
4999
5000 tcg_gen_helper_0_0(helper_fp_arith_ST0_FT0[op1]);
5001 if (op1 == 3) {
5002 /* fcomp needs pop */
5003 tcg_gen_helper_0_0(helper_fpop);
5004 }
5005 }
5006 break;
5007 case 0x08: /* flds */
5008 case 0x0a: /* fsts */
5009 case 0x0b: /* fstps */
5010 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5011 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5012 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5013 switch(op & 7) {
5014 case 0:
5015 switch(op >> 4) {
5016 case 0:
5017 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5018 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5019 tcg_gen_helper_0_1(helper_flds_ST0, cpu_tmp2_i32);
5020 break;
5021 case 1:
5022 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5023 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5024 tcg_gen_helper_0_1(helper_fildl_ST0, cpu_tmp2_i32);
5025 break;
5026 case 2:
5027 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5028 (s->mem_index >> 2) - 1);
5029 tcg_gen_helper_0_1(helper_fldl_ST0, cpu_tmp1_i64);
5030 break;
5031 case 3:
5032 default:
5033 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5034 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5035 tcg_gen_helper_0_1(helper_fildl_ST0, cpu_tmp2_i32);
5036 break;
5037 }
5038 break;
5039 case 1:
5040 /* XXX: the corresponding CPUID bit must be tested ! */
5041 switch(op >> 4) {
5042 case 1:
5043 tcg_gen_helper_1_0(helper_fisttl_ST0, cpu_tmp2_i32);
5044 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5045 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5046 break;
5047 case 2:
5048 tcg_gen_helper_1_0(helper_fisttll_ST0, cpu_tmp1_i64);
5049 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5050 (s->mem_index >> 2) - 1);
5051 break;
5052 case 3:
5053 default:
5054 tcg_gen_helper_1_0(helper_fistt_ST0, cpu_tmp2_i32);
5055 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5056 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5057 break;
5058 }
5059 tcg_gen_helper_0_0(helper_fpop);
5060 break;
5061 default:
5062 switch(op >> 4) {
5063 case 0:
5064 tcg_gen_helper_1_0(helper_fsts_ST0, cpu_tmp2_i32);
5065 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5066 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5067 break;
5068 case 1:
5069 tcg_gen_helper_1_0(helper_fistl_ST0, cpu_tmp2_i32);
5070 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5071 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5072 break;
5073 case 2:
5074 tcg_gen_helper_1_0(helper_fstl_ST0, cpu_tmp1_i64);
5075 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5076 (s->mem_index >> 2) - 1);
5077 break;
5078 case 3:
5079 default:
5080 tcg_gen_helper_1_0(helper_fist_ST0, cpu_tmp2_i32);
5081 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5082 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5083 break;
5084 }
5085 if ((op & 7) == 3)
5086 tcg_gen_helper_0_0(helper_fpop);
5087 break;
5088 }
5089 break;
5090 case 0x0c: /* fldenv mem */
5091 if (s->cc_op != CC_OP_DYNAMIC)
5092 gen_op_set_cc_op(s->cc_op);
5093 gen_jmp_im(pc_start - s->cs_base);
5094 tcg_gen_helper_0_2(helper_fldenv,
5095 cpu_A0, tcg_const_i32(s->dflag));
5096 break;
5097 case 0x0d: /* fldcw mem */
5098 gen_op_ld_T0_A0(OT_WORD + s->mem_index);
5099 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5100 tcg_gen_helper_0_1(helper_fldcw, cpu_tmp2_i32);
5101 break;
5102 case 0x0e: /* fnstenv mem */
5103 if (s->cc_op != CC_OP_DYNAMIC)
5104 gen_op_set_cc_op(s->cc_op);
5105 gen_jmp_im(pc_start - s->cs_base);
5106 tcg_gen_helper_0_2(helper_fstenv,
5107 cpu_A0, tcg_const_i32(s->dflag));
5108 break;
5109 case 0x0f: /* fnstcw mem */
5110 tcg_gen_helper_1_0(helper_fnstcw, cpu_tmp2_i32);
5111 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5112 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5113 break;
5114 case 0x1d: /* fldt mem */
5115 if (s->cc_op != CC_OP_DYNAMIC)
5116 gen_op_set_cc_op(s->cc_op);
5117 gen_jmp_im(pc_start - s->cs_base);
5118 tcg_gen_helper_0_1(helper_fldt_ST0, cpu_A0);
5119 break;
5120 case 0x1f: /* fstpt mem */
5121 if (s->cc_op != CC_OP_DYNAMIC)
5122 gen_op_set_cc_op(s->cc_op);
5123 gen_jmp_im(pc_start - s->cs_base);
5124 tcg_gen_helper_0_1(helper_fstt_ST0, cpu_A0);
5125 tcg_gen_helper_0_0(helper_fpop);
5126 break;
5127 case 0x2c: /* frstor mem */
5128 if (s->cc_op != CC_OP_DYNAMIC)
5129 gen_op_set_cc_op(s->cc_op);
5130 gen_jmp_im(pc_start - s->cs_base);
5131 tcg_gen_helper_0_2(helper_frstor,
5132 cpu_A0, tcg_const_i32(s->dflag));
5133 break;
5134 case 0x2e: /* fnsave mem */
5135 if (s->cc_op != CC_OP_DYNAMIC)
5136 gen_op_set_cc_op(s->cc_op);
5137 gen_jmp_im(pc_start - s->cs_base);
5138 tcg_gen_helper_0_2(helper_fsave,
5139 cpu_A0, tcg_const_i32(s->dflag));
5140 break;
5141 case 0x2f: /* fnstsw mem */
5142 tcg_gen_helper_1_0(helper_fnstsw, cpu_tmp2_i32);
5143 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5144 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5145 break;
5146 case 0x3c: /* fbld */
5147 if (s->cc_op != CC_OP_DYNAMIC)
5148 gen_op_set_cc_op(s->cc_op);
5149 gen_jmp_im(pc_start - s->cs_base);
5150 tcg_gen_helper_0_1(helper_fbld_ST0, cpu_A0);
5151 break;
5152 case 0x3e: /* fbstp */
5153 if (s->cc_op != CC_OP_DYNAMIC)
5154 gen_op_set_cc_op(s->cc_op);
5155 gen_jmp_im(pc_start - s->cs_base);
5156 tcg_gen_helper_0_1(helper_fbst_ST0, cpu_A0);
5157 tcg_gen_helper_0_0(helper_fpop);
5158 break;
5159 case 0x3d: /* fildll */
5160 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5161 (s->mem_index >> 2) - 1);
5162 tcg_gen_helper_0_1(helper_fildll_ST0, cpu_tmp1_i64);
5163 break;
5164 case 0x3f: /* fistpll */
5165 tcg_gen_helper_1_0(helper_fistll_ST0, cpu_tmp1_i64);
5166 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5167 (s->mem_index >> 2) - 1);
5168 tcg_gen_helper_0_0(helper_fpop);
5169 break;
5170 default:
5171 goto illegal_op;
5172 }
5173 } else {
5174 /* register float ops */
5175 opreg = rm;
5176
5177 switch(op) {
5178 case 0x08: /* fld sti */
5179 tcg_gen_helper_0_0(helper_fpush);
5180 tcg_gen_helper_0_1(helper_fmov_ST0_STN, tcg_const_i32((opreg + 1) & 7));
5181 break;
5182 case 0x09: /* fxchg sti */
5183 case 0x29: /* fxchg4 sti, undocumented op */
5184 case 0x39: /* fxchg7 sti, undocumented op */
5185 tcg_gen_helper_0_1(helper_fxchg_ST0_STN, tcg_const_i32(opreg));
5186 break;
5187 case 0x0a: /* grp d9/2 */
5188 switch(rm) {
5189 case 0: /* fnop */
5190 /* check exceptions (FreeBSD FPU probe) */
5191 if (s->cc_op != CC_OP_DYNAMIC)
5192 gen_op_set_cc_op(s->cc_op);
5193 gen_jmp_im(pc_start - s->cs_base);
5194 tcg_gen_helper_0_0(helper_fwait);
5195 break;
5196 default:
5197 goto illegal_op;
5198 }
5199 break;
5200 case 0x0c: /* grp d9/4 */
5201 switch(rm) {
5202 case 0: /* fchs */
5203 tcg_gen_helper_0_0(helper_fchs_ST0);
5204 break;
5205 case 1: /* fabs */
5206 tcg_gen_helper_0_0(helper_fabs_ST0);
5207 break;
5208 case 4: /* ftst */
5209 tcg_gen_helper_0_0(helper_fldz_FT0);
5210 tcg_gen_helper_0_0(helper_fcom_ST0_FT0);
5211 break;
5212 case 5: /* fxam */
5213 tcg_gen_helper_0_0(helper_fxam_ST0);
5214 break;
5215 default:
5216 goto illegal_op;
5217 }
5218 break;
5219 case 0x0d: /* grp d9/5 */
5220 {
5221 switch(rm) {
5222 case 0:
5223 tcg_gen_helper_0_0(helper_fpush);
5224 tcg_gen_helper_0_0(helper_fld1_ST0);
5225 break;
5226 case 1:
5227 tcg_gen_helper_0_0(helper_fpush);
5228 tcg_gen_helper_0_0(helper_fldl2t_ST0);
5229 break;
5230 case 2:
5231 tcg_gen_helper_0_0(helper_fpush);
5232 tcg_gen_helper_0_0(helper_fldl2e_ST0);
5233 break;
5234 case 3:
5235 tcg_gen_helper_0_0(helper_fpush);
5236 tcg_gen_helper_0_0(helper_fldpi_ST0);
5237 break;
5238 case 4:
5239 tcg_gen_helper_0_0(helper_fpush);
5240 tcg_gen_helper_0_0(helper_fldlg2_ST0);
5241 break;
5242 case 5:
5243 tcg_gen_helper_0_0(helper_fpush);
5244 tcg_gen_helper_0_0(helper_fldln2_ST0);
5245 break;
5246 case 6:
5247 tcg_gen_helper_0_0(helper_fpush);
5248 tcg_gen_helper_0_0(helper_fldz_ST0);
5249 break;
5250 default:
5251 goto illegal_op;
5252 }
5253 }
5254 break;
5255 case 0x0e: /* grp d9/6 */
5256 switch(rm) {
5257 case 0: /* f2xm1 */
5258 tcg_gen_helper_0_0(helper_f2xm1);
5259 break;
5260 case 1: /* fyl2x */
5261 tcg_gen_helper_0_0(helper_fyl2x);
5262 break;
5263 case 2: /* fptan */
5264 tcg_gen_helper_0_0(helper_fptan);
5265 break;
5266 case 3: /* fpatan */
5267 tcg_gen_helper_0_0(helper_fpatan);
5268 break;
5269 case 4: /* fxtract */
5270 tcg_gen_helper_0_0(helper_fxtract);
5271 break;
5272 case 5: /* fprem1 */
5273 tcg_gen_helper_0_0(helper_fprem1);
5274 break;
5275 case 6: /* fdecstp */
5276 tcg_gen_helper_0_0(helper_fdecstp);
5277 break;
5278 default:
5279 case 7: /* fincstp */
5280 tcg_gen_helper_0_0(helper_fincstp);
5281 break;
5282 }
5283 break;
5284 case 0x0f: /* grp d9/7 */
5285 switch(rm) {
5286 case 0: /* fprem */
5287 tcg_gen_helper_0_0(helper_fprem);
5288 break;
5289 case 1: /* fyl2xp1 */
5290 tcg_gen_helper_0_0(helper_fyl2xp1);
5291 break;
5292 case 2: /* fsqrt */
5293 tcg_gen_helper_0_0(helper_fsqrt);
5294 break;
5295 case 3: /* fsincos */
5296 tcg_gen_helper_0_0(helper_fsincos);
5297 break;
5298 case 5: /* fscale */
5299 tcg_gen_helper_0_0(helper_fscale);
5300 break;
5301 case 4: /* frndint */
5302 tcg_gen_helper_0_0(helper_frndint);
5303 break;
5304 case 6: /* fsin */
5305 tcg_gen_helper_0_0(helper_fsin);
5306 break;
5307 default:
5308 case 7: /* fcos */
5309 tcg_gen_helper_0_0(helper_fcos);
5310 break;
5311 }
5312 break;
5313 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5314 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5315 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5316 {
5317 int op1;
5318
5319 op1 = op & 7;
5320 if (op >= 0x20) {
5321 tcg_gen_helper_0_1(helper_fp_arith_STN_ST0[op1], tcg_const_i32(opreg));
5322 if (op >= 0x30)
5323 tcg_gen_helper_0_0(helper_fpop);
5324 } else {
5325 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
5326 tcg_gen_helper_0_0(helper_fp_arith_ST0_FT0[op1]);
5327 }
5328 }
5329 break;
5330 case 0x02: /* fcom */
5331 case 0x22: /* fcom2, undocumented op */
5332 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
5333 tcg_gen_helper_0_0(helper_fcom_ST0_FT0);
5334 break;
5335 case 0x03: /* fcomp */
5336 case 0x23: /* fcomp3, undocumented op */
5337 case 0x32: /* fcomp5, undocumented op */
5338 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
5339 tcg_gen_helper_0_0(helper_fcom_ST0_FT0);
5340 tcg_gen_helper_0_0(helper_fpop);
5341 break;
5342 case 0x15: /* da/5 */
5343 switch(rm) {
5344 case 1: /* fucompp */
5345 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(1));
5346 tcg_gen_helper_0_0(helper_fucom_ST0_FT0);
5347 tcg_gen_helper_0_0(helper_fpop);
5348 tcg_gen_helper_0_0(helper_fpop);
5349 break;
5350 default:
5351 goto illegal_op;
5352 }
5353 break;
5354 case 0x1c:
5355 switch(rm) {
5356 case 0: /* feni (287 only, just do nop here) */
5357 break;
5358 case 1: /* fdisi (287 only, just do nop here) */
5359 break;
5360 case 2: /* fclex */
5361 tcg_gen_helper_0_0(helper_fclex);
5362 break;
5363 case 3: /* fninit */
5364 tcg_gen_helper_0_0(helper_fninit);
5365 break;
5366 case 4: /* fsetpm (287 only, just do nop here) */
5367 break;
5368 default:
5369 goto illegal_op;
5370 }
5371 break;
5372 case 0x1d: /* fucomi */
5373 if (s->cc_op != CC_OP_DYNAMIC)
5374 gen_op_set_cc_op(s->cc_op);
5375 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
5376 tcg_gen_helper_0_0(helper_fucomi_ST0_FT0);
5377 s->cc_op = CC_OP_EFLAGS;
5378 break;
5379 case 0x1e: /* fcomi */
5380 if (s->cc_op != CC_OP_DYNAMIC)
5381 gen_op_set_cc_op(s->cc_op);
5382 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
5383 tcg_gen_helper_0_0(helper_fcomi_ST0_FT0);
5384 s->cc_op = CC_OP_EFLAGS;
5385 break;
5386 case 0x28: /* ffree sti */
5387 tcg_gen_helper_0_1(helper_ffree_STN, tcg_const_i32(opreg));
5388 break;
5389 case 0x2a: /* fst sti */
5390 tcg_gen_helper_0_1(helper_fmov_STN_ST0, tcg_const_i32(opreg));
5391 break;
5392 case 0x2b: /* fstp sti */
5393 case 0x0b: /* fstp1 sti, undocumented op */
5394 case 0x3a: /* fstp8 sti, undocumented op */
5395 case 0x3b: /* fstp9 sti, undocumented op */
5396 tcg_gen_helper_0_1(helper_fmov_STN_ST0, tcg_const_i32(opreg));
5397 tcg_gen_helper_0_0(helper_fpop);
5398 break;
5399 case 0x2c: /* fucom st(i) */
5400 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
5401 tcg_gen_helper_0_0(helper_fucom_ST0_FT0);
5402 break;
5403 case 0x2d: /* fucomp st(i) */
5404 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
5405 tcg_gen_helper_0_0(helper_fucom_ST0_FT0);
5406 tcg_gen_helper_0_0(helper_fpop);
5407 break;
5408 case 0x33: /* de/3 */
5409 switch(rm) {
5410 case 1: /* fcompp */
5411 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(1));
5412 tcg_gen_helper_0_0(helper_fcom_ST0_FT0);
5413 tcg_gen_helper_0_0(helper_fpop);
5414 tcg_gen_helper_0_0(helper_fpop);
5415 break;
5416 default:
5417 goto illegal_op;
5418 }
5419 break;
5420 case 0x38: /* ffreep sti, undocumented op */
5421 tcg_gen_helper_0_1(helper_ffree_STN, tcg_const_i32(opreg));
5422 tcg_gen_helper_0_0(helper_fpop);
5423 break;
5424 case 0x3c: /* df/4 */
5425 switch(rm) {
5426 case 0:
5427 tcg_gen_helper_1_0(helper_fnstsw, cpu_tmp2_i32);
5428 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5429 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5430 break;
5431 default:
5432 goto illegal_op;
5433 }
5434 break;
5435 case 0x3d: /* fucomip */
5436 if (s->cc_op != CC_OP_DYNAMIC)
5437 gen_op_set_cc_op(s->cc_op);
5438 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
5439 tcg_gen_helper_0_0(helper_fucomi_ST0_FT0);
5440 tcg_gen_helper_0_0(helper_fpop);
5441 s->cc_op = CC_OP_EFLAGS;
5442 break;
5443 case 0x3e: /* fcomip */
5444 if (s->cc_op != CC_OP_DYNAMIC)
5445 gen_op_set_cc_op(s->cc_op);
5446 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
5447 tcg_gen_helper_0_0(helper_fcomi_ST0_FT0);
5448 tcg_gen_helper_0_0(helper_fpop);
5449 s->cc_op = CC_OP_EFLAGS;
5450 break;
5451 case 0x10 ... 0x13: /* fcmovxx */
5452 case 0x18 ... 0x1b:
5453 {
5454 int op1, l1;
5455 const static uint8_t fcmov_cc[8] = {
5456 (JCC_B << 1),
5457 (JCC_Z << 1),
5458 (JCC_BE << 1),
5459 (JCC_P << 1),
5460 };
5461 op1 = fcmov_cc[op & 3] | ((op >> 3) & 1);
5462 gen_setcc(s, op1);
5463 l1 = gen_new_label();
5464 tcg_gen_brcond_tl(TCG_COND_EQ, cpu_T[0], tcg_const_tl(0), l1);
5465 tcg_gen_helper_0_1(helper_fmov_ST0_STN, tcg_const_i32(opreg));
5466 gen_set_label(l1);
5467 }
5468 break;
5469 default:
5470 goto illegal_op;
5471 }
5472 }
5473 break;
5474 /************************/
5475 /* string ops */
5476
5477 case 0xa4: /* movsS */
5478 case 0xa5:
5479 if ((b & 1) == 0)
5480 ot = OT_BYTE;
5481 else
5482 ot = dflag + OT_WORD;
5483
5484 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5485 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5486 } else {
5487 gen_movs(s, ot);
5488 }
5489 break;
5490
5491 case 0xaa: /* stosS */
5492 case 0xab:
5493 if ((b & 1) == 0)
5494 ot = OT_BYTE;
5495 else
5496 ot = dflag + OT_WORD;
5497
5498 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5499 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5500 } else {
5501 gen_stos(s, ot);
5502 }
5503 break;
5504 case 0xac: /* lodsS */
5505 case 0xad:
5506 if ((b & 1) == 0)
5507 ot = OT_BYTE;
5508 else
5509 ot = dflag + OT_WORD;
5510 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5511 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5512 } else {
5513 gen_lods(s, ot);
5514 }
5515 break;
5516 case 0xae: /* scasS */
5517 case 0xaf:
5518 if ((b & 1) == 0)
5519 ot = OT_BYTE;
5520 else
5521 ot = dflag + OT_WORD;
5522 if (prefixes & PREFIX_REPNZ) {
5523 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
5524 } else if (prefixes & PREFIX_REPZ) {
5525 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
5526 } else {
5527 gen_scas(s, ot);
5528 s->cc_op = CC_OP_SUBB + ot;
5529 }
5530 break;
5531
5532 case 0xa6: /* cmpsS */
5533 case 0xa7:
5534 if ((b & 1) == 0)
5535 ot = OT_BYTE;
5536 else
5537 ot = dflag + OT_WORD;
5538 if (prefixes & PREFIX_REPNZ) {
5539 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
5540 } else if (prefixes & PREFIX_REPZ) {
5541 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
5542 } else {
5543 gen_cmps(s, ot);
5544 s->cc_op = CC_OP_SUBB + ot;
5545 }
5546 break;
5547 case 0x6c: /* insS */
5548 case 0x6d:
5549 if ((b & 1) == 0)
5550 ot = OT_BYTE;
5551 else
5552 ot = dflag ? OT_LONG : OT_WORD;
5553 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
5554 gen_op_andl_T0_ffff();
5555 gen_check_io(s, ot, pc_start - s->cs_base,
5556 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
5557 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5558 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5559 } else {
5560 gen_ins(s, ot);
5561 }
5562 break;
5563 case 0x6e: /* outsS */
5564 case 0x6f:
5565 if ((b & 1) == 0)
5566 ot = OT_BYTE;
5567 else
5568 ot = dflag ? OT_LONG : OT_WORD;
5569 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
5570 gen_op_andl_T0_ffff();
5571 gen_check_io(s, ot, pc_start - s->cs_base,
5572 svm_is_rep(prefixes) | 4);
5573 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5574 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5575 } else {
5576 gen_outs(s, ot);
5577 }
5578 break;
5579
5580 /************************/
5581 /* port I/O */
5582
5583 case 0xe4:
5584 case 0xe5:
5585 if ((b & 1) == 0)
5586 ot = OT_BYTE;
5587 else
5588 ot = dflag ? OT_LONG : OT_WORD;
5589 val = ldub_code(s->pc++);
5590 gen_op_movl_T0_im(val);
5591 gen_check_io(s, ot, pc_start - s->cs_base,
5592 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
5593 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5594 tcg_gen_helper_1_1(helper_in_func[ot], cpu_T[1], cpu_tmp2_i32);
5595 gen_op_mov_reg_T1(ot, R_EAX);
5596 break;
5597 case 0xe6:
5598 case 0xe7:
5599 if ((b & 1) == 0)
5600 ot = OT_BYTE;
5601 else
5602 ot = dflag ? OT_LONG : OT_WORD;
5603 val = ldub_code(s->pc++);
5604 gen_op_movl_T0_im(val);
5605 gen_check_io(s, ot, pc_start - s->cs_base,
5606 svm_is_rep(prefixes));
5607 gen_op_mov_TN_reg(ot, 1, R_EAX);
5608
5609 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5610 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
5611 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5612 tcg_gen_helper_0_2(helper_out_func[ot], cpu_tmp2_i32, cpu_tmp3_i32);
5613 break;
5614 case 0xec:
5615 case 0xed:
5616 if ((b & 1) == 0)
5617 ot = OT_BYTE;
5618 else
5619 ot = dflag ? OT_LONG : OT_WORD;
5620 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
5621 gen_op_andl_T0_ffff();
5622 gen_check_io(s, ot, pc_start - s->cs_base,
5623 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
5624 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5625 tcg_gen_helper_1_1(helper_in_func[ot], cpu_T[1], cpu_tmp2_i32);
5626 gen_op_mov_reg_T1(ot, R_EAX);
5627 break;
5628 case 0xee:
5629 case 0xef:
5630 if ((b & 1) == 0)
5631 ot = OT_BYTE;
5632 else
5633 ot = dflag ? OT_LONG : OT_WORD;
5634 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
5635 gen_op_andl_T0_ffff();
5636 gen_check_io(s, ot, pc_start - s->cs_base,
5637 svm_is_rep(prefixes));
5638 gen_op_mov_TN_reg(ot, 1, R_EAX);
5639
5640 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5641 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
5642 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5643 tcg_gen_helper_0_2(helper_out_func[ot], cpu_tmp2_i32, cpu_tmp3_i32);
5644 break;
5645
5646 /************************/
5647 /* control */
5648 case 0xc2: /* ret im */
5649 val = ldsw_code(s->pc);
5650 s->pc += 2;
5651 gen_pop_T0(s);
5652 if (CODE64(s) && s->dflag)
5653 s->dflag = 2;
5654 gen_stack_update(s, val + (2 << s->dflag));
5655 if (s->dflag == 0)
5656 gen_op_andl_T0_ffff();
5657 gen_op_jmp_T0();
5658 gen_eob(s);
5659 break;
5660 case 0xc3: /* ret */
5661 gen_pop_T0(s);
5662 gen_pop_update(s);
5663 if (s->dflag == 0)
5664 gen_op_andl_T0_ffff();
5665 gen_op_jmp_T0();
5666 gen_eob(s);
5667 break;
5668 case 0xca: /* lret im */
5669 val = ldsw_code(s->pc);
5670 s->pc += 2;
5671 do_lret:
5672 if (s->pe && !s->vm86) {
5673 if (s->cc_op != CC_OP_DYNAMIC)
5674 gen_op_set_cc_op(s->cc_op);
5675 gen_jmp_im(pc_start - s->cs_base);
5676 tcg_gen_helper_0_2(helper_lret_protected,
5677 tcg_const_i32(s->dflag),
5678 tcg_const_i32(val));
5679 } else {
5680 gen_stack_A0(s);
5681 /* pop offset */
5682 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
5683 if (s->dflag == 0)
5684 gen_op_andl_T0_ffff();
5685 /* NOTE: keeping EIP updated is not a problem in case of
5686 exception */
5687 gen_op_jmp_T0();
5688 /* pop selector */
5689 gen_op_addl_A0_im(2 << s->dflag);
5690 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
5691 gen_op_movl_seg_T0_vm(R_CS);
5692 /* add stack offset */
5693 gen_stack_update(s, val + (4 << s->dflag));
5694 }
5695 gen_eob(s);
5696 break;
5697 case 0xcb: /* lret */
5698 val = 0;
5699 goto do_lret;
5700 case 0xcf: /* iret */
5701 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET))
5702 break;
5703 if (!s->pe) {
5704 /* real mode */
5705 tcg_gen_helper_0_1(helper_iret_real, tcg_const_i32(s->dflag));
5706 s->cc_op = CC_OP_EFLAGS;
5707 } else if (s->vm86) {
5708 if (s->iopl != 3) {
5709 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
5710 } else {
5711 tcg_gen_helper_0_1(helper_iret_real, tcg_const_i32(s->dflag));
5712 s->cc_op = CC_OP_EFLAGS;
5713 }
5714 } else {
5715 if (s->cc_op != CC_OP_DYNAMIC)
5716 gen_op_set_cc_op(s->cc_op);
5717 gen_jmp_im(pc_start - s->cs_base);
5718 tcg_gen_helper_0_2(helper_iret_protected,
5719 tcg_const_i32(s->dflag),
5720 tcg_const_i32(s->pc - s->cs_base));
5721 s->cc_op = CC_OP_EFLAGS;
5722 }
5723 gen_eob(s);
5724 break;
5725 case 0xe8: /* call im */
5726 {
5727 if (dflag)
5728 tval = (int32_t)insn_get(s, OT_LONG);
5729 else
5730 tval = (int16_t)insn_get(s, OT_WORD);
5731 next_eip = s->pc - s->cs_base;
5732 tval += next_eip;
5733 if (s->dflag == 0)
5734 tval &= 0xffff;
5735 gen_movtl_T0_im(next_eip);
5736 gen_push_T0(s);
5737 gen_jmp(s, tval);
5738 }
5739 break;
5740 case 0x9a: /* lcall im */
5741 {
5742 unsigned int selector, offset;
5743
5744 if (CODE64(s))
5745 goto illegal_op;
5746 ot = dflag ? OT_LONG : OT_WORD;
5747 offset = insn_get(s, ot);
5748 selector = insn_get(s, OT_WORD);
5749
5750 gen_op_movl_T0_im(selector);
5751 gen_op_movl_T1_imu(offset);
5752 }
5753 goto do_lcall;
5754 case 0xe9: /* jmp im */
5755 if (dflag)
5756 tval = (int32_t)insn_get(s, OT_LONG);
5757 else
5758 tval = (int16_t)insn_get(s, OT_WORD);
5759 tval += s->pc - s->cs_base;
5760 if (s->dflag == 0)
5761 tval &= 0xffff;
5762 gen_jmp(s, tval);
5763 break;
5764 case 0xea: /* ljmp im */
5765 {
5766 unsigned int selector, offset;
5767
5768 if (CODE64(s))
5769 goto illegal_op;
5770 ot = dflag ? OT_LONG : OT_WORD;
5771 offset = insn_get(s, ot);
5772 selector = insn_get(s, OT_WORD);
5773
5774 gen_op_movl_T0_im(selector);
5775 gen_op_movl_T1_imu(offset);
5776 }
5777 goto do_ljmp;
5778 case 0xeb: /* jmp Jb */
5779 tval = (int8_t)insn_get(s, OT_BYTE);
5780 tval += s->pc - s->cs_base;
5781 if (s->dflag == 0)
5782 tval &= 0xffff;
5783 gen_jmp(s, tval);
5784 break;
5785 case 0x70 ... 0x7f: /* jcc Jb */
5786 tval = (int8_t)insn_get(s, OT_BYTE);
5787 goto do_jcc;
5788 case 0x180 ... 0x18f: /* jcc Jv */
5789 if (dflag) {
5790 tval = (int32_t)insn_get(s, OT_LONG);
5791 } else {
5792 tval = (int16_t)insn_get(s, OT_WORD);
5793 }
5794 do_jcc:
5795 next_eip = s->pc - s->cs_base;
5796 tval += next_eip;
5797 if (s->dflag == 0)
5798 tval &= 0xffff;
5799 gen_jcc(s, b, tval, next_eip);
5800 break;
5801
5802 case 0x190 ... 0x19f: /* setcc Gv */
5803 modrm = ldub_code(s->pc++);
5804 gen_setcc(s, b);
5805 gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1);
5806 break;
5807 case 0x140 ... 0x14f: /* cmov Gv, Ev */
5808 {
5809 int l1;
5810 ot = dflag + OT_WORD;
5811 modrm = ldub_code(s->pc++);
5812 reg = ((modrm >> 3) & 7) | rex_r;
5813 mod = (modrm >> 6) & 3;
5814 if (mod != 3) {
5815 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5816 gen_op_ld_T1_A0(ot + s->mem_index);
5817 } else {
5818 rm = (modrm & 7) | REX_B(s);
5819 gen_op_mov_TN_reg(ot, 1, rm);
5820 }
5821 if (s->cc_op != CC_OP_DYNAMIC)
5822 gen_op_set_cc_op(s->cc_op);
5823 #ifdef TARGET_X86_64
5824 if (ot == OT_LONG) {
5825 /* XXX: specific Intel behaviour ? */
5826 l1 = gen_new_label();
5827 gen_jcc1(s, s->cc_op, b ^ 1, l1);
5828 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
5829 gen_set_label(l1);
5830 tcg_gen_movi_tl(cpu_tmp0, 0);
5831 tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
5832 } else
5833 #endif
5834 {
5835 l1 = gen_new_label();
5836 gen_jcc1(s, s->cc_op, b ^ 1, l1);
5837 gen_op_mov_reg_T1(ot, reg);
5838 gen_set_label(l1);
5839 }
5840 }
5841 break;
5842
5843 /************************/
5844 /* flags */
5845 case 0x9c: /* pushf */
5846 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF))
5847 break;
5848 if (s->vm86 && s->iopl != 3) {
5849 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
5850 } else {
5851 if (s->cc_op != CC_OP_DYNAMIC)
5852 gen_op_set_cc_op(s->cc_op);
5853 tcg_gen_helper_1_0(helper_read_eflags, cpu_T[0]);
5854 gen_push_T0(s);
5855 }
5856 break;
5857 case 0x9d: /* popf */
5858 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF))
5859 break;
5860 if (s->vm86 && s->iopl != 3) {
5861 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
5862 } else {
5863 gen_pop_T0(s);
5864 if (s->cpl == 0) {
5865 if (s->dflag) {
5866 tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
5867 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK)));
5868 } else {
5869 tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
5870 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff));
5871 }
5872 } else {
5873 if (s->cpl <= s->iopl) {
5874 if (s->dflag) {
5875 tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
5876 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK)));
5877 } else {
5878 tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
5879 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff));
5880 }
5881 } else {
5882 if (s->dflag) {
5883 tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
5884 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK)));
5885 } else {
5886 tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
5887 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff));
5888 }
5889 }
5890 }
5891 gen_pop_update(s);
5892 s->cc_op = CC_OP_EFLAGS;
5893 /* abort translation because TF flag may change */
5894 gen_jmp_im(s->pc - s->cs_base);
5895 gen_eob(s);
5896 }
5897 break;
5898 case 0x9e: /* sahf */
5899 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
5900 goto illegal_op;
5901 gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
5902 if (s->cc_op != CC_OP_DYNAMIC)
5903 gen_op_set_cc_op(s->cc_op);
5904 gen_compute_eflags(cpu_cc_src);
5905 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
5906 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
5907 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
5908 s->cc_op = CC_OP_EFLAGS;
5909 break;
5910 case 0x9f: /* lahf */
5911 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
5912 goto illegal_op;
5913 if (s->cc_op != CC_OP_DYNAMIC)
5914 gen_op_set_cc_op(s->cc_op);
5915 gen_compute_eflags(cpu_T[0]);
5916 /* Note: gen_compute_eflags() only gives the condition codes */
5917 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02);
5918 gen_op_mov_reg_T0(OT_BYTE, R_AH);
5919 break;
5920 case 0xf5: /* cmc */
5921 if (s->cc_op != CC_OP_DYNAMIC)
5922 gen_op_set_cc_op(s->cc_op);
5923 gen_compute_eflags(cpu_cc_src);
5924 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
5925 s->cc_op = CC_OP_EFLAGS;
5926 break;
5927 case 0xf8: /* clc */
5928 if (s->cc_op != CC_OP_DYNAMIC)
5929 gen_op_set_cc_op(s->cc_op);
5930 gen_compute_eflags(cpu_cc_src);
5931 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
5932 s->cc_op = CC_OP_EFLAGS;
5933 break;
5934 case 0xf9: /* stc */
5935 if (s->cc_op != CC_OP_DYNAMIC)
5936 gen_op_set_cc_op(s->cc_op);
5937 gen_compute_eflags(cpu_cc_src);
5938 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
5939 s->cc_op = CC_OP_EFLAGS;
5940 break;
5941 case 0xfc: /* cld */
5942 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
5943 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
5944 break;
5945 case 0xfd: /* std */
5946 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
5947 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
5948 break;
5949
5950 /************************/
5951 /* bit operations */
5952 case 0x1ba: /* bt/bts/btr/btc Gv, im */
5953 ot = dflag + OT_WORD;
5954 modrm = ldub_code(s->pc++);
5955 op = (modrm >> 3) & 7;
5956 mod = (modrm >> 6) & 3;
5957 rm = (modrm & 7) | REX_B(s);
5958 if (mod != 3) {
5959 s->rip_offset = 1;
5960 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5961 gen_op_ld_T0_A0(ot + s->mem_index);
5962 } else {
5963 gen_op_mov_TN_reg(ot, 0, rm);
5964 }
5965 /* load shift */
5966 val = ldub_code(s->pc++);
5967 gen_op_movl_T1_im(val);
5968 if (op < 4)
5969 goto illegal_op;
5970 op -= 4;
5971 goto bt_op;
5972 case 0x1a3: /* bt Gv, Ev */
5973 op = 0;
5974 goto do_btx;
5975 case 0x1ab: /* bts */
5976 op = 1;
5977 goto do_btx;
5978 case 0x1b3: /* btr */
5979 op = 2;
5980 goto do_btx;
5981 case 0x1bb: /* btc */
5982 op = 3;
5983 do_btx:
5984 ot = dflag + OT_WORD;
5985 modrm = ldub_code(s->pc++);
5986 reg = ((modrm >> 3) & 7) | rex_r;
5987 mod = (modrm >> 6) & 3;
5988 rm = (modrm & 7) | REX_B(s);
5989 gen_op_mov_TN_reg(OT_LONG, 1, reg);
5990 if (mod != 3) {
5991 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5992 /* specific case: we need to add a displacement */
5993 gen_exts(ot, cpu_T[1]);
5994 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
5995 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
5996 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
5997 gen_op_ld_T0_A0(ot + s->mem_index);
5998 } else {
5999 gen_op_mov_TN_reg(ot, 0, rm);
6000 }
6001 bt_op:
6002 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6003 switch(op) {
6004 case 0:
6005 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
6006 tcg_gen_movi_tl(cpu_cc_dst, 0);
6007 break;
6008 case 1:
6009 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6010 tcg_gen_movi_tl(cpu_tmp0, 1);
6011 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6012 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6013 break;
6014 case 2:
6015 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6016 tcg_gen_movi_tl(cpu_tmp0, 1);
6017 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6018 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6019 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6020 break;
6021 default:
6022 case 3:
6023 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6024 tcg_gen_movi_tl(cpu_tmp0, 1);
6025 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6026 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6027 break;
6028 }
6029 s->cc_op = CC_OP_SARB + ot;
6030 if (op != 0) {
6031 if (mod != 3)
6032 gen_op_st_T0_A0(ot + s->mem_index);
6033 else
6034 gen_op_mov_reg_T0(ot, rm);
6035 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6036 tcg_gen_movi_tl(cpu_cc_dst, 0);
6037 }
6038 break;
6039 case 0x1bc: /* bsf */
6040 case 0x1bd: /* bsr */
6041 {
6042 int label1;
6043 ot = dflag + OT_WORD;
6044 modrm = ldub_code(s->pc++);
6045 reg = ((modrm >> 3) & 7) | rex_r;
6046 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
6047 gen_extu(ot, cpu_T[0]);
6048 label1 = gen_new_label();
6049 tcg_gen_movi_tl(cpu_cc_dst, 0);
6050 tcg_gen_brcond_tl(TCG_COND_EQ, cpu_T[0], tcg_const_tl(0), label1);
6051 if (b & 1) {
6052 tcg_gen_helper_1_1(helper_bsr, cpu_T[0], cpu_T[0]);
6053 } else {
6054 tcg_gen_helper_1_1(helper_bsf, cpu_T[0], cpu_T[0]);
6055 }
6056 gen_op_mov_reg_T0(ot, reg);
6057 tcg_gen_movi_tl(cpu_cc_dst, 1);
6058 gen_set_label(label1);
6059 tcg_gen_discard_tl(cpu_cc_src);
6060 s->cc_op = CC_OP_LOGICB + ot;
6061 }
6062 break;
6063 /************************/
6064 /* bcd */
6065 case 0x27: /* daa */
6066 if (CODE64(s))
6067 goto illegal_op;
6068 if (s->cc_op != CC_OP_DYNAMIC)
6069 gen_op_set_cc_op(s->cc_op);
6070 tcg_gen_helper_0_0(helper_daa);
6071 s->cc_op = CC_OP_EFLAGS;
6072 break;
6073 case 0x2f: /* das */
6074 if (CODE64(s))
6075 goto illegal_op;
6076 if (s->cc_op != CC_OP_DYNAMIC)
6077 gen_op_set_cc_op(s->cc_op);
6078 tcg_gen_helper_0_0(helper_das);
6079 s->cc_op = CC_OP_EFLAGS;
6080 break;
6081 case 0x37: /* aaa */
6082 if (CODE64(s))
6083 goto illegal_op;
6084 if (s->cc_op != CC_OP_DYNAMIC)
6085 gen_op_set_cc_op(s->cc_op);
6086 tcg_gen_helper_0_0(helper_aaa);
6087 s->cc_op = CC_OP_EFLAGS;
6088 break;
6089 case 0x3f: /* aas */
6090 if (CODE64(s))
6091 goto illegal_op;
6092 if (s->cc_op != CC_OP_DYNAMIC)
6093 gen_op_set_cc_op(s->cc_op);
6094 tcg_gen_helper_0_0(helper_aas);
6095 s->cc_op = CC_OP_EFLAGS;
6096 break;
6097 case 0xd4: /* aam */
6098 if (CODE64(s))
6099 goto illegal_op;
6100 val = ldub_code(s->pc++);
6101 if (val == 0) {
6102 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6103 } else {
6104 tcg_gen_helper_0_1(helper_aam, tcg_const_i32(val));
6105 s->cc_op = CC_OP_LOGICB;
6106 }
6107 break;
6108 case 0xd5: /* aad */
6109 if (CODE64(s))
6110 goto illegal_op;
6111 val = ldub_code(s->pc++);
6112 tcg_gen_helper_0_1(helper_aad, tcg_const_i32(val));
6113 s->cc_op = CC_OP_LOGICB;
6114 break;
6115 /************************/
6116 /* misc */
6117 case 0x90: /* nop */
6118 /* XXX: xchg + rex handling */
6119 /* XXX: correct lock test for all insn */
6120 if (prefixes & PREFIX_LOCK)
6121 goto illegal_op;
6122 if (prefixes & PREFIX_REPZ) {
6123 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
6124 }
6125 break;
6126 case 0x9b: /* fwait */
6127 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6128 (HF_MP_MASK | HF_TS_MASK)) {
6129 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6130 } else {
6131 if (s->cc_op != CC_OP_DYNAMIC)
6132 gen_op_set_cc_op(s->cc_op);
6133 gen_jmp_im(pc_start - s->cs_base);
6134 tcg_gen_helper_0_0(helper_fwait);
6135 }
6136 break;
6137 case 0xcc: /* int3 */
6138 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_SWINT))
6139 break;
6140 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6141 break;
6142 case 0xcd: /* int N */
6143 val = ldub_code(s->pc++);
6144 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_SWINT))
6145 break;
6146 if (s->vm86 && s->iopl != 3) {
6147 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6148 } else {
6149 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6150 }
6151 break;
6152 case 0xce: /* into */
6153 if (CODE64(s))
6154 goto illegal_op;
6155 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_SWINT))
6156 break;
6157 if (s->cc_op != CC_OP_DYNAMIC)
6158 gen_op_set_cc_op(s->cc_op);
6159 gen_jmp_im(pc_start - s->cs_base);
6160 tcg_gen_helper_0_1(helper_into, tcg_const_i32(s->pc - pc_start));
6161 break;
6162 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6163 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP))
6164 break;
6165 #if 1
6166 gen_debug(s, pc_start - s->cs_base);
6167 #else
6168 /* start debug */
6169 tb_flush(cpu_single_env);
6170 cpu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6171 #endif
6172 break;
6173 case 0xfa: /* cli */
6174 if (!s->vm86) {
6175 if (s->cpl <= s->iopl) {
6176 tcg_gen_helper_0_0(helper_cli);
6177 } else {
6178 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6179 }
6180 } else {
6181 if (s->iopl == 3) {
6182 tcg_gen_helper_0_0(helper_cli);
6183 } else {
6184 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6185 }
6186 }
6187 break;
6188 case 0xfb: /* sti */
6189 if (!s->vm86) {
6190 if (s->cpl <= s->iopl) {
6191 gen_sti:
6192 tcg_gen_helper_0_0(helper_sti);
6193 /* interruptions are enabled only the first insn after sti */
6194 /* If several instructions disable interrupts, only the
6195 _first_ does it */
6196 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
6197 tcg_gen_helper_0_0(helper_set_inhibit_irq);
6198 /* give a chance to handle pending irqs */
6199 gen_jmp_im(s->pc - s->cs_base);
6200 gen_eob(s);
6201 } else {
6202 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6203 }
6204 } else {
6205 if (s->iopl == 3) {
6206 goto gen_sti;
6207 } else {
6208 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6209 }
6210 }
6211 break;
6212 case 0x62: /* bound */
6213 if (CODE64(s))
6214 goto illegal_op;
6215 ot = dflag ? OT_LONG : OT_WORD;
6216 modrm = ldub_code(s->pc++);
6217 reg = (modrm >> 3) & 7;
6218 mod = (modrm >> 6) & 3;
6219 if (mod == 3)
6220 goto illegal_op;
6221 gen_op_mov_TN_reg(ot, 0, reg);
6222 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6223 gen_jmp_im(pc_start - s->cs_base);
6224 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6225 if (ot == OT_WORD)
6226 tcg_gen_helper_0_2(helper_boundw, cpu_A0, cpu_tmp2_i32);
6227 else
6228 tcg_gen_helper_0_2(helper_boundl, cpu_A0, cpu_tmp2_i32);
6229 break;
6230 case 0x1c8 ... 0x1cf: /* bswap reg */
6231 reg = (b & 7) | REX_B(s);
6232 #ifdef TARGET_X86_64
6233 if (dflag == 2) {
6234 gen_op_mov_TN_reg(OT_QUAD, 0, reg);
6235 tcg_gen_bswap_i64(cpu_T[0], cpu_T[0]);
6236 gen_op_mov_reg_T0(OT_QUAD, reg);
6237 } else
6238 {
6239 TCGv tmp0;
6240 gen_op_mov_TN_reg(OT_LONG, 0, reg);
6241
6242 tmp0 = tcg_temp_new(TCG_TYPE_I32);
6243 tcg_gen_trunc_i64_i32(tmp0, cpu_T[0]);
6244 tcg_gen_bswap_i32(tmp0, tmp0);
6245 tcg_gen_extu_i32_i64(cpu_T[0], tmp0);
6246 gen_op_mov_reg_T0(OT_LONG, reg);
6247 }
6248 #else
6249 {
6250 gen_op_mov_TN_reg(OT_LONG, 0, reg);
6251 tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]);
6252 gen_op_mov_reg_T0(OT_LONG, reg);
6253 }
6254 #endif
6255 break;
6256 case 0xd6: /* salc */
6257 if (CODE64(s))
6258 goto illegal_op;
6259 if (s->cc_op != CC_OP_DYNAMIC)
6260 gen_op_set_cc_op(s->cc_op);
6261 gen_compute_eflags_c(cpu_T[0]);
6262 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
6263 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
6264 break;
6265 case 0xe0: /* loopnz */
6266 case 0xe1: /* loopz */
6267 case 0xe2: /* loop */
6268 case 0xe3: /* jecxz */
6269 {
6270 int l1, l2, l3;
6271
6272 tval = (int8_t)insn_get(s, OT_BYTE);
6273 next_eip = s->pc - s->cs_base;
6274 tval += next_eip;
6275 if (s->dflag == 0)
6276 tval &= 0xffff;
6277
6278 l1 = gen_new_label();
6279 l2 = gen_new_label();
6280 l3 = gen_new_label();
6281 b &= 3;
6282 switch(b) {
6283 case 0: /* loopnz */
6284 case 1: /* loopz */
6285 if (s->cc_op != CC_OP_DYNAMIC)
6286 gen_op_set_cc_op(s->cc_op);
6287 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6288 gen_op_jz_ecx(s->aflag, l3);
6289 gen_compute_eflags(cpu_tmp0);
6290 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_Z);
6291 if (b == 0) {
6292 tcg_gen_brcond_tl(TCG_COND_EQ,
6293 cpu_tmp0, tcg_const_tl(0), l1);
6294 } else {
6295 tcg_gen_brcond_tl(TCG_COND_NE,
6296 cpu_tmp0, tcg_const_tl(0), l1);
6297 }
6298 break;
6299 case 2: /* loop */
6300 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6301 gen_op_jnz_ecx(s->aflag, l1);
6302 break;
6303 default:
6304 case 3: /* jcxz */
6305 gen_op_jz_ecx(s->aflag, l1);
6306 break;
6307 }
6308
6309 gen_set_label(l3);
6310 gen_jmp_im(next_eip);
6311 tcg_gen_br(l2);
6312
6313 gen_set_label(l1);
6314 gen_jmp_im(tval);
6315 gen_set_label(l2);
6316 gen_eob(s);
6317 }
6318 break;
6319 case 0x130: /* wrmsr */
6320 case 0x132: /* rdmsr */
6321 if (s->cpl != 0) {
6322 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6323 } else {
6324 int retval = 0;
6325 if (b & 2) {
6326 retval = gen_svm_check_intercept_param(s, pc_start, SVM_EXIT_MSR, 0);
6327 tcg_gen_helper_0_0(helper_rdmsr);
6328 } else {
6329 retval = gen_svm_check_intercept_param(s, pc_start, SVM_EXIT_MSR, 1);
6330 tcg_gen_helper_0_0(helper_wrmsr);
6331 }
6332 if(retval)
6333 gen_eob(s);
6334 }
6335 break;
6336 case 0x131: /* rdtsc */
6337 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_RDTSC))
6338 break;
6339 gen_jmp_im(pc_start - s->cs_base);
6340 tcg_gen_helper_0_0(helper_rdtsc);
6341 break;
6342 case 0x133: /* rdpmc */
6343 gen_jmp_im(pc_start - s->cs_base);
6344 tcg_gen_helper_0_0(helper_rdpmc);
6345 break;
6346 case 0x134: /* sysenter */
6347 if (CODE64(s))
6348 goto illegal_op;
6349 if (!s->pe) {
6350 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6351 } else {
6352 if (s->cc_op != CC_OP_DYNAMIC) {
6353 gen_op_set_cc_op(s->cc_op);
6354 s->cc_op = CC_OP_DYNAMIC;
6355 }
6356 gen_jmp_im(pc_start - s->cs_base);
6357 tcg_gen_helper_0_0(helper_sysenter);
6358 gen_eob(s);
6359 }
6360 break;
6361 case 0x135: /* sysexit */
6362 if (CODE64(s))
6363 goto illegal_op;
6364 if (!s->pe) {
6365 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6366 } else {
6367 if (s->cc_op != CC_OP_DYNAMIC) {
6368 gen_op_set_cc_op(s->cc_op);
6369 s->cc_op = CC_OP_DYNAMIC;
6370 }
6371 gen_jmp_im(pc_start - s->cs_base);
6372 tcg_gen_helper_0_0(helper_sysexit);
6373 gen_eob(s);
6374 }
6375 break;
6376 #ifdef TARGET_X86_64
6377 case 0x105: /* syscall */
6378 /* XXX: is it usable in real mode ? */
6379 if (s->cc_op != CC_OP_DYNAMIC) {
6380 gen_op_set_cc_op(s->cc_op);
6381 s->cc_op = CC_OP_DYNAMIC;
6382 }
6383 gen_jmp_im(pc_start - s->cs_base);
6384 tcg_gen_helper_0_1(helper_syscall, tcg_const_i32(s->pc - pc_start));
6385 gen_eob(s);
6386 break;
6387 case 0x107: /* sysret */
6388 if (!s->pe) {
6389 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6390 } else {
6391 if (s->cc_op != CC_OP_DYNAMIC) {
6392 gen_op_set_cc_op(s->cc_op);
6393 s->cc_op = CC_OP_DYNAMIC;
6394 }
6395 gen_jmp_im(pc_start - s->cs_base);
6396 tcg_gen_helper_0_1(helper_sysret, tcg_const_i32(s->dflag));
6397 /* condition codes are modified only in long mode */
6398 if (s->lma)
6399 s->cc_op = CC_OP_EFLAGS;
6400 gen_eob(s);
6401 }
6402 break;
6403 #endif
6404 case 0x1a2: /* cpuid */
6405 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_CPUID))
6406 break;
6407 tcg_gen_helper_0_0(helper_cpuid);
6408 break;
6409 case 0xf4: /* hlt */
6410 if (s->cpl != 0) {
6411 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6412 } else {
6413 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_HLT))
6414 break;
6415 if (s->cc_op != CC_OP_DYNAMIC)
6416 gen_op_set_cc_op(s->cc_op);
6417 gen_jmp_im(s->pc - s->cs_base);
6418 tcg_gen_helper_0_0(helper_hlt);
6419 s->is_jmp = 3;
6420 }
6421 break;
6422 case 0x100:
6423 modrm = ldub_code(s->pc++);
6424 mod = (modrm >> 6) & 3;
6425 op = (modrm >> 3) & 7;
6426 switch(op) {
6427 case 0: /* sldt */
6428 if (!s->pe || s->vm86)
6429 goto illegal_op;
6430 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ))
6431 break;
6432 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
6433 ot = OT_WORD;
6434 if (mod == 3)
6435 ot += s->dflag;
6436 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
6437 break;
6438 case 2: /* lldt */
6439 if (!s->pe || s->vm86)
6440 goto illegal_op;
6441 if (s->cpl != 0) {
6442 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6443 } else {
6444 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE))
6445 break;
6446 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
6447 gen_jmp_im(pc_start - s->cs_base);
6448 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6449 tcg_gen_helper_0_1(helper_lldt, cpu_tmp2_i32);
6450 }
6451 break;
6452 case 1: /* str */
6453 if (!s->pe || s->vm86)
6454 goto illegal_op;
6455 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ))
6456 break;
6457 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
6458 ot = OT_WORD;
6459 if (mod == 3)
6460 ot += s->dflag;
6461 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
6462 break;
6463 case 3: /* ltr */
6464 if (!s->pe || s->vm86)
6465 goto illegal_op;
6466 if (s->cpl != 0) {
6467 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6468 } else {
6469 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE))
6470 break;
6471 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
6472 gen_jmp_im(pc_start - s->cs_base);
6473 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6474 tcg_gen_helper_0_1(helper_ltr, cpu_tmp2_i32);
6475 }
6476 break;
6477 case 4: /* verr */
6478 case 5: /* verw */
6479 if (!s->pe || s->vm86)
6480 goto illegal_op;
6481 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
6482 if (s->cc_op != CC_OP_DYNAMIC)
6483 gen_op_set_cc_op(s->cc_op);
6484 if (op == 4)
6485 tcg_gen_helper_0_1(helper_verr, cpu_T[0]);
6486 else
6487 tcg_gen_helper_0_1(helper_verw, cpu_T[0]);
6488 s->cc_op = CC_OP_EFLAGS;
6489 break;
6490 default:
6491 goto illegal_op;
6492 }
6493 break;
6494 case 0x101:
6495 modrm = ldub_code(s->pc++);
6496 mod = (modrm >> 6) & 3;
6497 op = (modrm >> 3) & 7;
6498 rm = modrm & 7;
6499 switch(op) {
6500 case 0: /* sgdt */
6501 if (mod == 3)
6502 goto illegal_op;
6503 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ))
6504 break;
6505 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6506 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
6507 gen_op_st_T0_A0(OT_WORD + s->mem_index);
6508 gen_add_A0_im(s, 2);
6509 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
6510 if (!s->dflag)
6511 gen_op_andl_T0_im(0xffffff);
6512 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
6513 break;
6514 case 1:
6515 if (mod == 3) {
6516 switch (rm) {
6517 case 0: /* monitor */
6518 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
6519 s->cpl != 0)
6520 goto illegal_op;
6521 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_MONITOR))
6522 break;
6523 gen_jmp_im(pc_start - s->cs_base);
6524 #ifdef TARGET_X86_64
6525 if (s->aflag == 2) {
6526 gen_op_movq_A0_reg(R_EAX);
6527 } else
6528 #endif
6529 {
6530 gen_op_movl_A0_reg(R_EAX);
6531 if (s->aflag == 0)
6532 gen_op_andl_A0_ffff();
6533 }
6534 gen_add_A0_ds_seg(s);
6535 tcg_gen_helper_0_1(helper_monitor, cpu_A0);
6536 break;
6537 case 1: /* mwait */
6538 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
6539 s->cpl != 0)
6540 goto illegal_op;
6541 if (s->cc_op != CC_OP_DYNAMIC) {
6542 gen_op_set_cc_op(s->cc_op);
6543 s->cc_op = CC_OP_DYNAMIC;
6544 }
6545 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_MWAIT))
6546 break;
6547 gen_jmp_im(s->pc - s->cs_base);
6548 tcg_gen_helper_0_0(helper_mwait);
6549 gen_eob(s);
6550 break;
6551 default:
6552 goto illegal_op;
6553 }
6554 } else { /* sidt */
6555 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ))
6556 break;
6557 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6558 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
6559 gen_op_st_T0_A0(OT_WORD + s->mem_index);
6560 gen_add_A0_im(s, 2);
6561 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
6562 if (!s->dflag)
6563 gen_op_andl_T0_im(0xffffff);
6564 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
6565 }
6566 break;
6567 case 2: /* lgdt */
6568 case 3: /* lidt */
6569 if (mod == 3) {
6570 switch(rm) {
6571 case 0: /* VMRUN */
6572 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_VMRUN))
6573 break;
6574 if (s->cc_op != CC_OP_DYNAMIC)
6575 gen_op_set_cc_op(s->cc_op);
6576 gen_jmp_im(s->pc - s->cs_base);
6577 tcg_gen_helper_0_0(helper_vmrun);
6578 s->cc_op = CC_OP_EFLAGS;
6579 gen_eob(s);
6580 break;
6581 case 1: /* VMMCALL */
6582 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_VMMCALL))
6583 break;
6584 /* FIXME: cause #UD if hflags & SVM */
6585 tcg_gen_helper_0_0(helper_vmmcall);
6586 break;
6587 case 2: /* VMLOAD */
6588 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_VMLOAD))
6589 break;
6590 tcg_gen_helper_0_0(helper_vmload);
6591 break;
6592 case 3: /* VMSAVE */
6593 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_VMSAVE))
6594 break;
6595 tcg_gen_helper_0_0(helper_vmsave);
6596 break;
6597 case 4: /* STGI */
6598 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_STGI))
6599 break;
6600 tcg_gen_helper_0_0(helper_stgi);
6601 break;
6602 case 5: /* CLGI */
6603 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_CLGI))
6604 break;
6605 tcg_gen_helper_0_0(helper_clgi);
6606 break;
6607 case 6: /* SKINIT */
6608 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_SKINIT))
6609 break;
6610 tcg_gen_helper_0_0(helper_skinit);
6611 break;
6612 case 7: /* INVLPGA */
6613 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_INVLPGA))
6614 break;
6615 tcg_gen_helper_0_0(helper_invlpga);
6616 break;
6617 default:
6618 goto illegal_op;
6619 }
6620 } else if (s->cpl != 0) {
6621 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6622 } else {
6623 if (gen_svm_check_intercept(s, pc_start,
6624 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE))
6625 break;
6626 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6627 gen_op_ld_T1_A0(OT_WORD + s->mem_index);
6628 gen_add_A0_im(s, 2);
6629 gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
6630 if (!s->dflag)
6631 gen_op_andl_T0_im(0xffffff);
6632 if (op == 2) {
6633 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
6634 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
6635 } else {
6636 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
6637 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
6638 }
6639 }
6640 break;
6641 case 4: /* smsw */
6642 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0))
6643 break;
6644 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
6645 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1);
6646 break;
6647 case 6: /* lmsw */
6648 if (s->cpl != 0) {
6649 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6650 } else {
6651 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0))
6652 break;
6653 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
6654 tcg_gen_helper_0_1(helper_lmsw, cpu_T[0]);
6655 gen_jmp_im(s->pc - s->cs_base);
6656 gen_eob(s);
6657 }
6658 break;
6659 case 7: /* invlpg */
6660 if (s->cpl != 0) {
6661 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6662 } else {
6663 if (mod == 3) {
6664 #ifdef TARGET_X86_64
6665 if (CODE64(s) && rm == 0) {
6666 /* swapgs */
6667 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,segs[R_GS].base));
6668 tcg_gen_ld_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,kernelgsbase));
6669 tcg_gen_st_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,segs[R_GS].base));
6670 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,kernelgsbase));
6671 } else
6672 #endif
6673 {
6674 goto illegal_op;
6675 }
6676 } else {
6677 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_INVLPG))
6678 break;
6679 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6680 tcg_gen_helper_0_1(helper_invlpg, cpu_A0);
6681 gen_jmp_im(s->pc - s->cs_base);
6682 gen_eob(s);
6683 }
6684 }
6685 break;
6686 default:
6687 goto illegal_op;
6688 }
6689 break;
6690 case 0x108: /* invd */
6691 case 0x109: /* wbinvd */
6692 if (s->cpl != 0) {
6693 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6694 } else {
6695 if (gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD))
6696 break;
6697 /* nothing to do */
6698 }
6699 break;
6700 case 0x63: /* arpl or movslS (x86_64) */
6701 #ifdef TARGET_X86_64
6702 if (CODE64(s)) {
6703 int d_ot;
6704 /* d_ot is the size of destination */
6705 d_ot = dflag + OT_WORD;
6706
6707 modrm = ldub_code(s->pc++);
6708 reg = ((modrm >> 3) & 7) | rex_r;
6709 mod = (modrm >> 6) & 3;
6710 rm = (modrm & 7) | REX_B(s);
6711
6712 if (mod == 3) {
6713 gen_op_mov_TN_reg(OT_LONG, 0, rm);
6714 /* sign extend */
6715 if (d_ot == OT_QUAD)
6716 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
6717 gen_op_mov_reg_T0(d_ot, reg);
6718 } else {
6719 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6720 if (d_ot == OT_QUAD) {
6721 gen_op_lds_T0_A0(OT_LONG + s->mem_index);
6722 } else {
6723 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
6724 }
6725 gen_op_mov_reg_T0(d_ot, reg);
6726 }
6727 } else
6728 #endif
6729 {
6730 int label1;
6731 if (!s->pe || s->vm86)
6732 goto illegal_op;
6733 ot = OT_WORD;
6734 modrm = ldub_code(s->pc++);
6735 reg = (modrm >> 3) & 7;
6736 mod = (modrm >> 6) & 3;
6737 rm = modrm & 7;
6738 if (mod != 3) {
6739 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6740 gen_op_ld_T0_A0(ot + s->mem_index);
6741 } else {
6742 gen_op_mov_TN_reg(ot, 0, rm);
6743 }
6744 gen_op_mov_TN_reg(ot, 1, reg);
6745 tcg_gen_andi_tl(cpu_tmp0, cpu_T[0], 3);
6746 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], 3);
6747 tcg_gen_movi_tl(cpu_T3, 0);
6748 label1 = gen_new_label();
6749 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, cpu_T[1], label1);
6750 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], ~3);
6751 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
6752 tcg_gen_movi_tl(cpu_T3, CC_Z);
6753 gen_set_label(label1);
6754 if (mod != 3) {
6755 gen_op_st_T0_A0(ot + s->mem_index);
6756 } else {
6757 gen_op_mov_reg_T0(ot, rm);
6758 }
6759 if (s->cc_op != CC_OP_DYNAMIC)
6760 gen_op_set_cc_op(s->cc_op);
6761 gen_compute_eflags(cpu_cc_src);
6762 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
6763 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T3);
6764 s->cc_op = CC_OP_EFLAGS;
6765 }
6766 break;
6767 case 0x102: /* lar */
6768 case 0x103: /* lsl */
6769 {
6770 int label1;
6771 if (!s->pe || s->vm86)
6772 goto illegal_op;
6773 ot = dflag ? OT_LONG : OT_WORD;
6774 modrm = ldub_code(s->pc++);
6775 reg = ((modrm >> 3) & 7) | rex_r;
6776 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
6777 if (s->cc_op != CC_OP_DYNAMIC)
6778 gen_op_set_cc_op(s->cc_op);
6779 if (b == 0x102)
6780 tcg_gen_helper_1_1(helper_lar, cpu_T[0], cpu_T[0]);
6781 else
6782 tcg_gen_helper_1_1(helper_lsl, cpu_T[0], cpu_T[0]);
6783 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
6784 label1 = gen_new_label();
6785 tcg_gen_brcond_tl(TCG_COND_EQ, cpu_tmp0, tcg_const_tl(0), label1);
6786 gen_op_mov_reg_T0(ot, reg);
6787 gen_set_label(label1);
6788 s->cc_op = CC_OP_EFLAGS;
6789 }
6790 break;
6791 case 0x118:
6792 modrm = ldub_code(s->pc++);
6793 mod = (modrm >> 6) & 3;
6794 op = (modrm >> 3) & 7;
6795 switch(op) {
6796 case 0: /* prefetchnta */
6797 case 1: /* prefetchnt0 */
6798 case 2: /* prefetchnt0 */
6799 case 3: /* prefetchnt0 */
6800 if (mod == 3)
6801 goto illegal_op;
6802 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6803 /* nothing more to do */
6804 break;
6805 default: /* nop (multi byte) */
6806 gen_nop_modrm(s, modrm);
6807 break;
6808 }
6809 break;
6810 case 0x119 ... 0x11f: /* nop (multi byte) */
6811 modrm = ldub_code(s->pc++);
6812 gen_nop_modrm(s, modrm);
6813 break;
6814 case 0x120: /* mov reg, crN */
6815 case 0x122: /* mov crN, reg */
6816 if (s->cpl != 0) {
6817 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6818 } else {
6819 modrm = ldub_code(s->pc++);
6820 if ((modrm & 0xc0) != 0xc0)
6821 goto illegal_op;
6822 rm = (modrm & 7) | REX_B(s);
6823 reg = ((modrm >> 3) & 7) | rex_r;
6824 if (CODE64(s))
6825 ot = OT_QUAD;
6826 else
6827 ot = OT_LONG;
6828 switch(reg) {
6829 case 0:
6830 case 2:
6831 case 3:
6832 case 4:
6833 case 8:
6834 if (b & 2) {
6835 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0 + reg);
6836 gen_op_mov_TN_reg(ot, 0, rm);
6837 tcg_gen_helper_0_2(helper_movl_crN_T0,
6838 tcg_const_i32(reg), cpu_T[0]);
6839 gen_jmp_im(s->pc - s->cs_base);
6840 gen_eob(s);
6841 } else {
6842 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0 + reg);
6843 #if !defined(CONFIG_USER_ONLY)
6844 if (reg == 8)
6845 tcg_gen_helper_1_0(helper_movtl_T0_cr8, cpu_T[0]);
6846 else
6847 #endif
6848 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[reg]));
6849 gen_op_mov_reg_T0(ot, rm);
6850 }
6851 break;
6852 default:
6853 goto illegal_op;
6854 }
6855 }
6856 break;
6857 case 0x121: /* mov reg, drN */
6858 case 0x123: /* mov drN, reg */
6859 if (s->cpl != 0) {
6860 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6861 } else {
6862 modrm = ldub_code(s->pc++);
6863 if ((modrm & 0xc0) != 0xc0)
6864 goto illegal_op;
6865 rm = (modrm & 7) | REX_B(s);
6866 reg = ((modrm >> 3) & 7) | rex_r;
6867 if (CODE64(s))
6868 ot = OT_QUAD;
6869 else
6870 ot = OT_LONG;
6871 /* XXX: do it dynamically with CR4.DE bit */
6872 if (reg == 4 || reg == 5 || reg >= 8)
6873 goto illegal_op;
6874 if (b & 2) {
6875 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
6876 gen_op_mov_TN_reg(ot, 0, rm);
6877 tcg_gen_helper_0_2(helper_movl_drN_T0,
6878 tcg_const_i32(reg), cpu_T[0]);
6879 gen_jmp_im(s->pc - s->cs_base);
6880 gen_eob(s);
6881 } else {
6882 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
6883 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
6884 gen_op_mov_reg_T0(ot, rm);
6885 }
6886 }
6887 break;
6888 case 0x106: /* clts */
6889 if (s->cpl != 0) {
6890 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6891 } else {
6892 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
6893 tcg_gen_helper_0_0(helper_clts);
6894 /* abort block because static cpu state changed */
6895 gen_jmp_im(s->pc - s->cs_base);
6896 gen_eob(s);
6897 }
6898 break;
6899 /* MMX/3DNow!/SSE/SSE2/SSE3 support */
6900 case 0x1c3: /* MOVNTI reg, mem */
6901 if (!(s->cpuid_features & CPUID_SSE2))
6902 goto illegal_op;
6903 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
6904 modrm = ldub_code(s->pc++);
6905 mod = (modrm >> 6) & 3;
6906 if (mod == 3)
6907 goto illegal_op;
6908 reg = ((modrm >> 3) & 7) | rex_r;
6909 /* generate a generic store */
6910 gen_ldst_modrm(s, modrm, ot, reg, 1);
6911 break;
6912 case 0x1ae:
6913 modrm = ldub_code(s->pc++);
6914 mod = (modrm >> 6) & 3;
6915 op = (modrm >> 3) & 7;
6916 switch(op) {
6917 case 0: /* fxsave */
6918 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
6919 (s->flags & HF_EM_MASK))
6920 goto illegal_op;
6921 if (s->flags & HF_TS_MASK) {
6922 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6923 break;
6924 }
6925 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6926 if (s->cc_op != CC_OP_DYNAMIC)
6927 gen_op_set_cc_op(s->cc_op);
6928 gen_jmp_im(pc_start - s->cs_base);
6929 tcg_gen_helper_0_2(helper_fxsave,
6930 cpu_A0, tcg_const_i32((s->dflag == 2)));
6931 break;
6932 case 1: /* fxrstor */
6933 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
6934 (s->flags & HF_EM_MASK))
6935 goto illegal_op;
6936 if (s->flags & HF_TS_MASK) {
6937 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6938 break;
6939 }
6940 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6941 if (s->cc_op != CC_OP_DYNAMIC)
6942 gen_op_set_cc_op(s->cc_op);
6943 gen_jmp_im(pc_start - s->cs_base);
6944 tcg_gen_helper_0_2(helper_fxrstor,
6945 cpu_A0, tcg_const_i32((s->dflag == 2)));
6946 break;
6947 case 2: /* ldmxcsr */
6948 case 3: /* stmxcsr */
6949 if (s->flags & HF_TS_MASK) {
6950 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6951 break;
6952 }
6953 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
6954 mod == 3)
6955 goto illegal_op;
6956 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6957 if (op == 2) {
6958 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
6959 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
6960 } else {
6961 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
6962 gen_op_st_T0_A0(OT_LONG + s->mem_index);
6963 }
6964 break;
6965 case 5: /* lfence */
6966 case 6: /* mfence */
6967 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE))
6968 goto illegal_op;
6969 break;
6970 case 7: /* sfence / clflush */
6971 if ((modrm & 0xc7) == 0xc0) {
6972 /* sfence */
6973 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
6974 if (!(s->cpuid_features & CPUID_SSE))
6975 goto illegal_op;
6976 } else {
6977 /* clflush */
6978 if (!(s->cpuid_features & CPUID_CLFLUSH))
6979 goto illegal_op;
6980 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6981 }
6982 break;
6983 default:
6984 goto illegal_op;
6985 }
6986 break;
6987 case 0x10d: /* 3DNow! prefetch(w) */
6988 modrm = ldub_code(s->pc++);
6989 mod = (modrm >> 6) & 3;
6990 if (mod == 3)
6991 goto illegal_op;
6992 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6993 /* ignore for now */
6994 break;
6995 case 0x1aa: /* rsm */
6996 if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM))
6997 break;
6998 if (!(s->flags & HF_SMM_MASK))
6999 goto illegal_op;
7000 if (s->cc_op != CC_OP_DYNAMIC) {
7001 gen_op_set_cc_op(s->cc_op);
7002 s->cc_op = CC_OP_DYNAMIC;
7003 }
7004 gen_jmp_im(s->pc - s->cs_base);
7005 tcg_gen_helper_0_0(helper_rsm);
7006 gen_eob(s);
7007 break;
7008 case 0x10e ... 0x10f:
7009 /* 3DNow! instructions, ignore prefixes */
7010 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
7011 case 0x110 ... 0x117:
7012 case 0x128 ... 0x12f:
7013 case 0x150 ... 0x177:
7014 case 0x17c ... 0x17f:
7015 case 0x1c2:
7016 case 0x1c4 ... 0x1c6:
7017 case 0x1d0 ... 0x1fe:
7018 gen_sse(s, b, pc_start, rex_r);
7019 break;
7020 default:
7021 goto illegal_op;
7022 }
7023 /* lock generation */
7024 if (s->prefix & PREFIX_LOCK)
7025 tcg_gen_helper_0_0(helper_unlock);
7026 return s->pc;
7027 illegal_op:
7028 if (s->prefix & PREFIX_LOCK)
7029 tcg_gen_helper_0_0(helper_unlock);
7030 /* XXX: ensure that no lock was generated */
7031 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7032 return s->pc;
7033 }
7034
7035 static void tcg_macro_func(TCGContext *s, int macro_id, const int *dead_args)
7036 {
7037 switch(macro_id) {
7038 #ifdef MACRO_TEST
7039 case MACRO_TEST:
7040 tcg_gen_helper_0_1(helper_divl_EAX_T0, cpu_T[0]);
7041 break;
7042 #endif
7043 }
7044 }
7045
7046 void optimize_flags_init(void)
7047 {
7048 #if TCG_TARGET_REG_BITS == 32
7049 assert(sizeof(CCTable) == (1 << 3));
7050 #else
7051 assert(sizeof(CCTable) == (1 << 4));
7052 #endif
7053 tcg_set_macro_func(&tcg_ctx, tcg_macro_func);
7054
7055 cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
7056 #if TARGET_LONG_BITS > HOST_LONG_BITS
7057 cpu_T[0] = tcg_global_mem_new(TCG_TYPE_TL,
7058 TCG_AREG0, offsetof(CPUState, t0), "T0");
7059 cpu_T[1] = tcg_global_mem_new(TCG_TYPE_TL,
7060 TCG_AREG0, offsetof(CPUState, t1), "T1");
7061 cpu_A0 = tcg_global_mem_new(TCG_TYPE_TL,
7062 TCG_AREG0, offsetof(CPUState, t2), "A0");
7063 #else
7064 cpu_T[0] = tcg_global_reg_new(TCG_TYPE_TL, TCG_AREG1, "T0");
7065 cpu_T[1] = tcg_global_reg_new(TCG_TYPE_TL, TCG_AREG2, "T1");
7066 cpu_A0 = tcg_global_reg_new(TCG_TYPE_TL, TCG_AREG3, "A0");
7067 #endif
7068 cpu_T3 = tcg_global_mem_new(TCG_TYPE_TL,
7069 TCG_AREG0, offsetof(CPUState, t3), "T3");
7070 #if defined(__i386__) && (TARGET_LONG_BITS <= HOST_LONG_BITS)
7071 /* XXX: must be suppressed once there are less fixed registers */
7072 cpu_tmp1_i64 = tcg_global_reg2_new_hack(TCG_TYPE_I64, TCG_AREG1, TCG_AREG2, "tmp1");
7073 #endif
7074 cpu_cc_op = tcg_global_mem_new(TCG_TYPE_I32,
7075 TCG_AREG0, offsetof(CPUState, cc_op), "cc_op");
7076 cpu_cc_src = tcg_global_mem_new(TCG_TYPE_TL,
7077 TCG_AREG0, offsetof(CPUState, cc_src), "cc_src");
7078 cpu_cc_dst = tcg_global_mem_new(TCG_TYPE_TL,
7079 TCG_AREG0, offsetof(CPUState, cc_dst), "cc_dst");
7080
7081 /* register helpers */
7082
7083 #define DEF_HELPER(ret, name, params) tcg_register_helper(name, #name);
7084 #include "helper.h"
7085 }
7086
7087 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7088 basic block 'tb'. If search_pc is TRUE, also generate PC
7089 information for each intermediate instruction. */
7090 static inline int gen_intermediate_code_internal(CPUState *env,
7091 TranslationBlock *tb,
7092 int search_pc)
7093 {
7094 DisasContext dc1, *dc = &dc1;
7095 target_ulong pc_ptr;
7096 uint16_t *gen_opc_end;
7097 int j, lj, cflags;
7098 uint64_t flags;
7099 target_ulong pc_start;
7100 target_ulong cs_base;
7101
7102 /* generate intermediate code */
7103 pc_start = tb->pc;
7104 cs_base = tb->cs_base;
7105 flags = tb->flags;
7106 cflags = tb->cflags;
7107
7108 dc->pe = (flags >> HF_PE_SHIFT) & 1;
7109 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7110 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7111 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7112 dc->f_st = 0;
7113 dc->vm86 = (flags >> VM_SHIFT) & 1;
7114 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7115 dc->iopl = (flags >> IOPL_SHIFT) & 3;
7116 dc->tf = (flags >> TF_SHIFT) & 1;
7117 dc->singlestep_enabled = env->singlestep_enabled;
7118 dc->cc_op = CC_OP_DYNAMIC;
7119 dc->cs_base = cs_base;
7120 dc->tb = tb;
7121 dc->popl_esp_hack = 0;
7122 /* select memory access functions */
7123 dc->mem_index = 0;
7124 if (flags & HF_SOFTMMU_MASK) {
7125 if (dc->cpl == 3)
7126 dc->mem_index = 2 * 4;
7127 else
7128 dc->mem_index = 1 * 4;
7129 }
7130 dc->cpuid_features = env->cpuid_features;
7131 dc->cpuid_ext_features = env->cpuid_ext_features;
7132 dc->cpuid_ext2_features = env->cpuid_ext2_features;
7133 dc->cpuid_ext3_features = env->cpuid_ext3_features;
7134 #ifdef TARGET_X86_64
7135 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7136 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7137 #endif
7138 dc->flags = flags;
7139 dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
7140 (flags & HF_INHIBIT_IRQ_MASK)
7141 #ifndef CONFIG_SOFTMMU
7142 || (flags & HF_SOFTMMU_MASK)
7143 #endif
7144 );
7145 #if 0
7146 /* check addseg logic */
7147 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
7148 printf("ERROR addseg\n");
7149 #endif
7150
7151 cpu_tmp0 = tcg_temp_new(TCG_TYPE_TL);
7152 #if !(defined(__i386__) && (TARGET_LONG_BITS <= HOST_LONG_BITS))
7153 cpu_tmp1_i64 = tcg_temp_new(TCG_TYPE_I64);
7154 #endif
7155 cpu_tmp2_i32 = tcg_temp_new(TCG_TYPE_I32);
7156 cpu_tmp3_i32 = tcg_temp_new(TCG_TYPE_I32);
7157 cpu_tmp4 = tcg_temp_new(TCG_TYPE_TL);
7158 cpu_tmp5 = tcg_temp_new(TCG_TYPE_TL);
7159 cpu_tmp6 = tcg_temp_new(TCG_TYPE_TL);
7160 cpu_ptr0 = tcg_temp_new(TCG_TYPE_PTR);
7161 cpu_ptr1 = tcg_temp_new(TCG_TYPE_PTR);
7162
7163 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
7164
7165 dc->is_jmp = DISAS_NEXT;
7166 pc_ptr = pc_start;
7167 lj = -1;
7168
7169 for(;;) {
7170 if (env->nb_breakpoints > 0) {
7171 for(j = 0; j < env->nb_breakpoints; j++) {
7172 if (env->breakpoints[j] == pc_ptr) {
7173 gen_debug(dc, pc_ptr - dc->cs_base);
7174 break;
7175 }
7176 }
7177 }
7178 if (search_pc) {
7179 j = gen_opc_ptr - gen_opc_buf;
7180 if (lj < j) {
7181 lj++;
7182 while (lj < j)
7183 gen_opc_instr_start[lj++] = 0;
7184 }
7185 gen_opc_pc[lj] = pc_ptr;
7186 gen_opc_cc_op[lj] = dc->cc_op;
7187 gen_opc_instr_start[lj] = 1;
7188 }
7189 pc_ptr = disas_insn(dc, pc_ptr);
7190 /* stop translation if indicated */
7191 if (dc->is_jmp)
7192 break;
7193 /* if single step mode, we generate only one instruction and
7194 generate an exception */
7195 /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7196 the flag and abort the translation to give the irqs a
7197 change to be happen */
7198 if (dc->tf || dc->singlestep_enabled ||
7199 (flags & HF_INHIBIT_IRQ_MASK) ||
7200 (cflags & CF_SINGLE_INSN)) {
7201 gen_jmp_im(pc_ptr - dc->cs_base);
7202 gen_eob(dc);
7203 break;
7204 }
7205 /* if too long translation, stop generation too */
7206 if (gen_opc_ptr >= gen_opc_end ||
7207 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32)) {
7208 gen_jmp_im(pc_ptr - dc->cs_base);
7209 gen_eob(dc);
7210 break;
7211 }
7212 }
7213 *gen_opc_ptr = INDEX_op_end;
7214 /* we don't forget to fill the last values */
7215 if (search_pc) {
7216 j = gen_opc_ptr - gen_opc_buf;
7217 lj++;
7218 while (lj <= j)
7219 gen_opc_instr_start[lj++] = 0;
7220 }
7221
7222 #ifdef DEBUG_DISAS
7223 if (loglevel & CPU_LOG_TB_CPU) {
7224 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
7225 }
7226 if (loglevel & CPU_LOG_TB_IN_ASM) {
7227 int disas_flags;
7228 fprintf(logfile, "----------------\n");
7229 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
7230 #ifdef TARGET_X86_64
7231 if (dc->code64)
7232 disas_flags = 2;
7233 else
7234 #endif
7235 disas_flags = !dc->code32;
7236 target_disas(logfile, pc_start, pc_ptr - pc_start, disas_flags);
7237 fprintf(logfile, "\n");
7238 }
7239 #endif
7240
7241 if (!search_pc)
7242 tb->size = pc_ptr - pc_start;
7243 return 0;
7244 }
7245
7246 int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
7247 {
7248 return gen_intermediate_code_internal(env, tb, 0);
7249 }
7250
7251 int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
7252 {
7253 return gen_intermediate_code_internal(env, tb, 1);
7254 }
7255
7256 void gen_pc_load(CPUState *env, TranslationBlock *tb,
7257 unsigned long searched_pc, int pc_pos, void *puc)
7258 {
7259 int cc_op;
7260 #ifdef DEBUG_DISAS
7261 if (loglevel & CPU_LOG_TB_OP) {
7262 int i;
7263 fprintf(logfile, "RESTORE:\n");
7264 for(i = 0;i <= pc_pos; i++) {
7265 if (gen_opc_instr_start[i]) {
7266 fprintf(logfile, "0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
7267 }
7268 }
7269 fprintf(logfile, "spc=0x%08lx pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
7270 searched_pc, pc_pos, gen_opc_pc[pc_pos] - tb->cs_base,
7271 (uint32_t)tb->cs_base);
7272 }
7273 #endif
7274 env->eip = gen_opc_pc[pc_pos] - tb->cs_base;
7275 cc_op = gen_opc_cc_op[pc_pos];
7276 if (cc_op != CC_OP_DYNAMIC)
7277 env->cc_op = cc_op;
7278 }