/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
};
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
    TCG_REG_EBP,
    TCG_REG_ECX,
    TCG_REG_EDX,
    TCG_REG_EAX,
};
static const int tcg_target_call_iarg_regs[3] = { TCG_REG_EAX, TCG_REG_EDX, TCG_REG_ECX };
static const int tcg_target_call_oarg_regs[2] = { TCG_REG_EAX, TCG_REG_EDX };
static uint8_t *tb_ret_addr;
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    value += addend;
    switch(type) {
    case R_386_32:
        *(uint32_t *)code_ptr = value;
        break;
    case R_386_PC32:
        *(uint32_t *)code_ptr = value - (long)code_ptr;
        break;
    case R_386_PC8:
        value -= (long)code_ptr;
        if (value != (int8_t)value) {
            tcg_abort();
        }
        *(uint8_t *)code_ptr = value;
        break;
    default:
        tcg_abort();
    }
}
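/* Worked example (editor's illustration): for a 4-byte pc-relative field,
   the addend passed down by tcg_out_reloc below is -4, so the value stored
   here works out to target - field_address - 4, i.e. relative to the end
   of the 4-byte field, which is what the CPU expects.  The 1-byte
   R_386_PC8 case works the same way with addend -1. */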
/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    flags &= TCG_CALL_TYPE_MASK;
    switch(flags) {
    case TCG_CALL_TYPE_STD:
        return 0;
    case TCG_CALL_TYPE_REGPARM_1:
    case TCG_CALL_TYPE_REGPARM_2:
    case TCG_CALL_TYPE_REGPARM:
        return flags - TCG_CALL_TYPE_REGPARM_1 + 1;
    default:
        tcg_abort();
    }
}
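/* Note: this mirrors GCC's i386 -mregparm=N convention, where
   REGPARM_1..REGPARM pass the first 1..3 arguments in EAX, EDX and ECX --
   exactly the tcg_target_call_iarg_regs[] order above -- while TYPE_STD
   passes everything on the stack, hence the return value 0. */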
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch(ct_str[0]) {
    case 'a':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
        break;
    case 'b':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
        break;
    case 'c':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
        break;
    case 'd':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
        break;
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
        break;
    case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
        break;
    case 'q':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xf);
        break;
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xff);
        break;

        /* qemu_ld/st address constraint */
    case 'L':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_EDX);
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST)
        return 1;
    else
        return 0;
}
#define P_EXT		0x100		/* 0x0f opcode prefix */

#define OPC_ARITH_EvIz	(0x81)
#define OPC_ARITH_EvIb	(0x83)
#define OPC_ARITH_GvEv	(0x03)		/* ... plus (ARITH_FOO << 3) */
#define OPC_ADD_GvEv	(OPC_ARITH_GvEv | (ARITH_ADD << 3))
#define OPC_BSWAP	(0xc8 | P_EXT)
#define OPC_CALL_Jz	(0xe8)
#define OPC_CMP_GvEv	(OPC_ARITH_GvEv | (ARITH_CMP << 3))
#define OPC_DEC_r32	(0x48)
#define OPC_IMUL_GvEv	(0xaf | P_EXT)
#define OPC_IMUL_GvEvIb	(0x6b)
#define OPC_IMUL_GvEvIz	(0x69)
#define OPC_INC_r32	(0x40)
#define OPC_JCC_long	(0x80 | P_EXT)	/* ... plus condition code */
#define OPC_JCC_short	(0x70)		/* ... plus condition code */
#define OPC_JMP_long	(0xe9)
#define OPC_JMP_short	(0xeb)
#define OPC_MOVB_EvGv	(0x88)		/* stores, more or less */
#define OPC_MOVL_EvGv	(0x89)		/* stores, more or less */
#define OPC_MOVL_GvEv	(0x8b)		/* loads, more or less */
#define OPC_MOVL_Iv     (0xb8)
#define OPC_MOVSBL	(0xbe | P_EXT)
#define OPC_MOVSWL	(0xbf | P_EXT)
#define OPC_MOVZBL	(0xb6 | P_EXT)
#define OPC_MOVZWL	(0xb7 | P_EXT)
#define OPC_POP_r32	(0x58)
#define OPC_PUSH_r32	(0x50)
#define OPC_PUSH_Iv	(0x68)
#define OPC_PUSH_Ib	(0x6a)
#define OPC_RET		(0xc3)
#define OPC_SETCC	(0x90 | P_EXT)	/* ... plus condition code */
#define OPC_SHIFT_1	(0xd1)
#define OPC_SHIFT_Ib	(0xc1)
#define OPC_SHIFT_cl	(0xd3)
#define OPC_TESTL	(0x85)
#define OPC_XCHG_ax_r32	(0x90)

#define OPC_GRP3_Ev	(0xf7)
#define OPC_GRP5	(0xff)
/* Group 1 opcode extensions for 0x80-0x83.
   These are also used as modifiers for OPC_ARITH.  */
#define ARITH_ADD 0
#define ARITH_OR  1
#define ARITH_ADC 2
#define ARITH_SBB 3
#define ARITH_AND 4
#define ARITH_SUB 5
#define ARITH_XOR 6
#define ARITH_CMP 7
/* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3.  */
#define SHIFT_ROL 0
#define SHIFT_ROR 1
#define SHIFT_SHL 4
#define SHIFT_SHR 5
#define SHIFT_SAR 7
/* Group 3 opcode extensions for 0xf6, 0xf7.  To be used with OPC_GRP3.  */
#define EXT3_NOT   2
#define EXT3_NEG   3
#define EXT3_MUL   4
#define EXT3_IMUL  5
#define EXT3_DIV   6
#define EXT3_IDIV  7
/* Group 5 opcode extensions for 0xff.  To be used with OPC_GRP5.  */
#define EXT5_CALLN_Ev	2
#define EXT5_JMPN_Ev	4
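/* Usage sketch: for these "group" opcodes the 3-bit reg field of the ModRM
   byte selects the operation instead of naming a register, so e.g.
   tcg_out_modrm(s, OPC_GRP5, EXT5_CALLN_Ev, reg) assembles to 0xff /2,
   i.e. "call *%reg". */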
/* Condition codes to be added to OPC_JCC_{long,short}.  */
#define JCC_JMP (-1)
#define JCC_JO  0x0
#define JCC_JNO 0x1
#define JCC_JB  0x2
#define JCC_JAE 0x3
#define JCC_JE  0x4
#define JCC_JNE 0x5
#define JCC_JBE 0x6
#define JCC_JA  0x7
#define JCC_JS  0x8
#define JCC_JNS 0x9
#define JCC_JP  0xa
#define JCC_JNP 0xb
#define JCC_JL  0xc
#define JCC_JGE 0xd
#define JCC_JLE 0xe
#define JCC_JG  0xf
static const uint8_t tcg_cond_to_jcc[10] = {
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,
    [TCG_COND_LE] = JCC_JLE,
    [TCG_COND_GT] = JCC_JG,
    [TCG_COND_LTU] = JCC_JB,
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
};
static inline void tcg_out_opc(TCGContext *s, int opc)
{
    if (opc & P_EXT)
        tcg_out8(s, 0x0f);
    tcg_out8(s, opc);
}
static inline void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
{
    tcg_out_opc(s, opc);
    tcg_out8(s, 0xc0 | (r << 3) | rm);
}
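/* The ModRM byte emitted above is laid out as mod:2 | reg:3 | rm:3;
   mod = 11 (0xc0) selects the register-direct form, so both operands are
   registers.  E.g. r = EAX (0), rm = ECX (1) gives the byte 0xc1. */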
/* rm == -1 means no register index */
static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm,
                                        tcg_target_long offset)
{
    tcg_out_opc(s, opc);
    if (rm == -1) {
        tcg_out8(s, 0x05 | (r << 3));
        tcg_out32(s, offset);
    } else if (offset == 0 && rm != TCG_REG_EBP) {
        if (rm == TCG_REG_ESP) {
            tcg_out8(s, 0x04 | (r << 3));
            tcg_out8(s, 0x24);
        } else {
            tcg_out8(s, 0x00 | (r << 3) | rm);
        }
    } else if ((int8_t)offset == offset) {
        if (rm == TCG_REG_ESP) {
            tcg_out8(s, 0x44 | (r << 3));
            tcg_out8(s, 0x24);
        } else {
            tcg_out8(s, 0x40 | (r << 3) | rm);
        }
        tcg_out8(s, offset);
    } else {
        if (rm == TCG_REG_ESP) {
            tcg_out8(s, 0x84 | (r << 3));
            tcg_out8(s, 0x24);
        } else {
            tcg_out8(s, 0x80 | (r << 3) | rm);
        }
        tcg_out32(s, offset);
    }
}
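/* Addressing-mode summary for the cases above: mod = 00 means no
   displacement (unusable with rm = EBP, which in that form encodes a bare
   disp32 instead -- the rm == -1 case), mod = 01 (0x40) adds a disp8, and
   mod = 10 (0x80) adds a disp32.  rm = ESP (100) always requires a
   trailing SIB byte; 0x24 encodes "base = ESP, no index". */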
/* Generate dest op= src.  Uses the same ARITH_* codes as tgen_arithi.  */
static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
{
    tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3), dest, src);
}
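/* Example: tgen_arithr(s, ARITH_SUB, TCG_REG_EAX, TCG_REG_ECX) emits
   opcode 0x03 + (5 << 3) = 0x2b followed by ModRM 0xc1, i.e. the two-byte
   sequence for "subl %ecx, %eax". */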
static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    if (ret != arg) {
        tcg_out_modrm(s, OPC_MOVL_GvEv, ret, arg);
    }
}
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                int ret, int32_t arg)
{
    if (arg == 0) {
        /* xor is shorter than movl $0 and avoids the immediate */
        tgen_arithr(s, ARITH_XOR, ret, ret);
    } else {
        tcg_out8(s, OPC_MOVL_Iv + ret);
        tcg_out32(s, arg);
    }
}
static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
{
    if (val == (int8_t)val) {
        tcg_out_opc(s, OPC_PUSH_Ib);
        tcg_out8(s, val);
    } else {
        tcg_out_opc(s, OPC_PUSH_Iv);
        tcg_out32(s, val);
    }
}
static inline void tcg_out_push(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_PUSH_r32 + reg);
}
static inline void tcg_out_pop(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_POP_r32 + reg);
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_modrm_offset(s, OPC_MOVL_GvEv, ret, arg1, arg2);
}
static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_modrm_offset(s, OPC_MOVL_EvGv, arg, arg1, arg2);
}
static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
{
    if (count == 1) {
        tcg_out_modrm(s, OPC_SHIFT_1, subopc, reg);
    } else {
        tcg_out_modrm(s, OPC_SHIFT_Ib, subopc, reg);
        tcg_out8(s, count);
    }
}
static inline void tcg_out_bswap32(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_BSWAP + reg);
}
static inline void tcg_out_rolw_8(TCGContext *s, int reg)
{
    /* 0x66 is the 16-bit operand-size prefix; rolw $8 swaps the two low
       bytes, i.e. performs a 16-bit byteswap */
    tcg_out8(s, 0x66);
    tcg_out_shifti(s, SHIFT_ROL, reg, 8);
}
static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
{
    /* movzbl */
    assert(src < 4);
    tcg_out_modrm(s, OPC_MOVZBL, dest, src);
}
static void tcg_out_ext8s(TCGContext *s, int dest, int src)
{
    /* movsbl */
    assert(src < 4);
    tcg_out_modrm(s, OPC_MOVSBL, dest, src);
}
static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
{
    /* movzwl */
    tcg_out_modrm(s, OPC_MOVZWL, dest, src);
}
static inline void tcg_out_ext16s(TCGContext *s, int dest, int src)
{
    /* movswl */
    tcg_out_modrm(s, OPC_MOVSWL, dest, src);
}
static inline void tgen_arithi(TCGContext *s, int c, int r0,
                               int32_t val, int cf)
{
    /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
       partial flags update stalls on Pentium4 and are not recommended
       by current Intel optimization manuals.  */
    if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
        int opc = ((c == ARITH_ADD) ^ (val < 0) ? OPC_INC_r32 : OPC_DEC_r32);
        tcg_out_opc(s, opc + r0);
    } else if (val == (int8_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIb, c, r0);
        tcg_out8(s, val);
    } else if (c == ARITH_AND && val == 0xffu && r0 < 4) {
        tcg_out_ext8u(s, r0, r0);
    } else if (c == ARITH_AND && val == 0xffffu) {
        tcg_out_ext16u(s, r0, r0);
    } else {
        tcg_out_modrm(s, OPC_ARITH_EvIz, c, r0);
        tcg_out32(s, val);
    }
}
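/* Encoding note for the chain above: 0x83 (OPC_ARITH_EvIb) takes a
   sign-extended 8-bit immediate and is 3 bytes shorter than the
   0x81/imm32 form; the AND special cases reuse movzbl/movzwl, which need
   no immediate at all.  The r0 < 4 check keeps the byte form to the
   byte-addressable registers EAX/ECX/EDX/EBX. */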
static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0) {
        tgen_arithi(s, ARITH_ADD, reg, val, 0);
    }
}
/* Use SMALL != 0 to force a short forward branch.  */
static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
{
    int32_t val, val1;
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        val = l->u.value - (tcg_target_long)s->code_ptr;
        val1 = val - 2;
        if ((int8_t)val1 == val1) {
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_short);
            } else {
                tcg_out8(s, OPC_JCC_short + opc);
            }
            tcg_out8(s, val1);
        } else {
            if (small) {
                tcg_abort();
            }
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_long);
                tcg_out32(s, val - 5);
            } else {
                tcg_out_opc(s, OPC_JCC_long + opc);
                tcg_out32(s, val - 6);
            }
        }
    } else if (small) {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_short);
        } else {
            tcg_out8(s, OPC_JCC_short + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC8, label_index, -1);
        s->code_ptr += 1;
    } else {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_long);
        } else {
            tcg_out_opc(s, OPC_JCC_long + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
        s->code_ptr += 4;
    }
}
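/* The -5 and -6 above account for instruction lengths: "jmp rel32" is
   5 bytes (0xe9 + imm32) and "jcc rel32" is 6 bytes (0x0f 0x8x + imm32),
   and the displacement is relative to the first byte *after* the branch.
   For the forward branches the same correction is deferred to patch_reloc
   via the -1/-4 addends. */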
static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
                        int const_arg2)
{
    if (const_arg2) {
        if (arg2 == 0) {
            /* test r, r */
            tcg_out_modrm(s, OPC_TESTL, arg1, arg1);
        } else {
            tgen_arithi(s, ARITH_CMP, arg1, arg2, 0);
        }
    } else {
        tgen_arithr(s, ARITH_CMP, arg1, arg2);
    }
}
static void tcg_out_brcond(TCGContext *s, TCGCond cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           int label_index, int small)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
}
/* XXX: we implement it at the target level to avoid having to
   handle cross basic blocks temporaries */
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
                            const int *const_args, int small)
{
    int label_next;
    label_next = gen_new_label();
    switch(args[4]) {
    case TCG_COND_EQ:
        tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2],
                       label_next, 1);
        tcg_out_brcond(s, TCG_COND_EQ, args[1], args[3], const_args[3],
                       args[5], small);
        break;
    case TCG_COND_NE:
        tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2],
                       args[5], small);
        tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3],
                       args[5], small);
        break;
    case TCG_COND_LT:
        tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_LE:
        tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_GT:
        tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_GE:
        tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_LTU:
        tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_LEU:
        tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_GTU:
        tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_GEU:
        tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    default:
        tcg_abort();
    }
    tcg_out_label(s, label_next, (tcg_target_long)s->code_ptr);
}
static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGArg dest,
                            TCGArg arg1, TCGArg arg2, int const_arg2)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
    tcg_out_ext8u(s, dest, dest);
}
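/* setcc only writes the low 8 bits of its operand, hence the explicit
   zero-extension; it also means dest must be one of the byte-addressable
   registers EAX/ECX/EDX/EBX, which the "q" constraint for setcond_i32 in
   x86_op_defs[] below enforces. */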
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
                             const int *const_args)
{
    TCGArg new_args[6];
    int label_true, label_over;

    memcpy(new_args, args+1, 5*sizeof(TCGArg));

    if (args[0] == args[1] || args[0] == args[2]
        || (!const_args[3] && args[0] == args[3])
        || (!const_args[4] && args[0] == args[4])) {
        /* When the destination overlaps with one of the argument
           registers, don't do anything tricky.  */
        label_true = gen_new_label();
        label_over = gen_new_label();

        new_args[5] = label_true;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
        tcg_out_jxx(s, JCC_JMP, label_over, 1);
        tcg_out_label(s, label_true, (tcg_target_long)s->code_ptr);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
        tcg_out_label(s, label_over, (tcg_target_long)s->code_ptr);
    } else {
        /* When the destination does not overlap one of the arguments,
           clear the destination first, jump if cond false, and emit an
           increment in the true case.  This results in smaller code.  */
        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);

        label_over = gen_new_label();
        new_args[4] = tcg_invert_cond(new_args[4]);
        new_args[5] = label_over;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
        tcg_out_label(s, label_over, (tcg_target_long)s->code_ptr);
    }
}
static void tcg_out_calli(TCGContext *s, tcg_target_long dest)
{
    tcg_out_opc(s, OPC_CALL_Jz);
    tcg_out32(s, dest - (tcg_target_long)s->code_ptr - 4);
}
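/* "call rel32" (0xe8): as with the long jumps, the 32-bit displacement is
   relative to the end of the 5-byte instruction, hence the extra -4 after
   s->code_ptr has already advanced past the opcode byte. */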
#if defined(CONFIG_SOFTMMU)

#include "../../softmmu_defs.h"

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};

#endif

#ifndef CONFIG_USER_ONLY
#define GUEST_BASE 0
#endif
/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
   EAX. It will be useful once fixed registers globals are less
   common. */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    uint8_t *label1_ptr, *label2_ptr;
#endif
#if TARGET_LONG_BITS == 64
#if defined(CONFIG_SOFTMMU)
    uint8_t *label3_ptr;
#endif
    int addr_reg2;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0;
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
    mem_index = *args;
    s_bits = opc & 3;

    r0 = TCG_REG_EAX;
    r1 = TCG_REG_EDX;

#if defined(CONFIG_SOFTMMU)
    tcg_out_mov(s, r1, addr_reg);
    tcg_out_mov(s, r0, addr_reg);

    tcg_out_shifti(s, SHIFT_SHR, r1, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tgen_arithi(s, ARITH_AND, r0, TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
    tgen_arithi(s, ARITH_AND, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);

    tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
    tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
    tcg_out8(s, (5 << 3) | r1);
    tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_read));

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv, r0, r1, 0);

    tcg_out_mov(s, r0, addr_reg);

#if TARGET_LONG_BITS == 32
    /* je label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;
#else
    /* jne label3 */
    tcg_out8(s, OPC_JCC_short + JCC_JNE);
    label3_ptr = s->code_ptr;
    s->code_ptr++;

    /* cmp 4(r1), addr_reg2 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv, addr_reg2, r1, 4);

    /* je label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* label3: */
    *label3_ptr = s->code_ptr - label3_ptr - 1;
#endif
747 #if TARGET_LONG_BITS == 32
748 tcg_out_movi(s
, TCG_TYPE_I32
, TCG_REG_EDX
, mem_index
);
750 tcg_out_mov(s
, TCG_REG_EDX
, addr_reg2
);
751 tcg_out_movi(s
, TCG_TYPE_I32
, TCG_REG_ECX
, mem_index
);
753 tcg_out_calli(s
, (tcg_target_long
)qemu_ld_helpers
[s_bits
]);
    switch(opc) {
    case 0 | 4:
        tcg_out_ext8s(s, data_reg, TCG_REG_EAX);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, data_reg, TCG_REG_EAX);
        break;
    case 0:
        tcg_out_ext8u(s, data_reg, TCG_REG_EAX);
        break;
    case 1:
        tcg_out_ext16u(s, data_reg, TCG_REG_EAX);
        break;
    case 2:
    default:
        tcg_out_mov(s, data_reg, TCG_REG_EAX);
        break;
    case 3:
        if (data_reg == TCG_REG_EDX) {
            /* xchg %edx, %eax */
            tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX);
            tcg_out_mov(s, data_reg2, TCG_REG_EAX);
        } else {
            tcg_out_mov(s, data_reg, TCG_REG_EAX);
            tcg_out_mov(s, data_reg2, TCG_REG_EDX);
        }
        break;
    }
    /* jmp label2 */
    tcg_out8(s, OPC_JMP_short);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* add x(r1), r0 */
    tcg_out_modrm_offset(s, OPC_ADD_GvEv, r0, r1,
                         offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_read));
#else
    r0 = addr_reg;
#endif
#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    switch(opc) {
    case 0:
        /* movzbl */
        tcg_out_modrm_offset(s, OPC_MOVZBL, data_reg, r0, GUEST_BASE);
        break;
    case 0 | 4:
        /* movsbl */
        tcg_out_modrm_offset(s, OPC_MOVSBL, data_reg, r0, GUEST_BASE);
        break;
    case 1:
        /* movzwl */
        tcg_out_modrm_offset(s, OPC_MOVZWL, data_reg, r0, GUEST_BASE);
        if (bswap) {
            tcg_out_rolw_8(s, data_reg);
        }
        break;
    case 1 | 4:
        /* movswl */
        if (bswap) {
            tcg_out_modrm_offset(s, OPC_MOVZWL, data_reg, r0, GUEST_BASE);
            tcg_out_rolw_8(s, data_reg);

            /* movswl data_reg, data_reg */
            tcg_out_modrm(s, OPC_MOVSWL, data_reg, data_reg);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVSWL, data_reg, r0, GUEST_BASE);
        }
        break;
    case 2:
        tcg_out_ld(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
        if (bswap) {
            tcg_out_bswap32(s, data_reg);
        }
        break;
    case 3:
        if (bswap) {
            int t = data_reg;
            data_reg = data_reg2;
            data_reg2 = t;
        }
        if (r0 != data_reg) {
            tcg_out_ld(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
            tcg_out_ld(s, TCG_TYPE_I32, data_reg2, r0, GUEST_BASE + 4);
        } else {
            /* the address register would be clobbered by the first
               load, so fetch the high word first */
            tcg_out_ld(s, TCG_TYPE_I32, data_reg2, r0, GUEST_BASE + 4);
            tcg_out_ld(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
        }
        if (bswap) {
            tcg_out_bswap32(s, data_reg);
            tcg_out_bswap32(s, data_reg2);
        }
        break;
    default:
        tcg_abort();
    }
#if defined(CONFIG_SOFTMMU)
    /* label2: */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#endif
}
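/* Shape of the code emitted above in the softmmu case, as a sketch:
 *
 *     shr  $(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), r1  ; TLB index
 *     and  $(page mask | alignment bits), r0             ; tag to compare
 *     and  $((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS), r1
 *     lea  tlb_table_offset(r1, %ebp), r1                ; env lives in %ebp
 *     cmp  0(r1), r0                                     ; TLB hit?
 *
 * On a miss it calls out to qemu_ld_helpers[s_bits] and moves the result
 * into place; on a hit it adds the cached host-address addend to the
 * guest address and loads directly. */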
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    int stack_adjust;
    uint8_t *label1_ptr, *label2_ptr;
#endif
#if TARGET_LONG_BITS == 64
#if defined(CONFIG_SOFTMMU)
    uint8_t *label3_ptr;
#endif
    int addr_reg2;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0;
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
    mem_index = *args;
    s_bits = opc;

    r0 = TCG_REG_EAX;
    r1 = TCG_REG_EDX;

#if defined(CONFIG_SOFTMMU)
    tcg_out_mov(s, r1, addr_reg);
    tcg_out_mov(s, r0, addr_reg);

    tcg_out_shifti(s, SHIFT_SHR, r1, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tgen_arithi(s, ARITH_AND, r0, TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
    tgen_arithi(s, ARITH_AND, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);

    tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
    tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
    tcg_out8(s, (5 << 3) | r1);
    tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_write));

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv, r0, r1, 0);

    tcg_out_mov(s, r0, addr_reg);

#if TARGET_LONG_BITS == 32
    /* je label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;
#else
    /* jne label3 */
    tcg_out8(s, OPC_JCC_short + JCC_JNE);
    label3_ptr = s->code_ptr;
    s->code_ptr++;

    /* cmp 4(r1), addr_reg2 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv, addr_reg2, r1, 4);

    /* je label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* label3: */
    *label3_ptr = s->code_ptr - label3_ptr - 1;
#endif
    /* XXX: move that code at the end of the TB */
#if TARGET_LONG_BITS == 32
    if (opc == 3) {
        tcg_out_mov(s, TCG_REG_EDX, data_reg);
        tcg_out_mov(s, TCG_REG_ECX, data_reg2);
        tcg_out_pushi(s, mem_index);
        stack_adjust = 4;
    } else {
        switch(opc) {
        case 0:
            tcg_out_ext8u(s, TCG_REG_EDX, data_reg);
            break;
        case 1:
            tcg_out_ext16u(s, TCG_REG_EDX, data_reg);
            break;
        case 2:
            tcg_out_mov(s, TCG_REG_EDX, data_reg);
            break;
        }
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
        stack_adjust = 0;
    }
#else
    if (opc == 3) {
        tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
        tcg_out_pushi(s, mem_index);
        tcg_out_push(s, data_reg2);
        tcg_out_push(s, data_reg);
        stack_adjust = 12;
    } else {
        tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
        switch(opc) {
        case 0:
            tcg_out_ext8u(s, TCG_REG_ECX, data_reg);
            break;
        case 1:
            tcg_out_ext16u(s, TCG_REG_ECX, data_reg);
            break;
        case 2:
            tcg_out_mov(s, TCG_REG_ECX, data_reg);
            break;
        }
        tcg_out_pushi(s, mem_index);
        stack_adjust = 4;
    }
#endif

    tcg_out_calli(s, (tcg_target_long)qemu_st_helpers[s_bits]);
    if (stack_adjust == 4) {
        /* Pop and discard.  This is 2 bytes smaller than the add.  */
        tcg_out_pop(s, TCG_REG_ECX);
    } else if (stack_adjust != 0) {
        tcg_out_addi(s, TCG_REG_ESP, stack_adjust);
    }
    /* jmp label2 */
    tcg_out8(s, OPC_JMP_short);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* add x(r1), r0 */
    tcg_out_modrm_offset(s, OPC_ADD_GvEv, r0, r1,
                         offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_write));
#else
    r0 = addr_reg;
#endif
#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    switch(opc) {
    case 0:
        /* movb */
        tcg_out_modrm_offset(s, OPC_MOVB_EvGv, data_reg, r0, GUEST_BASE);
        break;
    case 1:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg);
            tcg_out_rolw_8(s, r1);
            data_reg = r1;
        }
        /* movw */
        tcg_out8(s, 0x66);
        tcg_out_modrm_offset(s, OPC_MOVL_EvGv, data_reg, r0, GUEST_BASE);
        break;
    case 2:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg);
            tcg_out_bswap32(s, r1);
            data_reg = r1;
        }
        tcg_out_st(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
        break;
    case 3:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg2);
            tcg_out_bswap32(s, r1);
            tcg_out_st(s, TCG_TYPE_I32, r1, r0, GUEST_BASE);
            tcg_out_mov(s, r1, data_reg);
            tcg_out_bswap32(s, r1);
            tcg_out_st(s, TCG_TYPE_I32, r1, r0, GUEST_BASE + 4);
        } else {
            tcg_out_st(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
            tcg_out_st(s, TCG_TYPE_I32, data_reg2, r0, GUEST_BASE + 4);
        }
        break;
    default:
        tcg_abort();
    }
#if defined(CONFIG_SOFTMMU)
    /* label2: */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#endif
}
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    int c;

    switch(opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EAX, args[0]);
        tcg_out8(s, OPC_JMP_long); /* jmp tb_ret_addr */
        tcg_out32(s, tb_ret_addr - s->code_ptr - 4);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            tcg_out8(s, OPC_JMP_long); /* jmp im */
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
        } else {
            /* indirect jump method */
            tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
                                 (tcg_target_long)(s->tb_next + args[0]));
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out_calli(s, args[0]);
        } else {
            /* call *reg */
            tcg_out_modrm(s, OPC_GRP5, EXT5_CALLN_Ev, args[0]);
        }
        break;
    case INDEX_op_jmp:
        if (const_args[0]) {
            tcg_out8(s, OPC_JMP_long);
            tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
        } else {
            /* jmp *reg */
            tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, args[0]);
        }
        break;
    case INDEX_op_br:
        tcg_out_jxx(s, JCC_JMP, args[0], 0);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ld8u_i32:
        /* movzbl */
        tcg_out_modrm_offset(s, OPC_MOVZBL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        /* movsbl */
        tcg_out_modrm_offset(s, OPC_MOVSBL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        /* movzwl */
        tcg_out_modrm_offset(s, OPC_MOVZWL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        /* movswl */
        tcg_out_modrm_offset(s, OPC_MOVSWL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        /* movb */
        tcg_out_modrm_offset(s, OPC_MOVB_EvGv, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        /* movw */
        tcg_out8(s, 0x66);
        tcg_out_modrm_offset(s, OPC_MOVL_EvGv, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;
    case INDEX_op_sub_i32:
        c = ARITH_SUB;
        goto gen_arith;
    case INDEX_op_and_i32:
        c = ARITH_AND;
        goto gen_arith;
    case INDEX_op_or_i32:
        c = ARITH_OR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_add_i32:
        c = ARITH_ADD;
    gen_arith:
        if (const_args[2]) {
            tgen_arithi(s, c, args[0], args[2], 0);
        } else {
            tgen_arithr(s, c, args[0], args[2]);
        }
        break;
    case INDEX_op_mul_i32:
        if (const_args[2]) {
            int32_t val;
            val = args[2];
            if (val == (int8_t)val) {
                tcg_out_modrm(s, OPC_IMUL_GvEvIb, args[0], args[0]);
                tcg_out8(s, val);
            } else {
                tcg_out_modrm(s, OPC_IMUL_GvEvIz, args[0], args[0]);
                tcg_out32(s, val);
            }
        } else {
            tcg_out_modrm(s, OPC_IMUL_GvEv, args[0], args[2]);
        }
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_MUL, args[3]);
        break;
    case INDEX_op_div2_i32:
        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_IDIV, args[4]);
        break;
    case INDEX_op_divu2_i32:
        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_DIV, args[4]);
        break;
    case INDEX_op_shl_i32:
        c = SHIFT_SHL;
    gen_shift32:
        if (const_args[2]) {
            tcg_out_shifti(s, c, args[0], args[2]);
        } else {
            tcg_out_modrm(s, OPC_SHIFT_cl, c, args[0]);
        }
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SHR;
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SAR;
        goto gen_shift32;
    case INDEX_op_rotl_i32:
        c = SHIFT_ROL;
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = SHIFT_ROR;
        goto gen_shift32;
    case INDEX_op_add2_i32:
        if (const_args[4]) {
            tgen_arithi(s, ARITH_ADD, args[0], args[4], 1);
        } else {
            tgen_arithr(s, ARITH_ADD, args[0], args[4]);
        }
        if (const_args[5]) {
            tgen_arithi(s, ARITH_ADC, args[1], args[5], 1);
        } else {
            tgen_arithr(s, ARITH_ADC, args[1], args[5]);
        }
        break;
    case INDEX_op_sub2_i32:
        if (const_args[4]) {
            tgen_arithi(s, ARITH_SUB, args[0], args[4], 1);
        } else {
            tgen_arithr(s, ARITH_SUB, args[0], args[4]);
        }
        if (const_args[5]) {
            tgen_arithi(s, ARITH_SBB, args[1], args[5], 1);
        } else {
            tgen_arithr(s, ARITH_SBB, args[1], args[5]);
        }
        break;
    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       args[3], 0);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args, 0);
        break;
    case INDEX_op_bswap16_i32:
        tcg_out_rolw_8(s, args[0]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0]);
        break;

    case INDEX_op_neg_i32:
        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_NEG, args[0]);
        break;

    case INDEX_op_not_i32:
        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_NOT, args[0]);
        break;
    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i32:
        tcg_out_ext8u(s, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, args[0], args[1]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args, const_args);
        break;
    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        tcg_abort();
    }
}
static const TCGTargetOpDef x86_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },
    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "q", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "0", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "ri" } },
    { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
    { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "ri" } },
    { INDEX_op_xor_i32, { "r", "0", "ri" } },

    { INDEX_op_shl_i32, { "r", "0", "ci" } },
    { INDEX_op_shr_i32, { "r", "0", "ci" } },
    { INDEX_op_sar_i32, { "r", "0", "ci" } },
    { INDEX_op_rotl_i32, { "r", "0", "ci" } },
    { INDEX_op_rotr_i32, { "r", "0", "ci" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },

    { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "0" } },
    { INDEX_op_bswap32_i32, { "r", "0" } },

    { INDEX_op_neg_i32, { "r", "0" } },

    { INDEX_op_not_i32, { "r", "0" } },

    { INDEX_op_ext8s_i32, { "r", "q" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "q" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_setcond_i32, { "q", "r", "ri" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif
    { -1 },
};
static int tcg_target_callee_save_regs[] = {
    /*  TCG_REG_EBP, */ /* currently used for the global env, so no
                           need to save */
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
};
/* Generate global QEMU prologue and epilogue code */
void tcg_target_qemu_prologue(TCGContext *s)
{
    int i, frame_size, push_size, stack_addend;

    /* TB prologue */
    /* save all callee saved registers */
    for(i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_push(s, tcg_target_callee_save_regs[i]);
    }
    /* reserve some stack space */
    push_size = 4 + ARRAY_SIZE(tcg_target_callee_save_regs) * 4;
    frame_size = push_size + TCG_STATIC_CALL_ARGS_SIZE;
    frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    stack_addend = frame_size - push_size;
    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);

    tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_EAX); /* jmp *%eax */

    /* TB epilogue */
    tb_ret_addr = s->code_ptr;
    tcg_out_addi(s, TCG_REG_ESP, stack_addend);
    for(i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
        tcg_out_pop(s, tcg_target_callee_save_regs[i]);
    }
    tcg_out_opc(s, OPC_RET);
}
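/* Frame-size arithmetic, worked through: the 4 in push_size accounts for
   the return address pushed by the call that enters this generated code,
   plus 4 bytes per saved register.  Rounding frame_size up to
   TCG_TARGET_STACK_ALIGN and subtracting push_size again yields the
   amount to subtract from %esp so the stack stays aligned across any
   calls the translated code makes. */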
void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();
#endif

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ESP);

    tcg_add_target_add_op_defs(x86_op_defs);
}