/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
};
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
    TCG_REG_EBP,
    TCG_REG_ECX,
    TCG_REG_EDX,
    TCG_REG_EAX,
};
static const int tcg_target_call_iarg_regs[3] = { TCG_REG_EAX, TCG_REG_EDX, TCG_REG_ECX };
static const int tcg_target_call_oarg_regs[2] = { TCG_REG_EAX, TCG_REG_EDX };
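/* These register choices match GCC's regparm(3) calling convention on
   i386: up to three integer arguments in EAX, EDX, ECX, with a 64-bit
   result returned in the EDX:EAX pair. */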
static uint8_t *tb_ret_addr;
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    value += addend;
    switch(type) {
    case R_386_32:
        *(uint32_t *)code_ptr = value;
        break;
    case R_386_PC32:
        *(uint32_t *)code_ptr = value - (long)code_ptr;
        break;
    case R_386_PC8:
        value -= (long)code_ptr;
        if (value != (int8_t)value) {
            tcg_abort();
        }
        *(uint8_t *)code_ptr = value;
        break;
    default:
        tcg_abort();
    }
}
/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    flags &= TCG_CALL_TYPE_MASK;
    switch(flags) {
    case TCG_CALL_TYPE_STD:
        return 0;
    case TCG_CALL_TYPE_REGPARM_1:
    case TCG_CALL_TYPE_REGPARM_2:
    case TCG_CALL_TYPE_REGPARM:
        return flags - TCG_CALL_TYPE_REGPARM_1 + 1;
    default:
        tcg_abort();
    }
}
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch(ct_str[0]) {
    case 'a':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
        break;
    case 'b':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
        break;
    case 'c':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
        break;
    case 'd':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
        break;
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
        break;
    case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
        break;
    case 'q':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xf);
        break;
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xff);
        break;

        /* qemu_ld/st address constraint */
    case 'L':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_EDX);
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    return 0;
}
#define P_EXT           0x100   /* 0x0f opcode prefix */

#define OPC_ARITH_EvIz  (0x81)
#define OPC_ARITH_EvIb  (0x83)
#define OPC_ARITH_GvEv  (0x03)          /* ... plus (ARITH_FOO << 3) */
#define OPC_ADD_GvEv    (OPC_ARITH_GvEv | (ARITH_ADD << 3))
#define OPC_BSWAP       (0xc8 | P_EXT)
#define OPC_CALL_Jz     (0xe8)
#define OPC_CMP_GvEv    (OPC_ARITH_GvEv | (ARITH_CMP << 3))
#define OPC_DEC_r32     (0x48)
#define OPC_IMUL_GvEv   (0xaf | P_EXT)
#define OPC_IMUL_GvEvIb (0x6b)
#define OPC_IMUL_GvEvIz (0x69)
#define OPC_INC_r32     (0x40)
#define OPC_JCC_long    (0x80 | P_EXT)  /* ... plus condition code */
#define OPC_JCC_short   (0x70)          /* ... plus condition code */
#define OPC_JMP_long    (0xe9)
#define OPC_JMP_short   (0xeb)
#define OPC_LEA         (0x8d)
#define OPC_MOVB_EvGv   (0x88)          /* stores, more or less */
#define OPC_MOVL_EvGv   (0x89)          /* stores, more or less */
#define OPC_MOVL_GvEv   (0x8b)          /* loads, more or less */
#define OPC_MOVL_Iv     (0xb8)
#define OPC_MOVSBL      (0xbe | P_EXT)
#define OPC_MOVSWL      (0xbf | P_EXT)
#define OPC_MOVZBL      (0xb6 | P_EXT)
#define OPC_MOVZWL      (0xb7 | P_EXT)
#define OPC_POP_r32     (0x58)
#define OPC_PUSH_r32    (0x50)
#define OPC_PUSH_Iv     (0x68)
#define OPC_PUSH_Ib     (0x6a)
#define OPC_RET         (0xc3)
#define OPC_SETCC       (0x90 | P_EXT)  /* ... plus condition code */
#define OPC_SHIFT_1     (0xd1)
#define OPC_SHIFT_Ib    (0xc1)
#define OPC_SHIFT_cl    (0xd3)
#define OPC_TESTL       (0x85)
#define OPC_XCHG_ax_r32 (0x90)

#define OPC_GRP3_Ev     (0xf7)
#define OPC_GRP5        (0xff)
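/* Worked example of how these values are consumed (see tcg_out_opc and
   tcg_out_modrm below): OPC_IMUL_GvEv is 0xaf | P_EXT, so it is emitted
   as the two-byte sequence 0f af, while a plain opcode such as
   OPC_ADD_GvEv (0x03) is emitted as a single byte.  P_EXT is a flag bit
   above the 8-bit opcode space, not part of the encoding itself. */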
/* Group 1 opcode extensions for 0x80-0x83.
   These are also used as modifiers for OPC_ARITH.  */
#define ARITH_ADD 0
#define ARITH_OR  1
#define ARITH_ADC 2
#define ARITH_SBB 3
#define ARITH_AND 4
#define ARITH_SUB 5
#define ARITH_XOR 6
#define ARITH_CMP 7

/* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3.  */
#define SHIFT_ROL 0
#define SHIFT_ROR 1
#define SHIFT_SHL 4
#define SHIFT_SHR 5
#define SHIFT_SAR 7

/* Group 3 opcode extensions for 0xf6, 0xf7.  To be used with OPC_GRP3.  */
#define EXT3_NOT   2
#define EXT3_NEG   3
#define EXT3_MUL   4
#define EXT3_IMUL  5
#define EXT3_DIV   6
#define EXT3_IDIV  7

/* Group 5 opcode extensions for 0xff.  To be used with OPC_GRP5.  */
#define EXT5_CALLN_Ev 2
#define EXT5_JMPN_Ev  4
/* Condition codes to be added to OPC_JCC_{long,short}.  */
#define JCC_JMP (-1)
#define JCC_JO  0x0
#define JCC_JNO 0x1
#define JCC_JB  0x2
#define JCC_JAE 0x3
#define JCC_JE  0x4
#define JCC_JNE 0x5
#define JCC_JBE 0x6
#define JCC_JA  0x7
#define JCC_JS  0x8
#define JCC_JNS 0x9
#define JCC_JP  0xa
#define JCC_JNP 0xb
#define JCC_JL  0xc
#define JCC_JGE 0xd
#define JCC_JLE 0xe
#define JCC_JG  0xf

static const uint8_t tcg_cond_to_jcc[10] = {
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,
    [TCG_COND_LE] = JCC_JLE,
    [TCG_COND_GT] = JCC_JG,
    [TCG_COND_LTU] = JCC_JB,
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
};
static inline void tcg_out_opc(TCGContext *s, int opc)
{
    if (opc & P_EXT) {
        tcg_out8(s, 0x0f);
    }
    tcg_out8(s, opc);
}

static inline void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
{
    tcg_out_opc(s, opc);
    tcg_out8(s, 0xc0 | (r << 3) | rm);
}
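/* Example: tcg_out_modrm(s, OPC_ARITH_GvEv + (ARITH_ADD << 3),
   TCG_REG_EAX, TCG_REG_ECX) emits 03 c1, i.e. "addl %ecx, %eax".
   The 0xc0 in the ModRM byte selects register-direct mode (mod = 11). */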
/* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
   We handle either RM and INDEX missing with a -1 value.  */

static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
                                     int index, int shift, int32_t offset)
{
    int mod, len;

    if (index == -1 && rm == -1) {
        /* Absolute address.  */
        tcg_out_opc(s, opc);
        tcg_out8(s, (r << 3) | 5);
        tcg_out32(s, offset);
        return;
    }

    tcg_out_opc(s, opc);

    /* Find the length of the immediate addend.  Note that the encoding
       that would be used for (%ebp) indicates absolute addressing.  */
    if (rm == -1) {
        mod = 0, len = 4, rm = 5;
    } else if (offset == 0 && rm != TCG_REG_EBP) {
        mod = 0, len = 0;
    } else if (offset == (int8_t)offset) {
        mod = 0x40, len = 1;
    } else {
        mod = 0x80, len = 4;
    }

    /* Use a single byte MODRM format if possible.  Note that the encoding
       that would be used for %esp is the escape to the two byte form.  */
    if (index == -1 && rm != TCG_REG_ESP) {
        /* Single byte MODRM format.  */
        tcg_out8(s, mod | (r << 3) | rm);
    } else {
        /* Two byte MODRM+SIB format.  */

        /* Note that the encoding that would place %esp into the index
           field indicates no index register.  */
        if (index == -1) {
            index = 4;
        } else {
            assert(index != TCG_REG_ESP);
        }

        tcg_out8(s, mod | (r << 3) | 4);
        tcg_out8(s, (shift << 6) | (index << 3) | rm);
    }

    if (len == 1) {
        tcg_out8(s, offset);
    } else if (len == 4) {
        tcg_out32(s, offset);
    }
}
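/* Example: tcg_out_modrm_offset(s, OPC_MOVL_GvEv, TCG_REG_ECX,
   TCG_REG_EAX, 0x12345678) takes the single-byte ModRM path with
   mod = 0x80 (32-bit displacement) and emits 8b 88 78 56 34 12,
   i.e. "movl 0x12345678(%eax), %ecx". */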
/* rm == -1 means no register index */
static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm,
                                        tcg_target_long offset)
{
    tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
}
/* Generate dest op= src.  Uses the same ARITH_* codes as tgen_arithi.  */
static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
{
    tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3), dest, src);
}
static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    if (arg != ret) {
        tcg_out_modrm(s, OPC_MOVL_GvEv, ret, arg);
    }
}
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                int ret, int32_t arg)
{
    if (arg == 0) {
        /* xor dest, dest */
        tgen_arithr(s, ARITH_XOR, ret, ret);
    } else {
        tcg_out8(s, OPC_MOVL_Iv + ret);
        tcg_out32(s, arg);
    }
}
static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
{
    if (val == (int8_t)val) {
        tcg_out_opc(s, OPC_PUSH_Ib);
        tcg_out8(s, val);
    } else {
        tcg_out_opc(s, OPC_PUSH_Iv);
        tcg_out32(s, val);
    }
}
static inline void tcg_out_push(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_PUSH_r32 + reg);
}

static inline void tcg_out_pop(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_POP_r32 + reg);
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_modrm_offset(s, OPC_MOVL_GvEv, ret, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_modrm_offset(s, OPC_MOVL_EvGv, arg, arg1, arg2);
}
static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
{
    if (count == 1) {
        tcg_out_modrm(s, OPC_SHIFT_1, subopc, reg);
    } else {
        tcg_out_modrm(s, OPC_SHIFT_Ib, subopc, reg);
        tcg_out8(s, count);
    }
}
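/* Example: tcg_out_shifti(s, SHIFT_SHL, TCG_REG_EAX, 1) uses the short
   one-bit form and emits d1 e0 ("shll $1, %eax"); a count of 3 instead
   emits c1 e0 03 ("shll $3, %eax"). */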
static inline void tcg_out_bswap32(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_BSWAP + reg);
}

static inline void tcg_out_rolw_8(TCGContext *s, int reg)
{
    tcg_out8(s, 0x66);
    tcg_out_shifti(s, SHIFT_ROL, reg, 8);
}
static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
{
    /* movzbl */
    assert(src < 4);
    tcg_out_modrm(s, OPC_MOVZBL, dest, src);
}

static void tcg_out_ext8s(TCGContext *s, int dest, int src)
{
    /* movsbl */
    assert(src < 4);
    tcg_out_modrm(s, OPC_MOVSBL, dest, src);
}

static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
{
    /* movzwl */
    tcg_out_modrm(s, OPC_MOVZWL, dest, src);
}

static inline void tcg_out_ext16s(TCGContext *s, int dest, int src)
{
    /* movswl */
    tcg_out_modrm(s, OPC_MOVSWL, dest, src);
}
static inline void tgen_arithi(TCGContext *s, int c, int r0,
                               int32_t val, int cf)
{
    /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
       partial flags update stalls on Pentium4 and are not recommended
       by current Intel optimization manuals.  */
    if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
        int opc = ((c == ARITH_ADD) ^ (val < 0) ? OPC_INC_r32 : OPC_DEC_r32);
        tcg_out_opc(s, opc + r0);
    } else if (val == (int8_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIb, c, r0);
        tcg_out8(s, val);
    } else if (c == ARITH_AND && val == 0xffu && r0 < 4) {
        tcg_out_ext8u(s, r0, r0);
    } else if (c == ARITH_AND && val == 0xffffu) {
        tcg_out_ext16u(s, r0, r0);
    } else {
        tcg_out_modrm(s, OPC_ARITH_EvIz, c, r0);
        tcg_out32(s, val);
    }
}
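/* Example: tgen_arithi(s, ARITH_ADD, TCG_REG_EBX, 1, 0) takes the first
   branch and emits the single byte 43 ("incl %ebx"); with val == -1 it
   emits 4b ("decl %ebx") instead, since (c == ARITH_ADD) ^ (val < 0)
   flips between INC and DEC. */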
static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0) {
        tgen_arithi(s, ARITH_ADD, reg, val, 0);
    }
}
/* Use SMALL != 0 to force a short forward branch.  */
static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
{
    int32_t val, val1;
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        val = l->u.value - (tcg_target_long)s->code_ptr;
        val1 = val - 2;
        if ((int8_t)val1 == val1) {
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_short);
            } else {
                tcg_out8(s, OPC_JCC_short + opc);
            }
            tcg_out8(s, val1);
        } else {
            if (small) {
                tcg_abort();
            }
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_long);
                tcg_out32(s, val - 5);
            } else {
                tcg_out_opc(s, OPC_JCC_long + opc);
                tcg_out32(s, val - 6);
            }
        }
    } else if (small) {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_short);
        } else {
            tcg_out8(s, OPC_JCC_short + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC8, label_index, -1);
        s->code_ptr += 1;
    } else {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_long);
        } else {
            tcg_out_opc(s, OPC_JCC_long + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
        s->code_ptr += 4;
    }
}
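/* For a forward branch the label has no value yet, so only the opcode
   byte is emitted here; tcg_out_reloc records the address of the
   displacement field and patch_reloc (above) fills in the PC-relative
   offset once the label is resolved.  The -1 and -4 addends compensate
   for the displacement being relative to the end of the instruction. */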
static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
                        int const_arg2)
{
    if (const_arg2) {
        if (arg2 == 0) {
            /* test r, r */
            tcg_out_modrm(s, OPC_TESTL, arg1, arg1);
        } else {
            tgen_arithi(s, ARITH_CMP, arg1, arg2, 0);
        }
    } else {
        tgen_arithr(s, ARITH_CMP, arg1, arg2);
    }
}
static void tcg_out_brcond(TCGContext *s, TCGCond cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           int label_index, int small)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
}
/* XXX: we implement it at the target level to avoid having to
   handle cross basic blocks temporaries */
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
                            const int *const_args, int small)
{
    int label_next;
    label_next = gen_new_label();
    switch(args[4]) {
    case TCG_COND_EQ:
        tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2],
                       label_next, 1);
        tcg_out_brcond(s, TCG_COND_EQ, args[1], args[3], const_args[3],
                       args[5], small);
        break;
    case TCG_COND_NE:
        tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2],
                       args[5], small);
        tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3],
                       args[5], small);
        break;
    case TCG_COND_LT:
        tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_LE:
        tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_GT:
        tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_GE:
        tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_LTU:
        tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_LEU:
        tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_GTU:
        tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_GEU:
        tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    default:
        tcg_abort();
    }
    tcg_out_label(s, label_next, (tcg_target_long)s->code_ptr);
}
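/* The cases above decompose a 64-bit comparison into 32-bit pieces:
   for LT, if the high words differ, the signed compare of the high
   halves decides the branch; if they are equal (the JNE to label_next
   is not taken), the low halves are compared unsigned.  The same
   pattern covers the remaining orderings with the appropriate
   signed/unsigned condition pair. */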
static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGArg dest,
                            TCGArg arg1, TCGArg arg2, int const_arg2)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
    tcg_out_ext8u(s, dest, dest);
}
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
                             const int *const_args)
{
    TCGArg new_args[6];
    int label_true, label_over;

    memcpy(new_args, args+1, 5*sizeof(TCGArg));

    if (args[0] == args[1] || args[0] == args[2]
        || (!const_args[3] && args[0] == args[3])
        || (!const_args[4] && args[0] == args[4])) {
        /* When the destination overlaps with one of the argument
           registers, don't do anything tricky.  */
        label_true = gen_new_label();
        label_over = gen_new_label();

        new_args[5] = label_true;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
        tcg_out_jxx(s, JCC_JMP, label_over, 1);
        tcg_out_label(s, label_true, (tcg_target_long)s->code_ptr);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
        tcg_out_label(s, label_over, (tcg_target_long)s->code_ptr);
    } else {
        /* When the destination does not overlap one of the arguments,
           clear the destination first, jump if cond false, and emit an
           increment in the true case.  This results in smaller code.  */
        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);

        label_over = gen_new_label();
        new_args[4] = tcg_invert_cond(new_args[4]);
        new_args[5] = label_over;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
        tcg_out_label(s, label_over, (tcg_target_long)s->code_ptr);
    }
}
static void tcg_out_calli(TCGContext *s, tcg_target_long dest)
{
    tcg_out_opc(s, OPC_CALL_Jz);
    tcg_out32(s, dest - (tcg_target_long)s->code_ptr - 4);
}
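/* CALL rel32 is a 5-byte instruction whose displacement is relative to
   the address of the next instruction, hence the extra -4: when
   tcg_out32 runs, the opcode byte has already been emitted, so
   code_ptr + 4 is the end of the call. */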
#if defined(CONFIG_SOFTMMU)

#include "../../softmmu_defs.h"

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};

#endif

#ifndef CONFIG_USER_ONLY
#define GUEST_BASE 0
#endif
/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
   EAX. It will be useful once fixed register globals are less
   common. */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    uint8_t *label1_ptr, *label2_ptr;
#endif
#if TARGET_LONG_BITS == 64
#if defined(CONFIG_SOFTMMU)
    uint8_t *label3_ptr;
#endif
    int addr_reg2;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0;
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
    mem_index = *args;
    s_bits = opc & 3;

    r0 = TCG_REG_EAX;
    r1 = TCG_REG_EDX;

#if defined(CONFIG_SOFTMMU)
    tcg_out_mov(s, r1, addr_reg);
    tcg_out_mov(s, r0, addr_reg);

    tcg_out_shifti(s, SHIFT_SHR, r1, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tgen_arithi(s, ARITH_AND, r0, TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
    tgen_arithi(s, ARITH_AND, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);

    tcg_out_modrm_sib_offset(s, OPC_LEA, r1, TCG_AREG0, r1, 0,
                             offsetof(CPUState,
                                      tlb_table[mem_index][0].addr_read));

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv, r0, r1, 0);

    tcg_out_mov(s, r0, addr_reg);

#if TARGET_LONG_BITS == 32
    /* je label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;
#else
    /* jne label3 */
    tcg_out8(s, OPC_JCC_short + JCC_JNE);
    label3_ptr = s->code_ptr;
    s->code_ptr++;

    /* cmp 4(r1), addr_reg2 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv, addr_reg2, r1, 4);

    /* je label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* label3: */
    *label3_ptr = s->code_ptr - label3_ptr - 1;
#endif

    /* XXX: move that code to the end of the TB */
#if TARGET_LONG_BITS == 32
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EDX, mem_index);
#else
    tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
#endif
    tcg_out_calli(s, (tcg_target_long)qemu_ld_helpers[s_bits]);

    switch(opc) {
    case 0 | 4:
        tcg_out_ext8s(s, data_reg, TCG_REG_EAX);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, data_reg, TCG_REG_EAX);
        break;
    case 0:
        tcg_out_ext8u(s, data_reg, TCG_REG_EAX);
        break;
    case 1:
        tcg_out_ext16u(s, data_reg, TCG_REG_EAX);
        break;
    case 2:
    default:
        tcg_out_mov(s, data_reg, TCG_REG_EAX);
        break;
    case 3:
        if (data_reg == TCG_REG_EDX) {
            /* xchg %edx, %eax */
            tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX);
            tcg_out_mov(s, data_reg2, TCG_REG_EAX);
        } else {
            tcg_out_mov(s, data_reg, TCG_REG_EAX);
            tcg_out_mov(s, data_reg2, TCG_REG_EDX);
        }
        break;
    }

    /* jmp label2 */
    tcg_out8(s, OPC_JMP_short);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* add x(r1), r0 */
    tcg_out_modrm_offset(s, OPC_ADD_GvEv, r0, r1,
                         offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_read));
#else
    r0 = addr_reg;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    switch(opc) {
    case 0:
        /* movzbl */
        tcg_out_modrm_offset(s, OPC_MOVZBL, data_reg, r0, GUEST_BASE);
        break;
    case 0 | 4:
        /* movsbl */
        tcg_out_modrm_offset(s, OPC_MOVSBL, data_reg, r0, GUEST_BASE);
        break;
    case 1:
        /* movzwl */
        tcg_out_modrm_offset(s, OPC_MOVZWL, data_reg, r0, GUEST_BASE);
        if (bswap) {
            tcg_out_rolw_8(s, data_reg);
        }
        break;
    case 1 | 4:
        /* movswl */
        tcg_out_modrm_offset(s, OPC_MOVSWL, data_reg, r0, GUEST_BASE);
        if (bswap) {
            tcg_out_rolw_8(s, data_reg);

            /* movswl data_reg, data_reg */
            tcg_out_modrm(s, OPC_MOVSWL, data_reg, data_reg);
        }
        break;
    case 2:
        tcg_out_ld(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
        if (bswap) {
            tcg_out_bswap32(s, data_reg);
        }
        break;
    case 3:
        if (bswap) {
            int t = data_reg;
            data_reg = data_reg2;
            data_reg2 = t;
        }
        if (r0 != data_reg) {
            tcg_out_ld(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
            tcg_out_ld(s, TCG_TYPE_I32, data_reg2, r0, GUEST_BASE + 4);
        } else {
            tcg_out_ld(s, TCG_TYPE_I32, data_reg2, r0, GUEST_BASE + 4);
            tcg_out_ld(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
        }
        if (bswap) {
            tcg_out_bswap32(s, data_reg);
            tcg_out_bswap32(s, data_reg2);
        }
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    /* label2: */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#endif
}
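/* A sketch of the softmmu fast path generated above, in C terms
   (illustrative only):

     page  = addr & (TARGET_PAGE_MASK | ((1 << s_bits) - 1));
     entry = &env->tlb_table[mem_index]
                 [(addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1)];
     if (page == entry->addr_read) {
         result = *(type *)(addr + entry->addend);   // TLB hit
     } else {
         result = __ldX_mmu(addr, mem_index);        // slow path
     }

   Including the low s_bits in the page mask makes an unaligned access
   that straddles a page fail the compare and take the slow path. */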
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    int stack_adjust;
    uint8_t *label1_ptr, *label2_ptr;
#endif
#if TARGET_LONG_BITS == 64
#if defined(CONFIG_SOFTMMU)
    uint8_t *label3_ptr;
#endif
    int addr_reg2;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0;
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
    mem_index = *args;

    s_bits = opc;

    r0 = TCG_REG_EAX;
    r1 = TCG_REG_EDX;

#if defined(CONFIG_SOFTMMU)
    tcg_out_mov(s, r1, addr_reg);
    tcg_out_mov(s, r0, addr_reg);

    tcg_out_shifti(s, SHIFT_SHR, r1, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tgen_arithi(s, ARITH_AND, r0, TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
    tgen_arithi(s, ARITH_AND, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);

    tcg_out_modrm_sib_offset(s, OPC_LEA, r1, TCG_AREG0, r1, 0,
                             offsetof(CPUState,
                                      tlb_table[mem_index][0].addr_write));

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv, r0, r1, 0);

    tcg_out_mov(s, r0, addr_reg);

#if TARGET_LONG_BITS == 32
    /* je label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;
#else
    /* jne label3 */
    tcg_out8(s, OPC_JCC_short + JCC_JNE);
    label3_ptr = s->code_ptr;
    s->code_ptr++;

    /* cmp 4(r1), addr_reg2 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv, addr_reg2, r1, 4);

    /* je label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* label3: */
    *label3_ptr = s->code_ptr - label3_ptr - 1;
#endif

    /* XXX: move that code to the end of the TB */
#if TARGET_LONG_BITS == 32
    if (opc == 3) {
        tcg_out_mov(s, TCG_REG_EDX, data_reg);
        tcg_out_mov(s, TCG_REG_ECX, data_reg2);
        tcg_out_pushi(s, mem_index);
        stack_adjust = 4;
    } else {
        switch(opc) {
        case 0:
            tcg_out_ext8u(s, TCG_REG_EDX, data_reg);
            break;
        case 1:
            tcg_out_ext16u(s, TCG_REG_EDX, data_reg);
            break;
        case 2:
            tcg_out_mov(s, TCG_REG_EDX, data_reg);
            break;
        }
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
        stack_adjust = 0;
    }
#else
    if (opc == 3) {
        tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
        tcg_out_pushi(s, mem_index);
        tcg_out_push(s, data_reg2);
        tcg_out_push(s, data_reg);
        stack_adjust = 12;
    } else {
        tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
        switch(opc) {
        case 0:
            tcg_out_ext8u(s, TCG_REG_ECX, data_reg);
            break;
        case 1:
            tcg_out_ext16u(s, TCG_REG_ECX, data_reg);
            break;
        case 2:
            tcg_out_mov(s, TCG_REG_ECX, data_reg);
            break;
        }
        tcg_out_pushi(s, mem_index);
        stack_adjust = 4;
    }
#endif

    tcg_out_calli(s, (tcg_target_long)qemu_st_helpers[s_bits]);

    if (stack_adjust == 4) {
        /* Pop and discard.  This is 2 bytes smaller than the add.  */
        tcg_out_pop(s, TCG_REG_ECX);
    } else if (stack_adjust != 0) {
        tcg_out_addi(s, TCG_REG_ESP, stack_adjust);
    }

    /* jmp label2 */
    tcg_out8(s, OPC_JMP_short);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* add x(r1), r0 */
    tcg_out_modrm_offset(s, OPC_ADD_GvEv, r0, r1,
                         offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_write));
#else
    r0 = addr_reg;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    switch(opc) {
    case 0:
        /* movb */
        tcg_out_modrm_offset(s, OPC_MOVB_EvGv, data_reg, r0, GUEST_BASE);
        break;
    case 1:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg);
            tcg_out_rolw_8(s, r1);
            data_reg = r1;
        }
        /* movw */
        tcg_out8(s, 0x66);
        tcg_out_modrm_offset(s, OPC_MOVL_EvGv, data_reg, r0, GUEST_BASE);
        break;
    case 2:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg);
            tcg_out_bswap32(s, r1);
            data_reg = r1;
        }
        tcg_out_st(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
        break;
    case 3:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg2);
            tcg_out_bswap32(s, r1);
            tcg_out_st(s, TCG_TYPE_I32, r1, r0, GUEST_BASE);
            tcg_out_mov(s, r1, data_reg);
            tcg_out_bswap32(s, r1);
            tcg_out_st(s, TCG_TYPE_I32, r1, r0, GUEST_BASE + 4);
        } else {
            tcg_out_st(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
            tcg_out_st(s, TCG_TYPE_I32, data_reg2, r0, GUEST_BASE + 4);
        }
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    /* label2: */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#endif
}
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    int c;

    switch(opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EAX, args[0]);
        tcg_out8(s, OPC_JMP_long); /* jmp tb_ret_addr */
        tcg_out32(s, tb_ret_addr - s->code_ptr - 4);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            tcg_out8(s, OPC_JMP_long); /* jmp im */
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
        } else {
            /* indirect jump method */
            tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
                                 (tcg_target_long)(s->tb_next + args[0]));
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out_calli(s, args[0]);
        } else {
            /* call *reg */
            tcg_out_modrm(s, OPC_GRP5, EXT5_CALLN_Ev, args[0]);
        }
        break;
    case INDEX_op_jmp:
        if (const_args[0]) {
            tcg_out8(s, OPC_JMP_long);
            tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
        } else {
            /* jmp *reg */
            tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, args[0]);
        }
        break;
    case INDEX_op_br:
        tcg_out_jxx(s, JCC_JMP, args[0], 0);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ld8u_i32:
        /* movzbl */
        tcg_out_modrm_offset(s, OPC_MOVZBL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        /* movsbl */
        tcg_out_modrm_offset(s, OPC_MOVSBL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        /* movzwl */
        tcg_out_modrm_offset(s, OPC_MOVZWL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        /* movswl */
        tcg_out_modrm_offset(s, OPC_MOVSWL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        /* movb */
        tcg_out_modrm_offset(s, OPC_MOVB_EvGv, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        /* movw */
        tcg_out8(s, 0x66);
        tcg_out_modrm_offset(s, OPC_MOVL_EvGv, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;
    case INDEX_op_add_i32:
        /* For 3-operand addition, use LEA.  */
        if (args[0] != args[1]) {
            TCGArg a0 = args[0], a1 = args[1], a2 = args[2], c3 = 0;

            if (const_args[2]) {
                c3 = a2, a2 = -1;
            } else if (a0 == a2) {
                /* Watch out for dest = src + dest, since we've removed
                   the matching constraint on the add.  */
                tgen_arithr(s, ARITH_ADD, a0, a1);
                break;
            }

            tcg_out_modrm_sib_offset(s, OPC_LEA, a0, a1, a2, 0, c3);
            break;
        }
        c = ARITH_ADD;
        goto gen_arith;
    case INDEX_op_sub_i32:
        c = ARITH_SUB;
        goto gen_arith;
    case INDEX_op_and_i32:
        c = ARITH_AND;
        goto gen_arith;
    case INDEX_op_or_i32:
        c = ARITH_OR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_XOR;
        goto gen_arith;
    gen_arith:
        if (const_args[2]) {
            tgen_arithi(s, c, args[0], args[2], 0);
        } else {
            tgen_arithr(s, c, args[0], args[2]);
        }
        break;
    case INDEX_op_mul_i32:
        if (const_args[2]) {
            int32_t val;
            val = args[2];
            if (val == (int8_t)val) {
                tcg_out_modrm(s, OPC_IMUL_GvEvIb, args[0], args[0]);
                tcg_out8(s, val);
            } else {
                tcg_out_modrm(s, OPC_IMUL_GvEvIz, args[0], args[0]);
                tcg_out32(s, val);
            }
        } else {
            tcg_out_modrm(s, OPC_IMUL_GvEv, args[0], args[2]);
        }
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_MUL, args[3]);
        break;
    case INDEX_op_div2_i32:
        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_IDIV, args[4]);
        break;
    case INDEX_op_divu2_i32:
        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_DIV, args[4]);
        break;
    case INDEX_op_shl_i32:
        c = SHIFT_SHL;
    gen_shift32:
        if (const_args[2]) {
            tcg_out_shifti(s, c, args[0], args[2]);
        } else {
            tcg_out_modrm(s, OPC_SHIFT_cl, c, args[0]);
        }
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SHR;
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SAR;
        goto gen_shift32;
    case INDEX_op_rotl_i32:
        c = SHIFT_ROL;
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = SHIFT_ROR;
        goto gen_shift32;
    case INDEX_op_add2_i32:
        if (const_args[4]) {
            tgen_arithi(s, ARITH_ADD, args[0], args[4], 1);
        } else {
            tgen_arithr(s, ARITH_ADD, args[0], args[4]);
        }
        if (const_args[5]) {
            tgen_arithi(s, ARITH_ADC, args[1], args[5], 1);
        } else {
            tgen_arithr(s, ARITH_ADC, args[1], args[5]);
        }
        break;
    case INDEX_op_sub2_i32:
        if (const_args[4]) {
            tgen_arithi(s, ARITH_SUB, args[0], args[4], 1);
        } else {
            tgen_arithr(s, ARITH_SUB, args[0], args[4]);
        }
        if (const_args[5]) {
            tgen_arithi(s, ARITH_SBB, args[1], args[5], 1);
        } else {
            tgen_arithr(s, ARITH_SBB, args[1], args[5]);
        }
        break;
    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       args[3], 0);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args, 0);
        break;
    case INDEX_op_bswap16_i32:
        tcg_out_rolw_8(s, args[0]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0]);
        break;

    case INDEX_op_neg_i32:
        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_NEG, args[0]);
        break;

    case INDEX_op_not_i32:
        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_NOT, args[0]);
        break;
    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i32:
        tcg_out_ext8u(s, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, args[0], args[1]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args, const_args);
        break;
    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        tcg_abort();
    }
}
static const TCGTargetOpDef x86_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },
    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "q", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "ri" } },
    { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
    { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "ri" } },
    { INDEX_op_xor_i32, { "r", "0", "ri" } },

    { INDEX_op_shl_i32, { "r", "0", "ci" } },
    { INDEX_op_shr_i32, { "r", "0", "ci" } },
    { INDEX_op_sar_i32, { "r", "0", "ci" } },
    { INDEX_op_rotl_i32, { "r", "0", "ci" } },
    { INDEX_op_rotr_i32, { "r", "0", "ci" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },

    { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "0" } },
    { INDEX_op_bswap32_i32, { "r", "0" } },

    { INDEX_op_neg_i32, { "r", "0" } },

    { INDEX_op_not_i32, { "r", "0" } },

    { INDEX_op_ext8s_i32, { "r", "q" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "q" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_setcond_i32, { "q", "r", "ri" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif
    { -1 },
};
static int tcg_target_callee_save_regs[] = {
    /*  TCG_REG_EBP, */ /* currently used for the global env, so no
                           need to save */
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
};
/* Generate global QEMU prologue and epilogue code */
void tcg_target_qemu_prologue(TCGContext *s)
{
    int i, frame_size, push_size, stack_addend;

    /* TB prologue */
    /* save all callee saved registers */
    for(i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_push(s, tcg_target_callee_save_regs[i]);
    }
    /* reserve some stack space */
    push_size = 4 + ARRAY_SIZE(tcg_target_callee_save_regs) * 4;
    frame_size = push_size + TCG_STATIC_CALL_ARGS_SIZE;
    frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    stack_addend = frame_size - push_size;
    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);

    tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_EAX); /* jmp *%eax */

    /* TB epilogue */
    tb_ret_addr = s->code_ptr;
    tcg_out_addi(s, TCG_REG_ESP, stack_addend);
    for(i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
        tcg_out_pop(s, tcg_target_callee_save_regs[i]);
    }
    tcg_out_opc(s, OPC_RET);
}
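/* Worked frame arithmetic, assuming TCG_STATIC_CALL_ARGS_SIZE is 128
   and TCG_TARGET_STACK_ALIGN is 16 (the usual values; check tcg.c and
   tcg-target.h): three callee-saved registers plus the return address
   give push_size = 4 + 3*4 = 16; frame_size = 16 + 128 = 144 is already
   16-byte aligned, so stack_addend = 128 is subtracted from ESP on
   entry and added back in the epilogue. */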
void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();
#endif

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ESP);

    tcg_add_target_add_op_defs(x86_op_defs);
}