/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
#if TCG_TARGET_REG_BITS == 64
    "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
    "%r8",  "%r9",  "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
#else
    "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
#endif
};
static const int tcg_target_reg_alloc_order[] = {
#if TCG_TARGET_REG_BITS == 64
    TCG_REG_RBP,
    TCG_REG_RBX,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_RCX,
    TCG_REG_RDX,
    TCG_REG_RSI,
    TCG_REG_RDI,
    TCG_REG_RAX,
#else
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
    TCG_REG_EBP,
    TCG_REG_ECX,
    TCG_REG_EDX,
    TCG_REG_EAX,
#endif
};
static const int tcg_target_call_iarg_regs[] = {
#if TCG_TARGET_REG_BITS == 64
    TCG_REG_RDI,
    TCG_REG_RSI,
    TCG_REG_RDX,
    TCG_REG_RCX,
    TCG_REG_R8,
    TCG_REG_R9,
#else
    TCG_REG_EAX,
    TCG_REG_EDX,
    TCG_REG_ECX
#endif
};
static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_EAX,
#if TCG_TARGET_REG_BITS == 32
    TCG_REG_EDX
#endif
};
static uint8_t *tb_ret_addr;
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    value += addend;
    switch(type) {
    case R_386_PC32:
        value -= (uintptr_t)code_ptr;
        if (value != (int32_t)value) {
            tcg_abort();
        }
        *(uint32_t *)code_ptr = value;
        break;
    case R_386_PC8:
        value -= (uintptr_t)code_ptr;
        if (value != (int8_t)value) {
            tcg_abort();
        }
        *(uint8_t *)code_ptr = value;
        break;
    default:
        tcg_abort();
    }
}
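
/* Illustrative example (not part of the original source): tcg_out_jxx below
   emits a forward conditional branch with a 32-bit displacement and records
   it with
       tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
   When the label is later bound, patch_reloc() adds the -4 addend to the
   label address and then subtracts the address of the operand field itself,
   yielding the rel32 counted from the end of the 4-byte operand -- exactly
   what the branch instruction expects.  */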
/* Maximum number of registers used for input function arguments.  */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    if (TCG_TARGET_REG_BITS == 64) {
        return 6;
    }

    flags &= TCG_CALL_TYPE_MASK;
    switch(flags) {
    case TCG_CALL_TYPE_STD:
        return 0;
    case TCG_CALL_TYPE_REGPARM_1:
    case TCG_CALL_TYPE_REGPARM_2:
    case TCG_CALL_TYPE_REGPARM:
        return flags - TCG_CALL_TYPE_REGPARM_1 + 1;
    default:
        tcg_abort();
    }
}
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch(ct_str[0]) {
    case 'a':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
        break;
    case 'b':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
        break;
    case 'c':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
        break;
    case 'd':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
        break;
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
        break;
    case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
        break;
    case 'q':
        ct->ct |= TCG_CT_REG;
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_regset_set32(ct->u.regs, 0, 0xffff);
        } else {
            tcg_regset_set32(ct->u.regs, 0, 0xf);
        }
        break;
    case 'Q':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xf);
        break;
    case 'r':
        ct->ct |= TCG_CT_REG;
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_regset_set32(ct->u.regs, 0, 0xffff);
        } else {
            tcg_regset_set32(ct->u.regs, 0, 0xff);
        }
        break;

        /* qemu_ld/st address constraint */
    case 'L':
        ct->ct |= TCG_CT_REG;
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_regset_set32(ct->u.regs, 0, 0xffff);
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_RSI);
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_RDI);
        } else {
            tcg_regset_set32(ct->u.regs, 0, 0xff);
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX);
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_EDX);
        }
        break;

    case 'e':
        ct->ct |= TCG_CT_CONST_S32;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_U32;
        break;

    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
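
/* For example (illustrative note): the constraint string "ri" used in
   x86_op_defs below allows any register ('r', handled here) or any
   immediate ('i', handled by the generic constraint code in tcg.c), while
   "q" restricts a 32-bit host to the four byte-addressable registers
   %eax-%edx, which is why it appears on operands that feed byte stores
   and setcc.  */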
/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
        return 1;
    }
    return 0;
}
#if TCG_TARGET_REG_BITS == 64
# define LOWREGMASK(x)  ((x) & 7)
#else
# define LOWREGMASK(x)  (x)
#endif

#define P_EXT       0x100   /* 0x0f opcode prefix */
#define P_DATA16    0x200   /* 0x66 opcode prefix */
#if TCG_TARGET_REG_BITS == 64
# define P_ADDR32   0x400   /* 0x67 opcode prefix */
# define P_REXW     0x800   /* Set REX.W = 1 */
# define P_REXB_R   0x1000  /* REG field as byte register */
# define P_REXB_RM  0x2000  /* R/M field as byte register */
#else
# define P_ADDR32   0
# define P_REXW     0
# define P_REXB_R   0
# define P_REXB_RM  0
#endif
#define OPC_ARITH_EvIz  (0x81)
#define OPC_ARITH_EvIb  (0x83)
#define OPC_ARITH_GvEv  (0x03)      /* ... plus (ARITH_FOO << 3) */
#define OPC_ADD_GvEv    (OPC_ARITH_GvEv | (ARITH_ADD << 3))
#define OPC_BSWAP       (0xc8 | P_EXT)
#define OPC_CALL_Jz     (0xe8)
#define OPC_CMP_GvEv    (OPC_ARITH_GvEv | (ARITH_CMP << 3))
#define OPC_DEC_r32     (0x48)
#define OPC_IMUL_GvEv   (0xaf | P_EXT)
#define OPC_IMUL_GvEvIb (0x6b)
#define OPC_IMUL_GvEvIz (0x69)
#define OPC_INC_r32     (0x40)
#define OPC_JCC_long    (0x80 | P_EXT)  /* ... plus condition code */
#define OPC_JCC_short   (0x70)          /* ... plus condition code */
#define OPC_JMP_long    (0xe9)
#define OPC_JMP_short   (0xeb)
#define OPC_LEA         (0x8d)
#define OPC_MOVB_EvGv   (0x88)      /* stores, more or less */
#define OPC_MOVL_EvGv   (0x89)      /* stores, more or less */
#define OPC_MOVL_GvEv   (0x8b)      /* loads, more or less */
#define OPC_MOVL_EvIz   (0xc7)
#define OPC_MOVL_Iv     (0xb8)
#define OPC_MOVSBL      (0xbe | P_EXT)
#define OPC_MOVSWL      (0xbf | P_EXT)
#define OPC_MOVSLQ      (0x63 | P_REXW)
#define OPC_MOVZBL      (0xb6 | P_EXT)
#define OPC_MOVZWL      (0xb7 | P_EXT)
#define OPC_POP_r32     (0x58)
#define OPC_PUSH_r32    (0x50)
#define OPC_PUSH_Iv     (0x68)
#define OPC_PUSH_Ib     (0x6a)
#define OPC_RET         (0xc3)
#define OPC_SETCC       (0x90 | P_EXT | P_REXB_RM)  /* ... plus cc */
#define OPC_SHIFT_1     (0xd1)
#define OPC_SHIFT_Ib    (0xc1)
#define OPC_SHIFT_cl    (0xd3)
#define OPC_TESTL       (0x85)
#define OPC_XCHG_ax_r32 (0x90)

#define OPC_GRP3_Ev     (0xf7)
#define OPC_GRP5        (0xff)

/* Group 1 opcode extensions for 0x80-0x83.
   These are also used as modifiers for OPC_ARITH.  */
#define ARITH_ADD 0
#define ARITH_OR  1
#define ARITH_ADC 2
#define ARITH_SBB 3
#define ARITH_AND 4
#define ARITH_SUB 5
#define ARITH_XOR 6
#define ARITH_CMP 7

/* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3.  */
#define SHIFT_ROL 0
#define SHIFT_ROR 1
#define SHIFT_SHL 4
#define SHIFT_SHR 5
#define SHIFT_SAR 7

/* Group 3 opcode extensions for 0xf6, 0xf7.  To be used with OPC_GRP3.  */
#define EXT3_NOT   2
#define EXT3_NEG   3
#define EXT3_MUL   4
#define EXT3_IMUL  5
#define EXT3_DIV   6
#define EXT3_IDIV  7

/* Group 5 opcode extensions for 0xff.  To be used with OPC_GRP5.  */
#define EXT5_INC_Ev     0
#define EXT5_DEC_Ev     1
#define EXT5_CALLN_Ev   2
#define EXT5_JMPN_Ev    4
/* Condition codes to be added to OPC_JCC_{long,short}.  */
#define JCC_JMP (-1)
#define JCC_JO  0x0
#define JCC_JNO 0x1
#define JCC_JB  0x2
#define JCC_JAE 0x3
#define JCC_JE  0x4
#define JCC_JNE 0x5
#define JCC_JBE 0x6
#define JCC_JA  0x7
#define JCC_JS  0x8
#define JCC_JNS 0x9
#define JCC_JP  0xa
#define JCC_JNP 0xb
#define JCC_JL  0xc
#define JCC_JGE 0xd
#define JCC_JLE 0xe
#define JCC_JG  0xf

static const uint8_t tcg_cond_to_jcc[10] = {
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,
    [TCG_COND_LE] = JCC_JLE,
    [TCG_COND_GT] = JCC_JG,
    [TCG_COND_LTU] = JCC_JB,
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
};
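
/* Worked example (illustrative): TCG_COND_LTU maps to JCC_JB, so a short
   unsigned-less-than branch is OPC_JCC_short + JCC_JB = 0x72 followed by a
   rel8, while the long form is the two-byte 0x0f 0x82 (OPC_JCC_long is
   0x80 | P_EXT, plus the same condition nibble) followed by a rel32.  */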
#if TCG_TARGET_REG_BITS == 64
static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
{
    int rex;

    if (opc & P_DATA16) {
        /* We should never be asking for both 16 and 64-bit operation.  */
        assert((opc & P_REXW) == 0);
        tcg_out8(s, 0x66);
    }
    if (opc & P_ADDR32) {
        tcg_out8(s, 0x67);
    }

    rex = 0;
    rex |= (opc & P_REXW) >> 8;  /* REX.W */
    rex |= (r & 8) >> 1;         /* REX.R */
    rex |= (x & 8) >> 2;         /* REX.X */
    rex |= (rm & 8) >> 3;        /* REX.B */

    /* P_REXB_{R,RM} indicates that the given register is the low byte.
       For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do,
       as otherwise the encoding indicates %[abcd]h.  Note that the values
       that are ORed in merely indicate that the REX byte must be present;
       those bits get discarded in output.  */
    rex |= opc & (r >= 4 ? P_REXB_R : 0);
    rex |= opc & (rm >= 4 ? P_REXB_RM : 0);

    if (rex) {
        tcg_out8(s, (uint8_t)(rex | 0x40));
    }

    if (opc & P_EXT) {
        tcg_out8(s, 0x0f);
    }
    tcg_out8(s, opc);
}
#else
static void tcg_out_opc(TCGContext *s, int opc)
{
    if (opc & P_DATA16) {
        tcg_out8(s, 0x66);
    }
    if (opc & P_EXT) {
        tcg_out8(s, 0x0f);
    }
    tcg_out8(s, opc);
}
/* Discard the register arguments to tcg_out_opc early, so as not to penalize
   the 32-bit compilation paths.  This method works with all versions of gcc,
   whereas relying on optimization may not be able to exclude them.  */
#define tcg_out_opc(s, opc, r, rm, x)  (tcg_out_opc)(s, opc)
#endif
static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
{
    tcg_out_opc(s, opc, r, rm, 0);
    tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
}
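
/* Worked example (illustrative): tgen_arithr(s, ARITH_ADD + P_REXW,
   TCG_REG_RAX, TCG_REG_R8) below arrives here with opc = 0x803 and emits
   REX.W+REX.B (0x49), the opcode byte 0x03, then ModRM 0xc0 -- the three
   bytes 49 03 c0, i.e. addq %r8, %rax.  */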
/* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
   We handle either RM and INDEX missing with a negative value.  In 64-bit
   mode for absolute addresses, ~RM is the size of the immediate operand
   that will follow the instruction.  */

static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
                                     int index, int shift,
                                     tcg_target_long offset)
{
    int mod, len;

    if (index < 0 && rm < 0) {
        if (TCG_TARGET_REG_BITS == 64) {
            /* Try for a rip-relative addressing mode.  This has replaced
               the 32-bit-mode absolute addressing encoding.  */
            tcg_target_long pc = (tcg_target_long)s->code_ptr + 5 + ~rm;
            tcg_target_long disp = offset - pc;
            if (disp == (int32_t)disp) {
                tcg_out_opc(s, opc, r, 0, 0);
                tcg_out8(s, (LOWREGMASK(r) << 3) | 5);
                tcg_out32(s, disp);
                return;
            }

            /* Try for an absolute address encoding.  This requires the
               use of the MODRM+SIB encoding and is therefore larger than
               rip-relative addressing.  */
            if (offset == (int32_t)offset) {
                tcg_out_opc(s, opc, r, 0, 0);
                tcg_out8(s, (LOWREGMASK(r) << 3) | 4);
                tcg_out8(s, (4 << 3) | 5);
                tcg_out32(s, offset);
                return;
            }

            /* ??? The memory isn't directly addressable.  */
            tcg_abort();
        } else {
            /* Absolute address.  */
            tcg_out_opc(s, opc, r, 0, 0);
            tcg_out8(s, (r << 3) | 5);
            tcg_out32(s, offset);
            return;
        }
    }

    /* Find the length of the immediate addend.  Note that the encoding
       that would be used for (%ebp) indicates absolute addressing.  */
    if (rm < 0) {
        mod = 0, len = 4, rm = 5;
    } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) {
        mod = 0, len = 0;
    } else if (offset == (int8_t)offset) {
        mod = 0x40, len = 1;
    } else {
        mod = 0x80, len = 4;
    }

    /* Use a single byte MODRM format if possible.  Note that the encoding
       that would be used for %esp is the escape to the two byte form.  */
    if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) {
        /* Single byte MODRM format.  */
        tcg_out_opc(s, opc, r, rm, 0);
        tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
    } else {
        /* Two byte MODRM+SIB format.  */

        /* Note that the encoding that would place %esp into the index
           field indicates no index register.  In 64-bit mode, the REX.X
           bit counts, so %r12 can be used as the index.  */
        if (index < 0) {
            index = 4;
        } else {
            assert(index != TCG_REG_ESP);
        }

        tcg_out_opc(s, opc, r, rm, index);
        tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4);
        tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm));
    }

    if (len == 1) {
        tcg_out8(s, offset);
    } else if (len == 4) {
        tcg_out32(s, offset);
    }
}
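
/* Worked example (illustrative): tcg_out_modrm_sib_offset(s,
   OPC_LEA + P_REXW, TCG_REG_RAX, TCG_REG_RBX, TCG_REG_RCX, 1, 0x10)
   selects mod = 0x40 (disp8) and the SIB form, emitting the bytes
   48 8d 44 4b 10, i.e. leaq 0x10(%rbx,%rcx,2), %rax.  */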
/* A simplification of the above with no index or shift.  */
static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r,
                                        int rm, tcg_target_long offset)
{
    tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
}
/* Generate dest op= src.  Uses the same ARITH_* codes as tgen_arithi.  */
static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
{
    /* Propagate an opcode prefix, such as P_REXW.  */
    int ext = subop & ~0x7;
    subop &= 0x7;

    tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src);
}
static inline void tcg_out_mov(TCGContext *s, TCGType type, int ret, int arg)
{
    if (arg != ret) {
        int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
        tcg_out_modrm(s, opc, ret, arg);
    }
}
static void tcg_out_movi(TCGContext *s, TCGType type,
                         int ret, tcg_target_long arg)
{
    if (arg == 0) {
        tgen_arithr(s, ARITH_XOR, ret, ret);
        return;
    } else if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
        tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0);
        tcg_out32(s, arg);
    } else if (arg == (int32_t)arg) {
        tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret);
        tcg_out32(s, arg);
    } else {
        tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0);
        tcg_out32(s, arg);
        tcg_out32(s, arg >> 31 >> 1);
    }
}
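
/* Illustrative encodings chosen above on a 64-bit host:
     tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_RAX, 0)          -> 33 c0 (xor)
     tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_RAX, 0xdeadbeef) -> b8 ef be ad de
     tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_RAX, -1)         -> 48 c7 c0 ff ff ff ff
   Only a value needing more than 32 significant bits falls through to the
   10-byte movabs form emitted as two 32-bit halves.  */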
static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
{
    if (val == (int8_t)val) {
        tcg_out_opc(s, OPC_PUSH_Ib, 0, 0, 0);
        tcg_out8(s, val);
    } else if (val == (int32_t)val) {
        tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0);
        tcg_out32(s, val);
    } else {
        tcg_abort();
    }
}
static inline void tcg_out_push(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0);
}

static inline void tcg_out_pop(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0);
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
    tcg_out_modrm_offset(s, opc, ret, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                              int arg1, tcg_target_long arg2)
{
    int opc = OPC_MOVL_EvGv + (type == TCG_TYPE_I64 ? P_REXW : 0);
    tcg_out_modrm_offset(s, opc, arg, arg1, arg2);
}
static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
{
    /* Propagate an opcode prefix, such as P_DATA16.  */
    int ext = subopc & ~0x7;
    subopc &= 0x7;

    if (count == 1) {
        tcg_out_modrm(s, OPC_SHIFT_1 + ext, subopc, reg);
    } else {
        tcg_out_modrm(s, OPC_SHIFT_Ib + ext, subopc, reg);
        tcg_out8(s, count);
    }
}
static inline void tcg_out_bswap32(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_BSWAP + LOWREGMASK(reg), 0, reg, 0);
}

static inline void tcg_out_rolw_8(TCGContext *s, int reg)
{
    tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8);
}
static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
{
    /* movzbl */
    assert(src < 4 || TCG_TARGET_REG_BITS == 64);
    tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src);
}

static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw)
{
    /* movsbl */
    assert(src < 4 || TCG_TARGET_REG_BITS == 64);
    tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src);
}
static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
{
    /* movzwl */
    tcg_out_modrm(s, OPC_MOVZWL, dest, src);
}

static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw)
{
    /* movsw[lq] */
    tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src);
}

static inline void tcg_out_ext32u(TCGContext *s, int dest, int src)
{
    /* 32-bit mov zero extends.  */
    tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src);
}

static inline void tcg_out_ext32s(TCGContext *s, int dest, int src)
{
    tcg_out_modrm(s, OPC_MOVSLQ, dest, src);
}

static inline void tcg_out_bswap64(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0);
}
static void tgen_arithi(TCGContext *s, int c, int r0,
                        tcg_target_long val, int cf)
{
    int rexw = 0;

    if (TCG_TARGET_REG_BITS == 64) {
        rexw = c & -8;
        c &= 7;
    }

    /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
       partial flags update stalls on Pentium4 and are not recommended
       by current Intel optimization manuals.  */
    if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
        int is_inc = (c == ARITH_ADD) ^ (val < 0);
        if (TCG_TARGET_REG_BITS == 64) {
            /* The single-byte increment encodings are re-tasked as the
               REX prefixes.  Use the MODRM encoding.  */
            tcg_out_modrm(s, OPC_GRP5 + rexw,
                          (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0);
        } else {
            tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0);
        }
        return;
    }

    if (c == ARITH_AND) {
        if (TCG_TARGET_REG_BITS == 64) {
            if (val == 0xffffffffu) {
                tcg_out_ext32u(s, r0, r0);
                return;
            }
            if (val == (uint32_t)val) {
                /* AND with no high bits set can use a 32-bit operation.  */
                rexw = 0;
            }
        }
        if (val == 0xffu && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) {
            tcg_out_ext8u(s, r0, r0);
            return;
        }
        if (val == 0xffffu) {
            tcg_out_ext16u(s, r0, r0);
            return;
        }
    }

    if (val == (int8_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIb + rexw, c, r0);
        tcg_out8(s, val);
        return;
    }
    if (rexw == 0 || val == (int32_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIz + rexw, c, r0);
        tcg_out32(s, val);
        return;
    }

    tcg_abort();
}
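
/* Illustrative special cases handled above: with CF not needed,
   "andl $0xff, %reg" is rewritten as a movzbl of the register onto itself,
   and on a 64-bit host "andq $0xffffffff, %reg" degrades to a plain 32-bit
   operation, relying on the implicit zero extension of 32-bit results.  */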
static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0) {
        tgen_arithi(s, ARITH_ADD + P_REXW, reg, val, 0);
    }
}
/* Use SMALL != 0 to force a short forward branch.  */
static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
{
    int32_t val, val1;
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        val = l->u.value - (tcg_target_long)s->code_ptr;
        val1 = val - 2;
        if ((int8_t)val1 == val1) {
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_short);
            } else {
                tcg_out8(s, OPC_JCC_short + opc);
            }
            tcg_out8(s, val1);
        } else {
            if (small) {
                tcg_abort();
            }
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_long);
                tcg_out32(s, val - 5);
            } else {
                tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
                tcg_out32(s, val - 6);
            }
        }
    } else if (small) {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_short);
        } else {
            tcg_out8(s, OPC_JCC_short + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC8, label_index, -1);
        s->code_ptr += 1;
    } else {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_long);
        } else {
            tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
        s->code_ptr += 4;
    }
}
static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
                        int const_arg2, int rexw)
{
    if (const_arg2) {
        if (arg2 == 0) {
            /* test r, r */
            tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1);
        } else {
            tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0);
        }
    } else {
        tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);
    }
}
static void tcg_out_brcond32(TCGContext *s, TCGCond cond,
                             TCGArg arg1, TCGArg arg2, int const_arg2,
                             int label_index, int small)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
}
#if TCG_TARGET_REG_BITS == 64
static void tcg_out_brcond64(TCGContext *s, TCGCond cond,
                             TCGArg arg1, TCGArg arg2, int const_arg2,
                             int label_index, int small)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
}
#else
/* XXX: we implement it at the target level to avoid having to
   handle cross basic blocks temporaries */
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
                            const int *const_args, int small)
{
    int label_next;
    label_next = gen_new_label();
    switch(args[4]) {
    case TCG_COND_EQ:
        tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
                         label_next, 1);
        tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3],
                         args[5], small);
        break;
    case TCG_COND_NE:
        tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
                         args[5], small);
        tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3],
                         args[5], small);
        break;
    case TCG_COND_LT:
        tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_LE:
        tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_GT:
        tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_GE:
        tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_LTU:
        tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_LEU:
        tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_GTU:
        tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_GEU:
        tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    default:
        tcg_abort();
    }
    tcg_out_label(s, label_next, (tcg_target_long)s->code_ptr);
}
#endif
static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg arg1, TCGArg arg2, int const_arg2)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
    tcg_out_ext8u(s, dest, dest);
}
#if TCG_TARGET_REG_BITS == 64
static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg arg1, TCGArg arg2, int const_arg2)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
    tcg_out_ext8u(s, dest, dest);
}
#else
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
                             const int *const_args)
{
    TCGArg new_args[6];
    int label_true, label_over;

    memcpy(new_args, args+1, 5*sizeof(TCGArg));

    if (args[0] == args[1] || args[0] == args[2]
        || (!const_args[3] && args[0] == args[3])
        || (!const_args[4] && args[0] == args[4])) {
        /* When the destination overlaps with one of the argument
           registers, don't do anything tricky.  */
        label_true = gen_new_label();
        label_over = gen_new_label();

        new_args[5] = label_true;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
        tcg_out_jxx(s, JCC_JMP, label_over, 1);
        tcg_out_label(s, label_true, (tcg_target_long)s->code_ptr);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
        tcg_out_label(s, label_over, (tcg_target_long)s->code_ptr);
    } else {
        /* When the destination does not overlap one of the arguments,
           clear the destination first, jump if cond false, and emit an
           increment in the true case.  This results in smaller code.  */
        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);

        label_over = gen_new_label();
        new_args[4] = tcg_invert_cond(new_args[4]);
        new_args[5] = label_over;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
        tcg_out_label(s, label_over, (tcg_target_long)s->code_ptr);
    }
}
#endif
static void tcg_out_branch(TCGContext *s, int call, tcg_target_long dest)
{
    tcg_target_long disp = dest - (tcg_target_long)s->code_ptr - 5;

    if (disp == (int32_t)disp) {
        tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
        tcg_out32(s, disp);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R10, dest);
        tcg_out_modrm(s, OPC_GRP5,
                      call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev, TCG_REG_R10);
    }
}

static inline void tcg_out_calli(TCGContext *s, tcg_target_long dest)
{
    tcg_out_branch(s, 1, dest);
}

static void tcg_out_jmp(TCGContext *s, tcg_target_long dest)
{
    tcg_out_branch(s, 0, dest);
}
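
/* Illustrative: a destination within +/-2GB of the code buffer gets the
   5-byte direct form (e8/e9 rel32); only a farther target pays for the
   movabs into %r10 plus the indirect 41 ff d2 (call *%r10) or
   41 ff e2 (jmp *%r10).  %r10 is chosen because it is call-clobbered yet
   never carries an argument in the SysV ABI.  */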
#if defined(CONFIG_SOFTMMU)

#include "../../softmmu_defs.h"

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
/* Perform the TLB load and compare.

   Inputs:
   ADDRLO_IDX contains the index into ARGS of the low part of the
   address; the high part of the address is at ADDRLO_IDX+1.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   Outputs:
   LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses)
   positions of the displacements of forward jumps to the TLB miss case.

   First argument register is loaded with the low part of the address.
   In the TLB hit case, it has been adjusted as indicated by the TLB
   and so is a host address.  In the TLB miss case, it continues to
   hold a guest address.

   Second argument register is clobbered.  */
static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
                                    int mem_index, int s_bits,
                                    const TCGArg *args,
                                    uint8_t **label_ptr, int which)
{
    const int addrlo = args[addrlo_idx];
    const int r0 = tcg_target_call_iarg_regs[0];
    const int r1 = tcg_target_call_iarg_regs[1];
    TCGType type = TCG_TYPE_I32;
    int rexw = 0;

    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 64) {
        type = TCG_TYPE_I64;
        rexw = P_REXW;
    }

    tcg_out_mov(s, type, r1, addrlo);
    tcg_out_mov(s, type, r0, addrlo);

    tcg_out_shifti(s, SHIFT_SHR + rexw, r1,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tgen_arithi(s, ARITH_AND + rexw, r0,
                TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
    tgen_arithi(s, ARITH_AND + rexw, r1,
                (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);

    tcg_out_modrm_sib_offset(s, OPC_LEA + P_REXW, r1, TCG_AREG0, r1, 0,
                             offsetof(CPUState, tlb_table[mem_index][0])
                             + which);

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv + rexw, r0, r1, 0);

    tcg_out_mov(s, type, r0, addrlo);

    /* jne label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JNE);
    label_ptr[0] = s->code_ptr;
    s->code_ptr++;

    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        /* cmp 4(r1), addrhi */
        tcg_out_modrm_offset(s, OPC_CMP_GvEv, args[addrlo_idx+1], r1, 4);

        /* jne label1 */
        tcg_out8(s, OPC_JCC_short + JCC_JNE);
        label_ptr[1] = s->code_ptr;
        s->code_ptr++;
    }

    /* TLB Hit.  */

    /* add addend(r1), r0 */
    tcg_out_modrm_offset(s, OPC_ADD_GvEv + P_REXW, r0, r1,
                         offsetof(CPUTLBEntry, addend) - which);
}
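
/* Illustrative shape of the sequence emitted above for a 64-bit guest on a
   64-bit host (r0 = %rdi, r1 = %rsi, s_bits = 3), assuming TCG_AREG0 is
   %r14 as set up elsewhere in this file:
       mov    %addr, %rsi
       mov    %addr, %rdi
       shr    $(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), %rsi
       and    $(TARGET_PAGE_MASK | 7), %rdi
       and    $((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS), %rsi
       lea    tlb_off(%r14,%rsi), %rsi
       cmp    %rdi, (%rsi)
       mov    %addr, %rdi
       jne    miss                       ; rel8 patched by the caller
       add    addend_off(%rsi), %rdi     ; %rdi is now a host address
   where tlb_off/addend_off stand for the offsetof() expressions used in
   the code.  */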
static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo, int datahi,
                                   int base, tcg_target_long ofs, int sizeop)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 1;
#else
    const int bswap = 0;
#endif
    switch (sizeop) {
    case 0:
        tcg_out_modrm_offset(s, OPC_MOVZBL, datalo, base, ofs);
        break;
    case 0 | 4:
        tcg_out_modrm_offset(s, OPC_MOVSBL + P_REXW, datalo, base, ofs);
        break;
    case 1:
        tcg_out_modrm_offset(s, OPC_MOVZWL, datalo, base, ofs);
        if (bswap) {
            tcg_out_rolw_8(s, datalo);
        }
        break;
    case 1 | 4:
        if (bswap) {
            tcg_out_modrm_offset(s, OPC_MOVZWL, datalo, base, ofs);
            tcg_out_rolw_8(s, datalo);
            tcg_out_modrm(s, OPC_MOVSWL + P_REXW, datalo, datalo);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVSWL + P_REXW, datalo, base, ofs);
        }
        break;
    case 2:
        tcg_out_ld(s, TCG_TYPE_I32, datalo, base, ofs);
        if (bswap) {
            tcg_out_bswap32(s, datalo);
        }
        break;
#if TCG_TARGET_REG_BITS == 64
    case 2 | 4:
        if (bswap) {
            tcg_out_ld(s, TCG_TYPE_I32, datalo, base, ofs);
            tcg_out_bswap32(s, datalo);
            tcg_out_ext32s(s, datalo, datalo);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVSLQ, datalo, base, ofs);
        }
        break;
#endif
    case 3:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_ld(s, TCG_TYPE_I64, datalo, base, ofs);
            if (bswap) {
                tcg_out_bswap64(s, datalo);
            }
        } else {
            if (bswap) {
                int t = datalo;
                datalo = datahi;
                datahi = t;
            }
            if (base != datalo) {
                tcg_out_ld(s, TCG_TYPE_I32, datalo, base, ofs);
                tcg_out_ld(s, TCG_TYPE_I32, datahi, base, ofs + 4);
            } else {
                tcg_out_ld(s, TCG_TYPE_I32, datahi, base, ofs + 4);
                tcg_out_ld(s, TCG_TYPE_I32, datalo, base, ofs);
            }
            if (bswap) {
                tcg_out_bswap32(s, datalo);
                tcg_out_bswap32(s, datahi);
            }
        }
        break;
    default:
        tcg_abort();
    }
}
/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
   EAX. It will be useful once fixed registers globals are less
   common. */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int data_reg, data_reg2 = 0;
    int addrlo_idx;
#if defined(CONFIG_SOFTMMU)
    int mem_index, s_bits, arg_idx;
    uint8_t *label_ptr[3];
#endif

    data_reg = args[0];
    addrlo_idx = 1;
    if (TCG_TARGET_REG_BITS == 32 && opc == 3) {
        data_reg2 = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)];
    s_bits = opc & 3;

    tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
                     label_ptr, offsetof(CPUTLBEntry, addr_read));

    /* TLB Hit.  */
    tcg_out_qemu_ld_direct(s, data_reg, data_reg2,
                           tcg_target_call_iarg_regs[0], 0, opc);

    /* jmp label2 */
    tcg_out8(s, OPC_JMP_short);
    label_ptr[2] = s->code_ptr;
    s->code_ptr++;

    /* TLB Miss.  */

    /* label1: */
    *label_ptr[0] = s->code_ptr - label_ptr[0] - 1;
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        *label_ptr[1] = s->code_ptr - label_ptr[1] - 1;
    }

    /* XXX: move that code at the end of the TB */
    /* The first argument is already loaded with addrlo.  */
    arg_idx = 1;
    if (TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[arg_idx++],
                    args[addrlo_idx + 1]);
    }
    tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[arg_idx],
                 mem_index);
    tcg_out_calli(s, (tcg_target_long)qemu_ld_helpers[s_bits]);

    switch(opc) {
    case 0 | 4:
        tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW);
        break;
    case 0:
        tcg_out_ext8u(s, data_reg, TCG_REG_EAX);
        break;
    case 1:
        tcg_out_ext16u(s, data_reg, TCG_REG_EAX);
        break;
    case 2:
        tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
        break;
#if TCG_TARGET_REG_BITS == 64
    case 2 | 4:
        tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
        break;
#endif
    case 3:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
        } else if (data_reg == TCG_REG_EDX) {
            /* xchg %edx, %eax */
            tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
            tcg_out_mov(s, TCG_TYPE_I32, data_reg2, TCG_REG_EAX);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
            tcg_out_mov(s, TCG_TYPE_I32, data_reg2, TCG_REG_EDX);
        }
        break;
    default:
        tcg_abort();
    }

    /* label2: */
    *label_ptr[2] = s->code_ptr - label_ptr[2] - 1;
#else
    {
        int32_t offset = GUEST_BASE;
        int base = args[addrlo_idx];

        if (TCG_TARGET_REG_BITS == 64) {
            /* ??? We assume all operations have left us with register
               contents that are zero extended.  So far this appears to
               be true.  If we want to enforce this, we can either do
               an explicit zero-extension here, or (if GUEST_BASE == 0)
               use the ADDR32 prefix.  For now, do nothing.  */

            if (offset != GUEST_BASE) {
                tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_RDI, GUEST_BASE);
                tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_RDI, base);
                base = TCG_REG_RDI, offset = 0;
            }
        }

        tcg_out_qemu_ld_direct(s, data_reg, data_reg2, base, offset, opc);
    }
#endif
}
static void tcg_out_qemu_st_direct(TCGContext *s, int datalo, int datahi,
                                   int base, tcg_target_long ofs, int sizeop)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 1;
#else
    const int bswap = 0;
#endif
    /* ??? Ideally we wouldn't need a scratch register.  For user-only,
       we could perform the bswap twice to restore the original value
       instead of moving to the scratch.  But as it is, the L constraint
       means that the second argument reg is definitely free here.  */
    int scratch = tcg_target_call_iarg_regs[1];

    switch (sizeop) {
    case 0:
        tcg_out_modrm_offset(s, OPC_MOVB_EvGv + P_REXB_R, datalo, base, ofs);
        break;
    case 1:
        if (bswap) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_rolw_8(s, scratch);
            datalo = scratch;
        }
        tcg_out_modrm_offset(s, OPC_MOVL_EvGv + P_DATA16, datalo, base, ofs);
        break;
    case 2:
        if (bswap) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_bswap32(s, scratch);
            datalo = scratch;
        }
        tcg_out_st(s, TCG_TYPE_I32, datalo, base, ofs);
        break;
    case 3:
        if (TCG_TARGET_REG_BITS == 64) {
            if (bswap) {
                tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo);
                tcg_out_bswap64(s, scratch);
                datalo = scratch;
            }
            tcg_out_st(s, TCG_TYPE_I64, datalo, base, ofs);
        } else if (bswap) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datahi);
            tcg_out_bswap32(s, scratch);
            tcg_out_st(s, TCG_TYPE_I32, scratch, base, ofs);
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_bswap32(s, scratch);
            tcg_out_st(s, TCG_TYPE_I32, scratch, base, ofs + 4);
        } else {
            tcg_out_st(s, TCG_TYPE_I32, datalo, base, ofs);
            tcg_out_st(s, TCG_TYPE_I32, datahi, base, ofs + 4);
        }
        break;
    default:
        tcg_abort();
    }
}
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int data_reg, data_reg2 = 0;
    int addrlo_idx;
#if defined(CONFIG_SOFTMMU)
    int mem_index, s_bits;
    int stack_adjust;
    uint8_t *label_ptr[3];
#endif

    data_reg = args[0];
    addrlo_idx = 1;
    if (TCG_TARGET_REG_BITS == 32 && opc == 3) {
        data_reg2 = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)];
    s_bits = opc;

    tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
                     label_ptr, offsetof(CPUTLBEntry, addr_write));

    /* TLB Hit.  */
    tcg_out_qemu_st_direct(s, data_reg, data_reg2,
                           tcg_target_call_iarg_regs[0], 0, opc);

    /* jmp label2 */
    tcg_out8(s, OPC_JMP_short);
    label_ptr[2] = s->code_ptr;
    s->code_ptr++;

    /* TLB Miss.  */

    /* label1: */
    *label_ptr[0] = s->code_ptr - label_ptr[0] - 1;
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        *label_ptr[1] = s->code_ptr - label_ptr[1] - 1;
    }

    /* XXX: move that code at the end of the TB */
    if (TCG_TARGET_REG_BITS == 64) {
        tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                    TCG_REG_RSI, data_reg);
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_RDX, mem_index);
        stack_adjust = 0;
    } else if (TARGET_LONG_BITS == 32) {
        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_EDX, data_reg);
        if (opc == 3) {
            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_ECX, data_reg2);
            tcg_out_pushi(s, mem_index);
            stack_adjust = 4;
        } else {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
            stack_adjust = 0;
        }
    } else {
        if (opc == 3) {
            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_EDX, args[addrlo_idx + 1]);
            tcg_out_pushi(s, mem_index);
            tcg_out_push(s, data_reg2);
            tcg_out_push(s, data_reg);
            stack_adjust = 12;
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_EDX, args[addrlo_idx + 1]);
            switch(opc) {
            case 0:
                tcg_out_ext8u(s, TCG_REG_ECX, data_reg);
                break;
            case 1:
                tcg_out_ext16u(s, TCG_REG_ECX, data_reg);
                break;
            default:
                tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_ECX, data_reg);
                break;
            }
            tcg_out_pushi(s, mem_index);
            stack_adjust = 4;
        }
    }

    tcg_out_calli(s, (tcg_target_long)qemu_st_helpers[s_bits]);

    if (stack_adjust == (TCG_TARGET_REG_BITS / 8)) {
        /* Pop and discard.  This is 2 bytes smaller than the add.  */
        tcg_out_pop(s, TCG_REG_ECX);
    } else if (stack_adjust != 0) {
        tcg_out_addi(s, TCG_REG_CALL_STACK, stack_adjust);
    }

    /* label2: */
    *label_ptr[2] = s->code_ptr - label_ptr[2] - 1;
#else
    {
        int32_t offset = GUEST_BASE;
        int base = args[addrlo_idx];

        if (TCG_TARGET_REG_BITS == 64) {
            /* ??? We assume all operations have left us with register
               contents that are zero extended.  So far this appears to
               be true.  If we want to enforce this, we can either do
               an explicit zero-extension here, or (if GUEST_BASE == 0)
               use the ADDR32 prefix.  For now, do nothing.  */

            if (offset != GUEST_BASE) {
                tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_RDI, GUEST_BASE);
                tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_RDI, base);
                base = TCG_REG_RDI, offset = 0;
            }
        }

        tcg_out_qemu_st_direct(s, data_reg, data_reg2, base, offset, opc);
    }
#endif
}
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    int c, rexw = 0;

#if TCG_TARGET_REG_BITS == 64
# define OP_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
            rexw = P_REXW; /* FALLTHRU */    \
        case glue(glue(INDEX_op_, x), _i32)
#else
# define OP_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32)
#endif

    switch(opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, args[0]);
        tcg_out_jmp(s, (tcg_target_long) tb_ret_addr);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            tcg_out8(s, OPC_JMP_long); /* jmp im */
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
        } else {
            /* indirect jump method */
            tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
                                 (tcg_target_long)(s->tb_next + args[0]));
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out_calli(s, args[0]);
        } else {
            /* call *reg */
            tcg_out_modrm(s, OPC_GRP5, EXT5_CALLN_Ev, args[0]);
        }
        break;
    case INDEX_op_jmp:
        if (const_args[0]) {
            tcg_out_jmp(s, args[0]);
        } else {
            /* jmp *reg */
            tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, args[0]);
        }
        break;
    case INDEX_op_br:
        tcg_out_jxx(s, JCC_JMP, args[0], 0);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    OP_32_64(ld8u):
        /* Note that we can ignore REXW for the zero-extend to 64-bit.  */
        tcg_out_modrm_offset(s, OPC_MOVZBL, args[0], args[1], args[2]);
        break;
    OP_32_64(ld8s):
        tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, args[0], args[1], args[2]);
        break;
    OP_32_64(ld16u):
        /* Note that we can ignore REXW for the zero-extend to 64-bit.  */
        tcg_out_modrm_offset(s, OPC_MOVZWL, args[0], args[1], args[2]);
        break;
    OP_32_64(ld16s):
        tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, args[0], args[1], args[2]);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_ld32u_i64:
#endif
    case INDEX_op_ld_i32:
        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;

    OP_32_64(st8):
        tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R,
                             args[0], args[1], args[2]);
        break;
    OP_32_64(st16):
        tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16,
                             args[0], args[1], args[2]);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_st32_i64:
#endif
    case INDEX_op_st_i32:
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;

    OP_32_64(add):
        /* For 3-operand addition, use LEA.  */
        if (args[0] != args[1]) {
            TCGArg a0 = args[0], a1 = args[1], a2 = args[2], c3 = 0;

            if (const_args[2]) {
                c3 = a2, a2 = -1;
            } else if (a0 == a2) {
                /* Watch out for dest = src + dest, since we've removed
                   the matching constraint on the add.  */
                tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
                break;
            }

            tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3);
            break;
        }
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    gen_arith:
        if (const_args[2]) {
            tgen_arithi(s, c + rexw, args[0], args[2], 0);
        } else {
            tgen_arithr(s, c + rexw, args[0], args[2]);
        }
        break;

    OP_32_64(mul):
        if (const_args[2]) {
            int32_t val;
            val = args[2];
            if (val == (int8_t)val) {
                tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, args[0], args[0]);
                tcg_out8(s, val);
            } else {
                tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, args[0], args[0]);
                tcg_out32(s, val);
            }
        } else {
            tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, args[0], args[2]);
        }
        break;

    OP_32_64(div2):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]);
        break;
    OP_32_64(divu2):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]);
        break;

    OP_32_64(shl):
        c = SHIFT_SHL;
        goto gen_shift;
    OP_32_64(shr):
        c = SHIFT_SHR;
        goto gen_shift;
    OP_32_64(sar):
        c = SHIFT_SAR;
        goto gen_shift;
    OP_32_64(rotl):
        c = SHIFT_ROL;
        goto gen_shift;
    OP_32_64(rotr):
        c = SHIFT_ROR;
        goto gen_shift;
    gen_shift:
        if (const_args[2]) {
            tcg_out_shifti(s, c + rexw, args[0], args[2]);
        } else {
            tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, args[0]);
        }
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond32(s, args[2], args[0], args[1], const_args[1],
                         args[3], 0);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond32(s, args[3], args[0], args[1],
                          args[2], const_args[2]);
        break;

    OP_32_64(bswap16):
        tcg_out_rolw_8(s, args[0]);
        break;
    OP_32_64(bswap32):
        tcg_out_bswap32(s, args[0]);
        break;

    OP_32_64(neg):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, args[0]);
        break;
    OP_32_64(not):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, args[0]);
        break;

    OP_32_64(ext8s):
        tcg_out_ext8s(s, args[0], args[1], rexw);
        break;
    OP_32_64(ext16s):
        tcg_out_ext16s(s, args[0], args[1], rexw);
        break;
    OP_32_64(ext8u):
        tcg_out_ext8u(s, args[0], args[1]);
        break;
    OP_32_64(ext16u):
        tcg_out_ext16u(s, args[0], args[1]);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32u:
#endif
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args, 0);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args, const_args);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_MUL, args[3]);
        break;
    case INDEX_op_add2_i32:
        if (const_args[4]) {
            tgen_arithi(s, ARITH_ADD, args[0], args[4], 1);
        } else {
            tgen_arithr(s, ARITH_ADD, args[0], args[4]);
        }
        if (const_args[5]) {
            tgen_arithi(s, ARITH_ADC, args[1], args[5], 1);
        } else {
            tgen_arithr(s, ARITH_ADC, args[1], args[5]);
        }
        break;
    case INDEX_op_sub2_i32:
        if (const_args[4]) {
            tgen_arithi(s, ARITH_SUB, args[0], args[4], 1);
        } else {
            tgen_arithr(s, ARITH_SUB, args[0], args[4]);
        }
        if (const_args[5]) {
            tgen_arithi(s, ARITH_SBB, args[1], args[5], 1);
        } else {
            tgen_arithr(s, ARITH_SBB, args[1], args[5]);
        }
        break;
#else /* TCG_TARGET_REG_BITS == 64 */
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_modrm_offset(s, OPC_MOVSLQ, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;
    case INDEX_op_qemu_ld32s:
        tcg_out_qemu_ld(s, args, 2 | 4);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond64(s, args[2], args[0], args[1], const_args[1],
                         args[3], 0);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond64(s, args[3], args[0], args[1],
                          args[2], const_args[2]);
        break;

    case INDEX_op_bswap64_i64:
        tcg_out_bswap64(s, args[0]);
        break;
    case INDEX_op_ext32u_i64:
        tcg_out_ext32u(s, args[0], args[1]);
        break;
    case INDEX_op_ext32s_i64:
        tcg_out_ext32s(s, args[0], args[1]);
        break;
#endif

    OP_32_64(deposit):
        if (args[3] == 0 && args[4] == 8) {
            /* load bits 0..7 */
            tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM,
                          args[2], args[0]);
        } else if (args[3] == 8 && args[4] == 8) {
            /* load bits 8..15 */
            tcg_out_modrm(s, OPC_MOVB_EvGv, args[2], args[0] + 4);
        } else if (args[3] == 0 && args[4] == 16) {
            /* load bits 0..15 */
            tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, args[2], args[0]);
        } else {
            tcg_abort();
        }
        break;

    default:
        tcg_abort();
    }
}
static const TCGTargetOpDef x86_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },
    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "q", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "ri" } },
    { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "ri" } },
    { INDEX_op_xor_i32, { "r", "0", "ri" } },

    { INDEX_op_shl_i32, { "r", "0", "ci" } },
    { INDEX_op_shr_i32, { "r", "0", "ci" } },
    { INDEX_op_sar_i32, { "r", "0", "ci" } },
    { INDEX_op_rotl_i32, { "r", "0", "ci" } },
    { INDEX_op_rotr_i32, { "r", "0", "ci" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "0" } },
    { INDEX_op_bswap32_i32, { "r", "0" } },

    { INDEX_op_neg_i32, { "r", "0" } },

    { INDEX_op_not_i32, { "r", "0" } },

    { INDEX_op_ext8s_i32, { "r", "q" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "q" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_setcond_i32, { "q", "r", "ri" } },

    { INDEX_op_deposit_i32, { "Q", "0", "Q" } },

#if TCG_TARGET_REG_BITS == 32
    { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
    { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },
#else
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "0", "re" } },
    { INDEX_op_mul_i64, { "r", "0", "re" } },
    { INDEX_op_div2_i64, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "a", "d", "0", "1", "r" } },
    { INDEX_op_sub_i64, { "r", "0", "re" } },
    { INDEX_op_and_i64, { "r", "0", "reZ" } },
    { INDEX_op_or_i64, { "r", "0", "re" } },
    { INDEX_op_xor_i64, { "r", "0", "re" } },

    { INDEX_op_shl_i64, { "r", "0", "ci" } },
    { INDEX_op_shr_i64, { "r", "0", "ci" } },
    { INDEX_op_sar_i64, { "r", "0", "ci" } },
    { INDEX_op_rotl_i64, { "r", "0", "ci" } },
    { INDEX_op_rotr_i64, { "r", "0", "ci" } },

    { INDEX_op_brcond_i64, { "r", "re" } },
    { INDEX_op_setcond_i64, { "r", "r", "re" } },

    { INDEX_op_bswap16_i64, { "r", "0" } },
    { INDEX_op_bswap32_i64, { "r", "0" } },
    { INDEX_op_bswap64_i64, { "r", "0" } },
    { INDEX_op_neg_i64, { "r", "0" } },
    { INDEX_op_not_i64, { "r", "0" } },

    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },
    { INDEX_op_ext8u_i64, { "r", "r" } },
    { INDEX_op_ext16u_i64, { "r", "r" } },
    { INDEX_op_ext32u_i64, { "r", "r" } },

    { INDEX_op_deposit_i64, { "Q", "0", "Q" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L" } },
#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif
    { -1 },
};
static int tcg_target_callee_save_regs[] = {
#if TCG_TARGET_REG_BITS == 64
    TCG_REG_RBP,
    TCG_REG_RBX,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14, /* Currently used for the global env. */
    TCG_REG_R15,
#else
    TCG_REG_EBP, /* Currently used for the global env. */
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
#endif
};
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i, frame_size, push_size, stack_addend;

    /* TB prologue */

    /* Reserve some stack space, also for TCG temps.  */
    push_size = 1 + ARRAY_SIZE(tcg_target_callee_save_regs);
    push_size *= TCG_TARGET_REG_BITS / 8;

    frame_size = push_size + TCG_STATIC_CALL_ARGS_SIZE +
        CPU_TEMP_BUF_NLONGS * sizeof(long);
    frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    stack_addend = frame_size - push_size;
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    /* Save all callee saved registers.  */
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_push(s, tcg_target_callee_save_regs[i]);
    }

    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    /* jmp *tb.  */
    tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);

    /* TB epilogue */
    tb_ret_addr = s->code_ptr;

    tcg_out_addi(s, TCG_REG_CALL_STACK, stack_addend);

    for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
        tcg_out_pop(s, tcg_target_callee_save_regs[i]);
    }
    tcg_out_opc(s, OPC_RET, 0, 0, 0);
}
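
/* Illustrative frame accounting for the prologue above on a 64-bit host:
   six callee-saved pushes plus the return address make push_size = 7 * 8 =
   56 bytes; frame_size rounds 56 + TCG_STATIC_CALL_ARGS_SIZE + the temp
   buffer up to TCG_TARGET_STACK_ALIGN, and only the difference
   (stack_addend) is subtracted from %rsp, so the epilogue undoes it with a
   single add before the pops and the ret.  */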
static void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();
#endif

    if (TCG_TARGET_REG_BITS == 64) {
        tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
        tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
    } else {
        tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff);
    }

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);
    if (TCG_TARGET_REG_BITS == 64) {
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RDI);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RSI);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
    }

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    tcg_add_target_add_op_defs(x86_op_defs);
}