/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
#if TCG_TARGET_REG_BITS == 64
    "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
    "%r8",  "%r9",  "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
#else
    "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
#endif
};
static const int tcg_target_reg_alloc_order[] = {
#if TCG_TARGET_REG_BITS == 64
static const int tcg_target_call_iarg_regs[] = {
#if TCG_TARGET_REG_BITS == 64
#if defined(_WIN64)
    TCG_REG_RCX,
    TCG_REG_RDX,
#else
    TCG_REG_RDI,
    TCG_REG_RSI,
    TCG_REG_RDX,
    TCG_REG_RCX,
#endif
    TCG_REG_R8,
    TCG_REG_R9,
#else
    /* 32 bit mode uses stack based calling convention (GCC default). */
#endif
};
static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_EAX,
#if TCG_TARGET_REG_BITS == 32
    TCG_REG_EDX
#endif
};
/* Registers used with L constraint, which are the first argument
   registers on x86_64, and two random call clobbered registers on
   i386. */
#if TCG_TARGET_REG_BITS == 64
# define TCG_REG_L0 tcg_target_call_iarg_regs[0]
# define TCG_REG_L1 tcg_target_call_iarg_regs[1]
#else
# define TCG_REG_L0 TCG_REG_EAX
# define TCG_REG_L1 TCG_REG_EDX
#endif
static uint8_t *tb_ret_addr;
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    value += addend;
    switch (type) {
    case R_386_PC32:
        value -= (uintptr_t)code_ptr;
        if (value != (int32_t)value) {
            tcg_abort();
        }
        *(uint32_t *)code_ptr = value;
        break;
    case R_386_PC8:
        value -= (uintptr_t)code_ptr;
        if (value != (int8_t)value) {
            tcg_abort();
        }
        *(uint8_t *)code_ptr = value;
        break;
    default:
        tcg_abort();
    }
}
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'a':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
        break;
    case 'b':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
        break;
    case 'c':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
        break;
    case 'd':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
        break;
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
        break;
    case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
        break;
    case 'q':
        ct->ct |= TCG_CT_REG;
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_regset_set32(ct->u.regs, 0, 0xffff);
        } else {
            tcg_regset_set32(ct->u.regs, 0, 0xf);
        }
        break;
    case 'Q':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xf);
        break;
    case 'r':
        ct->ct |= TCG_CT_REG;
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_regset_set32(ct->u.regs, 0, 0xffff);
        } else {
            tcg_regset_set32(ct->u.regs, 0, 0xff);
        }
        break;

        /* qemu_ld/st address constraint */
    case 'L':
        ct->ct |= TCG_CT_REG;
#if TCG_TARGET_REG_BITS == 64
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
#else
        tcg_regset_set32(ct->u.regs, 0, 0xff);
#endif
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_L0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_L1);
        break;

    case 'e':
        ct->ct |= TCG_CT_CONST_S32;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_U32;
        break;

    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
        return 1;
    }
    return 0;
}
#if TCG_TARGET_REG_BITS == 64
# define LOWREGMASK(x)  ((x) & 7)
#else
# define LOWREGMASK(x)  (x)
#endif
#define P_EXT       0x100   /* 0x0f opcode prefix */
#define P_DATA16    0x200   /* 0x66 opcode prefix */
#if TCG_TARGET_REG_BITS == 64
# define P_ADDR32   0x400   /* 0x67 opcode prefix */
# define P_REXW     0x800   /* Set REX.W = 1 */
# define P_REXB_R   0x1000  /* REG field as byte register */
# define P_REXB_RM  0x2000  /* R/M field as byte register */
# define P_GS       0x4000  /* gs segment override */
#else
# define P_ADDR32   0
# define P_REXW     0
# define P_REXB_R   0
# define P_REXB_RM  0
# define P_GS       0
#endif
#define OPC_ARITH_EvIz  (0x81)
#define OPC_ARITH_EvIb  (0x83)
#define OPC_ARITH_GvEv  (0x03)          /* ... plus (ARITH_FOO << 3) */
#define OPC_ADD_GvEv    (OPC_ARITH_GvEv | (ARITH_ADD << 3))
#define OPC_BSWAP       (0xc8 | P_EXT)
#define OPC_CALL_Jz     (0xe8)
#define OPC_CMOVCC      (0x40 | P_EXT)  /* ... plus condition code */
#define OPC_CMP_GvEv    (OPC_ARITH_GvEv | (ARITH_CMP << 3))
#define OPC_DEC_r32     (0x48)
#define OPC_IMUL_GvEv   (0xaf | P_EXT)
#define OPC_IMUL_GvEvIb (0x6b)
#define OPC_IMUL_GvEvIz (0x69)
#define OPC_INC_r32     (0x40)
#define OPC_JCC_long    (0x80 | P_EXT)  /* ... plus condition code */
#define OPC_JCC_short   (0x70)          /* ... plus condition code */
#define OPC_JMP_long    (0xe9)
#define OPC_JMP_short   (0xeb)
#define OPC_LEA         (0x8d)
#define OPC_MOVB_EvGv   (0x88)          /* stores, more or less */
#define OPC_MOVL_EvGv   (0x89)          /* stores, more or less */
#define OPC_MOVL_GvEv   (0x8b)          /* loads, more or less */
#define OPC_MOVB_EvIz   (0xc6)
#define OPC_MOVL_EvIz   (0xc7)
#define OPC_MOVL_Iv     (0xb8)
#define OPC_MOVSBL      (0xbe | P_EXT)
#define OPC_MOVSWL      (0xbf | P_EXT)
#define OPC_MOVSLQ      (0x63 | P_REXW)
#define OPC_MOVZBL      (0xb6 | P_EXT)
#define OPC_MOVZWL      (0xb7 | P_EXT)
#define OPC_POP_r32     (0x58)
#define OPC_PUSH_r32    (0x50)
#define OPC_PUSH_Iv     (0x68)
#define OPC_PUSH_Ib     (0x6a)
#define OPC_RET         (0xc3)
#define OPC_SETCC       (0x90 | P_EXT | P_REXB_RM) /* ... plus cc */
#define OPC_SHIFT_1     (0xd1)
#define OPC_SHIFT_Ib    (0xc1)
#define OPC_SHIFT_cl    (0xd3)
#define OPC_TESTL       (0x85)
#define OPC_XCHG_ax_r32 (0x90)

#define OPC_GRP3_Ev     (0xf7)
#define OPC_GRP5        (0xff)
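/*
 * Illustrative note (not in the original source): the P_* flags above
 * occupy bits 8 and up of these opcode values, so they travel together
 * with the one-byte opcode through the tcg_out_* helpers and are peeled
 * off again inside tcg_out_opc.  For example, OPC_MOVZWL is
 * 0xb7 | P_EXT, which is emitted as the two bytes 0x0f 0xb7; adding
 * P_REXW on x86_64 would prepend a 0x48 REX.W prefix to that.
 */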
/* Group 1 opcode extensions for 0x80-0x83.
   These are also used as modifiers for OPC_ARITH.  */
#define ARITH_ADD 0
#define ARITH_OR  1
#define ARITH_ADC 2
#define ARITH_SBB 3
#define ARITH_AND 4
#define ARITH_SUB 5
#define ARITH_XOR 6
#define ARITH_CMP 7

/* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3.  */
#define SHIFT_ROL 0
#define SHIFT_ROR 1
#define SHIFT_SHL 4
#define SHIFT_SHR 5
#define SHIFT_SAR 7

/* Group 3 opcode extensions for 0xf6, 0xf7.  To be used with OPC_GRP3.  */
#define EXT3_NOT   2
#define EXT3_NEG   3
#define EXT3_MUL   4
#define EXT3_IMUL  5
#define EXT3_DIV   6
#define EXT3_IDIV  7

/* Group 5 opcode extensions for 0xff.  To be used with OPC_GRP5.  */
#define EXT5_INC_Ev   0
#define EXT5_DEC_Ev   1
#define EXT5_CALLN_Ev 2
#define EXT5_JMPN_Ev  4
/* Condition codes to be added to OPC_JCC_{long,short}.  */
#define JCC_JMP (-1)
#define JCC_JO  0x0
#define JCC_JNO 0x1
#define JCC_JB  0x2
#define JCC_JAE 0x3
#define JCC_JE  0x4
#define JCC_JNE 0x5
#define JCC_JBE 0x6
#define JCC_JA  0x7
#define JCC_JS  0x8
#define JCC_JNS 0x9
#define JCC_JP  0xa
#define JCC_JNP 0xb
#define JCC_JL  0xc
#define JCC_JGE 0xd
#define JCC_JLE 0xe
#define JCC_JG  0xf

static const uint8_t tcg_cond_to_jcc[] = {
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,
    [TCG_COND_LE] = JCC_JLE,
    [TCG_COND_GT] = JCC_JG,
    [TCG_COND_LTU] = JCC_JB,
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
};
#if TCG_TARGET_REG_BITS == 64
static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
{
    int rex;

    if (opc & P_GS) {
        tcg_out8(s, 0x65);
    }
    if (opc & P_DATA16) {
        /* We should never be asking for both 16 and 64-bit operation.  */
        assert((opc & P_REXW) == 0);
        tcg_out8(s, 0x66);
    }
    if (opc & P_ADDR32) {
        tcg_out8(s, 0x67);
    }

    rex = 0;
    rex |= (opc & P_REXW) >> 8;  /* REX.W */
    rex |= (r & 8) >> 1;         /* REX.R */
    rex |= (x & 8) >> 2;         /* REX.X */
    rex |= (rm & 8) >> 3;        /* REX.B */

    /* P_REXB_{R,RM} indicates that the given register is the low byte.
       For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do,
       as otherwise the encoding indicates %[abcd]h.  Note that the values
       that are ORed in merely indicate that the REX byte must be present;
       those bits get discarded in output.  */
    rex |= opc & (r >= 4 ? P_REXB_R : 0);
    rex |= opc & (rm >= 4 ? P_REXB_RM : 0);

    if (rex) {
        tcg_out8(s, (uint8_t)(rex | 0x40));
    }

    if (opc & P_EXT) {
        tcg_out8(s, 0x0f);
    }
    tcg_out8(s, opc);
}
#else
static void tcg_out_opc(TCGContext *s, int opc)
{
    if (opc & P_DATA16) {
        tcg_out8(s, 0x66);
    }
    if (opc & P_EXT) {
        tcg_out8(s, 0x0f);
    }
    tcg_out8(s, opc);
}
/* Discard the register arguments to tcg_out_opc early, so as not to penalize
   the 32-bit compilation paths.  This method works with all versions of gcc,
   whereas relying on optimization may not be able to exclude them.  */
#define tcg_out_opc(s, opc, r, rm, x)  (tcg_out_opc)(s, opc)
#endif
static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
{
    tcg_out_opc(s, opc, r, rm, 0);
    tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
}
/* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
   We handle either RM and INDEX missing with a negative value.  In 64-bit
   mode for absolute addresses, ~RM is the size of the immediate operand
   that will follow the instruction.  */

static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
                                     int index, int shift,
                                     tcg_target_long offset)
{
    int mod, len;

    if (index < 0 && rm < 0) {
        if (TCG_TARGET_REG_BITS == 64) {
            /* Try for a rip-relative addressing mode.  This has replaced
               the 32-bit-mode absolute addressing encoding.  */
            tcg_target_long pc = (tcg_target_long)s->code_ptr + 5 + ~rm;
            tcg_target_long disp = offset - pc;
            if (disp == (int32_t)disp) {
                tcg_out_opc(s, opc, r, 0, 0);
                tcg_out8(s, (LOWREGMASK(r) << 3) | 5);
                tcg_out32(s, disp);
                return;
            }

            /* Try for an absolute address encoding.  This requires the
               use of the MODRM+SIB encoding and is therefore larger than
               rip-relative addressing.  */
            if (offset == (int32_t)offset) {
                tcg_out_opc(s, opc, r, 0, 0);
                tcg_out8(s, (LOWREGMASK(r) << 3) | 4);
                tcg_out8(s, (4 << 3) | 5);
                tcg_out32(s, offset);
                return;
            }

            /* ??? The memory isn't directly addressable.  */
            tcg_abort();
        } else {
            /* Absolute address.  */
            tcg_out_opc(s, opc, r, 0, 0);
            tcg_out8(s, (r << 3) | 5);
            tcg_out32(s, offset);
            return;
        }
    }

    /* Find the length of the immediate addend.  Note that the encoding
       that would be used for (%ebp) indicates absolute addressing.  */
    if (rm < 0) {
        mod = 0, len = 4, rm = 5;
    } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) {
        mod = 0, len = 0;
    } else if (offset == (int8_t)offset) {
        mod = 0x40, len = 1;
    } else {
        mod = 0x80, len = 4;
    }

    /* Use a single byte MODRM format if possible.  Note that the encoding
       that would be used for %esp is the escape to the two byte form.  */
    if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) {
        /* Single byte MODRM format.  */
        tcg_out_opc(s, opc, r, rm, 0);
        tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
    } else {
        /* Two byte MODRM+SIB format.  */

        /* Note that the encoding that would place %esp into the index
           field indicates no index register.  In 64-bit mode, the REX.X
           bit counts, so %r12 can be used as the index.  */
        if (index < 0) {
            index = 4;
        } else {
            assert(index != TCG_REG_ESP);
        }

        tcg_out_opc(s, opc, r, rm, index);
        tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4);
        tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm));
    }

    if (len == 1) {
        tcg_out8(s, offset);
    } else if (len == 4) {
        tcg_out32(s, offset);
    }
}
/* A simplification of the above with no index or shift.  */
static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r,
                                        int rm, tcg_target_long offset)
{
    tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
}
/* Generate dest op= src.  Uses the same ARITH_* codes as tgen_arithi.  */
static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
{
    /* Propagate an opcode prefix, such as P_REXW.  */
    int ext = subop & ~0x7;
    subop &= 0x7;

    tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src);
}
static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    if (arg != ret) {
        int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
        tcg_out_modrm(s, opc, ret, arg);
    }
}
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    if (arg == 0) {
        tgen_arithr(s, ARITH_XOR, ret, ret);
        return;
    } else if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
        tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0);
        tcg_out32(s, arg);
    } else if (arg == (int32_t)arg) {
        tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret);
        tcg_out32(s, arg);
    } else {
        tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0);
        tcg_out32(s, arg);
        tcg_out32(s, arg >> 31 >> 1);
    }
}
static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
{
    if (val == (int8_t)val) {
        tcg_out_opc(s, OPC_PUSH_Ib, 0, 0, 0);
        tcg_out8(s, val);
    } else if (val == (int32_t)val) {
        tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0);
        tcg_out32(s, val);
    } else {
        tcg_abort();
    }
}
static inline void tcg_out_push(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0);
}

static inline void tcg_out_pop(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0);
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, tcg_target_long arg2)
{
    int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
    tcg_out_modrm_offset(s, opc, ret, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, tcg_target_long arg2)
{
    int opc = OPC_MOVL_EvGv + (type == TCG_TYPE_I64 ? P_REXW : 0);
    tcg_out_modrm_offset(s, opc, arg, arg1, arg2);
}
static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
{
    /* Propagate an opcode prefix, such as P_DATA16.  */
    int ext = subopc & ~0x7;
    subopc &= 0x7;

    if (count == 1) {
        tcg_out_modrm(s, OPC_SHIFT_1 + ext, subopc, reg);
    } else {
        tcg_out_modrm(s, OPC_SHIFT_Ib + ext, subopc, reg);
        tcg_out8(s, count);
    }
}
static inline void tcg_out_bswap32(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_BSWAP + LOWREGMASK(reg), 0, reg, 0);
}

static inline void tcg_out_rolw_8(TCGContext *s, int reg)
{
    tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8);
}

static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
{
    /* movzbl */
    assert(src < 4 || TCG_TARGET_REG_BITS == 64);
    tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src);
}

static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw)
{
    /* movsbl */
    assert(src < 4 || TCG_TARGET_REG_BITS == 64);
    tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src);
}

static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
{
    /* movzwl */
    tcg_out_modrm(s, OPC_MOVZWL, dest, src);
}

static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw)
{
    /* movsw[lq] */
    tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src);
}

static inline void tcg_out_ext32u(TCGContext *s, int dest, int src)
{
    /* 32-bit mov zero extends.  */
    tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src);
}

static inline void tcg_out_ext32s(TCGContext *s, int dest, int src)
{
    tcg_out_modrm(s, OPC_MOVSLQ, dest, src);
}

static inline void tcg_out_bswap64(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0);
}
static void tgen_arithi(TCGContext *s, int c, int r0,
                        tcg_target_long val, int cf)
{
    int rexw = 0;

    if (TCG_TARGET_REG_BITS == 64) {
        rexw = c & -8;
        c &= 7;
    }

    /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
       partial flags update stalls on Pentium4 and are not recommended
       by current Intel optimization manuals.  */
    if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
        int is_inc = (c == ARITH_ADD) ^ (val < 0);
        if (TCG_TARGET_REG_BITS == 64) {
            /* The single-byte increment encodings are re-tasked as the
               REX prefixes.  Use the MODRM encoding.  */
            tcg_out_modrm(s, OPC_GRP5 + rexw,
                          (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0);
        } else {
            tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0);
        }
        return;
    }

    if (c == ARITH_AND) {
        if (TCG_TARGET_REG_BITS == 64) {
            if (val == 0xffffffffu) {
                tcg_out_ext32u(s, r0, r0);
                return;
            }
            if (val == (uint32_t)val) {
                /* AND with no high bits set can use a 32-bit operation.  */
                rexw = 0;
            }
        }
        if (val == 0xffu && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) {
            tcg_out_ext8u(s, r0, r0);
            return;
        }
        if (val == 0xffffu) {
            tcg_out_ext16u(s, r0, r0);
            return;
        }
    }

    if (val == (int8_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIb + rexw, c, r0);
        tcg_out8(s, val);
        return;
    }
    if (rexw == 0 || val == (int32_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIz + rexw, c, r0);
        tcg_out32(s, val);
        return;
    }

    tcg_abort();
}
static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0) {
        tgen_arithi(s, ARITH_ADD + P_REXW, reg, val, 0);
    }
}
/* Use SMALL != 0 to force a short forward branch.  */
static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
{
    int32_t val, val1;
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        val = l->u.value - (tcg_target_long)s->code_ptr;
        val1 = val - 2;
        if ((int8_t)val1 == val1) {
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_short);
            } else {
                tcg_out8(s, OPC_JCC_short + opc);
            }
            tcg_out8(s, val1);
        } else {
            if (small) {
                tcg_abort();
            }
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_long);
                tcg_out32(s, val - 5);
            } else {
                tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
                tcg_out32(s, val - 6);
            }
        }
    } else if (small) {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_short);
        } else {
            tcg_out8(s, OPC_JCC_short + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC8, label_index, -1);
        s->code_ptr += 1;
    } else {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_long);
        } else {
            tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
        s->code_ptr += 4;
    }
}
static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
                        int const_arg2, int rexw)
{
    if (const_arg2) {
        if (arg2 == 0) {
            /* test r, r */
            tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1);
        } else {
            tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0);
        }
    } else {
        tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);
    }
}
static void tcg_out_brcond32(TCGContext *s, TCGCond cond,
                             TCGArg arg1, TCGArg arg2, int const_arg2,
                             int label_index, int small)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_brcond64(TCGContext *s, TCGCond cond,
                             TCGArg arg1, TCGArg arg2, int const_arg2,
                             int label_index, int small)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
}
#else
/* XXX: we implement it at the target level to avoid having to
   handle cross basic blocks temporaries */
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
                            const int *const_args, int small)
{
    int label_next;
    label_next = gen_new_label();
    switch (args[4]) {
    case TCG_COND_EQ:
        tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
                         label_next, 1);
        tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3],
                         args[5], small);
        break;
    case TCG_COND_NE:
        tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
                         args[5], small);
        tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3],
                         args[5], small);
        break;
    case TCG_COND_LT:
        tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_LE:
        tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_GT:
        tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_GE:
        tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_LTU:
        tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_LEU:
        tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_GTU:
        tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_GEU:
        tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    default:
        tcg_abort();
    }
    tcg_out_label(s, label_next, s->code_ptr);
}
#endif
static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg arg1, TCGArg arg2, int const_arg2)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
    tcg_out_ext8u(s, dest, dest);
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg arg1, TCGArg arg2, int const_arg2)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
    tcg_out_ext8u(s, dest, dest);
}
#else
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
                             const int *const_args)
{
    TCGArg new_args[6];
    int label_true, label_over;

    memcpy(new_args, args+1, 5*sizeof(TCGArg));

    if (args[0] == args[1] || args[0] == args[2]
        || (!const_args[3] && args[0] == args[3])
        || (!const_args[4] && args[0] == args[4])) {
        /* When the destination overlaps with one of the argument
           registers, don't do anything tricky.  */
        label_true = gen_new_label();
        label_over = gen_new_label();

        new_args[5] = label_true;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
        tcg_out_jxx(s, JCC_JMP, label_over, 1);
        tcg_out_label(s, label_true, s->code_ptr);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
        tcg_out_label(s, label_over, s->code_ptr);
    } else {
        /* When the destination does not overlap one of the arguments,
           clear the destination first, jump if cond false, and emit an
           increment in the true case.  This results in smaller code.  */
        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);

        label_over = gen_new_label();
        new_args[4] = tcg_invert_cond(new_args[4]);
        new_args[5] = label_over;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
        tcg_out_label(s, label_over, s->code_ptr);
    }
}
#endif
static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg c1, TCGArg c2, int const_c2,
                              TCGArg v1)
{
    tcg_out_cmp(s, c1, c2, const_c2, 0);
    tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond], dest, v1);
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg c1, TCGArg c2, int const_c2,
                              TCGArg v1)
{
    tcg_out_cmp(s, c1, c2, const_c2, P_REXW);
    tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | P_REXW, dest, v1);
}
#endif
static void tcg_out_branch(TCGContext *s, int call, tcg_target_long dest)
{
    tcg_target_long disp = dest - (tcg_target_long)s->code_ptr - 5;

    if (disp == (int32_t)disp) {
        tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
        tcg_out32(s, disp);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R10, dest);
        tcg_out_modrm(s, OPC_GRP5,
                      call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev, TCG_REG_R10);
    }
}

static inline void tcg_out_calli(TCGContext *s, tcg_target_long dest)
{
    tcg_out_branch(s, 1, dest);
}

static void tcg_out_jmp(TCGContext *s, tcg_target_long dest)
{
    tcg_out_branch(s, 0, dest);
}
#if defined(CONFIG_SOFTMMU)

#include "exec/softmmu_defs.h"

/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
static const void *qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void *qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};

static void add_qemu_ldst_label(TCGContext *s,
                                int is_ld,
                                int opc,
                                int data_reg,
                                int data_reg2,
                                int addrlo_reg,
                                int addrhi_reg,
                                int mem_index,
                                uint8_t *raddr,
                                uint8_t **label_ptr);
/* Perform the TLB load and compare.

   Inputs:
   ADDRLO_IDX contains the index into ARGS of the low part of the
   address; the high part of the address is at ADDR_LOW_IDX+1.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   Outputs:
   LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses)
   positions of the displacements of forward jumps to the TLB miss case.

   Second argument register is loaded with the low part of the address.
   In the TLB hit case, it has been adjusted as indicated by the TLB
   and so is a host address.  In the TLB miss case, it continues to
   hold a guest address.

   First argument register is clobbered.  */
static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
                                    int mem_index, int s_bits,
                                    const TCGArg *args,
                                    uint8_t **label_ptr, int which)
{
    const int addrlo = args[addrlo_idx];
    const int r0 = TCG_REG_L0;
    const int r1 = TCG_REG_L1;
    TCGType type = TCG_TYPE_I32;
    int rexw = 0;

    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 64) {
        type = TCG_TYPE_I64;
        rexw = P_REXW;
    }

    tcg_out_mov(s, type, r0, addrlo);
    tcg_out_mov(s, type, r1, addrlo);

    tcg_out_shifti(s, SHIFT_SHR + rexw, r0,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tgen_arithi(s, ARITH_AND + rexw, r1,
                TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
    tgen_arithi(s, ARITH_AND + rexw, r0,
                (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);

    tcg_out_modrm_sib_offset(s, OPC_LEA + P_REXW, r0, TCG_AREG0, r0, 0,
                             offsetof(CPUArchState, tlb_table[mem_index][0])
                             + which);

    /* cmp 0(r0), r1 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv + rexw, r1, r0, 0);

    tcg_out_mov(s, type, r1, addrlo);

    /* jne slow_path */
    tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
    label_ptr[0] = s->code_ptr;
    s->code_ptr += 4;

    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        /* cmp 4(r0), addrhi */
        tcg_out_modrm_offset(s, OPC_CMP_GvEv, args[addrlo_idx+1], r0, 4);

        /* jne slow_path */
        tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
        label_ptr[1] = s->code_ptr;
        s->code_ptr += 4;
    }

    /* TLB Hit.  */

    /* add addend(r0), r1 */
    tcg_out_modrm_offset(s, OPC_ADD_GvEv + P_REXW, r1, r0,
                         offsetof(CPUTLBEntry, addend) - which);
}
#elif defined(__x86_64__) && defined(__linux__)
# include <asm/prctl.h>
# include <sys/prctl.h>

int arch_prctl(int code, unsigned long addr);

static int guest_base_flags;
static inline void setup_guest_base_seg(void)
{
    if (arch_prctl(ARCH_SET_GS, GUEST_BASE) == 0) {
        guest_base_flags = P_GS;
    }
}
#else
# define guest_base_flags 0
static inline void setup_guest_base_seg(void) { }
#endif /* SOFTMMU */
static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo, int datahi,
                                   int base, tcg_target_long ofs, int seg,
                                   int sizeop)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 1;
#else
    const int bswap = 0;
#endif
    switch (sizeop) {
    case 0:
        tcg_out_modrm_offset(s, OPC_MOVZBL + seg, datalo, base, ofs);
        break;
    case 0 | 4:
        tcg_out_modrm_offset(s, OPC_MOVSBL + P_REXW + seg, datalo, base, ofs);
        break;
    case 1:
        tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs);
        if (bswap) {
            tcg_out_rolw_8(s, datalo);
        }
        break;
    case 1 | 4:
        if (bswap) {
            tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs);
            tcg_out_rolw_8(s, datalo);
            tcg_out_modrm(s, OPC_MOVSWL + P_REXW, datalo, datalo);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVSWL + P_REXW + seg,
                                 datalo, base, ofs);
        }
        break;
    case 2:
        tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg, datalo, base, ofs);
        if (bswap) {
            tcg_out_bswap32(s, datalo);
        }
        break;
#if TCG_TARGET_REG_BITS == 64
    case 2 | 4:
        if (bswap) {
            tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg, datalo, base, ofs);
            tcg_out_bswap32(s, datalo);
            tcg_out_ext32s(s, datalo, datalo);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVSLQ + seg, datalo, base, ofs);
        }
        break;
#endif
    case 3:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_modrm_offset(s, OPC_MOVL_GvEv + P_REXW + seg,
                                 datalo, base, ofs);
            if (bswap) {
                tcg_out_bswap64(s, datalo);
            }
        } else {
            if (bswap) {
                int t = datalo;
                datalo = datahi;
                datahi = t;
            }
            if (base != datalo) {
                tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
                                     datalo, base, ofs);
                tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
                                     datahi, base, ofs + 4);
            } else {
                tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
                                     datahi, base, ofs + 4);
                tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
                                     datalo, base, ofs);
            }
            if (bswap) {
                tcg_out_bswap32(s, datalo);
                tcg_out_bswap32(s, datahi);
            }
        }
        break;
    default:
        tcg_abort();
    }
}
/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
   EAX.  It will be useful once fixed-register globals are less
   common. */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int data_reg, data_reg2 = 0;
    int addrlo_idx;
#if defined(CONFIG_SOFTMMU)
    int mem_index, s_bits;
    uint8_t *label_ptr[2];
#endif

    data_reg = args[0];
    addrlo_idx = 1;
    if (TCG_TARGET_REG_BITS == 32 && opc == 3) {
        data_reg2 = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)];
    s_bits = opc & 3;

    tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
                     label_ptr, offsetof(CPUTLBEntry, addr_read));

    /* TLB Hit.  */
    tcg_out_qemu_ld_direct(s, data_reg, data_reg2, TCG_REG_L1, 0, 0, opc);

    /* Record the current context of a load into ldst label */
    add_qemu_ldst_label(s,
                        1,
                        opc,
                        data_reg,
                        data_reg2,
                        args[addrlo_idx],
                        args[addrlo_idx + 1],
                        mem_index,
                        s->code_ptr,
                        label_ptr);
#else
    {
        int32_t offset = GUEST_BASE;
        int base = args[addrlo_idx];
        int seg = 0;

        /* ??? We assume all operations have left us with register contents
           that are zero extended.  So far this appears to be true.  If we
           want to enforce this, we can either do an explicit zero-extension
           here, or (if GUEST_BASE == 0, or a segment register is in use)
           use the ADDR32 prefix.  For now, do nothing.  */
        if (GUEST_BASE && guest_base_flags) {
            seg = guest_base_flags;
            offset = 0;
        } else if (TCG_TARGET_REG_BITS == 64 && offset != GUEST_BASE) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE);
            tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base);
            base = TCG_REG_L1;
            offset = 0;
        }

        tcg_out_qemu_ld_direct(s, data_reg, data_reg2, base, offset, seg, opc);
    }
#endif
}
static void tcg_out_qemu_st_direct(TCGContext *s, int datalo, int datahi,
                                   int base, tcg_target_long ofs, int seg,
                                   int sizeop)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 1;
#else
    const int bswap = 0;
#endif
    /* ??? Ideally we wouldn't need a scratch register.  For user-only,
       we could perform the bswap twice to restore the original value
       instead of moving to the scratch.  But as it is, the L constraint
       means that TCG_REG_L0 is definitely free here.  */
    const int scratch = TCG_REG_L0;

    switch (sizeop) {
    case 0:
        tcg_out_modrm_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
                             datalo, base, ofs);
        break;
    case 1:
        if (bswap) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_rolw_8(s, scratch);
            datalo = scratch;
        }
        tcg_out_modrm_offset(s, OPC_MOVL_EvGv + P_DATA16 + seg,
                             datalo, base, ofs);
        break;
    case 2:
        if (bswap) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_bswap32(s, scratch);
            datalo = scratch;
        }
        tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datalo, base, ofs);
        break;
    case 3:
        if (TCG_TARGET_REG_BITS == 64) {
            if (bswap) {
                tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo);
                tcg_out_bswap64(s, scratch);
                datalo = scratch;
            }
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + P_REXW + seg,
                                 datalo, base, ofs);
        } else if (bswap) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datahi);
            tcg_out_bswap32(s, scratch);
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs);
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_bswap32(s, scratch);
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs+4);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datalo, base, ofs);
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datahi, base, ofs+4);
        }
        break;
    default:
        tcg_abort();
    }
}
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int data_reg, data_reg2 = 0;
    int addrlo_idx;
#if defined(CONFIG_SOFTMMU)
    int mem_index, s_bits;
    uint8_t *label_ptr[2];
#endif

    data_reg = args[0];
    addrlo_idx = 1;
    if (TCG_TARGET_REG_BITS == 32 && opc == 3) {
        data_reg2 = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)];
    s_bits = opc;

    tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
                     label_ptr, offsetof(CPUTLBEntry, addr_write));

    /* TLB Hit.  */
    tcg_out_qemu_st_direct(s, data_reg, data_reg2, TCG_REG_L1, 0, 0, opc);

    /* Record the current context of a store into ldst label */
    add_qemu_ldst_label(s,
                        0,
                        opc,
                        data_reg,
                        data_reg2,
                        args[addrlo_idx],
                        args[addrlo_idx + 1],
                        mem_index,
                        s->code_ptr,
                        label_ptr);
#else
    {
        int32_t offset = GUEST_BASE;
        int base = args[addrlo_idx];
        int seg = 0;

        /* ??? We assume all operations have left us with register contents
           that are zero extended.  So far this appears to be true.  If we
           want to enforce this, we can either do an explicit zero-extension
           here, or (if GUEST_BASE == 0, or a segment register is in use)
           use the ADDR32 prefix.  For now, do nothing.  */
        if (GUEST_BASE && guest_base_flags) {
            seg = guest_base_flags;
            offset = 0;
        } else if (TCG_TARGET_REG_BITS == 64 && offset != GUEST_BASE) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE);
            tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base);
            base = TCG_REG_L1;
            offset = 0;
        }

        tcg_out_qemu_st_direct(s, data_reg, data_reg2, base, offset, seg, opc);
    }
#endif
}
#if defined(CONFIG_SOFTMMU)
/*
 * Record the context of a call to the out of line helper code for the slow
 * path for a load or store, so that we can later generate the correct
 * helper code
 */
static void add_qemu_ldst_label(TCGContext *s,
                                int is_ld,
                                int opc,
                                int data_reg,
                                int data_reg2,
                                int addrlo_reg,
                                int addrhi_reg,
                                int mem_index,
                                uint8_t *raddr,
                                uint8_t **label_ptr)
{
    int idx;
    TCGLabelQemuLdst *label;

    if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
        tcg_abort();
    }

    idx = s->nb_qemu_ldst_labels++;
    label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[idx];
    label->is_ld = is_ld;
    label->opc = opc;
    label->datalo_reg = data_reg;
    label->datahi_reg = data_reg2;
    label->addrlo_reg = addrlo_reg;
    label->addrhi_reg = addrhi_reg;
    label->mem_index = mem_index;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr[0];
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        label->label_ptr[1] = label_ptr[1];
    }
}
/*
 * Generate code for the slow path for a load at the end of block
 */
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *label)
{
    int s_bits;
    int opc = label->opc;
    int mem_index = label->mem_index;
#if TCG_TARGET_REG_BITS == 32
    int stack_adjust;
    int addrlo_reg = label->addrlo_reg;
    int addrhi_reg = label->addrhi_reg;
#endif
    int data_reg = label->datalo_reg;
    int data_reg2 = label->datahi_reg;
    uint8_t *raddr = label->raddr;
    uint8_t **label_ptr = &label->label_ptr[0];

    s_bits = opc & 3;

    /* resolve label address */
    *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
    }

#if TCG_TARGET_REG_BITS == 32
    tcg_out_pushi(s, mem_index);
    stack_adjust = 4;
    if (TARGET_LONG_BITS == 64) {
        tcg_out_push(s, addrhi_reg);
        stack_adjust += 4;
    }
    tcg_out_push(s, addrlo_reg);
    stack_adjust += 4;
    tcg_out_push(s, TCG_AREG0);
    stack_adjust += 4;
#else
    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0], TCG_AREG0);
    /* The second argument is already loaded with addrlo.  */
    tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], mem_index);
#endif

    /* Code generation of qemu_ld/st's slow path calling MMU helper

       PRE_PROC ...
       call MMU helper
       jmp POST_PROC (2b) : short forward jump <- GETRA()
       jmp next_code (5b) : dummy long backward jump which is never executed
       POST_PROC ... : do post-processing <- GETRA() + 7
       jmp next_code : jump to the code corresponding to next IR of qemu_ld/st
    */

    tcg_out_calli(s, (tcg_target_long)qemu_ld_helpers[s_bits]);

    /* Jump to post-processing code */
    tcg_out8(s, OPC_JMP_short);
    tcg_out8(s, 5);
    /* Dummy backward jump having information of fast path's pc for MMU
       helpers */
    tcg_out8(s, OPC_JMP_long);
    *(int32_t *)s->code_ptr = (int32_t)(raddr - s->code_ptr - 4);
    s->code_ptr += 4;

#if TCG_TARGET_REG_BITS == 32
    if (stack_adjust == (TCG_TARGET_REG_BITS / 8)) {
        /* Pop and discard.  This is 2 bytes smaller than the add.  */
        tcg_out_pop(s, TCG_REG_ECX);
    } else if (stack_adjust != 0) {
        tcg_out_addi(s, TCG_REG_CALL_STACK, stack_adjust);
    }
#endif

    switch (opc) {
    case 0 | 4:
        tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW);
        break;
    case 0:
        tcg_out_ext8u(s, data_reg, TCG_REG_EAX);
        break;
    case 1:
        tcg_out_ext16u(s, data_reg, TCG_REG_EAX);
        break;
    case 2:
        tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
        break;
#if TCG_TARGET_REG_BITS == 64
    case 2 | 4:
        tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
        break;
#endif
    case 3:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
        } else if (data_reg == TCG_REG_EDX) {
            /* xchg %edx, %eax */
            tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
            tcg_out_mov(s, TCG_TYPE_I32, data_reg2, TCG_REG_EAX);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
            tcg_out_mov(s, TCG_TYPE_I32, data_reg2, TCG_REG_EDX);
        }
        break;
    default:
        tcg_abort();
    }

    /* Jump to the code corresponding to next IR of qemu_ld/st */
    tcg_out_jmp(s, (tcg_target_long)raddr);
}
/*
 * Generate code for the slow path for a store at the end of block
 */
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *label)
{
    int s_bits;
    int stack_adjust;
    int opc = label->opc;
    int mem_index = label->mem_index;
    int data_reg = label->datalo_reg;
#if TCG_TARGET_REG_BITS == 32
    int data_reg2 = label->datahi_reg;
    int addrlo_reg = label->addrlo_reg;
    int addrhi_reg = label->addrhi_reg;
#endif
    uint8_t *raddr = label->raddr;
    uint8_t **label_ptr = &label->label_ptr[0];

    s_bits = opc;

    /* resolve label address */
    *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
    }

#if TCG_TARGET_REG_BITS == 32
    tcg_out_pushi(s, mem_index);
    stack_adjust = 4;
    if (opc == 3) {
        tcg_out_push(s, data_reg2);
        stack_adjust += 4;
    }
    tcg_out_push(s, data_reg);
    stack_adjust += 4;
    if (TARGET_LONG_BITS == 64) {
        tcg_out_push(s, addrhi_reg);
        stack_adjust += 4;
    }
    tcg_out_push(s, addrlo_reg);
    stack_adjust += 4;
    tcg_out_push(s, TCG_AREG0);
    stack_adjust += 4;
#else
    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0], TCG_AREG0);
    /* The second argument is already loaded with addrlo.  */
    tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                tcg_target_call_iarg_regs[2], data_reg);
    tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], mem_index);
    stack_adjust = 0;
#endif

    /* Code generation of qemu_ld/st's slow path calling MMU helper

       PRE_PROC ...
       call MMU helper
       jmp POST_PROC (2b) : short forward jump <- GETRA()
       jmp next_code (5b) : dummy long backward jump which is never executed
       POST_PROC ... : do post-processing <- GETRA() + 7
       jmp next_code : jump to the code corresponding to next IR of qemu_ld/st
    */

    tcg_out_calli(s, (tcg_target_long)qemu_st_helpers[s_bits]);

    /* Jump to post-processing code */
    tcg_out8(s, OPC_JMP_short);
    tcg_out8(s, 5);
    /* Dummy backward jump having information of fast path's pc for MMU
       helpers */
    tcg_out8(s, OPC_JMP_long);
    *(int32_t *)s->code_ptr = (int32_t)(raddr - s->code_ptr - 4);
    s->code_ptr += 4;

    if (stack_adjust == (TCG_TARGET_REG_BITS / 8)) {
        /* Pop and discard.  This is 2 bytes smaller than the add.  */
        tcg_out_pop(s, TCG_REG_ECX);
    } else if (stack_adjust != 0) {
        tcg_out_addi(s, TCG_REG_CALL_STACK, stack_adjust);
    }

    /* Jump to the code corresponding to next IR of qemu_st */
    tcg_out_jmp(s, (tcg_target_long)raddr);
}
/*
 * Generate TB finalization at the end of block
 */
void tcg_out_tb_finalize(TCGContext *s)
{
    int i;
    TCGLabelQemuLdst *label;

    /* qemu_ld/st slow paths */
    for (i = 0; i < s->nb_qemu_ldst_labels; i++) {
        label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[i];
        if (label->is_ld) {
            tcg_out_qemu_ld_slow_path(s, label);
        } else {
            tcg_out_qemu_st_slow_path(s, label);
        }
    }
}
#endif  /* CONFIG_SOFTMMU */
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    int c, rexw = 0;

#if TCG_TARGET_REG_BITS == 64
# define OP_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
            rexw = P_REXW; /* FALLTHRU */ \
        case glue(glue(INDEX_op_, x), _i32)
#else
# define OP_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32)
#endif

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, args[0]);
        tcg_out_jmp(s, (tcg_target_long) tb_ret_addr);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            tcg_out8(s, OPC_JMP_long); /* jmp im */
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
        } else {
            /* indirect jump method */
            tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
                                 (tcg_target_long)(s->tb_next + args[0]));
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out_calli(s, args[0]);
        } else {
            /* call *reg */
            tcg_out_modrm(s, OPC_GRP5, EXT5_CALLN_Ev, args[0]);
        }
        break;
    case INDEX_op_br:
        tcg_out_jxx(s, JCC_JMP, args[0], 0);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    OP_32_64(ld8u):
        /* Note that we can ignore REXW for the zero-extend to 64-bit.  */
        tcg_out_modrm_offset(s, OPC_MOVZBL, args[0], args[1], args[2]);
        break;
    OP_32_64(ld8s):
        tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, args[0], args[1], args[2]);
        break;
    OP_32_64(ld16u):
        /* Note that we can ignore REXW for the zero-extend to 64-bit.  */
        tcg_out_modrm_offset(s, OPC_MOVZWL, args[0], args[1], args[2]);
        break;
    OP_32_64(ld16s):
        tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, args[0], args[1], args[2]);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_ld32u_i64:
#endif
    case INDEX_op_ld_i32:
        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;

    OP_32_64(st8):
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVB_EvIz,
                                 0, args[1], args[2]);
            tcg_out8(s, args[0]);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R,
                                 args[0], args[1], args[2]);
        }
        break;
    OP_32_64(st16):
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16,
                                 0, args[1], args[2]);
            tcg_out16(s, args[0]);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16,
                                 args[0], args[1], args[2]);
        }
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_st32_i64:
#endif
    case INDEX_op_st_i32:
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, args[1], args[2]);
            tcg_out32(s, args[0]);
        } else {
            tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        }
        break;

    OP_32_64(add):
        /* For 3-operand addition, use LEA.  */
        if (args[0] != args[1]) {
            TCGArg a0 = args[0], a1 = args[1], a2 = args[2], c3 = 0;

            if (const_args[2]) {
                c3 = a2, a2 = -1;
            } else if (a0 == a2) {
                /* Watch out for dest = src + dest, since we've removed
                   the matching constraint on the add.  */
                tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
                break;
            }

            tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3);
            break;
        }
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    gen_arith:
        if (const_args[2]) {
            tgen_arithi(s, c + rexw, args[0], args[2], 0);
        } else {
            tgen_arithr(s, c + rexw, args[0], args[2]);
        }
        break;

    OP_32_64(mul):
        if (const_args[2]) {
            int32_t val;
            val = args[2];
            if (val == (int8_t)val) {
                tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, args[0], args[0]);
                tcg_out8(s, val);
            } else {
                tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, args[0], args[0]);
                tcg_out32(s, val);
            }
        } else {
            tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, args[0], args[2]);
        }
        break;

    OP_32_64(div2):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]);
        break;
    OP_32_64(divu2):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]);
        break;

    OP_32_64(shl):
        c = SHIFT_SHL;
        goto gen_shift;
    OP_32_64(shr):
        c = SHIFT_SHR;
        goto gen_shift;
    OP_32_64(sar):
        c = SHIFT_SAR;
        goto gen_shift;
    OP_32_64(rotl):
        c = SHIFT_ROL;
        goto gen_shift;
    OP_32_64(rotr):
        c = SHIFT_ROR;
        goto gen_shift;
    gen_shift:
        if (const_args[2]) {
            tcg_out_shifti(s, c + rexw, args[0], args[2]);
        } else {
            tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, args[0]);
        }
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond32(s, args[2], args[0], args[1], const_args[1],
                         args[3], 0);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond32(s, args[3], args[0], args[1],
                          args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond32(s, args[5], args[0], args[1],
                          args[2], const_args[2], args[3]);
        break;

    OP_32_64(bswap16):
        tcg_out_rolw_8(s, args[0]);
        break;
    OP_32_64(bswap32):
        tcg_out_bswap32(s, args[0]);
        break;

    OP_32_64(neg):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, args[0]);
        break;
    OP_32_64(not):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, args[0]);
        break;

    OP_32_64(ext8s):
        tcg_out_ext8s(s, args[0], args[1], rexw);
        break;
    OP_32_64(ext16s):
        tcg_out_ext16s(s, args[0], args[1], rexw);
        break;
    OP_32_64(ext8u):
        tcg_out_ext8u(s, args[0], args[1]);
        break;
    OP_32_64(ext16u):
        tcg_out_ext16u(s, args[0], args[1]);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32u:
#endif
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args, 0);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args, const_args);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_MUL, args[3]);
        break;
    case INDEX_op_add2_i32:
        if (const_args[4]) {
            tgen_arithi(s, ARITH_ADD, args[0], args[4], 1);
        } else {
            tgen_arithr(s, ARITH_ADD, args[0], args[4]);
        }
        if (const_args[5]) {
            tgen_arithi(s, ARITH_ADC, args[1], args[5], 1);
        } else {
            tgen_arithr(s, ARITH_ADC, args[1], args[5]);
        }
        break;
    case INDEX_op_sub2_i32:
        if (const_args[4]) {
            tgen_arithi(s, ARITH_SUB, args[0], args[4], 1);
        } else {
            tgen_arithr(s, ARITH_SUB, args[0], args[4]);
        }
        if (const_args[5]) {
            tgen_arithi(s, ARITH_SBB, args[1], args[5], 1);
        } else {
            tgen_arithr(s, ARITH_SBB, args[1], args[5]);
        }
        break;
#else /* TCG_TARGET_REG_BITS == 64 */
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_modrm_offset(s, OPC_MOVSLQ, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW,
                                 0, args[1], args[2]);
            tcg_out32(s, args[0]);
        } else {
            tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        }
        break;
    case INDEX_op_qemu_ld32s:
        tcg_out_qemu_ld(s, args, 2 | 4);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond64(s, args[2], args[0], args[1], const_args[1],
                         args[3], 0);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond64(s, args[3], args[0], args[1],
                          args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond64(s, args[5], args[0], args[1],
                          args[2], const_args[2], args[3]);
        break;

    case INDEX_op_bswap64_i64:
        tcg_out_bswap64(s, args[0]);
        break;
    case INDEX_op_ext32u_i64:
        tcg_out_ext32u(s, args[0], args[1]);
        break;
    case INDEX_op_ext32s_i64:
        tcg_out_ext32s(s, args[0], args[1]);
        break;
#endif

    OP_32_64(deposit):
        if (args[3] == 0 && args[4] == 8) {
            /* load bits 0..7 */
            tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM,
                          args[2], args[0]);
        } else if (args[3] == 8 && args[4] == 8) {
            /* load bits 8..15 */
            tcg_out_modrm(s, OPC_MOVB_EvGv, args[2], args[0] + 4);
        } else if (args[3] == 0 && args[4] == 16) {
            /* load bits 0..15 */
            tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, args[2], args[0]);
        } else {
            tcg_abort();
        }
        break;

    default:
        tcg_abort();
    }

#undef OP_32_64
}
static const TCGTargetOpDef x86_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },
    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "qi", "r" } },
    { INDEX_op_st16_i32, { "ri", "r" } },
    { INDEX_op_st_i32, { "ri", "r" } },

    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "ri" } },
    { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "ri" } },
    { INDEX_op_xor_i32, { "r", "0", "ri" } },

    { INDEX_op_shl_i32, { "r", "0", "ci" } },
    { INDEX_op_shr_i32, { "r", "0", "ci" } },
    { INDEX_op_sar_i32, { "r", "0", "ci" } },
    { INDEX_op_rotl_i32, { "r", "0", "ci" } },
    { INDEX_op_rotr_i32, { "r", "0", "ci" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "0" } },
    { INDEX_op_bswap32_i32, { "r", "0" } },

    { INDEX_op_neg_i32, { "r", "0" } },

    { INDEX_op_not_i32, { "r", "0" } },

    { INDEX_op_ext8s_i32, { "r", "q" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "q" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_setcond_i32, { "q", "r", "ri" } },

    { INDEX_op_deposit_i32, { "Q", "0", "Q" } },
#if TCG_TARGET_HAS_movcond_i32
    { INDEX_op_movcond_i32, { "r", "r", "ri", "r", "0" } },
#endif

#if TCG_TARGET_REG_BITS == 32
    { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
    { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },
#else
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "ri", "r" } },
    { INDEX_op_st16_i64, { "ri", "r" } },
    { INDEX_op_st32_i64, { "ri", "r" } },
    { INDEX_op_st_i64, { "re", "r" } },

    { INDEX_op_add_i64, { "r", "0", "re" } },
    { INDEX_op_mul_i64, { "r", "0", "re" } },
    { INDEX_op_div2_i64, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "a", "d", "0", "1", "r" } },
    { INDEX_op_sub_i64, { "r", "0", "re" } },
    { INDEX_op_and_i64, { "r", "0", "reZ" } },
    { INDEX_op_or_i64, { "r", "0", "re" } },
    { INDEX_op_xor_i64, { "r", "0", "re" } },

    { INDEX_op_shl_i64, { "r", "0", "ci" } },
    { INDEX_op_shr_i64, { "r", "0", "ci" } },
    { INDEX_op_sar_i64, { "r", "0", "ci" } },
    { INDEX_op_rotl_i64, { "r", "0", "ci" } },
    { INDEX_op_rotr_i64, { "r", "0", "ci" } },

    { INDEX_op_brcond_i64, { "r", "re" } },
    { INDEX_op_setcond_i64, { "r", "r", "re" } },

    { INDEX_op_bswap16_i64, { "r", "0" } },
    { INDEX_op_bswap32_i64, { "r", "0" } },
    { INDEX_op_bswap64_i64, { "r", "0" } },
    { INDEX_op_neg_i64, { "r", "0" } },
    { INDEX_op_not_i64, { "r", "0" } },

    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },
    { INDEX_op_ext8u_i64, { "r", "r" } },
    { INDEX_op_ext16u_i64, { "r", "r" } },
    { INDEX_op_ext32u_i64, { "r", "r" } },

    { INDEX_op_deposit_i64, { "Q", "0", "Q" } },
    { INDEX_op_movcond_i64, { "r", "r", "re", "r", "0" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L" } },
#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif
    { -1 },
};
static int tcg_target_callee_save_regs[] = {
#if TCG_TARGET_REG_BITS == 64
    TCG_REG_RBP,
    TCG_REG_RBX,
#if defined(_WIN64)
    TCG_REG_RDI,
    TCG_REG_RSI,
#endif
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14, /* Currently used for the global env. */
    TCG_REG_R15,
#else
    TCG_REG_EBP, /* Currently used for the global env. */
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
#endif
};
/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit.  */

#define PUSH_SIZE \
    ((1 + ARRAY_SIZE(tcg_target_callee_save_regs)) \
     * (TCG_TARGET_REG_BITS / 8))

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & ~(TCG_TARGET_STACK_ALIGN - 1))
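/*
 * Worked example (added for illustration, with assumed values): on
 * x86_64 with six callee-saved registers, PUSH_SIZE is (1 + 6) * 8 =
 * 56 bytes (return address plus pushes).  Assuming
 * TCG_STATIC_CALL_ARGS_SIZE is 128 and CPU_TEMP_BUF_NLONGS is 128,
 * FRAME_SIZE rounds 56 + 128 + 1024 = 1208 up to the 16-byte-aligned
 * 1216; the prologue's explicit stack adjustment is then
 * FRAME_SIZE - PUSH_SIZE.
 */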
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i, stack_addend;

    /* TB prologue */

    /* Reserve some stack space, also for TCG temps.  */
    stack_addend = FRAME_SIZE - PUSH_SIZE;
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    /* Save all callee saved registers.  */
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_push(s, tcg_target_callee_save_regs[i]);
    }

#if TCG_TARGET_REG_BITS == 32
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
               (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
    /* jmp *tb.  */
    tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP,
                         (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
                         + stack_addend);
#else
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
    /* jmp *tb.  */
    tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
#endif

    /* TB epilogue */
    tb_ret_addr = s->code_ptr;

    tcg_out_addi(s, TCG_REG_CALL_STACK, stack_addend);

    for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
        tcg_out_pop(s, tcg_target_callee_save_regs[i]);
    }
    tcg_out_opc(s, OPC_RET, 0, 0, 0);

#if !defined(CONFIG_SOFTMMU)
    /* Try to set up a segment register to point to GUEST_BASE.  */
    if (GUEST_BASE) {
        setup_guest_base_seg();
    }
#endif
}
*s
)
2246 #if !defined(CONFIG_USER_ONLY)
2248 if ((1 << CPU_TLB_ENTRY_BITS
) != sizeof(CPUTLBEntry
))
2252 if (TCG_TARGET_REG_BITS
== 64) {
2253 tcg_regset_set32(tcg_target_available_regs
[TCG_TYPE_I32
], 0, 0xffff);
2254 tcg_regset_set32(tcg_target_available_regs
[TCG_TYPE_I64
], 0, 0xffff);
2256 tcg_regset_set32(tcg_target_available_regs
[TCG_TYPE_I32
], 0, 0xff);
2259 tcg_regset_clear(tcg_target_call_clobber_regs
);
2260 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_EAX
);
2261 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_EDX
);
2262 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_ECX
);
2263 if (TCG_TARGET_REG_BITS
== 64) {
2264 #if !defined(_WIN64)
2265 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_RDI
);
2266 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_RSI
);
2268 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R8
);
2269 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R9
);
2270 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R10
);
2271 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R11
);
2274 tcg_regset_clear(s
->reserved_regs
);
2275 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_CALL_STACK
);
2277 tcg_add_target_add_op_defs(x86_op_defs
);
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    tcg_target_long func_start __attribute__((packed));
    tcg_target_long func_len __attribute__((packed));
    uint8_t def_cfa[4];
    uint8_t reg_ofs[14];
} DebugFrameFDE;

typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDE fde;
} DebugFrame;
#if !defined(__ELF__)
    /* Host machine without ELF. */
#elif TCG_TARGET_REG_BITS == 64
#define ELF_HOST_MACHINE EM_X86_64
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = 0x78,             /* sleb128 -8 */
    .cie.return_column = 16,

    .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
    .fde.def_cfa = {
        12, 7,                          /* DW_CFA_def_cfa %rsp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde.reg_ofs = {
        0x90, 1,                        /* DW_CFA_offset, %rip, -8 */
        /* The following ordering must match tcg_target_callee_save_regs.  */
        0x86, 2,                        /* DW_CFA_offset, %rbp, -16 */
        0x83, 3,                        /* DW_CFA_offset, %rbx, -24 */
        0x8c, 4,                        /* DW_CFA_offset, %r12, -32 */
        0x8d, 5,                        /* DW_CFA_offset, %r13, -40 */
        0x8e, 6,                        /* DW_CFA_offset, %r14, -48 */
        0x8f, 7,                        /* DW_CFA_offset, %r15, -56 */
    }
};
#else
#define ELF_HOST_MACHINE EM_386
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = 0x7c,             /* sleb128 -4 */
    .cie.return_column = 8,

    .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
    .fde.def_cfa = {
        12, 4,                          /* DW_CFA_def_cfa %esp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde.reg_ofs = {
        0x88, 1,                        /* DW_CFA_offset, %eip, -4 */
        /* The following ordering must match tcg_target_callee_save_regs.  */
        0x85, 2,                        /* DW_CFA_offset, %ebp, -8 */
        0x83, 3,                        /* DW_CFA_offset, %ebx, -12 */
        0x86, 4,                        /* DW_CFA_offset, %esi, -16 */
        0x87, 5,                        /* DW_CFA_offset, %edi, -20 */
    }
};
#endif
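/*
 * Note (added for clarity): each reg_ofs pair is a DW_CFA_offset
 * opcode (0x80 | DWARF register number) followed by a uleb128 offset
 * in data_align units.  In the x86_64 frame, "0x86, 2" means DWARF
 * register 6 (%rbp) is saved at 2 * -8 = -16 from the CFA, matching
 * the push order in tcg_target_callee_save_regs.
 */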
#if defined(ELF_HOST_MACHINE)
void tcg_register_jit(void *buf, size_t buf_size)
{
    /* We're expecting a 2 byte uleb128 encoded value.  */
    assert(FRAME_SIZE >> 14 == 0);

    debug_frame.fde.func_start = (tcg_target_long) buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
#endif