/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "tcg-pool.inc.c"

int arm_arch = __ARM_ARCH;

#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif

/* ??? Ought to think about changing CONFIG_SOFTMMU to always defined.  */
#ifdef CONFIG_SOFTMMU
# define USING_SOFTMMU 1
#else
# define USING_SOFTMMU 0
#endif

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%pc",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4, TCG_REG_R5, TCG_REG_R6, TCG_REG_R7,
    TCG_REG_R8, TCG_REG_R9, TCG_REG_R10, TCG_REG_R11,
    TCG_REG_R13, TCG_REG_R0, TCG_REG_R1, TCG_REG_R2,
    TCG_REG_R3, TCG_REG_R12, TCG_REG_R14,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};

#define TCG_REG_TMP  TCG_REG_R12

enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,  /* Unsigned greater or equal */
    COND_CC = 0x3,  /* Unsigned less than */
    COND_MI = 0x4,  /* Negative */
    COND_PL = 0x5,  /* Zero or greater */
    COND_VS = 0x6,  /* Overflow */
    COND_VC = 0x7,  /* No overflow */
    COND_HI = 0x8,  /* Unsigned greater than */
    COND_LS = 0x9,  /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
};

#define TO_CPSR (1 << 20)

#define SHIFT_IMM_LSL(im)  (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)  (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)  (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)  (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)  (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)  (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)  (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)  (((rs) << 8) | 0x70)

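/*
 * Worked example (illustrative, not from the original source): these
 * macros build the "operand2" shifter field of a data-processing insn.
 * SHIFT_IMM_LSL(3) is (3 << 7) | 0x00 = 0x180, i.e. "lsl #3", while
 * SHIFT_REG_LSR(TCG_REG_R4) is (4 << 8) | 0x30, i.e. "lsr r4".  Bits
 * [6:5] select the shift type and bit 4 selects an immediate (0) or
 * register (1) shift amount.
 */
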
typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_CLZ       = 0x016f0f10,
    INSN_RBIT      = 0x06ff0f30,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,

    INSN_DMB_ISH   = 0xf57ff05b,
    INSN_DMB_MCR   = 0xee070fba,

    /* Architected nop introduced in v6k.  */
    /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
       also Just So Happened to do nothing on pre-v6k so that we
       don't need to conditionalize it?  */
    INSN_NOP_v6k   = 0xe320f000,
    /* Otherwise the assembler uses mov r0,r0 */
    INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,
} ARMInsn;

#define INSN_NOP   (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)

static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};

static inline bool reloc_pc24(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, code_ptr) - 8) >> 2;
    if (offset == sextract32(offset, 0, 24)) {
        *code_ptr = (*code_ptr & ~0xffffff) | (offset & 0xffffff);
        return true;
    }
    return false;
}

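/*
 * Illustration (assumed values, not from the original source): for a
 * B/BL at code_ptr whose target lies 0x108 bytes ahead, the byte diff
 * is 0x108; subtracting the 8-byte ARM pipeline offset and shifting
 * right by 2 gives 0x40, which here fits in the signed 24-bit
 * immediate field patched in above.
 */
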
static inline bool reloc_pc13(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    ptrdiff_t offset = tcg_ptr_byte_diff(target, code_ptr) - 8;

    if (offset >= -0xfff && offset <= 0xfff) {
        tcg_insn_unit insn = *code_ptr;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 12, offset);
        *code_ptr = insn;
        return true;
    }
    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);

    if (type == R_ARM_PC24) {
        return reloc_pc24(code_ptr, (tcg_insn_unit *)value);
    } else if (type == R_ARM_PC13) {
        return reloc_pc13(code_ptr, (tcg_insn_unit *)value);
    } else {
        g_assert_not_reached();
    }
}

#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800

/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch (*ct_str++) {
    case 'I':
        ct->ct |= TCG_CT_CONST_ARM;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_INV;
        break;
    case 'N': /* The gcc constraint letter is L, already used here.  */
        ct->ct |= TCG_CT_CONST_NEG;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;

    case 'r':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffff;
        break;

    /* qemu_ld address */
    case 'l':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffff;
#ifdef CONFIG_SOFTMMU
        /* r0-r2,lr will be overwritten when reading the tlb entry,
           so don't use these.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14);
#endif
        break;

    /* qemu_st address & data */
    case 's':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffff;
        /* r0-r2 will be overwritten when reading the tlb entry (softmmu only)
           and r0-r1 when doing the byte swapping, so don't use these.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#if defined(CONFIG_SOFTMMU)
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
#if TARGET_LONG_BITS == 64
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#endif
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14);
#endif
        break;

    default:
        return NULL;
    }
    return ct_str;
}

static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << n) | (val >> (32 - n));
}

/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   right-rotated by an even amount between 0 and 30.  Return the
   rotation to apply, or -1 if the immediate cannot be encoded.  */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0)
        return 0;
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0)
        return 32 - shift;
    /* now try harder with rotations */
    if ((rotl(imm, 2) & ~0xff) == 0)
        return 2;
    if ((rotl(imm, 4) & ~0xff) == 0)
        return 4;
    if ((rotl(imm, 6) & ~0xff) == 0)
        return 6;
    /* imm can't be encoded */
    return -1;
}

static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}

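/*
 * Worked example (illustrative, not from the original source):
 * 0x00ff0000 has ctz32() == 16, so shift = 16 and (imm >> 16) == 0xff
 * fits; encode_imm() returns 32 - 16 = 16.  A caller then emits
 * rotl(0x00ff0000, 16) | (16 << 7), i.e. the 8-bit value 0xff together
 * with the even right-rotation 16 encoded in the rotate field.
 */
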
/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else {
        return 0;
    }
}

static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                    (rn << 16) | (rd << 12) | shift | rm);
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, INSN_NOP);
}

static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_bx(TCGContext *s, int cond, TCGReg rn)
{
    /* Unless the C portion of QEMU is compiled as thumb, we don't
       actually need true BX semantics; merely a branch to an address
       held in a register.  */
    if (use_armv5t_instructions) {
        tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
    } else {
        tcg_out_mov_reg(s, cond, TCG_REG_PC, rn);
    }
}

static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                    (rn << 16) | (rd << 12) | im);
}

/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point.  */
static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);
}

static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = 1;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}

static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                             TCGReg rn, int imm12, bool p, bool w)
{
    bool u = 1;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);
}

static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ldrd_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ldrd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ldrd_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_strd_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_strd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}

/* Register pre-increment with base writeback.  */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_movi_pool(TCGContext *s, int cond, int rd, uint32_t arg)
{
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
}

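/*
 * Sketch of the mechanism (illustrative): the R_ARM_PC13 entry created
 * by new_pool_label() later patches the zero offset of this
 * "ldr rd, [pc, #0]" via reloc_pc13() once the constant pool is emitted
 * after the translation block.
 */
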
static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
{
    int rot, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;

    /* Check a single MOV/MVN before anything else.  */
    rot = encode_imm(arg);
    if (rot >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0,
                        rotl(arg, rot) | (rot << 7));
        return;
    }
    rot = encode_imm(~arg);
    if (rot >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0,
                        rotl(~arg, rot) | (rot << 7));
        return;
    }

    /* Check for a pc-relative address.  This will usually be the TB,
       or within the TB, which is immediately before the code block.  */
    diff = arg - ((intptr_t)s->code_ptr + 8);
    if (diff >= 0) {
        rot = encode_imm(diff);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC,
                            rotl(diff, rot) | (rot << 7));
            return;
        }
    } else {
        rot = encode_imm(-diff);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC,
                            rotl(-diff, rot) | (rot << 7));
            return;
        }
    }

    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* Look for sequences of two insns.  If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor.  */
    tt0 = arg;
    opc = ARITH_MOV;
    if (ctpop32(arg) > 16) {
        tt0 = ~arg;
        opc = ARITH_MVN;
    }
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
    if (tt2 == 0) {
        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);
        return;
    }

    /* Otherwise, drop it into the constant pool.  */
    tcg_out_movi_pool(s, cond, rd, arg);
}

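/*
 * Worked example for the two-insn path (illustrative): arg = 0x00ff00ff
 * gives sh1 = 0, tt1 = 0x00ff0000, sh2 = 16, tt2 = 0, so we emit
 *     mov rd, #0x000000ff
 *     eor rd, rd, #0x00ff0000
 * rather than falling back to the constant pool.
 */
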
static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst,
                                  TCGArg lhs, TCGArg rhs, int rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rI" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        tcg_debug_assert(rot >= 0);
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv,
                            TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIK" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            rhs = ~rhs;
            rot = encode_imm(rhs);
            tcg_debug_assert(rot >= 0);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg,
                            TCGArg dst, TCGArg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIN" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            rhs = -rhs;
            rot = encode_imm(rhs);
            tcg_debug_assert(rot >= 0);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd,
                                 TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && d == n then UNPREDICTABLE;  */
    if (!use_armv6_instructions && rd == rn) {
        if (rd == rm) {
            /* rd == rn == rm; copy an input to tmp first.  */
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rm = rn = TCG_REG_TMP;
        } else {
            rn = rm;
            rm = rd;
        }
    }
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}

static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static inline void tcg_out_sdiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static inline void tcg_out_udiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static inline void tcg_out_ext8s(TCGContext *s, int cond,
                                 int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxtb */
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));
    }
}

static inline void tcg_out_ext8u(TCGContext *s, int cond,
                                 int rd, int rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}

static inline void tcg_out_ext16s(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxth */
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));
    }
}

static inline void tcg_out_ext16u(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));
    }
}

static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_ASR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}

static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}

/* swap the two low bytes assuming that the two high input bytes and the
   two high output bytes can hold any value.  */
static inline void tcg_out_bswap16st(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
        tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));
    }
}

static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev */
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16));
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8));
    }
}

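/*
 * Illustration of the classic pre-v6 4-insn byte reversal (assumed
 * example value, not from the original source), with rn = 0x11223344:
 *   eor tmp, rn, rn, ror #16   ; tmp = 0x22662266
 *   bic tmp, tmp, #0x00ff0000  ; tmp = 0x22002266
 *   mov rd, rn, ror #8         ; rd  = 0x44112233
 *   eor rd, rd, tmp, lsr #8    ; rd  = 0x44332211
 * (The BIC immediate 0xff | 0x800 encodes 0xff ror 16 = 0x00ff0000.)
 */
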
static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15.  */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}

static inline void tcg_out_extract(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len)
{
    /* ubfx */
    tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((len - 1) << 16));
}

static inline void tcg_out_sextract(TCGContext *s, int cond, TCGReg rd,
                                    TCGArg a1, int ofs, int len)
{
    /* sbfx */
    tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((len - 1) << 16));
}

static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st16(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st16_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld8_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st8(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st8_12(s, cond, rd, rn, offset);
}

/* The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
 */
static void tcg_out_goto(TCGContext *s, int cond, tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);

    if ((addri & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b(s, cond, disp);
        return;
    }
    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
}

/* The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range.
 */
static void tcg_out_call(TCGContext *s, tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
        if (addri & 1) {
            /* Use BLX if the target is in Thumb mode */
            if (!use_armv5t_instructions) {
                tcg_abort();
            }
            tcg_out_blx_imm(s, disp);
        } else {
            tcg_out_bl(s, COND_AL, disp);
        }
    } else if (use_armv7_instructions) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
        tcg_out_blx(s, COND_AL, TCG_REG_TMP);
    } else {
        /* ??? Know that movi_pool emits exactly 1 insn.  */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 0);
        tcg_out_movi_pool(s, COND_AL, TCG_REG_PC, addri);
    }
}

static inline void tcg_out_goto_label(TCGContext *s, int cond, TCGLabel *l)
{
    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b(s, cond, 0);
    }
}

static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else if (use_armv6_instructions) {
        tcg_out32(s, INSN_DMB_MCR);
    }
}

static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    TCGReg al = args[0];
    TCGReg ah = args[1];
    TCGArg bl = args[2];
    TCGArg bh = args[3];
    TCGCond cond = args[4];
    int const_bl = const_args[2];
    int const_bh = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        /* We perform a conditional comparison.  If the high half is
           equal, then overwrite the flags with the comparison of the
           low half.  The resulting flags cover the whole.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
        return cond;

    case TCG_COND_LT:
    case TCG_COND_GE:
        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return cond;

    case TCG_COND_LE:
    case TCG_COND_GT:
        /* Similar, but with swapped arguments, via reversed subtract.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
                       TCG_REG_TMP, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return tcg_swap_cond(cond);

    default:
        g_assert_not_reached();
    }
}

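/*
 * Illustration (sketch, not from the original source): for the
 * equality/unsigned group the emitted sequence is
 *   cmp   ah, bh
 *   cmpeq al, bl
 * so the low halves overwrite the flags only when the high halves are
 * equal, and a single condition code then covers the full 64 bits.
 */
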
#ifdef CONFIG_SOFTMMU
#include "tcg-ldst.inc.c"

/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,

    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LESL] = helper_le_ldul_mmu,

    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BESL] = helper_be_ldul_mmu,
};

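/*
 * Note (sketch): the table is indexed by the MO_BSWAP | MO_(S)SIZE bits
 * of the memory op, so e.g. qemu_ld_helpers[MO_LEUL] resolves to
 * helper_le_ldul_mmu; see the masking in the slow paths below.
 */
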
/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};

/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * argreg is where we want to put this argument, arg is the argument itself.
 * Return value is the updated argreg ready for the next call.
 * Note that argreg 0..3 are real registers, 4+ on stack.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG)                \
static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg)              \
{                                                                          \
    if (argreg < 4) {                                                      \
        MOV_ARG(s, COND_AL, argreg, arg);                                  \
    } else {                                                               \
        int ofs = (argreg - 4) * 4;                                        \
        EXT_ARG;                                                           \
        tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE);            \
        tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs);         \
    }                                                                      \
    return argreg + 1;                                                     \
}

DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
    (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u,
    (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u,
    (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )

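/*
 * Usage sketch (illustrative): each instance advances argreg past the
 * value it places, e.g. in the slow paths below:
 *   argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
 *   argreg = tcg_out_arg_imm32(s, argreg, oi);
 * The EXT_ARG expression re-extends or copies the value through
 * TCG_REG_TMP before it is spilled to a stack slot when argreg >= 4.
 */
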
static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
                                TCGReg arglo, TCGReg arghi)
{
    /* 64 bit arguments must go in even/odd register pairs
     * and in 8-aligned stack slots.
     */
    if (argreg & 1) {
        argreg++;
    }
    if (use_armv6_instructions && argreg >= 4
        && (arglo & 1) == 0 && arghi == arglo + 1) {
        tcg_out_strd_8(s, COND_AL, arglo,
                       TCG_REG_CALL_STACK, (argreg - 4) * 4);
        return argreg + 2;
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, arglo);
        argreg = tcg_out_arg_reg32(s, argreg, arghi);
        return argreg;
    }
}

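/*
 * Illustration (per the AAPCS calling convention): with argreg == 1 a
 * 64-bit value is bumped to the r2/r3 pair; once argreg reaches 4 and
 * the lo:hi registers form an aligned even/odd pair, a single strd
 * stores both halves into the 8-aligned stack slot.
 */
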
#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)

/* We expect tlb_mask to be before tlb_table.  */
QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) <
                  offsetof(CPUArchState, tlb_mask));

/* We expect to use a 20-bit unsigned offset from ENV.  */
QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1])
                  > 0xfffff);

1232 containing the addend of the tlb entry. Clobbers R0, R1, R2, TMP. */
1234 static TCGReg
tcg_out_tlb_read(TCGContext
*s
, TCGReg addrlo
, TCGReg addrhi
,
1235 TCGMemOp opc
, int mem_index
, bool is_load
)
1237 int cmp_off
= (is_load
? offsetof(CPUTLBEntry
, addr_read
)
1238 : offsetof(CPUTLBEntry
, addr_write
));
1239 int mask_off
= offsetof(CPUArchState
, tlb_mask
[mem_index
]);
1240 int table_off
= offsetof(CPUArchState
, tlb_table
[mem_index
]);
1241 TCGReg mask_base
= TCG_AREG0
, table_base
= TCG_AREG0
;
1242 unsigned s_bits
= opc
& MO_SIZE
;
1243 unsigned a_bits
= get_alignment_bits(opc
);
1245 if (table_off
> 0xfff) {
1246 int mask_hi
= mask_off
& ~0xfff;
1247 int table_hi
= table_off
& ~0xfff;
1250 table_base
= TCG_REG_R2
;
1251 if (mask_hi
== table_hi
) {
1252 mask_base
= table_base
;
1253 } else if (mask_hi
) {
1254 mask_base
= TCG_REG_TMP
;
1255 rot
= encode_imm(mask_hi
);
1257 tcg_out_dat_imm(s
, COND_AL
, ARITH_ADD
, mask_base
, TCG_AREG0
,
1258 rotl(mask_hi
, rot
) | (rot
<< 7));
1260 rot
= encode_imm(table_hi
);
1262 tcg_out_dat_imm(s
, COND_AL
, ARITH_ADD
, table_base
, TCG_AREG0
,
1263 rotl(table_hi
, rot
) | (rot
<< 7));
1265 mask_off
-= mask_hi
;
1266 table_off
-= table_hi
;
1269 /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
1270 tcg_out_ld(s
, TCG_TYPE_I32
, TCG_REG_TMP
, mask_base
, mask_off
);
1271 tcg_out_ld(s
, TCG_TYPE_I32
, TCG_REG_R2
, table_base
, table_off
);
1273 /* Extract the tlb index from the address into TMP. */
1274 tcg_out_dat_reg(s
, COND_AL
, ARITH_AND
, TCG_REG_TMP
, TCG_REG_TMP
, addrlo
,
1275 SHIFT_IMM_LSR(TARGET_PAGE_BITS
- CPU_TLB_ENTRY_BITS
));
1278 * Add the tlb_table pointer, creating the CPUTLBEntry address in R2.
1279 * Load the tlb comparator into R0/R1 and the fast path addend into R2.
1282 if (use_armv6_instructions
&& TARGET_LONG_BITS
== 64) {
1283 tcg_out_ldrd_rwb(s
, COND_AL
, TCG_REG_R0
, TCG_REG_R2
, TCG_REG_TMP
);
1285 tcg_out_ld32_rwb(s
, COND_AL
, TCG_REG_R0
, TCG_REG_R2
, TCG_REG_TMP
);
1288 tcg_out_dat_reg(s
, COND_AL
, ARITH_ADD
,
1289 TCG_REG_R2
, TCG_REG_R2
, TCG_REG_TMP
, 0);
1290 if (use_armv6_instructions
&& TARGET_LONG_BITS
== 64) {
1291 tcg_out_ldrd_8(s
, COND_AL
, TCG_REG_R0
, TCG_REG_R2
, cmp_off
);
1293 tcg_out_ld32_12(s
, COND_AL
, TCG_REG_R0
, TCG_REG_R2
, cmp_off
);
1296 if (!use_armv6_instructions
&& TARGET_LONG_BITS
== 64) {
1297 tcg_out_ld32_12(s
, COND_AL
, TCG_REG_R1
, TCG_REG_R2
, cmp_off
+ 4);
1300 /* Load the tlb addend. */
1301 tcg_out_ld32_12(s
, COND_AL
, TCG_REG_R2
, TCG_REG_R2
,
1302 offsetof(CPUTLBEntry
, addend
));
1304 /* Check alignment. We don't support inline unaligned acceses,
1305 but we can easily support overalignment checks. */
1306 if (a_bits
< s_bits
) {
1310 if (use_armv7_instructions
) {
1311 tcg_target_ulong mask
= ~(TARGET_PAGE_MASK
| ((1 << a_bits
) - 1));
1312 int rot
= encode_imm(mask
);
1315 tcg_out_dat_imm(s
, COND_AL
, ARITH_BIC
, TCG_REG_TMP
, addrlo
,
1316 rotl(mask
, rot
) | (rot
<< 7));
1318 tcg_out_movi32(s
, COND_AL
, TCG_REG_TMP
, mask
);
1319 tcg_out_dat_reg(s
, COND_AL
, ARITH_BIC
, TCG_REG_TMP
,
1320 addrlo
, TCG_REG_TMP
, 0);
1322 tcg_out_dat_reg(s
, COND_AL
, ARITH_CMP
, 0, TCG_REG_R0
, TCG_REG_TMP
, 0);
1325 tcg_out_dat_imm(s
, COND_AL
, ARITH_TST
, 0, addrlo
,
1328 tcg_out_dat_reg(s
, (a_bits
? COND_EQ
: COND_AL
), ARITH_CMP
,
1329 0, TCG_REG_R0
, TCG_REG_TMP
,
1330 SHIFT_IMM_LSL(TARGET_PAGE_BITS
));
1333 if (TARGET_LONG_BITS
== 64) {
1334 tcg_out_dat_reg(s
, COND_EQ
, ARITH_CMP
, 0, TCG_REG_R1
, addrhi
, 0);
/* Record the context of a call to the out of line helper code for the slow
   path for a load or store, so that we can later generate the correct
   helper code.  */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
                                TCGReg datalo, TCGReg datahi, TCGReg addrlo,
                                TCGReg addrhi, tcg_insn_unit *raddr,
                                tcg_insn_unit *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);
    void *func;

    if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
        return false;
    }

    argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }
    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* For armv6 we can use the canonical unsigned helpers and minimize
       icache usage.  For pre-armv6, use the signed helpers since we do
       not have a single insn sign-extend.  */
    if (use_armv6_instructions) {
        func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)];
        if (opc & MO_SIGN) {
            opc = MO_UL;
        }
    }
    tcg_out_call(s, func);

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_out_ext8s(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_SW:
        tcg_out_ext16s(s, COND_AL, datalo, TCG_REG_R0);
        break;
    default:
        tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_Q:
        if (datalo != TCG_REG_R1) {
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
        } else if (datahi != TCG_REG_R0) {
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        } else {
            tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP);
        }
        break;
    }

    tcg_out_goto(s, COND_AL, lb->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);

    if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
        return false;
    }

    argreg = TCG_REG_R0;
    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SIZE) {
    case MO_8:
        argreg = tcg_out_arg_reg8(s, argreg, datalo);
        break;
    case MO_16:
        argreg = tcg_out_arg_reg16(s, argreg, datalo);
        break;
    case MO_32:
    default:
        argreg = tcg_out_arg_reg32(s, argreg, datalo);
        break;
    case MO_64:
        argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi);
        break;
    }

    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* Tail-call to the helper, which will return to the fast path.  */
    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
    return true;
}
#endif /* SOFTMMU */

static inline void tcg_out_qemu_ld_index(TCGContext *s, TCGMemOp opc,
                                         TCGReg datalo, TCGReg datahi,
                                         TCGReg addrlo, TCGReg addend)
{
    TCGMemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_SB:
        tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_UW:
        tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_SW:
        if (bswap) {
            tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
            tcg_out_bswap16s(s, COND_AL, datalo, datalo);
        } else {
            tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
        }
        break;
    case MO_UL:
    default:
        tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_Q:
        {
            TCGReg dl = (bswap ? datahi : datalo);
            TCGReg dh = (bswap ? datalo : datahi);

            /* Avoid ldrd for user-only emulation, to handle unaligned.  */
            if (USING_SOFTMMU && use_armv6_instructions
                && (dl & 1) == 0 && dh == dl + 1) {
                tcg_out_ldrd_r(s, COND_AL, dl, addrlo, addend);
            } else if (dl != addend) {
                tcg_out_ld32_rwb(s, COND_AL, dl, addend, addrlo);
                tcg_out_ld32_12(s, COND_AL, dh, addend, 4);
            } else {
                tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
                                addend, addrlo, SHIFT_IMM_LSL(0));
                tcg_out_ld32_12(s, COND_AL, dl, TCG_REG_TMP, 0);
                tcg_out_ld32_12(s, COND_AL, dh, TCG_REG_TMP, 4);
            }
            if (bswap) {
                tcg_out_bswap32(s, COND_AL, dl, dl);
                tcg_out_bswap32(s, COND_AL, dh, dh);
            }
        }
        break;
    }
}

static inline void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc,
                                          TCGReg datalo, TCGReg datahi,
                                          TCGReg addrlo)
{
    TCGMemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_SB:
        tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_UW:
        tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_SW:
        if (bswap) {
            tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
            tcg_out_bswap16s(s, COND_AL, datalo, datalo);
        } else {
            tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_UL:
    default:
        tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_Q:
        {
            TCGReg dl = (bswap ? datahi : datalo);
            TCGReg dh = (bswap ? datalo : datahi);

            /* Avoid ldrd for user-only emulation, to handle unaligned.  */
            if (USING_SOFTMMU && use_armv6_instructions
                && (dl & 1) == 0 && dh == dl + 1) {
                tcg_out_ldrd_8(s, COND_AL, dl, addrlo, 0);
            } else if (dl == addrlo) {
                tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
                tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
            } else {
                tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
                tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
            }
            if (bswap) {
                tcg_out_bswap32(s, COND_AL, dl, dl);
                tcg_out_bswap32(s, COND_AL, dh, dh);
            }
        }
        break;
    }
}

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    TCGMemOpIdx oi;
    TCGMemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    TCGReg addend;
    tcg_insn_unit *label_ptr;
#endif

    datalo = *args++;
    datahi = (is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1);

    /* This is a conditional BL only to load a pointer within this opcode
       into LR for the slow path.  We will not be using the value for a
       tail call.  */
    label_ptr = s->code_ptr;
    tcg_out_bl(s, COND_NE, 0);

    tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend);

    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
        tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP);
    } else {
        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
    }
#endif
}

static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, TCGMemOp opc,
                                         TCGReg datalo, TCGReg datahi,
                                         TCGReg addrlo, TCGReg addend)
{
    TCGMemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_r(s, cond, datalo, addrlo, addend);
        break;
    case MO_16:
        if (bswap) {
            tcg_out_bswap16st(s, cond, TCG_REG_R0, datalo);
            tcg_out_st16_r(s, cond, TCG_REG_R0, addrlo, addend);
        } else {
            tcg_out_st16_r(s, cond, datalo, addrlo, addend);
        }
        break;
    case MO_32:
    default:
        if (bswap) {
            tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
            tcg_out_st32_r(s, cond, TCG_REG_R0, addrlo, addend);
        } else {
            tcg_out_st32_r(s, cond, datalo, addrlo, addend);
        }
        break;
    case MO_64:
        /* Avoid strd for user-only emulation, to handle unaligned.  */
        if (bswap) {
            tcg_out_bswap32(s, cond, TCG_REG_R0, datahi);
            tcg_out_st32_rwb(s, cond, TCG_REG_R0, addend, addrlo);
            tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, cond, TCG_REG_R0, addend, 4);
        } else if (USING_SOFTMMU && use_armv6_instructions
                   && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_r(s, cond, datalo, addrlo, addend);
        } else {
            tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
            tcg_out_st32_12(s, cond, datahi, addend, 4);
        }
        break;
    }
}

static inline void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc,
                                          TCGReg datalo, TCGReg datahi,
                                          TCGReg addrlo)
{
    TCGMemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_16:
        if (bswap) {
            tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addrlo, 0);
        } else {
            tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_32:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_64:
        /* Avoid strd for user-only emulation, to handle unaligned.  */
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datahi);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 4);
        } else if (USING_SOFTMMU && use_armv6_instructions
                   && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
            tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
        }
        break;
    }
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    TCGMemOpIdx oi;
    TCGMemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    TCGReg addend;
    tcg_insn_unit *label_ptr;
#endif

    datalo = *args++;
    datahi = (is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0);

    tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, addrlo, addend);

    /* The conditional call must come last, as we're going to return here.  */
    label_ptr = s->code_ptr;
    tcg_out_bl(s, COND_NE, 0);

    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
        tcg_out_qemu_st_index(s, COND_AL, opc, datalo,
                              datahi, addrlo, TCG_REG_TMP);
    } else {
        tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo);
    }
#endif
}

static tcg_insn_unit *tb_ret_addr;

1763 static inline void tcg_out_op(TCGContext
*s
, TCGOpcode opc
,
1764 const TCGArg
*args
, const int *const_args
)
1766 TCGArg a0
, a1
, a2
, a3
, a4
, a5
;
1770 case INDEX_op_exit_tb
:
1771 /* Reuse the zeroing that exists for goto_ptr. */
1774 tcg_out_goto(s
, COND_AL
, s
->code_gen_epilogue
);
1776 tcg_out_movi32(s
, COND_AL
, TCG_REG_R0
, args
[0]);
1777 tcg_out_goto(s
, COND_AL
, tb_ret_addr
);
1780 case INDEX_op_goto_tb
:
1782 /* Indirect jump method */
1783 intptr_t ptr
, dif
, dil
;
1784 TCGReg base
= TCG_REG_PC
;
1786 tcg_debug_assert(s
->tb_jmp_insn_offset
== 0);
1787 ptr
= (intptr_t)(s
->tb_jmp_target_addr
+ args
[0]);
1788 dif
= ptr
- ((intptr_t)s
->code_ptr
+ 8);
1789 dil
= sextract32(dif
, 0, 12);
1791 /* The TB is close, but outside the 12 bits addressable by
1792 the load. We can extend this to 20 bits with a sub of a
1793 shifted immediate from pc. In the vastly unlikely event
1794 the code requires more than 1MB, we'll use 2 insns and
1797 tcg_out_movi32(s
, COND_AL
, base
, ptr
- dil
);
1799 tcg_out_ld32_12(s
, COND_AL
, TCG_REG_PC
, base
, dil
);
1800 set_jmp_reset_offset(s
, args
[0]);
1803 case INDEX_op_goto_ptr
:
1804 tcg_out_bx(s
, COND_AL
, args
[0]);
1807 tcg_out_goto_label(s
, COND_AL
, arg_label(args
[0]));
1810 case INDEX_op_ld8u_i32
:
1811 tcg_out_ld8u(s
, COND_AL
, args
[0], args
[1], args
[2]);
1813 case INDEX_op_ld8s_i32
:
1814 tcg_out_ld8s(s
, COND_AL
, args
[0], args
[1], args
[2]);
1816 case INDEX_op_ld16u_i32
:
1817 tcg_out_ld16u(s
, COND_AL
, args
[0], args
[1], args
[2]);
1819 case INDEX_op_ld16s_i32
:
1820 tcg_out_ld16s(s
, COND_AL
, args
[0], args
[1], args
[2]);
1822 case INDEX_op_ld_i32
:
1823 tcg_out_ld32u(s
, COND_AL
, args
[0], args
[1], args
[2]);
1825 case INDEX_op_st8_i32
:
1826 tcg_out_st8(s
, COND_AL
, args
[0], args
[1], args
[2]);
1828 case INDEX_op_st16_i32
:
1829 tcg_out_st16(s
, COND_AL
, args
[0], args
[1], args
[2]);
1831 case INDEX_op_st_i32
:
1832 tcg_out_st32(s
, COND_AL
, args
[0], args
[1], args
[2]);
1835 case INDEX_op_movcond_i32
:
1836 /* Constraints mean that v2 is always in the same register as dest,
1837 * so we only need to do "if condition passed, move v1 to dest".
1839 tcg_out_dat_rIN(s
, COND_AL
, ARITH_CMP
, ARITH_CMN
, 0,
1840 args
[1], args
[2], const_args
[2]);
1841 tcg_out_dat_rIK(s
, tcg_cond_to_arm_cond
[args
[5]], ARITH_MOV
,
1842 ARITH_MVN
, args
[0], 0, args
[3], const_args
[3]);
1844 case INDEX_op_add_i32
:
1845 tcg_out_dat_rIN(s
, COND_AL
, ARITH_ADD
, ARITH_SUB
,
1846 args
[0], args
[1], args
[2], const_args
[2]);
1848 case INDEX_op_sub_i32
:
1849 if (const_args
[1]) {
1850 if (const_args
[2]) {
1851 tcg_out_movi32(s
, COND_AL
, args
[0], args
[1] - args
[2]);
1853 tcg_out_dat_rI(s
, COND_AL
, ARITH_RSB
,
1854 args
[0], args
[2], args
[1], 1);
1857 tcg_out_dat_rIN(s
, COND_AL
, ARITH_SUB
, ARITH_ADD
,
1858 args
[0], args
[1], args
[2], const_args
[2]);
1861 case INDEX_op_and_i32
:
1862 tcg_out_dat_rIK(s
, COND_AL
, ARITH_AND
, ARITH_BIC
,
1863 args
[0], args
[1], args
[2], const_args
[2]);
1865 case INDEX_op_andc_i32
:
1866 tcg_out_dat_rIK(s
, COND_AL
, ARITH_BIC
, ARITH_AND
,
1867 args
[0], args
[1], args
[2], const_args
[2]);
1869 case INDEX_op_or_i32
:
1872 case INDEX_op_xor_i32
:
1876 tcg_out_dat_rI(s
, COND_AL
, c
, args
[0], args
[1], args
[2], const_args
[2]);
1878 case INDEX_op_add2_i32
:
1879 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1880 a3
= args
[3], a4
= args
[4], a5
= args
[5];
1881 if (a0
== a3
|| (a0
== a5
&& !const_args
[5])) {
1884 tcg_out_dat_rIN(s
, COND_AL
, ARITH_ADD
| TO_CPSR
, ARITH_SUB
| TO_CPSR
,
1885 a0
, a2
, a4
, const_args
[4]);
1886 tcg_out_dat_rIK(s
, COND_AL
, ARITH_ADC
, ARITH_SBC
,
1887 a1
, a3
, a5
, const_args
[5]);
1888 tcg_out_mov_reg(s
, COND_AL
, args
[0], a0
);
1890 case INDEX_op_sub2_i32
:
1891 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1892 a3
= args
[3], a4
= args
[4], a5
= args
[5];
1893 if ((a0
== a3
&& !const_args
[3]) || (a0
== a5
&& !const_args
[5])) {
1896 if (const_args
[2]) {
1897 if (const_args
[4]) {
1898 tcg_out_movi32(s
, COND_AL
, a0
, a4
);
1901 tcg_out_dat_rI(s
, COND_AL
, ARITH_RSB
| TO_CPSR
, a0
, a4
, a2
, 1);
1903 tcg_out_dat_rIN(s
, COND_AL
, ARITH_SUB
| TO_CPSR
,
1904 ARITH_ADD
| TO_CPSR
, a0
, a2
, a4
, const_args
[4]);
1906 if (const_args
[3]) {
1907 if (const_args
[5]) {
1908 tcg_out_movi32(s
, COND_AL
, a1
, a5
);
1911 tcg_out_dat_rI(s
, COND_AL
, ARITH_RSC
, a1
, a5
, a3
, 1);
1913 tcg_out_dat_rIK(s
, COND_AL
, ARITH_SBC
, ARITH_ADC
,
1914 a1
, a3
, a5
, const_args
[5]);
1916 tcg_out_mov_reg(s
, COND_AL
, args
[0], a0
);
1918 case INDEX_op_neg_i32
:
1919 tcg_out_dat_imm(s
, COND_AL
, ARITH_RSB
, args
[0], args
[1], 0);
1921 case INDEX_op_not_i32
:
1922 tcg_out_dat_reg(s
, COND_AL
,
1923 ARITH_MVN
, args
[0], 0, args
[1], SHIFT_IMM_LSL(0));
1925 case INDEX_op_mul_i32
:
1926 tcg_out_mul32(s
, COND_AL
, args
[0], args
[1], args
[2]);
1928 case INDEX_op_mulu2_i32
:
1929 tcg_out_umull32(s
, COND_AL
, args
[0], args
[1], args
[2], args
[3]);
1931 case INDEX_op_muls2_i32
:
1932 tcg_out_smull32(s
, COND_AL
, args
[0], args
[1], args
[2], args
[3]);
1934 /* XXX: Perhaps args[2] & 0x1f is wrong */
1935 case INDEX_op_shl_i32
:
1937 SHIFT_IMM_LSL(args
[2] & 0x1f) : SHIFT_REG_LSL(args
[2]);
1939 case INDEX_op_shr_i32
:
1940 c
= const_args
[2] ? (args
[2] & 0x1f) ? SHIFT_IMM_LSR(args
[2] & 0x1f) :
1941 SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args
[2]);
1943 case INDEX_op_sar_i32
:
1944 c
= const_args
[2] ? (args
[2] & 0x1f) ? SHIFT_IMM_ASR(args
[2] & 0x1f) :
1945 SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args
[2]);
1947 case INDEX_op_rotr_i32
:
1948 c
= const_args
[2] ? (args
[2] & 0x1f) ? SHIFT_IMM_ROR(args
[2] & 0x1f) :
1949 SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args
[2]);
1952 tcg_out_dat_reg(s
, COND_AL
, ARITH_MOV
, args
[0], 0, args
[1], c
);
1955 case INDEX_op_rotl_i32
:
1956 if (const_args
[2]) {
1957 tcg_out_dat_reg(s
, COND_AL
, ARITH_MOV
, args
[0], 0, args
[1],
1958 ((0x20 - args
[2]) & 0x1f) ?
1959 SHIFT_IMM_ROR((0x20 - args
[2]) & 0x1f) :
1962 tcg_out_dat_imm(s
, COND_AL
, ARITH_RSB
, TCG_REG_TMP
, args
[2], 0x20);
1963 tcg_out_dat_reg(s
, COND_AL
, ARITH_MOV
, args
[0], 0, args
[1],
1964 SHIFT_REG_ROR(TCG_REG_TMP
));
1968 case INDEX_op_ctz_i32
:
1969 tcg_out_dat_reg(s
, COND_AL
, INSN_RBIT
, TCG_REG_TMP
, 0, args
[1], 0);
1973 case INDEX_op_clz_i32
:
1979 if (c
&& a2
== 32) {
1980 tcg_out_dat_reg(s
, COND_AL
, INSN_CLZ
, a0
, 0, a1
, 0);
1983 tcg_out_dat_imm(s
, COND_AL
, ARITH_CMP
, 0, a1
, 0);
1984 tcg_out_dat_reg(s
, COND_NE
, INSN_CLZ
, a0
, 0, a1
, 0);
1985 if (c
|| a0
!= a2
) {
1986 tcg_out_dat_rIK(s
, COND_EQ
, ARITH_MOV
, ARITH_MVN
, a0
, 0, a2
, c
);
1990 case INDEX_op_brcond_i32
:
1991 tcg_out_dat_rIN(s
, COND_AL
, ARITH_CMP
, ARITH_CMN
, 0,
1992 args
[0], args
[1], const_args
[1]);
1993 tcg_out_goto_label(s
, tcg_cond_to_arm_cond
[args
[2]],
1994 arg_label(args
[3]));
1996 case INDEX_op_setcond_i32
:
1997 tcg_out_dat_rIN(s
, COND_AL
, ARITH_CMP
, ARITH_CMN
, 0,
1998 args
[1], args
[2], const_args
[2]);
1999 tcg_out_dat_imm(s
, tcg_cond_to_arm_cond
[args
[3]],
2000 ARITH_MOV
, args
[0], 0, 1);
2001 tcg_out_dat_imm(s
, tcg_cond_to_arm_cond
[tcg_invert_cond(args
[3])],
2002 ARITH_MOV
, args
[0], 0, 0);
2005 case INDEX_op_brcond2_i32
:
2006 c
= tcg_out_cmp2(s
, args
, const_args
);
2007 tcg_out_goto_label(s
, tcg_cond_to_arm_cond
[c
], arg_label(args
[5]));
2009 case INDEX_op_setcond2_i32
:
2010 c
= tcg_out_cmp2(s
, args
+ 1, const_args
+ 1);
2011 tcg_out_dat_imm(s
, tcg_cond_to_arm_cond
[c
], ARITH_MOV
, args
[0], 0, 1);
2012 tcg_out_dat_imm(s
, tcg_cond_to_arm_cond
[tcg_invert_cond(c
)],
2013 ARITH_MOV
, args
[0], 0, 0);
2016 case INDEX_op_qemu_ld_i32
:
2017 tcg_out_qemu_ld(s
, args
, 0);
2019 case INDEX_op_qemu_ld_i64
:
2020 tcg_out_qemu_ld(s
, args
, 1);
2022 case INDEX_op_qemu_st_i32
:
2023 tcg_out_qemu_st(s
, args
, 0);
2025 case INDEX_op_qemu_st_i64
:
2026 tcg_out_qemu_st(s
, args
, 1);
2029 case INDEX_op_bswap16_i32
:
2030 tcg_out_bswap16(s
, COND_AL
, args
[0], args
[1]);
2032 case INDEX_op_bswap32_i32
:
2033 tcg_out_bswap32(s
, COND_AL
, args
[0], args
[1]);
2036 case INDEX_op_ext8s_i32
:
2037 tcg_out_ext8s(s
, COND_AL
, args
[0], args
[1]);
2039 case INDEX_op_ext16s_i32
:
2040 tcg_out_ext16s(s
, COND_AL
, args
[0], args
[1]);
2042 case INDEX_op_ext16u_i32
:
2043 tcg_out_ext16u(s
, COND_AL
, args
[0], args
[1]);
2046 case INDEX_op_deposit_i32
:
2047 tcg_out_deposit(s
, COND_AL
, args
[0], args
[2],
2048 args
[3], args
[4], const_args
[2]);
2050 case INDEX_op_extract_i32
:
2051 tcg_out_extract(s
, COND_AL
, args
[0], args
[1], args
[2], args
[3]);
2053 case INDEX_op_sextract_i32
:
2054 tcg_out_sextract(s
, COND_AL
, args
[0], args
[1], args
[2], args
[3]);
    case INDEX_op_extract2_i32:
        /* ??? These optimizations vs zero should be generic.  */
        /* ??? But we can't substitute 2 for 1 in the opcode stream yet.  */
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
            } else {
                tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                                args[2], SHIFT_IMM_LSL(32 - args[3]));
            }
        } else if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                            args[1], SHIFT_IMM_LSR(args[3]));
        } else {
            /* We can do extract2 in 2 insns, vs the 3 required otherwise.  */
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
                            args[2], SHIFT_IMM_LSL(32 - args[3]));
            tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
                            args[1], SHIFT_IMM_LSR(args[3]));
        }
        break;

    case INDEX_op_div_i32:
        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_divu_i32:
        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, args[0]);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}

static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
    static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
    static const TCGTargetOpDef s_s = { .args_ct_str = { "s", "s" } };
    static const TCGTargetOpDef r_l = { .args_ct_str = { "r", "l" } };
    static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } };
    static const TCGTargetOpDef r_r_l = { .args_ct_str = { "r", "r", "l" } };
    static const TCGTargetOpDef r_l_l = { .args_ct_str = { "r", "l", "l" } };
    static const TCGTargetOpDef s_s_s = { .args_ct_str = { "s", "s", "s" } };
    static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
    static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } };
    static const TCGTargetOpDef r_r_rIN
        = { .args_ct_str = { "r", "r", "rIN" } };
    static const TCGTargetOpDef r_r_rIK
        = { .args_ct_str = { "r", "r", "rIK" } };
    static const TCGTargetOpDef r_r_r_r
        = { .args_ct_str = { "r", "r", "r", "r" } };
    static const TCGTargetOpDef r_r_l_l
        = { .args_ct_str = { "r", "r", "l", "l" } };
    static const TCGTargetOpDef s_s_s_s
        = { .args_ct_str = { "s", "s", "s", "s" } };
    static const TCGTargetOpDef br
        = { .args_ct_str = { "r", "rIN" } };
    static const TCGTargetOpDef ext2
        = { .args_ct_str = { "r", "rZ", "rZ" } };
    static const TCGTargetOpDef dep
        = { .args_ct_str = { "r", "0", "rZ" } };
    static const TCGTargetOpDef movc
        = { .args_ct_str = { "r", "r", "rIN", "rIK", "0" } };
    static const TCGTargetOpDef add2
        = { .args_ct_str = { "r", "r", "r", "r", "rIN", "rIK" } };
    static const TCGTargetOpDef sub2
        = { .args_ct_str = { "r", "r", "rI", "rI", "rIN", "rIK" } };
    static const TCGTargetOpDef br2
        = { .args_ct_str = { "r", "r", "rI", "rI" } };
    static const TCGTargetOpDef setc2
        = { .args_ct_str = { "r", "r", "r", "rI", "rI" } };
    switch (op) {
    case INDEX_op_goto_ptr:
        return &r;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16u_i32:
    case INDEX_op_extract_i32:
    case INDEX_op_sextract_i32:
        return &r_r;

    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_setcond_i32:
        return &r_r_rIN;
    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_clz_i32:
    case INDEX_op_ctz_i32:
        return &r_r_rIK;
    case INDEX_op_mul_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return &r_r_r;
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return &r_r_r_r;
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
        return &r_r_rI;
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return &r_r_ri;

    case INDEX_op_brcond_i32:
        return &br;
    case INDEX_op_deposit_i32:
        return &dep;
    case INDEX_op_extract2_i32:
        return &ext2;
    case INDEX_op_movcond_i32:
        return &movc;
    case INDEX_op_add2_i32:
        return &add2;
    case INDEX_op_sub2_i32:
        return &sub2;
    case INDEX_op_brcond2_i32:
        return &br2;
    case INDEX_op_setcond2_i32:
        return &setc2;
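
    /*
     * On this 32-bit host a 64-bit guest virtual address occupies two
     * registers, which is why the qemu_ld/st constraints below grow an
     * extra "l"/"s" operand when TARGET_LONG_BITS is not 32.
     */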
    case INDEX_op_qemu_ld_i32:
        return TARGET_LONG_BITS == 32 ? &r_l : &r_l_l;
    case INDEX_op_qemu_ld_i64:
        return TARGET_LONG_BITS == 32 ? &r_r_l : &r_r_l_l;
    case INDEX_op_qemu_st_i32:
        return TARGET_LONG_BITS == 32 ? &s_s : &s_s_s;
    case INDEX_op_qemu_st_i64:
        return TARGET_LONG_BITS == 32 ? &s_s_s : &s_s_s_s;

    default:
        return NULL;
    }
}

static void tcg_target_init(TCGContext *s)
{
    /* Only probe for the platform and capabilities if we haven't already
       determined maximum values at compile time.  */
#ifndef use_idiv_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
    }
#endif
    if (__ARM_ARCH < 7) {
        const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
        if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
            arm_arch = pl[1] - '0';
        }
    }

    tcg_target_available_regs[TCG_TYPE_I32] = 0xffff;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
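
    /*
     * Reserved: the stack pointer, TCG_REG_TMP (r12, used throughout
     * this backend as a scratch register), and the pc, which cannot be
     * allocated as a general-purpose register.
     */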
    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = INSN_NOP;
    }
}

/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit.  */

#define PUSH_SIZE  ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & -TCG_TARGET_STACK_ALIGN)
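
/*
 * As a worked example: r4-r11 plus lr is (11 - 4 + 1) + 1 = 9 registers,
 * so PUSH_SIZE is 9 * 4 = 36 bytes.  FRAME_SIZE then adds the static
 * call argument area and the temp buffer, rounding the total up to
 * TCG_TARGET_STACK_ALIGN.
 */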

static void tcg_target_qemu_prologue(TCGContext *s)
{
    int stack_addend;

    /* Calling convention requires us to save r4-r11 and lr.  */
    /* stmdb sp!, { r4 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4ff0);
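    /*
     * In that encoding, 0x092d4ff0 is STMDB with base sp (r13) and
     * writeback; the register mask 0x4ff0 sets bits 4-11 and 14,
     * i.e. r4-r11 and lr.
     */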

    /* Reserve callee argument and tcg temp space.  */
    stack_addend = FRAME_SIZE - PUSH_SIZE;

    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, stack_addend, 1);
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);

    /*
     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    s->code_gen_epilogue = s->code_ptr;
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);

    /* TB epilogue */
    tb_ret_addr = s->code_ptr;
    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, stack_addend, 1);
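
    /*
     * Pop the saved registers, loading the saved lr value into pc:
     * 0x08bd8ff0 is LDMIA sp! with register mask 0x8ff0 (r4-r11 plus
     * pc), which both restores the callee-saved registers and returns.
     */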
    /* ldmia sp!, { r4 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

#define ELF_HOST_MACHINE EM_ARM

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
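
/*
 * The frame description below mirrors the prologue: DW_CFA_def_cfa says
 * the CFA is sp plus FRAME_SIZE (a uleb128, hence the two-byte encoding
 * checked above), and each DW_CFA_offset entry is (0x80 | regno)
 * followed by a uleb128 offset scaled by data_align (-4); e.g. 0x8e, 1
 * places lr (r14) at CFA-4.
 */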
static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
    .h.cie.return_column = 14,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 13,                         /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        FRAME_SIZE >> 7
    },
    .fde_reg_ofs = {
        /* The following must match the stmdb in the prologue.  */
        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
    }
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}