/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
26 #include "tcg-pool.inc.c"
28 int arm_arch
= __ARM_ARCH
;
30 #ifndef use_idiv_instructions
31 bool use_idiv_instructions
;
34 /* ??? Ought to think about changing CONFIG_SOFTMMU to always defined. */
36 # define USING_SOFTMMU 1
38 # define USING_SOFTMMU 0
41 #ifdef CONFIG_DEBUG_TCG
42 static const char * const tcg_target_reg_names
[TCG_TARGET_NB_REGS
] = {
62 static const int tcg_target_reg_alloc_order
[] = {
80 static const int tcg_target_call_iarg_regs
[4] = {
81 TCG_REG_R0
, TCG_REG_R1
, TCG_REG_R2
, TCG_REG_R3
83 static const int tcg_target_call_oarg_regs
[2] = {
84 TCG_REG_R0
, TCG_REG_R1
87 #define TCG_REG_TMP TCG_REG_R12
89 enum arm_cond_code_e
{
92 COND_CS
= 0x2, /* Unsigned greater or equal */
93 COND_CC
= 0x3, /* Unsigned less than */
94 COND_MI
= 0x4, /* Negative */
95 COND_PL
= 0x5, /* Zero or greater */
96 COND_VS
= 0x6, /* Overflow */
97 COND_VC
= 0x7, /* No overflow */
98 COND_HI
= 0x8, /* Unsigned greater than */
99 COND_LS
= 0x9, /* Unsigned less or equal */
107 #define TO_CPSR (1 << 20)
109 #define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00)
110 #define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20)
111 #define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40)
112 #define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60)
113 #define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10)
114 #define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30)
115 #define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50)
116 #define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70)
119 ARITH_AND
= 0x0 << 21,
120 ARITH_EOR
= 0x1 << 21,
121 ARITH_SUB
= 0x2 << 21,
122 ARITH_RSB
= 0x3 << 21,
123 ARITH_ADD
= 0x4 << 21,
124 ARITH_ADC
= 0x5 << 21,
125 ARITH_SBC
= 0x6 << 21,
126 ARITH_RSC
= 0x7 << 21,
127 ARITH_TST
= 0x8 << 21 | TO_CPSR
,
128 ARITH_CMP
= 0xa << 21 | TO_CPSR
,
129 ARITH_CMN
= 0xb << 21 | TO_CPSR
,
130 ARITH_ORR
= 0xc << 21,
131 ARITH_MOV
= 0xd << 21,
132 ARITH_BIC
= 0xe << 21,
133 ARITH_MVN
= 0xf << 21,
135 INSN_CLZ
= 0x016f0f10,
136 INSN_RBIT
= 0x06ff0f30,
138 INSN_LDR_IMM
= 0x04100000,
139 INSN_LDR_REG
= 0x06100000,
140 INSN_STR_IMM
= 0x04000000,
141 INSN_STR_REG
= 0x06000000,
143 INSN_LDRH_IMM
= 0x005000b0,
144 INSN_LDRH_REG
= 0x001000b0,
145 INSN_LDRSH_IMM
= 0x005000f0,
146 INSN_LDRSH_REG
= 0x001000f0,
147 INSN_STRH_IMM
= 0x004000b0,
148 INSN_STRH_REG
= 0x000000b0,
150 INSN_LDRB_IMM
= 0x04500000,
151 INSN_LDRB_REG
= 0x06500000,
152 INSN_LDRSB_IMM
= 0x005000d0,
153 INSN_LDRSB_REG
= 0x001000d0,
154 INSN_STRB_IMM
= 0x04400000,
155 INSN_STRB_REG
= 0x06400000,
157 INSN_LDRD_IMM
= 0x004000d0,
158 INSN_LDRD_REG
= 0x000000d0,
159 INSN_STRD_IMM
= 0x004000f0,
160 INSN_STRD_REG
= 0x000000f0,
162 INSN_DMB_ISH
= 0xf57ff05b,
163 INSN_DMB_MCR
= 0xee070fba,
165 /* Architected nop introduced in v6k. */
166 /* ??? This is an MSR (imm) 0,0,0 insn. Anyone know if this
167 also Just So Happened to do nothing on pre-v6k so that we
168 don't need to conditionalize it? */
169 INSN_NOP_v6k
= 0xe320f000,
170 /* Otherwise the assembler uses mov r0,r0 */
171 INSN_NOP_v4
= (COND_AL
<< 28) | ARITH_MOV
,
174 #define INSN_NOP (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)
176 static const uint8_t tcg_cond_to_arm_cond
[] = {
177 [TCG_COND_EQ
] = COND_EQ
,
178 [TCG_COND_NE
] = COND_NE
,
179 [TCG_COND_LT
] = COND_LT
,
180 [TCG_COND_GE
] = COND_GE
,
181 [TCG_COND_LE
] = COND_LE
,
182 [TCG_COND_GT
] = COND_GT
,
184 [TCG_COND_LTU
] = COND_CC
,
185 [TCG_COND_GEU
] = COND_CS
,
186 [TCG_COND_LEU
] = COND_LS
,
187 [TCG_COND_GTU
] = COND_HI
,
190 static inline bool reloc_pc24(tcg_insn_unit
*code_ptr
, tcg_insn_unit
*target
)
192 ptrdiff_t offset
= (tcg_ptr_byte_diff(target
, code_ptr
) - 8) >> 2;
193 if (offset
== sextract32(offset
, 0, 24)) {
194 *code_ptr
= (*code_ptr
& ~0xffffff) | (offset
& 0xffffff);
200 static bool patch_reloc(tcg_insn_unit
*code_ptr
, int type
,
201 intptr_t value
, intptr_t addend
)
203 tcg_debug_assert(addend
== 0);
205 if (type
== R_ARM_PC24
) {
206 return reloc_pc24(code_ptr
, (tcg_insn_unit
*)value
);
207 } else if (type
== R_ARM_PC13
) {
208 intptr_t diff
= value
- (uintptr_t)(code_ptr
+ 2);
209 tcg_insn_unit insn
= *code_ptr
;
212 if (diff
>= -0xfff && diff
<= 0xfff) {
218 int rd
= extract32(insn
, 12, 4);
219 int rt
= rd
== TCG_REG_PC
? TCG_REG_TMP
: rd
;
221 if (diff
< 0x1000 || diff
>= 0x100000) {
225 /* add rt, pc, #high */
226 *code_ptr
++ = ((insn
& 0xf0000000) | (1 << 25) | ARITH_ADD
227 | (TCG_REG_PC
<< 16) | (rt
<< 12)
228 | (20 << 7) | (diff
>> 12));
229 /* ldr rd, [rt, #low] */
230 insn
= deposit32(insn
, 12, 4, rt
);
234 insn
= deposit32(insn
, 23, 1, u
);
235 insn
= deposit32(insn
, 0, 12, diff
);
238 g_assert_not_reached();
243 #define TCG_CT_CONST_ARM 0x100
244 #define TCG_CT_CONST_INV 0x200
245 #define TCG_CT_CONST_NEG 0x400
246 #define TCG_CT_CONST_ZERO 0x800
248 /* parse target specific constraints */
249 static const char *target_parse_constraint(TCGArgConstraint
*ct
,
250 const char *ct_str
, TCGType type
)
254 ct
->ct
|= TCG_CT_CONST_ARM
;
257 ct
->ct
|= TCG_CT_CONST_INV
;
259 case 'N': /* The gcc constraint letter is L, already used here. */
260 ct
->ct
|= TCG_CT_CONST_NEG
;
263 ct
->ct
|= TCG_CT_CONST_ZERO
;
267 ct
->ct
|= TCG_CT_REG
;
271 /* qemu_ld address */
273 ct
->ct
|= TCG_CT_REG
;
275 #ifdef CONFIG_SOFTMMU
276 /* r0-r2,lr will be overwritten when reading the tlb entry,
277 so don't use these. */
278 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R0
);
279 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R1
);
280 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R2
);
281 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R14
);
285 /* qemu_st address & data */
287 ct
->ct
|= TCG_CT_REG
;
289 /* r0-r2 will be overwritten when reading the tlb entry (softmmu only)
290 and r0-r1 doing the byte swapping, so don't use these. */
291 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R0
);
292 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R1
);
293 #if defined(CONFIG_SOFTMMU)
294 /* Avoid clashes with registers being used for helper args */
295 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R2
);
296 #if TARGET_LONG_BITS == 64
297 /* Avoid clashes with registers being used for helper args */
298 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R3
);
300 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R14
);
/* Rotate a 32-bit value left by n bits.
 *
 * Fixed: the previous form computed (val >> (32 - n)), which is
 * undefined behavior for n == 0 (shift count equal to the type width).
 * Callers do pass n == 0 -- encode_imm() returns rotate 0 for values
 * that already fit in 8 bits, and tcg_out_movi32()/tcg_out_dat_rI()
 * feed that straight back into rotl().  Masking both shift counts
 * keeps them in [0, 31] and makes rotl(val, 0) == val by definition.
 */
static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << (n & 31)) | (val >> (-n & 31));
}
315 /* ARM immediates for ALU instructions are made of an unsigned 8-bit
316 right-rotated by an even amount between 0 and 30. */
317 static inline int encode_imm(uint32_t imm
)
321 /* simple case, only lower bits */
322 if ((imm
& ~0xff) == 0)
324 /* then try a simple even shift */
325 shift
= ctz32(imm
) & ~1;
326 if (((imm
>> shift
) & ~0xff) == 0)
328 /* now try harder with rotations */
329 if ((rotl(imm
, 2) & ~0xff) == 0)
331 if ((rotl(imm
, 4) & ~0xff) == 0)
333 if ((rotl(imm
, 6) & ~0xff) == 0)
335 /* imm can't be encoded */
339 static inline int check_fit_imm(uint32_t imm
)
341 return encode_imm(imm
) >= 0;
344 /* Test if a constant matches the constraint.
345 * TODO: define constraints for:
347 * ldr/str offset: between -0xfff and 0xfff
348 * ldrh/strh offset: between -0xff and 0xff
349 * mov operand2: values represented with x << (2 * y), x < 0x100
350 * add, sub, eor...: ditto
352 static inline int tcg_target_const_match(tcg_target_long val
, TCGType type
,
353 const TCGArgConstraint
*arg_ct
)
357 if (ct
& TCG_CT_CONST
) {
359 } else if ((ct
& TCG_CT_CONST_ARM
) && check_fit_imm(val
)) {
361 } else if ((ct
& TCG_CT_CONST_INV
) && check_fit_imm(~val
)) {
363 } else if ((ct
& TCG_CT_CONST_NEG
) && check_fit_imm(-val
)) {
365 } else if ((ct
& TCG_CT_CONST_ZERO
) && val
== 0) {
372 static inline void tcg_out_b(TCGContext
*s
, int cond
, int32_t offset
)
374 tcg_out32(s
, (cond
<< 28) | 0x0a000000 |
375 (((offset
- 8) >> 2) & 0x00ffffff));
378 static inline void tcg_out_bl(TCGContext
*s
, int cond
, int32_t offset
)
380 tcg_out32(s
, (cond
<< 28) | 0x0b000000 |
381 (((offset
- 8) >> 2) & 0x00ffffff));
384 static inline void tcg_out_blx(TCGContext
*s
, int cond
, int rn
)
386 tcg_out32(s
, (cond
<< 28) | 0x012fff30 | rn
);
389 static inline void tcg_out_blx_imm(TCGContext
*s
, int32_t offset
)
391 tcg_out32(s
, 0xfa000000 | ((offset
& 2) << 23) |
392 (((offset
- 8) >> 2) & 0x00ffffff));
395 static inline void tcg_out_dat_reg(TCGContext
*s
,
396 int cond
, int opc
, int rd
, int rn
, int rm
, int shift
)
398 tcg_out32(s
, (cond
<< 28) | (0 << 25) | opc
|
399 (rn
<< 16) | (rd
<< 12) | shift
| rm
);
402 static inline void tcg_out_nop(TCGContext
*s
)
404 tcg_out32(s
, INSN_NOP
);
407 static inline void tcg_out_mov_reg(TCGContext
*s
, int cond
, int rd
, int rm
)
409 /* Simple reg-reg move, optimising out the 'do nothing' case */
411 tcg_out_dat_reg(s
, cond
, ARITH_MOV
, rd
, 0, rm
, SHIFT_IMM_LSL(0));
415 static inline void tcg_out_bx(TCGContext
*s
, int cond
, TCGReg rn
)
417 /* Unless the C portion of QEMU is compiled as thumb, we don't
418 actually need true BX semantics; merely a branch to an address
419 held in a register. */
420 if (use_armv5t_instructions
) {
421 tcg_out32(s
, (cond
<< 28) | 0x012fff10 | rn
);
423 tcg_out_mov_reg(s
, cond
, TCG_REG_PC
, rn
);
427 static inline void tcg_out_dat_imm(TCGContext
*s
,
428 int cond
, int opc
, int rd
, int rn
, int im
)
430 tcg_out32(s
, (cond
<< 28) | (1 << 25) | opc
|
431 (rn
<< 16) | (rd
<< 12) | im
);
434 /* Note that this routine is used for both LDR and LDRH formats, so we do
435 not wish to include an immediate shift at this point. */
436 static void tcg_out_memop_r(TCGContext
*s
, int cond
, ARMInsn opc
, TCGReg rt
,
437 TCGReg rn
, TCGReg rm
, bool u
, bool p
, bool w
)
439 tcg_out32(s
, (cond
<< 28) | opc
| (u
<< 23) | (p
<< 24)
440 | (w
<< 21) | (rn
<< 16) | (rt
<< 12) | rm
);
443 static void tcg_out_memop_8(TCGContext
*s
, int cond
, ARMInsn opc
, TCGReg rt
,
444 TCGReg rn
, int imm8
, bool p
, bool w
)
451 tcg_out32(s
, (cond
<< 28) | opc
| (u
<< 23) | (p
<< 24) | (w
<< 21) |
452 (rn
<< 16) | (rt
<< 12) | ((imm8
& 0xf0) << 4) | (imm8
& 0xf));
455 static void tcg_out_memop_12(TCGContext
*s
, int cond
, ARMInsn opc
, TCGReg rt
,
456 TCGReg rn
, int imm12
, bool p
, bool w
)
463 tcg_out32(s
, (cond
<< 28) | opc
| (u
<< 23) | (p
<< 24) | (w
<< 21) |
464 (rn
<< 16) | (rt
<< 12) | imm12
);
467 static inline void tcg_out_ld32_12(TCGContext
*s
, int cond
, TCGReg rt
,
468 TCGReg rn
, int imm12
)
470 tcg_out_memop_12(s
, cond
, INSN_LDR_IMM
, rt
, rn
, imm12
, 1, 0);
473 static inline void tcg_out_st32_12(TCGContext
*s
, int cond
, TCGReg rt
,
474 TCGReg rn
, int imm12
)
476 tcg_out_memop_12(s
, cond
, INSN_STR_IMM
, rt
, rn
, imm12
, 1, 0);
479 static inline void tcg_out_ld32_r(TCGContext
*s
, int cond
, TCGReg rt
,
480 TCGReg rn
, TCGReg rm
)
482 tcg_out_memop_r(s
, cond
, INSN_LDR_REG
, rt
, rn
, rm
, 1, 1, 0);
485 static inline void tcg_out_st32_r(TCGContext
*s
, int cond
, TCGReg rt
,
486 TCGReg rn
, TCGReg rm
)
488 tcg_out_memop_r(s
, cond
, INSN_STR_REG
, rt
, rn
, rm
, 1, 1, 0);
491 static inline void tcg_out_ldrd_8(TCGContext
*s
, int cond
, TCGReg rt
,
494 tcg_out_memop_8(s
, cond
, INSN_LDRD_IMM
, rt
, rn
, imm8
, 1, 0);
497 static inline void tcg_out_ldrd_r(TCGContext
*s
, int cond
, TCGReg rt
,
498 TCGReg rn
, TCGReg rm
)
500 tcg_out_memop_r(s
, cond
, INSN_LDRD_REG
, rt
, rn
, rm
, 1, 1, 0);
503 static inline void tcg_out_strd_8(TCGContext
*s
, int cond
, TCGReg rt
,
506 tcg_out_memop_8(s
, cond
, INSN_STRD_IMM
, rt
, rn
, imm8
, 1, 0);
509 static inline void tcg_out_strd_r(TCGContext
*s
, int cond
, TCGReg rt
,
510 TCGReg rn
, TCGReg rm
)
512 tcg_out_memop_r(s
, cond
, INSN_STRD_REG
, rt
, rn
, rm
, 1, 1, 0);
515 /* Register pre-increment with base writeback. */
516 static inline void tcg_out_ld32_rwb(TCGContext
*s
, int cond
, TCGReg rt
,
517 TCGReg rn
, TCGReg rm
)
519 tcg_out_memop_r(s
, cond
, INSN_LDR_REG
, rt
, rn
, rm
, 1, 1, 1);
522 static inline void tcg_out_st32_rwb(TCGContext
*s
, int cond
, TCGReg rt
,
523 TCGReg rn
, TCGReg rm
)
525 tcg_out_memop_r(s
, cond
, INSN_STR_REG
, rt
, rn
, rm
, 1, 1, 1);
528 static inline void tcg_out_ld16u_8(TCGContext
*s
, int cond
, TCGReg rt
,
531 tcg_out_memop_8(s
, cond
, INSN_LDRH_IMM
, rt
, rn
, imm8
, 1, 0);
534 static inline void tcg_out_st16_8(TCGContext
*s
, int cond
, TCGReg rt
,
537 tcg_out_memop_8(s
, cond
, INSN_STRH_IMM
, rt
, rn
, imm8
, 1, 0);
540 static inline void tcg_out_ld16u_r(TCGContext
*s
, int cond
, TCGReg rt
,
541 TCGReg rn
, TCGReg rm
)
543 tcg_out_memop_r(s
, cond
, INSN_LDRH_REG
, rt
, rn
, rm
, 1, 1, 0);
546 static inline void tcg_out_st16_r(TCGContext
*s
, int cond
, TCGReg rt
,
547 TCGReg rn
, TCGReg rm
)
549 tcg_out_memop_r(s
, cond
, INSN_STRH_REG
, rt
, rn
, rm
, 1, 1, 0);
552 static inline void tcg_out_ld16s_8(TCGContext
*s
, int cond
, TCGReg rt
,
555 tcg_out_memop_8(s
, cond
, INSN_LDRSH_IMM
, rt
, rn
, imm8
, 1, 0);
558 static inline void tcg_out_ld16s_r(TCGContext
*s
, int cond
, TCGReg rt
,
559 TCGReg rn
, TCGReg rm
)
561 tcg_out_memop_r(s
, cond
, INSN_LDRSH_REG
, rt
, rn
, rm
, 1, 1, 0);
564 static inline void tcg_out_ld8_12(TCGContext
*s
, int cond
, TCGReg rt
,
565 TCGReg rn
, int imm12
)
567 tcg_out_memop_12(s
, cond
, INSN_LDRB_IMM
, rt
, rn
, imm12
, 1, 0);
570 static inline void tcg_out_st8_12(TCGContext
*s
, int cond
, TCGReg rt
,
571 TCGReg rn
, int imm12
)
573 tcg_out_memop_12(s
, cond
, INSN_STRB_IMM
, rt
, rn
, imm12
, 1, 0);
576 static inline void tcg_out_ld8_r(TCGContext
*s
, int cond
, TCGReg rt
,
577 TCGReg rn
, TCGReg rm
)
579 tcg_out_memop_r(s
, cond
, INSN_LDRB_REG
, rt
, rn
, rm
, 1, 1, 0);
582 static inline void tcg_out_st8_r(TCGContext
*s
, int cond
, TCGReg rt
,
583 TCGReg rn
, TCGReg rm
)
585 tcg_out_memop_r(s
, cond
, INSN_STRB_REG
, rt
, rn
, rm
, 1, 1, 0);
588 static inline void tcg_out_ld8s_8(TCGContext
*s
, int cond
, TCGReg rt
,
591 tcg_out_memop_8(s
, cond
, INSN_LDRSB_IMM
, rt
, rn
, imm8
, 1, 0);
594 static inline void tcg_out_ld8s_r(TCGContext
*s
, int cond
, TCGReg rt
,
595 TCGReg rn
, TCGReg rm
)
597 tcg_out_memop_r(s
, cond
, INSN_LDRSB_REG
, rt
, rn
, rm
, 1, 1, 0);
600 static void tcg_out_movi_pool(TCGContext
*s
, int cond
, int rd
, uint32_t arg
)
602 /* The 12-bit range on the ldr insn is sometimes a bit too small.
603 In order to get around that we require two insns, one of which
604 will usually be a nop, but may be replaced in patch_reloc. */
605 new_pool_label(s
, arg
, R_ARM_PC13
, s
->code_ptr
, 0);
606 tcg_out_ld32_12(s
, cond
, rd
, TCG_REG_PC
, 0);
610 static void tcg_out_movi32(TCGContext
*s
, int cond
, int rd
, uint32_t arg
)
612 int rot
, diff
, opc
, sh1
, sh2
;
613 uint32_t tt0
, tt1
, tt2
;
615 /* Check a single MOV/MVN before anything else. */
616 rot
= encode_imm(arg
);
618 tcg_out_dat_imm(s
, cond
, ARITH_MOV
, rd
, 0,
619 rotl(arg
, rot
) | (rot
<< 7));
622 rot
= encode_imm(~arg
);
624 tcg_out_dat_imm(s
, cond
, ARITH_MVN
, rd
, 0,
625 rotl(~arg
, rot
) | (rot
<< 7));
629 /* Check for a pc-relative address. This will usually be the TB,
630 or within the TB, which is immediately before the code block. */
631 diff
= arg
- ((intptr_t)s
->code_ptr
+ 8);
633 rot
= encode_imm(diff
);
635 tcg_out_dat_imm(s
, cond
, ARITH_ADD
, rd
, TCG_REG_PC
,
636 rotl(diff
, rot
) | (rot
<< 7));
640 rot
= encode_imm(-diff
);
642 tcg_out_dat_imm(s
, cond
, ARITH_SUB
, rd
, TCG_REG_PC
,
643 rotl(-diff
, rot
) | (rot
<< 7));
648 /* Use movw + movt. */
649 if (use_armv7_instructions
) {
651 tcg_out32(s
, (cond
<< 28) | 0x03000000 | (rd
<< 12)
652 | ((arg
<< 4) & 0x000f0000) | (arg
& 0xfff));
653 if (arg
& 0xffff0000) {
655 tcg_out32(s
, (cond
<< 28) | 0x03400000 | (rd
<< 12)
656 | ((arg
>> 12) & 0x000f0000) | ((arg
>> 16) & 0xfff));
661 /* Look for sequences of two insns. If we have lots of 1's, we can
662 shorten the sequence by beginning with mvn and then clearing
663 higher bits with eor. */
666 if (ctpop32(arg
) > 16) {
670 sh1
= ctz32(tt0
) & ~1;
671 tt1
= tt0
& ~(0xff << sh1
);
672 sh2
= ctz32(tt1
) & ~1;
673 tt2
= tt1
& ~(0xff << sh2
);
675 rot
= ((32 - sh1
) << 7) & 0xf00;
676 tcg_out_dat_imm(s
, cond
, opc
, rd
, 0, ((tt0
>> sh1
) & 0xff) | rot
);
677 rot
= ((32 - sh2
) << 7) & 0xf00;
678 tcg_out_dat_imm(s
, cond
, ARITH_EOR
, rd
, rd
,
679 ((tt0
>> sh2
) & 0xff) | rot
);
683 /* Otherwise, drop it into the constant pool. */
684 tcg_out_movi_pool(s
, cond
, rd
, arg
);
687 static inline void tcg_out_dat_rI(TCGContext
*s
, int cond
, int opc
, TCGArg dst
,
688 TCGArg lhs
, TCGArg rhs
, int rhs_is_const
)
690 /* Emit either the reg,imm or reg,reg form of a data-processing insn.
691 * rhs must satisfy the "rI" constraint.
694 int rot
= encode_imm(rhs
);
695 tcg_debug_assert(rot
>= 0);
696 tcg_out_dat_imm(s
, cond
, opc
, dst
, lhs
, rotl(rhs
, rot
) | (rot
<< 7));
698 tcg_out_dat_reg(s
, cond
, opc
, dst
, lhs
, rhs
, SHIFT_IMM_LSL(0));
702 static void tcg_out_dat_rIK(TCGContext
*s
, int cond
, int opc
, int opinv
,
703 TCGReg dst
, TCGReg lhs
, TCGArg rhs
,
706 /* Emit either the reg,imm or reg,reg form of a data-processing insn.
707 * rhs must satisfy the "rIK" constraint.
710 int rot
= encode_imm(rhs
);
713 rot
= encode_imm(rhs
);
714 tcg_debug_assert(rot
>= 0);
717 tcg_out_dat_imm(s
, cond
, opc
, dst
, lhs
, rotl(rhs
, rot
) | (rot
<< 7));
719 tcg_out_dat_reg(s
, cond
, opc
, dst
, lhs
, rhs
, SHIFT_IMM_LSL(0));
723 static void tcg_out_dat_rIN(TCGContext
*s
, int cond
, int opc
, int opneg
,
724 TCGArg dst
, TCGArg lhs
, TCGArg rhs
,
727 /* Emit either the reg,imm or reg,reg form of a data-processing insn.
728 * rhs must satisfy the "rIN" constraint.
731 int rot
= encode_imm(rhs
);
734 rot
= encode_imm(rhs
);
735 tcg_debug_assert(rot
>= 0);
738 tcg_out_dat_imm(s
, cond
, opc
, dst
, lhs
, rotl(rhs
, rot
) | (rot
<< 7));
740 tcg_out_dat_reg(s
, cond
, opc
, dst
, lhs
, rhs
, SHIFT_IMM_LSL(0));
744 static inline void tcg_out_mul32(TCGContext
*s
, int cond
, TCGReg rd
,
745 TCGReg rn
, TCGReg rm
)
747 /* if ArchVersion() < 6 && d == n then UNPREDICTABLE; */
748 if (!use_armv6_instructions
&& rd
== rn
) {
750 /* rd == rn == rm; copy an input to tmp first. */
751 tcg_out_mov_reg(s
, cond
, TCG_REG_TMP
, rn
);
752 rm
= rn
= TCG_REG_TMP
;
759 tcg_out32(s
, (cond
<< 28) | 0x90 | (rd
<< 16) | (rm
<< 8) | rn
);
762 static inline void tcg_out_umull32(TCGContext
*s
, int cond
, TCGReg rd0
,
763 TCGReg rd1
, TCGReg rn
, TCGReg rm
)
765 /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */
766 if (!use_armv6_instructions
&& (rd0
== rn
|| rd1
== rn
)) {
767 if (rd0
== rm
|| rd1
== rm
) {
768 tcg_out_mov_reg(s
, cond
, TCG_REG_TMP
, rn
);
777 tcg_out32(s
, (cond
<< 28) | 0x00800090 |
778 (rd1
<< 16) | (rd0
<< 12) | (rm
<< 8) | rn
);
781 static inline void tcg_out_smull32(TCGContext
*s
, int cond
, TCGReg rd0
,
782 TCGReg rd1
, TCGReg rn
, TCGReg rm
)
784 /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */
785 if (!use_armv6_instructions
&& (rd0
== rn
|| rd1
== rn
)) {
786 if (rd0
== rm
|| rd1
== rm
) {
787 tcg_out_mov_reg(s
, cond
, TCG_REG_TMP
, rn
);
796 tcg_out32(s
, (cond
<< 28) | 0x00c00090 |
797 (rd1
<< 16) | (rd0
<< 12) | (rm
<< 8) | rn
);
800 static inline void tcg_out_sdiv(TCGContext
*s
, int cond
, int rd
, int rn
, int rm
)
802 tcg_out32(s
, 0x0710f010 | (cond
<< 28) | (rd
<< 16) | rn
| (rm
<< 8));
805 static inline void tcg_out_udiv(TCGContext
*s
, int cond
, int rd
, int rn
, int rm
)
807 tcg_out32(s
, 0x0730f010 | (cond
<< 28) | (rd
<< 16) | rn
| (rm
<< 8));
810 static inline void tcg_out_ext8s(TCGContext
*s
, int cond
,
813 if (use_armv6_instructions
) {
815 tcg_out32(s
, 0x06af0070 | (cond
<< 28) | (rd
<< 12) | rn
);
817 tcg_out_dat_reg(s
, cond
, ARITH_MOV
,
818 rd
, 0, rn
, SHIFT_IMM_LSL(24));
819 tcg_out_dat_reg(s
, cond
, ARITH_MOV
,
820 rd
, 0, rd
, SHIFT_IMM_ASR(24));
824 static inline void tcg_out_ext8u(TCGContext
*s
, int cond
,
827 tcg_out_dat_imm(s
, cond
, ARITH_AND
, rd
, rn
, 0xff);
830 static inline void tcg_out_ext16s(TCGContext
*s
, int cond
,
833 if (use_armv6_instructions
) {
835 tcg_out32(s
, 0x06bf0070 | (cond
<< 28) | (rd
<< 12) | rn
);
837 tcg_out_dat_reg(s
, cond
, ARITH_MOV
,
838 rd
, 0, rn
, SHIFT_IMM_LSL(16));
839 tcg_out_dat_reg(s
, cond
, ARITH_MOV
,
840 rd
, 0, rd
, SHIFT_IMM_ASR(16));
844 static inline void tcg_out_ext16u(TCGContext
*s
, int cond
,
847 if (use_armv6_instructions
) {
849 tcg_out32(s
, 0x06ff0070 | (cond
<< 28) | (rd
<< 12) | rn
);
851 tcg_out_dat_reg(s
, cond
, ARITH_MOV
,
852 rd
, 0, rn
, SHIFT_IMM_LSL(16));
853 tcg_out_dat_reg(s
, cond
, ARITH_MOV
,
854 rd
, 0, rd
, SHIFT_IMM_LSR(16));
858 static inline void tcg_out_bswap16s(TCGContext
*s
, int cond
, int rd
, int rn
)
860 if (use_armv6_instructions
) {
862 tcg_out32(s
, 0x06ff0fb0 | (cond
<< 28) | (rd
<< 12) | rn
);
864 tcg_out_dat_reg(s
, cond
, ARITH_MOV
,
865 TCG_REG_TMP
, 0, rn
, SHIFT_IMM_LSL(24));
866 tcg_out_dat_reg(s
, cond
, ARITH_MOV
,
867 TCG_REG_TMP
, 0, TCG_REG_TMP
, SHIFT_IMM_ASR(16));
868 tcg_out_dat_reg(s
, cond
, ARITH_ORR
,
869 rd
, TCG_REG_TMP
, rn
, SHIFT_IMM_LSR(8));
873 static inline void tcg_out_bswap16(TCGContext
*s
, int cond
, int rd
, int rn
)
875 if (use_armv6_instructions
) {
877 tcg_out32(s
, 0x06bf0fb0 | (cond
<< 28) | (rd
<< 12) | rn
);
879 tcg_out_dat_reg(s
, cond
, ARITH_MOV
,
880 TCG_REG_TMP
, 0, rn
, SHIFT_IMM_LSL(24));
881 tcg_out_dat_reg(s
, cond
, ARITH_MOV
,
882 TCG_REG_TMP
, 0, TCG_REG_TMP
, SHIFT_IMM_LSR(16));
883 tcg_out_dat_reg(s
, cond
, ARITH_ORR
,
884 rd
, TCG_REG_TMP
, rn
, SHIFT_IMM_LSR(8));
888 /* swap the two low bytes assuming that the two high input bytes and the
889 two high output bit can hold any value. */
890 static inline void tcg_out_bswap16st(TCGContext
*s
, int cond
, int rd
, int rn
)
892 if (use_armv6_instructions
) {
894 tcg_out32(s
, 0x06bf0fb0 | (cond
<< 28) | (rd
<< 12) | rn
);
896 tcg_out_dat_reg(s
, cond
, ARITH_MOV
,
897 TCG_REG_TMP
, 0, rn
, SHIFT_IMM_LSR(8));
898 tcg_out_dat_imm(s
, cond
, ARITH_AND
, TCG_REG_TMP
, TCG_REG_TMP
, 0xff);
899 tcg_out_dat_reg(s
, cond
, ARITH_ORR
,
900 rd
, TCG_REG_TMP
, rn
, SHIFT_IMM_LSL(8));
904 static inline void tcg_out_bswap32(TCGContext
*s
, int cond
, int rd
, int rn
)
906 if (use_armv6_instructions
) {
908 tcg_out32(s
, 0x06bf0f30 | (cond
<< 28) | (rd
<< 12) | rn
);
910 tcg_out_dat_reg(s
, cond
, ARITH_EOR
,
911 TCG_REG_TMP
, rn
, rn
, SHIFT_IMM_ROR(16));
912 tcg_out_dat_imm(s
, cond
, ARITH_BIC
,
913 TCG_REG_TMP
, TCG_REG_TMP
, 0xff | 0x800);
914 tcg_out_dat_reg(s
, cond
, ARITH_MOV
,
915 rd
, 0, rn
, SHIFT_IMM_ROR(8));
916 tcg_out_dat_reg(s
, cond
, ARITH_EOR
,
917 rd
, rd
, TCG_REG_TMP
, SHIFT_IMM_LSR(8));
921 static inline void tcg_out_deposit(TCGContext
*s
, int cond
, TCGReg rd
,
922 TCGArg a1
, int ofs
, int len
, bool const_a1
)
925 /* bfi becomes bfc with rn == 15. */
929 tcg_out32(s
, 0x07c00010 | (cond
<< 28) | (rd
<< 12) | a1
930 | (ofs
<< 7) | ((ofs
+ len
- 1) << 16));
933 static inline void tcg_out_extract(TCGContext
*s
, int cond
, TCGReg rd
,
934 TCGArg a1
, int ofs
, int len
)
937 tcg_out32(s
, 0x07e00050 | (cond
<< 28) | (rd
<< 12) | a1
938 | (ofs
<< 7) | ((len
- 1) << 16));
941 static inline void tcg_out_sextract(TCGContext
*s
, int cond
, TCGReg rd
,
942 TCGArg a1
, int ofs
, int len
)
945 tcg_out32(s
, 0x07a00050 | (cond
<< 28) | (rd
<< 12) | a1
946 | (ofs
<< 7) | ((len
- 1) << 16));
949 static inline void tcg_out_ld32u(TCGContext
*s
, int cond
,
950 int rd
, int rn
, int32_t offset
)
952 if (offset
> 0xfff || offset
< -0xfff) {
953 tcg_out_movi32(s
, cond
, TCG_REG_TMP
, offset
);
954 tcg_out_ld32_r(s
, cond
, rd
, rn
, TCG_REG_TMP
);
956 tcg_out_ld32_12(s
, cond
, rd
, rn
, offset
);
959 static inline void tcg_out_st32(TCGContext
*s
, int cond
,
960 int rd
, int rn
, int32_t offset
)
962 if (offset
> 0xfff || offset
< -0xfff) {
963 tcg_out_movi32(s
, cond
, TCG_REG_TMP
, offset
);
964 tcg_out_st32_r(s
, cond
, rd
, rn
, TCG_REG_TMP
);
966 tcg_out_st32_12(s
, cond
, rd
, rn
, offset
);
969 static inline void tcg_out_ld16u(TCGContext
*s
, int cond
,
970 int rd
, int rn
, int32_t offset
)
972 if (offset
> 0xff || offset
< -0xff) {
973 tcg_out_movi32(s
, cond
, TCG_REG_TMP
, offset
);
974 tcg_out_ld16u_r(s
, cond
, rd
, rn
, TCG_REG_TMP
);
976 tcg_out_ld16u_8(s
, cond
, rd
, rn
, offset
);
979 static inline void tcg_out_ld16s(TCGContext
*s
, int cond
,
980 int rd
, int rn
, int32_t offset
)
982 if (offset
> 0xff || offset
< -0xff) {
983 tcg_out_movi32(s
, cond
, TCG_REG_TMP
, offset
);
984 tcg_out_ld16s_r(s
, cond
, rd
, rn
, TCG_REG_TMP
);
986 tcg_out_ld16s_8(s
, cond
, rd
, rn
, offset
);
989 static inline void tcg_out_st16(TCGContext
*s
, int cond
,
990 int rd
, int rn
, int32_t offset
)
992 if (offset
> 0xff || offset
< -0xff) {
993 tcg_out_movi32(s
, cond
, TCG_REG_TMP
, offset
);
994 tcg_out_st16_r(s
, cond
, rd
, rn
, TCG_REG_TMP
);
996 tcg_out_st16_8(s
, cond
, rd
, rn
, offset
);
999 static inline void tcg_out_ld8u(TCGContext
*s
, int cond
,
1000 int rd
, int rn
, int32_t offset
)
1002 if (offset
> 0xfff || offset
< -0xfff) {
1003 tcg_out_movi32(s
, cond
, TCG_REG_TMP
, offset
);
1004 tcg_out_ld8_r(s
, cond
, rd
, rn
, TCG_REG_TMP
);
1006 tcg_out_ld8_12(s
, cond
, rd
, rn
, offset
);
1009 static inline void tcg_out_ld8s(TCGContext
*s
, int cond
,
1010 int rd
, int rn
, int32_t offset
)
1012 if (offset
> 0xff || offset
< -0xff) {
1013 tcg_out_movi32(s
, cond
, TCG_REG_TMP
, offset
);
1014 tcg_out_ld8s_r(s
, cond
, rd
, rn
, TCG_REG_TMP
);
1016 tcg_out_ld8s_8(s
, cond
, rd
, rn
, offset
);
1019 static inline void tcg_out_st8(TCGContext
*s
, int cond
,
1020 int rd
, int rn
, int32_t offset
)
1022 if (offset
> 0xfff || offset
< -0xfff) {
1023 tcg_out_movi32(s
, cond
, TCG_REG_TMP
, offset
);
1024 tcg_out_st8_r(s
, cond
, rd
, rn
, TCG_REG_TMP
);
1026 tcg_out_st8_12(s
, cond
, rd
, rn
, offset
);
1029 /* The _goto case is normally between TBs within the same code buffer, and
1030 * with the code buffer limited to 16MB we wouldn't need the long case.
1031 * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
1033 static void tcg_out_goto(TCGContext
*s
, int cond
, tcg_insn_unit
*addr
)
1035 intptr_t addri
= (intptr_t)addr
;
1036 ptrdiff_t disp
= tcg_pcrel_diff(s
, addr
);
1038 if ((addri
& 1) == 0 && disp
- 8 < 0x01fffffd && disp
- 8 > -0x01fffffd) {
1039 tcg_out_b(s
, cond
, disp
);
1042 tcg_out_movi_pool(s
, cond
, TCG_REG_PC
, addri
);
1045 /* The call case is mostly used for helpers - so it's not unreasonable
1046 * for them to be beyond branch range */
1047 static void tcg_out_call(TCGContext
*s
, tcg_insn_unit
*addr
)
1049 intptr_t addri
= (intptr_t)addr
;
1050 ptrdiff_t disp
= tcg_pcrel_diff(s
, addr
);
1052 if (disp
- 8 < 0x02000000 && disp
- 8 >= -0x02000000) {
1054 /* Use BLX if the target is in Thumb mode */
1055 if (!use_armv5t_instructions
) {
1058 tcg_out_blx_imm(s
, disp
);
1060 tcg_out_bl(s
, COND_AL
, disp
);
1062 } else if (use_armv7_instructions
) {
1063 tcg_out_movi32(s
, COND_AL
, TCG_REG_TMP
, addri
);
1064 tcg_out_blx(s
, COND_AL
, TCG_REG_TMP
);
1066 /* ??? Know that movi_pool emits exactly 2 insns. */
1067 tcg_out_dat_imm(s
, COND_AL
, ARITH_ADD
, TCG_REG_R14
, TCG_REG_PC
, 4);
1068 tcg_out_movi_pool(s
, COND_AL
, TCG_REG_PC
, addri
);
1072 static inline void tcg_out_goto_label(TCGContext
*s
, int cond
, TCGLabel
*l
)
1075 tcg_out_goto(s
, cond
, l
->u
.value_ptr
);
1077 tcg_out_reloc(s
, s
->code_ptr
, R_ARM_PC24
, l
, 0);
1078 tcg_out_b(s
, cond
, 0);
1082 static inline void tcg_out_mb(TCGContext
*s
, TCGArg a0
)
1084 if (use_armv7_instructions
) {
1085 tcg_out32(s
, INSN_DMB_ISH
);
1086 } else if (use_armv6_instructions
) {
1087 tcg_out32(s
, INSN_DMB_MCR
);
1091 static TCGCond
tcg_out_cmp2(TCGContext
*s
, const TCGArg
*args
,
1092 const int *const_args
)
1094 TCGReg al
= args
[0];
1095 TCGReg ah
= args
[1];
1096 TCGArg bl
= args
[2];
1097 TCGArg bh
= args
[3];
1098 TCGCond cond
= args
[4];
1099 int const_bl
= const_args
[2];
1100 int const_bh
= const_args
[3];
1109 /* We perform a conditional comparision. If the high half is
1110 equal, then overwrite the flags with the comparison of the
1111 low half. The resulting flags cover the whole. */
1112 tcg_out_dat_rI(s
, COND_AL
, ARITH_CMP
, 0, ah
, bh
, const_bh
);
1113 tcg_out_dat_rI(s
, COND_EQ
, ARITH_CMP
, 0, al
, bl
, const_bl
);
1118 /* We perform a double-word subtraction and examine the result.
1119 We do not actually need the result of the subtract, so the
1120 low part "subtract" is a compare. For the high half we have
1121 no choice but to compute into a temporary. */
1122 tcg_out_dat_rI(s
, COND_AL
, ARITH_CMP
, 0, al
, bl
, const_bl
);
1123 tcg_out_dat_rI(s
, COND_AL
, ARITH_SBC
| TO_CPSR
,
1124 TCG_REG_TMP
, ah
, bh
, const_bh
);
1129 /* Similar, but with swapped arguments, via reversed subtract. */
1130 tcg_out_dat_rI(s
, COND_AL
, ARITH_RSB
| TO_CPSR
,
1131 TCG_REG_TMP
, al
, bl
, const_bl
);
1132 tcg_out_dat_rI(s
, COND_AL
, ARITH_RSC
| TO_CPSR
,
1133 TCG_REG_TMP
, ah
, bh
, const_bh
);
1134 return tcg_swap_cond(cond
);
1137 g_assert_not_reached();
1141 #ifdef CONFIG_SOFTMMU
1142 #include "tcg-ldst.inc.c"
1144 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
1145 * int mmu_idx, uintptr_t ra)
1147 static void * const qemu_ld_helpers
[16] = {
1148 [MO_UB
] = helper_ret_ldub_mmu
,
1149 [MO_SB
] = helper_ret_ldsb_mmu
,
1151 [MO_LEUW
] = helper_le_lduw_mmu
,
1152 [MO_LEUL
] = helper_le_ldul_mmu
,
1153 [MO_LEQ
] = helper_le_ldq_mmu
,
1154 [MO_LESW
] = helper_le_ldsw_mmu
,
1155 [MO_LESL
] = helper_le_ldul_mmu
,
1157 [MO_BEUW
] = helper_be_lduw_mmu
,
1158 [MO_BEUL
] = helper_be_ldul_mmu
,
1159 [MO_BEQ
] = helper_be_ldq_mmu
,
1160 [MO_BESW
] = helper_be_ldsw_mmu
,
1161 [MO_BESL
] = helper_be_ldul_mmu
,
1164 /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
1165 * uintxx_t val, int mmu_idx, uintptr_t ra)
1167 static void * const qemu_st_helpers
[16] = {
1168 [MO_UB
] = helper_ret_stb_mmu
,
1169 [MO_LEUW
] = helper_le_stw_mmu
,
1170 [MO_LEUL
] = helper_le_stl_mmu
,
1171 [MO_LEQ
] = helper_le_stq_mmu
,
1172 [MO_BEUW
] = helper_be_stw_mmu
,
1173 [MO_BEUL
] = helper_be_stl_mmu
,
1174 [MO_BEQ
] = helper_be_stq_mmu
,
1177 /* Helper routines for marshalling helper function arguments into
1178 * the correct registers and stack.
1179 * argreg is where we want to put this argument, arg is the argument itself.
1180 * Return value is the updated argreg ready for the next call.
1181 * Note that argreg 0..3 is real registers, 4+ on stack.
1183 * We provide routines for arguments which are: immediate, 32 bit
1184 * value in register, 16 and 8 bit values in register (which must be zero
1185 * extended before use) and 64 bit value in a lo:hi register pair.
1187 #define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG) \
1188 static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg) \
1191 MOV_ARG(s, COND_AL, argreg, arg); \
1193 int ofs = (argreg - 4) * 4; \
1195 tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE); \
1196 tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs); \
1198 return argreg + 1; \
1201 DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32
, uint32_t, tcg_out_movi32
,
1202 (tcg_out_movi32(s
, COND_AL
, TCG_REG_TMP
, arg
), arg
= TCG_REG_TMP
))
1203 DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8
, TCGReg
, tcg_out_ext8u
,
1204 (tcg_out_ext8u(s
, COND_AL
, TCG_REG_TMP
, arg
), arg
= TCG_REG_TMP
))
1205 DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16
, TCGReg
, tcg_out_ext16u
,
1206 (tcg_out_ext16u(s
, COND_AL
, TCG_REG_TMP
, arg
), arg
= TCG_REG_TMP
))
1207 DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32
, TCGReg
, tcg_out_mov_reg
, )
1209 static TCGReg
tcg_out_arg_reg64(TCGContext
*s
, TCGReg argreg
,
1210 TCGReg arglo
, TCGReg arghi
)
1212 /* 64 bit arguments must go in even/odd register pairs
1213 * and in 8-aligned stack slots.
1218 if (use_armv6_instructions
&& argreg
>= 4
1219 && (arglo
& 1) == 0 && arghi
== arglo
+ 1) {
1220 tcg_out_strd_8(s
, COND_AL
, arglo
,
1221 TCG_REG_CALL_STACK
, (argreg
- 4) * 4);
1224 argreg
= tcg_out_arg_reg32(s
, argreg
, arglo
);
1225 argreg
= tcg_out_arg_reg32(s
, argreg
, arghi
);
1230 #define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
1232 /* We're expecting to use an 8-bit immediate and to mask. */
1233 QEMU_BUILD_BUG_ON(CPU_TLB_BITS
> 8);
1235 /* Load and compare a TLB entry, leaving the flags set. Returns the register
1236 containing the addend of the tlb entry. Clobbers R0, R1, R2, TMP. */
1238 static TCGReg
tcg_out_tlb_read(TCGContext
*s
, TCGReg addrlo
, TCGReg addrhi
,
1239 TCGMemOp opc
, int mem_index
, bool is_load
)
1241 TCGReg base
= TCG_AREG0
;
1244 ? offsetof(CPUArchState
, tlb_table
[mem_index
][0].addr_read
)
1245 : offsetof(CPUArchState
, tlb_table
[mem_index
][0].addr_write
));
1246 int add_off
= offsetof(CPUArchState
, tlb_table
[mem_index
][0].addend
);
1248 unsigned s_bits
= opc
& MO_SIZE
;
1249 unsigned a_bits
= get_alignment_bits(opc
);
1251 /* V7 generates the following:
1252 * ubfx r0, addrlo, #TARGET_PAGE_BITS, #CPU_TLB_BITS
1253 * add r2, env, #high
1254 * add r2, r2, r0, lsl #CPU_TLB_ENTRY_BITS
1255 * ldr r0, [r2, #cmp]
1256 * ldr r2, [r2, #add]
1257 * movw tmp, #page_align_mask
1258 * bic tmp, addrlo, tmp
1261 * Otherwise we generate:
1262 * shr tmp, addrlo, #TARGET_PAGE_BITS
1263 * add r2, env, #high
1264 * and r0, tmp, #(CPU_TLB_SIZE - 1)
1265 * add r2, r2, r0, lsl #CPU_TLB_ENTRY_BITS
1266 * ldr r0, [r2, #cmp]
1267 * ldr r2, [r2, #add]
1268 * tst addrlo, #s_mask
1269 * cmpeq r0, tmp, lsl #TARGET_PAGE_BITS
1271 if (use_armv7_instructions
) {
1272 tcg_out_extract(s
, COND_AL
, TCG_REG_R0
, addrlo
,
1273 TARGET_PAGE_BITS
, CPU_TLB_BITS
);
1275 tcg_out_dat_reg(s
, COND_AL
, ARITH_MOV
, TCG_REG_TMP
,
1276 0, addrlo
, SHIFT_IMM_LSR(TARGET_PAGE_BITS
));
1279 /* Add portions of the offset until the memory access is in range.
1280 * If we plan on using ldrd, reduce to an 8-bit offset; otherwise
1281 * we can use a 12-bit offset. */
1282 if (use_armv6_instructions
&& TARGET_LONG_BITS
== 64) {
1287 while (cmp_off
> mask_off
) {
1288 int shift
= ctz32(cmp_off
& ~mask_off
) & ~1;
1289 int rot
= ((32 - shift
) << 7) & 0xf00;
1290 int addend
= cmp_off
& (0xff << shift
);
1291 tcg_out_dat_imm(s
, COND_AL
, ARITH_ADD
, TCG_REG_R2
, base
,
1292 rot
| ((cmp_off
>> shift
) & 0xff));
1298 if (!use_armv7_instructions
) {
1299 tcg_out_dat_imm(s
, COND_AL
, ARITH_AND
,
1300 TCG_REG_R0
, TCG_REG_TMP
, CPU_TLB_SIZE
- 1);
1302 tcg_out_dat_reg(s
, COND_AL
, ARITH_ADD
, TCG_REG_R2
, base
,
1303 TCG_REG_R0
, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS
));
1305 /* Load the tlb comparator. Use ldrd if needed and available,
1306 but due to how the pointer needs setting up, ldm isn't useful.
1307 Base arm5 doesn't have ldrd, but armv5te does. */
1308 if (use_armv6_instructions
&& TARGET_LONG_BITS
== 64) {
1309 tcg_out_ldrd_8(s
, COND_AL
, TCG_REG_R0
, TCG_REG_R2
, cmp_off
);
1311 tcg_out_ld32_12(s
, COND_AL
, TCG_REG_R0
, TCG_REG_R2
, cmp_off
);
1312 if (TARGET_LONG_BITS
== 64) {
1313 tcg_out_ld32_12(s
, COND_AL
, TCG_REG_R1
, TCG_REG_R2
, cmp_off
+ 4);
1317 /* Load the tlb addend. */
1318 tcg_out_ld32_12(s
, COND_AL
, TCG_REG_R2
, TCG_REG_R2
, add_off
);
1320 /* Check alignment. We don't support inline unaligned acceses,
1321 but we can easily support overalignment checks. */
1322 if (a_bits
< s_bits
) {
1326 if (use_armv7_instructions
) {
1327 tcg_target_ulong mask
= ~(TARGET_PAGE_MASK
| ((1 << a_bits
) - 1));
1328 int rot
= encode_imm(mask
);
1331 tcg_out_dat_imm(s
, COND_AL
, ARITH_BIC
, TCG_REG_TMP
, addrlo
,
1332 rotl(mask
, rot
) | (rot
<< 7));
1334 tcg_out_movi32(s
, COND_AL
, TCG_REG_TMP
, mask
);
1335 tcg_out_dat_reg(s
, COND_AL
, ARITH_BIC
, TCG_REG_TMP
,
1336 addrlo
, TCG_REG_TMP
, 0);
1338 tcg_out_dat_reg(s
, COND_AL
, ARITH_CMP
, 0, TCG_REG_R0
, TCG_REG_TMP
, 0);
1341 tcg_out_dat_imm(s
, COND_AL
, ARITH_TST
, 0, addrlo
,
1344 tcg_out_dat_reg(s
, (a_bits
? COND_EQ
: COND_AL
), ARITH_CMP
,
1345 0, TCG_REG_R0
, TCG_REG_TMP
,
1346 SHIFT_IMM_LSL(TARGET_PAGE_BITS
));
1349 if (TARGET_LONG_BITS
== 64) {
1350 tcg_out_dat_reg(s
, COND_EQ
, ARITH_CMP
, 0, TCG_REG_R1
, addrhi
, 0);
1356 /* Record the context of a call to the out of line helper code for the slow
1357 path for a load or store, so that we can later generate the correct
1359 static void add_qemu_ldst_label(TCGContext
*s
, bool is_ld
, TCGMemOpIdx oi
,
1360 TCGReg datalo
, TCGReg datahi
, TCGReg addrlo
,
1361 TCGReg addrhi
, tcg_insn_unit
*raddr
,
1362 tcg_insn_unit
*label_ptr
)
1364 TCGLabelQemuLdst
*label
= new_ldst_label(s
);
1366 label
->is_ld
= is_ld
;
1368 label
->datalo_reg
= datalo
;
1369 label
->datahi_reg
= datahi
;
1370 label
->addrlo_reg
= addrlo
;
1371 label
->addrhi_reg
= addrhi
;
1372 label
->raddr
= raddr
;
1373 label
->label_ptr
[0] = label_ptr
;
1376 static void tcg_out_qemu_ld_slow_path(TCGContext
*s
, TCGLabelQemuLdst
*lb
)
1378 TCGReg argreg
, datalo
, datahi
;
1379 TCGMemOpIdx oi
= lb
->oi
;
1380 TCGMemOp opc
= get_memop(oi
);
1383 bool ok
= reloc_pc24(lb
->label_ptr
[0], s
->code_ptr
);
1384 tcg_debug_assert(ok
);
1386 argreg
= tcg_out_arg_reg32(s
, TCG_REG_R0
, TCG_AREG0
);
1387 if (TARGET_LONG_BITS
== 64) {
1388 argreg
= tcg_out_arg_reg64(s
, argreg
, lb
->addrlo_reg
, lb
->addrhi_reg
);
1390 argreg
= tcg_out_arg_reg32(s
, argreg
, lb
->addrlo_reg
);
1392 argreg
= tcg_out_arg_imm32(s
, argreg
, oi
);
1393 argreg
= tcg_out_arg_reg32(s
, argreg
, TCG_REG_R14
);
1395 /* For armv6 we can use the canonical unsigned helpers and minimize
1396 icache usage. For pre-armv6, use the signed helpers since we do
1397 not have a single insn sign-extend. */
1398 if (use_armv6_instructions
) {
1399 func
= qemu_ld_helpers
[opc
& (MO_BSWAP
| MO_SIZE
)];
1401 func
= qemu_ld_helpers
[opc
& (MO_BSWAP
| MO_SSIZE
)];
1402 if (opc
& MO_SIGN
) {
1406 tcg_out_call(s
, func
);
1408 datalo
= lb
->datalo_reg
;
1409 datahi
= lb
->datahi_reg
;
1410 switch (opc
& MO_SSIZE
) {
1412 tcg_out_ext8s(s
, COND_AL
, datalo
, TCG_REG_R0
);
1415 tcg_out_ext16s(s
, COND_AL
, datalo
, TCG_REG_R0
);
1418 tcg_out_mov_reg(s
, COND_AL
, datalo
, TCG_REG_R0
);
1421 if (datalo
!= TCG_REG_R1
) {
1422 tcg_out_mov_reg(s
, COND_AL
, datalo
, TCG_REG_R0
);
1423 tcg_out_mov_reg(s
, COND_AL
, datahi
, TCG_REG_R1
);
1424 } else if (datahi
!= TCG_REG_R0
) {
1425 tcg_out_mov_reg(s
, COND_AL
, datahi
, TCG_REG_R1
);
1426 tcg_out_mov_reg(s
, COND_AL
, datalo
, TCG_REG_R0
);
1428 tcg_out_mov_reg(s
, COND_AL
, TCG_REG_TMP
, TCG_REG_R0
);
1429 tcg_out_mov_reg(s
, COND_AL
, datahi
, TCG_REG_R1
);
1430 tcg_out_mov_reg(s
, COND_AL
, datalo
, TCG_REG_TMP
);
1435 tcg_out_goto(s
, COND_AL
, lb
->raddr
);
1438 static void tcg_out_qemu_st_slow_path(TCGContext
*s
, TCGLabelQemuLdst
*lb
)
1440 TCGReg argreg
, datalo
, datahi
;
1441 TCGMemOpIdx oi
= lb
->oi
;
1442 TCGMemOp opc
= get_memop(oi
);
1444 bool ok
= reloc_pc24(lb
->label_ptr
[0], s
->code_ptr
);
1445 tcg_debug_assert(ok
);
1447 argreg
= TCG_REG_R0
;
1448 argreg
= tcg_out_arg_reg32(s
, argreg
, TCG_AREG0
);
1449 if (TARGET_LONG_BITS
== 64) {
1450 argreg
= tcg_out_arg_reg64(s
, argreg
, lb
->addrlo_reg
, lb
->addrhi_reg
);
1452 argreg
= tcg_out_arg_reg32(s
, argreg
, lb
->addrlo_reg
);
1455 datalo
= lb
->datalo_reg
;
1456 datahi
= lb
->datahi_reg
;
1457 switch (opc
& MO_SIZE
) {
1459 argreg
= tcg_out_arg_reg8(s
, argreg
, datalo
);
1462 argreg
= tcg_out_arg_reg16(s
, argreg
, datalo
);
1466 argreg
= tcg_out_arg_reg32(s
, argreg
, datalo
);
1469 argreg
= tcg_out_arg_reg64(s
, argreg
, datalo
, datahi
);
1473 argreg
= tcg_out_arg_imm32(s
, argreg
, oi
);
1474 argreg
= tcg_out_arg_reg32(s
, argreg
, TCG_REG_R14
);
1476 /* Tail-call to the helper, which will return to the fast path. */
1477 tcg_out_goto(s
, COND_AL
, qemu_st_helpers
[opc
& (MO_BSWAP
| MO_SIZE
)]);
1479 #endif /* SOFTMMU */
1481 static inline void tcg_out_qemu_ld_index(TCGContext
*s
, TCGMemOp opc
,
1482 TCGReg datalo
, TCGReg datahi
,
1483 TCGReg addrlo
, TCGReg addend
)
1485 TCGMemOp bswap
= opc
& MO_BSWAP
;
1487 switch (opc
& MO_SSIZE
) {
1489 tcg_out_ld8_r(s
, COND_AL
, datalo
, addrlo
, addend
);
1492 tcg_out_ld8s_r(s
, COND_AL
, datalo
, addrlo
, addend
);
1495 tcg_out_ld16u_r(s
, COND_AL
, datalo
, addrlo
, addend
);
1497 tcg_out_bswap16(s
, COND_AL
, datalo
, datalo
);
1502 tcg_out_ld16u_r(s
, COND_AL
, datalo
, addrlo
, addend
);
1503 tcg_out_bswap16s(s
, COND_AL
, datalo
, datalo
);
1505 tcg_out_ld16s_r(s
, COND_AL
, datalo
, addrlo
, addend
);
1510 tcg_out_ld32_r(s
, COND_AL
, datalo
, addrlo
, addend
);
1512 tcg_out_bswap32(s
, COND_AL
, datalo
, datalo
);
1517 TCGReg dl
= (bswap
? datahi
: datalo
);
1518 TCGReg dh
= (bswap
? datalo
: datahi
);
1520 /* Avoid ldrd for user-only emulation, to handle unaligned. */
1521 if (USING_SOFTMMU
&& use_armv6_instructions
1522 && (dl
& 1) == 0 && dh
== dl
+ 1) {
1523 tcg_out_ldrd_r(s
, COND_AL
, dl
, addrlo
, addend
);
1524 } else if (dl
!= addend
) {
1525 tcg_out_ld32_rwb(s
, COND_AL
, dl
, addend
, addrlo
);
1526 tcg_out_ld32_12(s
, COND_AL
, dh
, addend
, 4);
1528 tcg_out_dat_reg(s
, COND_AL
, ARITH_ADD
, TCG_REG_TMP
,
1529 addend
, addrlo
, SHIFT_IMM_LSL(0));
1530 tcg_out_ld32_12(s
, COND_AL
, dl
, TCG_REG_TMP
, 0);
1531 tcg_out_ld32_12(s
, COND_AL
, dh
, TCG_REG_TMP
, 4);
1534 tcg_out_bswap32(s
, COND_AL
, dl
, dl
);
1535 tcg_out_bswap32(s
, COND_AL
, dh
, dh
);
1542 static inline void tcg_out_qemu_ld_direct(TCGContext
*s
, TCGMemOp opc
,
1543 TCGReg datalo
, TCGReg datahi
,
1546 TCGMemOp bswap
= opc
& MO_BSWAP
;
1548 switch (opc
& MO_SSIZE
) {
1550 tcg_out_ld8_12(s
, COND_AL
, datalo
, addrlo
, 0);
1553 tcg_out_ld8s_8(s
, COND_AL
, datalo
, addrlo
, 0);
1556 tcg_out_ld16u_8(s
, COND_AL
, datalo
, addrlo
, 0);
1558 tcg_out_bswap16(s
, COND_AL
, datalo
, datalo
);
1563 tcg_out_ld16u_8(s
, COND_AL
, datalo
, addrlo
, 0);
1564 tcg_out_bswap16s(s
, COND_AL
, datalo
, datalo
);
1566 tcg_out_ld16s_8(s
, COND_AL
, datalo
, addrlo
, 0);
1571 tcg_out_ld32_12(s
, COND_AL
, datalo
, addrlo
, 0);
1573 tcg_out_bswap32(s
, COND_AL
, datalo
, datalo
);
1578 TCGReg dl
= (bswap
? datahi
: datalo
);
1579 TCGReg dh
= (bswap
? datalo
: datahi
);
1581 /* Avoid ldrd for user-only emulation, to handle unaligned. */
1582 if (USING_SOFTMMU
&& use_armv6_instructions
1583 && (dl
& 1) == 0 && dh
== dl
+ 1) {
1584 tcg_out_ldrd_8(s
, COND_AL
, dl
, addrlo
, 0);
1585 } else if (dl
== addrlo
) {
1586 tcg_out_ld32_12(s
, COND_AL
, dh
, addrlo
, bswap
? 0 : 4);
1587 tcg_out_ld32_12(s
, COND_AL
, dl
, addrlo
, bswap
? 4 : 0);
1589 tcg_out_ld32_12(s
, COND_AL
, dl
, addrlo
, bswap
? 4 : 0);
1590 tcg_out_ld32_12(s
, COND_AL
, dh
, addrlo
, bswap
? 0 : 4);
1593 tcg_out_bswap32(s
, COND_AL
, dl
, dl
);
1594 tcg_out_bswap32(s
, COND_AL
, dh
, dh
);
1601 static void tcg_out_qemu_ld(TCGContext
*s
, const TCGArg
*args
, bool is64
)
1603 TCGReg addrlo
, datalo
, datahi
, addrhi
__attribute__((unused
));
1606 #ifdef CONFIG_SOFTMMU
1609 tcg_insn_unit
*label_ptr
;
1613 datahi
= (is64
? *args
++ : 0);
1615 addrhi
= (TARGET_LONG_BITS
== 64 ? *args
++ : 0);
1617 opc
= get_memop(oi
);
1619 #ifdef CONFIG_SOFTMMU
1620 mem_index
= get_mmuidx(oi
);
1621 addend
= tcg_out_tlb_read(s
, addrlo
, addrhi
, opc
, mem_index
, 1);
1623 /* This a conditional BL only to load a pointer within this opcode into LR
1624 for the slow path. We will not be using the value for a tail call. */
1625 label_ptr
= s
->code_ptr
;
1626 tcg_out_bl(s
, COND_NE
, 0);
1628 tcg_out_qemu_ld_index(s
, opc
, datalo
, datahi
, addrlo
, addend
);
1630 add_qemu_ldst_label(s
, true, oi
, datalo
, datahi
, addrlo
, addrhi
,
1631 s
->code_ptr
, label_ptr
);
1632 #else /* !CONFIG_SOFTMMU */
1634 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_TMP
, guest_base
);
1635 tcg_out_qemu_ld_index(s
, opc
, datalo
, datahi
, addrlo
, TCG_REG_TMP
);
1637 tcg_out_qemu_ld_direct(s
, opc
, datalo
, datahi
, addrlo
);
1642 static inline void tcg_out_qemu_st_index(TCGContext
*s
, int cond
, TCGMemOp opc
,
1643 TCGReg datalo
, TCGReg datahi
,
1644 TCGReg addrlo
, TCGReg addend
)
1646 TCGMemOp bswap
= opc
& MO_BSWAP
;
1648 switch (opc
& MO_SIZE
) {
1650 tcg_out_st8_r(s
, cond
, datalo
, addrlo
, addend
);
1654 tcg_out_bswap16st(s
, cond
, TCG_REG_R0
, datalo
);
1655 tcg_out_st16_r(s
, cond
, TCG_REG_R0
, addrlo
, addend
);
1657 tcg_out_st16_r(s
, cond
, datalo
, addrlo
, addend
);
1663 tcg_out_bswap32(s
, cond
, TCG_REG_R0
, datalo
);
1664 tcg_out_st32_r(s
, cond
, TCG_REG_R0
, addrlo
, addend
);
1666 tcg_out_st32_r(s
, cond
, datalo
, addrlo
, addend
);
1670 /* Avoid strd for user-only emulation, to handle unaligned. */
1672 tcg_out_bswap32(s
, cond
, TCG_REG_R0
, datahi
);
1673 tcg_out_st32_rwb(s
, cond
, TCG_REG_R0
, addend
, addrlo
);
1674 tcg_out_bswap32(s
, cond
, TCG_REG_R0
, datalo
);
1675 tcg_out_st32_12(s
, cond
, TCG_REG_R0
, addend
, 4);
1676 } else if (USING_SOFTMMU
&& use_armv6_instructions
1677 && (datalo
& 1) == 0 && datahi
== datalo
+ 1) {
1678 tcg_out_strd_r(s
, cond
, datalo
, addrlo
, addend
);
1680 tcg_out_st32_rwb(s
, cond
, datalo
, addend
, addrlo
);
1681 tcg_out_st32_12(s
, cond
, datahi
, addend
, 4);
1687 static inline void tcg_out_qemu_st_direct(TCGContext
*s
, TCGMemOp opc
,
1688 TCGReg datalo
, TCGReg datahi
,
1691 TCGMemOp bswap
= opc
& MO_BSWAP
;
1693 switch (opc
& MO_SIZE
) {
1695 tcg_out_st8_12(s
, COND_AL
, datalo
, addrlo
, 0);
1699 tcg_out_bswap16st(s
, COND_AL
, TCG_REG_R0
, datalo
);
1700 tcg_out_st16_8(s
, COND_AL
, TCG_REG_R0
, addrlo
, 0);
1702 tcg_out_st16_8(s
, COND_AL
, datalo
, addrlo
, 0);
1708 tcg_out_bswap32(s
, COND_AL
, TCG_REG_R0
, datalo
);
1709 tcg_out_st32_12(s
, COND_AL
, TCG_REG_R0
, addrlo
, 0);
1711 tcg_out_st32_12(s
, COND_AL
, datalo
, addrlo
, 0);
1715 /* Avoid strd for user-only emulation, to handle unaligned. */
1717 tcg_out_bswap32(s
, COND_AL
, TCG_REG_R0
, datahi
);
1718 tcg_out_st32_12(s
, COND_AL
, TCG_REG_R0
, addrlo
, 0);
1719 tcg_out_bswap32(s
, COND_AL
, TCG_REG_R0
, datalo
);
1720 tcg_out_st32_12(s
, COND_AL
, TCG_REG_R0
, addrlo
, 4);
1721 } else if (USING_SOFTMMU
&& use_armv6_instructions
1722 && (datalo
& 1) == 0 && datahi
== datalo
+ 1) {
1723 tcg_out_strd_8(s
, COND_AL
, datalo
, addrlo
, 0);
1725 tcg_out_st32_12(s
, COND_AL
, datalo
, addrlo
, 0);
1726 tcg_out_st32_12(s
, COND_AL
, datahi
, addrlo
, 4);
1732 static void tcg_out_qemu_st(TCGContext
*s
, const TCGArg
*args
, bool is64
)
1734 TCGReg addrlo
, datalo
, datahi
, addrhi
__attribute__((unused
));
1737 #ifdef CONFIG_SOFTMMU
1740 tcg_insn_unit
*label_ptr
;
1744 datahi
= (is64
? *args
++ : 0);
1746 addrhi
= (TARGET_LONG_BITS
== 64 ? *args
++ : 0);
1748 opc
= get_memop(oi
);
1750 #ifdef CONFIG_SOFTMMU
1751 mem_index
= get_mmuidx(oi
);
1752 addend
= tcg_out_tlb_read(s
, addrlo
, addrhi
, opc
, mem_index
, 0);
1754 tcg_out_qemu_st_index(s
, COND_EQ
, opc
, datalo
, datahi
, addrlo
, addend
);
1756 /* The conditional call must come last, as we're going to return here. */
1757 label_ptr
= s
->code_ptr
;
1758 tcg_out_bl(s
, COND_NE
, 0);
1760 add_qemu_ldst_label(s
, false, oi
, datalo
, datahi
, addrlo
, addrhi
,
1761 s
->code_ptr
, label_ptr
);
1762 #else /* !CONFIG_SOFTMMU */
1764 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_TMP
, guest_base
);
1765 tcg_out_qemu_st_index(s
, COND_AL
, opc
, datalo
,
1766 datahi
, addrlo
, TCG_REG_TMP
);
1768 tcg_out_qemu_st_direct(s
, opc
, datalo
, datahi
, addrlo
);
1773 static tcg_insn_unit
*tb_ret_addr
;
1775 static inline void tcg_out_op(TCGContext
*s
, TCGOpcode opc
,
1776 const TCGArg
*args
, const int *const_args
)
1778 TCGArg a0
, a1
, a2
, a3
, a4
, a5
;
1782 case INDEX_op_exit_tb
:
1783 /* Reuse the zeroing that exists for goto_ptr. */
1786 tcg_out_goto(s
, COND_AL
, s
->code_gen_epilogue
);
1788 tcg_out_movi32(s
, COND_AL
, TCG_REG_R0
, args
[0]);
1789 tcg_out_goto(s
, COND_AL
, tb_ret_addr
);
1792 case INDEX_op_goto_tb
:
1794 /* Indirect jump method */
1795 intptr_t ptr
, dif
, dil
;
1796 TCGReg base
= TCG_REG_PC
;
1798 tcg_debug_assert(s
->tb_jmp_insn_offset
== 0);
1799 ptr
= (intptr_t)(s
->tb_jmp_target_addr
+ args
[0]);
1800 dif
= ptr
- ((intptr_t)s
->code_ptr
+ 8);
1801 dil
= sextract32(dif
, 0, 12);
1803 /* The TB is close, but outside the 12 bits addressable by
1804 the load. We can extend this to 20 bits with a sub of a
1805 shifted immediate from pc. In the vastly unlikely event
1806 the code requires more than 1MB, we'll use 2 insns and
1809 tcg_out_movi32(s
, COND_AL
, base
, ptr
- dil
);
1811 tcg_out_ld32_12(s
, COND_AL
, TCG_REG_PC
, base
, dil
);
1812 set_jmp_reset_offset(s
, args
[0]);
1815 case INDEX_op_goto_ptr
:
1816 tcg_out_bx(s
, COND_AL
, args
[0]);
1819 tcg_out_goto_label(s
, COND_AL
, arg_label(args
[0]));
1822 case INDEX_op_ld8u_i32
:
1823 tcg_out_ld8u(s
, COND_AL
, args
[0], args
[1], args
[2]);
1825 case INDEX_op_ld8s_i32
:
1826 tcg_out_ld8s(s
, COND_AL
, args
[0], args
[1], args
[2]);
1828 case INDEX_op_ld16u_i32
:
1829 tcg_out_ld16u(s
, COND_AL
, args
[0], args
[1], args
[2]);
1831 case INDEX_op_ld16s_i32
:
1832 tcg_out_ld16s(s
, COND_AL
, args
[0], args
[1], args
[2]);
1834 case INDEX_op_ld_i32
:
1835 tcg_out_ld32u(s
, COND_AL
, args
[0], args
[1], args
[2]);
1837 case INDEX_op_st8_i32
:
1838 tcg_out_st8(s
, COND_AL
, args
[0], args
[1], args
[2]);
1840 case INDEX_op_st16_i32
:
1841 tcg_out_st16(s
, COND_AL
, args
[0], args
[1], args
[2]);
1843 case INDEX_op_st_i32
:
1844 tcg_out_st32(s
, COND_AL
, args
[0], args
[1], args
[2]);
1847 case INDEX_op_movcond_i32
:
1848 /* Constraints mean that v2 is always in the same register as dest,
1849 * so we only need to do "if condition passed, move v1 to dest".
1851 tcg_out_dat_rIN(s
, COND_AL
, ARITH_CMP
, ARITH_CMN
, 0,
1852 args
[1], args
[2], const_args
[2]);
1853 tcg_out_dat_rIK(s
, tcg_cond_to_arm_cond
[args
[5]], ARITH_MOV
,
1854 ARITH_MVN
, args
[0], 0, args
[3], const_args
[3]);
1856 case INDEX_op_add_i32
:
1857 tcg_out_dat_rIN(s
, COND_AL
, ARITH_ADD
, ARITH_SUB
,
1858 args
[0], args
[1], args
[2], const_args
[2]);
1860 case INDEX_op_sub_i32
:
1861 if (const_args
[1]) {
1862 if (const_args
[2]) {
1863 tcg_out_movi32(s
, COND_AL
, args
[0], args
[1] - args
[2]);
1865 tcg_out_dat_rI(s
, COND_AL
, ARITH_RSB
,
1866 args
[0], args
[2], args
[1], 1);
1869 tcg_out_dat_rIN(s
, COND_AL
, ARITH_SUB
, ARITH_ADD
,
1870 args
[0], args
[1], args
[2], const_args
[2]);
1873 case INDEX_op_and_i32
:
1874 tcg_out_dat_rIK(s
, COND_AL
, ARITH_AND
, ARITH_BIC
,
1875 args
[0], args
[1], args
[2], const_args
[2]);
1877 case INDEX_op_andc_i32
:
1878 tcg_out_dat_rIK(s
, COND_AL
, ARITH_BIC
, ARITH_AND
,
1879 args
[0], args
[1], args
[2], const_args
[2]);
1881 case INDEX_op_or_i32
:
1884 case INDEX_op_xor_i32
:
1888 tcg_out_dat_rI(s
, COND_AL
, c
, args
[0], args
[1], args
[2], const_args
[2]);
1890 case INDEX_op_add2_i32
:
1891 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1892 a3
= args
[3], a4
= args
[4], a5
= args
[5];
1893 if (a0
== a3
|| (a0
== a5
&& !const_args
[5])) {
1896 tcg_out_dat_rIN(s
, COND_AL
, ARITH_ADD
| TO_CPSR
, ARITH_SUB
| TO_CPSR
,
1897 a0
, a2
, a4
, const_args
[4]);
1898 tcg_out_dat_rIK(s
, COND_AL
, ARITH_ADC
, ARITH_SBC
,
1899 a1
, a3
, a5
, const_args
[5]);
1900 tcg_out_mov_reg(s
, COND_AL
, args
[0], a0
);
1902 case INDEX_op_sub2_i32
:
1903 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1904 a3
= args
[3], a4
= args
[4], a5
= args
[5];
1905 if ((a0
== a3
&& !const_args
[3]) || (a0
== a5
&& !const_args
[5])) {
1908 if (const_args
[2]) {
1909 if (const_args
[4]) {
1910 tcg_out_movi32(s
, COND_AL
, a0
, a4
);
1913 tcg_out_dat_rI(s
, COND_AL
, ARITH_RSB
| TO_CPSR
, a0
, a4
, a2
, 1);
1915 tcg_out_dat_rIN(s
, COND_AL
, ARITH_SUB
| TO_CPSR
,
1916 ARITH_ADD
| TO_CPSR
, a0
, a2
, a4
, const_args
[4]);
1918 if (const_args
[3]) {
1919 if (const_args
[5]) {
1920 tcg_out_movi32(s
, COND_AL
, a1
, a5
);
1923 tcg_out_dat_rI(s
, COND_AL
, ARITH_RSC
, a1
, a5
, a3
, 1);
1925 tcg_out_dat_rIK(s
, COND_AL
, ARITH_SBC
, ARITH_ADC
,
1926 a1
, a3
, a5
, const_args
[5]);
1928 tcg_out_mov_reg(s
, COND_AL
, args
[0], a0
);
1930 case INDEX_op_neg_i32
:
1931 tcg_out_dat_imm(s
, COND_AL
, ARITH_RSB
, args
[0], args
[1], 0);
1933 case INDEX_op_not_i32
:
1934 tcg_out_dat_reg(s
, COND_AL
,
1935 ARITH_MVN
, args
[0], 0, args
[1], SHIFT_IMM_LSL(0));
1937 case INDEX_op_mul_i32
:
1938 tcg_out_mul32(s
, COND_AL
, args
[0], args
[1], args
[2]);
1940 case INDEX_op_mulu2_i32
:
1941 tcg_out_umull32(s
, COND_AL
, args
[0], args
[1], args
[2], args
[3]);
1943 case INDEX_op_muls2_i32
:
1944 tcg_out_smull32(s
, COND_AL
, args
[0], args
[1], args
[2], args
[3]);
1946 /* XXX: Perhaps args[2] & 0x1f is wrong */
1947 case INDEX_op_shl_i32
:
1949 SHIFT_IMM_LSL(args
[2] & 0x1f) : SHIFT_REG_LSL(args
[2]);
1951 case INDEX_op_shr_i32
:
1952 c
= const_args
[2] ? (args
[2] & 0x1f) ? SHIFT_IMM_LSR(args
[2] & 0x1f) :
1953 SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args
[2]);
1955 case INDEX_op_sar_i32
:
1956 c
= const_args
[2] ? (args
[2] & 0x1f) ? SHIFT_IMM_ASR(args
[2] & 0x1f) :
1957 SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args
[2]);
1959 case INDEX_op_rotr_i32
:
1960 c
= const_args
[2] ? (args
[2] & 0x1f) ? SHIFT_IMM_ROR(args
[2] & 0x1f) :
1961 SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args
[2]);
1964 tcg_out_dat_reg(s
, COND_AL
, ARITH_MOV
, args
[0], 0, args
[1], c
);
1967 case INDEX_op_rotl_i32
:
1968 if (const_args
[2]) {
1969 tcg_out_dat_reg(s
, COND_AL
, ARITH_MOV
, args
[0], 0, args
[1],
1970 ((0x20 - args
[2]) & 0x1f) ?
1971 SHIFT_IMM_ROR((0x20 - args
[2]) & 0x1f) :
1974 tcg_out_dat_imm(s
, COND_AL
, ARITH_RSB
, TCG_REG_TMP
, args
[2], 0x20);
1975 tcg_out_dat_reg(s
, COND_AL
, ARITH_MOV
, args
[0], 0, args
[1],
1976 SHIFT_REG_ROR(TCG_REG_TMP
));
1980 case INDEX_op_ctz_i32
:
1981 tcg_out_dat_reg(s
, COND_AL
, INSN_RBIT
, TCG_REG_TMP
, 0, args
[1], 0);
1985 case INDEX_op_clz_i32
:
1991 if (c
&& a2
== 32) {
1992 tcg_out_dat_reg(s
, COND_AL
, INSN_CLZ
, a0
, 0, a1
, 0);
1995 tcg_out_dat_imm(s
, COND_AL
, ARITH_CMP
, 0, a1
, 0);
1996 tcg_out_dat_reg(s
, COND_NE
, INSN_CLZ
, a0
, 0, a1
, 0);
1997 if (c
|| a0
!= a2
) {
1998 tcg_out_dat_rIK(s
, COND_EQ
, ARITH_MOV
, ARITH_MVN
, a0
, 0, a2
, c
);
2002 case INDEX_op_brcond_i32
:
2003 tcg_out_dat_rIN(s
, COND_AL
, ARITH_CMP
, ARITH_CMN
, 0,
2004 args
[0], args
[1], const_args
[1]);
2005 tcg_out_goto_label(s
, tcg_cond_to_arm_cond
[args
[2]],
2006 arg_label(args
[3]));
2008 case INDEX_op_setcond_i32
:
2009 tcg_out_dat_rIN(s
, COND_AL
, ARITH_CMP
, ARITH_CMN
, 0,
2010 args
[1], args
[2], const_args
[2]);
2011 tcg_out_dat_imm(s
, tcg_cond_to_arm_cond
[args
[3]],
2012 ARITH_MOV
, args
[0], 0, 1);
2013 tcg_out_dat_imm(s
, tcg_cond_to_arm_cond
[tcg_invert_cond(args
[3])],
2014 ARITH_MOV
, args
[0], 0, 0);
2017 case INDEX_op_brcond2_i32
:
2018 c
= tcg_out_cmp2(s
, args
, const_args
);
2019 tcg_out_goto_label(s
, tcg_cond_to_arm_cond
[c
], arg_label(args
[5]));
2021 case INDEX_op_setcond2_i32
:
2022 c
= tcg_out_cmp2(s
, args
+ 1, const_args
+ 1);
2023 tcg_out_dat_imm(s
, tcg_cond_to_arm_cond
[c
], ARITH_MOV
, args
[0], 0, 1);
2024 tcg_out_dat_imm(s
, tcg_cond_to_arm_cond
[tcg_invert_cond(c
)],
2025 ARITH_MOV
, args
[0], 0, 0);
2028 case INDEX_op_qemu_ld_i32
:
2029 tcg_out_qemu_ld(s
, args
, 0);
2031 case INDEX_op_qemu_ld_i64
:
2032 tcg_out_qemu_ld(s
, args
, 1);
2034 case INDEX_op_qemu_st_i32
:
2035 tcg_out_qemu_st(s
, args
, 0);
2037 case INDEX_op_qemu_st_i64
:
2038 tcg_out_qemu_st(s
, args
, 1);
2041 case INDEX_op_bswap16_i32
:
2042 tcg_out_bswap16(s
, COND_AL
, args
[0], args
[1]);
2044 case INDEX_op_bswap32_i32
:
2045 tcg_out_bswap32(s
, COND_AL
, args
[0], args
[1]);
2048 case INDEX_op_ext8s_i32
:
2049 tcg_out_ext8s(s
, COND_AL
, args
[0], args
[1]);
2051 case INDEX_op_ext16s_i32
:
2052 tcg_out_ext16s(s
, COND_AL
, args
[0], args
[1]);
2054 case INDEX_op_ext16u_i32
:
2055 tcg_out_ext16u(s
, COND_AL
, args
[0], args
[1]);
2058 case INDEX_op_deposit_i32
:
2059 tcg_out_deposit(s
, COND_AL
, args
[0], args
[2],
2060 args
[3], args
[4], const_args
[2]);
2062 case INDEX_op_extract_i32
:
2063 tcg_out_extract(s
, COND_AL
, args
[0], args
[1], args
[2], args
[3]);
2065 case INDEX_op_sextract_i32
:
2066 tcg_out_sextract(s
, COND_AL
, args
[0], args
[1], args
[2], args
[3]);
2069 case INDEX_op_div_i32
:
2070 tcg_out_sdiv(s
, COND_AL
, args
[0], args
[1], args
[2]);
2072 case INDEX_op_divu_i32
:
2073 tcg_out_udiv(s
, COND_AL
, args
[0], args
[1], args
[2]);
2077 tcg_out_mb(s
, args
[0]);
2080 case INDEX_op_mov_i32
: /* Always emitted via tcg_out_mov. */
2081 case INDEX_op_movi_i32
: /* Always emitted via tcg_out_movi. */
2082 case INDEX_op_call
: /* Always emitted via tcg_out_call. */
2088 static const TCGTargetOpDef
*tcg_target_op_def(TCGOpcode op
)
2090 static const TCGTargetOpDef r
= { .args_ct_str
= { "r" } };
2091 static const TCGTargetOpDef r_r
= { .args_ct_str
= { "r", "r" } };
2092 static const TCGTargetOpDef s_s
= { .args_ct_str
= { "s", "s" } };
2093 static const TCGTargetOpDef r_l
= { .args_ct_str
= { "r", "l" } };
2094 static const TCGTargetOpDef r_r_r
= { .args_ct_str
= { "r", "r", "r" } };
2095 static const TCGTargetOpDef r_r_l
= { .args_ct_str
= { "r", "r", "l" } };
2096 static const TCGTargetOpDef r_l_l
= { .args_ct_str
= { "r", "l", "l" } };
2097 static const TCGTargetOpDef s_s_s
= { .args_ct_str
= { "s", "s", "s" } };
2098 static const TCGTargetOpDef r_r_ri
= { .args_ct_str
= { "r", "r", "ri" } };
2099 static const TCGTargetOpDef r_r_rI
= { .args_ct_str
= { "r", "r", "rI" } };
2100 static const TCGTargetOpDef r_r_rIN
2101 = { .args_ct_str
= { "r", "r", "rIN" } };
2102 static const TCGTargetOpDef r_r_rIK
2103 = { .args_ct_str
= { "r", "r", "rIK" } };
2104 static const TCGTargetOpDef r_r_r_r
2105 = { .args_ct_str
= { "r", "r", "r", "r" } };
2106 static const TCGTargetOpDef r_r_l_l
2107 = { .args_ct_str
= { "r", "r", "l", "l" } };
2108 static const TCGTargetOpDef s_s_s_s
2109 = { .args_ct_str
= { "s", "s", "s", "s" } };
2110 static const TCGTargetOpDef br
2111 = { .args_ct_str
= { "r", "rIN" } };
2112 static const TCGTargetOpDef dep
2113 = { .args_ct_str
= { "r", "0", "rZ" } };
2114 static const TCGTargetOpDef movc
2115 = { .args_ct_str
= { "r", "r", "rIN", "rIK", "0" } };
2116 static const TCGTargetOpDef add2
2117 = { .args_ct_str
= { "r", "r", "r", "r", "rIN", "rIK" } };
2118 static const TCGTargetOpDef sub2
2119 = { .args_ct_str
= { "r", "r", "rI", "rI", "rIN", "rIK" } };
2120 static const TCGTargetOpDef br2
2121 = { .args_ct_str
= { "r", "r", "rI", "rI" } };
2122 static const TCGTargetOpDef setc2
2123 = { .args_ct_str
= { "r", "r", "r", "rI", "rI" } };
2126 case INDEX_op_goto_ptr
:
2129 case INDEX_op_ld8u_i32
:
2130 case INDEX_op_ld8s_i32
:
2131 case INDEX_op_ld16u_i32
:
2132 case INDEX_op_ld16s_i32
:
2133 case INDEX_op_ld_i32
:
2134 case INDEX_op_st8_i32
:
2135 case INDEX_op_st16_i32
:
2136 case INDEX_op_st_i32
:
2137 case INDEX_op_neg_i32
:
2138 case INDEX_op_not_i32
:
2139 case INDEX_op_bswap16_i32
:
2140 case INDEX_op_bswap32_i32
:
2141 case INDEX_op_ext8s_i32
:
2142 case INDEX_op_ext16s_i32
:
2143 case INDEX_op_ext16u_i32
:
2144 case INDEX_op_extract_i32
:
2145 case INDEX_op_sextract_i32
:
2148 case INDEX_op_add_i32
:
2149 case INDEX_op_sub_i32
:
2150 case INDEX_op_setcond_i32
:
2152 case INDEX_op_and_i32
:
2153 case INDEX_op_andc_i32
:
2154 case INDEX_op_clz_i32
:
2155 case INDEX_op_ctz_i32
:
2157 case INDEX_op_mul_i32
:
2158 case INDEX_op_div_i32
:
2159 case INDEX_op_divu_i32
:
2161 case INDEX_op_mulu2_i32
:
2162 case INDEX_op_muls2_i32
:
2164 case INDEX_op_or_i32
:
2165 case INDEX_op_xor_i32
:
2167 case INDEX_op_shl_i32
:
2168 case INDEX_op_shr_i32
:
2169 case INDEX_op_sar_i32
:
2170 case INDEX_op_rotl_i32
:
2171 case INDEX_op_rotr_i32
:
2174 case INDEX_op_brcond_i32
:
2176 case INDEX_op_deposit_i32
:
2178 case INDEX_op_movcond_i32
:
2180 case INDEX_op_add2_i32
:
2182 case INDEX_op_sub2_i32
:
2184 case INDEX_op_brcond2_i32
:
2186 case INDEX_op_setcond2_i32
:
2189 case INDEX_op_qemu_ld_i32
:
2190 return TARGET_LONG_BITS
== 32 ? &r_l
: &r_l_l
;
2191 case INDEX_op_qemu_ld_i64
:
2192 return TARGET_LONG_BITS
== 32 ? &r_r_l
: &r_r_l_l
;
2193 case INDEX_op_qemu_st_i32
:
2194 return TARGET_LONG_BITS
== 32 ? &s_s
: &s_s_s
;
2195 case INDEX_op_qemu_st_i64
:
2196 return TARGET_LONG_BITS
== 32 ? &s_s_s
: &s_s_s_s
;
2203 static void tcg_target_init(TCGContext
*s
)
2205 /* Only probe for the platform and capabilities if we havn't already
2206 determined maximum values at compile time. */
2207 #ifndef use_idiv_instructions
2209 unsigned long hwcap
= qemu_getauxval(AT_HWCAP
);
2210 use_idiv_instructions
= (hwcap
& HWCAP_ARM_IDIVA
) != 0;
2213 if (__ARM_ARCH
< 7) {
2214 const char *pl
= (const char *)qemu_getauxval(AT_PLATFORM
);
2215 if (pl
!= NULL
&& pl
[0] == 'v' && pl
[1] >= '4' && pl
[1] <= '9') {
2216 arm_arch
= pl
[1] - '0';
2220 tcg_target_available_regs
[TCG_TYPE_I32
] = 0xffff;
2222 tcg_target_call_clobber_regs
= 0;
2223 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R0
);
2224 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R1
);
2225 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R2
);
2226 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R3
);
2227 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R12
);
2228 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R14
);
2230 s
->reserved_regs
= 0;
2231 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_CALL_STACK
);
2232 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_TMP
);
2233 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_PC
);
2236 static inline void tcg_out_ld(TCGContext
*s
, TCGType type
, TCGReg arg
,
2237 TCGReg arg1
, intptr_t arg2
)
2239 tcg_out_ld32u(s
, COND_AL
, arg
, arg1
, arg2
);
2242 static inline void tcg_out_st(TCGContext
*s
, TCGType type
, TCGReg arg
,
2243 TCGReg arg1
, intptr_t arg2
)
2245 tcg_out_st32(s
, COND_AL
, arg
, arg1
, arg2
);
2248 static inline bool tcg_out_sti(TCGContext
*s
, TCGType type
, TCGArg val
,
2249 TCGReg base
, intptr_t ofs
)
2254 static inline void tcg_out_mov(TCGContext
*s
, TCGType type
,
2255 TCGReg ret
, TCGReg arg
)
2257 tcg_out_dat_reg(s
, COND_AL
, ARITH_MOV
, ret
, 0, arg
, SHIFT_IMM_LSL(0));
2260 static inline void tcg_out_movi(TCGContext
*s
, TCGType type
,
2261 TCGReg ret
, tcg_target_long arg
)
2263 tcg_out_movi32(s
, COND_AL
, ret
, arg
);
2266 static void tcg_out_nop_fill(tcg_insn_unit
*p
, int count
)
2269 for (i
= 0; i
< count
; ++i
) {
/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit.  */

#define PUSH_SIZE  ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & -TCG_TARGET_STACK_ALIGN)
2286 static void tcg_target_qemu_prologue(TCGContext
*s
)
2290 /* Calling convention requires us to save r4-r11 and lr. */
2291 /* stmdb sp!, { r4 - r11, lr } */
2292 tcg_out32(s
, (COND_AL
<< 28) | 0x092d4ff0);
2294 /* Reserve callee argument and tcg temp space. */
2295 stack_addend
= FRAME_SIZE
- PUSH_SIZE
;
2297 tcg_out_dat_rI(s
, COND_AL
, ARITH_SUB
, TCG_REG_CALL_STACK
,
2298 TCG_REG_CALL_STACK
, stack_addend
, 1);
2299 tcg_set_frame(s
, TCG_REG_CALL_STACK
, TCG_STATIC_CALL_ARGS_SIZE
,
2300 CPU_TEMP_BUF_NLONGS
* sizeof(long));
2302 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_AREG0
, tcg_target_call_iarg_regs
[0]);
2304 tcg_out_bx(s
, COND_AL
, tcg_target_call_iarg_regs
[1]);
2307 * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
2308 * and fall through to the rest of the epilogue.
2310 s
->code_gen_epilogue
= s
->code_ptr
;
2311 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_R0
, 0);
2314 tb_ret_addr
= s
->code_ptr
;
2315 tcg_out_dat_rI(s
, COND_AL
, ARITH_ADD
, TCG_REG_CALL_STACK
,
2316 TCG_REG_CALL_STACK
, stack_addend
, 1);
2318 /* ldmia sp!, { r4 - r11, pc } */
2319 tcg_out32(s
, (COND_AL
<< 28) | 0x08bd8ff0);
2324 uint8_t fde_def_cfa
[4];
2325 uint8_t fde_reg_ofs
[18];
2328 #define ELF_HOST_MACHINE EM_ARM
2330 /* We're expecting a 2 byte uleb128 encoded value. */
2331 QEMU_BUILD_BUG_ON(FRAME_SIZE
>= (1 << 14));
2333 static const DebugFrame debug_frame
= {
2334 .h
.cie
.len
= sizeof(DebugFrameCIE
)-4, /* length after .len member */
2337 .h
.cie
.code_align
= 1,
2338 .h
.cie
.data_align
= 0x7c, /* sleb128 -4 */
2339 .h
.cie
.return_column
= 14,
2341 /* Total FDE size does not include the "len" member. */
2342 .h
.fde
.len
= sizeof(DebugFrame
) - offsetof(DebugFrame
, h
.fde
.cie_offset
),
2345 12, 13, /* DW_CFA_def_cfa sp, ... */
2346 (FRAME_SIZE
& 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2350 /* The following must match the stmdb in the prologue. */
2351 0x8e, 1, /* DW_CFA_offset, lr, -4 */
2352 0x8b, 2, /* DW_CFA_offset, r11, -8 */
2353 0x8a, 3, /* DW_CFA_offset, r10, -12 */
2354 0x89, 4, /* DW_CFA_offset, r9, -16 */
2355 0x88, 5, /* DW_CFA_offset, r8, -20 */
2356 0x87, 6, /* DW_CFA_offset, r7, -24 */
2357 0x86, 7, /* DW_CFA_offset, r6, -28 */
2358 0x85, 8, /* DW_CFA_offset, r5, -32 */
2359 0x84, 9, /* DW_CFA_offset, r4, -36 */
2363 void tcg_register_jit(void *buf
, size_t buf_size
)
2365 tcg_register_jit_int(buf
, buf_size
, &debug_frame
, sizeof(debug_frame
));