/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#if defined(__ARM_ARCH_7__) || \
    defined(__ARM_ARCH_7A__) || \
    defined(__ARM_ARCH_7EM__) || \
    defined(__ARM_ARCH_7M__) || \
    defined(__ARM_ARCH_7R__)
#define USE_ARMV7_INSTRUCTIONS
#endif

#if defined(USE_ARMV7_INSTRUCTIONS) || \
    defined(__ARM_ARCH_6J__) || \
    defined(__ARM_ARCH_6K__) || \
    defined(__ARM_ARCH_6T2__) || \
    defined(__ARM_ARCH_6Z__) || \
    defined(__ARM_ARCH_6ZK__)
#define USE_ARMV6_INSTRUCTIONS
#endif

#if defined(USE_ARMV6_INSTRUCTIONS) || \
    defined(__ARM_ARCH_5T__) || \
    defined(__ARM_ARCH_5TE__) || \
    defined(__ARM_ARCH_5TEJ__)
#define USE_ARMV5_INSTRUCTIONS
#endif

#ifdef USE_ARMV5_INSTRUCTIONS
static const int use_armv5_instructions = 1;
#else
static const int use_armv5_instructions = 0;
#endif
#undef USE_ARMV5_INSTRUCTIONS

#ifdef USE_ARMV6_INSTRUCTIONS
static const int use_armv6_instructions = 1;
#else
static const int use_armv6_instructions = 0;
#endif
#undef USE_ARMV6_INSTRUCTIONS

#ifdef USE_ARMV7_INSTRUCTIONS
static const int use_armv7_instructions = 1;
#else
static const int use_armv7_instructions = 0;
#endif
#undef USE_ARMV7_INSTRUCTIONS
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {

static const int tcg_target_reg_alloc_order[] = {

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};

static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};
static inline void reloc_abs32(void *code_ptr, tcg_target_long target)
    *(uint32_t *) code_ptr = target;

static inline void reloc_pc24(void *code_ptr, tcg_target_long target)
    uint32_t offset = ((target - ((tcg_target_long) code_ptr + 8)) >> 2);
    *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & ~0xffffff)
                             | (offset & 0xffffff);
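/* reloc_pc24() patches the 24-bit offset field of a B/BL instruction: the
   target is PC-relative, the ARM PC reads as the instruction address + 8, and
   the field counts 32-bit words, hence the subtraction of code_ptr + 8 and
   the shift right by 2. */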
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
        reloc_abs32(code_ptr, value);
        reloc_pc24(code_ptr, value);
/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)

        ct->ct |= TCG_CT_CONST_ARM;

        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);

    /* qemu_ld address */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r0 and r1 will be overwritten when reading the tlb entry,
           so don't use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);

        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r1 is still needed to load data_reg or data_reg2,
           so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);

    /* qemu_st address & data_reg */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 and r1 will be overwritten when reading the tlb entry
           (softmmu only) and doing the byte swapping, so don't
           use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);

    /* qemu_st64 data_reg2 */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 and r1 will be overwritten when reading the tlb entry
           (softmmu only) and doing the byte swapping, so don't
           use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#ifdef CONFIG_SOFTMMU
        /* r2 is still needed to load data_reg, so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
static inline uint32_t rotl(uint32_t val, int n)
    return (val << n) | (val >> (32 - n));
/* ARM immediates for ALU instructions are made of an unsigned 8-bit value
   right-rotated by an even amount between 0 and 30. */
static inline int encode_imm(uint32_t imm)
    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0)
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0)
    /* now try harder with rotations */
    if ((rotl(imm, 2) & ~0xff) == 0)
    if ((rotl(imm, 4) & ~0xff) == 0)
    if ((rotl(imm, 6) & ~0xff) == 0)
    /* imm can't be encoded */

static inline int check_fit_imm(uint32_t imm)
    return encode_imm(imm) >= 0;
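/* For example, 0xff0 is encodable (0xff rotated right by 28), while 0x101 is
   not: its set bits span nine positions, so no even rotation fits them into a
   single byte.  check_fit_imm() backs the TCG_CT_CONST_ARM operand constraint
   tested in tcg_target_const_match() below. */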
/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
    if (ct & TCG_CT_CONST)
    else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val))
enum arm_data_opc_e {

#define TO_CPSR(opc) \
    ((opc == ARITH_CMP || opc == ARITH_CMN || opc == ARITH_TST) << 20)

#define SHIFT_IMM_LSL(im)  (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)  (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)  (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)  (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)  (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)  (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)  (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)  (((rs) << 8) | 0x70)
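/* These macros build the shifter-operand field of a data-processing
   instruction (bits 4-11): the shift amount or shift register plus the shift
   type.  SHIFT_IMM_LSL(0) is 0, i.e. a plain unshifted register operand. */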
enum arm_cond_code_e {
    COND_CS = 0x2,  /* Unsigned greater or equal */
    COND_CC = 0x3,  /* Unsigned less than */
    COND_MI = 0x4,  /* Negative */
    COND_PL = 0x5,  /* Zero or greater */
    COND_VS = 0x6,  /* Overflow */
    COND_VC = 0x7,  /* No overflow */
    COND_HI = 0x8,  /* Unsigned greater than */
    COND_LS = 0x9,  /* Unsigned less or equal */
static const uint8_t tcg_cond_to_arm_cond[10] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
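/* B and BL encode a 24-bit signed word offset relative to the PC, and the ARM
   PC reads as the address of the current instruction plus 8; hence the
   ((offset - 8) >> 2) in the branch emitters below. */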
static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));

static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
#ifdef HOST_WORDS_BIGENDIAN
    tcg_out8(s, (cond << 4) | 0x0a);
    tcg_out8(s, (cond << 4) | 0x0a);
static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));

static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
    tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | shift | rm);
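/* tcg_out_dat_reg2() below emits a pair of data-processing instructions for
   64-bit arithmetic: the low-half opc0 sets the flags (the (1 << 20) is the S
   bit) and the high-half opc1 then consumes the carry.  When rd0 would
   clobber an input of the second instruction, the low result is staged in r8
   and moved into place afterwards. */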
static inline void tcg_out_dat_reg2(TCGContext *s,
                int cond, int opc0, int opc1, int rd0, int rd1,
                int rn0, int rn1, int rm0, int rm1, int shift)
    if (rd0 == rn1 || rd0 == rm1) {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (8 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd0, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (rd0 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
    tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | im);
static inline void tcg_out_movi32(TCGContext *s,
                int cond, int rd, uint32_t arg)
    /* TODO: This is very suboptimal, we can easily have a constant
     * pool somewhere after all the instructions.  */
    if ((int)arg < 0 && (int)arg >= -0x100) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);
    } else if (use_armv7_instructions) {
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));

            rot = ((32 - i) << 7) & 0xf00;
            tcg_out_dat_imm(s, cond, opc, rd, rn, ((arg >> i) & 0xff) | rot);
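/* On ARMv7 the two words emitted above are MOVW (0x03000000), which sets the
   low 16 bits, and MOVT (0x03400000), which sets the high 16 bits only when
   they are non-zero; older cores fall back to building the constant a byte at
   a time with rotated immediates. */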
static inline void tcg_out_mul32(TCGContext *s,
                int cond, int rd, int rs, int rm)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rm << 8) | 0x90 | rs);
        tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
static inline void tcg_out_umull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0x800098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
static inline void tcg_out_smull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0xc00098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
static inline void tcg_out_ext8s(TCGContext *s, int cond,
                                 int rd, int rn)
    if (use_armv6_instructions) {
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));
static inline void tcg_out_ext8u(TCGContext *s, int cond,
                                 int rd, int rn)
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
static inline void tcg_out_ext16s(TCGContext *s, int cond,
                                  int rd, int rn)
    if (use_armv6_instructions) {
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));
static inline void tcg_out_ext16u(TCGContext *s, int cond,
                                  int rd, int rn)
    if (use_armv6_instructions) {
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));
static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
    if (use_armv6_instructions) {
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_ASR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
    if (use_armv6_instructions) {
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_LSR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
    if (use_armv6_instructions) {
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_R8, rn, rn, SHIFT_IMM_ROR(16));
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_R8, TCG_REG_R8, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_R8, SHIFT_IMM_LSR(8));
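/* The pre-ARMv6 path above is the classic four-instruction byte swap:
   eor r8, rn, rn ror #16; bic r8, r8, #0x00ff0000 (0xff | 0x800 is that value
   as a rotated immediate); mov rd, rn ror #8; eor rd, rd, r8 lsr #8.  The
   single word emitted on ARMv6 is the REV instruction. */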
static inline void tcg_out_ld32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
        tcg_out32(s, (cond << 28) | 0x05900000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
        tcg_out32(s, (cond << 28) | 0x05100000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));

static inline void tcg_out_st32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
        tcg_out32(s, (cond << 28) | 0x05800000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
        tcg_out32(s, (cond << 28) | 0x05000000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
static inline void tcg_out_ld32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
    tcg_out32(s, (cond << 28) | 0x07900000 |
                    (rn << 16) | (rd << 12) | rm);

static inline void tcg_out_st32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
    tcg_out32(s, (cond << 28) | 0x07800000 |
                    (rn << 16) | (rd << 12) | rm);

/* Register pre-increment with base writeback. */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
    tcg_out32(s, (cond << 28) | 0x07b00000 |
                    (rn << 16) | (rd << 12) | rm);

static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
    tcg_out32(s, (cond << 28) | 0x07a00000 |
                    (rn << 16) | (rd << 12) | rm);
static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
        tcg_out32(s, (cond << 28) | 0x01d000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
        tcg_out32(s, (cond << 28) | 0x015000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));

static inline void tcg_out_st16_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
        tcg_out32(s, (cond << 28) | 0x01c000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
        tcg_out32(s, (cond << 28) | 0x014000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
    tcg_out32(s, (cond << 28) | 0x019000b0 |
                    (rn << 16) | (rd << 12) | rm);

static inline void tcg_out_st16_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
    tcg_out32(s, (cond << 28) | 0x018000b0 |
                    (rn << 16) | (rd << 12) | rm);

static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
        tcg_out32(s, (cond << 28) | 0x01d000f0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
        tcg_out32(s, (cond << 28) | 0x015000f0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));

static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
    tcg_out32(s, (cond << 28) | 0x019000f0 |
                    (rn << 16) | (rd << 12) | rm);
static inline void tcg_out_ld8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
        tcg_out32(s, (cond << 28) | 0x05d00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
        tcg_out32(s, (cond << 28) | 0x05500000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));

static inline void tcg_out_st8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
        tcg_out32(s, (cond << 28) | 0x05c00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
        tcg_out32(s, (cond << 28) | 0x05400000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));

static inline void tcg_out_ld8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
    tcg_out32(s, (cond << 28) | 0x07d00000 |
                    (rn << 16) | (rd << 12) | rm);

static inline void tcg_out_st8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
    tcg_out32(s, (cond << 28) | 0x07c00000 |
                    (rn << 16) | (rd << 12) | rm);
static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
        tcg_out32(s, (cond << 28) | 0x01d000d0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
        tcg_out32(s, (cond << 28) | 0x015000d0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));

static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
    tcg_out32(s, (cond << 28) | 0x019000d0 |
                    (rn << 16) | (rd << 12) | rm);
static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
        tcg_out_ld32_12(s, cond, rd, rn, offset);

static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
        tcg_out_st32_12(s, cond, rd, rn, offset);

static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
        tcg_out_ld16u_8(s, cond, rd, rn, offset);

static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
static inline void tcg_out_st16(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_R8);
        tcg_out_st16_8(s, cond, rd, rn, offset);

static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
        tcg_out_ld8_12(s, cond, rd, rn, offset);

static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
        tcg_out_ld8s_8(s, cond, rd, rn, offset);

static inline void tcg_out_st8(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
        tcg_out_st8_12(s, cond, rd, rn, offset);
static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
    val = addr - (tcg_target_long) s->code_ptr;
    if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
        tcg_out_b(s, cond, val);

        if (cond == COND_AL) {
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
            tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
            tcg_out_dat_reg(s, cond, ARITH_ADD,
                            TCG_REG_PC, TCG_REG_PC,
                            TCG_REG_R8, SHIFT_IMM_LSL(0));
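/* Jumps that do not fit in a 24-bit branch are emitted either as
   "ldr pc, [pc, #-4]" followed by the literal target address (only when the
   condition is AL, since otherwise the literal word could be executed as an
   instruction), or as an add to the PC through the r8 scratch register. */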
static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr)
    val = addr - (tcg_target_long) s->code_ptr;
    if (val < 0x01fffffd && val > -0x01fffffd)
        tcg_out_bl(s, cond, val);

        if (cond == COND_AL) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
            tcg_out_movi32(s, cond, TCG_REG_R9, addr);
            tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
                            TCG_REG_PC, SHIFT_IMM_LSL(0));
            tcg_out_bx(s, cond, TCG_REG_R9);
static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
    if (use_armv5_instructions) {
        tcg_out_blx(s, cond, arg);
        tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
                        TCG_REG_PC, SHIFT_IMM_LSL(0));
        tcg_out_bx(s, cond, arg);
static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
    TCGLabel *l = &s->labels[label_index];

        tcg_out_goto(s, cond, l->u.value);
    else if (cond == COND_AL) {
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
        tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
        /* Probably this should be preferred even for COND_AL... */
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
        tcg_out_b_noaddr(s, cond);
#ifdef CONFIG_SOFTMMU

#include "../../softmmu_defs.h"

static void *qemu_ld_helpers[4] = {

static void *qemu_st_helpers[4] = {

#define TLB_SHIFT  (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
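/* TLB_SHIFT is log2 of the byte size of one softmmu TLB (CPU_TLB_SIZE entries
   of sizeof(CPUTLBEntry) each); tcg_out_qemu_ld/st below use it to step from
   the env pointer to tlb_table[mem_index] and then to the entry for the guest
   page being accessed. */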
static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
    int addr_reg, data_reg, data_reg2, bswap;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64

#ifdef TARGET_WORDS_BIGENDIAN

        data_reg2 = 0; /* suppress warning */

#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
# if CPU_TLB_BITS > 8
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_R8,
                    0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
                    TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * not exceed otherwise, so use an
     * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
    tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
                    (mem_index << (TLB_SHIFT & 1)) |
                    ((16 - (TLB_SHIFT >> 1)) << 8));
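/* The odd-looking immediate above is mem_index << TLB_SHIFT expressed as an
   ARM rotated immediate: an 8-bit value of mem_index << (TLB_SHIFT & 1) with
   a rotate field of 16 - TLB_SHIFT / 2 (the hardware rotates right by twice
   that), which amounts to a left shift by the even part of TLB_SHIFT. */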
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_read));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment. */
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access.  */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addend));

        tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
            tcg_out_bswap16(s, COND_EQ, data_reg, data_reg);
            tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
            tcg_out_bswap16s(s, COND_EQ, data_reg, data_reg);
            tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
            tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
            tcg_out_ld32_rwb(s, COND_EQ, data_reg2, TCG_REG_R1, addr_reg);
            tcg_out_ld32_12(s, COND_EQ, data_reg, TCG_REG_R1, 4);
            tcg_out_bswap32(s, COND_EQ, data_reg2, data_reg2);
            tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
            tcg_out_ld32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
            tcg_out_ld32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);

    label_ptr = (void *) s->code_ptr;
    tcg_out_b_noaddr(s, COND_EQ);

    /* TODO: move this code to where the constants pool will be */
    if (addr_reg != TCG_REG_R0) {
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
# if TARGET_LONG_BITS == 32
    tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R1, 0, mem_index);
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
    tcg_out_bl(s, COND_AL, (tcg_target_long) qemu_ld_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);

        tcg_out_ext8s(s, COND_AL, data_reg, TCG_REG_R0);
        tcg_out_ext16s(s, COND_AL, data_reg, TCG_REG_R0);
        if (data_reg != TCG_REG_R0) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
        if (data_reg != TCG_REG_R0) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
        if (data_reg2 != TCG_REG_R1) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            data_reg2, 0, TCG_REG_R1, SHIFT_IMM_LSL(0));

    reloc_pc24(label_ptr, (tcg_target_long)s->code_ptr);
#else /* !CONFIG_SOFTMMU */
        uint32_t offset = GUEST_BASE;

            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;
            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R8, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_R8;
            offset &= ~(0xff << i);

        tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
        tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
        tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
            tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
            tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
        tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
        /* TODO: use block load -
         * check that data_reg2 > data_reg or the other way */
        if (data_reg == addr_reg) {
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
            tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2);
static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
    int addr_reg, data_reg, data_reg2, bswap;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    uint32_t *label_ptr;

#ifdef TARGET_WORDS_BIGENDIAN
        data_reg2 = *args++;
        data_reg2 = 0; /* suppress warning */

#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
                    TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * not exceed otherwise, so use an
     * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
    tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
                    (mem_index << (TLB_SHIFT & 1)) |
                    ((16 - (TLB_SHIFT >> 1)) << 8));
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_write));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment. */
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access.  */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_write) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addend));
        tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
            tcg_out_bswap16(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st16_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
            tcg_out_st16_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st32_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
            tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg2);
            tcg_out_st32_rwb(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, addr_reg);
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, 4);
            tcg_out_st32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
            tcg_out_st32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);

    label_ptr = (void *) s->code_ptr;
    tcg_out_b_noaddr(s, COND_EQ);

    /* TODO: move this code to where the constants pool will be */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
# if TARGET_LONG_BITS == 32
        tcg_out_ext8u(s, COND_AL, TCG_REG_R1, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        tcg_out_ext16u(s, COND_AL, TCG_REG_R1, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R8, 0, mem_index);
        tcg_out32(s, (COND_AL << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
        if (data_reg != TCG_REG_R2) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
        if (data_reg2 != TCG_REG_R3) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));

    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
        tcg_out_ext8u(s, COND_AL, TCG_REG_R2, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        tcg_out_ext16u(s, COND_AL, TCG_REG_R2, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        if (data_reg != TCG_REG_R2) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R8, 0, mem_index);
        tcg_out32(s, (COND_AL << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
        if (data_reg != TCG_REG_R2) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
        if (data_reg2 != TCG_REG_R3) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));

    tcg_out_bl(s, COND_AL, (tcg_target_long) qemu_st_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);
    tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R13, TCG_REG_R13, 0x10);

    reloc_pc24(label_ptr, (tcg_target_long)s->code_ptr);
#else /* !CONFIG_SOFTMMU */
        uint32_t offset = GUEST_BASE;

            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;
            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R1, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_R1;
            offset &= ~(0xff << i);

        tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_bswap16(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addr_reg, 0);
            tcg_out_st16_8(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        /* TODO: use block store -
         * check that data_reg2 > data_reg or the other way */
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 4);
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
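/* For the softmmu slow path above, mem_index is passed in the next free
   argument register (r1, r2 or r3 depending on address and data width); for
   the 64-bit store it no longer fits in registers, so it is pushed with
   "str r8, [sp, #-0x10]!" and the stack pointer is restored by the
   "add r13, r13, #0x10" after the helper call returns. */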
static uint8_t *tb_ret_addr;
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                const TCGArg *args, const int *const_args)
    case INDEX_op_exit_tb:
            uint8_t *ld_ptr = s->code_ptr;
                tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
                tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
                *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
                tcg_out32(s, args[0]);

    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method */
#if defined(USE_DIRECT_JUMP)
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out_b_noaddr(s, COND_AL);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            /* Indirect jump method */
            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
            if (c > 0xfff || c < -0xfff) {
                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
                                (tcg_target_long) (s->tb_next + args[0]));
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
            tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;

            tcg_out_call(s, COND_AL, args[0]);
            tcg_out_callr(s, COND_AL, args[0]);
            tcg_out_goto(s, COND_AL, args[0]);
            tcg_out_bx(s, COND_AL, args[0]);
        tcg_out_goto_label(s, COND_AL, args[0]);
    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_mov_i32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(0));
    case INDEX_op_movi_i32:
        tcg_out_movi32(s, COND_AL, args[0], args[1]);
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
        if (const_args[2]) {
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, c,
                            args[0], args[1], rotl(args[2], rot) | (rot << 7));
            tcg_out_dat_reg(s, COND_AL, c,
                            args[0], args[1], args[2], SHIFT_IMM_LSL(0));
    case INDEX_op_add2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
    case INDEX_op_sub2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
    case INDEX_op_neg_i32:
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_R8, args[1], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_R8));

    case INDEX_op_brcond_i32:
        if (const_args[1]) {
            rot = encode_imm(args[1]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
                            args[0], rotl(args[1], rot) | (rot << 7));
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[0], args[1], SHIFT_IMM_LSL(0));
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
    case INDEX_op_brcond2_i32:
        /* The resulting conditions are:
         * TCG_COND_EQ    -->  a0 == a2 && a1 == a3,
         * TCG_COND_NE    --> (a0 != a2 && a1 == a3) ||  a1 != a3,
         * TCG_COND_LT(U) --> (a0 <  a2 && a1 == a3) ||  a1 <  a3,
         * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
         * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
         * TCG_COND_GT(U) --> (a0 >  a2 && a1 == a3) ||  a1 >  a3,
         */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[0], args[2], SHIFT_IMM_LSL(0));
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
    case INDEX_op_setcond_i32:
        if (const_args[2]) {
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
                            args[1], rotl(args[2], rot) | (rot << 7));
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[1], args[2], SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
    case INDEX_op_setcond2_i32:
        /* See brcond2_i32 comment */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[2], args[4], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
                        ARITH_MOV, args[0], 0, 0);
    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
static const TCGTargetOpDef arm_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* TODO: "r", "r", "ri" */
    { INDEX_op_add_i32, { "r", "r", "rI" } },
    { INDEX_op_sub_i32, { "r", "r", "rI" } },
    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_and_i32, { "r", "r", "rI" } },
    { INDEX_op_andc_i32, { "r", "r", "rI" } },
    { INDEX_op_or_i32, { "r", "r", "rI" } },
    { INDEX_op_xor_i32, { "r", "r", "rI" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "rI" } },
    { INDEX_op_setcond_i32, { "r", "r", "rI" } },

    /* TODO: "r", "r", "r", "r", "ri", "ri" */
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "r", "r" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l" } },

    { INDEX_op_qemu_st8, { "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s" } },
    { INDEX_op_qemu_st64, { "S", "S", "s" } },

    { INDEX_op_qemu_ld8u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l", "l" } },

    { INDEX_op_qemu_st8, { "s", "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s", "s" } },
    { INDEX_op_qemu_st64, { "S", "S", "s", "s" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },
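/* In the constraint strings above, "r" is any general register, "I" accepts
   constants encodable as an ARM rotated immediate (see check_fit_imm()), and
   the "l"/"L" and "s"/"S" letters name the restricted register sets that
   target_parse_constraint() sets up for qemu_ld/qemu_st operands, which must
   avoid the registers clobbered by the TLB lookup and helper calls. */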
static void tcg_target_init(TCGContext *s)
#if !defined(CONFIG_USER_ONLY)
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_R12) |
                     (1 << TCG_REG_R14));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);

    tcg_add_target_add_op_defs(arm_op_defs);
static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);

static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);

static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val);
        tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val);
static inline void tcg_out_mov(TCGContext *s, TCGType type, int ret, int arg)
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                int ret, tcg_target_long arg)
    tcg_out_movi32(s, COND_AL, ret, arg);

static void tcg_target_qemu_prologue(TCGContext *s)
    /* There is no need to save r7, it is used to store the address
       of the env structure and is not modified by GCC. */

    /* stmdb sp!, { r4 - r6, r8 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4f70);

    tcg_out_bx(s, COND_AL, TCG_REG_R0);
    tb_ret_addr = s->code_ptr;

    /* ldmia sp!, { r4 - r6, r8 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8f70);
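/* The prologue saves the callee-saved registers, jumps to the generated code
   whose address is passed in r0, and records tb_ret_addr: exit_tb branches
   back here, where the ldmia restores the saved registers and returns to the
   caller by loading pc. */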