/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#if defined(__ARM_ARCH_7__) || \
    defined(__ARM_ARCH_7A__) || \
    defined(__ARM_ARCH_7EM__) || \
    defined(__ARM_ARCH_7M__) || \
    defined(__ARM_ARCH_7R__)
#define USE_ARMV7_INSTRUCTIONS
#endif

#if defined(USE_ARMV7_INSTRUCTIONS) || \
    defined(__ARM_ARCH_6J__) || \
    defined(__ARM_ARCH_6K__) || \
    defined(__ARM_ARCH_6T2__) || \
    defined(__ARM_ARCH_6Z__) || \
    defined(__ARM_ARCH_6ZK__)
#define USE_ARMV6_INSTRUCTIONS
#endif

#if defined(USE_ARMV6_INSTRUCTIONS) || \
    defined(__ARM_ARCH_5T__) || \
    defined(__ARM_ARCH_5TE__) || \
    defined(__ARM_ARCH_5TEJ__)
#define USE_ARMV5_INSTRUCTIONS
#endif

#ifdef USE_ARMV5_INSTRUCTIONS
static const int use_armv5_instructions = 1;
#else
static const int use_armv5_instructions = 0;
#endif
#undef USE_ARMV5_INSTRUCTIONS

#ifdef USE_ARMV6_INSTRUCTIONS
static const int use_armv6_instructions = 1;
#else
static const int use_armv6_instructions = 0;
#endif
#undef USE_ARMV6_INSTRUCTIONS

#ifdef USE_ARMV7_INSTRUCTIONS
static const int use_armv7_instructions = 1;
#else
static const int use_armv7_instructions = 0;
#endif
#undef USE_ARMV7_INSTRUCTIONS
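
/* Note: the USE_ARMV*_INSTRUCTIONS macros are only used to initialise the
   run-time constants above and are #undef'd immediately, so the rest of
   this file tests use_armv5/6/7_instructions instead; since those are
   compile-time constants, the compiler can still drop the unused paths. */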
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",
    "%r1",
    "%r2",
    "%r3",
    "%r4",
    "%r5",
    "%r6",
    "%r7",
    "%r8",
    "%r9",
    "%r10",
    "%r11",
    "%r12",
    "%r13",
    "%r14",
    "%pc",
};
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,
};
static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};
static inline void reloc_abs32(void *code_ptr, tcg_target_long target)
{
    *(uint32_t *) code_ptr = target;
}
static inline void reloc_pc24(void *code_ptr, tcg_target_long target)
{
    uint32_t offset = ((target - ((tcg_target_long) code_ptr + 8)) >> 2);

    *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & ~0xffffff)
                             | (offset & 0xffffff);
}
static void patch_reloc(uint8_t *code_ptr, int type,
                tcg_target_long value, tcg_target_long addend)
{
    switch (type) {
    case R_ARM_ABS32:
        reloc_abs32(code_ptr, value);
        break;

    case R_ARM_PC24:
        reloc_pc24(code_ptr, value);
        break;
    }
}
/* maximum number of register used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 4;
}
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'I':
        ct->ct |= TCG_CT_CONST_ARM;
        break;

    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        break;

    /* qemu_ld address */
    case 'l':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r0 and r1 will be overwritten when reading the tlb entry,
           so don't use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#endif
        break;
    case 'L':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r1 is still needed to load data_reg or data_reg2,
           so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#endif
        break;

    /* qemu_st address & data_reg */
    case 's':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 and r1 will be overwritten when reading the tlb entry
           (softmmu only) and doing the byte swapping, so don't
           use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;
    /* qemu_st64 data_reg2 */
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 and r1 will be overwritten when reading the tlb entry
           (softmmu only) and doing the byte swapping, so don't
           use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#ifdef CONFIG_SOFTMMU
        /* r2 is still needed to load data_reg, so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
#endif
        break;

    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}
static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << n) | (val >> (32 - n));
}
/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   right-rotated by an even amount between 0 and 30. */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0)
        return 0;
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0)
        return 32 - shift;
    /* now try harder with rotations */
    if ((rotl(imm, 2) & ~0xff) == 0)
        return 2;
    if ((rotl(imm, 4) & ~0xff) == 0)
        return 4;
    if ((rotl(imm, 6) & ~0xff) == 0)
        return 6;
    /* imm can't be encoded */
    return -1;
}

static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}
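
/* Sketch (not part of the original code) of how callers combine
   encode_imm() and rotl() to build an ALU "operand2" immediate, as done in
   the const_args paths of tcg_out_op() below:

       int rot = encode_imm(imm);            // even rotate amount, or -1
       if (rot >= 0)
           tcg_out_dat_imm(s, cond, opc, rd, rn,
                           rotl(imm, rot) | (rot << 7));

   rotl(imm, rot) is the 8-bit payload and (rot << 7) stores rot/2 in
   bits 11:8 of the instruction's immediate field. */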
/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static inline int tcg_target_const_match(tcg_target_long val,
                const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST)
        return 1;
    else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val))
        return 1;
    else
        return 0;
}
enum arm_data_opc_e {
    ARITH_AND = 0x0,
    ARITH_EOR = 0x1,
    ARITH_SUB = 0x2,
    ARITH_RSB = 0x3,
    ARITH_ADD = 0x4,
    ARITH_ADC = 0x5,
    ARITH_SBC = 0x6,
    ARITH_RSC = 0x7,
    ARITH_TST = 0x8,
    ARITH_CMP = 0xa,
    ARITH_CMN = 0xb,
    ARITH_ORR = 0xc,
    ARITH_MOV = 0xd,
    ARITH_BIC = 0xe,
    ARITH_MVN = 0xf,
};
#define TO_CPSR(opc) \
    ((opc == ARITH_CMP || opc == ARITH_CMN || opc == ARITH_TST) << 20)

#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70)
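
/* These macros build the shifter operand (bits 11:0) of a data-processing
   instruction: the SHIFT_IMM_* forms place a 5-bit shift amount in bits 11:7
   with the shift type in bits 6:5, while the SHIFT_REG_* forms set bit 4 and
   put the register holding the shift amount in bits 11:8. */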
enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,  /* Unsigned greater or equal */
    COND_CC = 0x3,  /* Unsigned less than */
    COND_MI = 0x4,  /* Negative */
    COND_PL = 0x5,  /* Zero or greater */
    COND_VS = 0x6,  /* Overflow */
    COND_VC = 0x7,  /* No overflow */
    COND_HI = 0x8,  /* Unsigned greater than */
    COND_LS = 0x9,  /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
};
static const uint8_t tcg_cond_to_arm_cond[10] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};
static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}
static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}
static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
{
#ifdef HOST_WORDS_BIGENDIAN
    tcg_out8(s, (cond << 4) | 0x0a);
    s->code_ptr += 3;
#else
    s->code_ptr += 3;
    tcg_out8(s, (cond << 4) | 0x0a);
#endif
}
static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}
static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}
static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | shift | rm);
}
static inline void tcg_out_dat_reg2(TCGContext *s,
                int cond, int opc0, int opc1, int rd0, int rd1,
                int rn0, int rn1, int rm0, int rm1, int shift)
{
    if (rd0 == rn1 || rd0 == rm1) {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (8 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd0, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
    } else {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (rd0 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
    }
}
static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | im);
}
static inline void tcg_out_movi32(TCGContext *s,
                int cond, int rd, int32_t arg)
{
    /* TODO: This is very suboptimal, we can easily have a constant
     * pool somewhere after all the instructions.  */

    if (arg < 0 && arg > -0x100)
        return tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);

    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000)
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
    } else {
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, arg & 0xff);
        if (arg & 0x0000ff00)
            tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                            ((arg >> 8) & 0xff) | 0xc00);
        if (arg & 0x00ff0000)
            tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                            ((arg >> 16) & 0xff) | 0x800);
        if (arg & 0xff000000)
            tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                            ((arg >> 24) & 0xff) | 0x400);
    }
}
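
/* With use_armv7_instructions the two raw tcg_out32() calls above emit
   MOVW (0x03000000) and, if the upper half is non-zero, MOVT (0x03400000);
   the fallback builds the constant from a MOV of the low byte plus up to
   three ORRs of the remaining bytes encoded as rotated immediates. */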
static inline void tcg_out_mul32(TCGContext *s,
                int cond, int rd, int rs, int rm)
{
    if (rd != rm)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
    else if (rd != rs)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rm << 8) | 0x90 | rs);
    else {
        tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
    }
}
static inline void tcg_out_umull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0x800098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}
static inline void tcg_out_smull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0xc00098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}
static inline void tcg_out_ext8s(TCGContext *s, int cond,
                                 int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxtb */
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));
    }
}
static inline void tcg_out_ext8u(TCGContext *s, int cond,
                                 int rd, int rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}
static inline void tcg_out_ext16s(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxth */
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));
    }
}
static inline void tcg_out_ext16u(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));
    }
}
static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_ASR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
    }
}
static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_LSR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
    }
}
static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev */
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_R8, rn, rn, SHIFT_IMM_ROR(16));
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_R8, TCG_REG_R8, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_R8, SHIFT_IMM_LSR(8));
    }
}
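
/* The pre-ARMv6 fallback above is the usual four-instruction byte swap:
   eor r8, rn, rn ror #16; bic r8, r8, #0x00ff0000; mov rd, rn ror #8;
   eor rd, rd, r8 lsr #8.  r8 holds rn ^ (rn ror #16) with bits 23:16
   cleared, so the final eor corrects the two bytes that a plain
   rotate-by-8 would leave in the wrong half. */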
static inline void tcg_out_ld32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05900000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05100000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}
static inline void tcg_out_st32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05800000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05000000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}
static inline void tcg_out_ld32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07900000 |
                    (rn << 16) | (rd << 12) | rm);
}
static inline void tcg_out_st32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07800000 |
                    (rn << 16) | (rd << 12) | rm);
}
/* Register pre-increment with base writeback. */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07b00000 |
                    (rn << 16) | (rd << 12) | rm);
}
static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07a00000 |
                    (rn << 16) | (rd << 12) | rm);
}
static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}
static inline void tcg_out_st16_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01c000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x014000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}
static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000b0 |
                    (rn << 16) | (rd << 12) | rm);
}
static inline void tcg_out_st16_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000b0 |
                    (rn << 16) | (rd << 12) | rm);
}
static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000f0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000f0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}
static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000f0 |
                    (rn << 16) | (rd << 12) | rm);
}
static inline void tcg_out_ld8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05d00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05500000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}
static inline void tcg_out_st8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05c00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05400000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}
static inline void tcg_out_ld8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07d00000 |
                    (rn << 16) | (rd << 12) | rm);
}
static inline void tcg_out_st8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07c00000 |
                    (rn << 16) | (rd << 12) | rm);
}
static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000d0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000d0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}
static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000d0 |
                    (rn << 16) | (rd << 12) | rm);
}
static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld32_12(s, cond, rd, rn, offset);
}
static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st32_12(s, cond, rd, rn, offset);
}
static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
}
static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
}
static inline void tcg_out_st16(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st16_8(s, cond, rd, rn, offset);
}
static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8_12(s, cond, rd, rn, offset);
}
static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
}
static inline void tcg_out_st8(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st8_12(s, cond, rd, rn, offset);
}
static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

    val = addr - (tcg_target_long) s->code_ptr;
    if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
        tcg_out_b(s, cond, val);
    else {
        if (cond == COND_AL) {
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
        } else {
            tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
            tcg_out_dat_reg(s, cond, ARITH_ADD,
                            TCG_REG_PC, TCG_REG_PC,
                            TCG_REG_R8, SHIFT_IMM_LSL(0));
        }
    }
}
static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

    val = addr - (tcg_target_long) s->code_ptr;
    if (val < 0x01fffffd && val > -0x01fffffd)
        tcg_out_bl(s, cond, val);
    else {
        if (cond == COND_AL) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
        } else {
            tcg_out_movi32(s, cond, TCG_REG_R9, addr);
            tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
                            TCG_REG_PC, SHIFT_IMM_LSL(0));
            tcg_out_bx(s, cond, TCG_REG_R9);
        }
    }
}
static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
{
    if (use_armv5_instructions) {
        tcg_out_blx(s, cond, arg);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
                        TCG_REG_PC, SHIFT_IMM_LSL(0));
        tcg_out_bx(s, cond, arg);
    }
}
static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value)
        tcg_out_goto(s, cond, l->u.value);
    else if (cond == COND_AL) {
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
        tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
        s->code_ptr += 4;
    } else {
        /* Probably this should be preferred even for COND_AL... */
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
        tcg_out_b_noaddr(s, cond);
    }
}
#ifdef CONFIG_SOFTMMU

#include "../../softmmu_defs.h"

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif

#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
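
/* TLB_SHIFT is log2 of one mem_index's TLB table
   (CPU_TLB_SIZE * sizeof(CPUTLBEntry)).  The qemu_ld/st code below relies
   on this to express "mem_index << TLB_SHIFT" as a single rotated
   immediate: (mem_index << (TLB_SHIFT & 1)) | ((16 - (TLB_SHIFT >> 1)) << 8). */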
static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2, bswap;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
# if CPU_TLB_BITS > 8
#  error
# endif
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_R8,
                    0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
                    TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * not exceed otherwise, so use an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_read));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment. */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access. */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    switch (opc) {
    case 0:
        tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 0 | 4:
        tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1:
        tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        if (bswap) {
            tcg_out_bswap16(s, COND_EQ, data_reg, data_reg);
        }
        break;
    case 1 | 4:
        if (bswap) {
            tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
            tcg_out_bswap16s(s, COND_EQ, data_reg, data_reg);
        } else {
            tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 2:
    default:
        tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        if (bswap) {
            tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
        }
        break;
    case 3:
        if (bswap) {
            tcg_out_ld32_rwb(s, COND_EQ, data_reg2, TCG_REG_R1, addr_reg);
            tcg_out_ld32_12(s, COND_EQ, data_reg, TCG_REG_R1, 4);
            tcg_out_bswap32(s, COND_EQ, data_reg2, data_reg2);
            tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
        } else {
            tcg_out_ld32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
            tcg_out_ld32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
        }
        break;
    }

    label_ptr = (void *) s->code_ptr;
    tcg_out_b_noaddr(s, COND_EQ);

    /* TODO: move this code to where the constants pool will be */
    if (addr_reg != TCG_REG_R0) {
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
    }
# if TARGET_LONG_BITS == 32
    tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R1, 0, mem_index);
# else
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
# endif
    tcg_out_bl(s, COND_AL, (tcg_target_long) qemu_ld_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);

    switch (opc) {
    case 0 | 4:
        tcg_out_ext8s(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 0:
    case 1:
    case 2:
    default:
        if (data_reg != TCG_REG_R0) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
        }
        break;
    case 3:
        if (data_reg != TCG_REG_R0) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
        }
        if (data_reg2 != TCG_REG_R1) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            data_reg2, 0, TCG_REG_R1, SHIFT_IMM_LSL(0));
        }
        break;
    }

    reloc_pc24(label_ptr, (tcg_target_long)s->code_ptr);
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R8, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_R8;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
        }
        break;
    case 1 | 4:
        if (bswap) {
            tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
        } else {
            tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 2:
    default:
        tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
        }
        break;
    case 3:
        /* TODO: use block load -
         * check that data_reg2 > data_reg or the other way */
        if (data_reg == addr_reg) {
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
        } else {
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
        }
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
            tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2);
        }
        break;
    }
#endif
}
static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2, bswap;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
                    TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * not exceed otherwise, so use an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_write));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment. */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access. */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_write) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    switch (opc) {
    case 0:
        tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st16_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
        } else {
            tcg_out_st16_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 2:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st32_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
        } else {
            tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 3:
        if (bswap) {
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg2);
            tcg_out_st32_rwb(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, addr_reg);
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_EQ, data_reg, TCG_REG_R1, 4);
        } else {
            tcg_out_st32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
            tcg_out_st32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
        }
        break;
    }

    label_ptr = (void *) s->code_ptr;
    tcg_out_b_noaddr(s, COND_EQ);

    /* TODO: move this code to where the constants pool will be */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
# if TARGET_LONG_BITS == 32
    switch (opc) {
    case 0:
        tcg_out_ext8u(s, COND_AL, TCG_REG_R1, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        break;
    case 1:
        tcg_out_ext16u(s, COND_AL, TCG_REG_R1, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        break;
    case 2:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        break;
    case 3:
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R8, 0, mem_index);
        tcg_out32(s, (COND_AL << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
        if (data_reg != TCG_REG_R2) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
        }
        if (data_reg2 != TCG_REG_R3) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));
        }
        break;
    }
# else
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    switch (opc) {
    case 0:
        tcg_out_ext8u(s, COND_AL, TCG_REG_R2, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
    case 1:
        tcg_out_ext16u(s, COND_AL, TCG_REG_R2, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
    case 2:
        if (data_reg != TCG_REG_R2) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
        }
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
    case 3:
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R8, 0, mem_index);
        tcg_out32(s, (COND_AL << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
        if (data_reg != TCG_REG_R2) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
        }
        if (data_reg2 != TCG_REG_R3) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));
        }
        break;
    }
# endif

    tcg_out_bl(s, COND_AL, (tcg_target_long) qemu_st_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);
    if (opc == 3)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R13, TCG_REG_R13, 0x10);

    reloc_pc24(label_ptr, (tcg_target_long)s->code_ptr);
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R1, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_R1;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addr_reg, 0);
        } else {
            tcg_out_st16_8(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 2:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 3:
        /* TODO: use block store -
         * check that data_reg2 > data_reg or the other way */
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 4);
        } else {
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
        }
        break;
    }
#endif
}
static uint8_t *tb_ret_addr;
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                const TCGArg *args, const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        {
            uint8_t *ld_ptr = s->code_ptr;
            if (args[0] >> 8)
                tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
            else
                tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
            if (args[0] >> 8) {
                *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
                tcg_out32(s, args[0]);
            }
        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method */
#if defined(USE_DIRECT_JUMP)
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out_b_noaddr(s, COND_AL);
#else
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
#endif
        } else {
            /* Indirect jump method */
            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
            if (c > 0xfff || c < -0xfff) {
                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
                                (tcg_target_long) (s->tb_next + args[0]));
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
            } else
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0])
            tcg_out_call(s, COND_AL, args[0]);
        else
            tcg_out_callr(s, COND_AL, args[0]);
        break;
    case INDEX_op_jmp:
        if (const_args[0])
            tcg_out_goto(s, COND_AL, args[0]);
        else
            tcg_out_bx(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, args[0]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mov_i32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi32(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_add_i32:
        c = ARITH_ADD;
        goto gen_arith;
    case INDEX_op_sub_i32:
        c = ARITH_SUB;
        goto gen_arith;
    case INDEX_op_and_i32:
        c = ARITH_AND;
        goto gen_arith;
    case INDEX_op_andc_i32:
        c = ARITH_BIC;
        goto gen_arith;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through.  */
    gen_arith:
        if (const_args[2]) {
            int rot;
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, c,
                            args[0], args[1], rotl(args[2], rot) | (rot << 7));
        } else
            tcg_out_dat_reg(s, COND_AL, c,
                            args[0], args[1], args[2], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_add2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_sub2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_neg_i32:
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through.  */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_R8, args[1], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_R8));
        }
        break;

    case INDEX_op_brcond_i32:
        if (const_args[1]) {
            int rot;
            rot = encode_imm(args[1]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
                            args[0], rotl(args[1], rot) | (rot << 7));
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[0], args[1], SHIFT_IMM_LSL(0));
        }
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        /* The resulting conditions are:
         * TCG_COND_EQ    -->  a0 == a2 && a1 == a3,
         * TCG_COND_NE    --> (a0 != a2 && a1 == a3) ||  a1 != a3,
         * TCG_COND_LT(U) --> (a0 <  a2 && a1 == a3) ||  a1 <  a3,
         * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
         * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
         * TCG_COND_GT(U) --> (a0 >  a2 && a1 == a3) ||  a1 >  a3,
         */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[0], args[2], SHIFT_IMM_LSL(0));
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
        break;
    case INDEX_op_setcond_i32:
        if (const_args[2]) {
            int rot;
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
                            args[1], rotl(args[2], rot) | (rot << 7));
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[1], args[2], SHIFT_IMM_LSL(0));
        }
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;
    case INDEX_op_setcond2_i32:
        /* See brcond2_i32 comment */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[2], args[4], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
        break;

    default:
        tcg_abort();
    }
}
static const TCGTargetOpDef arm_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* TODO: "r", "r", "ri" */
    { INDEX_op_add_i32, { "r", "r", "rI" } },
    { INDEX_op_sub_i32, { "r", "r", "rI" } },
    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_and_i32, { "r", "r", "rI" } },
    { INDEX_op_andc_i32, { "r", "r", "rI" } },
    { INDEX_op_or_i32, { "r", "r", "rI" } },
    { INDEX_op_xor_i32, { "r", "r", "rI" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "rI" } },
    { INDEX_op_setcond_i32, { "r", "r", "rI" } },

    /* TODO: "r", "r", "r", "r", "ri", "ri" */
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "r", "r" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l" } },

    { INDEX_op_qemu_st8, { "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s" } },
    { INDEX_op_qemu_st64, { "S", "S", "s" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l", "l" } },

    { INDEX_op_qemu_st8, { "s", "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s", "s" } },
    { INDEX_op_qemu_st64, { "S", "S", "s", "s" } },
#endif

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { -1 },
};
static void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();
#endif

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_R0) |
                     (1 << TCG_REG_R1) |
                     (1 << TCG_REG_R2) |
                     (1 << TCG_REG_R3) |
                     (1 << TCG_REG_R12) |
                     (1 << TCG_REG_R14));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);

    tcg_add_target_add_op_defs(arm_op_defs);
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}
static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}
static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val > 0) {
        if (val < 0x100)
            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val);
        else
            tcg_abort();
    } else if (val < 0) {
        if (val > -0x100)
            tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val);
        else
            tcg_abort();
    }
}
static inline void tcg_out_mov(TCGContext *s, TCGType type, int ret, int arg)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
}
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                int ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}
static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* There is no need to save r7, it is used to store the address
       of the env structure and is not modified by GCC. */

    /* stmdb sp!, { r4 - r6, r8 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4f70);

    tcg_out_bx(s, COND_AL, TCG_REG_R0);
    tb_ret_addr = s->code_ptr;

    /* ldmia sp!, { r4 - r6, r8 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8f70);
}