/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
    "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
    "%r24", "%r25", "%r26", "%dp", "%ret0", "%ret1", "%sp", "%r31",
};
#endif
/* This is an 8 byte temp slot in the stack frame.  */
#define STACK_TEMP_OFS -16

#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG TCG_REG_R16
#else
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,

    TCG_REG_R17,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,

    TCG_REG_R26,
    TCG_REG_R25,
    TCG_REG_R24,
    TCG_REG_R23,

    TCG_REG_RET0,
    TCG_REG_RET1,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R26,
    TCG_REG_R25,
    TCG_REG_R24,
    TCG_REG_R23,
};

static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_RET0,
    TCG_REG_RET1,
};
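/* The argument registers above follow the PA-RISC calling convention:
   the first four word-sized arguments are passed in %r26 down to %r23
   (arg0..arg3), and 32-bit (64-bit) results are returned in %ret0
   (%ret0/%ret1), i.e. %r28/%r29.  */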
/* True iff VAL fits a signed field of width BITS.  */
static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
{
    return (val << (sizeof(tcg_target_long) * 8 - bits)
            >> (sizeof(tcg_target_long) * 8 - bits)) == val;
}
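/* For example, check_fit_tl(-1024, 11) is true while check_fit_tl(1024, 11)
   is false: the shift pair sign-extends the low 11 bits, and the result
   compares equal to VAL exactly when VAL lies in [-1024, 1023].  */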
/* True iff depi can be used to compute (reg | MASK).
   Accept a bit pattern like:
      0....01....1
      1....10....0
      0..01..10..0
   Copied from gcc sources.  */
static inline int or_mask_p(tcg_target_ulong mask)
{
    mask += mask & -mask;
    return (mask & (mask - 1)) == 0;
}
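/* In other words, a single contiguous run of ones.  Adding the lowest set
   bit (mask & -mask) carries the run away: e.g. 0x00fff000 becomes
   0x01000000 (a power of two, accepted) while 0x00f00f00 becomes
   0x00f01000 (rejected).  A run reaching bit 31 overflows to zero, which
   also passes the power-of-two test.  */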
/* True iff depi or extru can be used to compute (reg & mask).
   Accept a bit pattern like these:
      0....01....1
      1....10....0
      1..10..01..1
   Copied from gcc sources.  */
static inline int and_mask_p(tcg_target_ulong mask)
{
    return or_mask_p(~mask);
}
static int low_sign_ext(int val, int len)
{
    return (((val << 1) & ~(-1u << len)) | ((val >> (len - 1)) & 1));
}
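/* This produces PA-RISC "low sign extension" immediates, in which the sign
   bit is stored in the least significant bit of the field.  For example,
   low_sign_ext(-3, 5) yields 0b11011: the 5-bit two's-complement pattern
   0b11101 rotated so that its sign bit lands in bit 0.  */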
static int reassemble_12(int as12)
{
    return (((as12 & 0x800) >> 11) |
            ((as12 & 0x400) >> 8) |
            ((as12 & 0x3ff) << 3));
}

static int reassemble_17(int as17)
{
    return (((as17 & 0x10000) >> 16) |
            ((as17 & 0x0f800) << 5) |
            ((as17 & 0x00400) >> 8) |
            ((as17 & 0x003ff) << 3));
}

static int reassemble_21(int as21)
{
    return (((as21 & 0x100000) >> 20) |
            ((as21 & 0x0ffe00) >> 8) |
            ((as21 & 0x000180) << 7) |
            ((as21 & 0x00007c) << 14) |
            ((as21 & 0x000003) << 12));
}
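/* The reassemble_* helpers scatter a contiguous immediate into the permuted
   field layout used by PA-RISC branch and long-immediate formats.  In
   reassemble_12, for instance, the sign bit of the 12-bit word displacement
   ends up in instruction bit 0 and the remaining bits in bits 2..12,
   matching the w/w1 fields of the conditional-branch encodings.  */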
/* ??? Bizarrely, there is no PCREL12F relocation type.  I guess all
   such relocations are simply fully handled by the assembler.  */
#define R_PARISC_PCREL12F R_PARISC_NONE
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    uint32_t *insn_ptr = (uint32_t *)code_ptr;
    uint32_t insn = *insn_ptr;
    tcg_target_long pcrel;

    value += addend;
    pcrel = (value - ((tcg_target_long)code_ptr + 8)) >> 2;

    switch (type) {
    case R_PARISC_PCREL12F:
        assert(check_fit_tl(pcrel, 12));
        /* ??? We assume all patches are forward.  See tcg_out_brcond
           re setting the NUL bit on the branch and eliding the nop.  */
        assert(pcrel >= 0);
        insn |= reassemble_12(pcrel);
        break;
    case R_PARISC_PCREL17F:
        assert(check_fit_tl(pcrel, 17));
        insn |= reassemble_17(pcrel);
        break;
    default:
        tcg_abort();
    }

    *insn_ptr = insn;
}
/* Maximum number of registers used for input function arguments.  */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 4;
}
/* Parse target specific constraints.  */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_0;
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S5;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_MS11;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
/* Test if a constant matches the constraint.  */
static int tcg_target_const_match(tcg_target_long val,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if (ct & TCG_CT_CONST_0) {
        return val == 0;
    } else if (ct & TCG_CT_CONST_S5) {
        return check_fit_tl(val, 5);
    } else if (ct & TCG_CT_CONST_S11) {
        return check_fit_tl(val, 11);
    } else if (ct & TCG_CT_CONST_MS11) {
        return check_fit_tl(-val, 11);
    }
    return 0;
}
#define INSN_OP(x)       ((x) << 26)
#define INSN_EXT3BR(x)   ((x) << 13)
#define INSN_EXT3SH(x)   ((x) << 10)
#define INSN_EXT4(x)     ((x) << 6)
#define INSN_EXT5(x)     (x)
#define INSN_EXT6(x)     ((x) << 6)
#define INSN_EXT7(x)     ((x) << 6)
#define INSN_EXT8A(x)    ((x) << 6)
#define INSN_EXT8B(x)    ((x) << 5)
#define INSN_T(x)        (x)
#define INSN_R1(x)       ((x) << 16)
#define INSN_R2(x)       ((x) << 21)
#define INSN_DEP_LEN(x)  (32 - (x))
#define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
#define INSN_SHDEP_P(x)  ((x) << 5)
#define INSN_COND(x)     ((x) << 13)
#define INSN_IM11(x)     low_sign_ext(x, 11)
#define INSN_IM14(x)     low_sign_ext(x, 14)
#define INSN_IM5(x)      (low_sign_ext(x, 5) << 16)

#define COND_NEVER   0
#define COND_EQ      1
#define COND_LT      2
#define COND_LE      3
#define COND_LTU     4
#define COND_LEU     5
#define COND_SV      6
#define COND_OD      7
#define COND_FALSE   8
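/* Note that PA-RISC numbers instruction bits with 0 as the most significant
   bit, and encodes deposit/extract lengths as 32 - len; INSN_DEP_LEN and
   INSN_SHDEP_CP above perform those conversions.  */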
#define INSN_ADD        (INSN_OP(0x02) | INSN_EXT6(0x18))
#define INSN_ADDC       (INSN_OP(0x02) | INSN_EXT6(0x1c))
#define INSN_ADDI       (INSN_OP(0x2d))
#define INSN_ADDIL      (INSN_OP(0x0a))
#define INSN_ADDL       (INSN_OP(0x02) | INSN_EXT6(0x28))
#define INSN_AND        (INSN_OP(0x02) | INSN_EXT6(0x08))
#define INSN_ANDCM      (INSN_OP(0x02) | INSN_EXT6(0x00))
#define INSN_COMCLR     (INSN_OP(0x02) | INSN_EXT6(0x22))
#define INSN_COMICLR    (INSN_OP(0x24))
#define INSN_DEP        (INSN_OP(0x35) | INSN_EXT3SH(3))
#define INSN_DEPI       (INSN_OP(0x35) | INSN_EXT3SH(7))
#define INSN_EXTRS      (INSN_OP(0x34) | INSN_EXT3SH(7))
#define INSN_EXTRU      (INSN_OP(0x34) | INSN_EXT3SH(6))
#define INSN_LDIL       (INSN_OP(0x08))
#define INSN_LDO        (INSN_OP(0x0d))
#define INSN_MTCTL      (INSN_OP(0x00) | INSN_EXT8B(0xc2))
#define INSN_OR         (INSN_OP(0x02) | INSN_EXT6(0x09))
#define INSN_SHD        (INSN_OP(0x34) | INSN_EXT3SH(2))
#define INSN_SUB        (INSN_OP(0x02) | INSN_EXT6(0x10))
#define INSN_SUBB       (INSN_OP(0x02) | INSN_EXT6(0x14))
#define INSN_SUBI       (INSN_OP(0x25))
#define INSN_VEXTRS     (INSN_OP(0x34) | INSN_EXT3SH(5))
#define INSN_VEXTRU     (INSN_OP(0x34) | INSN_EXT3SH(4))
#define INSN_VSHD       (INSN_OP(0x34) | INSN_EXT3SH(0))
#define INSN_XOR        (INSN_OP(0x02) | INSN_EXT6(0x0a))
#define INSN_ZDEP       (INSN_OP(0x35) | INSN_EXT3SH(2))
#define INSN_ZVDEP      (INSN_OP(0x35) | INSN_EXT3SH(0))

#define INSN_BL         (INSN_OP(0x3a) | INSN_EXT3BR(0))
#define INSN_BL_N       (INSN_OP(0x3a) | INSN_EXT3BR(0) | 2)
#define INSN_BLR        (INSN_OP(0x3a) | INSN_EXT3BR(2))
#define INSN_BV         (INSN_OP(0x3a) | INSN_EXT3BR(6))
#define INSN_BV_N       (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
#define INSN_BLE_SR4    (INSN_OP(0x39) | (1 << 13))

#define INSN_LDB        (INSN_OP(0x10))
#define INSN_LDH        (INSN_OP(0x11))
#define INSN_LDW        (INSN_OP(0x12))
#define INSN_LDWM       (INSN_OP(0x13))
#define INSN_FLDDS      (INSN_OP(0x0b) | INSN_EXT4(0) | (1 << 12))

#define INSN_LDBX       (INSN_OP(0x03) | INSN_EXT4(0))
#define INSN_LDHX       (INSN_OP(0x03) | INSN_EXT4(1))
#define INSN_LDWX       (INSN_OP(0x03) | INSN_EXT4(2))

#define INSN_STB        (INSN_OP(0x18))
#define INSN_STH        (INSN_OP(0x19))
#define INSN_STW        (INSN_OP(0x1a))
#define INSN_STWM       (INSN_OP(0x1b))
#define INSN_FSTDS      (INSN_OP(0x0b) | INSN_EXT4(8) | (1 << 12))

#define INSN_COMBT      (INSN_OP(0x20))
#define INSN_COMBF      (INSN_OP(0x22))
#define INSN_COMIBT     (INSN_OP(0x21))
#define INSN_COMIBF     (INSN_OP(0x23))
/* Supplied by libgcc.  */
extern void *__canonicalize_funcptr_for_compare(void *);

static void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    /* PA1.1 defines COPY as OR r,0,t; PA2.0 defines COPY as LDO 0(r),t
       but hppa-dis.c is unaware of this definition */
    if (ret != arg) {
        tcg_out32(s, INSN_OR | INSN_T(ret) | INSN_R1(arg)
                  | INSN_R2(TCG_REG_R0));
    }
}
static void tcg_out_movi(TCGContext *s, TCGType type,
                         int ret, tcg_target_long arg)
{
    if (check_fit_tl(arg, 14)) {
        tcg_out32(s, INSN_LDO | INSN_R1(ret)
                  | INSN_R2(TCG_REG_R0) | INSN_IM14(arg));
    } else {
        uint32_t hi, lo;
        hi = arg >> 11;
        lo = arg & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(ret) | reassemble_21(hi));
        if (lo) {
            tcg_out32(s, INSN_LDO | INSN_R1(ret)
                      | INSN_R2(ret) | INSN_IM14(lo));
        }
    }
}
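/* LDIL supplies the upper 21 bits of a word and LDO adds a signed 14-bit
   displacement; splitting ARG as hi = arg >> 11 and lo = arg & 0x7ff keeps
   the low part non-negative, so the pair composes without corrections.  */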
static void tcg_out_ldst(TCGContext *s, int ret, int addr,
                         tcg_target_long offset, int op)
{
    if (!check_fit_tl(offset, 14)) {
        uint32_t hi, lo, op;

        hi = offset >> 11;
        lo = offset & 0x7ff;

        if (addr == TCG_REG_R0) {
            op = INSN_LDIL | INSN_R2(TCG_REG_R1);
        } else {
            op = INSN_ADDIL | INSN_R2(addr);
        }
        tcg_out32(s, op | reassemble_21(hi));

        addr = TCG_REG_R1;
        offset = lo;
    }

    if (ret != addr || offset != 0 || op != INSN_LDO) {
        tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) | INSN_IM14(offset));
    }
}
/* This function is required by tcg.c.  */
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_LDW);
}

/* This function is required by tcg.c.  */
static inline void tcg_out_st(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_STW);
}

static void tcg_out_ldst_index(TCGContext *s, int data,
                               int base, int index, int op)
{
    tcg_out32(s, op | INSN_T(data) | INSN_R1(index) | INSN_R2(base));
}
static inline void tcg_out_addi2(TCGContext *s, int ret, int arg1,
                                 tcg_target_long val)
{
    tcg_out_ldst(s, ret, arg1, val, INSN_LDO);
}

/* This function is required by tcg.c.  */
static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    tcg_out_addi2(s, reg, reg, val);
}
static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
{
    tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
}

static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
                                  tcg_target_long val, int op)
{
    assert(check_fit_tl(val, 11));
    tcg_out32(s, op | INSN_R1(t) | INSN_R2(r1) | INSN_IM11(val));
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_arith(s, TCG_REG_R0, TCG_REG_R0, TCG_REG_R0, INSN_OR);
}

static inline void tcg_out_mtctl_sar(TCGContext *s, int arg)
{
    tcg_out32(s, INSN_MTCTL | INSN_R2(11) | INSN_R1(arg));
}
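/* Control register 11 is the Shift Amount Register (SAR); the variable
   shift, extract, and deposit instructions used below (VSHD, VEXTRS,
   ZVDEP) take their count from it, which is why every variable-count
   operation starts with the MTCTL above.  */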
/* Extract LEN bits at position OFS from ARG and place in RET.
   Note that here the bit ordering is reversed from the PA-RISC
   standard, such that the right-most bit is 0.  */
static inline void tcg_out_extr(TCGContext *s, int ret, int arg,
                                unsigned ofs, unsigned len, int sign)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, (sign ? INSN_EXTRS : INSN_EXTRU)
              | INSN_R1(ret) | INSN_R2(arg)
              | INSN_SHDEP_P(31 - ofs) | INSN_DEP_LEN(len));
}

/* Likewise with OFS interpreted little-endian.  */
static inline void tcg_out_dep(TCGContext *s, int ret, int arg,
                               unsigned ofs, unsigned len)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, INSN_DEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
}
static inline void tcg_out_shd(TCGContext *s, int ret, int hi, int lo,
                               unsigned count)
{
    assert(count < 32);
    tcg_out32(s, INSN_SHD | INSN_R1(hi) | INSN_R2(lo) | INSN_T(ret)
              | INSN_SHDEP_CP(count));
}

static void tcg_out_vshd(TCGContext *s, int ret, int hi, int lo, int creg)
{
    tcg_out_mtctl_sar(s, creg);
    tcg_out32(s, INSN_VSHD | INSN_T(ret) | INSN_R1(hi) | INSN_R2(lo));
}
static void tcg_out_ori(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    if (m == 0) {
        tcg_out_mov(s, ret, arg);
    } else if (m == -1) {
        tcg_out_movi(s, TCG_TYPE_I32, ret, -1);
    } else if (or_mask_p(m)) {
        int bs0, bs1;

        for (bs0 = 0; bs0 < 32; bs0++) {
            if ((m & (1u << bs0)) != 0) {
                break;
            }
        }
        for (bs1 = bs0; bs1 < 32; bs1++) {
            if ((m & (1u << bs1)) == 0) {
                break;
            }
        }
        assert(bs1 == 32 || (1ul << bs1) > m);

        tcg_out_mov(s, ret, arg);
        tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(-1)
                  | INSN_SHDEP_CP(31 - bs0) | INSN_DEP_LEN(bs1 - bs0));
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R1, m);
        tcg_out_arith(s, ret, arg, TCG_REG_R1, INSN_OR);
    }
}
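/* For a contiguous mask such as 0x00ffff00, this emits a single DEPI of
   all-ones over bits 8..23 of RET; anything depi cannot express falls
   back to materializing the mask in %r1 and using a plain OR.  */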
static void tcg_out_andi(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    if (m == 0) {
        tcg_out_mov(s, ret, TCG_REG_R0);
    } else if (m == -1) {
        tcg_out_mov(s, ret, arg);
    } else if (and_mask_p(m)) {
        int ls0, ls1, ms0;

        for (ls0 = 0; ls0 < 32; ls0++) {
            if ((m & (1u << ls0)) == 0) {
                break;
            }
        }
        for (ls1 = ls0; ls1 < 32; ls1++) {
            if ((m & (1u << ls1)) != 0) {
                break;
            }
        }
        for (ms0 = ls1; ms0 < 32; ms0++) {
            if ((m & (1u << ms0)) == 0) {
                break;
            }
        }
        assert(ms0 == 32);

        if (ls1 == 32) {
            tcg_out_extr(s, ret, arg, 0, ls0, 0);
        } else {
            tcg_out_mov(s, ret, arg);
            tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(0)
                      | INSN_SHDEP_CP(31 - ls0) | INSN_DEP_LEN(ls1 - ls0));
        }
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R1, m);
        tcg_out_arith(s, ret, arg, TCG_REG_R1, INSN_AND);
    }
}
static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 8, 1);
}

static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 16, 1);
}
static void tcg_out_shli(TCGContext *s, int ret, int arg, int count)
{
    assert(count < 32);
    tcg_out32(s, INSN_ZDEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - count) | INSN_DEP_LEN(32 - count));
}

static void tcg_out_shl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_ZVDEP | INSN_R2(ret) | INSN_R1(arg) | INSN_DEP_LEN(32));
}

static void tcg_out_shri(TCGContext *s, int ret, int arg, int count)
{
    assert(count < 32);
    tcg_out_extr(s, ret, arg, count, 32 - count, 0);
}

static void tcg_out_shr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, TCG_REG_R0, arg, creg);
}

static void tcg_out_sari(TCGContext *s, int ret, int arg, int count)
{
    assert(count < 32);
    tcg_out_extr(s, ret, arg, count, 32 - count, 1);
}

static void tcg_out_sar(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_VEXTRS | INSN_R1(ret) | INSN_R2(arg) | INSN_DEP_LEN(32));
}

static void tcg_out_rotli(TCGContext *s, int ret, int arg, int count)
{
    assert(count < 32);
    tcg_out_shd(s, ret, arg, arg, 32 - count);
}

static void tcg_out_rotl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 32, INSN_SUBI);
    tcg_out_vshd(s, ret, arg, arg, TCG_REG_R20);
}

static void tcg_out_rotri(TCGContext *s, int ret, int arg, int count)
{
    assert(count < 32);
    tcg_out_shd(s, ret, arg, arg, count);
}

static void tcg_out_rotr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, arg, arg, creg);
}
static void tcg_out_bswap16(TCGContext *s, int ret, int arg, int sign)
{
    if (ret != arg) {
        tcg_out_mov(s, ret, arg);               /* arg =  xxAB */
    }
    tcg_out_dep(s, ret, ret, 16, 8);            /* ret =  xBAB */
    tcg_out_extr(s, ret, ret, 8, 16, sign);     /* ret =  ..BA */
}

static void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
{
                                                /* arg =  ABCD */
    tcg_out_rotri(s, temp, arg, 16);            /* temp = CDAB */
    tcg_out_dep(s, temp, temp, 16, 8);          /* temp = CBAB */
    tcg_out_shd(s, ret, arg, temp, 8);          /* ret =  DCBA */
}
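/* Calls are emitted either as a pc-relative BL when the displacement fits
   the 17-bit branch field, or as LDIL+BLE through %r20 in the general
   case.  FUNC is canonicalized first because HPPA function pointers may
   designate a procedure label rather than the code address itself;
   libgcc's __canonicalize_funcptr_for_compare resolves it.  */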
static void tcg_out_call(TCGContext *s, void *func)
{
    tcg_target_long val, hi, lo, disp;

    val = (uint32_t)__canonicalize_funcptr_for_compare(func);
    disp = (val - ((tcg_target_long)s->code_ptr + 8)) >> 2;

    if (check_fit_tl(disp, 17)) {
        tcg_out32(s, INSN_BL_N | INSN_R2(TCG_REG_RP) | reassemble_17(disp));
    } else {
        hi = val >> 11;
        lo = val & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(TCG_REG_R20) | reassemble_21(hi));
        tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R20)
                  | reassemble_17(lo >> 2));
        tcg_out_mov(s, TCG_REG_RP, TCG_REG_R31);
    }
}
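/* PA-RISC 1.1 has no general integer multiply in the integer unit; XMPYU
   in the FPU computes the unsigned 32x32->64 product instead.  Hence the
   round trip below through the 8-byte stack temp slot to move the operands
   into fr22 and the double-word result back out.  */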
static void tcg_out_xmpyu(TCGContext *s, int retl, int reth,
                          int arg1, int arg2)
{
    /* Store both words into the stack for copy to the FPU.  */
    tcg_out_ldst(s, arg1, TCG_REG_SP, STACK_TEMP_OFS, INSN_STW);
    tcg_out_ldst(s, arg2, TCG_REG_SP, STACK_TEMP_OFS + 4, INSN_STW);

    /* Load both words into the FPU at the same time.  We get away
       with this because we can address the left and right half of the
       FPU registers individually once loaded.  */
    /* fldds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FLDDS | INSN_R2(TCG_REG_SP)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* xmpyu fr22r,fr22,fr22 */
    tcg_out32(s, 0x3ad64796);

    /* Store the 64-bit result back into the stack.  */
    /* fstds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FSTDS | INSN_R2(TCG_REG_SP)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* Load the pieces of the result that the caller requested.  */
    if (reth) {
        tcg_out_ldst(s, reth, TCG_REG_SP, STACK_TEMP_OFS, INSN_LDW);
    }
    if (retl) {
        tcg_out_ldst(s, retl, TCG_REG_SP, STACK_TEMP_OFS + 4, INSN_LDW);
    }
}
static void tcg_out_add2(TCGContext *s, int destl, int desth,
                         int al, int ah, int bl, int bh, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (blconst) {
        tcg_out_arithi(s, tmp, al, bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_ADD);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_ADDC);

    tcg_out_mov(s, destl, tmp);
}

static void tcg_out_sub2(TCGContext *s, int destl, int desth, int al, int ah,
                         int bl, int bh, int alconst, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (alconst) {
        if (blconst) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, bl);
            bl = TCG_REG_R20;
        }
        tcg_out_arithi(s, tmp, bl, al, INSN_SUBI);
    } else if (blconst) {
        tcg_out_arithi(s, tmp, al, -bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_SUB);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_SUBB);

    tcg_out_mov(s, destl, tmp);
}
static void tcg_out_branch(TCGContext *s, int label_index, int nul)
{
    TCGLabel *l = &s->labels[label_index];
    uint32_t op = nul ? INSN_BL_N : INSN_BL;

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 17));

        tcg_out32(s, op | reassemble_17(val));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL17F, label_index, 0);
        tcg_out32(s, op);
    }
}
static const uint8_t tcg_cond_to_cmp_cond[10] =
{
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_EQ | COND_FALSE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_LT | COND_FALSE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_LE | COND_FALSE,
    [TCG_COND_LTU] = COND_LTU,
    [TCG_COND_GEU] = COND_LTU | COND_FALSE,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_LEU | COND_FALSE,
};
static void tcg_out_brcond(TCGContext *s, int cond, TCGArg c1,
                           TCGArg c2, int c2const, int label_index)
{
    TCGLabel *l = &s->labels[label_index];
    int op, pacond;

    /* Note that COMIB operates as if the immediate is the first
       operand.  We model brcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMB with reversed operands as well.  */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = (pacond & COND_FALSE ? INSN_COMIBF : INSN_COMIBT);
        op |= INSN_IM5(c2);
    } else {
        op = (pacond & COND_FALSE ? INSN_COMBF : INSN_COMBT);
        op |= INSN_R1(c2);
    }
    op |= INSN_R2(c1);
    op |= INSN_COND(pacond & 7);

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 12));

        /* ??? Assume that all branches to defined labels are backward.
           Which means that if the nul bit is set, the delay slot is
           executed if the branch is taken, and not executed in fallthru.  */
        tcg_out32(s, op | reassemble_12(val));
        tcg_out_nop(s);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL12F, label_index, 0);
        /* ??? Assume that all branches to undefined labels are forward.
           Which means that if the nul bit is set, the delay slot is
           not executed if the branch is taken, which is what we want.  */
        tcg_out32(s, op | 2);
    }
}
static void tcg_out_comclr(TCGContext *s, int cond, TCGArg ret,
                           TCGArg c1, TCGArg c2, int c2const)
{
    int op, pacond;

    /* Note that COMICLR operates as if the immediate is the first
       operand.  We model setcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMCLR with reversed operands as well.  */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = INSN_COMICLR | INSN_R2(c1) | INSN_R1(ret) | INSN_IM11(c2);
    } else {
        op = INSN_COMCLR | INSN_R2(c1) | INSN_R1(c2) | INSN_T(ret);
    }
    op |= INSN_COND(pacond & 7);
    op |= pacond & COND_FALSE ? 1 << 12 : 0;

    tcg_out32(s, op);
}
static void tcg_out_brcond2(TCGContext *s, int cond, TCGArg al, TCGArg ah,
                            TCGArg bl, int blconst, TCGArg bh, int bhconst,
                            int label_index)
{
    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tcg_out_comclr(s, tcg_invert_cond(cond), TCG_REG_R0, al, bl, blconst);
        tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
        break;

    default:
        tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
        tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_brcond(s, tcg_unsigned_cond(cond),
                       al, bl, blconst, label_index);
        break;
    }
}
static void tcg_out_setcond(TCGContext *s, int cond, TCGArg ret,
                            TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_comclr(s, tcg_invert_cond(cond), ret, c1, c2, c2const);
    tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
}
*s
, int cond
, TCGArg ret
,
845 TCGArg al
, TCGArg ah
, TCGArg bl
, int blconst
,
846 TCGArg bh
, int bhconst
)
848 int scratch
= TCG_REG_R20
;
850 if (ret
!= al
&& ret
!= ah
851 && (blconst
|| ret
!= bl
)
852 && (bhconst
|| ret
!= bh
)) {
859 tcg_out_setcond(s
, cond
, scratch
, al
, bl
, blconst
);
860 tcg_out_comclr(s
, TCG_COND_EQ
, TCG_REG_R0
, ah
, bh
, bhconst
);
861 tcg_out_movi(s
, TCG_TYPE_I32
, scratch
, cond
== TCG_COND_NE
);
865 tcg_out_setcond(s
, tcg_unsigned_cond(cond
), scratch
, al
, bl
, blconst
);
866 tcg_out_comclr(s
, TCG_COND_EQ
, TCG_REG_R0
, ah
, bh
, bhconst
);
867 tcg_out_movi(s
, TCG_TYPE_I32
, scratch
, 0);
868 tcg_out_comclr(s
, cond
, TCG_REG_R0
, ah
, bh
, bhconst
);
869 tcg_out_movi(s
, TCG_TYPE_I32
, scratch
, 1);
873 tcg_out_mov(s
, ret
, scratch
);
#if defined(CONFIG_SOFTMMU)
#include "../../softmmu_defs.h"

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
/* Load and compare a TLB entry, and branch if TLB miss.  OFFSET is set to
   the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
   TLB for the memory index.  The return value is the offset from ENV
   contained in R1 afterward (to be used when loading ADDEND); if the
   return value is 0, R1 is not used.  */

static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
                            int addrhi, int s_bits, int lab_miss, int offset)
{
    int ret;

    /* Extracting the index into the TLB.  The "normal C operation" is
         r1 = addr_reg >> TARGET_PAGE_BITS;
         r1 &= CPU_TLB_SIZE - 1;
         r1 <<= CPU_TLB_ENTRY_BITS;
       What this does is extract CPU_TLB_BITS beginning at TARGET_PAGE_BITS
       and place them at CPU_TLB_ENTRY_BITS.  We can combine the first two
       operations with an EXTRU.  Unfortunately, the current value of
       CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
       add that follows.  */
    tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
    tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
    tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
    tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);

    /* Make sure that both the addr_{read,write} and addend can be
       read with a 14-bit offset from the same base register.  */
    if (check_fit_tl(offset + CPU_TLB_SIZE, 14)) {
        ret = 0;
    } else {
        ret = (offset + 0x400) & ~0x7ff;
        offset = ret - offset;
        tcg_out_addi2(s, TCG_REG_R1, r1, ret);
        r1 = TCG_REG_R1;
    }

    /* Load the entry from the computed slot.  */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R23, r1, offset);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset + 4);
    } else {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
    }

    /* If not equal, jump to lab_miss.  */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_brcond2(s, TCG_COND_NE, TCG_REG_R20, TCG_REG_R23,
                        r0, 0, addrhi, 0, lab_miss);
    } else {
        tcg_out_brcond(s, TCG_COND_NE, TCG_REG_R20, r0, 0, lab_miss);
    }

    return ret;
}
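/* Generate code for a guest load.  With softmmu, the inline fast path
   produced via tcg_out_tlb_read falls through on a hit, the host address
   is formed by adding the TLB addend, and the miss path marshals the
   address (and mem_index) into the argument registers, calls the
   __ldN_mmu helper, then massages RET0/RET1 into the data registers.  */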
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, addr_reg2;
    int data_reg, data_reg2;
    int r0, r1, mem_index, s_bits, bswap;
    tcg_target_long offset;
#if defined(CONFIG_SOFTMMU)
    int lab1, lab2, argreg;
#endif

    data_reg = *args++;
    data_reg2 = (opc == 3 ? *args++ : TCG_REG_R0);
    addr_reg = *args++;
    addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    mem_index = *args;
    s_bits = opc & 3;

    r0 = TCG_REG_R26;
    r1 = TCG_REG_R25;

#if defined(CONFIG_SOFTMMU)
    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = tcg_out_tlb_read(s, r0, r1, addr_reg, addr_reg2, s_bits, lab1,
                              offsetof(CPUState,
                                       tlb_table[mem_index][0].addr_read));

    /* TLB Hit.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : r1),
               offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);

    tcg_out_arith(s, r0, addr_reg, TCG_REG_R20, INSN_ADDL);
    offset = TCG_REG_R0;
#else
    r0 = addr_reg;
    offset = GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_R0;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 0;
#else
    bswap = 1;
#endif
    switch (opc) {
    case 0:
        tcg_out_ldst_index(s, data_reg, r0, offset, INSN_LDBX);
        break;
    case 0 | 4:
        tcg_out_ldst_index(s, data_reg, r0, offset, INSN_LDBX);
        tcg_out_ext8s(s, data_reg, data_reg);
        break;
    case 1:
        tcg_out_ldst_index(s, data_reg, r0, offset, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, data_reg, data_reg, 0);
        }
        break;
    case 1 | 4:
        tcg_out_ldst_index(s, data_reg, r0, offset, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, data_reg, data_reg, 1);
        } else {
            tcg_out_ext16s(s, data_reg, data_reg);
        }
        break;
    case 2:
        tcg_out_ldst_index(s, data_reg, r0, offset, INSN_LDWX);
        if (bswap) {
            tcg_out_bswap32(s, data_reg, data_reg, TCG_REG_R20);
        }
        break;
    case 3:
        if (bswap) {
            int t = data_reg2;
            data_reg2 = data_reg;
            data_reg = t;
        }
        if (offset == TCG_REG_R0) {
            /* Make sure not to clobber the base register.  */
            if (data_reg2 == r0) {
                tcg_out_ldst(s, data_reg, r0, 4, INSN_LDW);
                tcg_out_ldst(s, data_reg2, r0, 0, INSN_LDW);
            } else {
                tcg_out_ldst(s, data_reg2, r0, 0, INSN_LDW);
                tcg_out_ldst(s, data_reg, r0, 4, INSN_LDW);
            }
        } else {
            tcg_out_addi2(s, TCG_REG_R20, r0, 4);
            tcg_out_ldst_index(s, data_reg2, r0, offset, INSN_LDWX);
            tcg_out_ldst_index(s, data_reg, TCG_REG_R20, offset, INSN_LDWX);
        }
        if (bswap) {
            tcg_out_bswap32(s, data_reg, data_reg, TCG_REG_R20);
            tcg_out_bswap32(s, data_reg2, data_reg2, TCG_REG_R20);
        }
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  */
    /* label1: */
    tcg_out_label(s, lab1, (tcg_target_long)s->code_ptr);

    argreg = TCG_REG_R26;
    tcg_out_mov(s, argreg--, addr_reg);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, argreg--, addr_reg2);
    }
    tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);

    tcg_out_call(s, qemu_ld_helpers[s_bits]);

    switch (opc) {
    case 0:
        tcg_out_andi(s, data_reg, TCG_REG_RET0, 0xff);
        break;
    case 0 | 4:
        tcg_out_ext8s(s, data_reg, TCG_REG_RET0);
        break;
    case 1:
        tcg_out_andi(s, data_reg, TCG_REG_RET0, 0xffff);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, data_reg, TCG_REG_RET0);
        break;
    case 2:
    case 2 | 4:
        tcg_out_mov(s, data_reg, TCG_REG_RET0);
        break;
    case 3:
        tcg_out_mov(s, data_reg, TCG_REG_RET0);
        tcg_out_mov(s, data_reg2, TCG_REG_RET1);
        break;
    default:
        tcg_abort();
    }

    /* label2: */
    tcg_out_label(s, lab2, (tcg_target_long)s->code_ptr);
#endif
}
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, addr_reg2;
    int data_reg, data_reg2;
    int r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    tcg_target_long offset;
    int lab1, lab2, argreg;
#endif

    data_reg = *args++;
    data_reg2 = (opc == 3 ? *args++ : 0);
    addr_reg = *args++;
    addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    mem_index = *args;
    s_bits = opc;

    r0 = TCG_REG_R26;
    r1 = TCG_REG_R25;

#if defined(CONFIG_SOFTMMU)
    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = tcg_out_tlb_read(s, r0, r1, addr_reg, addr_reg2, s_bits, lab1,
                              offsetof(CPUState,
                                       tlb_table[mem_index][0].addr_write));

    /* TLB Hit.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : r1),
               offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);

    tcg_out_arith(s, r0, addr_reg, TCG_REG_R20, INSN_ADDL);
#else
    /* There are no indexed stores, so if GUEST_BASE is set
       we must do the add explicitly.  Careful to avoid R20,
       which is used for the bswaps to follow.  */
    if (GUEST_BASE == 0) {
        r0 = addr_reg;
    } else {
        tcg_out_arith(s, TCG_REG_R31, addr_reg, TCG_GUEST_BASE_REG, INSN_ADDL);
        r0 = TCG_REG_R31;
    }
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 0;
#else
    bswap = 1;
#endif
    switch (opc) {
    case 0:
        tcg_out_ldst(s, data_reg, r0, 0, INSN_STB);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, TCG_REG_R20, data_reg, 0);
            data_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, data_reg, r0, 0, INSN_STH);
        break;
    case 2:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, data_reg, TCG_REG_R20);
            data_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, data_reg, r0, 0, INSN_STW);
        break;
    case 3:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, data_reg, TCG_REG_R20);
            tcg_out_bswap32(s, TCG_REG_R23, data_reg2, TCG_REG_R23);
            data_reg2 = TCG_REG_R20;
            data_reg = TCG_REG_R23;
        }
        tcg_out_ldst(s, data_reg2, r0, 0, INSN_STW);
        tcg_out_ldst(s, data_reg, r0, 4, INSN_STW);
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  */
    /* label1: */
    tcg_out_label(s, lab1, (tcg_target_long)s->code_ptr);

    argreg = TCG_REG_R26;
    tcg_out_mov(s, argreg--, addr_reg);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, argreg--, addr_reg2);
    }

    switch (opc) {
    case 0:
        tcg_out_andi(s, argreg--, data_reg, 0xff);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 1:
        tcg_out_andi(s, argreg--, data_reg, 0xffff);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 2:
        tcg_out_mov(s, argreg--, data_reg);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 3:
        /* Because of the alignment required by the 64-bit data argument,
           we will always use R23/R24.  Also, we will always run out of
           argument registers for storing mem_index, so that will have
           to go on the stack.  */
        if (mem_index == 0) {
            argreg = TCG_REG_R0;
        } else {
            argreg = TCG_REG_R20;
            tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        }
        tcg_out_mov(s, TCG_REG_R23, data_reg2);
        tcg_out_mov(s, TCG_REG_R24, data_reg);
        tcg_out_st(s, TCG_TYPE_I32, argreg, TCG_REG_SP,
                   TCG_TARGET_CALL_STACK_OFFSET - 4);
        break;
    default:
        tcg_abort();
    }

    tcg_out_call(s, qemu_st_helpers[s_bits]);

    /* label2: */
    tcg_out_label(s, lab2, (tcg_target_long)s->code_ptr);
#endif
}
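/* Since BV without the nullify bit executes its delay slot,
   tcg_out_exit_tb places the return-value load in the slot of the branch
   back to the epilogue; the epilogue address lives in %r18, set up by
   tcg_target_qemu_prologue below.  */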
static void tcg_out_exit_tb(TCGContext *s, TCGArg arg)
{
    if (!check_fit_tl(arg, 14)) {
        uint32_t hi, lo;
        hi = arg & ~0x7ff;
        lo = arg & 0x7ff;
        if (lo) {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, hi);
            tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
            tcg_out_addi(s, TCG_REG_RET0, lo);
            return;
        }
        arg = hi;
    }
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, arg);
}
static void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
{
    if (s->tb_jmp_offset) {
        /* direct jump method */
        fprintf(stderr, "goto_tb direct\n");
        tcg_abort();
    } else {
        /* indirect jump method */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, TCG_REG_R0,
                   (tcg_target_long)(s->tb_next + arg));
        tcg_out32(s, INSN_BV_N | INSN_R2(TCG_REG_R20));
    }
    s->tb_next_offset[arg] = s->code_ptr - s->code_buf;
}
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                              const int *const_args)
{
    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_exit_tb(s, args[0]);
        break;
    case INDEX_op_goto_tb:
        tcg_out_goto_tb(s, args[0]);
        break;

    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out_call(s, (void *)args[0]);
        } else {
            tcg_out32(s, INSN_BLE_SR4 | INSN_R2(args[0]));
            tcg_out_mov(s, TCG_REG_RP, TCG_REG_R31);
        }
        break;

    case INDEX_op_jmp:
        fprintf(stderr, "unimplemented jmp\n");
        tcg_abort();
        break;

    case INDEX_op_br:
        tcg_out_branch(s, args[0], 1);
        break;

    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        tcg_out_ext8s(s, args[0], args[0]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        tcg_out_ext16s(s, args[0], args[0]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDW);
        break;

    case INDEX_op_st8_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STB);
        break;
    case INDEX_op_st16_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STH);
        break;
    case INDEX_op_st_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STW);
        break;

    case INDEX_op_add_i32:
        if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ADDL);
        }
        break;

    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1] - args[2]);
            } else {
                /* Recall that SUBI is a reversed subtract.  */
                tcg_out_arithi(s, args[0], args[2], args[1], INSN_SUBI);
            }
        } else if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], -args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_SUB);
        }
        break;

    case INDEX_op_and_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_AND);
        }
        break;

    case INDEX_op_or_i32:
        if (const_args[2]) {
            tcg_out_ori(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_OR);
        }
        break;

    case INDEX_op_xor_i32:
        tcg_out_arith(s, args[0], args[1], args[2], INSN_XOR);
        break;

    case INDEX_op_andc_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], ~args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ANDCM);
        }
        break;

    case INDEX_op_shl_i32:
        if (const_args[2]) {
            tcg_out_shli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_shr_i32:
        if (const_args[2]) {
            tcg_out_shri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_sar_i32:
        if (const_args[2]) {
            tcg_out_sari(s, args[0], args[1], args[2]);
        } else {
            tcg_out_sar(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rotli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rotri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mul_i32:
        tcg_out_xmpyu(s, args[0], TCG_REG_R0, args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_xmpyu(s, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, args[0], args[1], 0);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0], args[1], TCG_REG_R20);
        break;

    case INDEX_op_not_i32:
        tcg_out_arithi(s, args[0], args[1], -1, INSN_SUBI);
        break;
    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, args[0], args[1]);
        break;

    /* These three correspond exactly to the fallback implementation.
       But by including them we reduce the number of TCG ops that
       need to be generated, and these opcodes are fairly common.  */
    case INDEX_op_neg_i32:
        tcg_out_arith(s, args[0], TCG_REG_R0, args[1], INSN_SUB);
        break;
    case INDEX_op_ext8u_i32:
        tcg_out_andi(s, args[0], args[1], 0xff);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_andi(s, args[0], args[1], 0xffff);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], args[0], args[1],
                        args[2], const_args[2],
                        args[3], const_args[3], args[5]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], args[0], args[1], args[2],
                         args[3], const_args[3], args[4], const_args[4]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_add2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[4]);
        break;

    case INDEX_op_sub2_i32:
        tcg_out_sub2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[2], const_args[4]);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}
static const TCGTargetOpDef hppa_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },

    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "r" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "ri" } },
    { INDEX_op_sub_i32, { "r", "rI", "ri" } },
    { INDEX_op_and_i32, { "r", "rZ", "ri" } },
    { INDEX_op_or_i32, { "r", "rZ", "ri" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_andc_i32, { "r", "rZ", "ri" } },

    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },

    { INDEX_op_setcond_i32, { "r", "rZ", "rI" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rI", "rI" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rI", "rZ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rI", "rZ", "rK", "rZ" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L", "L" } },
#endif

    { -1 },
};
static int tcg_target_callee_save_regs[] = {
    /* R2, the return address register, is saved specially
       in the caller's frame.  */
    /* R3, the frame pointer, is not currently modified.  */
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    /* R17 is the global env, so no need to save.  */
    TCG_REG_R18
};
void tcg_target_qemu_prologue(TCGContext *s)
{
    int frame_size, i;

    /* Allocate space for the fixed frame marker.  */
    frame_size = -TCG_TARGET_CALL_STACK_OFFSET;
    frame_size += TCG_TARGET_STATIC_CALL_ARGS_SIZE;

    /* Allocate space for the saved registers.  */
    frame_size += ARRAY_SIZE(tcg_target_callee_save_regs) * 4;

    /* Align the allocated space.  */
    frame_size = ((frame_size + TCG_TARGET_STACK_ALIGN - 1)
                  & -TCG_TARGET_STACK_ALIGN);

    /* The return address is stored in the caller's frame.  */
    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_SP, -20);

    /* Allocate stack frame, saving the first register at the same time.  */
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_SP, frame_size, INSN_STWM);

    /* Save all callee saved registers.  */
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, -frame_size + i * 4);
    }

    if (GUEST_BASE != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
    }

    /* Jump to TB, and adjust R18 to be the return address.  */
    tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R26));
    tcg_out_mov(s, TCG_REG_R18, TCG_REG_R31);

    /* Restore callee saved registers.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_SP, -frame_size - 20);
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, -frame_size + i * 4);
    }

    /* Deallocate stack frame and return.  */
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_RP));
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_SP, -frame_size, INSN_LDWM);
}
void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET1);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);  /* hardwired to zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);  /* addil target */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP);  /* link register */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3);  /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20); /* reserved */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP);  /* data pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);  /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */
    if (GUEST_BASE != 0) {
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }

    tcg_add_target_add_op_defs(hppa_op_defs);
}