2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 static const char * const tcg_target_reg_names
[TCG_TARGET_NB_REGS
] = {
27 "%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
28 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
29 "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
30 "%r24", "%r25", "%r26", "%dp", "%ret0", "%ret1", "%sp", "%r31",
34 /* This is an 8 byte temp slot in the stack frame. */
35 #define STACK_TEMP_OFS -16
37 #ifdef CONFIG_USE_GUEST_BASE
38 #define TCG_GUEST_BASE_REG TCG_REG_R16
40 #define TCG_GUEST_BASE_REG TCG_REG_R0
43 static const int tcg_target_reg_alloc_order
[] = {
69 static const int tcg_target_call_iarg_regs
[4] = {
76 static const int tcg_target_call_oarg_regs
[2] = {
81 /* True iff val fits a signed field of width BITS. */
82 static inline int check_fit_tl(tcg_target_long val
, unsigned int bits
)
84 return (val
<< ((sizeof(tcg_target_long
) * 8 - bits
))
85 >> (sizeof(tcg_target_long
) * 8 - bits
)) == val
;
88 /* True iff depi can be used to compute (reg | MASK).
89 Accept a bit pattern like:
93 Copied from gcc sources. */
94 static inline int or_mask_p(tcg_target_ulong mask
)
96 if (mask
== 0 || mask
== -1) {
100 return (mask
& (mask
- 1)) == 0;
103 /* True iff depi or extru can be used to compute (reg & mask).
104 Accept a bit pattern like these:
108 Copied from gcc sources. */
109 static inline int and_mask_p(tcg_target_ulong mask
)
111 return or_mask_p(~mask
);
/* Encode VAL as a PA-RISC "low sign extension" immediate of width LEN:
   the sign bit is stored in the least significant bit of the field,
   with the magnitude bits above it.  */
static int low_sign_ext(int val, int len)
{
    /* Shift via unsigned to avoid undefined behaviour when VAL is
       negative (C99 6.5.7: left shift of a negative value is UB).  */
    return ((((unsigned)val << 1) & ~(-1u << len)) | ((val >> (len - 1)) & 1));
}
/* Scatter a 12-bit signed branch displacement into the PA-RISC
   instruction-word bit positions (w/w1 fields).  */
static int reassemble_12(int as12)
{
    return (((as12 & 0x800) >> 11) |
            ((as12 & 0x400) >> 8) |
            ((as12 & 0x3ff) << 3));
}
/* Scatter a 17-bit signed branch displacement into the PA-RISC
   instruction-word bit positions (w/w1/w2 fields).  */
static int reassemble_17(int as17)
{
    return (((as17 & 0x10000) >> 16) |
            ((as17 & 0x0f800) << 5) |
            ((as17 & 0x00400) >> 8) |
            ((as17 & 0x003ff) << 3));
}
/* Scatter a 21-bit immediate into the PA-RISC ldil/addil
   instruction-word bit positions.  */
static int reassemble_21(int as21)
{
    return (((as21 & 0x100000) >> 20) |
            ((as21 & 0x0ffe00) >> 8) |
            ((as21 & 0x000180) << 7) |
            ((as21 & 0x00007c) << 14) |
            ((as21 & 0x000003) << 12));
}
143 /* ??? Bizzarely, there is no PCREL12F relocation type. I guess all
144 such relocations are simply fully handled by the assembler. */
145 #define R_PARISC_PCREL12F R_PARISC_NONE
147 static void patch_reloc(uint8_t *code_ptr
, int type
,
148 tcg_target_long value
, tcg_target_long addend
)
150 uint32_t *insn_ptr
= (uint32_t *)code_ptr
;
151 uint32_t insn
= *insn_ptr
;
152 tcg_target_long pcrel
;
155 pcrel
= (value
- ((tcg_target_long
)code_ptr
+ 8)) >> 2;
158 case R_PARISC_PCREL12F
:
159 assert(check_fit_tl(pcrel
, 12));
160 /* ??? We assume all patches are forward. See tcg_out_brcond
161 re setting the NUL bit on the branch and eliding the nop. */
164 insn
|= reassemble_12(pcrel
);
166 case R_PARISC_PCREL17F
:
167 assert(check_fit_tl(pcrel
, 17));
169 insn
|= reassemble_17(pcrel
);
178 /* parse target specific constraints */
179 static int target_parse_constraint(TCGArgConstraint
*ct
, const char **pct_str
)
186 ct
->ct
|= TCG_CT_REG
;
187 tcg_regset_set32(ct
->u
.regs
, 0, 0xffffffff);
189 case 'L': /* qemu_ld/st constraint */
190 ct
->ct
|= TCG_CT_REG
;
191 tcg_regset_set32(ct
->u
.regs
, 0, 0xffffffff);
192 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R26
);
193 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R25
);
194 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R24
);
195 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R23
);
198 ct
->ct
|= TCG_CT_CONST_0
;
201 ct
->ct
|= TCG_CT_CONST_S11
;
204 ct
->ct
|= TCG_CT_CONST_S5
;
207 ct
->ct
|= TCG_CT_CONST_MS11
;
210 ct
->ct
|= TCG_CT_CONST_AND
;
213 ct
->ct
|= TCG_CT_CONST_OR
;
223 /* test if a constant matches the constraint */
224 static int tcg_target_const_match(tcg_target_long val
,
225 const TCGArgConstraint
*arg_ct
)
228 if (ct
& TCG_CT_CONST
) {
230 } else if (ct
& TCG_CT_CONST_0
) {
232 } else if (ct
& TCG_CT_CONST_S5
) {
233 return check_fit_tl(val
, 5);
234 } else if (ct
& TCG_CT_CONST_S11
) {
235 return check_fit_tl(val
, 11);
236 } else if (ct
& TCG_CT_CONST_MS11
) {
237 return check_fit_tl(-val
, 11);
238 } else if (ct
& TCG_CT_CONST_AND
) {
239 return and_mask_p(val
);
240 } else if (ct
& TCG_CT_CONST_OR
) {
241 return or_mask_p(val
);
/* Instruction-word field encodings.  */
#define INSN_OP(x)       ((x) << 26)
#define INSN_EXT3BR(x)   ((x) << 13)
#define INSN_EXT3SH(x)   ((x) << 10)
#define INSN_EXT4(x)     ((x) << 6)
#define INSN_EXT5(x)     (x)
#define INSN_EXT6(x)     ((x) << 6)
#define INSN_EXT7(x)     ((x) << 6)
#define INSN_EXT8A(x)    ((x) << 6)
#define INSN_EXT8B(x)    ((x) << 5)
#define INSN_T(x)        (x)
#define INSN_R1(x)       ((x) << 16)
#define INSN_R2(x)       ((x) << 21)
#define INSN_DEP_LEN(x)  (32 - (x))
#define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
#define INSN_SHDEP_P(x)  ((x) << 5)
#define INSN_COND(x)     ((x) << 13)
#define INSN_IM11(x)     low_sign_ext(x, 11)
#define INSN_IM14(x)     low_sign_ext(x, 14)
#define INSN_IM5(x)      (low_sign_ext(x, 5) << 16)

/* ALU and shift/deposit/extract opcodes.  */
#define INSN_ADD	(INSN_OP(0x02) | INSN_EXT6(0x18))
#define INSN_ADDC	(INSN_OP(0x02) | INSN_EXT6(0x1c))
#define INSN_ADDI	(INSN_OP(0x2d))
#define INSN_ADDIL	(INSN_OP(0x0a))
#define INSN_ADDL	(INSN_OP(0x02) | INSN_EXT6(0x28))
#define INSN_AND	(INSN_OP(0x02) | INSN_EXT6(0x08))
#define INSN_ANDCM	(INSN_OP(0x02) | INSN_EXT6(0x00))
#define INSN_COMCLR	(INSN_OP(0x02) | INSN_EXT6(0x22))
#define INSN_COMICLR	(INSN_OP(0x24))
#define INSN_DEP	(INSN_OP(0x35) | INSN_EXT3SH(3))
#define INSN_DEPI	(INSN_OP(0x35) | INSN_EXT3SH(7))
#define INSN_EXTRS	(INSN_OP(0x34) | INSN_EXT3SH(7))
#define INSN_EXTRU	(INSN_OP(0x34) | INSN_EXT3SH(6))
#define INSN_LDIL	(INSN_OP(0x08))
#define INSN_LDO	(INSN_OP(0x0d))
#define INSN_MTCTL	(INSN_OP(0x00) | INSN_EXT8B(0xc2))
#define INSN_OR		(INSN_OP(0x02) | INSN_EXT6(0x09))
#define INSN_SHD	(INSN_OP(0x34) | INSN_EXT3SH(2))
#define INSN_SUB	(INSN_OP(0x02) | INSN_EXT6(0x10))
#define INSN_SUBB	(INSN_OP(0x02) | INSN_EXT6(0x14))
#define INSN_SUBI	(INSN_OP(0x25))
#define INSN_VEXTRS	(INSN_OP(0x34) | INSN_EXT3SH(5))
#define INSN_VEXTRU	(INSN_OP(0x34) | INSN_EXT3SH(4))
#define INSN_VSHD	(INSN_OP(0x34) | INSN_EXT3SH(0))
#define INSN_XOR	(INSN_OP(0x02) | INSN_EXT6(0x0a))
#define INSN_ZDEP	(INSN_OP(0x35) | INSN_EXT3SH(2))
#define INSN_ZVDEP	(INSN_OP(0x35) | INSN_EXT3SH(0))

/* Branches.  The "_N" forms set the nullify bit.  */
#define INSN_BL         (INSN_OP(0x3a) | INSN_EXT3BR(0))
#define INSN_BL_N       (INSN_OP(0x3a) | INSN_EXT3BR(0) | 2)
#define INSN_BLR        (INSN_OP(0x3a) | INSN_EXT3BR(2))
#define INSN_BV         (INSN_OP(0x3a) | INSN_EXT3BR(6))
#define INSN_BV_N       (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
#define INSN_BLE_SR4    (INSN_OP(0x39) | (1 << 13))

/* Loads (displacement and indexed forms).  */
#define INSN_LDB        (INSN_OP(0x10))
#define INSN_LDH        (INSN_OP(0x11))
#define INSN_LDW        (INSN_OP(0x12))
#define INSN_LDWM       (INSN_OP(0x13))
#define INSN_FLDDS      (INSN_OP(0x0b) | INSN_EXT4(0) | (1 << 12))

#define INSN_LDBX	(INSN_OP(0x03) | INSN_EXT4(0))
#define INSN_LDHX	(INSN_OP(0x03) | INSN_EXT4(1))
#define INSN_LDWX       (INSN_OP(0x03) | INSN_EXT4(2))

/* Stores.  */
#define INSN_STB        (INSN_OP(0x18))
#define INSN_STH        (INSN_OP(0x19))
#define INSN_STW        (INSN_OP(0x1a))
#define INSN_STWM       (INSN_OP(0x1b))
#define INSN_FSTDS      (INSN_OP(0x0b) | INSN_EXT4(8) | (1 << 12))

/* Compare-and-branch.  */
#define INSN_COMBT      (INSN_OP(0x20))
#define INSN_COMBF      (INSN_OP(0x22))
#define INSN_COMIBT     (INSN_OP(0x21))
#define INSN_COMIBF     (INSN_OP(0x23))
332 /* supplied by libgcc */
333 extern void *__canonicalize_funcptr_for_compare(const void *);
335 static void tcg_out_mov(TCGContext
*s
, TCGType type
, TCGReg ret
, TCGReg arg
)
337 /* PA1.1 defines COPY as OR r,0,t; PA2.0 defines COPY as LDO 0(r),t
338 but hppa-dis.c is unaware of this definition */
340 tcg_out32(s
, INSN_OR
| INSN_T(ret
) | INSN_R1(arg
)
341 | INSN_R2(TCG_REG_R0
));
345 static void tcg_out_movi(TCGContext
*s
, TCGType type
,
346 TCGReg ret
, tcg_target_long arg
)
348 if (check_fit_tl(arg
, 14)) {
349 tcg_out32(s
, INSN_LDO
| INSN_R1(ret
)
350 | INSN_R2(TCG_REG_R0
) | INSN_IM14(arg
));
356 tcg_out32(s
, INSN_LDIL
| INSN_R2(ret
) | reassemble_21(hi
));
358 tcg_out32(s
, INSN_LDO
| INSN_R1(ret
)
359 | INSN_R2(ret
) | INSN_IM14(lo
));
364 static void tcg_out_ldst(TCGContext
*s
, int ret
, int addr
,
365 tcg_target_long offset
, int op
)
367 if (!check_fit_tl(offset
, 14)) {
373 if (addr
== TCG_REG_R0
) {
374 op
= INSN_LDIL
| INSN_R2(TCG_REG_R1
);
376 op
= INSN_ADDIL
| INSN_R2(addr
);
378 tcg_out32(s
, op
| reassemble_21(hi
));
384 if (ret
!= addr
|| offset
!= 0 || op
!= INSN_LDO
) {
385 tcg_out32(s
, op
| INSN_R1(ret
) | INSN_R2(addr
) | INSN_IM14(offset
));
389 /* This function is required by tcg.c. */
390 static inline void tcg_out_ld(TCGContext
*s
, TCGType type
, TCGReg ret
,
391 TCGReg arg1
, tcg_target_long arg2
)
393 tcg_out_ldst(s
, ret
, arg1
, arg2
, INSN_LDW
);
396 /* This function is required by tcg.c. */
397 static inline void tcg_out_st(TCGContext
*s
, TCGType type
, TCGReg ret
,
398 TCGReg arg1
, tcg_target_long arg2
)
400 tcg_out_ldst(s
, ret
, arg1
, arg2
, INSN_STW
);
403 static void tcg_out_ldst_index(TCGContext
*s
, int data
,
404 int base
, int index
, int op
)
406 tcg_out32(s
, op
| INSN_T(data
) | INSN_R1(index
) | INSN_R2(base
));
409 static inline void tcg_out_addi2(TCGContext
*s
, int ret
, int arg1
,
412 tcg_out_ldst(s
, ret
, arg1
, val
, INSN_LDO
);
415 /* This function is required by tcg.c. */
416 static inline void tcg_out_addi(TCGContext
*s
, int reg
, tcg_target_long val
)
418 tcg_out_addi2(s
, reg
, reg
, val
);
421 static inline void tcg_out_arith(TCGContext
*s
, int t
, int r1
, int r2
, int op
)
423 tcg_out32(s
, op
| INSN_T(t
) | INSN_R1(r1
) | INSN_R2(r2
));
426 static inline void tcg_out_arithi(TCGContext
*s
, int t
, int r1
,
427 tcg_target_long val
, int op
)
429 assert(check_fit_tl(val
, 11));
430 tcg_out32(s
, op
| INSN_R1(t
) | INSN_R2(r1
) | INSN_IM11(val
));
433 static inline void tcg_out_nop(TCGContext
*s
)
435 tcg_out_arith(s
, TCG_REG_R0
, TCG_REG_R0
, TCG_REG_R0
, INSN_OR
);
438 static inline void tcg_out_mtctl_sar(TCGContext
*s
, int arg
)
440 tcg_out32(s
, INSN_MTCTL
| INSN_R2(11) | INSN_R1(arg
));
443 /* Extract LEN bits at position OFS from ARG and place in RET.
444 Note that here the bit ordering is reversed from the PA-RISC
445 standard, such that the right-most bit is 0. */
446 static inline void tcg_out_extr(TCGContext
*s
, int ret
, int arg
,
447 unsigned ofs
, unsigned len
, int sign
)
449 assert(ofs
< 32 && len
<= 32 - ofs
);
450 tcg_out32(s
, (sign
? INSN_EXTRS
: INSN_EXTRU
)
451 | INSN_R1(ret
) | INSN_R2(arg
)
452 | INSN_SHDEP_P(31 - ofs
) | INSN_DEP_LEN(len
));
455 /* Likewise with OFS interpreted little-endian. */
456 static inline void tcg_out_dep(TCGContext
*s
, int ret
, int arg
,
457 unsigned ofs
, unsigned len
)
459 assert(ofs
< 32 && len
<= 32 - ofs
);
460 tcg_out32(s
, INSN_DEP
| INSN_R2(ret
) | INSN_R1(arg
)
461 | INSN_SHDEP_CP(31 - ofs
) | INSN_DEP_LEN(len
));
464 static inline void tcg_out_depi(TCGContext
*s
, int ret
, int arg
,
465 unsigned ofs
, unsigned len
)
467 assert(ofs
< 32 && len
<= 32 - ofs
);
468 tcg_out32(s
, INSN_DEPI
| INSN_R2(ret
) | INSN_IM5(arg
)
469 | INSN_SHDEP_CP(31 - ofs
) | INSN_DEP_LEN(len
));
472 static inline void tcg_out_shd(TCGContext
*s
, int ret
, int hi
, int lo
,
476 tcg_out32(s
, INSN_SHD
| INSN_R1(hi
) | INSN_R2(lo
) | INSN_T(ret
)
477 | INSN_SHDEP_CP(count
));
480 static void tcg_out_vshd(TCGContext
*s
, int ret
, int hi
, int lo
, int creg
)
482 tcg_out_mtctl_sar(s
, creg
);
483 tcg_out32(s
, INSN_VSHD
| INSN_T(ret
) | INSN_R1(hi
) | INSN_R2(lo
));
486 static void tcg_out_ori(TCGContext
*s
, int ret
, int arg
, tcg_target_ulong m
)
490 /* Note that the argument is constrained to match or_mask_p. */
491 for (bs0
= 0; bs0
< 32; bs0
++) {
492 if ((m
& (1u << bs0
)) != 0) {
496 for (bs1
= bs0
; bs1
< 32; bs1
++) {
497 if ((m
& (1u << bs1
)) == 0) {
501 assert(bs1
== 32 || (1ul << bs1
) > m
);
503 tcg_out_mov(s
, TCG_TYPE_I32
, ret
, arg
);
504 tcg_out_depi(s
, ret
, -1, bs0
, bs1
- bs0
);
507 static void tcg_out_andi(TCGContext
*s
, int ret
, int arg
, tcg_target_ulong m
)
511 /* Note that the argument is constrained to match and_mask_p. */
512 for (ls0
= 0; ls0
< 32; ls0
++) {
513 if ((m
& (1u << ls0
)) == 0) {
517 for (ls1
= ls0
; ls1
< 32; ls1
++) {
518 if ((m
& (1u << ls1
)) != 0) {
522 for (ms0
= ls1
; ms0
< 32; ms0
++) {
523 if ((m
& (1u << ms0
)) == 0) {
530 tcg_out_extr(s
, ret
, arg
, 0, ls0
, 0);
532 tcg_out_mov(s
, TCG_TYPE_I32
, ret
, arg
);
533 tcg_out_depi(s
, ret
, 0, ls0
, ls1
- ls0
);
537 static inline void tcg_out_ext8s(TCGContext
*s
, int ret
, int arg
)
539 tcg_out_extr(s
, ret
, arg
, 0, 8, 1);
542 static inline void tcg_out_ext16s(TCGContext
*s
, int ret
, int arg
)
544 tcg_out_extr(s
, ret
, arg
, 0, 16, 1);
547 static void tcg_out_shli(TCGContext
*s
, int ret
, int arg
, int count
)
550 tcg_out32(s
, INSN_ZDEP
| INSN_R2(ret
) | INSN_R1(arg
)
551 | INSN_SHDEP_CP(31 - count
) | INSN_DEP_LEN(32 - count
));
554 static void tcg_out_shl(TCGContext
*s
, int ret
, int arg
, int creg
)
556 tcg_out_arithi(s
, TCG_REG_R20
, creg
, 31, INSN_SUBI
);
557 tcg_out_mtctl_sar(s
, TCG_REG_R20
);
558 tcg_out32(s
, INSN_ZVDEP
| INSN_R2(ret
) | INSN_R1(arg
) | INSN_DEP_LEN(32));
561 static void tcg_out_shri(TCGContext
*s
, int ret
, int arg
, int count
)
564 tcg_out_extr(s
, ret
, arg
, count
, 32 - count
, 0);
567 static void tcg_out_shr(TCGContext
*s
, int ret
, int arg
, int creg
)
569 tcg_out_vshd(s
, ret
, TCG_REG_R0
, arg
, creg
);
572 static void tcg_out_sari(TCGContext
*s
, int ret
, int arg
, int count
)
575 tcg_out_extr(s
, ret
, arg
, count
, 32 - count
, 1);
578 static void tcg_out_sar(TCGContext
*s
, int ret
, int arg
, int creg
)
580 tcg_out_arithi(s
, TCG_REG_R20
, creg
, 31, INSN_SUBI
);
581 tcg_out_mtctl_sar(s
, TCG_REG_R20
);
582 tcg_out32(s
, INSN_VEXTRS
| INSN_R1(ret
) | INSN_R2(arg
) | INSN_DEP_LEN(32));
585 static void tcg_out_rotli(TCGContext
*s
, int ret
, int arg
, int count
)
588 tcg_out_shd(s
, ret
, arg
, arg
, 32 - count
);
591 static void tcg_out_rotl(TCGContext
*s
, int ret
, int arg
, int creg
)
593 tcg_out_arithi(s
, TCG_REG_R20
, creg
, 32, INSN_SUBI
);
594 tcg_out_vshd(s
, ret
, arg
, arg
, TCG_REG_R20
);
597 static void tcg_out_rotri(TCGContext
*s
, int ret
, int arg
, int count
)
600 tcg_out_shd(s
, ret
, arg
, arg
, count
);
603 static void tcg_out_rotr(TCGContext
*s
, int ret
, int arg
, int creg
)
605 tcg_out_vshd(s
, ret
, arg
, arg
, creg
);
608 static void tcg_out_bswap16(TCGContext
*s
, int ret
, int arg
, int sign
)
611 tcg_out_mov(s
, TCG_TYPE_I32
, ret
, arg
); /* arg = xxAB */
613 tcg_out_dep(s
, ret
, ret
, 16, 8); /* ret = xBAB */
614 tcg_out_extr(s
, ret
, ret
, 8, 16, sign
); /* ret = ..BA */
617 static void tcg_out_bswap32(TCGContext
*s
, int ret
, int arg
, int temp
)
620 tcg_out_rotri(s
, temp
, arg
, 16); /* temp = CDAB */
621 tcg_out_dep(s
, temp
, temp
, 16, 8); /* temp = CBAB */
622 tcg_out_shd(s
, ret
, arg
, temp
, 8); /* ret = DCBA */
625 static void tcg_out_call(TCGContext
*s
, const void *func
)
627 tcg_target_long val
, hi
, lo
, disp
;
629 val
= (uint32_t)__canonicalize_funcptr_for_compare(func
);
630 disp
= (val
- ((tcg_target_long
)s
->code_ptr
+ 8)) >> 2;
632 if (check_fit_tl(disp
, 17)) {
633 tcg_out32(s
, INSN_BL_N
| INSN_R2(TCG_REG_RP
) | reassemble_17(disp
));
638 tcg_out32(s
, INSN_LDIL
| INSN_R2(TCG_REG_R20
) | reassemble_21(hi
));
639 tcg_out32(s
, INSN_BLE_SR4
| INSN_R2(TCG_REG_R20
)
640 | reassemble_17(lo
>> 2));
641 tcg_out_mov(s
, TCG_TYPE_I32
, TCG_REG_RP
, TCG_REG_R31
);
645 static void tcg_out_xmpyu(TCGContext
*s
, int retl
, int reth
,
648 /* Store both words into the stack for copy to the FPU. */
649 tcg_out_ldst(s
, arg1
, TCG_REG_CALL_STACK
, STACK_TEMP_OFS
, INSN_STW
);
650 tcg_out_ldst(s
, arg2
, TCG_REG_CALL_STACK
, STACK_TEMP_OFS
+ 4, INSN_STW
);
652 /* Load both words into the FPU at the same time. We get away
653 with this because we can address the left and right half of the
654 FPU registers individually once loaded. */
655 /* fldds stack_temp(sp),fr22 */
656 tcg_out32(s
, INSN_FLDDS
| INSN_R2(TCG_REG_CALL_STACK
)
657 | INSN_IM5(STACK_TEMP_OFS
) | INSN_T(22));
659 /* xmpyu fr22r,fr22,fr22 */
660 tcg_out32(s
, 0x3ad64796);
662 /* Store the 64-bit result back into the stack. */
663 /* fstds stack_temp(sp),fr22 */
664 tcg_out32(s
, INSN_FSTDS
| INSN_R2(TCG_REG_CALL_STACK
)
665 | INSN_IM5(STACK_TEMP_OFS
) | INSN_T(22));
667 /* Load the pieces of the result that the caller requested. */
669 tcg_out_ldst(s
, reth
, TCG_REG_CALL_STACK
, STACK_TEMP_OFS
, INSN_LDW
);
672 tcg_out_ldst(s
, retl
, TCG_REG_CALL_STACK
, STACK_TEMP_OFS
+ 4,
677 static void tcg_out_add2(TCGContext
*s
, int destl
, int desth
,
678 int al
, int ah
, int bl
, int bh
, int blconst
)
680 int tmp
= (destl
== ah
|| destl
== bh
? TCG_REG_R20
: destl
);
683 tcg_out_arithi(s
, tmp
, al
, bl
, INSN_ADDI
);
685 tcg_out_arith(s
, tmp
, al
, bl
, INSN_ADD
);
687 tcg_out_arith(s
, desth
, ah
, bh
, INSN_ADDC
);
689 tcg_out_mov(s
, TCG_TYPE_I32
, destl
, tmp
);
692 static void tcg_out_sub2(TCGContext
*s
, int destl
, int desth
, int al
, int ah
,
693 int bl
, int bh
, int alconst
, int blconst
)
695 int tmp
= (destl
== ah
|| destl
== bh
? TCG_REG_R20
: destl
);
699 tcg_out_movi(s
, TCG_TYPE_I32
, TCG_REG_R20
, bl
);
702 tcg_out_arithi(s
, tmp
, bl
, al
, INSN_SUBI
);
703 } else if (blconst
) {
704 tcg_out_arithi(s
, tmp
, al
, -bl
, INSN_ADDI
);
706 tcg_out_arith(s
, tmp
, al
, bl
, INSN_SUB
);
708 tcg_out_arith(s
, desth
, ah
, bh
, INSN_SUBB
);
710 tcg_out_mov(s
, TCG_TYPE_I32
, destl
, tmp
);
713 static void tcg_out_branch(TCGContext
*s
, int label_index
, int nul
)
715 TCGLabel
*l
= &s
->labels
[label_index
];
716 uint32_t op
= nul
? INSN_BL_N
: INSN_BL
;
719 tcg_target_long val
= l
->u
.value
;
721 val
-= (tcg_target_long
)s
->code_ptr
+ 8;
723 assert(check_fit_tl(val
, 17));
725 tcg_out32(s
, op
| reassemble_17(val
));
727 /* We need to keep the offset unchanged for retranslation. */
728 uint32_t old_insn
= *(uint32_t *)s
->code_ptr
;
730 tcg_out_reloc(s
, s
->code_ptr
, R_PARISC_PCREL17F
, label_index
, 0);
731 tcg_out32(s
, op
| (old_insn
& 0x1f1ffdu
));
735 static const uint8_t tcg_cond_to_cmp_cond
[10] =
737 [TCG_COND_EQ
] = COND_EQ
,
738 [TCG_COND_NE
] = COND_EQ
| COND_FALSE
,
739 [TCG_COND_LT
] = COND_LT
,
740 [TCG_COND_GE
] = COND_LT
| COND_FALSE
,
741 [TCG_COND_LE
] = COND_LE
,
742 [TCG_COND_GT
] = COND_LE
| COND_FALSE
,
743 [TCG_COND_LTU
] = COND_LTU
,
744 [TCG_COND_GEU
] = COND_LTU
| COND_FALSE
,
745 [TCG_COND_LEU
] = COND_LEU
,
746 [TCG_COND_GTU
] = COND_LEU
| COND_FALSE
,
749 static void tcg_out_brcond(TCGContext
*s
, int cond
, TCGArg c1
,
750 TCGArg c2
, int c2const
, int label_index
)
752 TCGLabel
*l
= &s
->labels
[label_index
];
755 /* Note that COMIB operates as if the immediate is the first
756 operand. We model brcond with the immediate in the second
757 to better match what targets are likely to give us. For
758 consistency, model COMB with reversed operands as well. */
759 pacond
= tcg_cond_to_cmp_cond
[tcg_swap_cond(cond
)];
762 op
= (pacond
& COND_FALSE
? INSN_COMIBF
: INSN_COMIBT
);
765 op
= (pacond
& COND_FALSE
? INSN_COMBF
: INSN_COMBT
);
769 op
|= INSN_COND(pacond
& 7);
772 tcg_target_long val
= l
->u
.value
;
774 val
-= (tcg_target_long
)s
->code_ptr
+ 8;
776 assert(check_fit_tl(val
, 12));
778 /* ??? Assume that all branches to defined labels are backward.
779 Which means that if the nul bit is set, the delay slot is
780 executed if the branch is taken, and not executed in fallthru. */
781 tcg_out32(s
, op
| reassemble_12(val
));
784 /* We need to keep the offset unchanged for retranslation. */
785 uint32_t old_insn
= *(uint32_t *)s
->code_ptr
;
787 tcg_out_reloc(s
, s
->code_ptr
, R_PARISC_PCREL12F
, label_index
, 0);
788 /* ??? Assume that all branches to undefined labels are forward.
789 Which means that if the nul bit is set, the delay slot is
790 not executed if the branch is taken, which is what we want. */
791 tcg_out32(s
, op
| 2 | (old_insn
& 0x1ffdu
));
795 static void tcg_out_comclr(TCGContext
*s
, int cond
, TCGArg ret
,
796 TCGArg c1
, TCGArg c2
, int c2const
)
800 /* Note that COMICLR operates as if the immediate is the first
801 operand. We model setcond with the immediate in the second
802 to better match what targets are likely to give us. For
803 consistency, model COMCLR with reversed operands as well. */
804 pacond
= tcg_cond_to_cmp_cond
[tcg_swap_cond(cond
)];
807 op
= INSN_COMICLR
| INSN_R2(c1
) | INSN_R1(ret
) | INSN_IM11(c2
);
809 op
= INSN_COMCLR
| INSN_R2(c1
) | INSN_R1(c2
) | INSN_T(ret
);
811 op
|= INSN_COND(pacond
& 7);
812 op
|= pacond
& COND_FALSE
? 1 << 12 : 0;
817 static TCGCond
const tcg_high_cond
[] = {
818 [TCG_COND_EQ
] = TCG_COND_EQ
,
819 [TCG_COND_NE
] = TCG_COND_NE
,
820 [TCG_COND_LT
] = TCG_COND_LT
,
821 [TCG_COND_LE
] = TCG_COND_LT
,
822 [TCG_COND_GT
] = TCG_COND_GT
,
823 [TCG_COND_GE
] = TCG_COND_GT
,
824 [TCG_COND_LTU
] = TCG_COND_LTU
,
825 [TCG_COND_LEU
] = TCG_COND_LTU
,
826 [TCG_COND_GTU
] = TCG_COND_GTU
,
827 [TCG_COND_GEU
] = TCG_COND_GTU
830 static void tcg_out_brcond2(TCGContext
*s
, int cond
, TCGArg al
, TCGArg ah
,
831 TCGArg bl
, int blconst
, TCGArg bh
, int bhconst
,
836 tcg_out_comclr(s
, TCG_COND_NE
, TCG_REG_R0
, al
, bl
, blconst
);
837 tcg_out_brcond(s
, TCG_COND_EQ
, ah
, bh
, bhconst
, label_index
);
840 tcg_out_brcond(s
, TCG_COND_NE
, al
, bl
, bhconst
, label_index
);
841 tcg_out_brcond(s
, TCG_COND_NE
, ah
, bh
, bhconst
, label_index
);
844 tcg_out_brcond(s
, tcg_high_cond
[cond
], ah
, bh
, bhconst
, label_index
);
845 tcg_out_comclr(s
, TCG_COND_NE
, TCG_REG_R0
, ah
, bh
, bhconst
);
846 tcg_out_brcond(s
, tcg_unsigned_cond(cond
),
847 al
, bl
, blconst
, label_index
);
852 static void tcg_out_setcond(TCGContext
*s
, int cond
, TCGArg ret
,
853 TCGArg c1
, TCGArg c2
, int c2const
)
855 tcg_out_comclr(s
, tcg_invert_cond(cond
), ret
, c1
, c2
, c2const
);
856 tcg_out_movi(s
, TCG_TYPE_I32
, ret
, 1);
859 static void tcg_out_setcond2(TCGContext
*s
, int cond
, TCGArg ret
,
860 TCGArg al
, TCGArg ah
, TCGArg bl
, int blconst
,
861 TCGArg bh
, int bhconst
)
863 int scratch
= TCG_REG_R20
;
865 /* Note that the low parts are fully consumed before scratch is set. */
866 if (ret
!= ah
&& (bhconst
|| ret
!= bh
)) {
873 tcg_out_setcond(s
, cond
, scratch
, al
, bl
, blconst
);
874 tcg_out_comclr(s
, TCG_COND_EQ
, TCG_REG_R0
, ah
, bh
, bhconst
);
875 tcg_out_movi(s
, TCG_TYPE_I32
, scratch
, cond
== TCG_COND_NE
);
882 /* Optimize compares with low part zero. */
884 tcg_out_setcond(s
, cond
, ret
, ah
, bh
, bhconst
);
893 /* <= : ah < bh | (ah == bh && al <= bl) */
894 tcg_out_setcond(s
, tcg_unsigned_cond(cond
), scratch
, al
, bl
, blconst
);
895 tcg_out_comclr(s
, TCG_COND_EQ
, TCG_REG_R0
, ah
, bh
, bhconst
);
896 tcg_out_movi(s
, TCG_TYPE_I32
, scratch
, 0);
897 tcg_out_comclr(s
, tcg_invert_cond(tcg_high_cond
[cond
]),
898 TCG_REG_R0
, ah
, bh
, bhconst
);
899 tcg_out_movi(s
, TCG_TYPE_I32
, scratch
, 1);
906 tcg_out_mov(s
, TCG_TYPE_I32
, ret
, scratch
);
909 static void tcg_out_movcond(TCGContext
*s
, int cond
, TCGArg ret
,
910 TCGArg c1
, TCGArg c2
, int c2const
,
911 TCGArg v1
, int v1const
)
913 tcg_out_comclr(s
, tcg_invert_cond(cond
), TCG_REG_R0
, c1
, c2
, c2const
);
915 tcg_out_movi(s
, TCG_TYPE_I32
, ret
, v1
);
917 tcg_out_mov(s
, TCG_TYPE_I32
, ret
, v1
);
921 #if defined(CONFIG_SOFTMMU)
922 #include "../../softmmu_defs.h"
924 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
926 static const void * const qemu_ld_helpers
[4] = {
933 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
934 uintxx_t val, int mmu_idx) */
935 static const void * const qemu_st_helpers
[4] = {
942 /* Load and compare a TLB entry, and branch if TLB miss. OFFSET is set to
943 the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
944 TLB for the memory index. The return value is the offset from ENV
945 contained in R1 afterward (to be used when loading ADDEND); if the
946 return value is 0, R1 is not used. */
948 static int tcg_out_tlb_read(TCGContext
*s
, int r0
, int r1
, int addrlo
,
949 int addrhi
, int s_bits
, int lab_miss
, int offset
)
953 /* Extracting the index into the TLB. The "normal C operation" is
954 r1 = addr_reg >> TARGET_PAGE_BITS;
955 r1 &= CPU_TLB_SIZE - 1;
956 r1 <<= CPU_TLB_ENTRY_BITS;
957 What this does is extract CPU_TLB_BITS beginning at TARGET_PAGE_BITS
958 and place them at CPU_TLB_ENTRY_BITS. We can combine the first two
959 operations with an EXTRU. Unfortunately, the current value of
960 CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
962 tcg_out_extr(s
, r1
, addrlo
, TARGET_PAGE_BITS
, CPU_TLB_BITS
, 0);
963 tcg_out_shli(s
, r1
, r1
, CPU_TLB_ENTRY_BITS
);
964 tcg_out_arith(s
, r1
, r1
, TCG_AREG0
, INSN_ADDL
);
966 /* Make sure that both the addr_{read,write} and addend can be
967 read with a 14-bit offset from the same base register. */
968 if (check_fit_tl(offset
+ CPU_TLB_SIZE
, 14)) {
971 ret
= (offset
+ 0x400) & ~0x7ff;
972 offset
= ret
- offset
;
973 tcg_out_addi2(s
, TCG_REG_R1
, r1
, ret
);
977 /* Load the entry from the computed slot. */
978 if (TARGET_LONG_BITS
== 64) {
979 tcg_out_ld(s
, TCG_TYPE_PTR
, TCG_REG_R23
, r1
, offset
);
980 tcg_out_ld(s
, TCG_TYPE_PTR
, TCG_REG_R20
, r1
, offset
+ 4);
982 tcg_out_ld(s
, TCG_TYPE_PTR
, TCG_REG_R20
, r1
, offset
);
985 /* Compute the value that ought to appear in the TLB for a hit, namely,
986 the page of the address. We include the low N bits of the address
987 to catch unaligned accesses and force them onto the slow path. Do
988 this computation after having issued the load from the TLB slot to
989 give the load time to complete. */
990 tcg_out_andi(s
, r0
, addrlo
, TARGET_PAGE_MASK
| ((1 << s_bits
) - 1));
992 /* If not equal, jump to lab_miss. */
993 if (TARGET_LONG_BITS
== 64) {
994 tcg_out_brcond2(s
, TCG_COND_NE
, TCG_REG_R20
, TCG_REG_R23
,
995 r0
, 0, addrhi
, 0, lab_miss
);
997 tcg_out_brcond(s
, TCG_COND_NE
, TCG_REG_R20
, r0
, 0, lab_miss
);
1003 static int tcg_out_arg_reg32(TCGContext
*s
, int argno
, TCGArg v
, bool vconst
)
1007 tcg_out_movi(s
, TCG_TYPE_I32
, tcg_target_call_iarg_regs
[argno
], v
);
1009 tcg_out_mov(s
, TCG_TYPE_I32
, tcg_target_call_iarg_regs
[argno
], v
);
1012 if (vconst
&& v
!= 0) {
1013 tcg_out_movi(s
, TCG_TYPE_I32
, TCG_REG_R20
, v
);
1016 tcg_out_st(s
, TCG_TYPE_I32
, v
, TCG_REG_CALL_STACK
,
1017 TCG_TARGET_CALL_STACK_OFFSET
- ((argno
- 3) * 4));
1022 static int tcg_out_arg_reg64(TCGContext
*s
, int argno
, TCGArg vl
, TCGArg vh
)
1024 /* 64-bit arguments must go in even reg pairs and stack slots. */
1028 argno
= tcg_out_arg_reg32(s
, argno
, vl
, false);
1029 argno
= tcg_out_arg_reg32(s
, argno
, vh
, false);
1034 static void tcg_out_qemu_ld_direct(TCGContext
*s
, int datalo_reg
, int datahi_reg
,
1035 int addr_reg
, int addend_reg
, int opc
)
1037 #ifdef TARGET_WORDS_BIGENDIAN
1038 const int bswap
= 0;
1040 const int bswap
= 1;
1045 tcg_out_ldst_index(s
, datalo_reg
, addr_reg
, addend_reg
, INSN_LDBX
);
1048 tcg_out_ldst_index(s
, datalo_reg
, addr_reg
, addend_reg
, INSN_LDBX
);
1049 tcg_out_ext8s(s
, datalo_reg
, datalo_reg
);
1052 tcg_out_ldst_index(s
, datalo_reg
, addr_reg
, addend_reg
, INSN_LDHX
);
1054 tcg_out_bswap16(s
, datalo_reg
, datalo_reg
, 0);
1058 tcg_out_ldst_index(s
, datalo_reg
, addr_reg
, addend_reg
, INSN_LDHX
);
1060 tcg_out_bswap16(s
, datalo_reg
, datalo_reg
, 1);
1062 tcg_out_ext16s(s
, datalo_reg
, datalo_reg
);
1066 tcg_out_ldst_index(s
, datalo_reg
, addr_reg
, addend_reg
, INSN_LDWX
);
1068 tcg_out_bswap32(s
, datalo_reg
, datalo_reg
, TCG_REG_R20
);
1074 datahi_reg
= datalo_reg
;
1077 /* We can't access the low-part with a reg+reg addressing mode,
1078 so perform the addition now and use reg_ofs addressing mode. */
1079 if (addend_reg
!= TCG_REG_R0
) {
1080 tcg_out_arith(s
, TCG_REG_R20
, addr_reg
, addend_reg
, INSN_ADD
);
1081 addr_reg
= TCG_REG_R20
;
1083 /* Make sure not to clobber the base register. */
1084 if (datahi_reg
== addr_reg
) {
1085 tcg_out_ldst(s
, datalo_reg
, addr_reg
, 4, INSN_LDW
);
1086 tcg_out_ldst(s
, datahi_reg
, addr_reg
, 0, INSN_LDW
);
1088 tcg_out_ldst(s
, datahi_reg
, addr_reg
, 0, INSN_LDW
);
1089 tcg_out_ldst(s
, datalo_reg
, addr_reg
, 4, INSN_LDW
);
1092 tcg_out_bswap32(s
, datalo_reg
, datalo_reg
, TCG_REG_R20
);
1093 tcg_out_bswap32(s
, datahi_reg
, datahi_reg
, TCG_REG_R20
);
1101 static void tcg_out_qemu_ld(TCGContext
*s
, const TCGArg
*args
, int opc
)
1103 int datalo_reg
= *args
++;
1104 /* Note that datahi_reg is only used for 64-bit loads. */
1105 int datahi_reg
= (opc
== 3 ? *args
++ : TCG_REG_R0
);
1106 int addrlo_reg
= *args
++;
1108 #if defined(CONFIG_SOFTMMU)
1109 /* Note that addrhi_reg is only used for 64-bit guests. */
1110 int addrhi_reg
= (TARGET_LONG_BITS
== 64 ? *args
++ : TCG_REG_R0
);
1111 int mem_index
= *args
;
1112 int lab1
, lab2
, argno
, offset
;
1114 lab1
= gen_new_label();
1115 lab2
= gen_new_label();
1117 offset
= offsetof(CPUArchState
, tlb_table
[mem_index
][0].addr_read
);
1118 offset
= tcg_out_tlb_read(s
, TCG_REG_R26
, TCG_REG_R25
, addrlo_reg
,
1119 addrhi_reg
, opc
& 3, lab1
, offset
);
1122 tcg_out_ld(s
, TCG_TYPE_PTR
, TCG_REG_R20
,
1123 (offset
? TCG_REG_R1
: TCG_REG_R25
),
1124 offsetof(CPUArchState
, tlb_table
[mem_index
][0].addend
) - offset
);
1125 tcg_out_qemu_ld_direct(s
, datalo_reg
, datahi_reg
, addrlo_reg
,
1127 tcg_out_branch(s
, lab2
, 1);
1131 tcg_out_label(s
, lab1
, s
->code_ptr
);
1134 argno
= tcg_out_arg_reg32(s
, argno
, TCG_AREG0
, false);
1135 if (TARGET_LONG_BITS
== 64) {
1136 argno
= tcg_out_arg_reg64(s
, argno
, addrlo_reg
, addrhi_reg
);
1138 argno
= tcg_out_arg_reg32(s
, argno
, addrlo_reg
, false);
1140 argno
= tcg_out_arg_reg32(s
, argno
, mem_index
, true);
1142 tcg_out_call(s
, qemu_ld_helpers
[opc
& 3]);
1146 tcg_out_andi(s
, datalo_reg
, TCG_REG_RET0
, 0xff);
1149 tcg_out_ext8s(s
, datalo_reg
, TCG_REG_RET0
);
1152 tcg_out_andi(s
, datalo_reg
, TCG_REG_RET0
, 0xffff);
1155 tcg_out_ext16s(s
, datalo_reg
, TCG_REG_RET0
);
1159 tcg_out_mov(s
, TCG_TYPE_I32
, datalo_reg
, TCG_REG_RET0
);
1162 tcg_out_mov(s
, TCG_TYPE_I32
, datahi_reg
, TCG_REG_RET0
);
1163 tcg_out_mov(s
, TCG_TYPE_I32
, datalo_reg
, TCG_REG_RET1
);
1170 tcg_out_label(s
, lab2
, s
->code_ptr
);
1172 tcg_out_qemu_ld_direct(s
, datalo_reg
, datahi_reg
, addrlo_reg
,
1173 (GUEST_BASE
? TCG_GUEST_BASE_REG
: TCG_REG_R0
), opc
);
/* Emit a store to memory at ADDR_REG of the value in DATALO_REG (and
   DATAHI_REG for the 64-bit case).  OPC encodes the operand size:
   0 = 8-bit, 1 = 16-bit, 2 = 32-bit, 3 = 64-bit.  HPPA is big-endian,
   so when TARGET_WORDS_BIGENDIAN is not set the value is byte-swapped
   through the scratch registers R20/R23 before being stored.  */
static void tcg_out_qemu_st_direct(TCGContext *s, int datalo_reg,
                                   int datahi_reg, int addr_reg, int opc)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif

    switch (opc) {
    case 0:
        /* Byte store needs no swap.  */
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STB);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, TCG_REG_R20, datalo_reg, 0);
            datalo_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STH);
        break;
    case 2:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
            datalo_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STW);
        break;
    case 3:
        if (bswap) {
            /* Swapping a 64-bit value also exchanges the two words,
               so hi and lo trade places after the per-word swaps.  */
            tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
            tcg_out_bswap32(s, TCG_REG_R23, datahi_reg, TCG_REG_R23);
            datahi_reg = TCG_REG_R20;
            datalo_reg = TCG_REG_R23;
        }
        /* Most-significant word at the lower address (big-endian).  */
        tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_STW);
        tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_STW);
        break;
    default:
        tcg_abort();
    }
}
/* Emit a guest store.  ARGS holds the TCG operands: data register(s),
   address register(s) and, under softmmu, the memory index.  OPC is the
   operand size (0/1/2/3 for 8/16/32/64-bit).  Under CONFIG_SOFTMMU this
   emits an inline TLB lookup with a fast path and a slow path that calls
   the qemu_st helper; otherwise it stores directly, offset by
   GUEST_BASE when configured.  */
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit loads.  */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests.  */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argno, next, offset;

    lab1 = gen_new_label();
    lab2 = gen_new_label();

    /* TLB lookup; branches to lab1 on miss and returns the remaining
       displacement of the tlb_table entry from the base register.  */
    offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg,
                              addrhi_reg, opc, lab1, offset);

    /* TLB Hit.  Load the host address addend.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20,
               (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);

    /* There are no indexed stores, so we must do this addition explitly.
       Careful to avoid R20, which is used for the bswaps to follow.  */
    tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_REG_R20, INSN_ADDL);
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, TCG_REG_R31, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  Marshal the call arguments for the store helper.  */
    /* label1: */
    tcg_out_label(s, lab1, s->code_ptr);

    argno = 0;
    argno = tcg_out_arg_reg32(s, argno, TCG_AREG0, false);
    if (TARGET_LONG_BITS == 64) {
        argno = tcg_out_arg_reg64(s, argno, addrlo_reg, addrhi_reg);
    } else {
        argno = tcg_out_arg_reg32(s, argno, addrlo_reg, false);
    }

    /* NEXT is the register (or scratch R20 when spilling to the stack)
       that will carry the possibly-truncated data argument.  */
    next = (argno < 4 ? tcg_target_call_iarg_regs[argno] : TCG_REG_R20);
    switch (opc) {
    case 0:
        tcg_out_andi(s, next, datalo_reg, 0xff);
        argno = tcg_out_arg_reg32(s, argno, next, false);
        break;
    case 1:
        tcg_out_andi(s, next, datalo_reg, 0xffff);
        argno = tcg_out_arg_reg32(s, argno, next, false);
        break;
    case 2:
        argno = tcg_out_arg_reg32(s, argno, datalo_reg, false);
        break;
    case 3:
        argno = tcg_out_arg_reg64(s, argno, datalo_reg, datahi_reg);
        break;
    default:
        tcg_abort();
    }
    argno = tcg_out_arg_reg32(s, argno, mem_index, true);

    tcg_out_call(s, qemu_st_helpers[opc]);

    /* label2: */
    tcg_out_label(s, lab2, s->code_ptr);
#else
    /* There are no indexed stores, so if GUEST_BASE is set we must do
       the add explicitly.  Careful to avoid R20, which is used for the
       bswaps to follow.  */
    if (GUEST_BASE != 0) {
        tcg_out_arith(s, TCG_REG_R31, addrlo_reg,
                      TCG_GUEST_BASE_REG, INSN_ADDL);
        addrlo_reg = TCG_REG_R31;
    }
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, addrlo_reg, opc);
#endif
}
/* Emit the epilogue branch that exits the translated block, returning
   ARG in RET0.  The branch goes through R18, which the prologue set to
   the address just past the BLE that entered the TB.  When ARG does not
   fit the 14-bit immediate of a single instruction it is split into a
   high part loaded before the branch and a low part added in the branch
   delay slot.  NOTE(review): the hi/lo split lines were reconstructed
   from context -- confirm the masks against tcg_out_addi's range.  */
static void tcg_out_exit_tb(TCGContext *s, TCGArg arg)
{
    if (!check_fit_tl(arg, 14)) {
        uint32_t hi, lo;
        hi = arg & ~0x7ff;
        lo = arg & 0x7ff;
        if (lo) {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, hi);
            /* Branch, completing the RET0 value in the delay slot.  */
            tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
            tcg_out_addi(s, TCG_REG_RET0, lo);
            return;
        }
        arg = hi;
    }
    /* Branch, loading RET0 in the delay slot.  */
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, arg);
}
/* Emit a (possibly patched) jump to another translation block.  Only
   the indirect jump method is implemented for this backend: load the
   target address from s->tb_next[arg] and branch through R20.  The
   offset of the code after the jump is recorded so the TB-chaining
   machinery can find it.  */
static void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
{
    if (s->tb_jmp_offset) {
        /* direct jump method */
        fprintf(stderr, "goto_tb direct\n");
        tcg_abort();
    } else {
        /* indirect jump method */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, TCG_REG_R0,
                   (tcg_target_long)(s->tb_next + arg));
        /* BV,N: nullify the delay slot.  */
        tcg_out32(s, INSN_BV_N | INSN_R2(TCG_REG_R20));
    }
    s->tb_next_offset[arg] = s->code_ptr - s->code_buf;
}
/* Central dispatcher: emit host code for one TCG opcode.  ARGS holds
   the operands; CONST_ARGS flags which of them are constants rather
   than registers.  Unknown opcodes abort.  */
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                              const int *const_args)
{
    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_exit_tb(s, args[0]);
        break;
    case INDEX_op_goto_tb:
        tcg_out_goto_tb(s, args[0]);
        break;

    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out_call(s, (void *)args[0]);
        } else {
            /* ??? FIXME: the value in the register in args[0] is almost
               certainly a procedure descriptor, not a code address.  We
               probably need to use the millicode $$dyncall routine.  */
            tcg_abort();
        }
        break;

    case INDEX_op_jmp:
        fprintf(stderr, "unimplemented jmp\n");
        tcg_abort();
        break;

    case INDEX_op_br:
        tcg_out_branch(s, args[0], 1);
        break;

    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        break;
    case INDEX_op_ld8s_i32:
        /* No sign-extending byte load; load then extend.  */
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        tcg_out_ext8s(s, args[0], args[0]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        tcg_out_ext16s(s, args[0], args[0]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDW);
        break;

    case INDEX_op_st8_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STB);
        break;
    case INDEX_op_st16_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STH);
        break;
    case INDEX_op_st_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STW);
        break;

    case INDEX_op_add_i32:
        if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ADDL);
        }
        break;

    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1] - args[2]);
            } else {
                /* Recall that SUBI is a reversed subtract.  */
                tcg_out_arithi(s, args[0], args[2], args[1], INSN_SUBI);
            }
        } else if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], -args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_SUB);
        }
        break;

    case INDEX_op_and_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_AND);
        }
        break;

    case INDEX_op_or_i32:
        if (const_args[2]) {
            tcg_out_ori(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_OR);
        }
        break;

    case INDEX_op_xor_i32:
        tcg_out_arith(s, args[0], args[1], args[2], INSN_XOR);
        break;

    case INDEX_op_andc_i32:
        if (const_args[2]) {
            /* and-complement with a constant is just AND of ~const.  */
            tcg_out_andi(s, args[0], args[1], ~args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ANDCM);
        }
        break;

    case INDEX_op_shl_i32:
        if (const_args[2]) {
            tcg_out_shli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_shr_i32:
        if (const_args[2]) {
            tcg_out_shri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_sar_i32:
        if (const_args[2]) {
            tcg_out_sari(s, args[0], args[1], args[2]);
        } else {
            tcg_out_sar(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rotli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rotri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mul_i32:
        /* XMPYU produces a 64-bit product; discard the high word.  */
        tcg_out_xmpyu(s, args[0], TCG_REG_R0, args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_xmpyu(s, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, args[0], args[1], 0);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0], args[1], TCG_REG_R20);
        break;

    case INDEX_op_not_i32:
        /* NOT x == -1 - x, via the reversed subtract immediate.  */
        tcg_out_arithi(s, args[0], args[1], -1, INSN_SUBI);
        break;
    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, args[0], args[1]);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], args[0], args[1],
                        args[2], const_args[2],
                        args[3], const_args[3], args[5]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], args[0], args[1], args[2],
                         args[3], const_args[3], args[4], const_args[4]);
        break;

    case INDEX_op_movcond_i32:
        tcg_out_movcond(s, args[5], args[0], args[1], args[2], const_args[2],
                        args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_add2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[4]);
        break;

    case INDEX_op_sub2_i32:
        tcg_out_sub2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[2], const_args[4]);
        break;

    case INDEX_op_deposit_i32:
        if (const_args[2]) {
            tcg_out_depi(s, args[0], args[2], args[3], args[4]);
        } else {
            tcg_out_dep(s, args[0], args[2], args[3], args[4]);
        }
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}
/* Operand constraints for each supported opcode.  "r" = any register,
   "Z" = the constant zero may substitute, "I"/"J"/"K"/"M"/"O" = the
   backend's signed/mask immediate classes, "L" = register usable across
   the softmmu slow path, "0" = must match operand 0.  */
static const TCGTargetOpDef hppa_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },

    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "r" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "ri" } },
    { INDEX_op_sub_i32, { "r", "rI", "ri" } },
    { INDEX_op_and_i32, { "r", "rZ", "rM" } },
    { INDEX_op_or_i32, { "r", "rZ", "rO" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rZ" } },
    /* Note that the second argument will be inverted, which means
       we want a constant whose inversion matches M, and that O = ~M.
       See the implementation of and_mask_p.  */
    { INDEX_op_andc_i32, { "r", "rZ", "rO" } },

    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },

    { INDEX_op_setcond_i32, { "r", "rZ", "rI" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rI", "rI" } },

    /* ??? We can actually support a signed 14-bit arg3, but we
       only have existing constraints for a signed 11-bit.  */
    { INDEX_op_movcond_i32, { "r", "rZ", "rI", "rI", "0" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rI", "rZ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rI", "rZ", "rK", "rZ" } },

    { INDEX_op_deposit_i32, { "r", "0", "rJ" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L" } },
#else
    /* 64-bit guest: addresses occupy two registers.  */
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L", "L" } },
#endif
    { -1 },
};
/* Registers saved by the prologue and restored by the epilogue, in the
   order they are laid out in the stack frame.  The debug_frame reg_ofs
   table below must match this ordering.  NOTE(review): the R4..R16 and
   R18 entries were elided in extraction and reconstructed here.  */
static int tcg_target_callee_save_regs[] = {
    /* R2, the return address register, is saved specially
       in the caller's frame.  */
    /* R3, the frame pointer, is not currently modified.  */
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17, /* R17 is the global env.  */
    TCG_REG_R18
};
/* Total stack frame: fixed call-stack offset, outgoing static call
   arguments, the callee-save area, and the TCG temporary buffer,
   rounded up to the required stack alignment.  */
#define FRAME_SIZE ((-TCG_TARGET_CALL_STACK_OFFSET            \
                     + TCG_TARGET_STATIC_CALL_ARGS_SIZE       \
                     + ARRAY_SIZE(tcg_target_callee_save_regs) * 4 \
                     + CPU_TEMP_BUF_NLONGS * sizeof(long)     \
                     + TCG_TARGET_STACK_ALIGN - 1)            \
                    & -TCG_TARGET_STACK_ALIGN)
/* Generate the prologue/epilogue pair that enters and leaves generated
   code.  The prologue saves RP and the callee-saved registers, reserves
   the frame, loads env into AREG0 and jumps to the TB whose address is
   in the second argument register, arranging for R18 to hold the
   return address used by tcg_out_exit_tb.  */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int frame_size, i;

    frame_size = FRAME_SIZE;

    /* The return address is stored in the caller's frame.  */
    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK, -20);

    /* Allocate stack frame, saving the first register at the same time.  */
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_CALL_STACK, frame_size, INSN_STWM);

    /* Save all callee saved registers.  */
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_CALL_STACK, -frame_size + i * 4);
    }

    /* Record the location of the TCG temps.  */
    tcg_set_frame(s, TCG_REG_CALL_STACK, -frame_size + i * 4,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE != 0) {
        /* Materialize GUEST_BASE once and pin its register.  */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    /* Jump to TB, and adjust R18 to be the return address.  */
    tcg_out32(s, INSN_BLE_SR4 | INSN_R2(tcg_target_call_iarg_regs[1]));
    tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R18, TCG_REG_R31);

    /* Restore callee saved registers.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK,
               -frame_size - 20);
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_CALL_STACK, -frame_size + i * 4);
    }

    /* Deallocate stack frame and return.  */
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_RP));
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_CALL_STACK, -frame_size, INSN_LDWM);
}
/* One-time backend initialization: declare the available registers,
   the caller-clobbered set, and the registers the allocator must never
   touch, then register the per-opcode constraints.  */
static void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET1);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);  /* hardwired to zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);  /* addil target */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP);  /* link register */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3);  /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20); /* reserved */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP);  /* data pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);  /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */

    tcg_add_target_add_op_defs(hppa_op_defs);
}
/* Minimal DWARF .debug_frame records describing the prologue's frame
   layout, so GDB can unwind through generated code.  NOTE(review):
   several field lines were elided in extraction; the id/version and
   def_cfa/ret_ofs members are reconstructed -- confirm against the
   companion tcg_register_jit_int users.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    tcg_target_long func_start __attribute__((packed));
    tcg_target_long func_len __attribute__((packed));
    uint8_t def_cfa[4];
    uint8_t ret_ofs[3];
    uint8_t reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrameFDE;

typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDE fde;
} DebugFrame;
/* ELF identity of the in-memory object describing the JIT region.  */
#define ELF_HOST_MACHINE  EM_PARISC
#define ELF_HOST_FLAGS  EFA_PARISC_1_1

/* ??? BFD (and thus GDB) wants very much to distinguish between HPUX
   and other extensions.  We don't really care, but if we don't set this
   to *something* then the object file won't be properly matched.  */
#define ELF_OSABI  ELFOSABI_LINUX
/* Static unwind description matching tcg_target_qemu_prologue: CFA is
   sp biased by FRAME_SIZE, RP is at caller-frame -20, and the callee
   saves sit at 4-byte intervals from the frame base.  NOTE(review):
   the cie.id/version and the r8..r18 reg_ofs entries were elided in
   extraction and reconstructed -- verify against
   tcg_target_callee_save_regs.  */
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = 1,
    .cie.return_column = 2,

    .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */

    .fde.def_cfa = {
        0x12, 30,                       /* DW_CFA_def_cfa_sf sp, ... */
        (-FRAME_SIZE & 0x7f) | 0x80,    /* ... sleb128 -FRAME_SIZE */
        (-FRAME_SIZE >> 7) & 0x7f
    },
    .fde.ret_ofs = {
        0x11, 2, (-20 / 4) & 0x7f       /* DW_CFA_offset_extended_sf r2, 20 */
    },
    .fde.reg_ofs = {
        /* This must match the ordering in tcg_target_callee_save_regs.  */
        0x80 + 4, 0,                    /* DW_CFA_offset r4, 0 */
        0x80 + 5, 4,                    /* DW_CFA_offset r5, 4 */
        0x80 + 6, 8,                    /* DW_CFA_offset r6, 8 */
        0x80 + 7, 12,                   /* ... */
        0x80 + 8, 16,
        0x80 + 9, 20,
        0x80 + 10, 24,
        0x80 + 11, 28,
        0x80 + 12, 32,
        0x80 + 13, 36,
        0x80 + 14, 40,
        0x80 + 15, 44,
        0x80 + 16, 48,
        0x80 + 17, 52,
        0x80 + 18, 56,
    }
};
1857 void tcg_register_jit(void *buf
, size_t buf_size
)
1859 debug_frame
.fde
.func_start
= (tcg_target_long
) buf
;
1860 debug_frame
.fde
.func_len
= buf_size
;
1862 tcg_register_jit_int(buf
, buf_size
, &debug_frame
, sizeof(debug_frame
));