2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 /* We only support generating code for 64-bit mode. */
27 #error "unsupported code generation mode"
30 #include "../tcg-pool.c.inc"
32 #ifdef CONFIG_DEBUG_TCG
33 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
69 #define TCG_CT_CONST_S11 0x100
70 #define TCG_CT_CONST_S13 0x200
71 #define TCG_CT_CONST_ZERO 0x400
74 * For softmmu, we need to avoid conflicts with the first 3
75 * argument registers, which are used to perform the tlb lookup
76 * and to call the helper function.
79 #define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3)
81 #define SOFTMMU_RESERVE_REGS 0
83 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
84 #define ALL_QLDST_REGS (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)
86 /* Define some temporary registers. T2 is used for constant generation. */
87 #define TCG_REG_T1 TCG_REG_G1
88 #define TCG_REG_T2 TCG_REG_O7
90 #ifndef CONFIG_SOFTMMU
91 # define TCG_GUEST_BASE_REG TCG_REG_I5
94 #define TCG_REG_TB TCG_REG_I1
95 #define USE_REG_TB (sizeof(void *) > 4)
97 static const int tcg_target_reg_alloc_order[] = {
127 static const int tcg_target_call_iarg_regs[6] = {
136 static const int tcg_target_call_oarg_regs[] = {
143 #define INSN_OP(x) ((x) << 30)
144 #define INSN_OP2(x) ((x) << 22)
145 #define INSN_OP3(x) ((x) << 19)
146 #define INSN_OPF(x) ((x) << 5)
147 #define INSN_RD(x) ((x) << 25)
148 #define INSN_RS1(x) ((x) << 14)
149 #define INSN_RS2(x) (x)
150 #define INSN_ASI(x) ((x) << 5)
152 #define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
153 #define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
154 #define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
155 #define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
156 #define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
157 #define INSN_COND(x) ((x) << 25)
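/*
 * The INSN_* macros above assemble the individual bit fields of a
 * 32-bit SPARC instruction word; the opcode definitions that follow
 * are built by OR-ing those fields together.
 */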
175 #define BA (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))
184 #define MOVCC_ICC (1 << 18)
185 #define MOVCC_XCC (1 << 18 | 1 << 12)
188 #define BPCC_XCC (2 << 20)
189 #define BPCC_PT (1 << 19)
191 #define BPCC_A (1 << 29)
193 #define BPR_PT BPCC_PT
195 #define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00))
196 #define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
197 #define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01))
198 #define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
199 #define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
200 #define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02))
201 #define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
202 #define ARITH_ORN (INSN_OP(2) | INSN_OP3(0x06))
203 #define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03))
204 #define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04))
205 #define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
206 #define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
207 #define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
208 #define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
209 #define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
210 #define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
211 #define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
212 #define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
213 #define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
214 #define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
215 #define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
216 #define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))
218 #define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
219 #define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))
221 #define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
222 #define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26))
223 #define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27))
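/* Bit 12 is the X field, selecting the 64-bit forms of the shifts. */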
225 #define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
226 #define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
227 #define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))
229 #define RDY (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
230 #define WRY (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
231 #define JMPL (INSN_OP(2) | INSN_OP3(0x38))
232 #define RETURN (INSN_OP(2) | INSN_OP3(0x39))
233 #define SAVE (INSN_OP(2) | INSN_OP3(0x3c))
234 #define RESTORE (INSN_OP(2) | INSN_OP3(0x3d))
235 #define SETHI (INSN_OP(0) | INSN_OP2(0x4))
236 #define CALL INSN_OP(1)
237 #define LDUB (INSN_OP(3) | INSN_OP3(0x01))
238 #define LDSB (INSN_OP(3) | INSN_OP3(0x09))
239 #define LDUH (INSN_OP(3) | INSN_OP3(0x02))
240 #define LDSH (INSN_OP(3) | INSN_OP3(0x0a))
241 #define LDUW (INSN_OP(3) | INSN_OP3(0x00))
242 #define LDSW (INSN_OP(3) | INSN_OP3(0x08))
243 #define LDX (INSN_OP(3) | INSN_OP3(0x0b))
244 #define STB (INSN_OP(3) | INSN_OP3(0x05))
245 #define STH (INSN_OP(3) | INSN_OP3(0x06))
246 #define STW (INSN_OP(3) | INSN_OP3(0x04))
247 #define STX (INSN_OP(3) | INSN_OP3(0x0e))
248 #define LDUBA (INSN_OP(3) | INSN_OP3(0x11))
249 #define LDSBA (INSN_OP(3) | INSN_OP3(0x19))
250 #define LDUHA (INSN_OP(3) | INSN_OP3(0x12))
251 #define LDSHA (INSN_OP(3) | INSN_OP3(0x1a))
252 #define LDUWA (INSN_OP(3) | INSN_OP3(0x10))
253 #define LDSWA (INSN_OP(3) | INSN_OP3(0x18))
254 #define LDXA (INSN_OP(3) | INSN_OP3(0x1b))
255 #define STBA (INSN_OP(3) | INSN_OP3(0x15))
256 #define STHA (INSN_OP(3) | INSN_OP3(0x16))
257 #define STWA (INSN_OP(3) | INSN_OP3(0x14))
258 #define STXA (INSN_OP(3) | INSN_OP3(0x1e))
260 #define MEMBAR (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))
262 #define NOP (SETHI | INSN_RD(TCG_REG_G0) | 0)
264 #ifndef ASI_PRIMARY_LITTLE
265 #define ASI_PRIMARY_LITTLE 0x88
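/*
 * Little-endian accesses are implemented with the alternate-space
 * load/store opcodes above, placing ASI_PRIMARY_LITTLE in the ASI field.
 */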
268 #define LDUH_LE (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
269 #define LDSH_LE (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
270 #define LDUW_LE (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
271 #define LDSW_LE (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
272 #define LDX_LE (LDXA | INSN_ASI(ASI_PRIMARY_LITTLE))
274 #define STH_LE (STHA | INSN_ASI(ASI_PRIMARY_LITTLE))
275 #define STW_LE (STWA | INSN_ASI(ASI_PRIMARY_LITTLE))
276 #define STX_LE (STXA | INSN_ASI(ASI_PRIMARY_LITTLE))
278 #ifndef use_vis3_instructions
279 bool use_vis3_instructions;
282 static bool check_fit_i64(int64_t val, unsigned int bits)
284 return val == sextract64(val, 0, bits);
287 static bool check_fit_i32(int32_t val, unsigned int bits)
289 return val == sextract32(val, 0, bits);
292 #define check_fit_tl check_fit_i64
293 #define check_fit_ptr check_fit_i64
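/*
 * Fix up a relocation recorded against already-emitted code: recompute
 * the displacement to VALUE and rewrite the branch offset or immediate
 * field selected by TYPE, failing if the value no longer fits.
 */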
295 static bool patch_reloc(tcg_insn_unit *src_rw, int type,
296 intptr_t value, intptr_t addend)
298 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
299 uint32_t insn = *src_rw;
303 pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);
306 case R_SPARC_WDISP16:
307 if (!check_fit_ptr(pcrel >> 2, 16)) {
310 insn &= ~INSN_OFF16(-1);
311 insn |= INSN_OFF16(pcrel);
313 case R_SPARC_WDISP19:
314 if (!check_fit_ptr(pcrel >> 2, 19)) {
317 insn &= ~INSN_OFF19(-1);
318 insn |= INSN_OFF19(pcrel);
321 if (!check_fit_ptr(value, 13)) {
324 insn &= ~INSN_IMM13(-1);
325 insn |= INSN_IMM13(value);
328 g_assert_not_reached();
335 /* test if a constant matches the constraint */
336 static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
338 if (ct & TCG_CT_CONST) {
342 if (type == TCG_TYPE_I32) {
346 if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
348 } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
350 } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
357 static void tcg_out_nop(TCGContext *s)
362 static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
365 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
368 static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
369 int32_t offset, int op)
371 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
374 static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
375 int32_t val2, int val2const, int op)
377 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
378 | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
381 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
384 tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
389 static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
392 tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
398 static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
400 tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
403 static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
405 tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
408 static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg)
410 if (check_fit_i32(arg, 13)) {
411 /* A 13-bit constant sign-extended to 64 bits. */
412 tcg_out_movi_imm13(s, ret, arg);
414 /* A 32-bit constant zero-extended to 64 bits. */
415 tcg_out_sethi(s, ret, arg);
417 tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
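/*
 * Load an arbitrary constant, trying progressively longer sequences:
 * a 32-bit (or zero-extended 32-bit) constant, a 13-bit immediate,
 * a 13-bit TB-relative offset, a sethi/xor pair for sign-extended
 * 32-bit values, a shifted 32-bit constant, the constant pool, and
 * finally two 32-bit halves merged with a shift.
 */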
422 static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
423 tcg_target_long arg, bool in_prologue,
426 tcg_target_long hi, lo = (int32_t)arg;
427 tcg_target_long test, lsb;
429 /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
430 if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
431 tcg_out_movi_imm32(s, ret, arg);
435 /* A 13-bit constant sign-extended to 64 bits. */
436 if (check_fit_tl(arg, 13)) {
437 tcg_out_movi_imm13(s, ret, arg);
441 /* A 13-bit constant relative to the TB. */
442 if (!in_prologue && USE_REG_TB) {
443 test = tcg_tbrel_diff(s, (void *)arg);
444 if (check_fit_ptr(test, 13)) {
445 tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
450 /* A 32-bit constant sign-extended to 64 bits. */
452 tcg_out_sethi(s, ret, ~arg);
453 tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
457 /* A 32-bit constant, shifted. */
459 test = (tcg_target_long)arg >> lsb;
460 if (lsb > 10 && test == extract64(test, 0, 21)) {
461 tcg_out_sethi(s, ret, test << 10);
462 tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
464 } else if (test == (uint32_t)test || test == (int32_t)test) {
465 tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
466 tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
470 /* Use the constant pool, if possible. */
471 if (!in_prologue && USE_REG_TB) {
472 new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
473 tcg_tbrel_diff(s, NULL));
474 tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
478 /* A 64-bit constant decomposed into 2 32-bit pieces. */
479 if (check_fit_i32(lo, 13)) {
480 hi = (arg - lo) >> 32;
481 tcg_out_movi_imm32(s, ret, hi);
482 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
483 tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
486 tcg_out_movi_imm32(s, ret, hi);
487 tcg_out_movi_imm32(s, scratch, lo);
488 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
489 tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
493 static void tcg_out_movi(TCGContext *s, TCGType type,
494 TCGReg ret, tcg_target_long arg)
496 tcg_debug_assert(ret != TCG_REG_T2);
497 tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2);
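/*
 * Memory operands use either register+register addressing or a signed
 * 13-bit immediate offset; larger offsets are materialized in T1 first.
 */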
500 static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
503 tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
506 static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
507 intptr_t offset, int op)
509 if (check_fit_ptr(offset, 13)) {
510 tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
513 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
514 tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
518 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
519 TCGReg arg1, intptr_t arg2)
521 tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
524 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
525 TCGReg arg1, intptr_t arg2)
527 tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
530 static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
531 TCGReg base, intptr_t ofs)
534 tcg_out_st(s, type, TCG_REG_G0, base, ofs);
540 static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, const void *arg)
542 intptr_t diff = tcg_tbrel_diff(s, arg);
543 if (USE_REG_TB && check_fit_ptr(diff, 13)) {
544 tcg_out_ld(s, TCG_TYPE_PTR, ret, TCG_REG_TB, diff);
547 tcg_out_movi(s, TCG_TYPE_PTR, ret, (uintptr_t)arg & ~0x3ff);
548 tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, (uintptr_t)arg & 0x3ff);
551 static void tcg_out_sety(TCGContext *s, TCGReg rs)
553 tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
556 static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
557 int32_t val2, int val2const, int uns)
559 /* Load Y with the sign/zero extension of RS1 to 64 bits. */
561 tcg_out_sety(s, TCG_REG_G0);
563 tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
564 tcg_out_sety(s, TCG_REG_T1);
567 tcg_out_arithc(s, rd, rs1, val2, val2const,
568 uns ? ARITH_UDIV : ARITH_SDIV);
571 static const uint8_t tcg_cond_to_bcond[] = {
572 [TCG_COND_EQ] = COND_E,
573 [TCG_COND_NE] = COND_NE,
574 [TCG_COND_LT] = COND_L,
575 [TCG_COND_GE] = COND_GE,
576 [TCG_COND_LE] = COND_LE,
577 [TCG_COND_GT] = COND_G,
578 [TCG_COND_LTU] = COND_CS,
579 [TCG_COND_GEU] = COND_CC,
580 [TCG_COND_LEU] = COND_LEU,
581 [TCG_COND_GTU] = COND_GU,
584 static const uint8_t tcg_cond_to_rcond[] = {
585 [TCG_COND_EQ] = RCOND_Z,
586 [TCG_COND_NE] = RCOND_NZ,
587 [TCG_COND_LT] = RCOND_LZ,
588 [TCG_COND_GT] = RCOND_GZ,
589 [TCG_COND_LE] = RCOND_LEZ,
590 [TCG_COND_GE] = RCOND_GEZ
593 static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
595 tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
598 static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
603 off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
605 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
607 tcg_out_bpcc0(s, scond, flags, off19);
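/* Compare by subtracting into %g0, which only updates the condition codes. */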
610 static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
612 tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
615 static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
616 int32_t arg2, int const_arg2, TCGLabel *l)
618 tcg_out_cmp(s, arg1, arg2, const_arg2);
619 tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
623 static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
624 int32_t v1, int v1const)
626 tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
627 | INSN_RS1(tcg_cond_to_bcond[cond])
628 | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
631 static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
632 TCGReg c1, int32_t c2, int c2const,
633 int32_t v1, int v1const)
635 tcg_out_cmp(s, c1, c2, c2const);
636 tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
639 static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
640 int32_t arg2, int const_arg2, TCGLabel *l)
642 /* For 64-bit signed comparisons vs zero, we can avoid the compare. */
643 if (arg2 == 0 && !is_unsigned_cond(cond)) {
647 off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
649 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
651 tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
652 | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
654 tcg_out_cmp(s, arg1, arg2, const_arg2);
655 tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
660 static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
661 int32_t v1, int v1const)
663 tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
664 | (tcg_cond_to_rcond[cond] << 10)
665 | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
668 static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
669 TCGReg c1, int32_t c2, int c2const,
670 int32_t v1, int v1const)
672 /* For 64-bit signed comparisons vs zero, we can avoid the compare.
673 Note that the immediate range is one bit smaller, so we must check for that as well. */
675 if (c2 == 0 && !is_unsigned_cond(cond)
676 && (!v1const || check_fit_i32(v1, 10))) {
677 tcg_out_movr(s, cond, ret, c1, v1, v1const);
679 tcg_out_cmp(s, c1, c2, c2const);
680 tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
684 static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
685 TCGReg c1, int32_t c2, int c2const)
687 /* For 32-bit comparisons, we can play games with ADDC/SUBC. */
691 /* The result of the comparison is in the carry bit. */
696 /* For equality, we can transform to inequality vs zero. */
698 tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
703 c1 = TCG_REG_G0, c2const = 0;
704 cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
709 /* If we don't need to load a constant into a register, we can
710 swap the operands on GTU/LEU. There's no benefit to loading
711 the constant into a temporary register. */
712 if (!c2const || c2 == 0) {
717 cond = tcg_swap_cond(cond);
723 tcg_out_cmp(s, c1, c2, c2const);
724 tcg_out_movi_imm13(s, ret, 0);
725 tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
729 tcg_out_cmp(s, c1, c2, c2const);
730 if (cond == TCG_COND_LTU) {
731 tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
733 tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
737 static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
738 TCGReg c1, int32_t c2, int c2const)
740 if (use_vis3_instructions) {
746 c2 = c1, c2const = 0, c1 = TCG_REG_G0;
749 tcg_out_cmp(s, c1, c2, c2const);
750 tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
757 /* For 64-bit signed comparisons vs zero, we can avoid the compare
758 if the input does not overlap the output. */
759 if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
760 tcg_out_movi_imm13(s, ret, 0);
761 tcg_out_movr(s, cond, ret, c1, 1, 1);
763 tcg_out_cmp(s, c1, c2, c2const);
764 tcg_out_movi_imm13(s, ret, 0);
765 tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
769 static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
770 TCGReg al, TCGReg ah, int32_t bl, int blconst,
771 int32_t bh, int bhconst, int opl, int oph)
773 TCGReg tmp = TCG_REG_T1;
775 /* Note that the low parts are fully consumed before tmp is set. */
776 if (rl != ah && (bhconst || rl != bh)) {
780 tcg_out_arithc(s, tmp, al, bl, blconst, opl);
781 tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
782 tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
785 static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
786 TCGReg al, TCGReg ah, int32_t bl, int blconst,
787 int32_t bh, int bhconst, bool is_sub)
789 TCGReg tmp = TCG_REG_T1;
791 /* Note that the low parts are fully consumed before tmp is set. */
792 if (rl != ah && (bhconst || rl != bh)) {
796 tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);
798 if (use_vis3_instructions && !is_sub) {
799 /* Note that ADDXC doesn't accept immediates. */
800 if (bhconst && bh != 0) {
801 tcg_out_movi_imm13(s, TCG_REG_T2, bh);
804 tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
805 } else if (bh == TCG_REG_G0) {
806 /* If we have a zero, we can perform the operation in two insns,
807 with the arithmetic first, and a conditional move into place. */
809 tcg_out_arithi(s, TCG_REG_T2, ah, 1,
810 is_sub ? ARITH_SUB : ARITH_ADD);
811 tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
813 tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
814 tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
818 * Otherwise adjust BH as if there is carry into T2.
819 * Note that constant BH is constrained to 11 bits for the MOVCC,
820 * so the adjustment fits 12 bits.
823 tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
825 tcg_out_arithi(s, TCG_REG_T2, bh, 1,
826 is_sub ? ARITH_SUB : ARITH_ADD);
828 /* ... smoosh T2 back to original BH if carry is clear ... */
829 tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
830 /* ... and finally perform the arithmetic with the new operand. */
831 tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
834 tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
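/*
 * Emit a call or tail call to an out-of-range destination: build all but
 * the low 12 bits of the address in T1, then JMPL to T1 plus the low bits.
 * A tail call writes the link register to %g0, and the scratch used for
 * the address is chosen so that %o7 is not clobbered.
 */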
837 static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
838 bool in_prologue, bool tail_call)
840 uintptr_t desti = (uintptr_t)dest;
842 /* Be careful not to clobber %o7 for a tail call. */
843 tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
844 desti & ~0xfff, in_prologue,
845 tail_call ? TCG_REG_G2 : TCG_REG_O7);
846 tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
847 TCG_REG_T1, desti & 0xfff, JMPL);
850 static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
853 ptrdiff_t disp = tcg_pcrel_diff(s, dest);
855 if (disp == (int32_t)disp) {
856 tcg_out32(s, CALL | (uint32_t)disp >> 2);
858 tcg_out_jmpl_const(s, dest, in_prologue, false);
862 static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest)
864 tcg_out_call_nodelay(s, dest, false);
868 static void tcg_out_mb(TCGContext *s, TCGArg a0)
870 /* Note that the TCG memory order constants mirror the Sparc MEMBAR. */
871 tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
874 #ifdef CONFIG_SOFTMMU
875 static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1];
876 static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1];
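/*
 * The trampolines below are entered from the qemu_ld/st slow paths with
 * the guest address and (for stores) the data already in the argument
 * registers.  Each one fills in the return address and env arguments,
 * zero-extends store data as needed, and tail-jumps to the C helper.
 */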
878 static void emit_extend(TCGContext *s, TCGReg r, int op)
880 /* Emit a zero-extension of 8-, 16- or 32-bit data as
881 * required by the MO_* value op; do nothing for 64-bit data.
883 switch (op & MO_SIZE) {
885 tcg_out_arithi(s, r, r, 0xff, ARITH_AND);
888 tcg_out_arithi(s, r, r, 16, SHIFT_SLL);
889 tcg_out_arithi(s, r, r, 16, SHIFT_SRL);
892 tcg_out_arith(s, r, r, 0, SHIFT_SRL);
899 static void build_trampolines(TCGContext *s)
901 static void * const qemu_ld_helpers[] = {
902 [MO_UB] = helper_ret_ldub_mmu,
903 [MO_SB] = helper_ret_ldsb_mmu,
904 [MO_LEUW] = helper_le_lduw_mmu,
905 [MO_LESW] = helper_le_ldsw_mmu,
906 [MO_LEUL] = helper_le_ldul_mmu,
907 [MO_LEUQ] = helper_le_ldq_mmu,
908 [MO_BEUW] = helper_be_lduw_mmu,
909 [MO_BESW] = helper_be_ldsw_mmu,
910 [MO_BEUL] = helper_be_ldul_mmu,
911 [MO_BEUQ] = helper_be_ldq_mmu,
913 static void * const qemu_st_helpers[] = {
914 [MO_UB] = helper_ret_stb_mmu,
915 [MO_LEUW] = helper_le_stw_mmu,
916 [MO_LEUL] = helper_le_stl_mmu,
917 [MO_LEUQ] = helper_le_stq_mmu,
918 [MO_BEUW] = helper_be_stw_mmu,
919 [MO_BEUL] = helper_be_stl_mmu,
920 [MO_BEUQ] = helper_be_stq_mmu,
925 for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) {
926 if (qemu_ld_helpers[i] == NULL) {
930 /* May as well align the trampoline. */
931 while ((uintptr_t)s->code_ptr & 15) {
934 qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);
936 /* Set the retaddr operand. */
937 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O3, TCG_REG_O7);
939 tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true);
940 /* delay slot -- set the env argument */
941 tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
944 for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) {
945 if (qemu_st_helpers[i] == NULL) {
949 /* May as well align the trampoline. */
950 while ((uintptr_t)s->code_ptr & 15) {
953 qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);
955 emit_extend(s, TCG_REG_O2, i);
957 /* Set the retaddr operand. */
958 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O4, TCG_REG_O7);
961 tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true);
962 /* delay slot -- set the env argument */
963 tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
967 static const tcg_insn_unit *qemu_unalign_ld_trampoline;
968 static const tcg_insn_unit *qemu_unalign_st_trampoline;
970 static void build_trampolines(TCGContext *s)
972 for (int ld = 0; ld < 2; ++ld) {
975 while ((uintptr_t)s->code_ptr & 15) {
980 helper = helper_unaligned_ld;
981 qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr);
983 helper = helper_unaligned_st;
984 qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr);
988 tcg_out_jmpl_const(s, helper, true, true);
989 /* delay slot -- set the env argument */
990 tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
995 /* Generate global QEMU prologue and epilogue code */
996 static void tcg_target_qemu_prologue(TCGContext *s)
998 int tmp_buf_size, frame_size;
1001 * The TCG temp buffer is at the top of the frame, immediately
1002 * below the frame pointer. Use the logical (aligned) offset here;
1003 * the stack bias is applied in temp_allocate_frame().
1005 tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
1006 tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);
1009 * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
1010 * otherwise the minimal frame usable by callees.
1012 frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
1013 frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
1014 frame_size += TCG_TARGET_STACK_ALIGN - 1;
1015 frame_size &= -TCG_TARGET_STACK_ALIGN;
1016 tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
1017 INSN_IMM13(-frame_size));
1019 #ifndef CONFIG_SOFTMMU
1020 if (guest_base != 0) {
1021 tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
1022 guest_base, true, TCG_REG_T1);
1023 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
1027 /* We choose TCG_REG_TB such that no move is required. */
1029 QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
1030 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
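/*
 * tcg_qemu_tb_exec passes env and the TB pointer in %o0/%o1, which the
 * SAVE above renames to %i0/%i1, so both TCG_AREG0 and TCG_REG_TB are
 * already in place and we can jump straight through %i1.
 */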
1033 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
1037 /* Epilogue for goto_ptr. */
1038 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
1039 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1041 tcg_out_movi_imm13(s, TCG_REG_O0, 0);
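/*
 * RETURN restores the caller's register window before its delay slot
 * executes, so the move above lands in the caller's %o0 and becomes the
 * value returned by tcg_qemu_tb_exec.
 */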
1043 build_trampolines(s);
1046 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
1049 for (i = 0; i < count; ++i) {
1054 #if defined(CONFIG_SOFTMMU)
1056 /* We expect to use a 13-bit negative offset from ENV. */
1057 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
1058 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));
1060 /* Perform the TLB load and compare.
1063 ADDR contains the guest address.
1065 MEM_INDEX and OPC are the memory context and memory operation of the access.
1067 WHICH is the offset into the CPUTLBEntry structure of the slot to read.
1068 This should be offsetof addr_read or addr_write.
1070 The result of the TLB comparison is in %[ix]cc. The sanitized address
1071 is in the returned register, maybe %o0. The TLB addend is in %o1. */
1073 static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
1074 MemOp opc, int which)
1076 int fast_off = TLB_MASK_TABLE_OFS(mem_index);
1077 int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
1078 int table_off = fast_off + offsetof(CPUTLBDescFast, table);
1079 const TCGReg r0 = TCG_REG_O0;
1080 const TCGReg r1 = TCG_REG_O1;
1081 const TCGReg r2 = TCG_REG_O2;
1082 unsigned s_bits = opc & MO_SIZE;
1083 unsigned a_bits = get_alignment_bits(opc);
1084 tcg_target_long compare_mask;
1086 /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
1087 tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off);
1088 tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off);
1090 /* Extract the page index, shifted into place for tlb index. */
1091 tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
1093 tcg_out_arith(s, r2, r2, r0, ARITH_AND);
1095 /* Add the tlb_table pointer, creating the CPUTLBEntry address in R2. */
1096 tcg_out_arith(s, r2, r2, r1, ARITH_ADD);
1098 /* Load the tlb comparator and the addend. */
1099 tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
1100 tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));
1102 /* Mask out the page offset, except for the required alignment.
1103 We don't support unaligned accesses. */
1104 if (a_bits < s_bits) {
1107 compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
1108 if (check_fit_tl(compare_mask, 13)) {
1109 tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
1111 tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
1112 tcg_out_arith(s, r2, addr, r2, ARITH_AND);
1114 tcg_out_cmp(s, r0, r2, 0);
1116 /* If the guest address must be zero-extended, do so now. */
1117 if (TARGET_LONG_BITS == 32) {
1118 tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
1123 #endif /* CONFIG_SOFTMMU */
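/*
 * Map a MemOp (size, signedness, byte order) to the host load/store
 * opcode; the little-endian entries use the ASI-based forms above.
 */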
1125 static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
1128 [MO_UB | MO_LE] = LDUB,
1129 [MO_SB | MO_LE] = LDSB,
1138 [MO_LEUW] = LDUH_LE,
1139 [MO_LESW] = LDSH_LE,
1140 [MO_LEUL] = LDUW_LE,
1141 [MO_LESL] = LDSW_LE,
1146 static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
1158 static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
1159 MemOpIdx oi, bool is_64)
1161 MemOp memop = get_memop(oi);
1162 tcg_insn_unit *label_ptr;
1164 #ifdef CONFIG_SOFTMMU
1165 unsigned memi = get_mmuidx(oi);
1167 const tcg_insn_unit *func;
1169 addrz = tcg_out_tlb_load(s, addr, memi, memop,
1170 offsetof(CPUTLBEntry, addr_read));
1172 /* The fast path is exactly one insn. Thus we can perform the
1173 entire TLB Hit in the (annulled) delay slot of the branch
1174 over the TLB Miss case. */
1176 /* beq,a,pt %[xi]cc, label0 */
1177 label_ptr = s->code_ptr;
1178 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
1179 | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
1181 tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
1182 qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
1186 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);
1188 /* We use the helpers to extend SB and SW data, leaving the SL case
1189 to be extended explicitly below. */
1190 if ((memop & MO_SSIZE) == MO_SL) {
1191 func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
1193 func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
1195 tcg_debug_assert(func != NULL);
1196 tcg_out_call_nodelay(s, func, false);
1198 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi);
1200 /* We let the helper sign-extend SB and SW, but leave SL for here. */
1201 if (is_64 && (memop & MO_SSIZE) == MO_SL) {
1202 tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
1204 tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
1207 *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
1209 TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
1210 unsigned a_bits = get_alignment_bits(memop);
1211 unsigned s_bits = memop & MO_SIZE;
1214 if (TARGET_LONG_BITS == 32) {
1215 tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
1220 * Normal case: alignment equal to access size.
1222 if (a_bits == s_bits) {
1223 tcg_out_ldst_rr(s, data, addr, index,
1224 qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
1229 * Test for at least natural alignment, and assume most accesses
1230 * will be aligned -- perform a straight load in the delay slot.
1231 * This is required to preserve atomicity for aligned accesses.
1233 t_bits = MAX(a_bits, s_bits);
1234 tcg_debug_assert(t_bits < 13);
1235 tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);
1237 /* beq,a,pt %icc, label */
1238 label_ptr = s->code_ptr;
1239 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
1241 tcg_out_ldst_rr(s, data, addr, index,
1242 qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
1244 if (a_bits >= s_bits) {
1246 * Overalignment: A successful alignment test will perform the memory
1247 * operation in the delay slot, and failure need only invoke the
1248 * handler for SIGBUS.
1250 tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false);
1251 /* delay slot -- move to low part of argument reg */
1252 tcg_out_mov_delay(s, TCG_REG_O1, addr);
1254 /* Underalignment: load by pieces of minimum alignment. */
1255 int ld_opc, a_size, s_size, i;
1258 * Force full address into T1 early; avoids problems with
1259 * overlap between @addr and @data.
1261 tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);
1263 a_size = 1 << a_bits;
1264 s_size = 1 << s_bits;
1265 if ((memop & MO_BSWAP) == MO_BE) {
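/* Big-endian: load the most significant piece first, then shift it
   up by one piece and OR in each subsequent piece. */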
1266 ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)];
1267 tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
1268 ld_opc = qemu_ld_opc[a_bits | MO_BE];
1269 for (i = a_size; i < s_size; i += a_size) {
1270 tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
1271 tcg_out_arithi(s, data, data, a_size * 8, SHIFT_SLLX);
1272 tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
1274 } else if (a_bits == 0) {
1276 tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
1277 for (i = a_size; i < s_size; i += a_size) {
1278 if ((memop & MO_SIGN) && i == s_size - a_size) {
1281 tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
1282 tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
1283 tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
1286 ld_opc = qemu_ld_opc[a_bits | MO_LE];
1287 tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc);
1288 for (i = a_size; i < s_size; i += a_size) {
1289 tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
1290 if ((memop & MO_SIGN) && i == s_size - a_size) {
1291 ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN];
1293 tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc);
1294 tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
1295 tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
1300 *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
1301 #endif /* CONFIG_SOFTMMU */
1304 static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
1307 MemOp memop = get_memop(oi);
1308 tcg_insn_unit *label_ptr;
1310 #ifdef CONFIG_SOFTMMU
1311 unsigned memi = get_mmuidx(oi);
1313 const tcg_insn_unit *func;
1315 addrz = tcg_out_tlb_load(s, addr, memi, memop,
1316 offsetof(CPUTLBEntry, addr_write));
1318 /* The fast path is exactly one insn. Thus we can perform the entire
1319 TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */
1320 /* beq,a,pt %[xi]cc, label0 */
1321 label_ptr = s->code_ptr;
1322 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
1323 | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
1325 tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
1326 qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
1330 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);
1331 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O2, data);
1333 func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
1334 tcg_debug_assert(func != NULL);
1335 tcg_out_call_nodelay(s, func, false);
1337 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O3, oi);
1339 *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
1341 TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
1342 unsigned a_bits = get_alignment_bits(memop);
1343 unsigned s_bits = memop & MO_SIZE;
1346 if (TARGET_LONG_BITS == 32) {
1347 tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
1352 * Normal case: alignment equal to access size.
1354 if (a_bits == s_bits) {
1355 tcg_out_ldst_rr(s, data, addr, index,
1356 qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
1361 * Test for at least natural alignment, and assume most accesses
1362 * will be aligned -- perform a straight store in the delay slot.
1363 * This is required to preserve atomicity for aligned accesses.
1365 t_bits = MAX(a_bits, s_bits);
1366 tcg_debug_assert(t_bits < 13);
1367 tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);
1369 /* beq,a,pt %icc, label */
1370 label_ptr = s->code_ptr;
1371 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
1373 tcg_out_ldst_rr(s, data, addr, index,
1374 qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
1376 if (a_bits >= s_bits) {
1378 * Overalignment: A successful alignment test will perform the memory
1379 * operation in the delay slot, and failure need only invoke the
1380 * handler for SIGBUS.
1382 tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false);
1383 /* delay slot -- move to low part of argument reg */
1384 tcg_out_mov_delay(s, TCG_REG_O1, addr);
1386 /* Underalignment: store by pieces of minimum alignment. */
1387 int st_opc, a_size, s_size, i;
1390 * Force full address into T1 early; avoids problems with
1391 * overlap between @addr and @data.
1393 tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);
1395 a_size = 1 << a_bits;
1396 s_size = 1 << s_bits;
1397 if ((memop & MO_BSWAP) == MO_BE) {
1398 st_opc = qemu_st_opc[a_bits | MO_BE];
1399 for (i = 0; i < s_size; i += a_size) {
1401 int shift = (s_size - a_size - i) * 8;
1404 tcg_out_arithi(s, d, data, shift, SHIFT_SRLX);
1406 tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc);
1408 } else if (a_bits == 0) {
1409 tcg_out_ldst(s, data, TCG_REG_T1, 0, STB);
1410 for (i = 1; i < s_size; i++) {
1411 tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
1412 tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB);
1415 /* Note that ST*A with immediate asi must use indexed address. */
1416 st_opc = qemu_st_opc[a_bits + MO_LE];
1417 tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc);
1418 for (i = a_size; i < s_size; i += a_size) {
1419 tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
1420 tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
1421 tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc);
1426 *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
1427 #endif /* CONFIG_SOFTMMU */
1430 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1431 const TCGArg args[TCG_MAX_OP_ARGS],
1432 const int const_args[TCG_MAX_OP_ARGS])
1437 /* Hoist the loads of the most common arguments. */
1444 case INDEX_op_exit_tb:
1445 if (check_fit_ptr(a0, 13)) {
1446 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1447 tcg_out_movi_imm13(s, TCG_REG_O0, a0);
1449 } else if (USE_REG_TB) {
1450 intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
1451 if (check_fit_ptr(tb_diff, 13)) {
1452 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1453 /* Note that TCG_REG_TB has been unwound to O1. */
1454 tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
1458 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
1459 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1460 tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
1462 case INDEX_op_goto_tb:
1463 if (s->tb_jmp_insn_offset) {
1464 /* direct jump method */
1466 /* Make sure the patch is 8-byte aligned. */
1467 if ((intptr_t)s->code_ptr & 4) {
1470 s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
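/* Emit a patchable sethi+or pair that loads the jump displacement into
   T1; tb_target_set_jmp_target() rewrites both words with one 64-bit store. */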
1471 tcg_out_sethi(s, TCG_REG_T1, 0);
1472 tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
1473 tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
1474 tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
1476 s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
1481 /* indirect jump method */
1482 tcg_out_ld_ptr(s, TCG_REG_TB, s->tb_jmp_target_addr + a0);
1483 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
1486 set_jmp_reset_offset(s, a0);
1488 /* For the unlinked path of goto_tb, we need to reset
1489 TCG_REG_TB to the beginning of this TB. */
1491 c = -tcg_current_code_size(s);
1492 if (check_fit_i32(c, 13)) {
1493 tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
1495 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
1496 tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB,
1497 TCG_REG_T1, ARITH_ADD);
1501 case INDEX_op_goto_ptr:
1502 tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
1504 tcg_out_mov_delay(s, TCG_REG_TB, a0);
1510 tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
1514 #define OP_32_64(x) \
1515 glue(glue(case INDEX_op_, x), _i32): \
1516 glue(glue(case INDEX_op_, x), _i64)
1519 tcg_out_ldst(s, a0, a1, a2, LDUB);
1522 tcg_out_ldst(s, a0, a1, a2, LDSB);
1525 tcg_out_ldst(s, a0, a1, a2, LDUH);
1528 tcg_out_ldst(s, a0, a1, a2, LDSH);
1530 case INDEX_op_ld_i32:
1531 case INDEX_op_ld32u_i64:
1532 tcg_out_ldst(s, a0, a1, a2, LDUW);
1535 tcg_out_ldst(s, a0, a1, a2, STB);
1538 tcg_out_ldst(s, a0, a1, a2, STH);
1540 case INDEX_op_st_i32:
1541 case INDEX_op_st32_i64:
1542 tcg_out_ldst(s, a0, a1, a2, STW);
1565 case INDEX_op_shl_i32:
1568 /* Limit immediate shift count lest we create an illegal insn. */
1569 tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
1571 case INDEX_op_shr_i32:
1574 case INDEX_op_sar_i32:
1577 case INDEX_op_mul_i32:
1588 case INDEX_op_div_i32:
1589 tcg_out_div32(s, a0, a1, a2, c2, 0);
1591 case INDEX_op_divu_i32:
1592 tcg_out_div32(s, a0, a1, a2, c2, 1);
1595 case INDEX_op_brcond_i32:
1596 tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
1598 case INDEX_op_setcond_i32:
1599 tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
1601 case INDEX_op_movcond_i32:
1602 tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
1605 case INDEX_op_add2_i32:
1606 tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1607 args[4], const_args[4], args[5], const_args[5],
1608 ARITH_ADDCC, ARITH_ADDC);
1610 case INDEX_op_sub2_i32:
1611 tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1612 args[4], const_args[4], args[5], const_args[5],
1613 ARITH_SUBCC, ARITH_SUBC);
1615 case INDEX_op_mulu2_i32:
1618 case INDEX_op_muls2_i32:
1621 /* The 32-bit multiply insns produce a full 64-bit result. */
1622 tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
1623 tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
1626 case INDEX_op_qemu_ld_i32:
1627 tcg_out_qemu_ld(s, a0, a1, a2, false);
1629 case INDEX_op_qemu_ld_i64:
1630 tcg_out_qemu_ld(s, a0, a1, a2, true);
1632 case INDEX_op_qemu_st_i32:
1633 case INDEX_op_qemu_st_i64:
1634 tcg_out_qemu_st(s, a0, a1, a2);
1637 case INDEX_op_ld32s_i64:
1638 tcg_out_ldst(s, a0, a1, a2, LDSW);
1640 case INDEX_op_ld_i64:
1641 tcg_out_ldst(s, a0, a1, a2, LDX);
1643 case INDEX_op_st_i64:
1644 tcg_out_ldst(s, a0, a1, a2, STX);
1646 case INDEX_op_shl_i64:
1649 /* Limit immediate shift count lest we create an illegal insn. */
1650 tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
1652 case INDEX_op_shr_i64:
1655 case INDEX_op_sar_i64:
1658 case INDEX_op_mul_i64:
1661 case INDEX_op_div_i64:
1664 case INDEX_op_divu_i64:
1667 case INDEX_op_ext_i32_i64:
1668 case INDEX_op_ext32s_i64:
1669 tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
1671 case INDEX_op_extu_i32_i64:
1672 case INDEX_op_ext32u_i64:
1673 tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
1675 case INDEX_op_extrl_i64_i32:
1676 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
1678 case INDEX_op_extrh_i64_i32:
1679 tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
1682 case INDEX_op_brcond_i64:
1683 tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
1685 case INDEX_op_setcond_i64:
1686 tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
1688 case INDEX_op_movcond_i64:
1689 tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
1691 case INDEX_op_add2_i64:
1692 tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
1693 const_args[4], args[5], const_args[5], false);
1695 case INDEX_op_sub2_i64:
1696 tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
1697 const_args[4], args[5], const_args[5], true);
1699 case INDEX_op_muluh_i64:
1700 tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
1704 tcg_out_arithc(s, a0, a1, a2, c2, c);
1708 tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
1715 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
1716 case INDEX_op_mov_i64:
1717 case INDEX_op_call: /* Always emitted via tcg_out_call. */
1723 static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
1726 case INDEX_op_goto_ptr:
1729 case INDEX_op_ld8u_i32:
1730 case INDEX_op_ld8u_i64:
1731 case INDEX_op_ld8s_i32:
1732 case INDEX_op_ld8s_i64:
1733 case INDEX_op_ld16u_i32:
1734 case INDEX_op_ld16u_i64:
1735 case INDEX_op_ld16s_i32:
1736 case INDEX_op_ld16s_i64:
1737 case INDEX_op_ld_i32:
1738 case INDEX_op_ld32u_i64:
1739 case INDEX_op_ld32s_i64:
1740 case INDEX_op_ld_i64:
1741 case INDEX_op_neg_i32:
1742 case INDEX_op_neg_i64:
1743 case INDEX_op_not_i32:
1744 case INDEX_op_not_i64:
1745 case INDEX_op_ext32s_i64:
1746 case INDEX_op_ext32u_i64:
1747 case INDEX_op_ext_i32_i64:
1748 case INDEX_op_extu_i32_i64:
1749 case INDEX_op_extrl_i64_i32:
1750 case INDEX_op_extrh_i64_i32:
1751 return C_O1_I1(r, r);
1753 case INDEX_op_st8_i32:
1754 case INDEX_op_st8_i64:
1755 case INDEX_op_st16_i32:
1756 case INDEX_op_st16_i64:
1757 case INDEX_op_st_i32:
1758 case INDEX_op_st32_i64:
1759 case INDEX_op_st_i64:
1760 return C_O0_I2(rZ, r);
1762 case INDEX_op_add_i32:
1763 case INDEX_op_add_i64:
1764 case INDEX_op_mul_i32:
1765 case INDEX_op_mul_i64:
1766 case INDEX_op_div_i32:
1767 case INDEX_op_div_i64:
1768 case INDEX_op_divu_i32:
1769 case INDEX_op_divu_i64:
1770 case INDEX_op_sub_i32:
1771 case INDEX_op_sub_i64:
1772 case INDEX_op_and_i32:
1773 case INDEX_op_and_i64:
1774 case INDEX_op_andc_i32:
1775 case INDEX_op_andc_i64:
1776 case INDEX_op_or_i32:
1777 case INDEX_op_or_i64:
1778 case INDEX_op_orc_i32:
1779 case INDEX_op_orc_i64:
1780 case INDEX_op_xor_i32:
1781 case INDEX_op_xor_i64:
1782 case INDEX_op_shl_i32:
1783 case INDEX_op_shl_i64:
1784 case INDEX_op_shr_i32:
1785 case INDEX_op_shr_i64:
1786 case INDEX_op_sar_i32:
1787 case INDEX_op_sar_i64:
1788 case INDEX_op_setcond_i32:
1789 case INDEX_op_setcond_i64:
1790 return C_O1_I2(r, rZ, rJ);
1792 case INDEX_op_brcond_i32:
1793 case INDEX_op_brcond_i64:
1794 return C_O0_I2(rZ, rJ);
1795 case INDEX_op_movcond_i32:
1796 case INDEX_op_movcond_i64:
1797 return C_O1_I4(r, rZ, rJ, rI, 0);
1798 case INDEX_op_add2_i32:
1799 case INDEX_op_add2_i64:
1800 case INDEX_op_sub2_i32:
1801 case INDEX_op_sub2_i64:
1802 return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
1803 case INDEX_op_mulu2_i32:
1804 case INDEX_op_muls2_i32:
1805 return C_O2_I2(r, r, rZ, rJ);
1806 case INDEX_op_muluh_i64:
1807 return C_O1_I2(r, r, r);
1809 case INDEX_op_qemu_ld_i32:
1810 case INDEX_op_qemu_ld_i64:
1811 return C_O1_I1(r, s);
1812 case INDEX_op_qemu_st_i32:
1813 case INDEX_op_qemu_st_i64:
1814 return C_O0_I2(sZ, s);
1817 g_assert_not_reached();
1821 static void tcg_target_init(TCGContext *s)
1824 * Only probe for the platform and capabilities if we haven't already
1825 * determined maximum values at compile time.
1827 #ifndef use_vis3_instructions
1829 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
1830 use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
1834 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
1835 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
1837 tcg_target_call_clobber_regs = 0;
1838 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
1839 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
1840 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
1841 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
1842 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
1843 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
1844 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
1845 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
1846 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
1847 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
1848 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
1849 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
1850 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
1851 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
1852 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);
1854 s->reserved_regs = 0;
1855 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
1856 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
1857 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
1858 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
1859 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
1860 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
1861 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
1862 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
1865 #define ELF_HOST_MACHINE EM_SPARCV9
1869 uint8_t fde_def_cfa[4];
1870 uint8_t fde_win_save;
1871 uint8_t fde_ret_save[3];
1874 static const DebugFrame debug_frame = {
1875 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
1878 .h.cie.code_align = 1,
1879 .h.cie.data_align = -sizeof(void *) & 0x7f,
1880 .h.cie.return_column = 15, /* o7 */
1882 /* Total FDE size does not include the "len" member. */
1883 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
1886 12, 30, /* DW_CFA_def_cfa i6, 2047 */
1887 (2047 & 0x7f) | 0x80, (2047 >> 7)
1889 .fde_win_save = 0x2d, /* DW_CFA_GNU_window_save */
1890 .fde_ret_save = { 9, 15, 31 }, /* DW_CFA_register o7, i7 */
1893 void tcg_register_jit(const void *buf, size_t buf_size)
1895 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1898 void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
1899 uintptr_t jmp_rw, uintptr_t addr)
1901 intptr_t tb_disp = addr - tc_ptr;
1902 intptr_t br_disp = addr - jmp_rx;
1903 tcg_insn_unit i1, i2;
1905 /* We can reach the entire address space for ILP32.
1906 For LP64, the code_gen_buffer can't be larger than 2GB. */
1907 tcg_debug_assert(tb_disp == (int32_t)tb_disp);
1908 tcg_debug_assert(br_disp == (int32_t)br_disp);
1911 qatomic_set((uint32_t *)jmp_rw,
1912 deposit32(CALL, 0, 30, br_disp >> 2));
1913 flush_idcache_range(jmp_rx, jmp_rw, 4);
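/*
 * When TCG_REG_TB is in use it must also be reloaded with the new TB
 * address, so the two-insn sethi/or pair is rewritten instead.  On this
 * big-endian host, i1 forms the high (lower-addressed) word of the
 * 64-bit store below.
 */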
1917 /* The 13-bit check below does not exercise the full range of the branch,
1918 but it is what allows the new value of TCG_REG_TB to be loaded with a
1919 single add; that case does still happen quite often. */
1920 if (check_fit_ptr(tb_disp, 13)) {
1921 /* ba,pt %icc, addr */
1922 i1 = (INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
1923 | BPCC_ICC | BPCC_PT | INSN_OFF19(br_disp));
1924 i2 = (ARITH_ADD | INSN_RD(TCG_REG_TB) | INSN_RS1(TCG_REG_TB)
1925 | INSN_IMM13(tb_disp));
1926 } else if (tb_disp >= 0) {
1927 i1 = SETHI | INSN_RD(TCG_REG_T1) | ((tb_disp & 0xfffffc00) >> 10);
1928 i2 = (ARITH_OR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
1929 | INSN_IMM13(tb_disp & 0x3ff));
1931 i1 = SETHI | INSN_RD(TCG_REG_T1) | ((~tb_disp & 0xfffffc00) >> 10);
1932 i2 = (ARITH_XOR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
1933 | INSN_IMM13((tb_disp & 0x3ff) | -0x400));
1936 qatomic_set((uint64_t *)jmp_rw, deposit64(i2, 32, 32, i1));
1937 flush_idcache_range(jmp_rx, jmp_rw, 8);