/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
 *
 * Based on tcg/riscv/tcg-target.c.inc
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-ldst.c.inc"
#include <asm/hwcap.h>

bool use_lsx_instructions;

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r21", /* reserved in the LP64* ABI, hence no ABI name */

static const int tcg_target_reg_alloc_order[] = {
    /* Registers preserved across calls */
    /* TCG_REG_S0 reserved for TCG_AREG0 */

    /* Registers (potentially) clobbered across calls */

    /* Argument registers, opposite order of allocation. */

    /* Vector registers */
    TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
    TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
    TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
    TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
    TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
    TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
    /* V24 - V31 are caller-saved, and skipped. */
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0, TCG_REG_A1, TCG_REG_A2, TCG_REG_A3,
    TCG_REG_A4, TCG_REG_A5, TCG_REG_A6, TCG_REG_A7,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 1);
    return TCG_REG_A0 + slot;
}

#ifndef CONFIG_SOFTMMU
#define USE_GUEST_BASE     (guest_base != 0)
#define TCG_GUEST_BASE_REG TCG_REG_S1
#endif

#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_S32   0x400
#define TCG_CT_CONST_U12   0x800
#define TCG_CT_CONST_C12   0x1000
#define TCG_CT_CONST_WSZ   0x2000
#define TCG_CT_CONST_VCMP  0x4000
#define TCG_CT_CONST_VADD  0x8000

#define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)
#define ALL_VECTOR_REGS    MAKE_64BIT_MASK(32, 32)

static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
    return sextract64(val, pos, len);
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
{
    if (ct & TCG_CT_CONST) {
        return true;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return true;
    }
    if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) {
        return true;
    }
    if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) {
        return true;
    }
    if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
        return true;
    }
    int64_t vec_val = sextract64(val, 0, 8 << vece);
    if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) {
        return true;
    }
    if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) {
        return true;
    }
    return false;
}
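
/*
 * Illustrative readings of the less obvious constraints above (these are
 * examples, not additional checks): TCG_CT_CONST_C12 matches values whose
 * bitwise NOT fits in uimm12, e.g. val = 0xfffffffffffff00f has
 * ~val = 0xff0 and matches, letting andi/ori encode an op on the
 * complemented constant. TCG_CT_CONST_WSZ matches only the operation's
 * word size (32 or 64), which the clz/ctz fast path below relies on.
 */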

/*
 * Relocations.
 *
 * Relocation records defined in the LoongArch ELF psABI v1.00 are way too
 * complicated; a whopping stack machine is needed to stuff the fields, and
 * at the very least one SOP_PUSH and one SOP_POP (of the correct format)
 * are needed.
 *
 * Hence, define our own simpler relocation types. Numbers are chosen so as
 * not to collide with potential future additions to the true ELF relocation
 * type enum.
 */

/* Field Sk16, shifted right by 2; suitable for conditional jumps */
#define R_LOONGARCH_BR_SK16     256
/* Field Sd10k16, shifted right by 2; suitable for B and BL */
#define R_LOONGARCH_BR_SD10K16  257

static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 16)) {
        *src_rw = deposit64(*src_rw, 10, 16, offset);
        return true;
    }
    return false;
}

static bool reloc_br_sd10k16(tcg_insn_unit *src_rw,
                             const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 26)) {
        *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */
        *src_rw = deposit64(*src_rw, 10, 16, offset);      /* slot k16 */
        return true;
    }
    return false;
}
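
/*
 * Worked example (illustrative only): patching a B instruction whose
 * target lies 0x12348 bytes ahead. The word offset is 0x12348 >> 2 =
 * 0x48d2, which the sextreg() check confirms fits in 26 bits, so the
 * two slots receive
 *   k16 = insn[25:10] = 0x48d2 & 0xffff = 0x48d2
 *   d10 = insn[9:0]   = 0x48d2 >> 16    = 0
 * If the check fails, the caller must fall back to a longer sequence.
 */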

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_LOONGARCH_BR_SK16:
        return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value);
    case R_LOONGARCH_BR_SD10K16:
        return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#include "tcg-insn-defs.c.inc"

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Baseline LoongArch only has the full barrier, unfortunately. */
    tcg_out_opc_dbar(s, 0);
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        /*
         * Conventional register-register move used in LoongArch is
         * `or dst, src, zero`.
         */
        tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

/* Loads a 32-bit immediate into rd, sign-extended. */
static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val)
{
    tcg_target_long lo = sextreg(val, 0, 12);
    tcg_target_long hi12 = sextreg(val, 12, 20);

    /* Single-instruction cases. */
    if (hi12 == 0) {
        /* val fits in uimm12: ori rd, zero, val */
        tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val);
        return;
    }
    if (hi12 == sextreg(lo, 12, 20)) {
        /* val fits in simm12: addi.w rd, zero, val */
        tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val);
        return;
    }

    /* High bits must be set; load with lu12i.w + optional ori. */
    tcg_out_opc_lu12i_w(s, rd, hi12);
    if (lo != 0) {
        tcg_out_opc_ori(s, rd, rd, lo & 0xfff);
    }
}
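
/*
 * Worked example (illustrative only): val = 0x12345abc gives
 *   lo   = sextreg(val, 0, 12)  = -0x544  (bit 11 of 0xabc is set)
 *   hi12 = sextreg(val, 12, 20) = 0x12345
 * so neither single-instruction case applies and we emit
 *   lu12i.w  rd, 0x12345      # rd = 0x12345000
 *   ori      rd, rd, 0xabc    # rd = 0x12345abc
 */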

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    /*
     * LoongArch conventionally loads 64-bit immediates in at most 4 steps,
     * with dedicated instructions for filling the respective bitfields
     * below:
     *
     *        6                   5                   4               3
     *  3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
     * +-----------------------+---------------------------------------+...
     * |          hi52         |                  hi32                 |
     * +-----------------------+---------------------------------------+...
     *           3                   2                   1
     *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------------+-------------------------+
     * ...|                 hi12                |           lo            |
     * ...+-------------------------------------+-------------------------+
     *
     * Check whether val belongs to one of the several fast cases, before
     * falling back to the slow path.
     */

    intptr_t pc_offset;
    tcg_target_long val_lo, val_hi, pc_hi, offset_hi;
    tcg_target_long hi12, hi32, hi52;

    /* Value fits in signed i32. */
    if (type == TCG_TYPE_I32 || val == (int32_t)val) {
        tcg_out_movi_i32(s, rd, val);
        return;
    }

    /* PC-relative cases. */
    pc_offset = tcg_pcrel_diff(s, (void *)val);
    if (pc_offset == sextreg(pc_offset, 0, 22) && (pc_offset & 3) == 0) {
        /* Single pcaddu2i. */
        tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2);
        return;
    }

    if (pc_offset == (int32_t)pc_offset) {
        /* Offset within 32 bits; load with pcalau12i + ori. */
        val_lo = sextreg(val, 0, 12);
        val_hi = val >> 12;
        pc_hi = (val - pc_offset) >> 12;
        offset_hi = val_hi - pc_hi;

        tcg_debug_assert(offset_hi == sextreg(offset_hi, 0, 20));
        tcg_out_opc_pcalau12i(s, rd, offset_hi);
        if (val_lo != 0) {
            tcg_out_opc_ori(s, rd, rd, val_lo & 0xfff);
        }
        return;
    }

    hi12 = sextreg(val, 12, 20);
    hi32 = sextreg(val, 32, 20);
    hi52 = sextreg(val, 52, 12);

    /* Single cu52i.d case. */
    if ((hi52 != 0) && (ctz64(val) >= 52)) {
        tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52);
        return;
    }

    /* Slow path. Initialize the low 32 bits, then concat high bits. */
    tcg_out_movi_i32(s, rd, val);

    /* Load hi32 and hi52 explicitly when they are unexpected values. */
    if (hi32 != sextreg(hi12, 20, 20)) {
        tcg_out_opc_cu32i_d(s, rd, hi32);
    }

    if (hi52 != sextreg(hi32, 20, 12)) {
        tcg_out_opc_cu52i_d(s, rd, rd, hi52);
    }
}
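
/*
 * Worked example (illustrative only): the full 4-step slow path for
 * val = 0x123456789abcdef0:
 *   lu12i.w  rd, 0x9abcd      # rd = 0xffffffff9abcd000 (sign-extends)
 *   ori      rd, rd, 0xef0    # rd = 0xffffffff9abcdef0
 *   cu32i.d  rd, 0x45678      # rd = 0x000456789abcdef0
 *   cu52i.d  rd, rd, 0x123    # rd = 0x123456789abcdef0
 * A value like 0x10000000000000 (only bits 52 and up set) would instead
 * hit the single cu52i.d case above.
 */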

static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd,
                         TCGReg rs, tcg_target_long imm)
{
    tcg_target_long lo12 = sextreg(imm, 0, 12);
    tcg_target_long hi16 = sextreg(imm - lo12, 16, 16);

    /*
     * Note that there's a hole in between hi16 and lo12:
     *
     *       3                   2                   1                   0
     *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------+-------+-----------------------+
     * ...|             hi16              |       |          lo12         |
     * ...+-------------------------------+-------+-----------------------+
     *
     * For bits within that hole, it's more efficient to use LU12I and ADD.
     */
    if (imm == (hi16 << 16) + lo12) {
        if (hi16) {
            tcg_out_opc_addu16i_d(s, rd, rs, hi16);
            rs = rd;
        }
        if (type == TCG_TYPE_I32) {
            tcg_out_opc_addi_w(s, rd, rs, lo12);
        } else if (lo12) {
            tcg_out_opc_addi_d(s, rd, rs, lo12);
        } else {
            tcg_out_mov(s, type, rd, rs);
        }
    } else {
        tcg_out_movi(s, type, TCG_REG_TMP0, imm);
        if (type == TCG_TYPE_I32) {
            tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0);
        } else {
            tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0);
        }
    }
}
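
/*
 * Worked example (illustrative only): imm = 0x12340005 decomposes as
 * lo12 = 5 and hi16 = 0x1234 with (hi16 << 16) + lo12 == imm, so the
 * fast path emits
 *   addu16i.d  rd, rs, 0x1234
 *   addi.d     rd, rd, 5
 * whereas imm = 0x7ffff800 (bits live in the hole pictured above) fails
 * the check and goes through a tcg_out_movi into TMP0 plus one add.
 */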

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_andi(s, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_sext_b(s, ret, arg);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_sext_h(s, ret, arg);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_addi_w(s, ret, arg, 0);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32s(s, ret, arg);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32u(s, ret, arg);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32s(s, ret, arg);
}

static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc,
                           TCGReg a0, TCGReg a1, TCGReg a2,
                           bool c2, bool is_32bit)
{
    if (c2) {
        /*
         * Fast path: semantics already satisfied due to constraint and
         * insn behavior, single instruction is enough.
         */
        tcg_debug_assert(a2 == (is_32bit ? 32 : 64));
        /* all clz/ctz insns belong to DJ-format */
        tcg_out32(s, encode_dj_insn(opc, a0, a1));
        return;
    }

    tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1));
    /* a0 = a1 ? REG_TMP0 : a2 */
    tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
    tcg_out_opc_masknez(s, a0, a2, a1);
    tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0);
}
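
/*
 * Illustrative expansion (not emitted verbatim) of the non-constant case
 * for ctz_i64 a0, a1, a2, where a2 supplies the result when a1 == 0:
 *   ctz.d    tmp0, a1
 *   maskeqz  tmp0, tmp0, a1   # tmp0 = (a1 != 0) ? tmp0 : 0
 *   masknez  a0, a2, a1       # a0   = (a1 == 0) ? a2   : 0
 *   or       a0, tmp0, a0     # merge the two disjoint halves
 */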

#define SETCOND_INV    TCG_TARGET_NB_REGS
#define SETCOND_NEZ    (SETCOND_INV << 1)
#define SETCOND_FLAGS  (SETCOND_INV | SETCOND_NEZ)

static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
                               TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int flags = 0;

    switch (cond) {
    case TCG_COND_EQ:    /* -> NE  */
    case TCG_COND_GE:    /* -> LT  */
    case TCG_COND_GEU:   /* -> LTU */
    case TCG_COND_GT:    /* -> LE  */
    case TCG_COND_GTU:   /* -> LEU */
        cond = tcg_invert_cond(cond);
        flags ^= SETCOND_INV;
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_LEU:
        /*
         * If we have a constant input, the most efficient way to implement
         * LE is by adding 1 and using LT. Watch out for wrap around for LEU.
         * We don't need to worry about this for LE because the constant
         * input is still constrained to int32_t, and INT32_MAX+1 is
         * representable in the 64-bit temporary register.
         */
        if (c2) {
            if (cond == TCG_COND_LEU) {
                /* unsigned <= -1 is true */
                if (arg2 == -1) {
                    tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
                    return ret;
                }
                cond = TCG_COND_LTU;
            } else {
                cond = TCG_COND_LT;
            }
            arg2 += 1;
        } else {
            TCGReg tmp = arg2;
            arg2 = arg1;
            arg1 = tmp;
            cond = tcg_swap_cond(cond);     /* LE -> GE */
            cond = tcg_invert_cond(cond);   /* GE -> LT */
            flags ^= SETCOND_INV;
        }
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_NE:
        flags |= SETCOND_NEZ;
        if (!c2) {
            tcg_out_opc_xor(s, ret, arg1, arg2);
        } else if (arg2 == 0) {
            ret = arg1;
        } else if (arg2 >= 0 && arg2 <= 0xfff) {
            tcg_out_opc_xori(s, ret, arg1, arg2);
        } else {
            tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2);
        }
        break;

    case TCG_COND_LT:
    case TCG_COND_LTU:
        if (c2) {
            if (arg2 >= -0x800 && arg2 <= 0x7ff) {
                if (cond == TCG_COND_LT) {
                    tcg_out_opc_slti(s, ret, arg1, arg2);
                } else {
                    tcg_out_opc_sltui(s, ret, arg1, arg2);
                }
                break;
            }
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
            arg2 = TCG_REG_TMP0;
        }
        if (cond == TCG_COND_LT) {
            tcg_out_opc_slt(s, ret, arg1, arg2);
        } else {
            tcg_out_opc_sltu(s, ret, arg1, arg2);
        }
        break;

    default:
        g_assert_not_reached();
    }

    return ret | flags;
}

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);

    if (tmpflags != ret) {
        TCGReg tmp = tmpflags & ~SETCOND_FLAGS;

        switch (tmpflags & SETCOND_FLAGS) {
        case SETCOND_INV:
            /* Intermediate result is boolean: simply invert. */
            tcg_out_opc_xori(s, ret, tmp, 1);
            break;
        case SETCOND_NEZ:
            /* Intermediate result is zero/non-zero: test != 0. */
            tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
            break;
        case SETCOND_NEZ | SETCOND_INV:
            /* Intermediate result is zero/non-zero: test == 0. */
            tcg_out_opc_sltui(s, ret, tmp, 1);
            break;
        default:
            g_assert_not_reached();
        }
    }
}
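
/*
 * Worked example (illustrative only): setcond_i64 ret, arg1, 0x800, EQ.
 * EQ is first inverted to NE (flags gains SETCOND_INV), the NE case
 * reduces to "arg1 ^ 0x800 != 0" via xori (flags gains SETCOND_NEZ),
 * and the NEZ|INV combination above materializes the boolean:
 *   xori   ret, arg1, 0x800
 *   sltui  ret, ret, 1        # ret = (ret == 0)
 */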

static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg c1, tcg_target_long c2, bool const2,
                            TCGReg v1, TCGReg v2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2);
    TCGReg t;

    /* Standardize the test below to t != 0. */
    if (tmpflags & SETCOND_INV) {
        t = v1, v1 = v2, v2 = t;
    }

    t = tmpflags & ~SETCOND_FLAGS;
    if (v1 == TCG_REG_ZERO) {
        tcg_out_opc_masknez(s, ret, v2, t);
    } else if (v2 == TCG_REG_ZERO) {
        tcg_out_opc_maskeqz(s, ret, v1, t);
    } else {
        tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */
        tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */
        tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2);
    }
}
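
/*
 * Worked example (illustrative only): movcond_i64 ret, c1, 0, v1, v2
 * with cond == TCG_COND_EQ. setcond_int inverts EQ to NE and, for a
 * zero constant, returns c1 itself tagged NEZ|INV; the INV bit swaps
 * v1/v2, so the branchless select becomes
 *   masknez tmp2, v1, c1      # c1 != 0 ? 0  : v1
 *   maskeqz tmp1, v2, c1      # c1 != 0 ? v2 : 0
 *   or      ret, tmp1, tmp2   # ret = (c1 == 0) ? v1 : v2
 */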

static const struct {
    LoongArchInsn op;
    bool swap;
} tcg_brcond_to_loongarch[] = {
    [TCG_COND_EQ]  = { OPC_BEQ,  false },
    [TCG_COND_NE]  = { OPC_BNE,  false },
    [TCG_COND_LT]  = { OPC_BGT,  true  },
    [TCG_COND_GE]  = { OPC_BLE,  true  },
    [TCG_COND_LE]  = { OPC_BLE,  false },
    [TCG_COND_GT]  = { OPC_BGT,  false },
    [TCG_COND_LTU] = { OPC_BGTU, true  },
    [TCG_COND_GEU] = { OPC_BLEU, true  },
    [TCG_COND_LEU] = { OPC_BLEU, false },
    [TCG_COND_GTU] = { OPC_BGTU, false }
};

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    LoongArchInsn op = tcg_brcond_to_loongarch[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_loongarch[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    /* all conditional branch insns belong to DJSk16-format */
    tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0);
    tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0));
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);

    tcg_debug_assert((offset & 3) == 0);
    if (offset == sextreg(offset, 0, 28)) {
        /* short jump: +/- 256MiB */
        if (tail) {
            tcg_out_opc_b(s, offset >> 2);
        } else {
            tcg_out_opc_bl(s, offset >> 2);
        }
    } else if (offset == sextreg(offset, 0, 38)) {
        /* long jump: +/- 256GiB */
        tcg_target_long lo = sextreg(offset, 0, 18);
        tcg_target_long hi = offset - lo;
        tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    } else {
        /* far jump: 64-bit */
        tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18);
        tcg_target_long hi = (tcg_target_long)arg - lo;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    }
}
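
/*
 * Worked example (illustrative only): a byte offset of 0x123456788 is
 * outside the short-jump range but fits in 38 bits, so it is split as
 *   lo = sextreg(offset, 0, 18) = 0x16788
 *   hi = offset - lo            = 0x123440000
 * and emitted as
 *   pcaddu18i  tmp0, 0x48d1         # hi >> 18
 *   jirl       link, tmp0, 0x59e2   # lo >> 2 (jirl scales its si16 by 4)
 */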

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, arg, false);
}

static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_LD_B:
    case OPC_LD_BU:
    case OPC_LD_H:
    case OPC_LD_HU:
    case OPC_LD_W:
    case OPC_LD_WU:
    case OPC_LD_D:
    case OPC_ST_B:
    case OPC_ST_H:
    case OPC_ST_W:
    case OPC_ST_D:
        tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12));
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is_32bit = type == TCG_TYPE_I32;
    tcg_out_ldst(s, is_32bit ? OPC_LD_W : OPC_LD_D, arg, arg1, arg2);
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is_32bit = type == TCG_TYPE_I32;
    tcg_out_ldst(s, is_32bit ? OPC_ST_W : OPC_ST_D, arg, arg1, arg2);
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}

/*
 * Load/store helpers for SoftMMU, and qemu_ld/st implementations
 */

static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_b(s, 0);
    return reloc_br_sd10k16(s->code_ptr - 1, target);
}

static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 1, .tmp = { TCG_REG_TMP0 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_ld_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE], false);
    tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param);
    return tcg_out_goto(s, l->raddr);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_st_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false);
    return tcg_out_goto(s, l->raddr);
}

typedef struct {
    TCGReg base;
    TCGReg index;
    TCGAtomAlign aa;
} HostAddress;

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return false;
}

/* We expect to use a 12-bit negative offset from ENV. */
#define MIN_TLB_MASK_TABLE_OFS  -(1 << 11)

/*
 * For softmmu, perform the TLB load and compare.
 * For useronly, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @h with the host address for the fast path.
 */
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
{
    TCGType addr_type = s->addr_type;
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    unsigned a_bits;

    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    a_bits = h->aa.align;

#ifdef CONFIG_SOFTMMU
    unsigned s_bits = opc & MO_SIZE;
    int mem_index = get_mmuidx(oi);
    int fast_ofs = tlb_mask_table_ofs(s, mem_index);
    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);

    ldst = new_ldst_label(s);
    ldst->is_ld = is_ld;
    ldst->oi = oi;
    ldst->addrlo_reg = addr_reg;

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);

    tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg,
                       s->page_bits - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
    tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

    /* Load the tlb comparator and the addend. */
    QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
    tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
               is_ld ? offsetof(CPUTLBEntry, addr_read)
                     : offsetof(CPUTLBEntry, addr_write));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
               offsetof(CPUTLBEntry, addend));

    /*
     * For aligned accesses, we check the first byte and include the alignment
     * bits within the address. For unaligned access, we check that we don't
     * cross pages using the address of the last byte of the access.
     */
    if (a_bits < s_bits) {
        unsigned a_mask = (1u << a_bits) - 1;
        unsigned s_mask = (1u << s_bits) - 1;
        tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask);
    } else {
        tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg);
    }
    tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO,
                          a_bits, s->page_bits - 1);

    /* Compare masked address with the TLB entry. */
    ldst->label_ptr[0] = s->code_ptr;
    tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);

    h->index = TCG_REG_TMP2;
#else
    if (a_bits) {
        ldst = new_ldst_label(s);

        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addrlo_reg = addr_reg;

        /*
         * Without micro-architecture details, we don't know which of
         * bstrpick or andi is faster, so use bstrpick as it's not
         * constrained by imm field width. Not to say alignments >= 2^12
         * are going to happen any time soon.
         */
        tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);

        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);
    }

    h->index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
#endif

    if (addr_type == TCG_TYPE_I32) {
        h->base = TCG_REG_TMP0;
        tcg_out_ext32u(s, h->base, addr_reg);
    } else {
        h->base = addr_reg;
    }

    return ldst;
}
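
/*
 * Illustrative user-only alignment test (assuming a 4-byte access, so
 * a_bits == 2): the generated fast path is just
 *   bstrpick.d  tmp1, addr, 1, 0    # tmp1 = addr & 3
 *   bne         tmp1, zero, <slow path>
 * with the bne patched later via R_LOONGARCH_BR_SK16.
 */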

static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type,
                                    TCGReg rd, HostAddress h)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_opc_ldx_bu(s, rd, h.base, h.index);
        break;
    case MO_SB:
        tcg_out_opc_ldx_b(s, rd, h.base, h.index);
        break;
    case MO_UW:
        tcg_out_opc_ldx_hu(s, rd, h.base, h.index);
        break;
    case MO_SW:
        tcg_out_opc_ldx_h(s, rd, h.base, h.index);
        break;
    case MO_UL:
        if (type == TCG_TYPE_I64) {
            tcg_out_opc_ldx_wu(s, rd, h.base, h.index);
            break;
        }
        /* fallthrough */
    case MO_SL:
        tcg_out_opc_ldx_w(s, rd, h.base, h.index);
        break;
    case MO_UQ:
        tcg_out_opc_ldx_d(s, rd, h.base, h.index);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
    tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
                                    TCGReg rd, HostAddress h)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_opc_stx_b(s, rd, h.base, h.index);
        break;
    case MO_16:
        tcg_out_opc_stx_h(s, rd, h.base, h.index);
        break;
    case MO_32:
        tcg_out_opc_stx_w(s, rd, h.base, h.index);
        break;
    case MO_64:
        tcg_out_opc_stx_d(s, rd, h.base, h.index);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, false);
    tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    /* Reuse the zeroing that exists for goto_ptr. */
    if (a0 == 0) {
        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
        tcg_out_call_int(s, tb_ret_addr, true);
    }
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /*
     * Direct branch, or load indirect address, to be patched
     * by tb_target_set_jmp_target. Check indirect load offset
     * in range early, regardless of direct branch distance,
     * via assert within tcg_out_opc_pcaddu2i.
     */
    uintptr_t i_addr = get_jmp_target_addr(s, which);
    intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr);

    set_jmp_insn_offset(s, which);
    tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2);

    /* Finish the load and indirect branch. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    uintptr_t d_addr = tb->jmp_target_addr[n];
    ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2;
    tcg_insn_unit insn;

    /* Either directly branch, or load slot address for indirect branch. */
    if (d_disp == sextreg(d_disp, 0, 26)) {
        insn = encode_sd10k16_insn(OPC_B, d_disp);
    } else {
        uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n];
        intptr_t i_disp = i_addr - jmp_rx;
        insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2);
    }

    qatomic_set((tcg_insn_unit *)jmp_rw, insn);
    flush_idcache_range(jmp_rx, jmp_rw, 4);
}
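
/*
 * Illustrative patching outcomes: when the destination TB is in direct
 * branch range, the 4-byte slot becomes "b <d_disp>" and the following
 * ld/jirl pair is simply jumped over; otherwise the slot becomes
 * "pcaddu2i tmp0, <i_disp >> 2>", so the pre-generated load and indirect
 * jump fetch the target from jmp_target_addr[n]. Either way exactly one
 * instruction changes, hence the single qatomic_set plus a 4-byte
 * icache flush.
 */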

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_goto_ptr:
        tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0),
                      0);
        tcg_out_opc_b(s, 0);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;

    case INDEX_op_extrh_i64_i32:
        tcg_out_opc_srai_d(s, a0, a1, 32);
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO);
        break;

    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        if (c2) {
            tcg_out_opc_ori(s, a0, a1, a2);
            tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO);
        } else {
            tcg_out_opc_nor(s, a0, a1, a2);
        }
        break;

    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
        if (c2) {
            /* guaranteed to fit due to constraint */
            tcg_out_opc_andi(s, a0, a1, ~a2);
        } else {
            tcg_out_opc_andn(s, a0, a1, a2);
        }
        break;

    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
        if (c2) {
            /* guaranteed to fit due to constraint */
            tcg_out_opc_ori(s, a0, a1, ~a2);
        } else {
            tcg_out_opc_orn(s, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_andi(s, a0, a1, a2);
        } else {
            tcg_out_opc_and(s, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_ori(s, a0, a1, a2);
        } else {
            tcg_out_opc_or(s, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_xori(s, a0, a1, a2);
        } else {
            tcg_out_opc_xor(s, a0, a1, a2);
        }
        break;

    case INDEX_op_extract_i32:
        tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1);
        break;
    case INDEX_op_extract_i64:
        tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1);
        break;
    case INDEX_op_deposit_i64:
        tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
        break;

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        tcg_out_opc_revb_2h(s, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0);
        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext16u(s, a0, a0);
        }
        break;

    case INDEX_op_bswap32_i32:
        /* All 32-bit values are computed sign-extended in the register. */
        a2 = TCG_BSWAP_OS;
        /* fallthrough */
    case INDEX_op_bswap32_i64:
        tcg_out_opc_revb_2w(s, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_ext32s(s, a0, a0);
        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext32u(s, a0, a0);
        }
        break;

    case INDEX_op_bswap64_i64:
        tcg_out_opc_revb_d(s, a0, a1);
        break;

    case INDEX_op_clz_i32:
        tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true);
        break;
    case INDEX_op_clz_i64:
        tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false);
        break;

    case INDEX_op_ctz_i32:
        tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true);
        break;
    case INDEX_op_ctz_i64:
        tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
        break;

    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_sll_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_sll_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_srl_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_srl_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_sra_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_sra_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_rotl_i32:
        /* transform into equivalent rotr/rotri */
        if (c2) {
            tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f);
        } else {
            tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0);
        }
        break;
    case INDEX_op_rotl_i64:
        /* transform into equivalent rotr/rotri */
        if (c2) {
            tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f);
        } else {
            tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0);
        }
        break;

    case INDEX_op_rotr_i32:
        if (c2) {
            tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_rotr_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_rotr_i64:
        if (c2) {
            tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_rotr_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2);
        } else {
            tcg_out_opc_add_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2);
        } else {
            tcg_out_opc_add_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2);
        } else {
            tcg_out_opc_sub_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2);
        } else {
            tcg_out_opc_sub_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_mul_i32:
        tcg_out_opc_mul_w(s, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_mul_d(s, a0, a1, a2);
        break;

    case INDEX_op_mulsh_i32:
        tcg_out_opc_mulh_w(s, a0, a1, a2);
        break;
    case INDEX_op_mulsh_i64:
        tcg_out_opc_mulh_d(s, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
        tcg_out_opc_mulh_wu(s, a0, a1, a2);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_opc_mulh_du(s, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_div_w(s, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_div_d(s, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_div_wu(s, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_div_du(s, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_mod_w(s, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_mod_d(s, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_mod_wu(s, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_mod_du(s, a0, a1, a2);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2, c2);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]);
        break;

    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
        break;
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LD_H, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LD_W, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD_D, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_ST_B, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_ST_H, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_ST_W, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_ST_D, a0, a1, a2);
        break;

    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op. */
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg rd, TCGReg rs)
{
    switch (vece) {
    case MO_8:
        tcg_out_opc_vreplgr2vr_b(s, rd, rs);
        break;
    case MO_16:
        tcg_out_opc_vreplgr2vr_h(s, rd, rs);
        break;
    case MO_32:
        tcg_out_opc_vreplgr2vr_w(s, rd, rs);
        break;
    case MO_64:
        tcg_out_opc_vreplgr2vr_d(s, rd, rs);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg r, TCGReg base, intptr_t offset)
{
    /* Handle imm overflow and division (vldrepl.d imm is divided by 8). */
    if (offset < -0x800 || offset > 0x7ff ||
        (offset & ((1 << vece) - 1)) != 0) {
        tcg_out_addi(s, TCG_TYPE_I64, TCG_REG_TMP0, base, offset);
        base = TCG_REG_TMP0;
        offset = 0;
    }
    offset >>= vece;

    switch (vece) {
    case MO_8:
        tcg_out_opc_vldrepl_b(s, r, base, offset);
        break;
    case MO_16:
        tcg_out_opc_vldrepl_h(s, r, base, offset);
        break;
    case MO_32:
        tcg_out_opc_vldrepl_w(s, r, base, offset);
        break;
    case MO_64:
        tcg_out_opc_vldrepl_d(s, r, base, offset);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg rd, int64_t v64)
{
    /* Try vldi if the immediate can fit */
    int64_t value = sextract64(v64, 0, 8 << vece);
    if (-0x200 <= value && value <= 0x1FF) {
        uint32_t imm = (vece << 10) | ((uint32_t)v64 & 0x3FF);
        tcg_out_opc_vldi(s, rd, imm);
        return;
    }

    /* TODO: vldi patterns when bit 12 of the immediate is set */

    /* Fallback to vreplgr2vr */
    tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, value);
    switch (vece) {
    case MO_8:
        tcg_out_opc_vreplgr2vr_b(s, rd, TCG_REG_TMP0);
        break;
    case MO_16:
        tcg_out_opc_vreplgr2vr_h(s, rd, TCG_REG_TMP0);
        break;
    case MO_32:
        tcg_out_opc_vreplgr2vr_w(s, rd, TCG_REG_TMP0);
        break;
    case MO_64:
        tcg_out_opc_vreplgr2vr_d(s, rd, TCG_REG_TMP0);
        break;
    default:
        g_assert_not_reached();
    }
}
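
/*
 * Layout note (illustrative; consult the LSX manual before relying on
 * it): in the vldi immediate built above, bit 12 is clear, bits [11:10]
 * carry vece (the element size) and bits [9:0] a signed 10-bit value to
 * replicate. E.g. splatting the byte 0x12 into every lane would use
 * imm = (MO_8 << 10) | 0x012.
 */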

static void tcg_out_addsub_vec(TCGContext *s, unsigned vece, const TCGArg a0,
                               const TCGArg a1, const TCGArg a2,
                               bool a2_is_const, bool is_add)
{
    static const LoongArchInsn add_vec_insn[4] = {
        OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D
    };
    static const LoongArchInsn add_vec_imm_insn[4] = {
        OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU
    };
    static const LoongArchInsn sub_vec_insn[4] = {
        OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D
    };
    static const LoongArchInsn sub_vec_imm_insn[4] = {
        OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU
    };

    if (a2_is_const) {
        int64_t value = sextract64(a2, 0, 8 << vece);
        if (!is_add) {
            value = -value;
        }

        /* Try vaddi/vsubi */
        if (0 <= value && value <= 0x1f) {
            tcg_out32(s, encode_vdvjuk5_insn(add_vec_imm_insn[vece], a0,
                                             a1, value));
            return;
        } else if (-0x1f <= value && value < 0) {
            tcg_out32(s, encode_vdvjuk5_insn(sub_vec_imm_insn[vece], a0,
                                             a1, -value));
            return;
        }

        /* the TCG_CT_CONST_VADD constraint makes this unreachable */
        g_assert_not_reached();
    }

    if (is_add) {
        tcg_out32(s, encode_vdvjvk_insn(add_vec_insn[vece], a0, a1, a2));
    } else {
        tcg_out32(s, encode_vdvjvk_insn(sub_vec_insn[vece], a0, a1, a2));
    }
}

static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS])
{
    TCGType type = vecl + TCG_TYPE_V64;
    TCGArg a0, a1, a2;
    LoongArchInsn insn;
    TCGReg temp = TCG_REG_TMP0;
    TCGReg temp_vec = TCG_VEC_TMP0;

    static const LoongArchInsn cmp_vec_insn[16][4] = {
        [TCG_COND_EQ] = {OPC_VSEQ_B, OPC_VSEQ_H, OPC_VSEQ_W, OPC_VSEQ_D},
        [TCG_COND_LE] = {OPC_VSLE_B, OPC_VSLE_H, OPC_VSLE_W, OPC_VSLE_D},
        [TCG_COND_LEU] = {OPC_VSLE_BU, OPC_VSLE_HU, OPC_VSLE_WU, OPC_VSLE_DU},
        [TCG_COND_LT] = {OPC_VSLT_B, OPC_VSLT_H, OPC_VSLT_W, OPC_VSLT_D},
        [TCG_COND_LTU] = {OPC_VSLT_BU, OPC_VSLT_HU, OPC_VSLT_WU, OPC_VSLT_DU},
    };
    static const LoongArchInsn cmp_vec_imm_insn[16][4] = {
        [TCG_COND_EQ] = {OPC_VSEQI_B, OPC_VSEQI_H, OPC_VSEQI_W, OPC_VSEQI_D},
        [TCG_COND_LE] = {OPC_VSLEI_B, OPC_VSLEI_H, OPC_VSLEI_W, OPC_VSLEI_D},
        [TCG_COND_LEU] = {OPC_VSLEI_BU, OPC_VSLEI_HU, OPC_VSLEI_WU, OPC_VSLEI_DU},
        [TCG_COND_LT] = {OPC_VSLTI_B, OPC_VSLTI_H, OPC_VSLTI_W, OPC_VSLTI_D},
        [TCG_COND_LTU] = {OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU},
    };
    static const LoongArchInsn neg_vec_insn[4] = {
        OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D
    };

    a0 = args[0];
    a1 = args[1];
    a2 = args[2];

    /* Currently only supports V128 */
    tcg_debug_assert(type == TCG_TYPE_V128);

    switch (opc) {
    case INDEX_op_st_vec:
        /* Try to fit vst imm */
        if (-0x800 <= a2 && a2 <= 0x7ff) {
            tcg_out_opc_vst(s, a0, a1, a2);
        } else {
            tcg_out_movi(s, TCG_TYPE_I64, temp, a2);
            tcg_out_opc_vstx(s, a0, a1, temp);
        }
        break;
    case INDEX_op_ld_vec:
        /* Try to fit vld imm */
        if (-0x800 <= a2 && a2 <= 0x7ff) {
            tcg_out_opc_vld(s, a0, a1, a2);
        } else {
            tcg_out_movi(s, TCG_TYPE_I64, temp, a2);
            tcg_out_opc_vldx(s, a0, a1, temp);
        }
        break;
    case INDEX_op_and_vec:
        tcg_out_opc_vand_v(s, a0, a1, a2);
        break;
    case INDEX_op_andc_vec:
        /*
         * vandn vd, vj, vk: vd = vk & ~vj
         * andc_vec vd, vj, vk: vd = vj & ~vk
         * vj and vk are swapped
         */
        tcg_out_opc_vandn_v(s, a0, a2, a1);
        break;
    case INDEX_op_or_vec:
        tcg_out_opc_vor_v(s, a0, a1, a2);
        break;
    case INDEX_op_orc_vec:
        tcg_out_opc_vorn_v(s, a0, a1, a2);
        break;
    case INDEX_op_xor_vec:
        tcg_out_opc_vxor_v(s, a0, a1, a2);
        break;
    case INDEX_op_nor_vec:
        tcg_out_opc_vnor_v(s, a0, a1, a2);
        break;
    case INDEX_op_not_vec:
        tcg_out_opc_vnor_v(s, a0, a1, a1);
        break;
    case INDEX_op_cmp_vec:
        TCGCond cond = args[3];
        if (const_args[2]) {
            /*
             * cmp_vec dest, src, value
             * Try vseqi/vslei/vslti
             */
            int64_t value = sextract64(a2, 0, 8 << vece);
            if ((cond == TCG_COND_EQ || cond == TCG_COND_LE ||
                 cond == TCG_COND_LT) && (-0x10 <= value && value <= 0x0f)) {
                tcg_out32(s, encode_vdvjsk5_insn(cmp_vec_imm_insn[cond][vece],
                                                 a0, a1, value));
                break;
            } else if ((cond == TCG_COND_LEU || cond == TCG_COND_LTU) &&
                       (0x00 <= value && value <= 0x1f)) {
                tcg_out32(s, encode_vdvjuk5_insn(cmp_vec_imm_insn[cond][vece],
                                                 a0, a1, value));
                break;
            }

            /*
             * Fallback to:
             * dupi_vec temp, a2
             * cmp_vec a0, a1, temp, cond
             */
            tcg_out_dupi_vec(s, type, vece, temp_vec, a2);
            a2 = temp_vec;
        }

        insn = cmp_vec_insn[cond][vece];
        if (insn == 0) {
            TCGArg t;
            t = a1, a1 = a2, a2 = t;
            cond = tcg_swap_cond(cond);
            insn = cmp_vec_insn[cond][vece];
            tcg_debug_assert(insn != 0);
        }
        tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
        break;
    case INDEX_op_add_vec:
        tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], true);
        break;
    case INDEX_op_sub_vec:
        tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], false);
        break;
    case INDEX_op_neg_vec:
        tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], a0, a1));
        break;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        break;
    default:
        g_assert_not_reached();
    }
}

int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_or_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_nor_vec:
    case INDEX_op_not_vec:
    case INDEX_op_neg_vec:
        return 1;
    default:
        return 0;
    }
}

void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    g_assert_not_reached();
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rZ);

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        return C_O1_I1(r, r);

    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
        /*
         * LoongArch insns for these ops don't have reg-imm forms, but we
         * can express using andi/ori if ~constant satisfies
         * TCG_CT_CONST_U12.
         */
        return C_O1_I2(r, r, rC);

    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
        return C_O1_I2(r, r, ri);

    case INDEX_op_add_i32:
        return C_O1_I2(r, r, ri);
    case INDEX_op_add_i64:
        return C_O1_I2(r, r, rJ);

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        /* LoongArch reg-imm bitops have their imms ZERO-extended */
        return C_O1_I2(r, r, rU);

    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, rW);

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        /* Must deposit into the same register as input */
        return C_O1_I2(r, 0, rZ);

    case INDEX_op_sub_i32:
    case INDEX_op_setcond_i32:
        return C_O1_I2(r, rZ, ri);
    case INDEX_op_sub_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rJ);

    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_mulsh_i32:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i32:
    case INDEX_op_muluh_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
        return C_O1_I2(r, rZ, rZ);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, rZ, rJ, rZ, rZ);

    case INDEX_op_ld_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_dup_vec:
        return C_O1_I1(w, r);

    case INDEX_op_st_vec:
        return C_O0_I2(w, r);

    case INDEX_op_cmp_vec:
        return C_O1_I2(w, w, wM);

    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
        return C_O1_I2(w, w, wA);

    case INDEX_op_and_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_or_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_nor_vec:
        return C_O1_I2(w, w, w);

    case INDEX_op_not_vec:
    case INDEX_op_neg_vec:
        return C_O1_I1(w, w);

    default:
        g_assert_not_reached();
    }
}

static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,     /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_RA,     /* should be last for ABI compliance */
};

/* Stack frame parameters. */
#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

/* We're expecting to be able to use an immediate for frame allocation. */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
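
/*
 * Worked numbers (illustrative, assuming the usual values of
 * TCG_STATIC_CALL_ARGS_SIZE = 128 and CPU_TEMP_BUF_NLONGS = 128):
 * SAVE_SIZE = 11 * 8 = 88 and TEMP_SIZE = 1024, so FRAME_SIZE rounds
 * 128 + 1024 + 88 = 1240 up to 1248, well under the 0x7ff addi.d
 * immediate limit asserted above.
 */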

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue */
    tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

#if !defined(CONFIG_SOFTMMU)
    if (USE_GUEST_BASE) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* Call generated code */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr. Set return value to 0 */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0);
}

static void tcg_target_init(TCGContext *s)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);

    /* Server and desktop class cpus have UAL; embedded cpus do not. */
    if (!(hwcap & HWCAP_LOONGARCH_UAL)) {
        error_report("TCG: unaligned access support required; exiting");
        exit(EXIT_FAILURE);
    }

    if (hwcap & HWCAP_LOONGARCH_LSX) {
        use_lsx_instructions = 1;
    }

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = ALL_GENERAL_REGS;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);

    if (use_lsx_instructions) {
        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V31);
    }

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED);
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_LOONGARCH

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 23, 11,                  /* DW_CFA_offset, s0, -88 */
        0x80 + 24, 10,                  /* DW_CFA_offset, s1, -80 */
        0x80 + 25, 9,                   /* DW_CFA_offset, s2, -72 */
        0x80 + 26, 8,                   /* DW_CFA_offset, s3, -64 */
        0x80 + 27, 7,                   /* DW_CFA_offset, s4, -56 */
        0x80 + 28, 6,                   /* DW_CFA_offset, s5, -48 */
        0x80 + 29, 5,                   /* DW_CFA_offset, s6, -40 */
        0x80 + 30, 4,                   /* DW_CFA_offset, s7, -32 */
        0x80 + 31, 3,                   /* DW_CFA_offset, s8, -24 */
        0x80 + 22, 2,                   /* DW_CFA_offset, s9, -16 */
        0x80 + 1, 1,                    /* DW_CFA_offset, ra, -8 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}