/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
 *
 * Based on tcg/riscv/tcg-target.c.inc
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-ldst.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero",
    "ra",
    "tp",
    "sp",
    "a0",
    "a1",
    "a2",
    "a3",
    "a4",
    "a5",
    "a6",
    "a7",
    "t0",
    "t1",
    "t2",
    "t3",
    "t4",
    "t5",
    "t6",
    "t7",
    "t8",
    "r21", /* reserved in the LP64* ABI, hence no ABI name */
    "s9",
    "s0",
    "s1",
    "s2",
    "s3",
    "s4",
    "s5",
    "s6",
    "s7",
    "s8"
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Registers preserved across calls */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,

    /* Registers (potentially) clobbered across calls */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,
    TCG_REG_T7,
    TCG_REG_T8,

    /* Argument registers, opposite order of allocation. */
    TCG_REG_A7,
    TCG_REG_A6,
    TCG_REG_A5,
    TCG_REG_A4,
    TCG_REG_A3,
    TCG_REG_A2,
    TCG_REG_A1,
    TCG_REG_A0,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
};

#ifndef CONFIG_SOFTMMU
#define USE_GUEST_BASE     (guest_base != 0)
#define TCG_GUEST_BASE_REG TCG_REG_S1
#endif

#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_S32   0x400
#define TCG_CT_CONST_U12   0x800
#define TCG_CT_CONST_C12   0x1000
#define TCG_CT_CONST_WSZ   0x2000

#define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)
/*
 * For softmmu, we need to avoid conflicts with the first 5
 * argument registers to call the helper.  Some of these are
 * also used for the tlb lookup.
 */
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS  MAKE_64BIT_MASK(TCG_REG_A0, 5)
#else
#define SOFTMMU_RESERVE_REGS  0
#endif

static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
    return sextract64(val, pos, len);
}
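
/*
 * For instance, sextreg(0x00000fff, 0, 12) yields -1 since bit 11 is the
 * sign bit of the extracted field, while sextreg(0x12345678, 12, 20)
 * yields 0x12345 (bits 12..31, whose top bit is clear).
 */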

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return true;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return true;
    }
    if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) {
        return true;
    }
    if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) {
        return true;
    }
    if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
        return true;
    }
    return false;
}

/*
 * Relocation records defined in the LoongArch ELF psABI v1.00 are way too
 * complicated; a whopping stack machine is needed to stuff the fields, and
 * at the very least one SOP_PUSH and one SOP_POP (of the correct format)
 * are required.
 *
 * Hence, define our own simpler relocation types.  Numbers are chosen so as
 * not to collide with potential future additions to the true ELF relocation
 * type numbers.
 */

/* Field Sk16, shifted right by 2; suitable for conditional jumps */
#define R_LOONGARCH_BR_SK16     256
/* Field Sd10k16, shifted right by 2; suitable for B and BL */
#define R_LOONGARCH_BR_SD10K16  257
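
/*
 * Reach of the two kinds, derived from the field widths used below:
 * Sk16 stores a 16-bit signed word offset, so conditional branches reach
 * +/- 2^17 bytes (128 KiB); Sd10k16 stores a 26-bit signed word offset,
 * so B and BL reach +/- 2^27 bytes (128 MiB).
 */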

static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 16)) {
        *src_rw = deposit64(*src_rw, 10, 16, offset);
        return true;
    }
    return false;
}

static bool reloc_br_sd10k16(tcg_insn_unit *src_rw,
                             const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 26)) {
        *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */
        *src_rw = deposit64(*src_rw, 10, 16, offset);      /* slot k16 */
        return true;
    }
    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_LOONGARCH_BR_SK16:
        return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value);
    case R_LOONGARCH_BR_SD10K16:
        return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#include "tcg-insn-defs.c.inc"

/*
 * TCG intrinsics
 */

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Baseline LoongArch only has the full barrier, unfortunately.  */
    tcg_out_opc_dbar(s, 0);
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        /*
         * The conventional register-register move on LoongArch is
         * `or dst, src, zero`.
         */
        tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

/* Loads a 32-bit immediate into rd, sign-extended.  */
static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val)
{
    tcg_target_long lo = sextreg(val, 0, 12);
    tcg_target_long hi12 = sextreg(val, 12, 20);

    /* Single-instruction cases.  */
    if (hi12 == 0) {
        /* val fits in uimm12: ori rd, zero, val */
        tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val);
        return;
    }
    if (hi12 == sextreg(lo, 12, 20)) {
        /* val fits in simm12: addi.w rd, zero, val */
        tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val);
        return;
    }

    /* High bits must be set; load with lu12i.w + optional ori.  */
    tcg_out_opc_lu12i_w(s, rd, hi12);
    if (lo != 0) {
        tcg_out_opc_ori(s, rd, rd, lo & 0xfff);
    }
}
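
/*
 * Worked examples (illustrative): val = 0x12345 splits into hi12 = 0x12
 * and lo = 0x345, giving `lu12i.w rd, 0x12` + `ori rd, rd, 0x345`;
 * val = -1 has hi12 == sextreg(lo, 12, 20), so a single
 * `addi.w rd, zero, -1` suffices; val = 0x7ff has hi12 == 0 and becomes
 * a single `ori rd, zero, 0x7ff`.
 */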

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    /*
     * LoongArch conventionally loads 64-bit immediates in at most 4 steps,
     * with dedicated instructions for filling the respective bitfields
     * below:
     *
     *        6                   5                   4               3
     *  3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
     * +-----------------------+---------------------------------------+...
     * |          hi52         |                  hi32                 |
     * +-----------------------+---------------------------------------+...
     *       3                   2                   1
     *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------------+-------------------------+
     * ...|                 hi12                |           lo            |
     * ...+-------------------------------------+-------------------------+
     *
     * Check if val belongs to one of the several fast cases, before falling
     * back to the slow path.
     */
    intptr_t pc_offset;
    tcg_target_long val_lo, val_hi, pc_hi, offset_hi;
    tcg_target_long hi12, hi32, hi52;

    /* Value fits in signed i32.  */
    if (type == TCG_TYPE_I32 || val == (int32_t)val) {
        tcg_out_movi_i32(s, rd, val);
        return;
    }

    /* PC-relative cases.  */
    pc_offset = tcg_pcrel_diff(s, (void *)val);
    if (pc_offset == sextreg(pc_offset, 0, 22) && (pc_offset & 3) == 0) {
        /* Single pcaddu2i.  */
        tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2);
        return;
    }

    if (pc_offset == (int32_t)pc_offset) {
        /* Offset within 32 bits; load with pcalau12i + ori.  */
        val_lo = sextreg(val, 0, 12);
        val_hi = val >> 12;
        pc_hi = (val - pc_offset) >> 12;
        offset_hi = val_hi - pc_hi;

        tcg_debug_assert(offset_hi == sextreg(offset_hi, 0, 20));
        tcg_out_opc_pcalau12i(s, rd, offset_hi);
        if (val_lo != 0) {
            tcg_out_opc_ori(s, rd, rd, val_lo & 0xfff);
        }
        return;
    }

    hi12 = sextreg(val, 12, 20);
    hi32 = sextreg(val, 32, 20);
    hi52 = sextreg(val, 52, 12);

    /* Single cu52i.d case.  */
    if ((hi52 != 0) && (ctz64(val) >= 52)) {
        tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52);
        return;
    }

    /* Slow path.  Initialize the low 32 bits, then concat high bits.  */
    tcg_out_movi_i32(s, rd, val);

    /* Load hi32 and hi52 explicitly when they are unexpected values.  */
    if (hi32 != sextreg(hi12, 20, 20)) {
        tcg_out_opc_cu32i_d(s, rd, hi32);
    }

    if (hi52 != sextreg(hi32, 20, 12)) {
        tcg_out_opc_cu52i_d(s, rd, rd, hi52);
    }
}
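
/*
 * Worked example of the slow path (illustrative, assuming the PC-relative
 * cases don't hit): val = 0x123456789abcdef0 splits into hi52 = 0x123,
 * hi32 = 0x45678, hi12 = 0x9abcd and lo = 0xef0, yielding four insns:
 *   lu12i.w  rd, 0x9abcd      # rd = 0xffffffff9abcd000
 *   ori      rd, rd, 0xef0    # rd = 0xffffffff9abcdef0
 *   cu32i.d  rd, 0x45678      # rd = 0x000456789abcdef0
 *   cu52i.d  rd, rd, 0x123    # rd = 0x123456789abcdef0
 */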

static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd,
                         TCGReg rs, tcg_target_long imm)
{
    tcg_target_long lo12 = sextreg(imm, 0, 12);
    tcg_target_long hi16 = sextreg(imm - lo12, 16, 16);

    /*
     * Note that there's a hole in between hi16 and lo12:
     *
     *       3                   2                   1                   0
     *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------+-------+-----------------------+
     *    |             hi16              |  hole |          lo12         |
     * ...+-------------------------------+-------+-----------------------+
     *
     * For bits within that hole, it's more efficient to use LU12I and ADD.
     */
    if (imm == (hi16 << 16) + lo12) {
        if (hi16) {
            tcg_out_opc_addu16i_d(s, rd, rs, hi16);
            rs = rd;
        }
        if (type == TCG_TYPE_I32) {
            tcg_out_opc_addi_w(s, rd, rs, lo12);
        } else if (lo12) {
            tcg_out_opc_addi_d(s, rd, rs, lo12);
        } else {
            tcg_out_mov(s, type, rd, rs);
        }
    } else {
        tcg_out_movi(s, type, TCG_REG_TMP0, imm);
        if (type == TCG_TYPE_I32) {
            tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0);
        } else {
            tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0);
        }
    }
}
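
/*
 * Worked examples (illustrative): imm = 0x7fff0123 gives lo12 = 0x123 and
 * hi16 = 0x7fff, and (hi16 << 16) + lo12 reconstructs imm, so one
 * addu16i.d plus one addi suffice.  imm = 0x1000 lives entirely in the
 * hole (bits 12..15): lo12 = 0 and hi16 = 0, the reconstruction fails,
 * and we fall back to a movi into TMP0 plus a register add.
 */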

static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_andi(s, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31);
}

static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_sext_b(s, ret, arg);
}

static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_sext_h(s, ret, arg);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_addi_w(s, ret, arg, 0);
}

static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc,
                           TCGReg a0, TCGReg a1, TCGReg a2,
                           bool c2, bool is_32bit)
{
    if (c2) {
        /*
         * Fast path: semantics already satisfied due to constraint and
         * insn behavior, single instruction is enough.
         */
        tcg_debug_assert(a2 == (is_32bit ? 32 : 64));
        /* all clz/ctz insns belong to DJ-format */
        tcg_out32(s, encode_dj_insn(opc, a0, a1));
        return;
    }

    tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1));
    /* a0 = a1 ? REG_TMP0 : a2 */
    tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
    tcg_out_opc_masknez(s, a0, a2, a1);
    tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0);
}
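
/*
 * The three-insn tail is a select built from the LoongArch mask insns:
 * maskeqz rd, rj, rk computes rk ? rj : 0 and masknez rd, rj, rk computes
 * rk ? 0 : rj, so or-ing the two results gives a0 = a1 ? TMP0 : a2, which
 * is exactly TCG's clz/ctz semantics for a zero input.
 */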

#define SETCOND_INV    TCG_TARGET_NB_REGS
#define SETCOND_NEZ    (SETCOND_INV << 1)
#define SETCOND_FLAGS  (SETCOND_INV | SETCOND_NEZ)

static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
                               TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int flags = 0;

    switch (cond) {
    case TCG_COND_EQ:    /* -> NE  */
    case TCG_COND_GE:    /* -> LT  */
    case TCG_COND_GEU:   /* -> LTU */
    case TCG_COND_GT:    /* -> LE  */
    case TCG_COND_GTU:   /* -> LEU */
        cond = tcg_invert_cond(cond);
        flags ^= SETCOND_INV;
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_LEU:
        /*
         * If we have a constant input, the most efficient way to implement
         * LE is by adding 1 and using LT.  Watch out for wrap around for LEU.
         * We don't need to worry about this for LE because the constant input
         * is still constrained to int32_t, and INT32_MAX+1 is representable
         * in the 64-bit temporary register.
         */
        if (c2) {
            if (cond == TCG_COND_LEU) {
                /* unsigned <= -1 is true */
                if (arg2 == -1) {
                    tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
                    return ret;
                }
                cond = TCG_COND_LTU;
            } else {
                cond = TCG_COND_LT;
            }
            arg2 += 1;
        } else {
            TCGReg tmp = arg2;
            arg2 = arg1;
            arg1 = tmp;
            cond = tcg_swap_cond(cond);    /* LE -> GE */
            cond = tcg_invert_cond(cond);  /* GE -> LT */
            flags ^= SETCOND_INV;
        }
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_NE:
        flags |= SETCOND_NEZ;
        if (!c2) {
            tcg_out_opc_xor(s, ret, arg1, arg2);
        } else if (arg2 == 0) {
            ret = arg1;
        } else if (arg2 >= 0 && arg2 <= 0xfff) {
            tcg_out_opc_xori(s, ret, arg1, arg2);
        } else {
            tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2);
        }
        break;

    case TCG_COND_LT:
    case TCG_COND_LTU:
        if (c2) {
            if (arg2 >= -0x800 && arg2 <= 0x7ff) {
                if (cond == TCG_COND_LT) {
                    tcg_out_opc_slti(s, ret, arg1, arg2);
                } else {
                    tcg_out_opc_sltui(s, ret, arg1, arg2);
                }
                break;
            }
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
            arg2 = TCG_REG_TMP0;
        }
        if (cond == TCG_COND_LT) {
            tcg_out_opc_slt(s, ret, arg1, arg2);
        } else {
            tcg_out_opc_sltu(s, ret, arg1, arg2);
        }
        break;

    default:
        g_assert_not_reached();
    }

    return ret | flags;
}

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);

    if (tmpflags != ret) {
        TCGReg tmp = tmpflags & ~SETCOND_FLAGS;

        switch (tmpflags & SETCOND_FLAGS) {
        case SETCOND_INV:
            /* Intermediate result is boolean: simply invert. */
            tcg_out_opc_xori(s, ret, tmp, 1);
            break;
        case SETCOND_NEZ:
            /* Intermediate result is zero/non-zero: test != 0. */
            tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
            break;
        case SETCOND_NEZ | SETCOND_INV:
            /* Intermediate result is zero/non-zero: test == 0. */
            tcg_out_opc_sltui(s, ret, tmp, 1);
            break;
        default:
            g_assert_not_reached();
        }
    }
}
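
/*
 * Example lowerings (illustrative): GE first inverts to LT with
 * SETCOND_INV, emitting `slt ret, arg1, arg2`, and the INV flag is then
 * resolved here as `xori ret, ret, 1`.  EQ against a constant in
 * [0, 0xfff] inverts to NE, emitting `xori ret, arg1, c` (zero/non-zero),
 * and the NEZ|INV flags resolve to `sltui ret, ret, 1`.
 */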

static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg c1, tcg_target_long c2, bool const2,
                            TCGReg v1, TCGReg v2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2);
    TCGReg t;

    /* Standardize the test below to t != 0. */
    if (tmpflags & SETCOND_INV) {
        t = v1, v1 = v2, v2 = t;
    }

    t = tmpflags & ~SETCOND_FLAGS;
    if (v1 == TCG_REG_ZERO) {
        tcg_out_opc_masknez(s, ret, v2, t);
    } else if (v2 == TCG_REG_ZERO) {
        tcg_out_opc_maskeqz(s, ret, v1, t);
    } else {
        tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */
        tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */
        tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2);
    }
}
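
/*
 * Illustrative expansion for the general case: the test value t is
 * computed by tcg_out_setcond_int into TMP0, then
 *   masknez TMP2, v2, t    (t ? 0 : v2)
 *   maskeqz TMP1, v1, t    (t ? v1 : 0)
 *   or      ret, TMP1, TMP2
 * performs the select; an inverted test merely swaps v1/v2 up front
 * instead of emitting extra instructions.
 */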

/*
 * Branch helpers
 */

static const struct {
    LoongArchInsn op;
    bool swap;
} tcg_brcond_to_loongarch[] = {
    [TCG_COND_EQ]  = { OPC_BEQ,  false },
    [TCG_COND_NE]  = { OPC_BNE,  false },
    [TCG_COND_LT]  = { OPC_BGT,  true  },
    [TCG_COND_GE]  = { OPC_BLE,  true  },
    [TCG_COND_LE]  = { OPC_BLE,  false },
    [TCG_COND_GT]  = { OPC_BGT,  false },
    [TCG_COND_LTU] = { OPC_BGTU, true  },
    [TCG_COND_GEU] = { OPC_BLEU, true  },
    [TCG_COND_LEU] = { OPC_BLEU, false },
    [TCG_COND_GTU] = { OPC_BGTU, false }
};

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    LoongArchInsn op = tcg_brcond_to_loongarch[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_loongarch[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    /* all conditional branch insns belong to DJSk16-format */
    tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0);
    tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0));
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);

    tcg_debug_assert((offset & 3) == 0);
    if (offset == sextreg(offset, 0, 28)) {
        /* short jump: +/- 256MiB */
        if (tail) {
            tcg_out_opc_b(s, offset >> 2);
        } else {
            tcg_out_opc_bl(s, offset >> 2);
        }
    } else if (offset == sextreg(offset, 0, 38)) {
        /* long jump: +/- 256GiB */
        tcg_target_long lo = sextreg(offset, 0, 18);
        tcg_target_long hi = offset - lo;
        tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    } else {
        /* far jump: 64-bit */
        tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18);
        tcg_target_long hi = (tcg_target_long)arg - lo;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    }
}
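
/*
 * Range arithmetic behind the tiers (illustrative): B/BL carry a 26-bit
 * signed word offset, i.e. a 28-bit signed byte offset spanning 256 MiB
 * (+/- 128 MiB) around PC; pcaddu18i + jirl extends that to a signed
 * 38-bit byte offset, a 256 GiB span; anything farther materializes the
 * full 64-bit target via tcg_out_movi before the jirl.
 */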

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, arg, false);
}

/*
 * Load/store helpers
 */

static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_LD_B:
    case OPC_LD_BU:
    case OPC_LD_H:
    case OPC_LD_HU:
    case OPC_LD_W:
    case OPC_LD_WU:
    case OPC_LD_D:
    case OPC_ST_B:
    case OPC_ST_H:
    case OPC_ST_W:
    case OPC_ST_D:
        tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12));
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is_32bit = type == TCG_TYPE_I32;
    tcg_out_ldst(s, is_32bit ? OPC_LD_W : OPC_LD_D, arg, arg1, arg2);
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is_32bit = type == TCG_TYPE_I32;
    tcg_out_ldst(s, is_32bit ? OPC_ST_W : OPC_ST_D, arg, arg1, arg2);
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}

/*
 * Load/store helpers for SoftMMU, and qemu_ld/st implementations
 */

#if defined(CONFIG_SOFTMMU)
/*
 * helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     MemOpIdx oi, uintptr_t ra)
 */
static void * const qemu_ld_helpers[4] = {
    [MO_8]  = helper_ret_ldub_mmu,
    [MO_16] = helper_le_lduw_mmu,
    [MO_32] = helper_le_ldul_mmu,
    [MO_64] = helper_le_ldq_mmu,
};

/*
 * helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, MemOpIdx oi,
 *                                     uintptr_t ra)
 */
static void * const qemu_st_helpers[4] = {
    [MO_8]  = helper_ret_stb_mmu,
    [MO_16] = helper_le_stw_mmu,
    [MO_32] = helper_le_stl_mmu,
    [MO_64] = helper_le_stq_mmu,
};

/* We expect to use a 12-bit negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));

static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_b(s, 0);
    return reloc_br_sd10k16(s->code_ptr - 1, target);
}

/*
 * Emits common code for TLB addend lookup, which eventually loads the
 * addend into TCG_REG_TMP2.
 */
static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, MemOpIdx oi,
                             tcg_insn_unit **label_ptr, bool is_load)
{
    MemOp opc = get_memop(oi);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;
    int mem_index = get_mmuidx(oi);
    int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);

    tcg_out_opc_srli_d(s, TCG_REG_TMP2, addrl,
                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
    tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
               is_load ? offsetof(CPUTLBEntry, addr_read)
                       : offsetof(CPUTLBEntry, addr_write));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
               offsetof(CPUTLBEntry, addend));

    /* We don't support unaligned accesses.  */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    /* Clear the non-page, non-alignment bits from the address.  */
    compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
    tcg_out_opc_and(s, TCG_REG_TMP1, TCG_REG_TMP1, addrl);

    /* Compare masked address with the TLB entry.  */
    label_ptr[0] = s->code_ptr;
    tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);

    /* TLB Hit - addend in TCG_REG_TMP2, ready for use.  */
}
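
/*
 * Lookup walkthrough (illustrative): TMP0/TMP1 receive the fast-path
 * mask and table base for mem_index; the address is shifted so the page
 * number lines up with CPU_TLB_ENTRY_BITS, masked into a table offset,
 * and added to the table base, leaving a CPUTLBEntry pointer in TMP2.
 * The comparator goes to TMP0 and the addend back to TMP2 before the
 * bne, so on a hit execution falls through with the host-page addend
 * already in place.
 */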

static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
                                TCGType type,
                                TCGReg datalo, TCGReg addrlo,
                                void *raddr, tcg_insn_unit **label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->type = type;
    label->datalo_reg = datalo;
    label->datahi_reg = 0; /* unused */
    label->addrlo_reg = addrlo;
    label->addrhi_reg = 0; /* unused */
    label->raddr = tcg_splitwx_to_rx(raddr);
    label->label_ptr[0] = label_ptr[0];
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    MemOp size = opc & MO_SIZE;
    TCGType type = l->type;

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call load helper */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A2, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, (tcg_target_long)l->raddr);

    tcg_out_call_int(s, qemu_ld_helpers[size], false);

    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_out_ext8s(s, l->datalo_reg, TCG_REG_A0);
        break;
    case MO_SW:
        tcg_out_ext16s(s, l->datalo_reg, TCG_REG_A0);
        break;
    case MO_SL:
        tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0);
        break;
    case MO_UL:
        if (type == TCG_TYPE_I32) {
            /* MO_UL loads of i32 should be sign-extended too */
            tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0);
            break;
        }
        /* fallthrough */
    default:
        tcg_out_mov(s, type, l->datalo_reg, TCG_REG_A0);
        break;
    }

    return tcg_out_goto(s, l->raddr);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    MemOp size = opc & MO_SIZE;

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call store helper */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg);
    switch (size) {
    case MO_8:
        tcg_out_ext8u(s, TCG_REG_A2, l->datalo_reg);
        break;
    case MO_16:
        tcg_out_ext16u(s, TCG_REG_A2, l->datalo_reg);
        break;
    case MO_32:
        tcg_out_ext32u(s, TCG_REG_A2, l->datalo_reg);
        break;
    case MO_64:
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_A2, l->datalo_reg);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A4, (tcg_target_long)l->raddr);

    tcg_out_call_int(s, qemu_st_helpers[size], false);

    return tcg_out_goto(s, l->raddr);
}
#else
/*
 * Alignment helpers for user-mode emulation
 */

static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
                                   unsigned a_bits)
{
    TCGLabelQemuLdst *l = new_ldst_label(s);

    l->is_ld = is_ld;
    l->addrlo_reg = addr_reg;

    /*
     * Without micro-architecture details, we don't know which of bstrpick or
     * andi is faster, so use bstrpick as it's not constrained by imm field
     * width.  (Not to say alignments >= 2^12 are going to happen any time
     * soon.)
     */
    tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);

    l->label_ptr[0] = s->code_ptr;
    tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);

    l->raddr = tcg_splitwx_to_rx(s->code_ptr);
}

static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
{
    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);

    /* tail call, with the return address back inline.  */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr);
    tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld
                                                : helper_unaligned_st),
                     true);
    return true;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}
#endif /* CONFIG_SOFTMMU */

/*
 * `ext32u` the address register into the given temp register
 * if the target is 32-bit; otherwise this is a no-op.
 *
 * Returns the address register ready for use with TLB addend.
 */
static TCGReg tcg_out_zext_addr_if_32_bit(TCGContext *s,
                                          TCGReg addr, TCGReg tmp)
{
    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, tmp, addr);
        return tmp;
    }
    return addr;
}

static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj,
                                    TCGReg rk, MemOp opc, TCGType type)
{
    /* Byte swapping is left to middle-end expansion.  */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_opc_ldx_bu(s, rd, rj, rk);
        break;
    case MO_SB:
        tcg_out_opc_ldx_b(s, rd, rj, rk);
        break;
    case MO_UW:
        tcg_out_opc_ldx_hu(s, rd, rj, rk);
        break;
    case MO_SW:
        tcg_out_opc_ldx_h(s, rd, rj, rk);
        break;
    case MO_UL:
        if (type == TCG_TYPE_I64) {
            tcg_out_opc_ldx_wu(s, rd, rj, rk);
            break;
        }
        /* fallthrough */
    case MO_SL:
        tcg_out_opc_ldx_w(s, rd, rj, rk);
        break;
    case MO_UQ:
        tcg_out_opc_ldx_d(s, rd, rj, rk);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type)
{
    TCGReg addr_regl;
    TCGReg data_regl;
    MemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#else
    unsigned a_bits;
#endif
    TCGReg base;

    data_regl = *args++;
    addr_regl = *args++;
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 1);
    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
    tcg_out_qemu_ld_indexed(s, data_regl, base, TCG_REG_TMP2, opc, type);
    add_qemu_ldst_label(s, 1, oi, type,
                        data_regl, addr_regl,
                        s->code_ptr, label_ptr);
#else
    a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, true, addr_regl, a_bits);
    }
    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
    TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
    tcg_out_qemu_ld_indexed(s, data_regl, base, guest_base_reg, opc, type);
#endif
}

static void tcg_out_qemu_st_indexed(TCGContext *s, TCGReg data,
                                    TCGReg rj, TCGReg rk, MemOp opc)
{
    /* Byte swapping is left to middle-end expansion.  */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_opc_stx_b(s, data, rj, rk);
        break;
    case MO_16:
        tcg_out_opc_stx_h(s, data, rj, rk);
        break;
    case MO_32:
        tcg_out_opc_stx_w(s, data, rj, rk);
        break;
    case MO_64:
        tcg_out_opc_stx_d(s, data, rj, rk);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
{
    TCGReg addr_regl;
    TCGReg data_regl;
    MemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#else
    unsigned a_bits;
#endif
    TCGReg base;

    data_regl = *args++;
    addr_regl = *args++;
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 0);
    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
    tcg_out_qemu_st_indexed(s, data_regl, base, TCG_REG_TMP2, opc);
    add_qemu_ldst_label(s, 0, oi,
                        0, /* type param is unused for stores */
                        data_regl, addr_regl,
                        s->code_ptr, label_ptr);
#else
    a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, false, addr_regl, a_bits);
    }
    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
    TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
    tcg_out_qemu_st_indexed(s, data_regl, base, guest_base_reg, opc);
#endif
}

/*
 * Entry-points
 */

static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    /* Reuse the zeroing that exists for goto_ptr.  */
    if (a0 == 0) {
        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
        tcg_out_call_int(s, tb_ret_addr, true);
    }
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /*
     * Direct branch, or load indirect address, to be patched
     * by tb_target_set_jmp_target.  Check indirect load offset
     * in range early, regardless of direct branch distance,
     * via assert within tcg_out_opc_pcaddu2i.
     */
    uintptr_t i_addr = get_jmp_target_addr(s, which);
    intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr);

    set_jmp_insn_offset(s, which);
    tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2);

    /* Finish the load and indirect branch.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    uintptr_t d_addr = tb->jmp_target_addr[n];
    ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2;
    tcg_insn_unit insn;

    /* Either directly branch, or load slot address for indirect branch. */
    if (d_disp == sextreg(d_disp, 0, 26)) {
        insn = encode_sd10k16_insn(OPC_B, d_disp);
    } else {
        uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n];
        intptr_t i_disp = i_addr - jmp_rx;
        insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2);
    }

    qatomic_set((tcg_insn_unit *)jmp_rw, insn);
    flush_idcache_range(jmp_rx, jmp_rw, 4);
}
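
/*
 * Patching note (illustrative): the single patched slot either becomes a
 * direct `b` when the destination fits the 26-bit word offset, or a
 * `pcaddu2i` that recomputes the address of tb->jmp_target_addr[n] so the
 * load + jirl emitted by tcg_out_goto_tb branches indirectly through it.
 */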

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_goto_ptr:
        tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0),
                      0);
        tcg_out_opc_b(s, 0);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        tcg_out_ext8s(s, a0, a1);
        break;

    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
        tcg_out_ext8u(s, a0, a1);
        break;

    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        tcg_out_ext16s(s, a0, a1);
        break;

    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
        tcg_out_ext16u(s, a0, a1);
        break;

    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
        tcg_out_ext32u(s, a0, a1);
        break;

    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext_i32_i64:
        tcg_out_ext32s(s, a0, a1);
        break;

    case INDEX_op_extrh_i64_i32:
        tcg_out_opc_srai_d(s, a0, a1, 32);
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO);
        break;

    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        if (c2) {
            tcg_out_opc_ori(s, a0, a1, a2);
            tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO);
        } else {
            tcg_out_opc_nor(s, a0, a1, a2);
        }
        break;

    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
        if (c2) {
            /* guaranteed to fit due to constraint */
            tcg_out_opc_andi(s, a0, a1, ~a2);
        } else {
            tcg_out_opc_andn(s, a0, a1, a2);
        }
        break;

    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
        if (c2) {
            /* guaranteed to fit due to constraint */
            tcg_out_opc_ori(s, a0, a1, ~a2);
        } else {
            tcg_out_opc_orn(s, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_andi(s, a0, a1, a2);
        } else {
            tcg_out_opc_and(s, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_ori(s, a0, a1, a2);
        } else {
            tcg_out_opc_or(s, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_xori(s, a0, a1, a2);
        } else {
            tcg_out_opc_xor(s, a0, a1, a2);
        }
        break;

    case INDEX_op_extract_i32:
        tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1);
        break;
    case INDEX_op_extract_i64:
        tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1);
        break;
    case INDEX_op_deposit_i64:
        tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
        break;

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        tcg_out_opc_revb_2h(s, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_ext16s(s, a0, a0);
        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext16u(s, a0, a0);
        }
        break;

    case INDEX_op_bswap32_i32:
        /* All 32-bit values are computed sign-extended in the register.  */
        a2 = TCG_BSWAP_OS;
        /* fallthrough */
    case INDEX_op_bswap32_i64:
        tcg_out_opc_revb_2w(s, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_ext32s(s, a0, a0);
        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext32u(s, a0, a0);
        }
        break;

    case INDEX_op_bswap64_i64:
        tcg_out_opc_revb_d(s, a0, a1);
        break;

    case INDEX_op_clz_i32:
        tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true);
        break;
    case INDEX_op_clz_i64:
        tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false);
        break;

    case INDEX_op_ctz_i32:
        tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true);
        break;
    case INDEX_op_ctz_i64:
        tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
        break;

    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_sll_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_sll_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_srl_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_srl_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_sra_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_sra_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_rotl_i32:
        /* transform into equivalent rotr/rotri */
        if (c2) {
            tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f);
        } else {
            tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0);
        }
        break;
    case INDEX_op_rotl_i64:
        /* transform into equivalent rotr/rotri */
        if (c2) {
            tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f);
        } else {
            tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0);
        }
        break;

    case INDEX_op_rotr_i32:
        if (c2) {
            tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_rotr_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_rotr_i64:
        if (c2) {
            tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_rotr_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2);
        } else {
            tcg_out_opc_add_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2);
        } else {
            tcg_out_opc_add_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2);
        } else {
            tcg_out_opc_sub_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2);
        } else {
            tcg_out_opc_sub_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_mul_i32:
        tcg_out_opc_mul_w(s, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_mul_d(s, a0, a1, a2);
        break;

    case INDEX_op_mulsh_i32:
        tcg_out_opc_mulh_w(s, a0, a1, a2);
        break;
    case INDEX_op_mulsh_i64:
        tcg_out_opc_mulh_d(s, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
        tcg_out_opc_mulh_wu(s, a0, a1, a2);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_opc_mulh_du(s, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_div_w(s, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_div_d(s, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_div_wu(s, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_div_du(s, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_mod_w(s, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_mod_d(s, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_mod_wu(s, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_mod_du(s, a0, a1, a2);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2, c2);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]);
        break;

    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
        break;
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LD_H, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LD_W, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD_D, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_ST_B, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_ST_H, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_ST_W, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_ST_D, a0, a1, a2);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    default:
        g_assert_not_reached();
    }
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rZ);

    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        return C_O0_I2(LZ, L);

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
        return C_O1_I1(r, r);

    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(r, L);

    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
        /*
         * LoongArch insns for these ops don't have reg-imm forms, but we
         * can express using andi/ori if ~constant satisfies
         * TCG_CT_CONST_C12.
         */
        return C_O1_I2(r, r, rC);

    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
        return C_O1_I2(r, r, ri);

    case INDEX_op_add_i32:
        return C_O1_I2(r, r, ri);
    case INDEX_op_add_i64:
        return C_O1_I2(r, r, rJ);

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        /* LoongArch reg-imm bitops have their imms ZERO-extended */
        return C_O1_I2(r, r, rU);

    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, rW);

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        /* Must deposit into the same register as input */
        return C_O1_I2(r, 0, rZ);

    case INDEX_op_sub_i32:
    case INDEX_op_setcond_i32:
        return C_O1_I2(r, rZ, ri);
    case INDEX_op_sub_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rJ);

    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_mulsh_i32:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i32:
    case INDEX_op_muluh_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
        return C_O1_I2(r, rZ, rZ);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, rZ, rJ, rZ, rZ);

    default:
        g_assert_not_reached();
    }
}

static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,     /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_RA,     /* should be last for ABI compliance */
};

/* Stack frame parameters.  */
#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

/* We're expecting to be able to use an immediate for frame allocation.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
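
/*
 * Worked sizing (illustrative, assuming an LP64 host, 16-byte stack
 * alignment, TCG_STATIC_CALL_ARGS_SIZE = 128 and CPU_TEMP_BUF_NLONGS =
 * 128): SAVE_SIZE = 11 * 8 = 88 and TEMP_SIZE = 1024, so FRAME_SIZE =
 * (128 + 1024 + 88 + 15) & -16 = 1248 bytes, comfortably under the
 * 0x7ff addi.d immediate limit checked above.
 */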

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue */
    tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

#if !defined(CONFIG_SOFTMMU)
    if (USE_GUEST_BASE) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* Call generated code */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr.  Set return value to 0 */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0);
}

static void tcg_target_init(TCGContext *s)
{
    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = ALL_GENERAL_REGS;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_LOONGARCH

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ...  */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 23, 11,                  /* DW_CFA_offset, s0, -88 */
        0x80 + 24, 10,                  /* DW_CFA_offset, s1, -80 */
        0x80 + 25, 9,                   /* DW_CFA_offset, s2, -72 */
        0x80 + 26, 8,                   /* DW_CFA_offset, s3, -64 */
        0x80 + 27, 7,                   /* DW_CFA_offset, s4, -56 */
        0x80 + 28, 6,                   /* DW_CFA_offset, s5, -48 */
        0x80 + 29, 5,                   /* DW_CFA_offset, s6, -40 */
        0x80 + 30, 4,                   /* DW_CFA_offset, s7, -32 */
        0x80 + 31, 3,                   /* DW_CFA_offset, s8, -24 */
        0x80 + 22, 2,                   /* DW_CFA_offset, s9, -16 */
        0x80 + 1,  1,                   /* DW_CFA_offset, ra, -8 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}