/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
 *
 * Based on tcg/riscv/tcg-target.c.inc
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "../tcg-ldst.c.inc"
#include <asm/hwcap.h>
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero", "ra", "tp", "sp",
    "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
    "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8",
    "r21", /* reserved in the LP64* ABI, hence no ABI name */
    "s9", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8"
};
#endif
static const int tcg_target_reg_alloc_order[] = {
    /* Registers preserved across calls */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1, TCG_REG_S2, TCG_REG_S3, TCG_REG_S4, TCG_REG_S5,
    TCG_REG_S6, TCG_REG_S7, TCG_REG_S8, TCG_REG_S9,

    /* Registers (potentially) clobbered across calls */
    TCG_REG_T0, TCG_REG_T1, TCG_REG_T2, TCG_REG_T3, TCG_REG_T4,
    TCG_REG_T5, TCG_REG_T6, TCG_REG_T7, TCG_REG_T8,

    /* Argument registers, opposite order of allocation. */
    TCG_REG_A7, TCG_REG_A6, TCG_REG_A5, TCG_REG_A4,
    TCG_REG_A3, TCG_REG_A2, TCG_REG_A1, TCG_REG_A0,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0, TCG_REG_A1, TCG_REG_A2, TCG_REG_A3,
    TCG_REG_A4, TCG_REG_A5, TCG_REG_A6, TCG_REG_A7,
};
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 1);
    return TCG_REG_A0 + slot;
}
#ifndef CONFIG_SOFTMMU
#define USE_GUEST_BASE     (guest_base != 0)
#define TCG_GUEST_BASE_REG TCG_REG_S1
#endif
#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_S32   0x400
#define TCG_CT_CONST_U12   0x800
#define TCG_CT_CONST_C12   0x1000
#define TCG_CT_CONST_WSZ   0x2000

#define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)
static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
    return sextract64(val, pos, len);
}
/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return true;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return true;
    }
    if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) {
        return true;
    }
    if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) {
        return true;
    }
    if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
        return true;
    }
    return false;
}
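
/*
 * Worked examples for the matchers above (illustrative only):
 *   val = 0x7ff  matches S12 and U12;
 *   val = 0xfff  matches U12 only, since sextreg(0xfff, 0, 12) == -1;
 *   val = -1     matches S12, and C12 because ~(-1) == 0;
 *   val = ~0xff0 matches C12, which is what lets an andc against it
 *                become a single `andi` with immediate 0xff0.
 */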
/*
 * Relocation records defined in the LoongArch ELF psABI v1.00 are way too
 * complicated; a whopping stack machine is needed to stuff the fields, and
 * at the very least one SOP_PUSH and one SOP_POP (of the correct format)
 * are required.
 *
 * Hence, define our own simpler relocation types.  Numbers are chosen so as
 * not to collide with potential future additions to the true ELF relocation
 * type space.
 */

/* Field Sk16, shifted right by 2; suitable for conditional jumps */
#define R_LOONGARCH_BR_SK16     256
/* Field Sd10k16, shifted right by 2; suitable for B and BL */
#define R_LOONGARCH_BR_SD10K16  257
static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 16)) {
        *src_rw = deposit64(*src_rw, 10, 16, offset);
        return true;
    }

    return false;
}

static bool reloc_br_sd10k16(tcg_insn_unit *src_rw,
                             const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 26)) {
        *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */
        *src_rw = deposit64(*src_rw, 10, 16, offset);      /* slot k16 */
        return true;
    }

    return false;
}
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_LOONGARCH_BR_SK16:
        return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value);
    case R_LOONGARCH_BR_SD10K16:
        return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}
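
/*
 * Packing illustration (not emitted code): a conditional branch whose
 * target lies 0x2000 bytes ahead has offset >> 2 == 0x800, which fits
 * Sk16 and is deposited into insn bits [25:10].  For B/BL the 26-bit
 * count is split: bits [15:0] land in slot k16 (insn bits [25:10]) and
 * bits [25:16] in slot d10 (insn bits [9:0]).
 */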
#include "tcg-insn-defs.c.inc"
static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Baseline LoongArch only has the full barrier, unfortunately.  */
    tcg_out_opc_dbar(s, 0);
}
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        /*
         * Conventional register-register move used in LoongArch is
         * `or dst, src, zero`.
         */
        tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
/* Loads a 32-bit immediate into rd, sign-extended.  */
static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val)
{
    tcg_target_long lo = sextreg(val, 0, 12);
    tcg_target_long hi12 = sextreg(val, 12, 20);

    /* Single-instruction cases.  */
    if (hi12 == 0) {
        /* val fits in uimm12: ori rd, zero, val */
        tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val);
        return;
    }
    if (hi12 == sextreg(lo, 12, 20)) {
        /* val fits in simm12: addi.w rd, zero, val */
        tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val);
        return;
    }

    /* High bits must be set; load with lu12i.w + optional ori.  */
    tcg_out_opc_lu12i_w(s, rd, hi12);
    if (lo != 0) {
        tcg_out_opc_ori(s, rd, rd, lo & 0xfff);
    }
}
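
/*
 * For instance (illustrative): val = 0x12345678 has lo == 0x678 and
 * hi12 == 0x12345, giving the pair
 *     lu12i.w  rd, 0x12345      # rd = 0x12345000
 *     ori      rd, rd, 0x678    # rd = 0x12345678
 * whereas val = 0xfff takes the single ori path and val = -16 the
 * single addi.w path.
 */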
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    /*
     * LoongArch conventionally loads 64-bit immediates in at most 4 steps,
     * with dedicated instructions for filling the respective bitfields
     * below:
     *
     *        6                   5                   4               3
     *  3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
     * +-----------------------+---------------------------------------+...
     * |          hi52         |                  hi32                 |
     * +-----------------------+---------------------------------------+...
     *        3                   2                   1
     *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------------+-------------------------+
     * ...|                 hi12                |            lo           |
     * ...+-------------------------------------+-------------------------+
     *
     * Check if val belongs to one of the several fast cases, before falling
     * back to the slow path.
     */
    intptr_t pc_offset;
    tcg_target_long val_lo, val_hi, pc_hi, offset_hi;
    tcg_target_long hi12, hi32, hi52;

    /* Value fits in signed i32.  */
    if (type == TCG_TYPE_I32 || val == (int32_t)val) {
        tcg_out_movi_i32(s, rd, val);
        return;
    }

    /* PC-relative cases.  */
    pc_offset = tcg_pcrel_diff(s, (void *)val);
    if (pc_offset == sextreg(pc_offset, 0, 22) && (pc_offset & 3) == 0) {
        /* Single pcaddu2i.  */
        tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2);
        return;
    }

    if (pc_offset == (int32_t)pc_offset) {
        /* Offset within 32 bits; load with pcalau12i + ori.  */
        val_lo = sextreg(val, 0, 12);
        val_hi = val >> 12;
        pc_hi = (val - pc_offset) >> 12;
        offset_hi = val_hi - pc_hi;

        tcg_debug_assert(offset_hi == sextreg(offset_hi, 0, 20));
        tcg_out_opc_pcalau12i(s, rd, offset_hi);
        if (val_lo != 0) {
            tcg_out_opc_ori(s, rd, rd, val_lo & 0xfff);
        }
        return;
    }

    hi12 = sextreg(val, 12, 20);
    hi32 = sextreg(val, 32, 20);
    hi52 = sextreg(val, 52, 12);

    /* Single cu52i.d case.  */
    if ((hi52 != 0) && (ctz64(val) >= 52)) {
        tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52);
        return;
    }

    /* Slow path.  Initialize the low 32 bits, then concatenate high bits.  */
    tcg_out_movi_i32(s, rd, val);

    /* Load hi32 and hi52 explicitly when they are unexpected values.  */
    if (hi32 != sextreg(hi12, 20, 20)) {
        tcg_out_opc_cu32i_d(s, rd, hi32);
    }
    if (hi52 != sextreg(hi32, 20, 12)) {
        tcg_out_opc_cu52i_d(s, rd, rd, hi52);
    }
}
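
/*
 * Worked example (illustrative): val = 0x123456789abcdef0 loads as
 *     lu12i.w  rd, 0x9abcd      # rd = 0xffffffff9abcd000 (sign-extended)
 *     ori      rd, rd, 0xef0    # rd = 0xffffffff9abcdef0
 *     cu32i.d  rd, 0x45678      # rd = 0x000456789abcdef0
 *     cu52i.d  rd, rd, 0x123    # rd = 0x123456789abcdef0
 * The last two steps are emitted only because hi32 and hi52 differ from
 * what sign-extending the lower fields would already produce.
 */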
static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd,
                         TCGReg rs, tcg_target_long imm)
{
    tcg_target_long lo12 = sextreg(imm, 0, 12);
    tcg_target_long hi16 = sextreg(imm - lo12, 16, 16);

    /*
     * Note that there's a hole in between hi16 and lo12:
     *
     *       3                   2                   1                   0
     *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------+-------+-----------------------+
     * ...|             hi16              |   x   |          lo12         |
     * ...+-------------------------------+-------+-----------------------+
     *
     * For bits within that hole, it's more efficient to use LU12I and ADD.
     */
    if (imm == (hi16 << 16) + lo12) {
        if (hi16) {
            tcg_out_opc_addu16i_d(s, rd, rs, hi16);
            rs = rd;
        }
        if (lo12) {
            if (type == TCG_TYPE_I32) {
                tcg_out_opc_addi_w(s, rd, rs, lo12);
            } else {
                tcg_out_opc_addi_d(s, rd, rs, lo12);
            }
        } else if (rd != rs) {
            tcg_out_mov(s, type, rd, rs);
        }
    } else {
        tcg_out_movi(s, type, TCG_REG_TMP0, imm);
        if (type == TCG_TYPE_I32) {
            tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0);
        } else {
            tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0);
        }
    }
}
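
/*
 * Examples (illustrative): imm = 0x20001 has hi16 = 2 and lo12 = 1 with
 * (2 << 16) + 1 == 0x20001, so it becomes
 *     addu16i.d  rd, rs, 2
 *     addi.d     rd, rd, 1
 * while imm = 0x8000 has lo12 = 0 and hi16 = 0 (bit 15 falls into the
 * hole), forcing the movi + add fallback.
 */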
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}
static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_andi(s, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_sext_b(s, ret, arg);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_sext_h(s, ret, arg);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_addi_w(s, ret, arg, 0);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32s(s, ret, arg);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32u(s, ret, arg);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32s(s, ret, arg);
}
static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc,
                           TCGReg a0, TCGReg a1, TCGReg a2,
                           bool c2, bool is_32bit)
{
    if (c2) {
        /*
         * Fast path: semantics already satisfied due to constraint and
         * insn behavior, single instruction is enough.
         */
        tcg_debug_assert(a2 == (is_32bit ? 32 : 64));
        /* all clz/ctz insns belong to DJ-format */
        tcg_out32(s, encode_dj_insn(opc, a0, a1));
        return;
    }

    tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1));
    /* a0 = a1 ? REG_TMP0 : a2 */
    tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
    tcg_out_opc_masknez(s, a0, a2, a1);
    tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0);
}
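
/*
 * The masking pair above is the LoongArch branchless select idiom:
 *     maskeqz rd, rj, rk   # rd = (rk != 0) ? rj : 0
 *     masknez rd, rj, rk   # rd = (rk == 0) ? rj : 0
 * OR-ing the two partial results gives a0 = a1 ? TMP0 : a2 without a
 * branch; tcg_out_movcond() below builds on the same pattern.
 */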
#define SETCOND_INV    TCG_TARGET_NB_REGS
#define SETCOND_NEZ    (SETCOND_INV << 1)
#define SETCOND_FLAGS  (SETCOND_INV | SETCOND_NEZ)
static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
                               TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int flags = 0;

    switch (cond) {
    case TCG_COND_EQ:    /* -> NE  */
    case TCG_COND_GE:    /* -> LT  */
    case TCG_COND_GEU:   /* -> LTU */
    case TCG_COND_GT:    /* -> LE  */
    case TCG_COND_GTU:   /* -> LEU */
        cond = tcg_invert_cond(cond);
        flags ^= SETCOND_INV;
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_LEU:
        /*
         * If we have a constant input, the most efficient way to implement
         * LE is by adding 1 and using LT.  Watch out for wrap around for LEU.
         * We don't need to care for this for LE because the constant input
         * is still constrained to int32_t, and INT32_MAX+1 is representable
         * in the 64-bit temporary register.
         */
        if (c2) {
            if (cond == TCG_COND_LEU) {
                /* unsigned <= -1 is true */
                if (arg2 == -1) {
                    tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
                    return ret;
                }
                cond = TCG_COND_LTU;
            } else {
                cond = TCG_COND_LT;
            }
            arg2 += 1;
        } else {
            TCGReg tmp = arg2;
            cond = tcg_swap_cond(cond);     /* LE -> GE */
            cond = tcg_invert_cond(cond);   /* GE -> LT */
            flags ^= SETCOND_INV;
            arg2 = arg1;
            arg1 = tmp;
        }
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_NE:
        flags |= SETCOND_NEZ;
        if (!c2) {
            tcg_out_opc_xor(s, ret, arg1, arg2);
        } else if (arg2 == 0) {
            ret = arg1;
        } else if (arg2 >= 0 && arg2 <= 0xfff) {
            tcg_out_opc_xori(s, ret, arg1, arg2);
        } else {
            tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2);
        }
        break;

    case TCG_COND_LT:
    case TCG_COND_LTU:
        if (c2) {
            if (arg2 >= -0x800 && arg2 <= 0x7ff) {
                if (cond == TCG_COND_LT) {
                    tcg_out_opc_slti(s, ret, arg1, arg2);
                } else {
                    tcg_out_opc_sltui(s, ret, arg1, arg2);
                }
                break;
            }
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
            arg2 = TCG_REG_TMP0;
        }
        if (cond == TCG_COND_LT) {
            tcg_out_opc_slt(s, ret, arg1, arg2);
        } else {
            tcg_out_opc_sltu(s, ret, arg1, arg2);
        }
        break;

    default:
        g_assert_not_reached();
    }

    return ret | flags;
}
static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);

    if (tmpflags != ret) {
        TCGReg tmp = tmpflags & ~SETCOND_FLAGS;

        switch (tmpflags & SETCOND_FLAGS) {
        case SETCOND_INV:
            /* Intermediate result is boolean: simply invert. */
            tcg_out_opc_xori(s, ret, tmp, 1);
            break;
        case SETCOND_NEZ:
            /* Intermediate result is zero/non-zero: test != 0. */
            tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
            break;
        case SETCOND_NEZ | SETCOND_INV:
            /* Intermediate result is zero/non-zero: test == 0. */
            tcg_out_opc_sltui(s, ret, tmp, 1);
            break;
        default:
            g_assert_not_reached();
        }
    }
}
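
/*
 * Lowering example (illustrative): setcond GEU ret, a, b inverts to LTU
 * with SETCOND_INV set, emits `sltu ret, a, b`, and since the
 * intermediate is already a clean boolean, finishes with
 * `xori ret, ret, 1`.  A NE against a large constant instead computes
 * ret = a - c and lets the SETCOND_NEZ fixup emit `sltu ret, zero, ret`.
 */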
static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg c1, tcg_target_long c2, bool const2,
                            TCGReg v1, TCGReg v2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2);
    TCGReg t;

    /* Standardize the test below to t != 0. */
    if (tmpflags & SETCOND_INV) {
        t = v1, v1 = v2, v2 = t;
    }

    t = tmpflags & ~SETCOND_FLAGS;
    if (v1 == TCG_REG_ZERO) {
        tcg_out_opc_masknez(s, ret, v2, t);
    } else if (v2 == TCG_REG_ZERO) {
        tcg_out_opc_maskeqz(s, ret, v1, t);
    } else {
        tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */
        tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */
        tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2);
    }
}
static const struct {
    LoongArchInsn op;
    bool swap;
} tcg_brcond_to_loongarch[] = {
    [TCG_COND_EQ]  = { OPC_BEQ,  false },
    [TCG_COND_NE]  = { OPC_BNE,  false },
    [TCG_COND_LT]  = { OPC_BGT,  true  },
    [TCG_COND_GE]  = { OPC_BLE,  true  },
    [TCG_COND_LE]  = { OPC_BLE,  false },
    [TCG_COND_GT]  = { OPC_BGT,  false },
    [TCG_COND_LTU] = { OPC_BGTU, true  },
    [TCG_COND_GEU] = { OPC_BLEU, true  },
    [TCG_COND_LEU] = { OPC_BLEU, false },
    [TCG_COND_GTU] = { OPC_BGTU, false }
};
static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    LoongArchInsn op = tcg_brcond_to_loongarch[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_loongarch[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    /* all conditional branch insns belong to DJSk16-format */
    tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0);
    tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0));
}
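
/*
 * Worked example (illustrative): brcond LT arg1, arg2 selects OPC_BGT
 * with swap = true, so after swapping the emitted insn is
 * `bgt arg2, arg1`, i.e. branch if arg2 > arg1, which is exactly
 * arg1 < arg2.  The swap flag keeps the table down to the four base
 * compare encodings.
 */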
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);

    tcg_debug_assert((offset & 3) == 0);
    if (offset == sextreg(offset, 0, 28)) {
        /* short jump: +/- 256MiB */
        if (tail) {
            tcg_out_opc_b(s, offset >> 2);
        } else {
            tcg_out_opc_bl(s, offset >> 2);
        }
    } else if (offset == sextreg(offset, 0, 38)) {
        /* long jump: +/- 256GiB */
        tcg_target_long lo = sextreg(offset, 0, 18);
        tcg_target_long hi = offset - lo;
        tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    } else {
        /* far jump: 64-bit */
        tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18);
        tcg_target_long hi = (tcg_target_long)arg - lo;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    }
}
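
/*
 * In the long-jump tier, jirl's 16-bit immediate is shifted left by 2 so
 * it covers the low 18 bits of the offset, while pcaddu18i supplies bits
 * [37:18] (a 20-bit immediate shifted left by 18).  Since lo is
 * sign-extracted, hi + lo reassembles the offset exactly; the far tier
 * applies the same jirl split to the absolute address instead.
 */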
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, arg, false);
}
static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_LD_B:
    case OPC_LD_BU:
    case OPC_LD_H:
    case OPC_LD_HU:
    case OPC_LD_W:
    case OPC_LD_WU:
    case OPC_LD_D:
    case OPC_ST_B:
    case OPC_ST_H:
    case OPC_ST_W:
    case OPC_ST_D:
        tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12));
        break;
    default:
        g_assert_not_reached();
    }
}
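
/*
 * Example (illustrative): `ld.d data, addr, 0x12345` exceeds the simm12
 * range, so with imm12 = 0x345 the emitted sequence is
 *     lu12i.w  TMP2, 0x12       # TMP2 = 0x12000 (via tcg_out_movi)
 *     add.d    TMP2, TMP2, addr
 *     ld.d     data, TMP2, 0x345
 */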
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is_32bit = type == TCG_TYPE_I32;
    tcg_out_ldst(s, is_32bit ? OPC_LD_W : OPC_LD_D, arg, arg1, arg2);
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is_32bit = type == TCG_TYPE_I32;
    tcg_out_ldst(s, is_32bit ? OPC_ST_W : OPC_ST_D, arg, arg1, arg2);
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}
/*
 * Load/store helpers for SoftMMU, and qemu_ld/st implementations
 */

static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_b(s, 0);
    return reloc_br_sd10k16(s->code_ptr - 1, target);
}
static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 1, .tmp = { TCG_REG_TMP0 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_ld_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE], false);
    tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param);
    return tcg_out_goto(s, l->raddr);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_st_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false);
    return tcg_out_goto(s, l->raddr);
}
typedef struct {
    TCGReg base;
    TCGReg index;
    TCGAtomAlign aa;
} HostAddress;

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return false;
}
/*
 * For softmmu, perform the TLB load and compare.
 * For useronly, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @h with the host address for the fast path.
 */
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
{
    TCGType addr_type = s->addr_type;
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    unsigned a_bits;

    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    a_bits = h->aa.align;

#ifdef CONFIG_SOFTMMU
    unsigned s_bits = opc & MO_SIZE;
    int mem_index = get_mmuidx(oi);
    int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);

    ldst = new_ldst_label(s);
    ldst->is_ld = is_ld;
    ldst->oi = oi;
    ldst->addrlo_reg = addr_reg;

    QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
    QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);

    tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg,
                       s->page_bits - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
    tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
               is_ld ? offsetof(CPUTLBEntry, addr_read)
                     : offsetof(CPUTLBEntry, addr_write));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
               offsetof(CPUTLBEntry, addend));

    /*
     * For aligned accesses, we check the first byte and include the
     * alignment bits within the address.  For unaligned accesses, we
     * check that we don't cross pages using the address of the last
     * byte of the access.
     */
    if (a_bits < s_bits) {
        unsigned a_mask = (1u << a_bits) - 1;
        unsigned s_mask = (1u << s_bits) - 1;
        tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask);
    } else {
        tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg);
    }
    tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO,
                          a_bits, s->page_bits - 1);

    /* Compare masked address with the TLB entry.  */
    ldst->label_ptr[0] = s->code_ptr;
    tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);

    h->index = TCG_REG_TMP2;
#else
    if (a_bits) {
        ldst = new_ldst_label(s);

        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addrlo_reg = addr_reg;

        /*
         * Without micro-architecture details, we don't know which of
         * bstrpick or andi is faster, so use bstrpick as it's not
         * constrained by imm field width.  (Not that alignments >= 2^12
         * are going to happen any time soon.)
         */
        tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);

        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);
    }

    h->index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
#endif

    if (addr_type == TCG_TYPE_I32) {
        h->base = TCG_REG_TMP0;
        tcg_out_ext32u(s, h->base, addr_reg);
    } else {
        h->base = addr_reg;
    }

    return ldst;
}
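
/*
 * For a softmmu access the fast path emitted above is roughly
 * (illustrative):
 *     srli.d    TMP2, addr, page_bits - CPU_TLB_ENTRY_BITS
 *     and       TMP2, TMP2, TMP0         # TLB mask -> entry offset
 *     add.d     TMP2, TMP2, TMP1         # + table base
 *     ld.d      TMP0, TMP2, <comparator> # addr_read/addr_write
 *     ld.d      TMP2, TMP2, <addend>
 *     <copy/adjust addr into TMP1, clearing page-offset bits above a_bits>
 *     bne       TMP0, TMP1, <slow path>
 * leaving h->index = TMP2, so the actual access is a single ldx/stx of
 * guest address + host addend.
 */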
static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type,
                                    TCGReg rd, HostAddress h)
{
    /* Byte swapping is left to middle-end expansion.  */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_opc_ldx_bu(s, rd, h.base, h.index);
        break;
    case MO_SB:
        tcg_out_opc_ldx_b(s, rd, h.base, h.index);
        break;
    case MO_UW:
        tcg_out_opc_ldx_hu(s, rd, h.base, h.index);
        break;
    case MO_SW:
        tcg_out_opc_ldx_h(s, rd, h.base, h.index);
        break;
    case MO_UL:
        if (type == TCG_TYPE_I64) {
            tcg_out_opc_ldx_wu(s, rd, h.base, h.index);
            break;
        }
        /* fallthrough */
    case MO_SL:
        tcg_out_opc_ldx_w(s, rd, h.base, h.index);
        break;
    case MO_UQ:
        tcg_out_opc_ldx_d(s, rd, h.base, h.index);
        break;
    default:
        g_assert_not_reached();
    }
}
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
    tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}
static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
                                    TCGReg rd, HostAddress h)
{
    /* Byte swapping is left to middle-end expansion.  */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_opc_stx_b(s, rd, h.base, h.index);
        break;
    case MO_16:
        tcg_out_opc_stx_h(s, rd, h.base, h.index);
        break;
    case MO_32:
        tcg_out_opc_stx_w(s, rd, h.base, h.index);
        break;
    case MO_64:
        tcg_out_opc_stx_d(s, rd, h.base, h.index);
        break;
    default:
        g_assert_not_reached();
    }
}
static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, false);
    tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}
static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    /* Reuse the zeroing that exists for goto_ptr.  */
    if (a0 == 0) {
        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
        tcg_out_call_int(s, tb_ret_addr, true);
    }
}
static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /*
     * Direct branch, or load indirect address, to be patched
     * by tb_target_set_jmp_target.  Check indirect load offset
     * in range early, regardless of direct branch distance,
     * via assert within tcg_out_opc_pcaddu2i.
     */
    uintptr_t i_addr = get_jmp_target_addr(s, which);
    intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr);

    set_jmp_insn_offset(s, which);
    tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2);

    /* Finish the load and indirect branch.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
    set_jmp_reset_offset(s, which);
}
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    uintptr_t d_addr = tb->jmp_target_addr[n];
    ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2;
    tcg_insn_unit insn;

    /* Either directly branch, or load slot address for indirect branch. */
    if (d_disp == sextreg(d_disp, 0, 26)) {
        insn = encode_sd10k16_insn(OPC_B, d_disp);
    } else {
        uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n];
        intptr_t i_disp = i_addr - jmp_rx;
        insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2);
    }

    qatomic_set((tcg_insn_unit *)jmp_rw, insn);
    flush_idcache_range(jmp_rx, jmp_rw, 4);
}
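
/*
 * The patched slot thus toggles between two single-insn forms
 * (illustrative):
 *     b         <tb target>          # direct, when the target is in range
 *     pcaddu2i  TMP0, i_disp >> 2    # otherwise: address of the jump slot
 * The pcaddu2i form falls through into the ld.d/jirl pair laid down by
 * tcg_out_goto_tb(), turning the jump into an indirect branch.
 */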
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_goto_ptr:
        tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0),
                      0);
        tcg_out_opc_b(s, 0);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;
    case INDEX_op_extrh_i64_i32:
        tcg_out_opc_srai_d(s, a0, a1, 32);
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO);
        break;

    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        if (c2) {
            tcg_out_opc_ori(s, a0, a1, a2);
            tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO);
        } else {
            tcg_out_opc_nor(s, a0, a1, a2);
        }
        break;

    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
        if (c2) {
            /* guaranteed to fit due to constraint */
            tcg_out_opc_andi(s, a0, a1, ~a2);
        } else {
            tcg_out_opc_andn(s, a0, a1, a2);
        }
        break;

    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
        if (c2) {
            /* guaranteed to fit due to constraint */
            tcg_out_opc_ori(s, a0, a1, ~a2);
        } else {
            tcg_out_opc_orn(s, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_andi(s, a0, a1, a2);
        } else {
            tcg_out_opc_and(s, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_ori(s, a0, a1, a2);
        } else {
            tcg_out_opc_or(s, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_xori(s, a0, a1, a2);
        } else {
            tcg_out_opc_xor(s, a0, a1, a2);
        }
        break;
    case INDEX_op_extract_i32:
        tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1);
        break;
    case INDEX_op_extract_i64:
        tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1);
        break;
    case INDEX_op_deposit_i64:
        tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
        break;
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        tcg_out_opc_revb_2h(s, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0);
        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext16u(s, a0, a0);
        }
        break;

    case INDEX_op_bswap32_i32:
        /* All 32-bit values are computed sign-extended in the register.  */
        a2 = TCG_BSWAP_OS;
        /* fallthrough */
    case INDEX_op_bswap32_i64:
        tcg_out_opc_revb_2w(s, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_ext32s(s, a0, a0);
        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext32u(s, a0, a0);
        }
        break;

    case INDEX_op_bswap64_i64:
        tcg_out_opc_revb_d(s, a0, a1);
        break;
    case INDEX_op_clz_i32:
        tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true);
        break;
    case INDEX_op_clz_i64:
        tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false);
        break;

    case INDEX_op_ctz_i32:
        tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true);
        break;
    case INDEX_op_ctz_i64:
        tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
        break;
    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_sll_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_sll_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_srl_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_srl_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_sra_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_sra_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_rotl_i32:
        /* transform into equivalent rotr/rotri */
        if (c2) {
            tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f);
        } else {
            tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0);
        }
        break;
    case INDEX_op_rotl_i64:
        /* transform into equivalent rotr/rotri */
        if (c2) {
            tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f);
        } else {
            tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0);
        }
        break;

    case INDEX_op_rotr_i32:
        if (c2) {
            tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_rotr_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_rotr_i64:
        if (c2) {
            tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_rotr_d(s, a0, a1, a2);
        }
        break;
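
    /*
     * Example (illustrative): rotl_i32 a0, a1, 8 with a constant count
     * becomes `rotri.w a0, a1, 24`, since rotating left by 8 equals
     * rotating right by 32 - 8; a register count is negated with
     * `sub.w TMP0, zero, a2` for the same effect modulo the word size.
     */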
    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2);
        } else {
            tcg_out_opc_add_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2);
        } else {
            tcg_out_opc_add_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2);
        } else {
            tcg_out_opc_sub_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2);
        } else {
            tcg_out_opc_sub_d(s, a0, a1, a2);
        }
        break;
    case INDEX_op_mul_i32:
        tcg_out_opc_mul_w(s, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_mul_d(s, a0, a1, a2);
        break;

    case INDEX_op_mulsh_i32:
        tcg_out_opc_mulh_w(s, a0, a1, a2);
        break;
    case INDEX_op_mulsh_i64:
        tcg_out_opc_mulh_d(s, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
        tcg_out_opc_mulh_wu(s, a0, a1, a2);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_opc_mulh_du(s, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_div_w(s, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_div_d(s, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_div_wu(s, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_div_du(s, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_mod_w(s, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_mod_d(s, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_mod_wu(s, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_mod_du(s, a0, a1, a2);
        break;
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2, c2);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
        break;
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LD_H, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LD_W, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD_D, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_ST_B, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_ST_H, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_ST_W, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_ST_D, a0, a1, a2);
        break;
    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        return C_O0_I2(rZ, r);
    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rZ);

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        return C_O1_I1(r, r);
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
        /*
         * LoongArch insns for these ops don't have reg-imm forms, but we
         * can express using andi/ori if ~constant satisfies
         * TCG_CT_CONST_U12.
         */
        return C_O1_I2(r, r, rC);
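
        /*
         * Key to the constant constraints used here, matching the
         * TCG_CT_CONST_* flags near the top of this file (the letter
         * mapping lives in the target's constraint-string header, cited
         * here from memory): Z is the constant zero, U a zero-extended
         * 12-bit immediate, C a constant whose complement fits U12, W
         * the operation's word size for clz/ctz, and J a signed 32-bit
         * immediate.
         */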
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
        return C_O1_I2(r, r, ri);

    case INDEX_op_add_i32:
        return C_O1_I2(r, r, ri);
    case INDEX_op_add_i64:
        return C_O1_I2(r, r, rJ);

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        /* LoongArch reg-imm bitops have their imms ZERO-extended */
        return C_O1_I2(r, r, rU);

    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, rW);

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        /* Must deposit into the same register as input */
        return C_O1_I2(r, 0, rZ);

    case INDEX_op_sub_i32:
    case INDEX_op_setcond_i32:
        return C_O1_I2(r, rZ, ri);
    case INDEX_op_sub_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rJ);
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_mulsh_i32:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i32:
    case INDEX_op_muluh_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
        return C_O1_I2(r, rZ, rZ);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, rZ, rJ, rZ, rZ);

    default:
        g_assert_not_reached();
    }
}
static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,     /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_RA,     /* should be last for ABI compliance */
};
/* Stack frame parameters.  */
#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

/* We're expecting to be able to use an immediate for frame allocation. */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
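
/*
 * Illustrative numbers, assuming the usual TCG_STATIC_CALL_ARGS_SIZE of
 * 128, CPU_TEMP_BUF_NLONGS of 128 and 16-byte stack alignment: the 11
 * callee-saved registers give SAVE_SIZE = 88 and TEMP_SIZE = 1024, so
 * FRAME_SIZE = (128 + 1024 + 88 + 15) & -16 = 1248, comfortably below
 * the 0x7ff limit asserted above.
 */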
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue */
    tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

#if !defined(CONFIG_SOFTMMU)
    if (USE_GUEST_BASE) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* Call generated code */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr.  Set return value to 0 */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0);
}
static void tcg_target_init(TCGContext *s)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);

    /* Server and desktop class cpus have UAL; embedded cpus do not. */
    if (!(hwcap & HWCAP_LOONGARCH_UAL)) {
        error_report("TCG: unaligned access support required; exiting");
        exit(EXIT_FAILURE);
    }

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = ALL_GENERAL_REGS;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED);
}
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_LOONGARCH
static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ...  */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 23, 11,                  /* DW_CFA_offset, s0, -88 */
        0x80 + 24, 10,                  /* DW_CFA_offset, s1, -80 */
        0x80 + 25, 9,                   /* DW_CFA_offset, s2, -72 */
        0x80 + 26, 8,                   /* DW_CFA_offset, s3, -64 */
        0x80 + 27, 7,                   /* DW_CFA_offset, s4, -56 */
        0x80 + 28, 6,                   /* DW_CFA_offset, s5, -48 */
        0x80 + 29, 5,                   /* DW_CFA_offset, s6, -40 */
        0x80 + 30, 4,                   /* DW_CFA_offset, s7, -32 */
        0x80 + 31, 3,                   /* DW_CFA_offset, s8, -24 */
        0x80 + 22, 2,                   /* DW_CFA_offset, s9, -16 */
        0x80 + 1,  1,                   /* DW_CFA_offset, ra, -8 */
    }
};
void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}