#error "unsupported code generation mode"
#endif
+#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"
#ifdef CONFIG_DEBUG_TCG
#define TCG_CT_CONST_S13 0x200
#define TCG_CT_CONST_ZERO 0x400
-/*
- * For softmmu, we need to avoid conflicts with the first 3
- * argument registers to perform the tlb lookup, and to call
- * the helper function.
- */
-#ifdef CONFIG_SOFTMMU
-#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3)
-#else
-#define SOFTMMU_RESERVE_REGS 0
-#endif
-#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
-#define ALL_QLDST_REGS (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)
+#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
-/* Define some temporary registers. T2 is used for constant generation. */
+/* Define some temporary registers. T3 is used for constant generation. */
#define TCG_REG_T1 TCG_REG_G1
-#define TCG_REG_T2 TCG_REG_O7
+#define TCG_REG_T2 TCG_REG_G2
+#define TCG_REG_T3 TCG_REG_O7
#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
TCG_REG_I4,
TCG_REG_I5,
- TCG_REG_G2,
TCG_REG_G3,
TCG_REG_G4,
TCG_REG_G5,
}
/* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
{
if (ct & TCG_CT_CONST) {
return 1;
tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}
-static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
+/* A 13-bit constant sign-extended to 64 bits. */
+static void tcg_out_movi_s13(TCGContext *s, TCGReg ret, int32_t arg)
{
tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}
-static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg)
+/* A 32-bit constant sign-extended to 64 bits. */
+static void tcg_out_movi_s32(TCGContext *s, TCGReg ret, int32_t arg)
{
- if (check_fit_i32(arg, 13)) {
- /* A 13-bit constant sign-extended to 64-bits. */
- tcg_out_movi_imm13(s, ret, arg);
- } else {
- /* A 32-bit constant zero-extended to 64 bits. */
- tcg_out_sethi(s, ret, arg);
- if (arg & 0x3ff) {
- tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
- }
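+    /*
+     * sethi(~arg) sets bits 31..10 of the inverted constant, leaving bits
+     * 9..0 and 63..32 zero.  Xor-ing with the sign-extended 13-bit immediate
+     * restores arg in the low word and sets bits 63..32, i.e. the sign
+     * extension of a negative arg (the callers only reach this with arg < 0).
+     */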
+ tcg_out_sethi(s, ret, ~arg);
+ tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
+}
+
+/* A 32-bit constant zero-extended to 64 bits. */
+static void tcg_out_movi_u32(TCGContext *s, TCGReg ret, uint32_t arg)
+{
+ tcg_out_sethi(s, ret, arg);
+ if (arg & 0x3ff) {
+ tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
}
}
tcg_target_long hi, lo = (int32_t)arg;
tcg_target_long test, lsb;
- /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
- if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
- tcg_out_movi_imm32(s, ret, arg);
+ /* A 13-bit constant sign-extended to 64-bits. */
+ if (check_fit_tl(arg, 13)) {
+ tcg_out_movi_s13(s, ret, arg);
return;
}
- /* A 13-bit constant sign-extended to 64-bits. */
- if (check_fit_tl(arg, 13)) {
- tcg_out_movi_imm13(s, ret, arg);
+ /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
+ if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
+ tcg_out_movi_u32(s, ret, arg);
return;
}
/* A 32-bit constant sign-extended to 64-bits. */
if (arg == lo) {
- tcg_out_sethi(s, ret, ~arg);
- tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
+ tcg_out_movi_s32(s, ret, arg);
return;
}
/* A 64-bit constant decomposed into 2 32-bit pieces. */
if (check_fit_i32(lo, 13)) {
hi = (arg - lo) >> 32;
- tcg_out_movi_imm32(s, ret, hi);
+ tcg_out_movi_u32(s, ret, hi);
tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
} else {
hi = arg >> 32;
- tcg_out_movi_imm32(s, ret, hi);
- tcg_out_movi_imm32(s, scratch, lo);
+ tcg_out_movi_u32(s, ret, hi);
+ tcg_out_movi_u32(s, scratch, lo);
tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
}
static void tcg_out_movi(TCGContext *s, TCGType type,
TCGReg ret, tcg_target_long arg)
{
- tcg_debug_assert(ret != TCG_REG_T2);
- tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2);
+ tcg_debug_assert(ret != TCG_REG_T3);
+ tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T3);
}
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
g_assert_not_reached();
}
+static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
+{
+ g_assert_not_reached();
+}
+
+static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
+{
+ tcg_out_arithi(s, rd, rs, 0xff, ARITH_AND);
+}
+
+static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
+{
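+    /* The 32-bit srl zero-extends, so the shift pair leaves only bits 15..0. */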
+ tcg_out_arithi(s, rd, rs, 16, SHIFT_SLL);
+ tcg_out_arithi(s, rd, rd, 16, SHIFT_SRL);
+}
+
+static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
+{
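+    /* A 32-bit sra with count 0 sign-extends the low 32 bits of rs into rd. */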
+ tcg_out_arithi(s, rd, rs, 0, SHIFT_SRA);
+}
+
+static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
+{
+ tcg_out_arithi(s, rd, rs, 0, SHIFT_SRL);
+}
+
+static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
+{
+ tcg_out_ext32s(s, rd, rs);
+}
+
+static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
+{
+ tcg_out_ext32u(s, rd, rs);
+}
+
+static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
+{
+ return false;
+}
+
static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
tcg_target_long imm)
{
}
static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
- TCGReg c1, int32_t c2, int c2const)
+ TCGReg c1, int32_t c2, int c2const, bool neg)
{
/* For 32-bit comparisons, we can play games with ADDC/SUBC. */
switch (cond) {
default:
tcg_out_cmp(s, c1, c2, c2const);
- tcg_out_movi_imm13(s, ret, 0);
- tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
+ tcg_out_movi_s13(s, ret, 0);
+ tcg_out_movcc(s, cond, MOVCC_ICC, ret, neg ? -1 : 1, 1);
return;
}
tcg_out_cmp(s, c1, c2, c2const);
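+    /* The compare leaves carry C = (c1 <u c2); derive the result from C alone. */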
if (cond == TCG_COND_LTU) {
- tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
+ if (neg) {
+ /* 0 - 0 - C = -C = (C ? -1 : 0) */
+ tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_SUBC);
+ } else {
+ /* 0 + 0 + C = C = (C ? 1 : 0) */
+ tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
+ }
} else {
- tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
+ if (neg) {
+ /* 0 + -1 + C = C - 1 = (C ? 0 : -1) */
+ tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_ADDC);
+ } else {
+ /* 0 - -1 - C = 1 - C = (C ? 0 : 1) */
+ tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
+ }
}
}
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
- TCGReg c1, int32_t c2, int c2const)
+ TCGReg c1, int32_t c2, int c2const, bool neg)
{
- if (use_vis3_instructions) {
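+    /*
+     * The VIS3 path below materializes 0/1 from the carry, so it cannot
+     * produce the -1 result that negsetcond requires.
+     */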
+ if (use_vis3_instructions && !neg) {
switch (cond) {
case TCG_COND_NE:
if (c2 != 0) {
/* For 64-bit signed comparisons vs zero, we can avoid the compare
if the input does not overlap the output. */
if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
- tcg_out_movi_imm13(s, ret, 0);
- tcg_out_movr(s, cond, ret, c1, 1, 1);
+ tcg_out_movi_s13(s, ret, 0);
+ tcg_out_movr(s, cond, ret, c1, neg ? -1 : 1, 1);
} else {
tcg_out_cmp(s, c1, c2, c2const);
- tcg_out_movi_imm13(s, ret, 0);
- tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
+ tcg_out_movi_s13(s, ret, 0);
+ tcg_out_movcc(s, cond, MOVCC_XCC, ret, neg ? -1 : 1, 1);
}
}
if (use_vis3_instructions && !is_sub) {
/* Note that ADDXC doesn't accept immediates. */
if (bhconst && bh != 0) {
- tcg_out_movi_imm13(s, TCG_REG_T2, bh);
+ tcg_out_movi_s13(s, TCG_REG_T2, bh);
bh = TCG_REG_T2;
}
tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
* so the adjustment fits 12 bits.
*/
if (bhconst) {
- tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
+ tcg_out_movi_s13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
} else {
tcg_out_arithi(s, TCG_REG_T2, bh, 1,
is_sub ? ARITH_SUB : ARITH_ADD);
{
uintptr_t desti = (uintptr_t)dest;
- /* Be careful not to clobber %o7 for a tail call. */
tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
- desti & ~0xfff, in_prologue,
- tail_call ? TCG_REG_G2 : TCG_REG_O7);
+ desti & ~0xfff, in_prologue, TCG_REG_T2);
tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
TCG_REG_T1, desti & 0xfff, JMPL);
}
tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}
-#ifdef CONFIG_SOFTMMU
-static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1];
-static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1];
-
-static void emit_extend(TCGContext *s, TCGReg r, int op)
-{
- /* Emit zero extend of 8, 16 or 32 bit data as
- * required by the MO_* value op; do nothing for 64 bit.
- */
- switch (op & MO_SIZE) {
- case MO_8:
- tcg_out_arithi(s, r, r, 0xff, ARITH_AND);
- break;
- case MO_16:
- tcg_out_arithi(s, r, r, 16, SHIFT_SLL);
- tcg_out_arithi(s, r, r, 16, SHIFT_SRL);
- break;
- case MO_32:
- tcg_out_arith(s, r, r, 0, SHIFT_SRL);
- break;
- case MO_64:
- break;
- }
-}
-
-static void build_trampolines(TCGContext *s)
-{
- static void * const qemu_ld_helpers[] = {
- [MO_UB] = helper_ret_ldub_mmu,
- [MO_SB] = helper_ret_ldsb_mmu,
- [MO_LEUW] = helper_le_lduw_mmu,
- [MO_LESW] = helper_le_ldsw_mmu,
- [MO_LEUL] = helper_le_ldul_mmu,
- [MO_LEUQ] = helper_le_ldq_mmu,
- [MO_BEUW] = helper_be_lduw_mmu,
- [MO_BESW] = helper_be_ldsw_mmu,
- [MO_BEUL] = helper_be_ldul_mmu,
- [MO_BEUQ] = helper_be_ldq_mmu,
- };
- static void * const qemu_st_helpers[] = {
- [MO_UB] = helper_ret_stb_mmu,
- [MO_LEUW] = helper_le_stw_mmu,
- [MO_LEUL] = helper_le_stl_mmu,
- [MO_LEUQ] = helper_le_stq_mmu,
- [MO_BEUW] = helper_be_stw_mmu,
- [MO_BEUL] = helper_be_stl_mmu,
- [MO_BEUQ] = helper_be_stq_mmu,
- };
-
- int i;
-
- for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) {
- if (qemu_ld_helpers[i] == NULL) {
- continue;
- }
-
- /* May as well align the trampoline. */
- while ((uintptr_t)s->code_ptr & 15) {
- tcg_out_nop(s);
- }
- qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);
-
- /* Set the retaddr operand. */
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O3, TCG_REG_O7);
- /* Tail call. */
- tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true);
- /* delay slot -- set the env argument */
- tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
- }
-
- for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) {
- if (qemu_st_helpers[i] == NULL) {
- continue;
- }
-
- /* May as well align the trampoline. */
- while ((uintptr_t)s->code_ptr & 15) {
- tcg_out_nop(s);
- }
- qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);
-
- emit_extend(s, TCG_REG_O2, i);
-
- /* Set the retaddr operand. */
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O4, TCG_REG_O7);
-
- /* Tail call. */
- tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true);
- /* delay slot -- set the env argument */
- tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
- }
-}
-#else
-static const tcg_insn_unit *qemu_unalign_ld_trampoline;
-static const tcg_insn_unit *qemu_unalign_st_trampoline;
-
-static void build_trampolines(TCGContext *s)
-{
- for (int ld = 0; ld < 2; ++ld) {
- void *helper;
-
- while ((uintptr_t)s->code_ptr & 15) {
- tcg_out_nop(s);
- }
-
- if (ld) {
- helper = helper_unaligned_ld;
- qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr);
- } else {
- helper = helper_unaligned_st;
- qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr);
- }
-
- /* Tail call. */
- tcg_out_jmpl_const(s, helper, true, true);
- /* delay slot -- set the env argument */
- tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
- }
-}
-#endif
-
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
/* delay slot */
- tcg_out_movi_imm13(s, TCG_REG_O0, 0);
+ tcg_out_movi_s13(s, TCG_REG_O0, 0);
+}
- build_trampolines(s);
+static void tcg_out_tb_start(TCGContext *s)
+{
+ /* nothing to do */
}
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
}
}
-#if defined(CONFIG_SOFTMMU)
-
-/* We expect to use a 13-bit negative offset from ENV. */
-QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
-QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));
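+/* The common slow-path helper code may use T1 as a scratch register. */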
+static const TCGLdstHelperParam ldst_helper_param = {
+ .ntmp = 1, .tmp = { TCG_REG_T1 }
+};
-/* Perform the TLB load and compare.
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+{
+ MemOp opc = get_memop(lb->oi);
+ MemOp sgn;
- Inputs:
- ADDRLO and ADDRHI contain the possible two parts of the address.
+ if (!patch_reloc(lb->label_ptr[0], R_SPARC_WDISP19,
+ (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 0)) {
+ return false;
+ }
- MEM_INDEX and S_BITS are the memory context and log2 size of the load.
+ /* Use inline tcg_out_ext32s; otherwise let the helper sign-extend. */
+ sgn = (opc & MO_SIZE) < MO_32 ? MO_SIGN : 0;
- WHICH is the offset into the CPUTLBEntry structure of the slot to read.
- This should be offsetof addr_read or addr_write.
+ tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
+ tcg_out_call(s, qemu_ld_helpers[opc & (MO_SIZE | sgn)], NULL);
+ tcg_out_ld_helper_ret(s, lb, sgn, &ldst_helper_param);
- The result of the TLB comparison is in %[ix]cc. The sanitized address
- is in the returned register, maybe %o0. The TLB addend is in %o1. */
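+    /* Branch back to the fast path; the target is patched to lb->raddr below. */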
+ tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
+ return patch_reloc(s->code_ptr - 1, R_SPARC_WDISP19,
+ (intptr_t)lb->raddr, 0);
+}
-static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
- MemOp opc, int which)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
- int fast_off = TLB_MASK_TABLE_OFS(mem_index);
- int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
- int table_off = fast_off + offsetof(CPUTLBDescFast, table);
- const TCGReg r0 = TCG_REG_O0;
- const TCGReg r1 = TCG_REG_O1;
- const TCGReg r2 = TCG_REG_O2;
- unsigned s_bits = opc & MO_SIZE;
- unsigned a_bits = get_alignment_bits(opc);
- tcg_target_long compare_mask;
+ MemOp opc = get_memop(lb->oi);
- /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
- tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off);
- tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off);
-
- /* Extract the page index, shifted into place for tlb index. */
- tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
- SHIFT_SRL);
- tcg_out_arith(s, r2, r2, r0, ARITH_AND);
-
- /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2. */
- tcg_out_arith(s, r2, r2, r1, ARITH_ADD);
-
- /* Load the tlb comparator and the addend. */
- tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
- tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));
-
- /* Mask out the page offset, except for the required alignment.
- We don't support unaligned accesses. */
- if (a_bits < s_bits) {
- a_bits = s_bits;
+ if (!patch_reloc(lb->label_ptr[0], R_SPARC_WDISP19,
+ (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 0)) {
+ return false;
}
- compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
- if (check_fit_tl(compare_mask, 13)) {
- tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
- } else {
- tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
- tcg_out_arith(s, r2, addr, r2, ARITH_AND);
- }
- tcg_out_cmp(s, r0, r2, 0);
- /* If the guest address must be zero-extended, do so now. */
- if (TARGET_LONG_BITS == 32) {
- tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
- return r0;
- }
- return addr;
-}
-#endif /* CONFIG_SOFTMMU */
-
-static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
- [MO_UB] = LDUB,
- [MO_SB] = LDSB,
- [MO_UB | MO_LE] = LDUB,
- [MO_SB | MO_LE] = LDSB,
-
- [MO_BEUW] = LDUH,
- [MO_BESW] = LDSH,
- [MO_BEUL] = LDUW,
- [MO_BESL] = LDSW,
- [MO_BEUQ] = LDX,
- [MO_BESQ] = LDX,
-
- [MO_LEUW] = LDUH_LE,
- [MO_LESW] = LDSH_LE,
- [MO_LEUL] = LDUW_LE,
- [MO_LESL] = LDSW_LE,
- [MO_LEUQ] = LDX_LE,
- [MO_LESQ] = LDX_LE,
-};
+ tcg_out_st_helper_args(s, lb, &ldst_helper_param);
+ tcg_out_call(s, qemu_st_helpers[opc & MO_SIZE], NULL);
+
+ tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
+ return patch_reloc(s->code_ptr - 1, R_SPARC_WDISP19,
+ (intptr_t)lb->raddr, 0);
+}
-static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
- [MO_UB] = STB,
+typedef struct {
+ TCGReg base;
+ TCGReg index;
+ TCGAtomAlign aa;
+} HostAddress;
- [MO_BEUW] = STH,
- [MO_BEUL] = STW,
- [MO_BEUQ] = STX,
+bool tcg_target_has_memory_bswap(MemOp memop)
+{
+ return true;
+}
- [MO_LEUW] = STH_LE,
- [MO_LEUL] = STW_LE,
- [MO_LEUQ] = STX_LE,
-};
+/* We expect to use a 13-bit negative offset from ENV. */
+#define MIN_TLB_MASK_TABLE_OFS -(1 << 12)
-static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
- MemOpIdx oi, bool is_64)
+/*
+ * For softmmu, perform the TLB load and compare.
+ * For useronly, perform any required alignment tests.
+ * In both cases, return a TCGLabelQemuLdst structure if the slow path
+ * is required and fill in @h with the host address for the fast path.
+ */
+static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
+ TCGReg addr_reg, MemOpIdx oi,
+ bool is_ld)
{
- MemOp memop = get_memop(oi);
- tcg_insn_unit *label_ptr;
+ TCGType addr_type = s->addr_type;
+ TCGLabelQemuLdst *ldst = NULL;
+ MemOp opc = get_memop(oi);
+ MemOp s_bits = opc & MO_SIZE;
+ unsigned a_mask;
-#ifdef CONFIG_SOFTMMU
- unsigned memi = get_mmuidx(oi);
- TCGReg addrz;
- const tcg_insn_unit *func;
+ /* We don't support unaligned accesses. */
+ h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
+ h->aa.align = MAX(h->aa.align, s_bits);
+ a_mask = (1u << h->aa.align) - 1;
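+    /* a_mask is folded into the TLB compare (softmmu) or, when needed, tested with ANDcc. */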
- addrz = tcg_out_tlb_load(s, addr, memi, memop,
- offsetof(CPUTLBEntry, addr_read));
+#ifdef CONFIG_SOFTMMU
+ int mem_index = get_mmuidx(oi);
+ int fast_off = tlb_mask_table_ofs(s, mem_index);
+ int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
+ int table_off = fast_off + offsetof(CPUTLBDescFast, table);
+ int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
+ : offsetof(CPUTLBEntry, addr_write);
+ int add_off = offsetof(CPUTLBEntry, addend);
+ int compare_mask;
+ int cc;
- /* The fast path is exactly one insn. Thus we can perform the
- entire TLB Hit in the (annulled) delay slot of the branch
- over the TLB Miss case. */
+ /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T2, TCG_AREG0, mask_off);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T3, TCG_AREG0, table_off);
- /* beq,a,pt %[xi]cc, label0 */
- label_ptr = s->code_ptr;
- tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
- | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
- /* delay slot */
- tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
- qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
+ /* Extract the page index, shifted into place for tlb index. */
+ tcg_out_arithi(s, TCG_REG_T1, addr_reg,
+ s->page_bits - CPU_TLB_ENTRY_BITS, SHIFT_SRL);
+ tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T2, ARITH_AND);
- /* TLB Miss. */
+    /* Add the tlb_table pointer, creating the CPUTLBEntry address into T1. */
+ tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T3, ARITH_ADD);
- tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);
+ /*
+ * Load the tlb comparator and the addend.
+ * Always load the entire 64-bit comparator for simplicity.
+ * We will ignore the high bits via BPCC_ICC below.
+ */
+ tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_T2, TCG_REG_T1, cmp_off);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T1, TCG_REG_T1, add_off);
+ h->base = TCG_REG_T1;
- /* We use the helpers to extend SB and SW data, leaving the case
- of SL needing explicit extending below. */
- if ((memop & MO_SSIZE) == MO_SL) {
- func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
+ /* Mask out the page offset, except for the required alignment. */
+ compare_mask = s->page_mask | a_mask;
+ if (check_fit_tl(compare_mask, 13)) {
+ tcg_out_arithi(s, TCG_REG_T3, addr_reg, compare_mask, ARITH_AND);
} else {
- func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
+ tcg_out_movi_s32(s, TCG_REG_T3, compare_mask);
+ tcg_out_arith(s, TCG_REG_T3, addr_reg, TCG_REG_T3, ARITH_AND);
}
- tcg_debug_assert(func != NULL);
- tcg_out_call_nodelay(s, func, false);
- /* delay slot */
- tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi);
+ tcg_out_cmp(s, TCG_REG_T2, TCG_REG_T3, 0);
- /* We let the helper sign-extend SB and SW, but leave SL for here. */
- if (is_64 && (memop & MO_SSIZE) == MO_SL) {
- tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
- } else {
- tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
- }
+ ldst = new_ldst_label(s);
+ ldst->is_ld = is_ld;
+ ldst->oi = oi;
+ ldst->addrlo_reg = addr_reg;
+ ldst->label_ptr[0] = s->code_ptr;
- *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
+ /* bne,pn %[xi]cc, label0 */
+ cc = addr_type == TCG_TYPE_I32 ? BPCC_ICC : BPCC_XCC;
+ tcg_out_bpcc0(s, COND_NE, BPCC_PN | cc, 0);
#else
- TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
- unsigned a_bits = get_alignment_bits(memop);
- unsigned s_bits = memop & MO_SIZE;
- unsigned t_bits;
-
- if (TARGET_LONG_BITS == 32) {
- tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
- addr = TCG_REG_T1;
- }
-
/*
- * Normal case: alignment equal to access size.
+ * If the size equals the required alignment, we can skip the test
+     * and let the host SIGBUS be delivered to the guest as SIGBUS.
+ * Otherwise, test for at least natural alignment and defer
+ * everything else to the helper functions.
*/
- if (a_bits == s_bits) {
- tcg_out_ldst_rr(s, data, addr, index,
- qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
- return;
+ if (s_bits != get_alignment_bits(opc)) {
+ tcg_debug_assert(check_fit_tl(a_mask, 13));
+ tcg_out_arithi(s, TCG_REG_G0, addr_reg, a_mask, ARITH_ANDCC);
+
+ ldst = new_ldst_label(s);
+ ldst->is_ld = is_ld;
+ ldst->oi = oi;
+ ldst->addrlo_reg = addr_reg;
+ ldst->label_ptr[0] = s->code_ptr;
+
+ /* bne,pn %icc, label0 */
+ tcg_out_bpcc0(s, COND_NE, BPCC_PN | BPCC_ICC, 0);
}
+ h->base = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0;
+#endif
- /*
- * Test for at least natural alignment, and assume most accesses
- * will be aligned -- perform a straight load in the delay slot.
- * This is required to preserve atomicity for aligned accesses.
- */
- t_bits = MAX(a_bits, s_bits);
- tcg_debug_assert(t_bits < 13);
- tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);
-
- /* beq,a,pt %icc, label */
- label_ptr = s->code_ptr;
- tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
- /* delay slot */
- tcg_out_ldst_rr(s, data, addr, index,
- qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
-
- if (a_bits >= s_bits) {
- /*
- * Overalignment: A successful alignment test will perform the memory
- * operation in the delay slot, and failure need only invoke the
- * handler for SIGBUS.
- */
- tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false);
- /* delay slot -- move to low part of argument reg */
- tcg_out_mov_delay(s, TCG_REG_O1, addr);
+    /* If the guest address must be zero-extended, do so in the delay slot. */
+ if (addr_type == TCG_TYPE_I32) {
+ tcg_out_ext32u(s, TCG_REG_T2, addr_reg);
+ h->index = TCG_REG_T2;
} else {
- /* Underalignment: load by pieces of minimum alignment. */
- int ld_opc, a_size, s_size, i;
-
- /*
- * Force full address into T1 early; avoids problems with
- * overlap between @addr and @data.
- */
- tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);
-
- a_size = 1 << a_bits;
- s_size = 1 << s_bits;
- if ((memop & MO_BSWAP) == MO_BE) {
- ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)];
- tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
- ld_opc = qemu_ld_opc[a_bits | MO_BE];
- for (i = a_size; i < s_size; i += a_size) {
- tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
- tcg_out_arithi(s, data, data, a_size, SHIFT_SLLX);
- tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
- }
- } else if (a_bits == 0) {
- ld_opc = LDUB;
- tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
- for (i = a_size; i < s_size; i += a_size) {
- if ((memop & MO_SIGN) && i == s_size - a_size) {
- ld_opc = LDSB;
- }
- tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
- tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
- tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
- }
- } else {
- ld_opc = qemu_ld_opc[a_bits | MO_LE];
- tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc);
- for (i = a_size; i < s_size; i += a_size) {
- tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
- if ((memop & MO_SIGN) && i == s_size - a_size) {
- ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN];
- }
- tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc);
- tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
- tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
- }
+ if (ldst) {
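+            /* Fill the delay slot of the slow-path branch emitted above. */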
+ tcg_out_nop(s);
}
+ h->index = addr_reg;
}
-
- *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
-#endif /* CONFIG_SOFTMMU */
+ return ldst;
}
-static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
- MemOpIdx oi)
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
+ MemOpIdx oi, TCGType data_type)
{
- MemOp memop = get_memop(oi);
- tcg_insn_unit *label_ptr;
-
-#ifdef CONFIG_SOFTMMU
- unsigned memi = get_mmuidx(oi);
- TCGReg addrz;
- const tcg_insn_unit *func;
-
- addrz = tcg_out_tlb_load(s, addr, memi, memop,
- offsetof(CPUTLBEntry, addr_write));
-
- /* The fast path is exactly one insn. Thus we can perform the entire
- TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */
- /* beq,a,pt %[xi]cc, label0 */
- label_ptr = s->code_ptr;
- tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
- | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
- /* delay slot */
- tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
- qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
+ static const int ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
+ [MO_UB] = LDUB,
+ [MO_SB] = LDSB,
+ [MO_UB | MO_LE] = LDUB,
+ [MO_SB | MO_LE] = LDSB,
+
+ [MO_BEUW] = LDUH,
+ [MO_BESW] = LDSH,
+ [MO_BEUL] = LDUW,
+ [MO_BESL] = LDSW,
+ [MO_BEUQ] = LDX,
+ [MO_BESQ] = LDX,
+
+ [MO_LEUW] = LDUH_LE,
+ [MO_LESW] = LDSH_LE,
+ [MO_LEUL] = LDUW_LE,
+ [MO_LESL] = LDSW_LE,
+ [MO_LEUQ] = LDX_LE,
+ [MO_LESQ] = LDX_LE,
+ };
- /* TLB Miss. */
+ TCGLabelQemuLdst *ldst;
+ HostAddress h;
- tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);
- tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O2, data);
+ ldst = prepare_host_addr(s, &h, addr, oi, true);
- func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
- tcg_debug_assert(func != NULL);
- tcg_out_call_nodelay(s, func, false);
- /* delay slot */
- tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O3, oi);
+ tcg_out_ldst_rr(s, data, h.base, h.index,
+ ld_opc[get_memop(oi) & (MO_BSWAP | MO_SSIZE)]);
- *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
-#else
- TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
- unsigned a_bits = get_alignment_bits(memop);
- unsigned s_bits = memop & MO_SIZE;
- unsigned t_bits;
-
- if (TARGET_LONG_BITS == 32) {
- tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
- addr = TCG_REG_T1;
+ if (ldst) {
+ ldst->type = data_type;
+ ldst->datalo_reg = data;
+ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
+}
- /*
- * Normal case: alignment equal to access size.
- */
- if (a_bits == s_bits) {
- tcg_out_ldst_rr(s, data, addr, index,
- qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
- return;
- }
+static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
+ MemOpIdx oi, TCGType data_type)
+{
+ static const int st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
+ [MO_UB] = STB,
- /*
- * Test for at least natural alignment, and assume most accesses
- * will be aligned -- perform a straight store in the delay slot.
- * This is required to preserve atomicity for aligned accesses.
- */
- t_bits = MAX(a_bits, s_bits);
- tcg_debug_assert(t_bits < 13);
- tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);
+ [MO_BEUW] = STH,
+ [MO_BEUL] = STW,
+ [MO_BEUQ] = STX,
- /* beq,a,pt %icc, label */
- label_ptr = s->code_ptr;
- tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
- /* delay slot */
- tcg_out_ldst_rr(s, data, addr, index,
- qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
+ [MO_LEUW] = STH_LE,
+ [MO_LEUL] = STW_LE,
+ [MO_LEUQ] = STX_LE,
+ };
- if (a_bits >= s_bits) {
- /*
- * Overalignment: A successful alignment test will perform the memory
- * operation in the delay slot, and failure need only invoke the
- * handler for SIGBUS.
- */
- tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false);
- /* delay slot -- move to low part of argument reg */
- tcg_out_mov_delay(s, TCG_REG_O1, addr);
- } else {
- /* Underalignment: store by pieces of minimum alignment. */
- int st_opc, a_size, s_size, i;
+ TCGLabelQemuLdst *ldst;
+ HostAddress h;
- /*
- * Force full address into T1 early; avoids problems with
- * overlap between @addr and @data.
- */
- tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);
-
- a_size = 1 << a_bits;
- s_size = 1 << s_bits;
- if ((memop & MO_BSWAP) == MO_BE) {
- st_opc = qemu_st_opc[a_bits | MO_BE];
- for (i = 0; i < s_size; i += a_size) {
- TCGReg d = data;
- int shift = (s_size - a_size - i) * 8;
- if (shift) {
- d = TCG_REG_T2;
- tcg_out_arithi(s, d, data, shift, SHIFT_SRLX);
- }
- tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc);
- }
- } else if (a_bits == 0) {
- tcg_out_ldst(s, data, TCG_REG_T1, 0, STB);
- for (i = 1; i < s_size; i++) {
- tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
- tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB);
- }
- } else {
- /* Note that ST*A with immediate asi must use indexed address. */
- st_opc = qemu_st_opc[a_bits + MO_LE];
- tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc);
- for (i = a_size; i < s_size; i += a_size) {
- tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
- tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
- tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc);
- }
- }
- }
+ ldst = prepare_host_addr(s, &h, addr, oi, false);
+
+ tcg_out_ldst_rr(s, data, h.base, h.index,
+ st_opc[get_memop(oi) & (MO_BSWAP | MO_SIZE)]);
- *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
-#endif /* CONFIG_SOFTMMU */
+ if (ldst) {
+ ldst->type = data_type;
+ ldst->datalo_reg = data;
+ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+ }
}
static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
if (check_fit_ptr(a0, 13)) {
tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
- tcg_out_movi_imm13(s, TCG_REG_O0, a0);
+ tcg_out_movi_s13(s, TCG_REG_O0, a0);
return;
} else {
intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
break;
case INDEX_op_setcond_i32:
- tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
+ tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, false);
+ break;
+ case INDEX_op_negsetcond_i32:
+ tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, true);
break;
case INDEX_op_movcond_i32:
tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
break;
- case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, a0, a1, a2, false);
+ case INDEX_op_qemu_ld_a32_i32:
+ case INDEX_op_qemu_ld_a64_i32:
+ tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
+ break;
+ case INDEX_op_qemu_ld_a32_i64:
+ case INDEX_op_qemu_ld_a64_i64:
+ tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
break;
- case INDEX_op_qemu_ld_i64:
- tcg_out_qemu_ld(s, a0, a1, a2, true);
+ case INDEX_op_qemu_st_a32_i32:
+ case INDEX_op_qemu_st_a64_i32:
+ tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
break;
- case INDEX_op_qemu_st_i32:
- case INDEX_op_qemu_st_i64:
- tcg_out_qemu_st(s, a0, a1, a2);
+ case INDEX_op_qemu_st_a32_i64:
+ case INDEX_op_qemu_st_a64_i64:
+ tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
break;
case INDEX_op_ld32s_i64:
case INDEX_op_divu_i64:
c = ARITH_UDIVX;
goto gen_arith;
- case INDEX_op_ext_i32_i64:
- case INDEX_op_ext32s_i64:
- tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
- break;
- case INDEX_op_extu_i32_i64:
- case INDEX_op_ext32u_i64:
- tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
- break;
- case INDEX_op_extrl_i64_i32:
- tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
- break;
- case INDEX_op_extrh_i64_i32:
- tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
- break;
case INDEX_op_brcond_i64:
tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
break;
case INDEX_op_setcond_i64:
- tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
+ tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, false);
+ break;
+ case INDEX_op_negsetcond_i64:
+ tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, true);
break;
case INDEX_op_movcond_i64:
tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
case INDEX_op_ext8s_i64:
+ case INDEX_op_ext8u_i32:
+ case INDEX_op_ext8u_i64:
+ case INDEX_op_ext16s_i32:
+ case INDEX_op_ext16s_i64:
+ case INDEX_op_ext16u_i32:
+ case INDEX_op_ext16u_i64:
+ case INDEX_op_ext32s_i64:
+ case INDEX_op_ext32u_i64:
+ case INDEX_op_ext_i32_i64:
+ case INDEX_op_extu_i32_i64:
default:
g_assert_not_reached();
}
case INDEX_op_ext32u_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
- case INDEX_op_extrh_i64_i32:
+ case INDEX_op_qemu_ld_a32_i32:
+ case INDEX_op_qemu_ld_a64_i32:
+ case INDEX_op_qemu_ld_a32_i64:
+ case INDEX_op_qemu_ld_a64_i64:
return C_O1_I1(r, r);
case INDEX_op_st8_i32:
case INDEX_op_st_i32:
case INDEX_op_st32_i64:
case INDEX_op_st_i64:
+ case INDEX_op_qemu_st_a32_i32:
+ case INDEX_op_qemu_st_a64_i32:
+ case INDEX_op_qemu_st_a32_i64:
+ case INDEX_op_qemu_st_a64_i64:
return C_O0_I2(rZ, r);
case INDEX_op_add_i32:
case INDEX_op_sar_i64:
case INDEX_op_setcond_i32:
case INDEX_op_setcond_i64:
+ case INDEX_op_negsetcond_i32:
+ case INDEX_op_negsetcond_i64:
return C_O1_I2(r, rZ, rJ);
case INDEX_op_brcond_i32:
case INDEX_op_muluh_i64:
return C_O1_I2(r, r, r);
- case INDEX_op_qemu_ld_i32:
- case INDEX_op_qemu_ld_i64:
- return C_O1_I1(r, s);
- case INDEX_op_qemu_st_i32:
- case INDEX_op_qemu_st_i64:
- return C_O0_I2(sZ, s);
-
default:
g_assert_not_reached();
}
tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_T3); /* for internal use */
}
#define ELF_HOST_MACHINE EM_SPARCV9