*/
#include "qemu/osdep.h"
+#include "tcg/tcg.h"
+#include "tcg/tcg-ldst.h"
+#include <ffi.h>
-/* Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
- * Without assertions, the interpreter runs much faster. */
+
+/*
+ * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
+ * Without assertions, the interpreter runs much faster.
+ */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
-# define tci_assert(cond) ((void)0)
-#endif
-
-#include "qemu-common.h"
-#include "tcg/tcg.h" /* MAX_OPC_PARAM_IARGS */
-#include "exec/cpu_ldst.h"
-#include "tcg/tcg-op.h"
-#include "qemu/compiler.h"
-
-#if MAX_OPC_PARAM_IARGS != 6
-# error Fix needed, number of supported input arguments changed!
-#endif
-#if TCG_TARGET_REG_BITS == 32
-typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong,
- tcg_target_ulong, tcg_target_ulong,
- tcg_target_ulong, tcg_target_ulong,
- tcg_target_ulong, tcg_target_ulong,
- tcg_target_ulong, tcg_target_ulong,
- tcg_target_ulong, tcg_target_ulong);
-#else
-typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong,
- tcg_target_ulong, tcg_target_ulong,
- tcg_target_ulong, tcg_target_ulong);
+# define tci_assert(cond) ((void)(cond))
#endif
__thread uintptr_t tci_tb_ptr;
-static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index)
-{
- tci_assert(index < TCG_TARGET_NB_REGS);
- return regs[index];
-}
-
-static void
-tci_write_reg(tcg_target_ulong *regs, TCGReg index, tcg_target_ulong value)
-{
- tci_assert(index < TCG_TARGET_NB_REGS);
- tci_assert(index != TCG_AREG0);
- tci_assert(index != TCG_REG_CALL_STACK);
- regs[index] = value;
-}
-
-#if TCG_TARGET_REG_BITS == 32
static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
uint32_t low_index, uint64_t value)
{
- tci_write_reg(regs, low_index, value);
- tci_write_reg(regs, high_index, value >> 32);
+ regs[low_index] = (uint32_t)value;
+ regs[high_index] = value >> 32;
}
-#endif
-#if TCG_TARGET_REG_BITS == 32
/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
return ((uint64_t)high << 32) + low;
}
-#endif
-/* Read constant byte from bytecode. */
-static uint8_t tci_read_b(const uint8_t **tb_ptr)
+/*
+ * Load sets of arguments all at once. The naming convention is:
+ * tci_args_<arguments>
+ * where arguments is a sequence of
+ *
+ * b = immediate (bit position)
+ * c = condition (TCGCond)
+ * i = immediate (uint32_t)
+ * I = immediate (tcg_target_ulong)
+ * l = label or pointer
+ * m = immediate (MemOpIdx)
+ * n = immediate (call return length)
+ * r = register
+ * s = signed ldst offset
+ */
+
+static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
{
- return *(tb_ptr[0]++);
+ int diff = sextract32(insn, 12, 20);
+ *l0 = diff ? (void *)tb_ptr + diff : NULL;
}
-/* Read register number from bytecode. */
-static TCGReg tci_read_r(const uint8_t **tb_ptr)
+static void tci_args_r(uint32_t insn, TCGReg *r0)
{
- uint8_t regno = tci_read_b(tb_ptr);
- tci_assert(regno < TCG_TARGET_NB_REGS);
- return regno;
+ *r0 = extract32(insn, 8, 4);
}
-/* Read constant (native size) from bytecode. */
-static tcg_target_ulong tci_read_i(const uint8_t **tb_ptr)
+static void tci_args_nl(uint32_t insn, const void *tb_ptr,
+ uint8_t *n0, void **l1)
{
- tcg_target_ulong value = *(const tcg_target_ulong *)(*tb_ptr);
- *tb_ptr += sizeof(value);
- return value;
+ *n0 = extract32(insn, 8, 4);
+ *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}
-/* Read unsigned constant (32 bit) from bytecode. */
-static uint32_t tci_read_i32(const uint8_t **tb_ptr)
+static void tci_args_rl(uint32_t insn, const void *tb_ptr,
+ TCGReg *r0, void **l1)
{
- uint32_t value = *(const uint32_t *)(*tb_ptr);
- *tb_ptr += sizeof(value);
- return value;
+ *r0 = extract32(insn, 8, 4);
+ *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}
-/* Read signed constant (32 bit) from bytecode. */
-static int32_t tci_read_s32(const uint8_t **tb_ptr)
+static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
{
- int32_t value = *(const int32_t *)(*tb_ptr);
- *tb_ptr += sizeof(value);
- return value;
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
}
-#if TCG_TARGET_REG_BITS == 64
-/* Read constant (64 bit) from bytecode. */
-static uint64_t tci_read_i64(const uint8_t **tb_ptr)
+static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
{
- uint64_t value = *(const uint64_t *)(*tb_ptr);
- *tb_ptr += sizeof(value);
- return value;
+ *r0 = extract32(insn, 8, 4);
+ *i1 = sextract32(insn, 12, 20);
}
-#endif
-/* Read indexed register (native size) from bytecode. */
-static tcg_target_ulong
-tci_read_rval(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
+static void tci_args_rrm(uint32_t insn, TCGReg *r0,
+ TCGReg *r1, MemOpIdx *m2)
{
- tcg_target_ulong value = tci_read_reg(regs, **tb_ptr);
- *tb_ptr += 1;
- return value;
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *m2 = extract32(insn, 16, 16);
}
-#if TCG_TARGET_REG_BITS == 32
-/* Read two indexed registers (2 * 32 bit) from bytecode. */
-static uint64_t tci_read_r64(const tcg_target_ulong *regs,
- const uint8_t **tb_ptr)
+static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
- uint32_t low = tci_read_rval(regs, tb_ptr);
- return tci_uint64(tci_read_rval(regs, tb_ptr), low);
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *r2 = extract32(insn, 16, 4);
}
-#elif TCG_TARGET_REG_BITS == 64
-/* Read indexed register (64 bit) from bytecode. */
-static uint64_t tci_read_r64(const tcg_target_ulong *regs,
- const uint8_t **tb_ptr)
+
+static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
{
- return tci_read_rval(regs, tb_ptr);
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *i2 = sextract32(insn, 16, 16);
}
-#endif
-/* Read indexed register(s) with target address from bytecode. */
-static target_ulong
-tci_read_ulong(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
+static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
+ uint8_t *i2, uint8_t *i3)
{
- target_ulong taddr = tci_read_rval(regs, tb_ptr);
-#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
- taddr += (uint64_t)tci_read_rval(regs, tb_ptr) << 32;
-#endif
- return taddr;
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *i2 = extract32(insn, 16, 6);
+ *i3 = extract32(insn, 22, 6);
}
-static tcg_target_ulong tci_read_label(const uint8_t **tb_ptr)
+static void tci_args_rrrc(uint32_t insn,
+ TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
- tcg_target_ulong label = tci_read_i(tb_ptr);
- tci_assert(label != 0);
- return label;
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *r2 = extract32(insn, 16, 4);
+ *c3 = extract32(insn, 20, 4);
}
-/*
- * Load sets of arguments all at once. The naming convention is:
- * tci_args_<arguments>
- * where arguments is a sequence of
- *
- * c = condition (TCGCond)
- * l = label or pointer
- * r = register
- * s = signed ldst offset
- */
-
-static void tci_args_l(const uint8_t **tb_ptr, void **l0)
+static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
+ TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
- *l0 = (void *)tci_read_label(tb_ptr);
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *r2 = extract32(insn, 16, 4);
+ *i3 = extract32(insn, 20, 6);
+ *i4 = extract32(insn, 26, 6);
}
-static void tci_args_rr(const uint8_t **tb_ptr,
- TCGReg *r0, TCGReg *r1)
+static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
+ TCGReg *r2, TCGReg *r3, TCGReg *r4)
{
- *r0 = tci_read_r(tb_ptr);
- *r1 = tci_read_r(tb_ptr);
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *r2 = extract32(insn, 16, 4);
+ *r3 = extract32(insn, 20, 4);
+ *r4 = extract32(insn, 24, 4);
}
-static void tci_args_rrr(const uint8_t **tb_ptr,
- TCGReg *r0, TCGReg *r1, TCGReg *r2)
+static void tci_args_rrrr(uint32_t insn,
+ TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
- *r0 = tci_read_r(tb_ptr);
- *r1 = tci_read_r(tb_ptr);
- *r2 = tci_read_r(tb_ptr);
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *r2 = extract32(insn, 16, 4);
+ *r3 = extract32(insn, 20, 4);
}
-static void tci_args_rrs(const uint8_t **tb_ptr,
- TCGReg *r0, TCGReg *r1, int32_t *i2)
+static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
+ TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
- *r0 = tci_read_r(tb_ptr);
- *r1 = tci_read_r(tb_ptr);
- *i2 = tci_read_s32(tb_ptr);
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *r2 = extract32(insn, 16, 4);
+ *r3 = extract32(insn, 20, 4);
+ *r4 = extract32(insn, 24, 4);
+ *c5 = extract32(insn, 28, 4);
}
-static void tci_args_rrrc(const uint8_t **tb_ptr,
- TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
+static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
+ TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
- *r0 = tci_read_r(tb_ptr);
- *r1 = tci_read_r(tb_ptr);
- *r2 = tci_read_r(tb_ptr);
- *c3 = tci_read_b(tb_ptr);
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *r2 = extract32(insn, 16, 4);
+ *r3 = extract32(insn, 20, 4);
+ *r4 = extract32(insn, 24, 4);
+ *r5 = extract32(insn, 28, 4);
}
static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
return result;
}
-#define qemu_ld_ub \
- cpu_ldub_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
-#define qemu_ld_leuw \
- cpu_lduw_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
-#define qemu_ld_leul \
- cpu_ldl_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
-#define qemu_ld_leq \
- cpu_ldq_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
-#define qemu_ld_beuw \
- cpu_lduw_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
-#define qemu_ld_beul \
- cpu_ldl_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
-#define qemu_ld_beq \
- cpu_ldq_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
-#define qemu_st_b(X) \
- cpu_stb_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
-#define qemu_st_lew(X) \
- cpu_stw_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
-#define qemu_st_lel(X) \
- cpu_stl_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
-#define qemu_st_leq(X) \
- cpu_stq_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
-#define qemu_st_bew(X) \
- cpu_stw_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
-#define qemu_st_bel(X) \
- cpu_stl_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
-#define qemu_st_beq(X) \
- cpu_stq_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
+static uint64_t tci_qemu_ld(CPUArchState *env, uint64_t taddr,
+ MemOpIdx oi, const void *tb_ptr)
+{
+ MemOp mop = get_memop(oi);
+ uintptr_t ra = (uintptr_t)tb_ptr;
+
+ switch (mop & MO_SSIZE) {
+ case MO_UB:
+ return helper_ldub_mmu(env, taddr, oi, ra);
+ case MO_SB:
+ return helper_ldsb_mmu(env, taddr, oi, ra);
+ case MO_UW:
+ return helper_lduw_mmu(env, taddr, oi, ra);
+ case MO_SW:
+ return helper_ldsw_mmu(env, taddr, oi, ra);
+ case MO_UL:
+ return helper_ldul_mmu(env, taddr, oi, ra);
+ case MO_SL:
+ return helper_ldsl_mmu(env, taddr, oi, ra);
+ case MO_UQ:
+ return helper_ldq_mmu(env, taddr, oi, ra);
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void tci_qemu_st(CPUArchState *env, uint64_t taddr, uint64_t val,
+ MemOpIdx oi, const void *tb_ptr)
+{
+ MemOp mop = get_memop(oi);
+ uintptr_t ra = (uintptr_t)tb_ptr;
+
+ switch (mop & MO_SIZE) {
+ case MO_UB:
+ helper_stb_mmu(env, taddr, val, oi, ra);
+ break;
+ case MO_UW:
+ helper_stw_mmu(env, taddr, val, oi, ra);
+ break;
+ case MO_UL:
+ helper_stl_mmu(env, taddr, val, oi, ra);
+ break;
+ case MO_UQ:
+ helper_stq_mmu(env, taddr, val, oi, ra);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
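
/*
 * Illustrative only: a MemOpIdx packs a MemOp together with an mmu index.
 * The dispatch above keys on the size/sign bits; byte order is handled
 * inside the helper_*_mmu routines.  Assuming the usual make_memop_idx()
 * accessor is in scope, a little-endian 32-bit guest load would take the
 * MO_UL path and land in helper_ldul_mmu():
 */
static uint32_t tci_example_ldl_le(CPUArchState *env, uint64_t taddr,
                                   const void *tb_ptr)
{
    MemOpIdx oi = make_memop_idx(MO_LEUL, 0);   /* mmu index 0, illustrative */

    return tci_qemu_ld(env, taddr, oi, tb_ptr);
}
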
#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
const void *v_tb_ptr)
{
- const uint8_t *tb_ptr = v_tb_ptr;
+ const uint32_t *tb_ptr = v_tb_ptr;
tcg_target_ulong regs[TCG_TARGET_NB_REGS];
- long tcg_temps[CPU_TEMP_BUF_NLONGS];
- uintptr_t sp_value = (uintptr_t)(tcg_temps + CPU_TEMP_BUF_NLONGS);
- uintptr_t ret = 0;
+ uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
+ / sizeof(uint64_t)];
regs[TCG_AREG0] = (tcg_target_ulong)env;
- regs[TCG_REG_CALL_STACK] = sp_value;
+ regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
tci_assert(tb_ptr);
for (;;) {
- TCGOpcode opc = tb_ptr[0];
-#if defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
- uint8_t op_size = tb_ptr[1];
- const uint8_t *old_code_ptr = tb_ptr;
-#endif
- TCGReg r0, r1, r2;
- tcg_target_ulong t0;
+ uint32_t insn;
+ TCGOpcode opc;
+ TCGReg r0, r1, r2, r3, r4, r5;
tcg_target_ulong t1;
- tcg_target_ulong t2;
- tcg_target_ulong label;
TCGCond condition;
- target_ulong taddr;
- uint8_t tmp8;
- uint16_t tmp16;
+ uint8_t pos, len;
uint32_t tmp32;
- uint64_t tmp64;
-#if TCG_TARGET_REG_BITS == 32
- uint64_t v64;
-#endif
- TCGMemOpIdx oi;
+ uint64_t tmp64, taddr;
+ uint64_t T1, T2;
+ MemOpIdx oi;
int32_t ofs;
void *ptr;
- /* Skip opcode and size entry. */
- tb_ptr += 2;
+ insn = *tb_ptr++;
+ opc = extract32(insn, 0, 8);
switch (opc) {
case INDEX_op_call:
- t0 = tci_read_i(&tb_ptr);
- tci_tb_ptr = (uintptr_t)tb_ptr;
-#if TCG_TARGET_REG_BITS == 32
- tmp64 = ((helper_function)t0)(tci_read_reg(regs, TCG_REG_R0),
- tci_read_reg(regs, TCG_REG_R1),
- tci_read_reg(regs, TCG_REG_R2),
- tci_read_reg(regs, TCG_REG_R3),
- tci_read_reg(regs, TCG_REG_R4),
- tci_read_reg(regs, TCG_REG_R5),
- tci_read_reg(regs, TCG_REG_R6),
- tci_read_reg(regs, TCG_REG_R7),
- tci_read_reg(regs, TCG_REG_R8),
- tci_read_reg(regs, TCG_REG_R9),
- tci_read_reg(regs, TCG_REG_R10),
- tci_read_reg(regs, TCG_REG_R11));
- tci_write_reg(regs, TCG_REG_R0, tmp64);
- tci_write_reg(regs, TCG_REG_R1, tmp64 >> 32);
-#else
- tmp64 = ((helper_function)t0)(tci_read_reg(regs, TCG_REG_R0),
- tci_read_reg(regs, TCG_REG_R1),
- tci_read_reg(regs, TCG_REG_R2),
- tci_read_reg(regs, TCG_REG_R3),
- tci_read_reg(regs, TCG_REG_R4),
- tci_read_reg(regs, TCG_REG_R5));
- tci_write_reg(regs, TCG_REG_R0, tmp64);
-#endif
+ {
+ void *call_slots[MAX_CALL_IARGS];
+ ffi_cif *cif;
+ void *func;
+ unsigned i, s, n;
+
+ tci_args_nl(insn, tb_ptr, &len, &ptr);
+ func = ((void **)ptr)[0];
+ cif = ((void **)ptr)[1];
+
+ n = cif->nargs;
+ for (i = s = 0; i < n; ++i) {
+ ffi_type *t = cif->arg_types[i];
+ call_slots[i] = &stack[s];
+ s += DIV_ROUND_UP(t->size, 8);
+ }
+
+ /* Helper functions may need to access the "return address" */
+ tci_tb_ptr = (uintptr_t)tb_ptr;
+ ffi_call(cif, func, stack, call_slots);
+ }
+
+ switch (len) {
+ case 0: /* void */
+ break;
+ case 1: /* uint32_t */
+ /*
+ * The result winds up "left-aligned" in the stack[0] slot.
+ * Note that libffi has an odd special case in that it will
+ * always widen an integral result to ffi_arg.
+ */
+ if (sizeof(ffi_arg) == 8) {
+ regs[TCG_REG_R0] = (uint32_t)stack[0];
+ } else {
+ regs[TCG_REG_R0] = *(uint32_t *)stack;
+ }
+ break;
+ case 2: /* uint64_t */
+ /*
+ * For TCG_TARGET_REG_BITS == 32, the register pair
+ * must stay in host memory order.
+ */
+                memcpy(&regs[TCG_REG_R0], stack, 8);
+ break;
+ case 3: /* Int128 */
+                memcpy(&regs[TCG_REG_R0], stack, 16);
+ break;
+ default:
+ g_assert_not_reached();
+ }
break;
+
case INDEX_op_br:
- tci_args_l(&tb_ptr, &ptr);
- tci_assert(tb_ptr == old_code_ptr + op_size);
+ tci_args_l(insn, tb_ptr, &ptr);
tb_ptr = ptr;
continue;
case INDEX_op_setcond_i32:
- tci_args_rrrc(&tb_ptr, &r0, &r1, &r2, &condition);
+ tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
break;
+ case INDEX_op_movcond_i32:
+ tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
+ tmp32 = tci_compare32(regs[r1], regs[r2], condition);
+ regs[r0] = regs[tmp32 ? r3 : r4];
+ break;
#if TCG_TARGET_REG_BITS == 32
case INDEX_op_setcond2_i32:
- t0 = *tb_ptr++;
- tmp64 = tci_read_r64(regs, &tb_ptr);
- v64 = tci_read_r64(regs, &tb_ptr);
- condition = *tb_ptr++;
- tci_write_reg(regs, t0, tci_compare64(tmp64, v64, condition));
+ tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
+ T1 = tci_uint64(regs[r2], regs[r1]);
+ T2 = tci_uint64(regs[r4], regs[r3]);
+ regs[r0] = tci_compare64(T1, T2, condition);
break;
#elif TCG_TARGET_REG_BITS == 64
case INDEX_op_setcond_i64:
- tci_args_rrrc(&tb_ptr, &r0, &r1, &r2, &condition);
+ tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
break;
+ case INDEX_op_movcond_i64:
+ tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
+ tmp32 = tci_compare64(regs[r1], regs[r2], condition);
+ regs[r0] = regs[tmp32 ? r3 : r4];
+ break;
#endif
CASE_32_64(mov)
- tci_args_rr(&tb_ptr, &r0, &r1);
+ tci_args_rr(insn, &r0, &r1);
regs[r0] = regs[r1];
break;
- case INDEX_op_tci_movi_i32:
- t0 = *tb_ptr++;
- t1 = tci_read_i32(&tb_ptr);
- tci_write_reg(regs, t0, t1);
+ case INDEX_op_tci_movi:
+ tci_args_ri(insn, &r0, &t1);
+ regs[r0] = t1;
+ break;
+ case INDEX_op_tci_movl:
+ tci_args_rl(insn, tb_ptr, &r0, &ptr);
+ regs[r0] = *(tcg_target_ulong *)ptr;
break;
/* Load/store operations (32 bit). */
CASE_32_64(ld8u)
- tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
regs[r0] = *(uint8_t *)ptr;
break;
CASE_32_64(ld8s)
- tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
regs[r0] = *(int8_t *)ptr;
break;
CASE_32_64(ld16u)
- tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
regs[r0] = *(uint16_t *)ptr;
break;
CASE_32_64(ld16s)
- tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
regs[r0] = *(int16_t *)ptr;
break;
case INDEX_op_ld_i32:
CASE_64(ld32u)
- tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
regs[r0] = *(uint32_t *)ptr;
break;
CASE_32_64(st8)
- tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
*(uint8_t *)ptr = regs[r0];
break;
CASE_32_64(st16)
- tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
*(uint16_t *)ptr = regs[r0];
break;
case INDEX_op_st_i32:
CASE_64(st32)
- tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
*(uint32_t *)ptr = regs[r0];
break;
/* Arithmetic operations (mixed 32/64 bit). */
CASE_32_64(add)
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] + regs[r2];
break;
CASE_32_64(sub)
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] - regs[r2];
break;
CASE_32_64(mul)
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] * regs[r2];
break;
CASE_32_64(and)
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] & regs[r2];
break;
CASE_32_64(or)
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] | regs[r2];
break;
CASE_32_64(xor)
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] ^ regs[r2];
break;
+#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
+ CASE_32_64(andc)
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = regs[r1] & ~regs[r2];
+ break;
+#endif
+#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
+ CASE_32_64(orc)
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = regs[r1] | ~regs[r2];
+ break;
+#endif
+#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
+ CASE_32_64(eqv)
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = ~(regs[r1] ^ regs[r2]);
+ break;
+#endif
+#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
+ CASE_32_64(nand)
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = ~(regs[r1] & regs[r2]);
+ break;
+#endif
+#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
+ CASE_32_64(nor)
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = ~(regs[r1] | regs[r2]);
+ break;
+#endif
/* Arithmetic operations (32 bit). */
case INDEX_op_div_i32:
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
break;
case INDEX_op_divu_i32:
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
break;
case INDEX_op_rem_i32:
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
break;
case INDEX_op_remu_i32:
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
break;
+#if TCG_TARGET_HAS_clz_i32
+ case INDEX_op_clz_i32:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ tmp32 = regs[r1];
+ regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
+ break;
+#endif
+#if TCG_TARGET_HAS_ctz_i32
+ case INDEX_op_ctz_i32:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ tmp32 = regs[r1];
+ regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
+ break;
+#endif
+#if TCG_TARGET_HAS_ctpop_i32
+ case INDEX_op_ctpop_i32:
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = ctpop32(regs[r1]);
+ break;
+#endif
/* Shift/rotate operations (32 bit). */
case INDEX_op_shl_i32:
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
break;
case INDEX_op_shr_i32:
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
break;
case INDEX_op_sar_i32:
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
break;
#if TCG_TARGET_HAS_rot_i32
case INDEX_op_rotl_i32:
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = rol32(regs[r1], regs[r2] & 31);
break;
case INDEX_op_rotr_i32:
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = ror32(regs[r1], regs[r2] & 31);
break;
#endif
#if TCG_TARGET_HAS_deposit_i32
case INDEX_op_deposit_i32:
- t0 = *tb_ptr++;
- t1 = tci_read_rval(regs, &tb_ptr);
- t2 = tci_read_rval(regs, &tb_ptr);
- tmp16 = *tb_ptr++;
- tmp8 = *tb_ptr++;
- tmp32 = (((1 << tmp8) - 1) << tmp16);
- tci_write_reg(regs, t0, (t1 & ~tmp32) | ((t2 << tmp16) & tmp32));
+ tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
+ regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
+ break;
+#endif
+#if TCG_TARGET_HAS_extract_i32
+ case INDEX_op_extract_i32:
+ tci_args_rrbb(insn, &r0, &r1, &pos, &len);
+ regs[r0] = extract32(regs[r1], pos, len);
+ break;
+#endif
+#if TCG_TARGET_HAS_sextract_i32
+ case INDEX_op_sextract_i32:
+ tci_args_rrbb(insn, &r0, &r1, &pos, &len);
+ regs[r0] = sextract32(regs[r1], pos, len);
break;
#endif
case INDEX_op_brcond_i32:
- t0 = tci_read_rval(regs, &tb_ptr);
- t1 = tci_read_rval(regs, &tb_ptr);
- condition = *tb_ptr++;
- label = tci_read_label(&tb_ptr);
- if (tci_compare32(t0, t1, condition)) {
- tci_assert(tb_ptr == old_code_ptr + op_size);
- tb_ptr = (uint8_t *)label;
- continue;
+ tci_args_rl(insn, tb_ptr, &r0, &ptr);
+ if ((uint32_t)regs[r0]) {
+ tb_ptr = ptr;
}
break;
-#if TCG_TARGET_REG_BITS == 32
+#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
case INDEX_op_add2_i32:
- t0 = *tb_ptr++;
- t1 = *tb_ptr++;
- tmp64 = tci_read_r64(regs, &tb_ptr);
- tmp64 += tci_read_r64(regs, &tb_ptr);
- tci_write_reg64(regs, t1, t0, tmp64);
+ tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
+ T1 = tci_uint64(regs[r3], regs[r2]);
+ T2 = tci_uint64(regs[r5], regs[r4]);
+ tci_write_reg64(regs, r1, r0, T1 + T2);
break;
+#endif
+#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
case INDEX_op_sub2_i32:
- t0 = *tb_ptr++;
- t1 = *tb_ptr++;
- tmp64 = tci_read_r64(regs, &tb_ptr);
- tmp64 -= tci_read_r64(regs, &tb_ptr);
- tci_write_reg64(regs, t1, t0, tmp64);
- break;
- case INDEX_op_brcond2_i32:
- tmp64 = tci_read_r64(regs, &tb_ptr);
- v64 = tci_read_r64(regs, &tb_ptr);
- condition = *tb_ptr++;
- label = tci_read_label(&tb_ptr);
- if (tci_compare64(tmp64, v64, condition)) {
- tci_assert(tb_ptr == old_code_ptr + op_size);
- tb_ptr = (uint8_t *)label;
- continue;
- }
+ tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
+ T1 = tci_uint64(regs[r3], regs[r2]);
+ T2 = tci_uint64(regs[r5], regs[r4]);
+ tci_write_reg64(regs, r1, r0, T1 - T2);
break;
+#endif
+#if TCG_TARGET_HAS_mulu2_i32
case INDEX_op_mulu2_i32:
- t0 = *tb_ptr++;
- t1 = *tb_ptr++;
- t2 = tci_read_rval(regs, &tb_ptr);
- tmp64 = (uint32_t)tci_read_rval(regs, &tb_ptr);
- tci_write_reg64(regs, t1, t0, (uint32_t)t2 * tmp64);
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+ tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
+ tci_write_reg64(regs, r1, r0, tmp64);
+ break;
+#endif
+#if TCG_TARGET_HAS_muls2_i32
+ case INDEX_op_muls2_i32:
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+ tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
+ tci_write_reg64(regs, r1, r0, tmp64);
break;
-#endif /* TCG_TARGET_REG_BITS == 32 */
+#endif
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
CASE_32_64(ext8s)
- tci_args_rr(&tb_ptr, &r0, &r1);
+ tci_args_rr(insn, &r0, &r1);
regs[r0] = (int8_t)regs[r1];
break;
#endif
-#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
+#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
+ TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
CASE_32_64(ext16s)
- tci_args_rr(&tb_ptr, &r0, &r1);
+ tci_args_rr(insn, &r0, &r1);
regs[r0] = (int16_t)regs[r1];
break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
CASE_32_64(ext8u)
- tci_args_rr(&tb_ptr, &r0, &r1);
+ tci_args_rr(insn, &r0, &r1);
regs[r0] = (uint8_t)regs[r1];
break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
CASE_32_64(ext16u)
- tci_args_rr(&tb_ptr, &r0, &r1);
+ tci_args_rr(insn, &r0, &r1);
regs[r0] = (uint16_t)regs[r1];
break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
CASE_32_64(bswap16)
- tci_args_rr(&tb_ptr, &r0, &r1);
+ tci_args_rr(insn, &r0, &r1);
regs[r0] = bswap16(regs[r1]);
break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
CASE_32_64(bswap32)
- tci_args_rr(&tb_ptr, &r0, &r1);
+ tci_args_rr(insn, &r0, &r1);
regs[r0] = bswap32(regs[r1]);
break;
#endif
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
CASE_32_64(not)
- tci_args_rr(&tb_ptr, &r0, &r1);
+ tci_args_rr(insn, &r0, &r1);
regs[r0] = ~regs[r1];
break;
#endif
-#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
CASE_32_64(neg)
- tci_args_rr(&tb_ptr, &r0, &r1);
+ tci_args_rr(insn, &r0, &r1);
regs[r0] = -regs[r1];
break;
-#endif
#if TCG_TARGET_REG_BITS == 64
- case INDEX_op_tci_movi_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_i64(&tb_ptr);
- tci_write_reg(regs, t0, t1);
- break;
-
/* Load/store operations (64 bit). */
case INDEX_op_ld32s_i64:
- tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
regs[r0] = *(int32_t *)ptr;
break;
case INDEX_op_ld_i64:
- tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
regs[r0] = *(uint64_t *)ptr;
break;
case INDEX_op_st_i64:
- tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
*(uint64_t *)ptr = regs[r0];
break;
/* Arithmetic operations (64 bit). */
case INDEX_op_div_i64:
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
break;
case INDEX_op_divu_i64:
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
break;
case INDEX_op_rem_i64:
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
break;
case INDEX_op_remu_i64:
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
break;
+#if TCG_TARGET_HAS_clz_i64
+ case INDEX_op_clz_i64:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
+ break;
+#endif
+#if TCG_TARGET_HAS_ctz_i64
+ case INDEX_op_ctz_i64:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
+ break;
+#endif
+#if TCG_TARGET_HAS_ctpop_i64
+ case INDEX_op_ctpop_i64:
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = ctpop64(regs[r1]);
+ break;
+#endif
+#if TCG_TARGET_HAS_mulu2_i64
+ case INDEX_op_mulu2_i64:
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+            mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
+ break;
+#endif
+#if TCG_TARGET_HAS_muls2_i64
+ case INDEX_op_muls2_i64:
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+            muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
+ break;
+#endif
+#if TCG_TARGET_HAS_add2_i64
+ case INDEX_op_add2_i64:
+ tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
+ T1 = regs[r2] + regs[r4];
+ T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
+ regs[r0] = T1;
+ regs[r1] = T2;
+ break;
+#endif
+#if TCG_TARGET_HAS_sub2_i64
+ case INDEX_op_sub2_i64:
+ tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
+ T1 = regs[r2] - regs[r4];
+ T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);
+ regs[r0] = T1;
+ regs[r1] = T2;
+ break;
+#endif
/* Shift/rotate operations (64 bit). */
case INDEX_op_shl_i64:
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] << (regs[r2] & 63);
break;
case INDEX_op_shr_i64:
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] >> (regs[r2] & 63);
break;
case INDEX_op_sar_i64:
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
break;
#if TCG_TARGET_HAS_rot_i64
case INDEX_op_rotl_i64:
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = rol64(regs[r1], regs[r2] & 63);
break;
case INDEX_op_rotr_i64:
- tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = ror64(regs[r1], regs[r2] & 63);
break;
#endif
#if TCG_TARGET_HAS_deposit_i64
case INDEX_op_deposit_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_rval(regs, &tb_ptr);
- t2 = tci_read_rval(regs, &tb_ptr);
- tmp16 = *tb_ptr++;
- tmp8 = *tb_ptr++;
- tmp64 = (((1ULL << tmp8) - 1) << tmp16);
- tci_write_reg(regs, t0, (t1 & ~tmp64) | ((t2 << tmp16) & tmp64));
+ tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
+ regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
+ break;
+#endif
+#if TCG_TARGET_HAS_extract_i64
+ case INDEX_op_extract_i64:
+ tci_args_rrbb(insn, &r0, &r1, &pos, &len);
+ regs[r0] = extract64(regs[r1], pos, len);
+ break;
+#endif
+#if TCG_TARGET_HAS_sextract_i64
+ case INDEX_op_sextract_i64:
+ tci_args_rrbb(insn, &r0, &r1, &pos, &len);
+ regs[r0] = sextract64(regs[r1], pos, len);
break;
#endif
case INDEX_op_brcond_i64:
- t0 = tci_read_rval(regs, &tb_ptr);
- t1 = tci_read_rval(regs, &tb_ptr);
- condition = *tb_ptr++;
- label = tci_read_label(&tb_ptr);
- if (tci_compare64(t0, t1, condition)) {
- tci_assert(tb_ptr == old_code_ptr + op_size);
- tb_ptr = (uint8_t *)label;
- continue;
+ tci_args_rl(insn, tb_ptr, &r0, &ptr);
+ if (regs[r0]) {
+ tb_ptr = ptr;
}
break;
case INDEX_op_ext32s_i64:
case INDEX_op_ext_i32_i64:
- tci_args_rr(&tb_ptr, &r0, &r1);
+ tci_args_rr(insn, &r0, &r1);
regs[r0] = (int32_t)regs[r1];
break;
case INDEX_op_ext32u_i64:
case INDEX_op_extu_i32_i64:
- tci_args_rr(&tb_ptr, &r0, &r1);
+ tci_args_rr(insn, &r0, &r1);
regs[r0] = (uint32_t)regs[r1];
break;
#if TCG_TARGET_HAS_bswap64_i64
case INDEX_op_bswap64_i64:
- tci_args_rr(&tb_ptr, &r0, &r1);
+ tci_args_rr(insn, &r0, &r1);
regs[r0] = bswap64(regs[r1]);
break;
#endif
/* QEMU specific operations. */
case INDEX_op_exit_tb:
- ret = *(uint64_t *)tb_ptr;
- goto exit;
- break;
+ tci_args_l(insn, tb_ptr, &ptr);
+ return (uintptr_t)ptr;
+
case INDEX_op_goto_tb:
- /* Jump address is aligned */
- tb_ptr = QEMU_ALIGN_PTR_UP(tb_ptr, 4);
- t0 = qatomic_read((int32_t *)tb_ptr);
- tb_ptr += sizeof(int32_t);
- tci_assert(tb_ptr == old_code_ptr + op_size);
- tb_ptr += (int32_t)t0;
- continue;
- case INDEX_op_qemu_ld_i32:
- t0 = *tb_ptr++;
- taddr = tci_read_ulong(regs, &tb_ptr);
- oi = tci_read_i(&tb_ptr);
- switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
- case MO_UB:
- tmp32 = qemu_ld_ub;
- break;
- case MO_SB:
- tmp32 = (int8_t)qemu_ld_ub;
- break;
- case MO_LEUW:
- tmp32 = qemu_ld_leuw;
- break;
- case MO_LESW:
- tmp32 = (int16_t)qemu_ld_leuw;
- break;
- case MO_LEUL:
- tmp32 = qemu_ld_leul;
- break;
- case MO_BEUW:
- tmp32 = qemu_ld_beuw;
- break;
- case MO_BESW:
- tmp32 = (int16_t)qemu_ld_beuw;
- break;
- case MO_BEUL:
- tmp32 = qemu_ld_beul;
- break;
- default:
- g_assert_not_reached();
+ tci_args_l(insn, tb_ptr, &ptr);
+ tb_ptr = *(void **)ptr;
+ break;
+
+ case INDEX_op_goto_ptr:
+ tci_args_r(insn, &r0);
+ ptr = (void *)regs[r0];
+ if (!ptr) {
+ return 0;
}
- tci_write_reg(regs, t0, tmp32);
+ tb_ptr = ptr;
break;
- case INDEX_op_qemu_ld_i64:
- t0 = *tb_ptr++;
- if (TCG_TARGET_REG_BITS == 32) {
- t1 = *tb_ptr++;
+
+ case INDEX_op_qemu_ld_a32_i32:
+ tci_args_rrm(insn, &r0, &r1, &oi);
+ taddr = (uint32_t)regs[r1];
+ goto do_ld_i32;
+ case INDEX_op_qemu_ld_a64_i32:
+ if (TCG_TARGET_REG_BITS == 64) {
+ tci_args_rrm(insn, &r0, &r1, &oi);
+ taddr = regs[r1];
+ } else {
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+ taddr = tci_uint64(regs[r2], regs[r1]);
+ oi = regs[r3];
}
- taddr = tci_read_ulong(regs, &tb_ptr);
- oi = tci_read_i(&tb_ptr);
- switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
- case MO_UB:
- tmp64 = qemu_ld_ub;
- break;
- case MO_SB:
- tmp64 = (int8_t)qemu_ld_ub;
- break;
- case MO_LEUW:
- tmp64 = qemu_ld_leuw;
- break;
- case MO_LESW:
- tmp64 = (int16_t)qemu_ld_leuw;
- break;
- case MO_LEUL:
- tmp64 = qemu_ld_leul;
- break;
- case MO_LESL:
- tmp64 = (int32_t)qemu_ld_leul;
- break;
- case MO_LEQ:
- tmp64 = qemu_ld_leq;
- break;
- case MO_BEUW:
- tmp64 = qemu_ld_beuw;
- break;
- case MO_BESW:
- tmp64 = (int16_t)qemu_ld_beuw;
- break;
- case MO_BEUL:
- tmp64 = qemu_ld_beul;
- break;
- case MO_BESL:
- tmp64 = (int32_t)qemu_ld_beul;
- break;
- case MO_BEQ:
- tmp64 = qemu_ld_beq;
- break;
- default:
- g_assert_not_reached();
+ do_ld_i32:
+ regs[r0] = tci_qemu_ld(env, taddr, oi, tb_ptr);
+ break;
+
+ case INDEX_op_qemu_ld_a32_i64:
+ if (TCG_TARGET_REG_BITS == 64) {
+ tci_args_rrm(insn, &r0, &r1, &oi);
+ taddr = (uint32_t)regs[r1];
+ } else {
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+ taddr = (uint32_t)regs[r2];
+ oi = regs[r3];
}
- tci_write_reg(regs, t0, tmp64);
+ goto do_ld_i64;
+ case INDEX_op_qemu_ld_a64_i64:
+ if (TCG_TARGET_REG_BITS == 64) {
+ tci_args_rrm(insn, &r0, &r1, &oi);
+ taddr = regs[r1];
+ } else {
+ tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
+ taddr = tci_uint64(regs[r3], regs[r2]);
+ oi = regs[r4];
+ }
+ do_ld_i64:
+ tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
if (TCG_TARGET_REG_BITS == 32) {
- tci_write_reg(regs, t1, tmp64 >> 32);
+ tci_write_reg64(regs, r1, r0, tmp64);
+ } else {
+ regs[r0] = tmp64;
}
break;
- case INDEX_op_qemu_st_i32:
- t0 = tci_read_rval(regs, &tb_ptr);
- taddr = tci_read_ulong(regs, &tb_ptr);
- oi = tci_read_i(&tb_ptr);
- switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) {
- case MO_UB:
- qemu_st_b(t0);
- break;
- case MO_LEUW:
- qemu_st_lew(t0);
- break;
- case MO_LEUL:
- qemu_st_lel(t0);
- break;
- case MO_BEUW:
- qemu_st_bew(t0);
- break;
- case MO_BEUL:
- qemu_st_bel(t0);
- break;
- default:
- g_assert_not_reached();
+
+ case INDEX_op_qemu_st_a32_i32:
+ tci_args_rrm(insn, &r0, &r1, &oi);
+ taddr = (uint32_t)regs[r1];
+ goto do_st_i32;
+ case INDEX_op_qemu_st_a64_i32:
+ if (TCG_TARGET_REG_BITS == 64) {
+ tci_args_rrm(insn, &r0, &r1, &oi);
+ taddr = regs[r1];
+ } else {
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+ taddr = tci_uint64(regs[r2], regs[r1]);
+ oi = regs[r3];
}
+ do_st_i32:
+ tci_qemu_st(env, taddr, regs[r0], oi, tb_ptr);
break;
- case INDEX_op_qemu_st_i64:
- tmp64 = tci_read_r64(regs, &tb_ptr);
- taddr = tci_read_ulong(regs, &tb_ptr);
- oi = tci_read_i(&tb_ptr);
- switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) {
- case MO_UB:
- qemu_st_b(tmp64);
- break;
- case MO_LEUW:
- qemu_st_lew(tmp64);
- break;
- case MO_LEUL:
- qemu_st_lel(tmp64);
- break;
- case MO_LEQ:
- qemu_st_leq(tmp64);
- break;
- case MO_BEUW:
- qemu_st_bew(tmp64);
- break;
- case MO_BEUL:
- qemu_st_bel(tmp64);
- break;
- case MO_BEQ:
- qemu_st_beq(tmp64);
- break;
- default:
- g_assert_not_reached();
+
+ case INDEX_op_qemu_st_a32_i64:
+ if (TCG_TARGET_REG_BITS == 64) {
+ tci_args_rrm(insn, &r0, &r1, &oi);
+ tmp64 = regs[r0];
+ taddr = (uint32_t)regs[r1];
+ } else {
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+ tmp64 = tci_uint64(regs[r1], regs[r0]);
+ taddr = (uint32_t)regs[r2];
+ oi = regs[r3];
+ }
+ goto do_st_i64;
+ case INDEX_op_qemu_st_a64_i64:
+ if (TCG_TARGET_REG_BITS == 64) {
+ tci_args_rrm(insn, &r0, &r1, &oi);
+ tmp64 = regs[r0];
+ taddr = regs[r1];
+ } else {
+ tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
+ tmp64 = tci_uint64(regs[r1], regs[r0]);
+ taddr = tci_uint64(regs[r3], regs[r2]);
+ oi = regs[r4];
}
+ do_st_i64:
+ tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
break;
+
case INDEX_op_mb:
/* Ensure ordering for all kinds */
            smp_mb();
            break;
default:
g_assert_not_reached();
}
- tci_assert(tb_ptr == old_code_ptr + op_size);
}
-exit:
- return ret;
+}
+
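/*
 * Illustrative only: the ffi_arg widening handled in the INDEX_op_call
 * result switch above is generic libffi behaviour, not TCI-specific.  Any
 * integral return narrower than ffi_arg is written back widened, so the
 * receiving slot must be at least sizeof(ffi_arg) and the caller truncates
 * afterwards -- a minimal sketch of a direct caller:
 */
static uint32_t tci_example_ffi_ret_u32(ffi_cif *cif, void (*fn)(void),
                                        void **args)
{
    ffi_arg ret;

    ffi_call(cif, fn, &ret, args);   /* libffi writes a full ffi_arg */
    return (uint32_t)ret;            /* truncate, as "case 1" does above */
}
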
+/*
+ * Disassembler that matches the interpreter
+ */
+
+static const char *str_r(TCGReg r)
+{
+ static const char regs[TCG_TARGET_NB_REGS][4] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
+ };
+
+ QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
+ QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);
+
+ assert((unsigned)r < TCG_TARGET_NB_REGS);
+ return regs[r];
+}
+
+static const char *str_c(TCGCond c)
+{
+ static const char cond[16][8] = {
+ [TCG_COND_NEVER] = "never",
+ [TCG_COND_ALWAYS] = "always",
+ [TCG_COND_EQ] = "eq",
+ [TCG_COND_NE] = "ne",
+ [TCG_COND_LT] = "lt",
+ [TCG_COND_GE] = "ge",
+ [TCG_COND_LE] = "le",
+ [TCG_COND_GT] = "gt",
+ [TCG_COND_LTU] = "ltu",
+ [TCG_COND_GEU] = "geu",
+ [TCG_COND_LEU] = "leu",
+ [TCG_COND_GTU] = "gtu",
+ };
+
+ assert((unsigned)c < ARRAY_SIZE(cond));
+ assert(cond[c][0] != 0);
+ return cond[c];
+}
+
+/* Disassemble TCI bytecode. */
+int print_insn_tci(bfd_vma addr, disassemble_info *info)
+{
+ const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
+ const TCGOpDef *def;
+ const char *op_name;
+ uint32_t insn;
+ TCGOpcode op;
+ TCGReg r0, r1, r2, r3, r4, r5;
+ tcg_target_ulong i1;
+ int32_t s2;
+ TCGCond c;
+ MemOpIdx oi;
+ uint8_t pos, len;
+ void *ptr;
+
+ /* TCI is always the host, so we don't need to load indirect. */
+ insn = *tb_ptr++;
+
+ info->fprintf_func(info->stream, "%08x ", insn);
+
+ op = extract32(insn, 0, 8);
+ def = &tcg_op_defs[op];
+ op_name = def->name;
+
+ switch (op) {
+ case INDEX_op_br:
+ case INDEX_op_exit_tb:
+ case INDEX_op_goto_tb:
+ tci_args_l(insn, tb_ptr, &ptr);
+ info->fprintf_func(info->stream, "%-12s %p", op_name, ptr);
+ break;
+
+ case INDEX_op_goto_ptr:
+ tci_args_r(insn, &r0);
+ info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0));
+ break;
+
+ case INDEX_op_call:
+ tci_args_nl(insn, tb_ptr, &len, &ptr);
+ info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);
+ break;
+
+ case INDEX_op_brcond_i32:
+ case INDEX_op_brcond_i64:
+ tci_args_rl(insn, tb_ptr, &r0, &ptr);
+ info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
+ op_name, str_r(r0), ptr);
+ break;
+
+ case INDEX_op_setcond_i32:
+ case INDEX_op_setcond_i64:
+ tci_args_rrrc(insn, &r0, &r1, &r2, &c);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
+ op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
+ break;
+
+ case INDEX_op_tci_movi:
+ tci_args_ri(insn, &r0, &i1);
+ info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
+ op_name, str_r(r0), i1);
+ break;
+
+ case INDEX_op_tci_movl:
+ tci_args_rl(insn, tb_ptr, &r0, &ptr);
+ info->fprintf_func(info->stream, "%-12s %s, %p",
+ op_name, str_r(r0), ptr);
+ break;
+
+ case INDEX_op_ld8u_i32:
+ case INDEX_op_ld8u_i64:
+ case INDEX_op_ld8s_i32:
+ case INDEX_op_ld8s_i64:
+ case INDEX_op_ld16u_i32:
+ case INDEX_op_ld16u_i64:
+ case INDEX_op_ld16s_i32:
+ case INDEX_op_ld16s_i64:
+ case INDEX_op_ld32u_i64:
+ case INDEX_op_ld32s_i64:
+ case INDEX_op_ld_i32:
+ case INDEX_op_ld_i64:
+ case INDEX_op_st8_i32:
+ case INDEX_op_st8_i64:
+ case INDEX_op_st16_i32:
+ case INDEX_op_st16_i64:
+ case INDEX_op_st32_i64:
+ case INDEX_op_st_i32:
+ case INDEX_op_st_i64:
+ tci_args_rrs(insn, &r0, &r1, &s2);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %d",
+ op_name, str_r(r0), str_r(r1), s2);
+ break;
+
+ case INDEX_op_mov_i32:
+ case INDEX_op_mov_i64:
+ case INDEX_op_ext8s_i32:
+ case INDEX_op_ext8s_i64:
+ case INDEX_op_ext8u_i32:
+ case INDEX_op_ext8u_i64:
+ case INDEX_op_ext16s_i32:
+ case INDEX_op_ext16s_i64:
+ case INDEX_op_ext16u_i32:
+ case INDEX_op_ext32s_i64:
+ case INDEX_op_ext32u_i64:
+ case INDEX_op_ext_i32_i64:
+ case INDEX_op_extu_i32_i64:
+ case INDEX_op_bswap16_i32:
+ case INDEX_op_bswap16_i64:
+ case INDEX_op_bswap32_i32:
+ case INDEX_op_bswap32_i64:
+ case INDEX_op_bswap64_i64:
+ case INDEX_op_not_i32:
+ case INDEX_op_not_i64:
+ case INDEX_op_neg_i32:
+ case INDEX_op_neg_i64:
+ case INDEX_op_ctpop_i32:
+ case INDEX_op_ctpop_i64:
+ tci_args_rr(insn, &r0, &r1);
+ info->fprintf_func(info->stream, "%-12s %s, %s",
+ op_name, str_r(r0), str_r(r1));
+ break;
+
+ case INDEX_op_add_i32:
+ case INDEX_op_add_i64:
+ case INDEX_op_sub_i32:
+ case INDEX_op_sub_i64:
+ case INDEX_op_mul_i32:
+ case INDEX_op_mul_i64:
+ case INDEX_op_and_i32:
+ case INDEX_op_and_i64:
+ case INDEX_op_or_i32:
+ case INDEX_op_or_i64:
+ case INDEX_op_xor_i32:
+ case INDEX_op_xor_i64:
+ case INDEX_op_andc_i32:
+ case INDEX_op_andc_i64:
+ case INDEX_op_orc_i32:
+ case INDEX_op_orc_i64:
+ case INDEX_op_eqv_i32:
+ case INDEX_op_eqv_i64:
+ case INDEX_op_nand_i32:
+ case INDEX_op_nand_i64:
+ case INDEX_op_nor_i32:
+ case INDEX_op_nor_i64:
+ case INDEX_op_div_i32:
+ case INDEX_op_div_i64:
+ case INDEX_op_rem_i32:
+ case INDEX_op_rem_i64:
+ case INDEX_op_divu_i32:
+ case INDEX_op_divu_i64:
+ case INDEX_op_remu_i32:
+ case INDEX_op_remu_i64:
+ case INDEX_op_shl_i32:
+ case INDEX_op_shl_i64:
+ case INDEX_op_shr_i32:
+ case INDEX_op_shr_i64:
+ case INDEX_op_sar_i32:
+ case INDEX_op_sar_i64:
+ case INDEX_op_rotl_i32:
+ case INDEX_op_rotl_i64:
+ case INDEX_op_rotr_i32:
+ case INDEX_op_rotr_i64:
+ case INDEX_op_clz_i32:
+ case INDEX_op_clz_i64:
+ case INDEX_op_ctz_i32:
+ case INDEX_op_ctz_i64:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s",
+ op_name, str_r(r0), str_r(r1), str_r(r2));
+ break;
+
+ case INDEX_op_deposit_i32:
+ case INDEX_op_deposit_i64:
+ tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
+ op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
+ break;
+
+ case INDEX_op_extract_i32:
+ case INDEX_op_extract_i64:
+ case INDEX_op_sextract_i32:
+ case INDEX_op_sextract_i64:
+ tci_args_rrbb(insn, &r0, &r1, &pos, &len);
+ info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d",
+ op_name, str_r(r0), str_r(r1), pos, len);
+ break;
+
+ case INDEX_op_movcond_i32:
+ case INDEX_op_movcond_i64:
+ case INDEX_op_setcond2_i32:
+ tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
+ op_name, str_r(r0), str_r(r1), str_r(r2),
+ str_r(r3), str_r(r4), str_c(c));
+ break;
+
+ case INDEX_op_mulu2_i32:
+ case INDEX_op_mulu2_i64:
+ case INDEX_op_muls2_i32:
+ case INDEX_op_muls2_i64:
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
+ op_name, str_r(r0), str_r(r1),
+ str_r(r2), str_r(r3));
+ break;
+
+ case INDEX_op_add2_i32:
+ case INDEX_op_add2_i64:
+ case INDEX_op_sub2_i32:
+ case INDEX_op_sub2_i64:
+ tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
+ op_name, str_r(r0), str_r(r1), str_r(r2),
+ str_r(r3), str_r(r4), str_r(r5));
+ break;
+
+ case INDEX_op_qemu_ld_a32_i32:
+ case INDEX_op_qemu_st_a32_i32:
+ len = 1 + 1;
+ goto do_qemu_ldst;
+ case INDEX_op_qemu_ld_a32_i64:
+ case INDEX_op_qemu_st_a32_i64:
+ case INDEX_op_qemu_ld_a64_i32:
+ case INDEX_op_qemu_st_a64_i32:
+ len = 1 + DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
+ goto do_qemu_ldst;
+ case INDEX_op_qemu_ld_a64_i64:
+ case INDEX_op_qemu_st_a64_i64:
+ len = 2 * DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
+ goto do_qemu_ldst;
+ do_qemu_ldst:
+ switch (len) {
+ case 2:
+ tci_args_rrm(insn, &r0, &r1, &oi);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %x",
+ op_name, str_r(r0), str_r(r1), oi);
+ break;
+ case 3:
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
+ op_name, str_r(r0), str_r(r1),
+ str_r(r2), str_r(r3));
+ break;
+ case 4:
+ tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s",
+ op_name, str_r(r0), str_r(r1),
+ str_r(r2), str_r(r3), str_r(r4));
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+
+ case 0:
+ /* tcg_out_nop_fill uses zeros */
+ if (insn == 0) {
+ info->fprintf_func(info->stream, "align");
+ break;
+ }
+ /* fall through */
+
+ default:
+ info->fprintf_func(info->stream, "illegal opcode %d", op);
+ break;
+ }
+
+ return sizeof(insn);
}