static uint8_t *tb_ret_addr;
-#ifdef _CALL_DARWIN
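+/* Presumably not every Darwin toolchain defines _CALL_DARWIN, so
+   accept __APPLE__ as well and funnel both through one local macro.  */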
+#if defined _CALL_DARWIN || defined __APPLE__
+#define TCG_TARGET_CALL_DARWIN
+#endif
+
+#ifdef TCG_TARGET_CALL_DARWIN
#define LINKAGE_AREA_SIZE 24
#define LR_OFFSET 8
#elif defined _CALL_AIX
TCG_REG_R29,
TCG_REG_R30,
TCG_REG_R31,
-#ifdef _CALL_DARWIN
+#ifdef TCG_TARGET_CALL_DARWIN
TCG_REG_R2,
#endif
TCG_REG_R3,
TCG_REG_R8,
TCG_REG_R9,
TCG_REG_R10,
-#ifndef _CALL_DARWIN
+#ifndef TCG_TARGET_CALL_DARWIN
TCG_REG_R11,
#endif
TCG_REG_R12,
};
static const int tcg_target_callee_save_regs[] = {
-#ifdef _CALL_DARWIN
+#ifdef TCG_TARGET_CALL_DARWIN
TCG_REG_R11,
TCG_REG_R13,
#endif
}
}
-/* maximum number of register used for input function arguments */
-static int tcg_target_get_call_iarg_regs_count(int flags)
-{
- return ARRAY_SIZE (tcg_target_call_iarg_regs);
-}
-
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
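+    /* Exclude the registers used to marshal the slow-path call
+       arguments (env plus the guest address), so the allocator
+       cannot place qemu_ld operands there.  */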
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
-#ifdef CONFIG_TCG_PASS_AREG0
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
#if TARGET_LONG_BITS == 64
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R7);
#endif
-#endif
-#else /* !AREG0 */
-#if TARGET_LONG_BITS == 64
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
-#endif
#endif
break;
case 'K': /* qemu_st[8..32] constraint */
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
-#ifdef CONFIG_TCG_PASS_AREG0
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
#if TARGET_LONG_BITS == 64
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R7);
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R8);
#endif
-#endif
-#else /* !AREG0 */
-#if TARGET_LONG_BITS == 64
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
-#endif
#endif
break;
case 'M': /* qemu_st64 constraint */
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R7);
-#if defined(CONFIG_TCG_PASS_AREG0)
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R8);
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R9);
-#endif
#endif
break;
#else
#define ORC XO31(412)
#define EQV XO31(284)
#define NAND XO31(476)
+#define ISEL XO31( 15)
#define LBZX XO31( 87)
#define LHZX XO31(279)
#include "../../softmmu_defs.h"
-#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
helper_stl_mmu,
helper_stq_mmu,
};
-#else
-/* legacy helper signature: __ld_mmu(target_ulong addr, int
- mmu_idx) */
-static void *qemu_ld_helpers[4] = {
- __ldb_mmu,
- __ldw_mmu,
- __ldl_mmu,
- __ldq_mmu,
-};
-
-/* legacy helper signature: __ld_mmu(target_ulong addr, int
- mmu_idx) */
-static void *qemu_st_helpers[4] = {
- __stb_mmu,
- __stw_mmu,
- __stl_mmu,
- __stq_mmu,
-};
-#endif
#endif
static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
/* slow path */
ir = 3;
-#ifdef CONFIG_TCG_PASS_AREG0
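+    /* env is now unconditionally the first helper argument.  */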
tcg_out_mov (s, TCG_TYPE_I32, ir++, TCG_AREG0);
-#endif
#if TARGET_LONG_BITS == 32
tcg_out_mov (s, TCG_TYPE_I32, ir++, addr_reg);
#else
/* slow path */
ir = 3;
-#ifdef CONFIG_TCG_PASS_AREG0
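+    /* Likewise for the store slow path: env goes first.  */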
tcg_out_mov (s, TCG_TYPE_I32, ir++, TCG_AREG0);
-#endif
#if TARGET_LONG_BITS == 32
tcg_out_mov (s, TCG_TYPE_I32, ir++, addr_reg);
#else
);
}
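+/* Emit dest = (c1 cond c2) ? v1 : v2.  The comparison goes to cr7 and
+   is then consumed either by a conditional branch around a mov or by
+   a single isel.  */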
+static void tcg_out_movcond (TCGContext *s, TCGCond cond,
+ TCGArg dest,
+ TCGArg c1, TCGArg c2,
+ TCGArg v1, TCGArg v2,
+ int const_c2)
+{
+ tcg_out_cmp (s, cond, c1, c2, const_c2, 7);
+
+ if (1) {
+        /* At least here on a 7447A, bit-twiddling hacks are outperformed
+           by jumpy code (the testing was not scientific).  */
+ if (dest == v2) {
+ cond = tcg_invert_cond (cond);
+ v2 = v1;
+ }
+ else {
+ if (dest != v1) {
+ tcg_out_mov (s, TCG_TYPE_I32, dest, v1);
+ }
+ }
+        /* Branch forward over one insn: the "| 8" supplies the byte
+           displacement, skipping the single 4-byte mov below.  */
+ tcg_out32 (s, tcg_to_bc[cond] | 8);
+ tcg_out_mov (s, TCG_TYPE_I32, dest, v2);
+ }
+ else {
+        /* isel version; the "if (1)" above should be replaced once a
+           way to detect whether the underlying hardware implements
+           isel is found.  */
+ int tab, bc;
+
+ switch (cond) {
+ case TCG_COND_EQ:
+ tab = TAB (dest, v1, v2);
+ bc = CR_EQ;
+ break;
+ case TCG_COND_NE:
+ tab = TAB (dest, v2, v1);
+ bc = CR_EQ;
+ break;
+ case TCG_COND_LTU:
+ case TCG_COND_LT:
+ tab = TAB (dest, v1, v2);
+ bc = CR_LT;
+ break;
+ case TCG_COND_GEU:
+ case TCG_COND_GE:
+ tab = TAB (dest, v2, v1);
+ bc = CR_LT;
+ break;
+ case TCG_COND_LEU:
+ case TCG_COND_LE:
+ tab = TAB (dest, v2, v1);
+ bc = CR_GT;
+ break;
+ case TCG_COND_GTU:
+ case TCG_COND_GT:
+ tab = TAB (dest, v1, v2);
+ bc = CR_GT;
+ break;
+ default:
+ tcg_abort ();
+ }
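+        /* The compare above used cr7, so the CR bit to test is
+           4 * 7 + bc; the isel BC field sits at bit 6 of the
+           encoding.  */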
+ tcg_out32 (s, ISEL | tab | ((bc + 28) << 6));
+ }
+}
+
static void tcg_out_brcond (TCGContext *s, TCGCond cond,
TCGArg arg1, TCGArg arg2, int const_arg2,
int label_index)
);
break;
+ case INDEX_op_movcond_i32:
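+        /* operands: dest, c1, c2, v1, v2; the condition is args[5].  */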
+ tcg_out_movcond (s, args[5], args[0],
+ args[1], args[2],
+ args[3], args[4],
+ const_args[2]);
+ break;
+
default:
- tcg_dump_ops (s, stderr);
+ tcg_dump_ops (s);
tcg_abort ();
}
}
{ INDEX_op_ext16u_i32, { "r", "r" } },
{ INDEX_op_deposit_i32, { "r", "0", "r" } },
+ { INDEX_op_movcond_i32, { "r", "r", "ri", "r", "r" } },
{ -1 },
};
tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
tcg_regset_set32(tcg_target_call_clobber_regs, 0,
(1 << TCG_REG_R0) |
-#ifdef _CALL_DARWIN
+#ifdef TCG_TARGET_CALL_DARWIN
(1 << TCG_REG_R2) |
#endif
(1 << TCG_REG_R3) |
tcg_regset_clear(s->reserved_regs);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);
-#ifndef _CALL_DARWIN
+#ifndef TCG_TARGET_CALL_DARWIN
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2);
#endif
#ifdef _CALL_SYSV