#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
-
+#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
-
#include "exec/translator.h"
#include "exec/log.h"
#include "asi.h"
#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E) qemu_build_not_reached()
+# define gen_helper_rett(E) qemu_build_not_reached()
# define gen_helper_power_down(E) qemu_build_not_reached()
# define gen_helper_wrpsr(E, S) qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S) qemu_build_not_reached()
+# define gen_helper_done(E) qemu_build_not_reached()
+# define gen_helper_fabsd(D, S) qemu_build_not_reached()
# define gen_helper_flushw(E) qemu_build_not_reached()
+# define gen_helper_fnegd(D, S) qemu_build_not_reached()
# define gen_helper_rdccr(D, E) qemu_build_not_reached()
# define gen_helper_rdcwp(D, E) qemu_build_not_reached()
# define gen_helper_restored(E) qemu_build_not_reached()
+# define gen_helper_retry(E) qemu_build_not_reached()
# define gen_helper_saved(E) qemu_build_not_reached()
# define gen_helper_set_softint(E, S) qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached()
# define gen_helper_write_softint(E, S) qemu_build_not_reached()
# define gen_helper_wrpil(E, S) qemu_build_not_reached()
# define gen_helper_wrpstate(E, S) qemu_build_not_reached()
+# define gen_helper_fabsq ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fcmpeq16 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fcmpeq32 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fcmpgt16 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fcmpgt32 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fcmple16 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fcmple32 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fcmpne16 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fcmpne32 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fdtox ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fexpand ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fmul8sux16 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fmul8ulx16 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fmul8x16al ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fmul8x16au ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fmul8x16 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fmuld8sux16 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fmuld8ulx16 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fnegq ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fpmerge ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fqtox ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fstox ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fxtod ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fxtoq ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fxtos ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_pdist ({ qemu_build_not_reached(); NULL; })
+# define FSR_LDXFSR_MASK 0
+# define FSR_LDXFSR_OLDMASK 0
# define MAXTL_MASK 0
#endif
/* global register indexes */
static TCGv_ptr cpu_regwptr;
-static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
-static TCGv_i32 cpu_cc_op;
-static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
+static TCGv cpu_cc_N;
+static TCGv cpu_cc_V;
+static TCGv cpu_icc_Z;
+static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
-static TCGv_i32 cpu_xcc, cpu_fprs;
+static TCGv cpu_xcc_Z;
+static TCGv cpu_xcc_C;
+static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif
+
+#ifdef TARGET_SPARC64
+#define cpu_cc_Z cpu_xcc_Z
+#define cpu_cc_C cpu_xcc_C
+#else
+#define cpu_cc_Z cpu_icc_Z
+#define cpu_cc_C cpu_icc_C
+#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
+#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
+#endif
+
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
# define env64_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
#endif
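+/*
+ * A decoded condition: evaluate (c1 <cond> c2).  Note that c2 is now a
+ * small immediate rather than a TCGv; for every compare built in this
+ * file it is zero.
+ */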
+typedef struct DisasCompare {
+ TCGCond cond;
+ TCGv c1;
+ int c2;
+} DisasCompare;
+
typedef struct DisasDelayException {
struct DisasDelayException *next;
TCGLabel *lab;
DisasContextBase base;
target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
- target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
+
+    /* Used when npc == JUMP_PC. */
+ DisasCompare jump;
+ target_ulong jump_pc[2];
+
int mem_idx;
+ bool cpu_cond_live;
bool fpu_enabled;
bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
#endif
#endif
- uint32_t cc_op; /* current CC operation */
sparc_def_t *def;
#ifdef TARGET_SPARC64
int fprs_dirty;
DisasDelayException *delay_excp_list;
} DisasContext;
-typedef struct {
- TCGCond cond;
- bool is_bool;
- TCGv c1, c2;
-} DisasCompare;
-
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO) \
((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
-static int sign_extend(int x, int len)
-{
- len = 32 - len;
- return (x << len) >> len;
-}
-
#define IS_IMM (insn & (1<<13))
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
offsetof(CPU_QuadU, ll.lower));
}
-static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
- TCGv_i64 v1, TCGv_i64 v2)
-{
- dst = QFPREG(dst);
-
- tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
- tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
- gen_update_fprs_dirty(dc, dst);
-}
-
-#ifdef TARGET_SPARC64
-static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
-{
- src = QFPREG(src);
- return cpu_fpr[src / 2];
-}
-
-static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
-{
- src = QFPREG(src);
- return cpu_fpr[src / 2 + 1];
-}
-
-static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
-{
- rd = QFPREG(rd);
- rs = QFPREG(rs);
-
- tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
- tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
- gen_update_fprs_dirty(dc, rd);
-}
-#endif
-
/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
}
}
-// XXX suboptimal
-static void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
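+/*
+ * Return the 32-bit carry as a 0/1 value.  On sparc64 the carry out of
+ * bit 31 is cached in bit 32 of cpu_icc_C; on sparc32 cpu_icc_C holds
+ * the carry bit directly.
+ */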
+static TCGv gen_carry32(void)
{
- tcg_gen_extu_i32_tl(reg, src);
- tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
-}
-
-static void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
-{
- tcg_gen_extu_i32_tl(reg, src);
- tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
+ if (TARGET_LONG_BITS == 64) {
+ TCGv t = tcg_temp_new();
+ tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
+ return t;
+ }
+ return cpu_icc_C;
}
-static void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
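+/*
+ * Add with flags: dst = src1 + src2 (+ cin).  The raw result is kept in
+ * cc_N and cc_Z for lazy N/Z evaluation, the carry out in cc_C, and the
+ * signed overflow in the sign bit (bit 31 for icc) of cc_V.
+ */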
+static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
- tcg_gen_extu_i32_tl(reg, src);
- tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
-}
+ TCGv z = tcg_constant_tl(0);
-static void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
-{
- tcg_gen_extu_i32_tl(reg, src);
- tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
+ if (cin) {
+ tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
+ tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
+ } else {
+ tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
+ }
+ tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
+ tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
+ tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
+ if (TARGET_LONG_BITS == 64) {
+ /*
+ * Carry-in to bit 32 is result ^ src1 ^ src2.
+ * We already have the src xor term in Z, from computation of V.
+ */
+ tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
+ tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
+ }
+ tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
+ tcg_gen_mov_tl(dst, cpu_cc_N);
}
-static void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
+static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
- tcg_gen_mov_tl(cpu_cc_src, src1);
- tcg_gen_mov_tl(cpu_cc_src2, src2);
- tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
- tcg_gen_mov_tl(dst, cpu_cc_dst);
+ gen_op_addcc_int(dst, src1, src2, NULL);
}
-static TCGv_i32 gen_add32_carry32(void)
+static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
- TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
+ TCGv t = tcg_temp_new();
- /* Carry is computed from a previous add: (dst < src) */
-#if TARGET_LONG_BITS == 64
- cc_src1_32 = tcg_temp_new_i32();
- cc_src2_32 = tcg_temp_new_i32();
- tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
- tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
-#else
- cc_src1_32 = cpu_cc_dst;
- cc_src2_32 = cpu_cc_src;
-#endif
+ /* Save the tag bits around modification of dst. */
+ tcg_gen_or_tl(t, src1, src2);
- carry_32 = tcg_temp_new_i32();
- tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
+ gen_op_addcc(dst, src1, src2);
- return carry_32;
+    /* Incorporate tag bits into icc.V */
+ tcg_gen_andi_tl(t, t, 3);
+ tcg_gen_neg_tl(t, t);
+ tcg_gen_ext32u_tl(t, t);
+ tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}
-static TCGv_i32 gen_sub32_carry32(void)
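+/* ADDC/ADDX without flag update: a plain add plus the 32-bit carry. */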
+static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
- TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
-
- /* Carry is computed from a previous borrow: (src1 < src2) */
-#if TARGET_LONG_BITS == 64
- cc_src1_32 = tcg_temp_new_i32();
- cc_src2_32 = tcg_temp_new_i32();
- tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
- tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
-#else
- cc_src1_32 = cpu_cc_src;
- cc_src2_32 = cpu_cc_src2;
-#endif
-
- carry_32 = tcg_temp_new_i32();
- tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
-
- return carry_32;
+ tcg_gen_add_tl(dst, src1, src2);
+ tcg_gen_add_tl(dst, dst, gen_carry32());
}
-static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
- TCGv src2, int update_cc)
+static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
- TCGv_i32 carry_32;
- TCGv carry;
-
- switch (dc->cc_op) {
- case CC_OP_DIV:
- case CC_OP_LOGIC:
- /* Carry is known to be zero. Fall back to plain ADD. */
- if (update_cc) {
- gen_op_add_cc(dst, src1, src2);
- } else {
- tcg_gen_add_tl(dst, src1, src2);
- }
- return;
-
- case CC_OP_ADD:
- case CC_OP_TADD:
- case CC_OP_TADDTV:
- if (TARGET_LONG_BITS == 32) {
- /* We can re-use the host's hardware carry generation by using
- an ADD2 opcode. We discard the low part of the output.
- Ideally we'd combine this operation with the add that
- generated the carry in the first place. */
- carry = tcg_temp_new();
- tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
- goto add_done;
- }
- carry_32 = gen_add32_carry32();
- break;
+ gen_op_addcc_int(dst, src1, src2, gen_carry32());
+}
- case CC_OP_SUB:
- case CC_OP_TSUB:
- case CC_OP_TSUBTV:
- carry_32 = gen_sub32_carry32();
- break;
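+/*
+ * Subtract with flags, mirroring gen_op_addcc_int: sub2 leaves -borrow
+ * in cc_C, which is negated to a 0/1 carry, and the signed overflow
+ * lands in the sign bit (bit 31 for icc) of cc_V.
+ */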
+static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
+{
+ TCGv z = tcg_constant_tl(0);
- default:
- /* We need external help to produce the carry. */
- carry_32 = tcg_temp_new_i32();
- gen_helper_compute_C_icc(carry_32, tcg_env);
- break;
+ if (cin) {
+ tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
+ tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
+ } else {
+ tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
}
-
-#if TARGET_LONG_BITS == 64
- carry = tcg_temp_new();
- tcg_gen_extu_i32_i64(carry, carry_32);
-#else
- carry = carry_32;
+ tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
+ tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
+ tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
+ tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
+#ifdef TARGET_SPARC64
+ tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
+ tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
-
- tcg_gen_add_tl(dst, src1, src2);
- tcg_gen_add_tl(dst, dst, carry);
-
- add_done:
- if (update_cc) {
- tcg_gen_mov_tl(cpu_cc_src, src1);
- tcg_gen_mov_tl(cpu_cc_src2, src2);
- tcg_gen_mov_tl(cpu_cc_dst, dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
- dc->cc_op = CC_OP_ADDX;
- }
+ tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
+ tcg_gen_mov_tl(dst, cpu_cc_N);
}
-static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
+static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
- tcg_gen_mov_tl(cpu_cc_src, src1);
- tcg_gen_mov_tl(cpu_cc_src2, src2);
- tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
- tcg_gen_mov_tl(dst, cpu_cc_dst);
+ gen_op_subcc_int(dst, src1, src2, NULL);
}
-static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
- TCGv src2, int update_cc)
+static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
- TCGv_i32 carry_32;
- TCGv carry;
-
- switch (dc->cc_op) {
- case CC_OP_DIV:
- case CC_OP_LOGIC:
- /* Carry is known to be zero. Fall back to plain SUB. */
- if (update_cc) {
- gen_op_sub_cc(dst, src1, src2);
- } else {
- tcg_gen_sub_tl(dst, src1, src2);
- }
- return;
-
- case CC_OP_ADD:
- case CC_OP_TADD:
- case CC_OP_TADDTV:
- carry_32 = gen_add32_carry32();
- break;
+ TCGv t = tcg_temp_new();
- case CC_OP_SUB:
- case CC_OP_TSUB:
- case CC_OP_TSUBTV:
- if (TARGET_LONG_BITS == 32) {
- /* We can re-use the host's hardware carry generation by using
- a SUB2 opcode. We discard the low part of the output.
- Ideally we'd combine this operation with the add that
- generated the carry in the first place. */
- carry = tcg_temp_new();
- tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
- goto sub_done;
- }
- carry_32 = gen_sub32_carry32();
- break;
+ /* Save the tag bits around modification of dst. */
+ tcg_gen_or_tl(t, src1, src2);
- default:
- /* We need external help to produce the carry. */
- carry_32 = tcg_temp_new_i32();
- gen_helper_compute_C_icc(carry_32, tcg_env);
- break;
- }
+ gen_op_subcc(dst, src1, src2);
-#if TARGET_LONG_BITS == 64
- carry = tcg_temp_new();
- tcg_gen_extu_i32_i64(carry, carry_32);
-#else
- carry = carry_32;
-#endif
+    /* Incorporate tag bits into icc.V */
+ tcg_gen_andi_tl(t, t, 3);
+ tcg_gen_neg_tl(t, t);
+ tcg_gen_ext32u_tl(t, t);
+ tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
+}
+static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
+{
tcg_gen_sub_tl(dst, src1, src2);
- tcg_gen_sub_tl(dst, dst, carry);
+ tcg_gen_sub_tl(dst, dst, gen_carry32());
+}
- sub_done:
- if (update_cc) {
- tcg_gen_mov_tl(cpu_cc_src, src1);
- tcg_gen_mov_tl(cpu_cc_src2, src2);
- tcg_gen_mov_tl(cpu_cc_dst, dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
- dc->cc_op = CC_OP_SUBX;
- }
+static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_op_subcc_int(dst, src1, src2, gen_carry32());
}
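+/*
+ * One step of the V8 MULScc multiply: bit 0 of %y decides whether src2
+ * is added, %y shifts right taking in bit 0 of src1, and N ^ V from the
+ * previous step shifts into bit 31 of src1 before the flag-setting add.
+ */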
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
- TCGv r_temp, zero, t0;
+ TCGv zero = tcg_constant_tl(0);
+ TCGv t_src1 = tcg_temp_new();
+ TCGv t_src2 = tcg_temp_new();
+ TCGv t0 = tcg_temp_new();
- r_temp = tcg_temp_new();
- t0 = tcg_temp_new();
+ tcg_gen_ext32u_tl(t_src1, src1);
+ tcg_gen_ext32u_tl(t_src2, src2);
- /* old op:
- if (!(env->y & 1))
- T1 = 0;
- */
- zero = tcg_constant_tl(0);
- tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
- tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
- tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
- tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
- zero, cpu_cc_src2);
+ /*
+ * if (!(env->y & 1))
+ * src2 = 0;
+ */
+ tcg_gen_andi_tl(t0, cpu_y, 0x1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t_src2, t0, zero, zero, t_src2);
- // b2 = T0 & 1;
- // env->y = (b2 << 31) | (env->y >> 1);
+ /*
+ * b2 = src1 & 1;
+ * y = (b2 << 31) | (y >> 1);
+ */
tcg_gen_extract_tl(t0, cpu_y, 1, 31);
- tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);
+ tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);
// b1 = N ^ V;
- gen_mov_reg_N(t0, cpu_psr);
- gen_mov_reg_V(r_temp, cpu_psr);
- tcg_gen_xor_tl(t0, t0, r_temp);
+ tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);
- // T0 = (b1 << 31) | (T0 >> 1);
- // src1 = T0;
- tcg_gen_shli_tl(t0, t0, 31);
- tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
- tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
-
- tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+ /*
+ * src1 = (b1 << 31) | (src1 >> 1)
+ */
+ tcg_gen_andi_tl(t0, t0, 1u << 31);
+ tcg_gen_shri_tl(t_src1, t_src1, 1);
+ tcg_gen_or_tl(t_src1, t_src1, t0);
- tcg_gen_mov_tl(dst, cpu_cc_dst);
+ gen_op_addcc(dst, t_src1, t_src2);
}
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
gen_op_multiply(dst, src1, src2, 1);
}
-// 1
-static void gen_op_eval_ba(TCGv dst)
+static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
- tcg_gen_movi_tl(dst, 1);
+#ifdef TARGET_SPARC64
+ gen_helper_sdiv(dst, tcg_env, src1, src2);
+ tcg_gen_ext32s_tl(dst, dst);
+#else
+ TCGv_i64 t64 = tcg_temp_new_i64();
+ gen_helper_sdiv(t64, tcg_env, src1, src2);
+ tcg_gen_trunc_i64_tl(dst, t64);
+#endif
}
-// Z
-static void gen_op_eval_be(TCGv dst, TCGv_i32 src)
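+/*
+ * The udiv/sdiv helpers return the 32-bit quotient in the low half of a
+ * 64-bit value and the V flag in the high half; unpack both into the
+ * flag registers, with Z mirroring the result and C cleared.
+ */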
+static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
- gen_mov_reg_Z(dst, src);
+ TCGv_i64 t64;
+
+#ifdef TARGET_SPARC64
+ t64 = cpu_cc_V;
+#else
+ t64 = tcg_temp_new_i64();
+#endif
+
+ gen_helper_udiv(t64, tcg_env, src1, src2);
+
+#ifdef TARGET_SPARC64
+ tcg_gen_ext32u_tl(cpu_cc_N, t64);
+ tcg_gen_shri_tl(cpu_cc_V, t64, 32);
+ tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
+ tcg_gen_movi_tl(cpu_icc_C, 0);
+#else
+ tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
+#endif
+ tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
+ tcg_gen_movi_tl(cpu_cc_C, 0);
+ tcg_gen_mov_tl(dst, cpu_cc_N);
}
-// Z | (N ^ V)
-static void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
+static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
- TCGv t0 = tcg_temp_new();
- gen_mov_reg_N(t0, src);
- gen_mov_reg_V(dst, src);
- tcg_gen_xor_tl(dst, dst, t0);
- gen_mov_reg_Z(t0, src);
- tcg_gen_or_tl(dst, dst, t0);
+ TCGv_i64 t64;
+
+#ifdef TARGET_SPARC64
+ t64 = cpu_cc_V;
+#else
+ t64 = tcg_temp_new_i64();
+#endif
+
+ gen_helper_sdiv(t64, tcg_env, src1, src2);
+
+#ifdef TARGET_SPARC64
+ tcg_gen_ext32s_tl(cpu_cc_N, t64);
+ tcg_gen_shri_tl(cpu_cc_V, t64, 32);
+ tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
+ tcg_gen_movi_tl(cpu_icc_C, 0);
+#else
+ tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
+#endif
+ tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
+ tcg_gen_movi_tl(cpu_cc_C, 0);
+ tcg_gen_mov_tl(dst, cpu_cc_N);
}
-// N ^ V
-static void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
+static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
- TCGv t0 = tcg_temp_new();
- gen_mov_reg_V(t0, src);
- gen_mov_reg_N(dst, src);
- tcg_gen_xor_tl(dst, dst, t0);
+ gen_helper_taddcctv(dst, tcg_env, src1, src2);
}
-// C | Z
-static void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
+static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
- TCGv t0 = tcg_temp_new();
- gen_mov_reg_Z(t0, src);
- gen_mov_reg_C(dst, src);
- tcg_gen_or_tl(dst, dst, t0);
+ gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}
-// C
-static void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
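+/* POPC has rs1 fixed at %g0, so only src2 contributes to the count. */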
+static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
- gen_mov_reg_C(dst, src);
+ tcg_gen_ctpop_tl(dst, src2);
}
-// V
-static void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
+#ifndef TARGET_SPARC64
+static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
- gen_mov_reg_V(dst, src);
+ g_assert_not_reached();
}
+#endif
-// 0
-static void gen_op_eval_bn(TCGv dst)
+static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
- tcg_gen_movi_tl(dst, 0);
+ gen_helper_array8(dst, src1, src2);
+ tcg_gen_shli_tl(dst, dst, 1);
}
-// N
-static void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
+static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
- gen_mov_reg_N(dst, src);
+ gen_helper_array8(dst, src1, src2);
+ tcg_gen_shli_tl(dst, dst, 2);
}
-// !Z
-static void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
+static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
- gen_mov_reg_Z(dst, src);
- tcg_gen_xori_tl(dst, dst, 0x1);
+#ifdef TARGET_SPARC64
+ gen_helper_fpack16(dst, cpu_gsr, src);
+#else
+ g_assert_not_reached();
+#endif
}
-// !(Z | (N ^ V))
-static void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
+static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
- gen_op_eval_ble(dst, src);
- tcg_gen_xori_tl(dst, dst, 0x1);
+#ifdef TARGET_SPARC64
+ gen_helper_fpackfix(dst, cpu_gsr, src);
+#else
+ g_assert_not_reached();
+#endif
}
-// !(N ^ V)
-static void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
+static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
- gen_op_eval_bl(dst, src);
- tcg_gen_xori_tl(dst, dst, 0x1);
+#ifdef TARGET_SPARC64
+ gen_helper_fpack32(dst, cpu_gsr, src1, src2);
+#else
+ g_assert_not_reached();
+#endif
}
-// !(C | Z)
-static void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
+static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
- gen_op_eval_bleu(dst, src);
- tcg_gen_xori_tl(dst, dst, 0x1);
+#ifdef TARGET_SPARC64
+ TCGv t1, t2, shift;
+
+ t1 = tcg_temp_new();
+ t2 = tcg_temp_new();
+ shift = tcg_temp_new();
+
+ tcg_gen_andi_tl(shift, cpu_gsr, 7);
+ tcg_gen_shli_tl(shift, shift, 3);
+ tcg_gen_shl_tl(t1, s1, shift);
+
+ /*
+ * A shift of 64 does not produce 0 in TCG. Divide this into a
+ * shift of (up to 63) followed by a constant shift of 1.
+ */
+ tcg_gen_xori_tl(shift, shift, 63);
+ tcg_gen_shr_tl(t2, s2, shift);
+ tcg_gen_shri_tl(t2, t2, 1);
+
+ tcg_gen_or_tl(dst, t1, t2);
+#else
+ g_assert_not_reached();
+#endif
}
-// !C
-static void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
+static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
- gen_mov_reg_C(dst, src);
- tcg_gen_xori_tl(dst, dst, 0x1);
+#ifdef TARGET_SPARC64
+ gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
+#else
+ g_assert_not_reached();
+#endif
}
-// !N
-static void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
+// 1
+static void gen_op_eval_ba(TCGv dst)
{
- gen_mov_reg_N(dst, src);
- tcg_gen_xori_tl(dst, dst, 0x1);
+ tcg_gen_movi_tl(dst, 1);
}
-// !V
-static void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
+// 0
+static void gen_op_eval_bn(TCGv dst)
{
- gen_mov_reg_V(dst, src);
- tcg_gen_xori_tl(dst, dst, 0x1);
+ tcg_gen_movi_tl(dst, 0);
}
/*
tcg_gen_xori_tl(dst, dst, 0x1);
}
-static void gen_branch2(DisasContext *dc, target_ulong pc1,
- target_ulong pc2, TCGv r_cond)
+static void finishing_insn(DisasContext *dc)
{
- TCGLabel *l1 = gen_new_label();
-
- tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
-
- gen_goto_tb(dc, 0, pc1, pc1 + 4);
-
- gen_set_label(l1);
- gen_goto_tb(dc, 1, pc2, pc2 + 4);
+ /*
+ * From here, there is no future path through an unwinding exception.
+ * If the current insn cannot raise an exception, the computation of
+ * cpu_cond may be able to be elided.
+ */
+ if (dc->cpu_cond_live) {
+ tcg_gen_discard_tl(cpu_cond);
+ dc->cpu_cond_live = false;
+ }
}
static void gen_generic_branch(DisasContext *dc)
{
TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
- TCGv zero = tcg_constant_tl(0);
+ TCGv c2 = tcg_constant_tl(dc->jump.c2);
- tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
+ tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}
/* call this function before using the condition register as it may
}
}
-static void update_psr(DisasContext *dc)
-{
- if (dc->cc_op != CC_OP_FLAGS) {
- dc->cc_op = CC_OP_FLAGS;
- gen_helper_compute_psr(tcg_env);
- }
-}
-
static void save_state(DisasContext *dc)
{
tcg_gen_movi_tl(cpu_pc, dc->pc);
static void gen_exception(DisasContext *dc, int which)
{
+ finishing_insn(dc);
save_state(dc);
gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
dc->base.is_jmp = DISAS_NORETURN;
static void gen_mov_pc_npc(DisasContext *dc)
{
+ finishing_insn(dc);
+
if (dc->npc & 3) {
switch (dc->npc) {
case JUMP_PC:
}
}
-static void gen_op_next_insn(void)
-{
- tcg_gen_mov_tl(cpu_pc, cpu_npc);
- tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
-}
-
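+/*
+ * Decode one of the 16 icc/xcc conditions into *cmp.  Each condition is
+ * folded into a single comparison of c1 against zero; the negated forms
+ * (cond & 8) simply invert the TCG condition at the end.
+ */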
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
DisasContext *dc)
{
- static int subcc_cond[16] = {
- TCG_COND_NEVER,
- TCG_COND_EQ,
- TCG_COND_LE,
- TCG_COND_LT,
- TCG_COND_LEU,
- TCG_COND_LTU,
- -1, /* neg */
- -1, /* overflow */
- TCG_COND_ALWAYS,
- TCG_COND_NE,
- TCG_COND_GT,
- TCG_COND_GE,
- TCG_COND_GTU,
- TCG_COND_GEU,
- -1, /* pos */
- -1, /* no overflow */
- };
+ TCGv t1;
- static int logic_cond[16] = {
- TCG_COND_NEVER,
- TCG_COND_EQ, /* eq: Z */
- TCG_COND_LE, /* le: Z | (N ^ V) -> Z | N */
- TCG_COND_LT, /* lt: N ^ V -> N */
- TCG_COND_EQ, /* leu: C | Z -> Z */
- TCG_COND_NEVER, /* ltu: C -> 0 */
- TCG_COND_LT, /* neg: N */
- TCG_COND_NEVER, /* vs: V -> 0 */
- TCG_COND_ALWAYS,
- TCG_COND_NE, /* ne: !Z */
- TCG_COND_GT, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
- TCG_COND_GE, /* ge: !(N ^ V) -> !N */
- TCG_COND_NE, /* gtu: !(C | Z) -> !Z */
- TCG_COND_ALWAYS, /* geu: !C -> 1 */
- TCG_COND_GE, /* pos: !N */
- TCG_COND_ALWAYS, /* vc: !V -> 1 */
- };
+ cmp->c1 = t1 = tcg_temp_new();
+ cmp->c2 = 0;
- TCGv_i32 r_src;
- TCGv r_dst;
+ switch (cond & 7) {
+ case 0x0: /* never */
+ cmp->cond = TCG_COND_NEVER;
+ cmp->c1 = tcg_constant_tl(0);
+ break;
-#ifdef TARGET_SPARC64
- if (xcc) {
- r_src = cpu_xcc;
- } else {
- r_src = cpu_psr;
- }
-#else
- r_src = cpu_psr;
-#endif
+ case 0x1: /* eq: Z */
+ cmp->cond = TCG_COND_EQ;
+ if (TARGET_LONG_BITS == 32 || xcc) {
+ tcg_gen_mov_tl(t1, cpu_cc_Z);
+ } else {
+ tcg_gen_ext32u_tl(t1, cpu_icc_Z);
+ }
+ break;
- switch (dc->cc_op) {
- case CC_OP_LOGIC:
- cmp->cond = logic_cond[cond];
- do_compare_dst_0:
- cmp->is_bool = false;
- cmp->c2 = tcg_constant_tl(0);
-#ifdef TARGET_SPARC64
- if (!xcc) {
- cmp->c1 = tcg_temp_new();
- tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
- break;
+ case 0x2: /* le: Z | (N ^ V) */
+        /*
+         * Simplify:
+         *   cc_Z || (N ^ V) < 0                  NE
+         *   cc_Z && !((N ^ V) < 0)               EQ
+         *   cc_Z & ~((N ^ V) >>s (bits - 1))     EQ
+         * where >>s is an arithmetic shift over the full register
+         * width, i.e. the sextract below.
+         */
+ cmp->cond = TCG_COND_EQ;
+ tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
+ tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
+ tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
+ if (TARGET_LONG_BITS == 64 && !xcc) {
+ tcg_gen_ext32u_tl(t1, t1);
}
-#endif
- cmp->c1 = cpu_cc_dst;
break;
- case CC_OP_SUB:
- switch (cond) {
- case 6: /* neg */
- case 14: /* pos */
- cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
- goto do_compare_dst_0;
+ case 0x3: /* lt: N ^ V */
+ cmp->cond = TCG_COND_LT;
+ tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
+ if (TARGET_LONG_BITS == 64 && !xcc) {
+ tcg_gen_ext32s_tl(t1, t1);
+ }
+ break;
- case 7: /* overflow */
- case 15: /* !overflow */
- goto do_dynamic;
+ case 0x4: /* leu: Z | C */
+ /*
+ * Simplify:
+ * cc_Z == 0 || cc_C != 0 NE
+ * cc_Z != 0 && cc_C == 0 EQ
+ * cc_Z & (cc_C ? 0 : -1) EQ
+ * cc_Z & (cc_C - 1) EQ
+ */
+ cmp->cond = TCG_COND_EQ;
+ if (TARGET_LONG_BITS == 32 || xcc) {
+ tcg_gen_subi_tl(t1, cpu_cc_C, 1);
+ tcg_gen_and_tl(t1, t1, cpu_cc_Z);
+ } else {
+ tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
+ tcg_gen_subi_tl(t1, t1, 1);
+ tcg_gen_and_tl(t1, t1, cpu_icc_Z);
+ tcg_gen_ext32u_tl(t1, t1);
+ }
+ break;
- default:
- cmp->cond = subcc_cond[cond];
- cmp->is_bool = false;
-#ifdef TARGET_SPARC64
- if (!xcc) {
- /* Note that sign-extension works for unsigned compares as
- long as both operands are sign-extended. */
- cmp->c1 = tcg_temp_new();
- cmp->c2 = tcg_temp_new();
- tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
- tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
- break;
- }
-#endif
- cmp->c1 = cpu_cc_src;
- cmp->c2 = cpu_cc_src2;
- break;
+ case 0x5: /* ltu: C */
+ cmp->cond = TCG_COND_NE;
+ if (TARGET_LONG_BITS == 32 || xcc) {
+ tcg_gen_mov_tl(t1, cpu_cc_C);
+ } else {
+ tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
}
break;
- default:
- do_dynamic:
- gen_helper_compute_psr(tcg_env);
- dc->cc_op = CC_OP_FLAGS;
- /* FALLTHRU */
+ case 0x6: /* neg: N */
+ cmp->cond = TCG_COND_LT;
+ if (TARGET_LONG_BITS == 32 || xcc) {
+ tcg_gen_mov_tl(t1, cpu_cc_N);
+ } else {
+ tcg_gen_ext32s_tl(t1, cpu_cc_N);
+ }
+ break;
- case CC_OP_FLAGS:
- /* We're going to generate a boolean result. */
- cmp->cond = TCG_COND_NE;
- cmp->is_bool = true;
- cmp->c1 = r_dst = tcg_temp_new();
- cmp->c2 = tcg_constant_tl(0);
-
- switch (cond) {
- case 0x0:
- gen_op_eval_bn(r_dst);
- break;
- case 0x1:
- gen_op_eval_be(r_dst, r_src);
- break;
- case 0x2:
- gen_op_eval_ble(r_dst, r_src);
- break;
- case 0x3:
- gen_op_eval_bl(r_dst, r_src);
- break;
- case 0x4:
- gen_op_eval_bleu(r_dst, r_src);
- break;
- case 0x5:
- gen_op_eval_bcs(r_dst, r_src);
- break;
- case 0x6:
- gen_op_eval_bneg(r_dst, r_src);
- break;
- case 0x7:
- gen_op_eval_bvs(r_dst, r_src);
- break;
- case 0x8:
- gen_op_eval_ba(r_dst);
- break;
- case 0x9:
- gen_op_eval_bne(r_dst, r_src);
- break;
- case 0xa:
- gen_op_eval_bg(r_dst, r_src);
- break;
- case 0xb:
- gen_op_eval_bge(r_dst, r_src);
- break;
- case 0xc:
- gen_op_eval_bgu(r_dst, r_src);
- break;
- case 0xd:
- gen_op_eval_bcc(r_dst, r_src);
- break;
- case 0xe:
- gen_op_eval_bpos(r_dst, r_src);
- break;
- case 0xf:
- gen_op_eval_bvc(r_dst, r_src);
- break;
- }
- break;
- }
-}
+ case 0x7: /* vs: V */
+ cmp->cond = TCG_COND_LT;
+ if (TARGET_LONG_BITS == 32 || xcc) {
+ tcg_gen_mov_tl(t1, cpu_cc_V);
+ } else {
+ tcg_gen_ext32s_tl(t1, cpu_cc_V);
+ }
+ break;
+ }
+ if (cond & 8) {
+ cmp->cond = tcg_invert_cond(cmp->cond);
+ }
+}
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
/* For now we still generate a straight boolean result. */
cmp->cond = TCG_COND_NE;
- cmp->is_bool = true;
cmp->c1 = r_dst = tcg_temp_new();
- cmp->c2 = tcg_constant_tl(0);
+ cmp->c2 = 0;
switch (cc) {
default:
}
}
-// Inverted logic
-static const TCGCond gen_tcg_cond_reg[8] = {
- TCG_COND_NEVER, /* reserved */
- TCG_COND_NE,
- TCG_COND_GT,
- TCG_COND_GE,
- TCG_COND_NEVER, /* reserved */
- TCG_COND_EQ,
- TCG_COND_LE,
- TCG_COND_LT,
-};
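+/* Decode a register-contents condition (BPr/MOVr); false if reserved. */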
+static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
+{
+ static const TCGCond cond_reg[4] = {
+ TCG_COND_NEVER, /* reserved */
+ TCG_COND_EQ,
+ TCG_COND_LE,
+ TCG_COND_LT,
+ };
+ TCGCond tcond;
+
+ if ((cond & 3) == 0) {
+ return false;
+ }
+ tcond = cond_reg[cond & 3];
+ if (cond & 4) {
+ tcond = tcg_invert_cond(tcond);
+ }
+
+ cmp->cond = tcond;
+ cmp->c1 = tcg_temp_new();
+ cmp->c2 = 0;
+ tcg_gen_mov_tl(cmp->c1, r_src);
+ return true;
+}
+
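+/* Clear %fsr.ftt and the current exception (cexc) bits before an FPop. */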
+static void gen_op_clear_ieee_excp_and_FTT(void)
+{
+ tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
+}
+
+static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
+{
+ gen_op_clear_ieee_excp_and_FTT();
+ tcg_gen_mov_i32(dst, src);
+}
+
+static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
+{
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_helper_fnegs(dst, src);
+}
+
+static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
+{
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_helper_fabss(dst, src);
+}
+
+static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
+{
+ gen_op_clear_ieee_excp_and_FTT();
+ tcg_gen_mov_i64(dst, src);
+}
+
+static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
+{
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_helper_fnegd(dst, src);
+}
-static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
+static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
- cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
- cmp->is_bool = false;
- cmp->c1 = r_src;
- cmp->c2 = tcg_constant_tl(0);
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_helper_fabsd(dst, src);
}
#ifdef TARGET_SPARC64
return 0;
}
-static void gen_op_clear_ieee_excp_and_FTT(void)
-{
- tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
-}
-
-static void gen_fop_FF(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
-{
- TCGv_i32 dst, src;
-
- src = gen_load_fpr_F(dc, rs);
- dst = gen_dest_fpr_F(dc);
-
- gen(dst, tcg_env, src);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_store_fpr_F(dc, rd, dst);
-}
-
-static void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i32, TCGv_i32))
-{
- TCGv_i32 dst, src;
-
- src = gen_load_fpr_F(dc, rs);
- dst = gen_dest_fpr_F(dc);
-
- gen(dst, src);
-
- gen_store_fpr_F(dc, rd, dst);
-}
-
-static void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
-{
- TCGv_i32 dst, src1, src2;
-
- src1 = gen_load_fpr_F(dc, rs1);
- src2 = gen_load_fpr_F(dc, rs2);
- dst = gen_dest_fpr_F(dc);
-
- gen(dst, tcg_env, src1, src2);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_store_fpr_F(dc, rd, dst);
-}
-
-#ifdef TARGET_SPARC64
-static void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
-{
- TCGv_i32 dst, src1, src2;
-
- src1 = gen_load_fpr_F(dc, rs1);
- src2 = gen_load_fpr_F(dc, rs2);
- dst = gen_dest_fpr_F(dc);
-
- gen(dst, src1, src2);
-
- gen_store_fpr_F(dc, rd, dst);
-}
-#endif
-
-static void gen_fop_DD(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
-{
- TCGv_i64 dst, src;
-
- src = gen_load_fpr_D(dc, rs);
- dst = gen_dest_fpr_D(dc, rd);
-
- gen(dst, tcg_env, src);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_store_fpr_D(dc, rd, dst);
-}
-
-#ifdef TARGET_SPARC64
-static void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i64, TCGv_i64))
-{
- TCGv_i64 dst, src;
-
- src = gen_load_fpr_D(dc, rs);
- dst = gen_dest_fpr_D(dc, rd);
-
- gen(dst, src);
-
- gen_store_fpr_D(dc, rd, dst);
-}
-#endif
-
-static void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
-{
- TCGv_i64 dst, src1, src2;
-
- src1 = gen_load_fpr_D(dc, rs1);
- src2 = gen_load_fpr_D(dc, rs2);
- dst = gen_dest_fpr_D(dc, rd);
-
- gen(dst, tcg_env, src1, src2);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_store_fpr_D(dc, rd, dst);
-}
-
-#ifdef TARGET_SPARC64
-static void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
-{
- TCGv_i64 dst, src1, src2;
-
- src1 = gen_load_fpr_D(dc, rs1);
- src2 = gen_load_fpr_D(dc, rs2);
- dst = gen_dest_fpr_D(dc, rd);
-
- gen(dst, src1, src2);
-
- gen_store_fpr_D(dc, rd, dst);
-}
-
-static void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
-{
- TCGv_i64 dst, src1, src2;
-
- src1 = gen_load_fpr_D(dc, rs1);
- src2 = gen_load_fpr_D(dc, rs2);
- dst = gen_dest_fpr_D(dc, rd);
-
- gen(dst, cpu_gsr, src1, src2);
-
- gen_store_fpr_D(dc, rd, dst);
-}
-
-static void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
-{
- TCGv_i64 dst, src0, src1, src2;
-
- src1 = gen_load_fpr_D(dc, rs1);
- src2 = gen_load_fpr_D(dc, rs2);
- src0 = gen_load_fpr_D(dc, rd);
- dst = gen_dest_fpr_D(dc, rd);
-
- gen(dst, src0, src1, src2);
-
- gen_store_fpr_D(dc, rd, dst);
-}
-#endif
-
-static void gen_fop_QQ(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_ptr))
-{
- gen_op_load_fpr_QT1(QFPREG(rs));
-
- gen(tcg_env);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_op_store_QT0_fpr(QFPREG(rd));
- gen_update_fprs_dirty(dc, QFPREG(rd));
-}
-
-#ifdef TARGET_SPARC64
-static void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_ptr))
-{
- gen_op_load_fpr_QT1(QFPREG(rs));
-
- gen(tcg_env);
-
- gen_op_store_QT0_fpr(QFPREG(rd));
- gen_update_fprs_dirty(dc, QFPREG(rd));
-}
-#endif
-
-static void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_ptr))
-{
- gen_op_load_fpr_QT0(QFPREG(rs1));
- gen_op_load_fpr_QT1(QFPREG(rs2));
-
- gen(tcg_env);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_op_store_QT0_fpr(QFPREG(rd));
- gen_update_fprs_dirty(dc, QFPREG(rd));
-}
-
-static void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
-{
- TCGv_i64 dst;
- TCGv_i32 src1, src2;
-
- src1 = gen_load_fpr_F(dc, rs1);
- src2 = gen_load_fpr_F(dc, rs2);
- dst = gen_dest_fpr_D(dc, rd);
-
- gen(dst, tcg_env, src1, src2);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_store_fpr_D(dc, rd, dst);
-}
-
-static void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
-{
- TCGv_i64 src1, src2;
-
- src1 = gen_load_fpr_D(dc, rs1);
- src2 = gen_load_fpr_D(dc, rs2);
-
- gen(tcg_env, src1, src2);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_op_store_QT0_fpr(QFPREG(rd));
- gen_update_fprs_dirty(dc, QFPREG(rd));
-}
-
-#ifdef TARGET_SPARC64
-static void gen_fop_DF(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
-{
- TCGv_i64 dst;
- TCGv_i32 src;
-
- src = gen_load_fpr_F(dc, rs);
- dst = gen_dest_fpr_D(dc, rd);
-
- gen(dst, tcg_env, src);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_store_fpr_D(dc, rd, dst);
-}
-#endif
-
-static void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
-{
- TCGv_i64 dst;
- TCGv_i32 src;
-
- src = gen_load_fpr_F(dc, rs);
- dst = gen_dest_fpr_D(dc, rd);
-
- gen(dst, tcg_env, src);
-
- gen_store_fpr_D(dc, rd, dst);
-}
-
-static void gen_fop_FD(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
-{
- TCGv_i32 dst;
- TCGv_i64 src;
-
- src = gen_load_fpr_D(dc, rs);
- dst = gen_dest_fpr_F(dc);
-
- gen(dst, tcg_env, src);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_store_fpr_F(dc, rd, dst);
-}
-
-static void gen_fop_FQ(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i32, TCGv_ptr))
-{
- TCGv_i32 dst;
-
- gen_op_load_fpr_QT1(QFPREG(rs));
- dst = gen_dest_fpr_F(dc);
-
- gen(dst, tcg_env);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_store_fpr_F(dc, rd, dst);
-}
-
-static void gen_fop_DQ(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i64, TCGv_ptr))
-{
- TCGv_i64 dst;
-
- gen_op_load_fpr_QT1(QFPREG(rs));
- dst = gen_dest_fpr_D(dc, rd);
-
- gen(dst, tcg_env);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_store_fpr_D(dc, rd, dst);
-}
-
-static void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_ptr, TCGv_i32))
-{
- TCGv_i32 src;
-
- src = gen_load_fpr_F(dc, rs);
-
- gen(tcg_env, src);
-
- gen_op_store_QT0_fpr(QFPREG(rd));
- gen_update_fprs_dirty(dc, QFPREG(rd));
-}
-
-static void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_ptr, TCGv_i64))
-{
- TCGv_i64 src;
-
- src = gen_load_fpr_D(dc, rs);
-
- gen(tcg_env, src);
-
- gen_op_store_QT0_fpr(QFPREG(rd));
- gen_update_fprs_dirty(dc, QFPREG(rd));
-}
-
-static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
- TCGv addr, int mmu_idx, MemOp memop)
-{
- gen_address_mask(dc, addr);
- tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop | MO_ALIGN);
-}
-
-static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
-{
- TCGv m1 = tcg_constant_tl(0xff);
- gen_address_mask(dc, addr);
- tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
-}
-
/* asi moves */
-#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
typedef enum {
GET_ASI_HELPER,
GET_ASI_EXCP,
MemOp memop;
} DisasASI;
-static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop)
+/*
+ * Build DisasASI.
+ * For asi == -1, treat as non-asi.
+ * For asi == -2, treat as an immediate offset (v8 error, v9 %asi).
+ */
+static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
- int asi = GET_FIELD(insn, 19, 26);
ASIType type = GET_ASI_HELPER;
int mem_idx = dc->mem_idx;
+ if (asi == -1) {
+ /* Artificial "non-asi" case. */
+ type = GET_ASI_DIRECT;
+ goto done;
+ }
+
#ifndef TARGET_SPARC64
/* Before v9, all asis are immediate and privileged. */
- if (IS_IMM) {
+ if (asi < 0) {
gen_exception(dc, TT_ILL_INSN);
type = GET_ASI_EXCP;
} else if (supervisor(dc)
type = GET_ASI_EXCP;
}
#else
- if (IS_IMM) {
+ if (asi < 0) {
asi = dc->asi;
}
/* With v9, all asis below 0x80 are privileged. */
}
#endif
+ done:
return (DisasASI){ type, asi, mem_idx, memop };
}
-static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
- int insn, MemOp memop)
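+/*
+ * Stub out the ld/st ASI helpers for the sparc32 user-only build; on
+ * that configuration the ASI paths below are never reached at runtime.
+ */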
+#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
+static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
+ TCGv_i32 asi, TCGv_i32 mop)
{
- DisasASI da = get_asi(dc, insn, memop);
+ g_assert_not_reached();
+}
+
+static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
+ TCGv_i32 asi, TCGv_i32 mop)
+{
+ g_assert_not_reached();
+}
+#endif
- switch (da.type) {
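+/* Load one value from an alternate space; *da was built by resolve_asi. */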
+static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
+{
+ switch (da->type) {
case GET_ASI_EXCP:
break;
case GET_ASI_DTWINX: /* Reserved for ldda. */
gen_exception(dc, TT_ILL_INSN);
break;
case GET_ASI_DIRECT:
- gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop | MO_ALIGN);
+ tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
break;
default:
{
- TCGv_i32 r_asi = tcg_constant_i32(da.asi);
- TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
+ TCGv_i32 r_asi = tcg_constant_i32(da->asi);
+ TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
save_state(dc);
#ifdef TARGET_SPARC64
}
}
-static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
- int insn, MemOp memop)
+static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
- DisasASI da = get_asi(dc, insn, memop);
-
- switch (da.type) {
+ switch (da->type) {
case GET_ASI_EXCP:
break;
+
case GET_ASI_DTWINX: /* Reserved for stda. */
-#ifndef TARGET_SPARC64
- gen_exception(dc, TT_ILL_INSN);
- break;
-#else
- if (!(dc->def->features & CPU_FEATURE_HYPV)) {
+ if (TARGET_LONG_BITS == 32) {
+ gen_exception(dc, TT_ILL_INSN);
+ break;
+ } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
/* Pre OpenSPARC CPUs don't have these */
gen_exception(dc, TT_ILL_INSN);
- return;
+ break;
}
- /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
- * are ST_BLKINIT_ ASIs */
-#endif
+        /* In OpenSPARC T1+ CPUs, TWINX ASIs in stores are ST_BLKINIT_ ASIs */
/* fall through */
+
case GET_ASI_DIRECT:
- gen_address_mask(dc, addr);
- tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop | MO_ALIGN);
+ tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
break;
-#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
+
case GET_ASI_BCOPY:
+ assert(TARGET_LONG_BITS == 32);
/* Copy 32 bytes from the address in SRC to ADDR. */
/* ??? The original qemu code suggests 4-byte alignment, dropping
the low bits, but the only place I can see this used is in the
for (i = 0; i < 32; i += 4) {
/* Since the loads and stores are paired, allow the
copy to happen in the host endianness. */
- tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
- tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
+ tcg_gen_qemu_ld_i32(tmp, saddr, da->mem_idx, MO_UL);
+ tcg_gen_qemu_st_i32(tmp, daddr, da->mem_idx, MO_UL);
tcg_gen_add_tl(saddr, saddr, four);
tcg_gen_add_tl(daddr, daddr, four);
}
}
break;
-#endif
+
default:
{
- TCGv_i32 r_asi = tcg_constant_i32(da.asi);
- TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
+ TCGv_i32 r_asi = tcg_constant_i32(da->asi);
+ TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
save_state(dc);
#ifdef TARGET_SPARC64
}
}
-static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
- TCGv addr, int insn)
+static void gen_swap_asi(DisasContext *dc, DisasASI *da,
+ TCGv dst, TCGv src, TCGv addr)
{
- DisasASI da = get_asi(dc, insn, MO_TEUL);
-
- switch (da.type) {
+ switch (da->type) {
case GET_ASI_EXCP:
break;
case GET_ASI_DIRECT:
- gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
+ tcg_gen_atomic_xchg_tl(dst, addr, src,
+ da->mem_idx, da->memop | MO_ALIGN);
break;
default:
/* ??? Should be DAE_invalid_asi. */
}
}
-static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
- int insn, int rd)
+static void gen_cas_asi(DisasContext *dc, DisasASI *da,
+ TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
{
- DisasASI da = get_asi(dc, insn, MO_TEUL);
- TCGv oldv;
-
- switch (da.type) {
+ switch (da->type) {
case GET_ASI_EXCP:
return;
case GET_ASI_DIRECT:
- oldv = tcg_temp_new();
- tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
- da.mem_idx, da.memop | MO_ALIGN);
- gen_store_gpr(dc, rd, oldv);
+ tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
+ da->mem_idx, da->memop | MO_ALIGN);
break;
default:
/* ??? Should be DAE_invalid_asi. */
}
}
-static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
+static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
- DisasASI da = get_asi(dc, insn, MO_UB);
-
- switch (da.type) {
+ switch (da->type) {
case GET_ASI_EXCP:
break;
case GET_ASI_DIRECT:
- gen_ldstub(dc, dst, addr, da.mem_idx);
+ tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
+ da->mem_idx, MO_UB);
break;
default:
/* ??? In theory, this should be raise DAE_invalid_asi.
if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
gen_helper_exit_atomic(tcg_env);
} else {
- TCGv_i32 r_asi = tcg_constant_i32(da.asi);
+ TCGv_i32 r_asi = tcg_constant_i32(da->asi);
TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
TCGv_i64 s64, t64;
break;
}
}
-#endif
-#ifdef TARGET_SPARC64
-static void gen_ldf_asi(DisasContext *dc, TCGv addr,
- int insn, int size, int rd)
+static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
+ TCGv addr, int rd)
{
- DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
+ MemOp memop = da->memop;
+ MemOp size = memop & MO_SIZE;
TCGv_i32 d32;
TCGv_i64 d64;
+ TCGv addr_tmp;
- switch (da.type) {
+ /* TODO: Use 128-bit load/store below. */
+ if (size == MO_128) {
+ memop = (memop & ~MO_SIZE) | MO_64;
+ }
+
+ switch (da->type) {
case GET_ASI_EXCP:
break;
case GET_ASI_DIRECT:
- gen_address_mask(dc, addr);
+ memop |= MO_ALIGN_4;
switch (size) {
- case 4:
+ case MO_32:
d32 = gen_dest_fpr_F(dc);
- tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
+ tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
gen_store_fpr_F(dc, rd, d32);
break;
- case 8:
- tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
- da.memop | MO_ALIGN_4);
+
+ case MO_64:
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
break;
- case 16:
+
+ case MO_128:
d64 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
- tcg_gen_addi_tl(addr, addr, 8);
- tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
- da.memop | MO_ALIGN_4);
+ tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
+ addr_tmp = tcg_temp_new();
+ tcg_gen_addi_tl(addr_tmp, addr, 8);
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
break;
default:
case GET_ASI_BLOCK:
/* Valid for lddfa on aligned registers only. */
- if (size == 8 && (rd & 7) == 0) {
- MemOp memop;
- TCGv eight;
- int i;
-
- gen_address_mask(dc, addr);
-
+ if (orig_size == MO_64 && (rd & 7) == 0) {
/* The first operation checks required alignment. */
- memop = da.memop | MO_ALIGN_64;
- eight = tcg_constant_tl(8);
- for (i = 0; ; ++i) {
- tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
- da.mem_idx, memop);
+ addr_tmp = tcg_temp_new();
+ for (int i = 0; ; ++i) {
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
+ memop | (i == 0 ? MO_ALIGN_64 : 0));
if (i == 7) {
break;
}
- tcg_gen_add_tl(addr, addr, eight);
- memop = da.memop;
+ tcg_gen_addi_tl(addr_tmp, addr, 8);
+ addr = addr_tmp;
}
} else {
gen_exception(dc, TT_ILL_INSN);
case GET_ASI_SHORT:
/* Valid for lddfa only. */
- if (size == 8) {
- gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
- da.memop | MO_ALIGN);
+ if (orig_size == MO_64) {
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
+ memop | MO_ALIGN);
} else {
gen_exception(dc, TT_ILL_INSN);
}
default:
{
- TCGv_i32 r_asi = tcg_constant_i32(da.asi);
- TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN);
+ TCGv_i32 r_asi = tcg_constant_i32(da->asi);
+ TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
save_state(dc);
/* According to the table in the UA2011 manual, the only
the NO_FAULT asis. We still need a helper for these,
but we can just use the integer asi helper for them. */
switch (size) {
- case 4:
+ case MO_32:
d64 = tcg_temp_new_i64();
gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
d32 = gen_dest_fpr_F(dc);
tcg_gen_extrl_i64_i32(d32, d64);
gen_store_fpr_F(dc, rd, d32);
break;
- case 8:
- gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr, r_asi, r_mop);
+ case MO_64:
+ gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
+ r_asi, r_mop);
break;
- case 16:
+ case MO_128:
d64 = tcg_temp_new_i64();
gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
- tcg_gen_addi_tl(addr, addr, 8);
- gen_helper_ld_asi(cpu_fpr[rd/2+1], tcg_env, addr, r_asi, r_mop);
+ addr_tmp = tcg_temp_new();
+ tcg_gen_addi_tl(addr_tmp, addr, 8);
+ gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
+ r_asi, r_mop);
tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
break;
default:
}
}
-static void gen_stf_asi(DisasContext *dc, TCGv addr,
- int insn, int size, int rd)
+static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
+ TCGv addr, int rd)
{
- DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
+ MemOp memop = da->memop;
+ MemOp size = memop & MO_SIZE;
TCGv_i32 d32;
+ TCGv addr_tmp;
- switch (da.type) {
+ /* TODO: Use 128-bit load/store below. */
+ if (size == MO_128) {
+ memop = (memop & ~MO_SIZE) | MO_64;
+ }
+
+ switch (da->type) {
case GET_ASI_EXCP:
break;
case GET_ASI_DIRECT:
- gen_address_mask(dc, addr);
+ memop |= MO_ALIGN_4;
switch (size) {
- case 4:
+ case MO_32:
d32 = gen_load_fpr_F(dc, rd);
- tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
+ tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
break;
- case 8:
- tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
- da.memop | MO_ALIGN_4);
+ case MO_64:
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
+ memop | MO_ALIGN_4);
break;
- case 16:
+ case MO_128:
/* Only 4-byte alignment required. However, it is legal for the
cpu to signal the alignment fault, and the OS trap handler is
required to fix it up. Requiring 16-byte alignment here avoids
having to probe the second page before performing the first
write. */
- tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
- da.memop | MO_ALIGN_16);
- tcg_gen_addi_tl(addr, addr, 8);
- tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
+ memop | MO_ALIGN_16);
+ addr_tmp = tcg_temp_new();
+ tcg_gen_addi_tl(addr_tmp, addr, 8);
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
break;
default:
g_assert_not_reached();
case GET_ASI_BLOCK:
/* Valid for stdfa on aligned registers only. */
- if (size == 8 && (rd & 7) == 0) {
- MemOp memop;
- TCGv eight;
- int i;
-
- gen_address_mask(dc, addr);
-
+ if (orig_size == MO_64 && (rd & 7) == 0) {
/* The first operation checks required alignment. */
- memop = da.memop | MO_ALIGN_64;
- eight = tcg_constant_tl(8);
- for (i = 0; ; ++i) {
- tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
- da.mem_idx, memop);
+ addr_tmp = tcg_temp_new();
+ for (int i = 0; ; ++i) {
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
+ memop | (i == 0 ? MO_ALIGN_64 : 0));
if (i == 7) {
break;
}
- tcg_gen_add_tl(addr, addr, eight);
- memop = da.memop;
+ tcg_gen_addi_tl(addr_tmp, addr, 8);
+ addr = addr_tmp;
}
} else {
gen_exception(dc, TT_ILL_INSN);
case GET_ASI_SHORT:
/* Valid for stdfa only. */
- if (size == 8) {
- gen_address_mask(dc, addr);
- tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
- da.memop | MO_ALIGN);
+ if (orig_size == MO_64) {
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
+ memop | MO_ALIGN);
} else {
gen_exception(dc, TT_ILL_INSN);
}
}
}
-static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
+static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
- DisasASI da = get_asi(dc, insn, MO_TEUQ);
- TCGv_i64 hi = gen_dest_gpr(dc, rd);
- TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
+ TCGv hi = gen_dest_gpr(dc, rd);
+ TCGv lo = gen_dest_gpr(dc, rd + 1);
- switch (da.type) {
+ switch (da->type) {
case GET_ASI_EXCP:
return;
case GET_ASI_DTWINX:
- gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
- tcg_gen_addi_tl(addr, addr, 8);
- tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
+#ifdef TARGET_SPARC64
+ {
+ MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
+ TCGv_i128 t = tcg_temp_new_i128();
+
+ tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
+ /*
+ * Note that LE twinx acts as if each 64-bit register result is
+ * byte swapped. We perform one 128-bit LE load, so must swap
+ * the order of the writebacks.
+ */
+ if ((mop & MO_BSWAP) == MO_TE) {
+ tcg_gen_extr_i128_i64(lo, hi, t);
+ } else {
+ tcg_gen_extr_i128_i64(hi, lo, t);
+ }
+ }
break;
+#else
+ g_assert_not_reached();
+#endif
case GET_ASI_DIRECT:
{
TCGv_i64 tmp = tcg_temp_new_i64();
- gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop | MO_ALIGN);
+ tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
/* Note that LE ldda acts as if each 32-bit register
result is byte swapped. Having just performed one
64-bit bswap, we need now to swap the writebacks. */
- if ((da.memop & MO_BSWAP) == MO_TE) {
- tcg_gen_extr32_i64(lo, hi, tmp);
+ if ((da->memop & MO_BSWAP) == MO_TE) {
+ tcg_gen_extr_i64_tl(lo, hi, tmp);
} else {
- tcg_gen_extr32_i64(hi, lo, tmp);
+ tcg_gen_extr_i64_tl(hi, lo, tmp);
}
}
break;
real hardware allows others. This can be seen with e.g.
FreeBSD 10.3 wrt ASI_IC_TAG. */
{
- TCGv_i32 r_asi = tcg_constant_i32(da.asi);
- TCGv_i32 r_mop = tcg_constant_i32(da.memop);
+ TCGv_i32 r_asi = tcg_constant_i32(da->asi);
+ TCGv_i32 r_mop = tcg_constant_i32(da->memop);
TCGv_i64 tmp = tcg_temp_new_i64();
save_state(dc);
gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
/* See above. */
- if ((da.memop & MO_BSWAP) == MO_TE) {
- tcg_gen_extr32_i64(lo, hi, tmp);
+ if ((da->memop & MO_BSWAP) == MO_TE) {
+ tcg_gen_extr_i64_tl(lo, hi, tmp);
} else {
- tcg_gen_extr32_i64(hi, lo, tmp);
+ tcg_gen_extr_i64_tl(hi, lo, tmp);
}
}
break;
gen_store_gpr(dc, rd + 1, lo);
}
-static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
- int insn, int rd)
+static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
- DisasASI da = get_asi(dc, insn, MO_TEUQ);
+ TCGv hi = gen_load_gpr(dc, rd);
TCGv lo = gen_load_gpr(dc, rd + 1);
- switch (da.type) {
+ switch (da->type) {
case GET_ASI_EXCP:
break;
case GET_ASI_DTWINX:
- gen_address_mask(dc, addr);
- tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
- tcg_gen_addi_tl(addr, addr, 8);
- tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
- break;
-
- case GET_ASI_DIRECT:
+#ifdef TARGET_SPARC64
{
- TCGv_i64 t64 = tcg_temp_new_i64();
-
- /* Note that LE stda acts as if each 32-bit register result is
- byte swapped. We will perform one 64-bit LE store, so now
- we must swap the order of the construction. */
- if ((da.memop & MO_BSWAP) == MO_TE) {
- tcg_gen_concat32_i64(t64, lo, hi);
+ MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
+ TCGv_i128 t = tcg_temp_new_i128();
+
+ /*
+ * Note that LE twinx acts as if each 64-bit register result is
+ * byte swapped. We perform one 128-bit LE store, so must swap
+ * the order of the construction.
+ */
+ if ((mop & MO_BSWAP) == MO_TE) {
+ tcg_gen_concat_i64_i128(t, lo, hi);
} else {
- tcg_gen_concat32_i64(t64, hi, lo);
+ tcg_gen_concat_i64_i128(t, hi, lo);
}
- gen_address_mask(dc, addr);
- tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
+ tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
}
break;
+#else
+ g_assert_not_reached();
+#endif
- default:
- /* ??? In theory we've handled all of the ASIs that are valid
- for stda, and this should raise DAE_invalid_asi. */
+ case GET_ASI_DIRECT:
{
- TCGv_i32 r_asi = tcg_constant_i32(da.asi);
- TCGv_i32 r_mop = tcg_constant_i32(da.memop);
TCGv_i64 t64 = tcg_temp_new_i64();
- /* See above. */
- if ((da.memop & MO_BSWAP) == MO_TE) {
- tcg_gen_concat32_i64(t64, lo, hi);
+ /* Note that LE stda acts as if each 32-bit register result is
+ byte swapped. We will perform one 64-bit LE store, so now
+ we must swap the order of the construction. */
+ if ((da->memop & MO_BSWAP) == MO_TE) {
+ tcg_gen_concat_tl_i64(t64, lo, hi);
} else {
- tcg_gen_concat32_i64(t64, hi, lo);
+ tcg_gen_concat_tl_i64(t64, hi, lo);
}
-
- save_state(dc);
- gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
- }
- break;
- }
-}
-
-static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
- int insn, int rd)
-{
- DisasASI da = get_asi(dc, insn, MO_TEUQ);
- TCGv oldv;
-
- switch (da.type) {
- case GET_ASI_EXCP:
- return;
- case GET_ASI_DIRECT:
- oldv = tcg_temp_new();
- tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
- da.mem_idx, da.memop | MO_ALIGN);
- gen_store_gpr(dc, rd, oldv);
- break;
- default:
- /* ??? Should be DAE_invalid_asi. */
- gen_exception(dc, TT_DATA_ACCESS);
- break;
- }
-}
-
-#elif !defined(CONFIG_USER_ONLY)
-static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
-{
- /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
- whereby "rd + 1" elicits "error: array subscript is above array".
- Since we have already asserted that rd is even, the semantics
- are unchanged. */
- TCGv lo = gen_dest_gpr(dc, rd | 1);
- TCGv hi = gen_dest_gpr(dc, rd);
- TCGv_i64 t64 = tcg_temp_new_i64();
- DisasASI da = get_asi(dc, insn, MO_TEUQ);
-
- switch (da.type) {
- case GET_ASI_EXCP:
- return;
- case GET_ASI_DIRECT:
- gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
- break;
- default:
- {
- TCGv_i32 r_asi = tcg_constant_i32(da.asi);
- TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);
-
- save_state(dc);
- gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
+ tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
}
break;
- }
-
- tcg_gen_extr_i64_i32(lo, hi, t64);
- gen_store_gpr(dc, rd | 1, lo);
- gen_store_gpr(dc, rd, hi);
-}
-
-static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
- int insn, int rd)
-{
- DisasASI da = get_asi(dc, insn, MO_TEUQ);
- TCGv lo = gen_load_gpr(dc, rd + 1);
- TCGv_i64 t64 = tcg_temp_new_i64();
- tcg_gen_concat_tl_i64(t64, lo, hi);
-
- switch (da.type) {
- case GET_ASI_EXCP:
- break;
- case GET_ASI_DIRECT:
- gen_address_mask(dc, addr);
- tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
- break;
case GET_ASI_BFILL:
+ assert(TARGET_LONG_BITS == 32);
/* Store 32 bytes of T64 to ADDR. */
/* ??? The original qemu code suggests 8-byte alignment, dropping
the low bits, but the only place I can see this used is in the
Linux kernel with 32 byte alignment, which would make more sense
as a cacheline-style operation. */
{
+ TCGv_i64 t64 = tcg_temp_new_i64();
TCGv d_addr = tcg_temp_new();
TCGv eight = tcg_constant_tl(8);
int i;
+ tcg_gen_concat_tl_i64(t64, lo, hi);
tcg_gen_andi_tl(d_addr, addr, -8);
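+            /*
+             * The loop below then replicates the {hi,lo} pair into the
+             * four doublewords starting at addr & -8.
+             */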
for (i = 0; i < 32; i += 8) {
- tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_st_i64(t64, d_addr, da->mem_idx, da->memop);
tcg_gen_add_tl(d_addr, d_addr, eight);
}
}
break;
+
default:
+ /* ??? In theory we've handled all of the ASIs that are valid
+ for stda, and this should raise DAE_invalid_asi. */
{
- TCGv_i32 r_asi = tcg_constant_i32(da.asi);
- TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);
+ TCGv_i32 r_asi = tcg_constant_i32(da->asi);
+ TCGv_i32 r_mop = tcg_constant_i32(da->memop);
+ TCGv_i64 t64 = tcg_temp_new_i64();
+
+ /* See above. */
+ if ((da->memop & MO_BSWAP) == MO_TE) {
+ tcg_gen_concat_tl_i64(t64, lo, hi);
+ } else {
+ tcg_gen_concat_tl_i64(t64, hi, lo);
+ }
save_state(dc);
gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
-#endif
-
-static TCGv get_src1(DisasContext *dc, unsigned int insn)
-{
- unsigned int rs1 = GET_FIELD(insn, 13, 17);
- return gen_load_gpr(dc, rs1);
-}
-
-static TCGv get_src2(DisasContext *dc, unsigned int insn)
-{
- if (IS_IMM) { /* immediate */
- target_long simm = GET_FIELDs(insn, 19, 31);
- TCGv t = tcg_temp_new();
- tcg_gen_movi_tl(t, simm);
- return t;
- } else { /* register */
- unsigned int rs2 = GET_FIELD(insn, 27, 31);
- return gen_load_gpr(dc, rs2);
- }
-}
-#ifdef TARGET_SPARC64
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
+#ifdef TARGET_SPARC64
TCGv_i32 c32, zero, dst, s1, s2;
+ TCGv_i64 c64 = tcg_temp_new_i64();
/* We have two choices here: extend the 32 bit data and use movcond_i64,
or fold the comparison down to 32 bits and use movcond_i32. Choose
       the latter.  */
c32 = tcg_temp_new_i32();
- if (cmp->is_bool) {
- tcg_gen_extrl_i64_i32(c32, cmp->c1);
- } else {
- TCGv_i64 c64 = tcg_temp_new_i64();
- tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
- tcg_gen_extrl_i64_i32(c32, c64);
- }
+ tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
+ tcg_gen_extrl_i64_i32(c32, c64);
s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = tcg_temp_new_i32();
    zero = tcg_constant_i32(0);
tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
gen_store_fpr_F(dc, rd, dst);
+#else
+ qemu_build_not_reached();
+#endif
}
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
+#ifdef TARGET_SPARC64
TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
- tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
+ tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
gen_load_fpr_D(dc, rs),
gen_load_fpr_D(dc, rd));
gen_store_fpr_D(dc, rd, dst);
+#else
+ qemu_build_not_reached();
+#endif
}
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
+#ifdef TARGET_SPARC64
int qd = QFPREG(rd);
int qs = QFPREG(rs);
+ TCGv c2 = tcg_constant_tl(cmp->c2);
- tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
+ tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, c2,
cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
- tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
+ tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, c2,
cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
gen_update_fprs_dirty(dc, qd);
+#else
+ qemu_build_not_reached();
+#endif
}
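+
+/*
+ * Note for the three fmov helpers above: DisasCompare.c2 is now an
+ * integer immediate rather than a TCGv, so comparisons either use the
+ * *condi* expanders directly or materialize it via tcg_constant_tl().
+ */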
+#ifdef TARGET_SPARC64
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
TCGv_i32 r_tl = tcg_temp_new_i32();
tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
}
}
+#endif
-static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
- int width, bool cc, bool left)
-{
- TCGv lo1, lo2;
- uint64_t amask, tabl, tabr;
- int shift, imask, omask;
-
- if (cc) {
- tcg_gen_mov_tl(cpu_cc_src, s1);
- tcg_gen_mov_tl(cpu_cc_src2, s2);
- tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
- dc->cc_op = CC_OP_SUB;
- }
-
- /* Theory of operation: there are two tables, left and right (not to
- be confused with the left and right versions of the opcode). These
- are indexed by the low 3 bits of the inputs. To make things "easy",
- these tables are loaded into two constants, TABL and TABR below.
- The operation index = (input & imask) << shift calculates the index
- into the constant, while val = (table >> index) & omask calculates
- the value we're looking for. */
- switch (width) {
- case 8:
- imask = 0x7;
- shift = 3;
- omask = 0xff;
- if (left) {
- tabl = 0x80c0e0f0f8fcfeffULL;
- tabr = 0xff7f3f1f0f070301ULL;
- } else {
- tabl = 0x0103070f1f3f7fffULL;
- tabr = 0xfffefcf8f0e0c080ULL;
- }
- break;
- case 16:
- imask = 0x6;
- shift = 1;
- omask = 0xf;
- if (left) {
- tabl = 0x8cef;
- tabr = 0xf731;
- } else {
- tabl = 0x137f;
- tabr = 0xfec8;
- }
- break;
- case 32:
- imask = 0x4;
- shift = 0;
- omask = 0x3;
- if (left) {
- tabl = (2 << 2) | 3;
- tabr = (3 << 2) | 1;
- } else {
- tabl = (1 << 2) | 3;
- tabr = (3 << 2) | 2;
- }
- break;
- default:
- abort();
- }
-
- lo1 = tcg_temp_new();
- lo2 = tcg_temp_new();
- tcg_gen_andi_tl(lo1, s1, imask);
- tcg_gen_andi_tl(lo2, s2, imask);
- tcg_gen_shli_tl(lo1, lo1, shift);
- tcg_gen_shli_tl(lo2, lo2, shift);
-
- tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
- tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
- tcg_gen_andi_tl(lo1, lo1, omask);
- tcg_gen_andi_tl(lo2, lo2, omask);
-
- amask = -8;
- if (AM_CHECK(dc)) {
- amask &= 0xffffffffULL;
- }
- tcg_gen_andi_tl(s1, s1, amask);
- tcg_gen_andi_tl(s2, s2, amask);
-
- /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
- tcg_gen_and_tl(lo2, lo2, lo1);
- tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
-}
-
-static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
+static int extract_dfpreg(DisasContext *dc, int x)
{
- TCGv tmp = tcg_temp_new();
-
- tcg_gen_add_tl(tmp, s1, s2);
- tcg_gen_andi_tl(dst, tmp, -8);
- if (left) {
- tcg_gen_neg_tl(tmp, tmp);
- }
- tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
+ return DFPREG(x);
}
-static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
+static int extract_qfpreg(DisasContext *dc, int x)
{
- TCGv t1, t2, shift;
-
- t1 = tcg_temp_new();
- t2 = tcg_temp_new();
- shift = tcg_temp_new();
-
- tcg_gen_andi_tl(shift, gsr, 7);
- tcg_gen_shli_tl(shift, shift, 3);
- tcg_gen_shl_tl(t1, s1, shift);
-
- /* A shift of 64 does not produce 0 in TCG. Divide this into a
- shift of (up to 63) followed by a constant shift of 1. */
- tcg_gen_xori_tl(shift, shift, 63);
- tcg_gen_shr_tl(t2, s2, shift);
- tcg_gen_shri_tl(t2, t2, 1);
-
- tcg_gen_or_tl(dst, t1, t2);
+ return QFPREG(x);
}
-#endif
/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"
#ifdef TARGET_SPARC64
# define avail_32(C) false
# define avail_ASR17(C) false
+# define avail_CASA(C) true
+# define avail_DIV(C) true
+# define avail_MUL(C) true
# define avail_POWERDOWN(C) false
# define avail_64(C) true
# define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
+# define avail_VIS1(C) ((C)->def->features & CPU_FEATURE_VIS1)
+# define avail_VIS2(C) ((C)->def->features & CPU_FEATURE_VIS2)
#else
# define avail_32(C) true
# define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
+# define avail_CASA(C) ((C)->def->features & CPU_FEATURE_CASA)
+# define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
+# define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C) false
# define avail_GL(C) false
# define avail_HYPV(C) false
+# define avail_VIS1(C) false
+# define avail_VIS2(C) false
#endif
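+
+/*
+ * The avail_*() predicates feed the TRANS() expansions below; when one
+ * is compile-time false (e.g. avail_VIS1 on a 32-bit build), the whole
+ * translator for that instruction folds away as dead code.
+ */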
/* Default case for non-jump instructions. */
static bool advance_pc(DisasContext *dc)
{
+ TCGLabel *l1;
+
+ finishing_insn(dc);
+
if (dc->npc & 3) {
switch (dc->npc) {
case DYNAMIC_PC:
case DYNAMIC_PC_LOOKUP:
dc->pc = dc->npc;
- gen_op_next_insn();
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
break;
+
case JUMP_PC:
/* we can do a static jump */
- gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
+ l1 = gen_new_label();
+ tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);
+
+ /* jump not taken */
+ gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);
+
+ /* jump taken */
+ gen_set_label(l1);
+ gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);
+
dc->base.is_jmp = DISAS_NORETURN;
break;
+
default:
g_assert_not_reached();
}
* Major opcodes 00 and 01 -- branches, call, and sethi
*/
-static bool advance_jump_uncond_never(DisasContext *dc, bool annul)
+static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
+ bool annul, int disp)
{
- if (annul) {
- dc->pc = dc->npc + 4;
- dc->npc = dc->pc + 4;
- } else {
- dc->pc = dc->npc;
- dc->npc = dc->pc + 4;
- }
- return true;
-}
+ target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
+ target_ulong npc;
-static bool advance_jump_uncond_always(DisasContext *dc, bool annul,
- target_ulong dest)
-{
- if (annul) {
- dc->pc = dest;
- dc->npc = dest + 4;
- } else {
- dc->pc = dc->npc;
- dc->npc = dest;
- tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ finishing_insn(dc);
+
+ if (cmp->cond == TCG_COND_ALWAYS) {
+ if (annul) {
+ dc->pc = dest;
+ dc->npc = dest + 4;
+ } else {
+ gen_mov_pc_npc(dc);
+ dc->npc = dest;
+ }
+ return true;
}
- return true;
-}
-static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
- bool annul, target_ulong dest)
-{
- target_ulong npc = dc->npc;
+ if (cmp->cond == TCG_COND_NEVER) {
+ npc = dc->npc;
+ if (npc & 3) {
+ gen_mov_pc_npc(dc);
+ if (annul) {
+ tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
+ }
+ tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
+ } else {
+ dc->pc = npc + (annul ? 4 : 0);
+ dc->npc = dc->pc + 4;
+ }
+ return true;
+ }
+
+ flush_cond(dc);
+ npc = dc->npc;
if (annul) {
TCGLabel *l1 = gen_new_label();
- tcg_gen_brcond_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
+ tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
gen_goto_tb(dc, 0, npc, dest);
gen_set_label(l1);
gen_goto_tb(dc, 1, npc + 4, npc + 8);
tcg_gen_mov_tl(cpu_pc, cpu_npc);
tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
tcg_gen_movcond_tl(cmp->cond, cpu_npc,
- cmp->c1, cmp->c2,
+ cmp->c1, tcg_constant_tl(cmp->c2),
tcg_constant_tl(dest), cpu_npc);
dc->pc = npc;
break;
}
} else {
dc->pc = npc;
+ dc->npc = JUMP_PC;
+ dc->jump = *cmp;
dc->jump_pc[0] = dest;
dc->jump_pc[1] = npc + 4;
- dc->npc = JUMP_PC;
- if (cmp->is_bool) {
- tcg_gen_mov_tl(cpu_cond, cmp->c1);
+
+ /* The condition for cpu_cond is always NE -- normalize. */
+ if (cmp->cond == TCG_COND_NE) {
+ tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
} else {
- tcg_gen_setcond_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
+ tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
}
+ dc->cpu_cond_live = true;
}
}
    return true;
}
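+
+/*
+ * Summary of the annul handling above for a conditional branch:
+ * without the annul bit the delay slot always executes, so only npc
+ * becomes conditional; with the annul bit a taken branch still runs
+ * the delay slot (goto_tb to npc, dest) while an untaken branch skips
+ * it entirely (goto_tb to npc + 4, npc + 8).
+ */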
+static bool raise_unimpfpop(DisasContext *dc)
+{
+ gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
+ return true;
+}
+
+static bool gen_trap_float128(DisasContext *dc)
+{
+ if (dc->def->features & CPU_FEATURE_FLOAT128) {
+ return false;
+ }
+ return raise_unimpfpop(dc);
+}
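+
+/*
+ * Typical use, as a sketch: a trans_* routine for a float128 insn
+ * starts with
+ *     if (gen_trap_float128(dc)) {
+ *         return true;
+ *     }
+ * so CPUs without CPU_FEATURE_FLOAT128 raise an UNIMPFPOP fp exception
+ * instead of translating the operation.
+ */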
+
static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
- target_long target = address_mask_i(dc, dc->pc + a->i * 4);
DisasCompare cmp;
- switch (a->cond) {
- case 0x0:
- return advance_jump_uncond_never(dc, a->a);
- case 0x8:
- return advance_jump_uncond_always(dc, a->a, target);
- default:
- flush_cond(dc);
-
- gen_compare(&cmp, a->cc, a->cond, dc);
- return advance_jump_cond(dc, &cmp, a->a, target);
- }
+ gen_compare(&cmp, a->cc, a->cond, dc);
+ return advance_jump_cond(dc, &cmp, a->a, a->i);
}
TRANS(Bicc, ALL, do_bpcc, a)
static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
- target_long target = address_mask_i(dc, dc->pc + a->i * 4);
DisasCompare cmp;
if (gen_trap_ifnofpu(dc)) {
return true;
}
- switch (a->cond) {
- case 0x0:
- return advance_jump_uncond_never(dc, a->a);
- case 0x8:
- return advance_jump_uncond_always(dc, a->a, target);
- default:
- flush_cond(dc);
-
- gen_fcompare(&cmp, a->cc, a->cond);
- return advance_jump_cond(dc, &cmp, a->a, target);
- }
+ gen_fcompare(&cmp, a->cc, a->cond);
+ return advance_jump_cond(dc, &cmp, a->a, a->i);
}
TRANS(FBPfcc, 64, do_fbpfcc, a)
static bool trans_BPr(DisasContext *dc, arg_BPr *a)
{
- target_long target = address_mask_i(dc, dc->pc + a->i * 4);
DisasCompare cmp;
if (!avail_64(dc)) {
return false;
}
- if (gen_tcg_cond_reg[a->cond] == TCG_COND_NEVER) {
+ if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
return false;
}
-
- flush_cond(dc);
- gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
- return advance_jump_cond(dc, &cmp, a->a, target);
+ return advance_jump_cond(dc, &cmp, a->a, a->i);
}
static bool trans_CALL(DisasContext *dc, arg_CALL *a)
tcg_gen_addi_i32(trap, trap, TT_TRAP);
}
+ finishing_insn(dc);
+
/* Trap always. */
if (cond == 8) {
save_state(dc);
flush_cond(dc);
lab = delay_exceptionv(dc, trap);
gen_compare(&cmp, cc, cond, dc);
- tcg_gen_brcond_tl(cmp.cond, cmp.c1, cmp.c2, lab);
+ tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);
return advance_pc(dc);
}
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
- update_psr(dc);
gen_helper_rdccr(dst, tcg_env);
return dst;
}
static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
{
- update_psr(dc);
gen_helper_rdpsr(dst, tcg_env);
return dst;
}
static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
+ finishing_insn(dc);
save_state(dc);
gen_helper_power_down(tcg_env);
}
static void do_wrpsr(DisasContext *dc, TCGv src)
{
gen_helper_wrpsr(tcg_env, src);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
- dc->cc_op = CC_OP_FLAGS;
dc->base.is_jmp = DISAS_EXIT;
}
TRANS(SAVED, 64, do_saved_restored, true)
TRANS(RESTORED, 64, do_saved_restored, false)
-static bool trans_NOP_v7(DisasContext *dc, arg_NOP_v7 *a)
+static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
- /*
- * TODO: Need a feature bit for sparcv8.
- * In the meantime, treat all 32-bit cpus like sparcv7.
- */
- if (avail_32(dc)) {
- return advance_pc(dc);
+ return advance_pc(dc);
+}
+
+/*
+ * TODO: Need a feature bit for sparcv8.
+ * In the meantime, treat all 32-bit cpus like sparcv7.
+ */
+TRANS(NOP_v7, 32, trans_NOP, a)
+TRANS(NOP_v9, 64, trans_NOP, a)
+
+static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
+ void (*func)(TCGv, TCGv, TCGv),
+ void (*funci)(TCGv, TCGv, target_long),
+ bool logic_cc)
+{
+ TCGv dst, src1;
+
+ /* For simplicity, we under-decoded the rs2 form. */
+ if (!a->imm && a->rs2_or_imm & ~0x1f) {
+ return false;
}
- return false;
+
+ if (logic_cc) {
+ dst = cpu_cc_N;
+ } else {
+ dst = gen_dest_gpr(dc, a->rd);
+ }
+ src1 = gen_load_gpr(dc, a->rs1);
+
+ if (a->imm || a->rs2_or_imm == 0) {
+ if (funci) {
+ funci(dst, src1, a->rs2_or_imm);
+ } else {
+ func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
+ }
+ } else {
+ func(dst, src1, cpu_regs[a->rs2_or_imm]);
+ }
+
+ if (logic_cc) {
+ if (TARGET_LONG_BITS == 64) {
+ tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
+ tcg_gen_movi_tl(cpu_icc_C, 0);
+ }
+ tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
+ tcg_gen_movi_tl(cpu_cc_C, 0);
+ tcg_gen_movi_tl(cpu_cc_V, 0);
+ }
+
+ gen_store_gpr(dc, a->rd, dst);
+ return advance_pc(dc);
}
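+
+/*
+ * For the logic_cc path, the result is computed directly into cpu_cc_N
+ * and then copied to cpu_cc_Z (plus cpu_icc_Z for the 64-bit build):
+ * the flags representation takes N from the sign and Z from the
+ * zero-ness of the stored value, while C and V are simply 0 after a
+ * logical operation.
+ */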
-#define CHECK_IU_FEATURE(dc, FEATURE) \
- if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
- goto illegal_insn;
-#define CHECK_FPU_FEATURE(dc, FEATURE) \
- if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
- goto nfpu_insn;
+static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
+ void (*func)(TCGv, TCGv, TCGv),
+ void (*funci)(TCGv, TCGv, target_long),
+ void (*func_cc)(TCGv, TCGv, TCGv))
+{
+ if (a->cc) {
+ return do_arith_int(dc, a, func_cc, NULL, false);
+ }
+ return do_arith_int(dc, a, func, funci, false);
+}
-/* before an instruction, dc->pc must be static */
-static void disas_sparc_legacy(DisasContext *dc, unsigned int insn)
+static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
+ void (*func)(TCGv, TCGv, TCGv),
+ void (*funci)(TCGv, TCGv, target_long))
{
- unsigned int opc, rs1, rs2, rd;
- TCGv cpu_src1, cpu_src2;
- TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
- TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
- target_long simm;
+ return do_arith_int(dc, a, func, funci, a->cc);
+}
- opc = GET_FIELD(insn, 0, 1);
- rd = GET_FIELD(insn, 2, 6);
+TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
+TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
+TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
+TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)
- switch (opc) {
- case 0:
- goto illegal_insn; /* in decodetree */
- case 1:
- g_assert_not_reached(); /* in decodetree */
- case 2: /* FPU & Logical Operations */
- {
- unsigned int xop __attribute__((unused)) = GET_FIELD(insn, 7, 12);
- TCGv cpu_dst __attribute__((unused)) = tcg_temp_new();
- TCGv cpu_tmp0 __attribute__((unused));
+TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
+TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
+TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
+TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)
- if (xop == 0x34) { /* FPU Operations */
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- gen_op_clear_ieee_excp_and_FTT();
- rs1 = GET_FIELD(insn, 13, 17);
- rs2 = GET_FIELD(insn, 27, 31);
- xop = GET_FIELD(insn, 18, 26);
-
- switch (xop) {
- case 0x1: /* fmovs */
- cpu_src1_32 = gen_load_fpr_F(dc, rs2);
- gen_store_fpr_F(dc, rd, cpu_src1_32);
- break;
- case 0x5: /* fnegs */
- gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
- break;
- case 0x9: /* fabss */
- gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
- break;
- case 0x29: /* fsqrts */
- gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
- break;
- case 0x2a: /* fsqrtd */
- gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
- break;
- case 0x2b: /* fsqrtq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
- break;
- case 0x41: /* fadds */
- gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
- break;
- case 0x42: /* faddd */
- gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
- break;
- case 0x43: /* faddq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
- break;
- case 0x45: /* fsubs */
- gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
- break;
- case 0x46: /* fsubd */
- gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
- break;
- case 0x47: /* fsubq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
- break;
- case 0x49: /* fmuls */
- gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
- break;
- case 0x4a: /* fmuld */
- gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
- break;
- case 0x4b: /* fmulq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
- break;
- case 0x4d: /* fdivs */
- gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
- break;
- case 0x4e: /* fdivd */
- gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
- break;
- case 0x4f: /* fdivq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
- break;
- case 0x69: /* fsmuld */
- CHECK_FPU_FEATURE(dc, FSMULD);
- gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
- break;
- case 0x6e: /* fdmulq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
- break;
- case 0xc4: /* fitos */
- gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
- break;
- case 0xc6: /* fdtos */
- gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
- break;
- case 0xc7: /* fqtos */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
- break;
- case 0xc8: /* fitod */
- gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
- break;
- case 0xc9: /* fstod */
- gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
- break;
- case 0xcb: /* fqtod */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
- break;
- case 0xcc: /* fitoq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
- break;
- case 0xcd: /* fstoq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
- break;
- case 0xce: /* fdtoq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
- break;
- case 0xd1: /* fstoi */
- gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
- break;
- case 0xd2: /* fdtoi */
- gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
- break;
- case 0xd3: /* fqtoi */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
- break;
-#ifdef TARGET_SPARC64
- case 0x2: /* V9 fmovd */
- cpu_src1_64 = gen_load_fpr_D(dc, rs2);
- gen_store_fpr_D(dc, rd, cpu_src1_64);
- break;
- case 0x3: /* V9 fmovq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_move_Q(dc, rd, rs2);
- break;
- case 0x6: /* V9 fnegd */
- gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
- break;
- case 0x7: /* V9 fnegq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
- break;
- case 0xa: /* V9 fabsd */
- gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
- break;
- case 0xb: /* V9 fabsq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
- break;
- case 0x81: /* V9 fstox */
- gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
- break;
- case 0x82: /* V9 fdtox */
- gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
- break;
- case 0x83: /* V9 fqtox */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
- break;
- case 0x84: /* V9 fxtos */
- gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
- break;
- case 0x88: /* V9 fxtod */
- gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
- break;
- case 0x8c: /* V9 fxtoq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
- break;
-#endif
- default:
- goto illegal_insn;
- }
- } else if (xop == 0x35) { /* FPU Operations */
-#ifdef TARGET_SPARC64
- int cond;
-#endif
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- gen_op_clear_ieee_excp_and_FTT();
- rs1 = GET_FIELD(insn, 13, 17);
- rs2 = GET_FIELD(insn, 27, 31);
- xop = GET_FIELD(insn, 18, 26);
+TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
+TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
+TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
+TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
+TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
+TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
+TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
+TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
+TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)
+
+TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
+TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)
+
+/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
+TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
+
+static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
+{
+ /* OR with %g0 is the canonical alias for MOV. */
+ if (!a->cc && a->rs1 == 0) {
+ if (a->imm || a->rs2_or_imm == 0) {
+ gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
+ } else if (a->rs2_or_imm & ~0x1f) {
+ /* For simplicity, we under-decoded the rs2 form. */
+ return false;
+ } else {
+ gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
+ }
+ return advance_pc(dc);
+ }
+ return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
+}
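+
+/*
+ * Example: "mov 5, %o0" assembles as "or %g0, 5, %o0" and takes the
+ * constant path above; "mov %g1, %o0" is "or %g0, %g1, %o0" and copies
+ * the register without generating an OR.
+ */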
+
+static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
+{
+ TCGv_i64 t1, t2;
+ TCGv dst;
+
+ if (!avail_DIV(dc)) {
+ return false;
+ }
+ /* For simplicity, we under-decoded the rs2 form. */
+ if (!a->imm && a->rs2_or_imm & ~0x1f) {
+ return false;
+ }
+
+ if (unlikely(a->rs2_or_imm == 0)) {
+ gen_exception(dc, TT_DIV_ZERO);
+ return true;
+ }
+
+ if (a->imm) {
+ t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
+ } else {
+ TCGLabel *lab;
+ TCGv_i32 n2;
+
+ finishing_insn(dc);
+ flush_cond(dc);
+
+ n2 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);
+
+ lab = delay_exception(dc, TT_DIV_ZERO);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);
+
+ t2 = tcg_temp_new_i64();
#ifdef TARGET_SPARC64
-#define FMOVR(sz) \
- do { \
- DisasCompare cmp; \
- cond = GET_FIELD_SP(insn, 10, 12); \
- cpu_src1 = get_src1(dc, insn); \
- gen_compare_reg(&cmp, cond, cpu_src1); \
- gen_fmov##sz(dc, &cmp, rd, rs2); \
- } while (0)
-
- if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
- FMOVR(s);
- break;
- } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
- FMOVR(d);
- break;
- } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
- CHECK_FPU_FEATURE(dc, FLOAT128);
- FMOVR(q);
- break;
- }
-#undef FMOVR
-#endif
- switch (xop) {
-#ifdef TARGET_SPARC64
-#define FMOVCC(fcc, sz) \
- do { \
- DisasCompare cmp; \
- cond = GET_FIELD_SP(insn, 14, 17); \
- gen_fcompare(&cmp, fcc, cond); \
- gen_fmov##sz(dc, &cmp, rd, rs2); \
- } while (0)
-
- case 0x001: /* V9 fmovscc %fcc0 */
- FMOVCC(0, s);
- break;
- case 0x002: /* V9 fmovdcc %fcc0 */
- FMOVCC(0, d);
- break;
- case 0x003: /* V9 fmovqcc %fcc0 */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- FMOVCC(0, q);
- break;
- case 0x041: /* V9 fmovscc %fcc1 */
- FMOVCC(1, s);
- break;
- case 0x042: /* V9 fmovdcc %fcc1 */
- FMOVCC(1, d);
- break;
- case 0x043: /* V9 fmovqcc %fcc1 */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- FMOVCC(1, q);
- break;
- case 0x081: /* V9 fmovscc %fcc2 */
- FMOVCC(2, s);
- break;
- case 0x082: /* V9 fmovdcc %fcc2 */
- FMOVCC(2, d);
- break;
- case 0x083: /* V9 fmovqcc %fcc2 */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- FMOVCC(2, q);
- break;
- case 0x0c1: /* V9 fmovscc %fcc3 */
- FMOVCC(3, s);
- break;
- case 0x0c2: /* V9 fmovdcc %fcc3 */
- FMOVCC(3, d);
- break;
- case 0x0c3: /* V9 fmovqcc %fcc3 */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- FMOVCC(3, q);
- break;
-#undef FMOVCC
-#define FMOVCC(xcc, sz) \
- do { \
- DisasCompare cmp; \
- cond = GET_FIELD_SP(insn, 14, 17); \
- gen_compare(&cmp, xcc, cond, dc); \
- gen_fmov##sz(dc, &cmp, rd, rs2); \
- } while (0)
-
- case 0x101: /* V9 fmovscc %icc */
- FMOVCC(0, s);
- break;
- case 0x102: /* V9 fmovdcc %icc */
- FMOVCC(0, d);
- break;
- case 0x103: /* V9 fmovqcc %icc */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- FMOVCC(0, q);
- break;
- case 0x181: /* V9 fmovscc %xcc */
- FMOVCC(1, s);
- break;
- case 0x182: /* V9 fmovdcc %xcc */
- FMOVCC(1, d);
- break;
- case 0x183: /* V9 fmovqcc %xcc */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- FMOVCC(1, q);
- break;
-#undef FMOVCC
-#endif
- case 0x51: /* fcmps, V9 %fcc */
- cpu_src1_32 = gen_load_fpr_F(dc, rs1);
- cpu_src2_32 = gen_load_fpr_F(dc, rs2);
- gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
- break;
- case 0x52: /* fcmpd, V9 %fcc */
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- cpu_src2_64 = gen_load_fpr_D(dc, rs2);
- gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
- break;
- case 0x53: /* fcmpq, V9 %fcc */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_op_load_fpr_QT0(QFPREG(rs1));
- gen_op_load_fpr_QT1(QFPREG(rs2));
- gen_op_fcmpq(rd & 3);
- break;
- case 0x55: /* fcmpes, V9 %fcc */
- cpu_src1_32 = gen_load_fpr_F(dc, rs1);
- cpu_src2_32 = gen_load_fpr_F(dc, rs2);
- gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
- break;
- case 0x56: /* fcmped, V9 %fcc */
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- cpu_src2_64 = gen_load_fpr_D(dc, rs2);
- gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
- break;
- case 0x57: /* fcmpeq, V9 %fcc */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_op_load_fpr_QT0(QFPREG(rs1));
- gen_op_load_fpr_QT1(QFPREG(rs2));
- gen_op_fcmpeq(rd & 3);
- break;
- default:
- goto illegal_insn;
- }
- } else if (xop == 0x2) {
- TCGv dst = gen_dest_gpr(dc, rd);
- rs1 = GET_FIELD(insn, 13, 17);
- if (rs1 == 0) {
- /* clr/mov shortcut : or %g0, x, y -> mov x, y */
- if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 19, 31);
- tcg_gen_movi_tl(dst, simm);
- gen_store_gpr(dc, rd, dst);
- } else { /* register */
- rs2 = GET_FIELD(insn, 27, 31);
- if (rs2 == 0) {
- tcg_gen_movi_tl(dst, 0);
- gen_store_gpr(dc, rd, dst);
- } else {
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_store_gpr(dc, rd, cpu_src2);
- }
- }
- } else {
- cpu_src1 = get_src1(dc, insn);
- if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 19, 31);
- tcg_gen_ori_tl(dst, cpu_src1, simm);
- gen_store_gpr(dc, rd, dst);
- } else { /* register */
- rs2 = GET_FIELD(insn, 27, 31);
- if (rs2 == 0) {
- /* mov shortcut: or x, %g0, y -> mov x, y */
- gen_store_gpr(dc, rd, cpu_src1);
- } else {
- cpu_src2 = gen_load_gpr(dc, rs2);
- tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
- gen_store_gpr(dc, rd, dst);
- }
- }
- }
-#ifdef TARGET_SPARC64
- } else if (xop == 0x25) { /* sll, V9 sllx */
- cpu_src1 = get_src1(dc, insn);
- if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 20, 31);
- if (insn & (1 << 12)) {
- tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
- } else {
- tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
- }
- } else { /* register */
- rs2 = GET_FIELD(insn, 27, 31);
- cpu_src2 = gen_load_gpr(dc, rs2);
- cpu_tmp0 = tcg_temp_new();
- if (insn & (1 << 12)) {
- tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
- } else {
- tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
- }
- tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
- }
- gen_store_gpr(dc, rd, cpu_dst);
- } else if (xop == 0x26) { /* srl, V9 srlx */
- cpu_src1 = get_src1(dc, insn);
- if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 20, 31);
- if (insn & (1 << 12)) {
- tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
- } else {
- tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
- tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
- }
- } else { /* register */
- rs2 = GET_FIELD(insn, 27, 31);
- cpu_src2 = gen_load_gpr(dc, rs2);
- cpu_tmp0 = tcg_temp_new();
- if (insn & (1 << 12)) {
- tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
- tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
- } else {
- tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
- tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
- tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
- }
- }
- gen_store_gpr(dc, rd, cpu_dst);
- } else if (xop == 0x27) { /* sra, V9 srax */
- cpu_src1 = get_src1(dc, insn);
- if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 20, 31);
- if (insn & (1 << 12)) {
- tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
- } else {
- tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
- tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
- }
- } else { /* register */
- rs2 = GET_FIELD(insn, 27, 31);
- cpu_src2 = gen_load_gpr(dc, rs2);
- cpu_tmp0 = tcg_temp_new();
- if (insn & (1 << 12)) {
- tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
- tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
- } else {
- tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
- tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
- tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
- }
- }
- gen_store_gpr(dc, rd, cpu_dst);
-#endif
- } else if (xop < 0x36) {
- if (xop < 0x20) {
- cpu_src1 = get_src1(dc, insn);
- cpu_src2 = get_src2(dc, insn);
- switch (xop & ~0x10) {
- case 0x0: /* add */
- if (xop & 0x10) {
- gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
- dc->cc_op = CC_OP_ADD;
- } else {
- tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
- }
- break;
- case 0x1: /* and */
- tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
- if (xop & 0x10) {
- tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
- dc->cc_op = CC_OP_LOGIC;
- }
- break;
- case 0x2: /* or */
- tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
- if (xop & 0x10) {
- tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
- dc->cc_op = CC_OP_LOGIC;
- }
- break;
- case 0x3: /* xor */
- tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
- if (xop & 0x10) {
- tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
- dc->cc_op = CC_OP_LOGIC;
- }
- break;
- case 0x4: /* sub */
- if (xop & 0x10) {
- gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
- dc->cc_op = CC_OP_SUB;
- } else {
- tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
- }
- break;
- case 0x5: /* andn */
- tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
- if (xop & 0x10) {
- tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
- dc->cc_op = CC_OP_LOGIC;
- }
- break;
- case 0x6: /* orn */
- tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
- if (xop & 0x10) {
- tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
- dc->cc_op = CC_OP_LOGIC;
- }
- break;
- case 0x7: /* xorn */
- tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
- if (xop & 0x10) {
- tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
- dc->cc_op = CC_OP_LOGIC;
- }
- break;
- case 0x8: /* addx, V9 addc */
- gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
- (xop & 0x10));
- break;
-#ifdef TARGET_SPARC64
- case 0x9: /* V9 mulx */
- tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
- break;
-#endif
- case 0xa: /* umul */
- CHECK_IU_FEATURE(dc, MUL);
- gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
- if (xop & 0x10) {
- tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
- dc->cc_op = CC_OP_LOGIC;
- }
- break;
- case 0xb: /* smul */
- CHECK_IU_FEATURE(dc, MUL);
- gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
- if (xop & 0x10) {
- tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
- dc->cc_op = CC_OP_LOGIC;
- }
- break;
- case 0xc: /* subx, V9 subc */
- gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
- (xop & 0x10));
- break;
-#ifdef TARGET_SPARC64
- case 0xd: /* V9 udivx */
- gen_helper_udivx(cpu_dst, tcg_env, cpu_src1, cpu_src2);
- break;
-#endif
- case 0xe: /* udiv */
- CHECK_IU_FEATURE(dc, DIV);
- if (xop & 0x10) {
- gen_helper_udiv_cc(cpu_dst, tcg_env, cpu_src1,
- cpu_src2);
- dc->cc_op = CC_OP_DIV;
- } else {
- gen_helper_udiv(cpu_dst, tcg_env, cpu_src1,
- cpu_src2);
- }
- break;
- case 0xf: /* sdiv */
- CHECK_IU_FEATURE(dc, DIV);
- if (xop & 0x10) {
- gen_helper_sdiv_cc(cpu_dst, tcg_env, cpu_src1,
- cpu_src2);
- dc->cc_op = CC_OP_DIV;
- } else {
- gen_helper_sdiv(cpu_dst, tcg_env, cpu_src1,
- cpu_src2);
- }
- break;
- default:
- goto illegal_insn;
- }
- gen_store_gpr(dc, rd, cpu_dst);
- } else {
- cpu_src1 = get_src1(dc, insn);
- cpu_src2 = get_src2(dc, insn);
- switch (xop) {
- case 0x20: /* taddcc */
- gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
- gen_store_gpr(dc, rd, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
- dc->cc_op = CC_OP_TADD;
- break;
- case 0x21: /* tsubcc */
- gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
- gen_store_gpr(dc, rd, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
- dc->cc_op = CC_OP_TSUB;
- break;
- case 0x22: /* taddcctv */
- gen_helper_taddcctv(cpu_dst, tcg_env,
- cpu_src1, cpu_src2);
- gen_store_gpr(dc, rd, cpu_dst);
- dc->cc_op = CC_OP_TADDTV;
- break;
- case 0x23: /* tsubcctv */
- gen_helper_tsubcctv(cpu_dst, tcg_env,
- cpu_src1, cpu_src2);
- gen_store_gpr(dc, rd, cpu_dst);
- dc->cc_op = CC_OP_TSUBTV;
- break;
- case 0x24: /* mulscc */
- update_psr(dc);
- gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
- gen_store_gpr(dc, rd, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
- dc->cc_op = CC_OP_ADD;
- break;
-#ifndef TARGET_SPARC64
- case 0x25: /* sll */
- if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 20, 31);
- tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
- } else { /* register */
- cpu_tmp0 = tcg_temp_new();
- tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
- tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
- }
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x26: /* srl */
- if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 20, 31);
- tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
- } else { /* register */
- cpu_tmp0 = tcg_temp_new();
- tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
- tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
- }
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x27: /* sra */
- if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 20, 31);
- tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
- } else { /* register */
- cpu_tmp0 = tcg_temp_new();
- tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
- tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
- }
- gen_store_gpr(dc, rd, cpu_dst);
- break;
-#endif
- case 0x30:
- goto illegal_insn; /* WRASR in decodetree */
- case 0x32:
- goto illegal_insn; /* WRPR in decodetree */
- case 0x33: /* wrtbr, UA2005 wrhpr */
- goto illegal_insn; /* WRTBR, WRHPR in decodetree */
-#ifdef TARGET_SPARC64
- case 0x2c: /* V9 movcc */
- {
- int cc = GET_FIELD_SP(insn, 11, 12);
- int cond = GET_FIELD_SP(insn, 14, 17);
- DisasCompare cmp;
- TCGv dst;
-
- if (insn & (1 << 18)) {
- if (cc == 0) {
- gen_compare(&cmp, 0, cond, dc);
- } else if (cc == 2) {
- gen_compare(&cmp, 1, cond, dc);
- } else {
- goto illegal_insn;
- }
- } else {
- gen_fcompare(&cmp, cc, cond);
- }
-
- /* The get_src2 above loaded the normal 13-bit
- immediate field, not the 11-bit field we have
- in movcc. But it did handle the reg case. */
- if (IS_IMM) {
- simm = GET_FIELD_SPs(insn, 0, 10);
- tcg_gen_movi_tl(cpu_src2, simm);
- }
-
- dst = gen_load_gpr(dc, rd);
- tcg_gen_movcond_tl(cmp.cond, dst,
- cmp.c1, cmp.c2,
- cpu_src2, dst);
- gen_store_gpr(dc, rd, dst);
- break;
- }
- case 0x2d: /* V9 sdivx */
- gen_helper_sdivx(cpu_dst, tcg_env, cpu_src1, cpu_src2);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x2e: /* V9 popc */
- tcg_gen_ctpop_tl(cpu_dst, cpu_src2);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x2f: /* V9 movr */
- {
- int cond = GET_FIELD_SP(insn, 10, 12);
- DisasCompare cmp;
- TCGv dst;
-
- gen_compare_reg(&cmp, cond, cpu_src1);
-
- /* The get_src2 above loaded the normal 13-bit
- immediate field, not the 10-bit field we have
- in movr. But it did handle the reg case. */
- if (IS_IMM) {
- simm = GET_FIELD_SPs(insn, 0, 9);
- tcg_gen_movi_tl(cpu_src2, simm);
- }
-
- dst = gen_load_gpr(dc, rd);
- tcg_gen_movcond_tl(cmp.cond, dst,
- cmp.c1, cmp.c2,
- cpu_src2, dst);
- gen_store_gpr(dc, rd, dst);
- break;
- }
+ tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
+#else
+ tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
#endif
- default:
- goto illegal_insn;
- }
- }
- } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
-#ifdef TARGET_SPARC64
- int opf = GET_FIELD_SP(insn, 5, 13);
- rs1 = GET_FIELD(insn, 13, 17);
- rs2 = GET_FIELD(insn, 27, 31);
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
+ }
- switch (opf) {
- case 0x000: /* VIS I edge8cc */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x001: /* VIS II edge8n */
- CHECK_FPU_FEATURE(dc, VIS2);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x002: /* VIS I edge8lcc */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x003: /* VIS II edge8ln */
- CHECK_FPU_FEATURE(dc, VIS2);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x004: /* VIS I edge16cc */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x005: /* VIS II edge16n */
- CHECK_FPU_FEATURE(dc, VIS2);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x006: /* VIS I edge16lcc */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x007: /* VIS II edge16ln */
- CHECK_FPU_FEATURE(dc, VIS2);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x008: /* VIS I edge32cc */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x009: /* VIS II edge32n */
- CHECK_FPU_FEATURE(dc, VIS2);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x00a: /* VIS I edge32lcc */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x00b: /* VIS II edge32ln */
- CHECK_FPU_FEATURE(dc, VIS2);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x010: /* VIS I array8 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x012: /* VIS I array16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
- tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x014: /* VIS I array32 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
- tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x018: /* VIS I alignaddr */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x01a: /* VIS I alignaddrl */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x019: /* VIS II bmask */
- CHECK_FPU_FEATURE(dc, VIS2);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
- tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x020: /* VIS I fcmple16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- cpu_src2_64 = gen_load_fpr_D(dc, rs2);
- gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x022: /* VIS I fcmpne16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- cpu_src2_64 = gen_load_fpr_D(dc, rs2);
- gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x024: /* VIS I fcmple32 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- cpu_src2_64 = gen_load_fpr_D(dc, rs2);
- gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x026: /* VIS I fcmpne32 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- cpu_src2_64 = gen_load_fpr_D(dc, rs2);
- gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x028: /* VIS I fcmpgt16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- cpu_src2_64 = gen_load_fpr_D(dc, rs2);
- gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x02a: /* VIS I fcmpeq16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- cpu_src2_64 = gen_load_fpr_D(dc, rs2);
- gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x02c: /* VIS I fcmpgt32 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- cpu_src2_64 = gen_load_fpr_D(dc, rs2);
- gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x02e: /* VIS I fcmpeq32 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- cpu_src2_64 = gen_load_fpr_D(dc, rs2);
- gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x031: /* VIS I fmul8x16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
- break;
- case 0x033: /* VIS I fmul8x16au */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
- break;
- case 0x035: /* VIS I fmul8x16al */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
- break;
- case 0x036: /* VIS I fmul8sux16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
- break;
- case 0x037: /* VIS I fmul8ulx16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
- break;
- case 0x038: /* VIS I fmuld8sux16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
- break;
- case 0x039: /* VIS I fmuld8ulx16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
- break;
- case 0x03a: /* VIS I fpack32 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
- break;
- case 0x03b: /* VIS I fpack16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs2);
- cpu_dst_32 = gen_dest_fpr_F(dc);
- gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
- gen_store_fpr_F(dc, rd, cpu_dst_32);
- break;
- case 0x03d: /* VIS I fpackfix */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs2);
- cpu_dst_32 = gen_dest_fpr_F(dc);
- gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
- gen_store_fpr_F(dc, rd, cpu_dst_32);
- break;
- case 0x03e: /* VIS I pdist */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
- break;
- case 0x048: /* VIS I faligndata */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
- break;
- case 0x04b: /* VIS I fpmerge */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
- break;
- case 0x04c: /* VIS II bshuffle */
- CHECK_FPU_FEATURE(dc, VIS2);
- gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
- break;
- case 0x04d: /* VIS I fexpand */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
- break;
- case 0x050: /* VIS I fpadd16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
- break;
- case 0x051: /* VIS I fpadd16s */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
- break;
- case 0x052: /* VIS I fpadd32 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
- break;
- case 0x053: /* VIS I fpadd32s */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
- break;
- case 0x054: /* VIS I fpsub16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
- break;
- case 0x055: /* VIS I fpsub16s */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
- break;
- case 0x056: /* VIS I fpsub32 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
- break;
- case 0x057: /* VIS I fpsub32s */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
- break;
- case 0x060: /* VIS I fzero */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_dst_64 = gen_dest_fpr_D(dc, rd);
- tcg_gen_movi_i64(cpu_dst_64, 0);
- gen_store_fpr_D(dc, rd, cpu_dst_64);
- break;
- case 0x061: /* VIS I fzeros */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_dst_32 = gen_dest_fpr_F(dc);
- tcg_gen_movi_i32(cpu_dst_32, 0);
- gen_store_fpr_F(dc, rd, cpu_dst_32);
- break;
- case 0x062: /* VIS I fnor */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
- break;
- case 0x063: /* VIS I fnors */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
- break;
- case 0x064: /* VIS I fandnot2 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
- break;
- case 0x065: /* VIS I fandnot2s */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
- break;
- case 0x066: /* VIS I fnot2 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
- break;
- case 0x067: /* VIS I fnot2s */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
- break;
- case 0x068: /* VIS I fandnot1 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
- break;
- case 0x069: /* VIS I fandnot1s */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
- break;
- case 0x06a: /* VIS I fnot1 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
- break;
- case 0x06b: /* VIS I fnot1s */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
- break;
- case 0x06c: /* VIS I fxor */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
- break;
- case 0x06d: /* VIS I fxors */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
- break;
- case 0x06e: /* VIS I fnand */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
- break;
- case 0x06f: /* VIS I fnands */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
- break;
- case 0x070: /* VIS I fand */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
- break;
- case 0x071: /* VIS I fands */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
- break;
- case 0x072: /* VIS I fxnor */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
- break;
- case 0x073: /* VIS I fxnors */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
- break;
- case 0x074: /* VIS I fsrc1 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- gen_store_fpr_D(dc, rd, cpu_src1_64);
- break;
- case 0x075: /* VIS I fsrc1s */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_32 = gen_load_fpr_F(dc, rs1);
- gen_store_fpr_F(dc, rd, cpu_src1_32);
- break;
- case 0x076: /* VIS I fornot2 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
- break;
- case 0x077: /* VIS I fornot2s */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
- break;
- case 0x078: /* VIS I fsrc2 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs2);
- gen_store_fpr_D(dc, rd, cpu_src1_64);
- break;
- case 0x079: /* VIS I fsrc2s */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_32 = gen_load_fpr_F(dc, rs2);
- gen_store_fpr_F(dc, rd, cpu_src1_32);
- break;
- case 0x07a: /* VIS I fornot1 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
- break;
- case 0x07b: /* VIS I fornot1s */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
- break;
- case 0x07c: /* VIS I for */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
- break;
- case 0x07d: /* VIS I fors */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
- break;
- case 0x07e: /* VIS I fone */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_dst_64 = gen_dest_fpr_D(dc, rd);
- tcg_gen_movi_i64(cpu_dst_64, -1);
- gen_store_fpr_D(dc, rd, cpu_dst_64);
- break;
- case 0x07f: /* VIS I fones */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_dst_32 = gen_dest_fpr_F(dc);
- tcg_gen_movi_i32(cpu_dst_32, -1);
- gen_store_fpr_F(dc, rd, cpu_dst_32);
- break;
- case 0x080: /* VIS I shutdown */
- case 0x081: /* VIS II siam */
- // XXX
- goto illegal_insn;
- default:
- goto illegal_insn;
- }
+ t1 = tcg_temp_new_i64();
+ tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);
+
+ tcg_gen_divu_i64(t1, t1, t2);
+ tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));
+
+ dst = gen_dest_gpr(dc, a->rd);
+ tcg_gen_trunc_i64_tl(dst, t1);
+ gen_store_gpr(dc, a->rd, dst);
+ return advance_pc(dc);
+}
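+
+/*
+ * V8 UDIV divides the 64-bit value Y:rs1 by the low 32 bits of the
+ * divisor; the umin above provides the architectural saturation of an
+ * overflowed quotient.  E.g. with Y = 1, rs1 = 0: dividing by 2 gives
+ * 0x80000000, while dividing by 1 overflows and saturates to
+ * 0xffffffff.
+ */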
+
+static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
+{
+ TCGv dst, src1, src2;
+
+ if (!avail_64(dc)) {
+ return false;
+ }
+ /* For simplicity, we under-decoded the rs2 form. */
+ if (!a->imm && a->rs2_or_imm & ~0x1f) {
+ return false;
+ }
+
+ if (unlikely(a->rs2_or_imm == 0)) {
+ gen_exception(dc, TT_DIV_ZERO);
+ return true;
+ }
+
+ if (a->imm) {
+ src2 = tcg_constant_tl(a->rs2_or_imm);
+ } else {
+ TCGLabel *lab;
+
+ finishing_insn(dc);
+ flush_cond(dc);
+
+ lab = delay_exception(dc, TT_DIV_ZERO);
+ src2 = cpu_regs[a->rs2_or_imm];
+ tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
+ }
+
+ dst = gen_dest_gpr(dc, a->rd);
+ src1 = gen_load_gpr(dc, a->rs1);
+
+ tcg_gen_divu_tl(dst, src1, src2);
+ gen_store_gpr(dc, a->rd, dst);
+ return advance_pc(dc);
+}
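+
+/*
+ * In the register form the divisor is only known at run time, so a
+ * zero check branches to a delay_exception() label; finishing_insn()
+ * and flush_cond() run first so that any deferred branch-condition
+ * state is committed before the TT_DIV_ZERO path can be taken.
+ */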
+
+static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
+{
+ TCGv dst, src1, src2;
+
+ if (!avail_64(dc)) {
+ return false;
+ }
+ /* For simplicity, we under-decoded the rs2 form. */
+ if (!a->imm && a->rs2_or_imm & ~0x1f) {
+ return false;
+ }
+
+ if (unlikely(a->rs2_or_imm == 0)) {
+ gen_exception(dc, TT_DIV_ZERO);
+ return true;
+ }
+
+ dst = gen_dest_gpr(dc, a->rd);
+ src1 = gen_load_gpr(dc, a->rs1);
+
+ if (a->imm) {
+ if (unlikely(a->rs2_or_imm == -1)) {
+ tcg_gen_neg_tl(dst, src1);
+ gen_store_gpr(dc, a->rd, dst);
+ return advance_pc(dc);
+ }
+ src2 = tcg_constant_tl(a->rs2_or_imm);
+ } else {
+ TCGLabel *lab;
+ TCGv t1, t2;
+
+ finishing_insn(dc);
+ flush_cond(dc);
+
+ lab = delay_exception(dc, TT_DIV_ZERO);
+ src2 = cpu_regs[a->rs2_or_imm];
+ tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
+
+ /*
+ * Need to avoid INT64_MIN / -1, which will trap on x86 host.
+ * Set SRC2 to 1 as a new divisor, to produce the correct result.
+ */
+ t1 = tcg_temp_new();
+ t2 = tcg_temp_new();
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
+ tcg_gen_and_tl(t1, t1, t2);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
+ tcg_constant_tl(1), src2);
+ src2 = t1;
+ }
+
+ tcg_gen_div_tl(dst, src1, src2);
+ gen_store_gpr(dc, a->rd, dst);
+ return advance_pc(dc);
+}
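+
+/*
+ * Overflow note: the one special SDIVX case is INT64_MIN / -1, whose
+ * architectural result is the wrapped INT64_MIN.  The movcond above
+ * swaps the divisor to 1 exactly then, which yields the same value
+ * while keeping the host division from trapping; the immediate -1
+ * form is folded to a plain negation instead.
+ */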
+
+static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
+ int width, bool cc, bool left)
+{
+ TCGv dst, s1, s2, lo1, lo2;
+ uint64_t amask, tabl, tabr;
+ int shift, imask, omask;
+
+ dst = gen_dest_gpr(dc, a->rd);
+ s1 = gen_load_gpr(dc, a->rs1);
+ s2 = gen_load_gpr(dc, a->rs2);
+
+ if (cc) {
+ gen_op_subcc(cpu_cc_N, s1, s2);
+ }
+
+ /*
+ * Theory of operation: there are two tables, left and right (not to
+ * be confused with the left and right versions of the opcode). These
+ * are indexed by the low 3 bits of the inputs. To make things "easy",
+ * these tables are loaded into two constants, TABL and TABR below.
+ * The operation index = (input & imask) << shift calculates the index
+ * into the constant, while val = (table >> index) & omask calculates
+ * the value we're looking for.
+ */
+ switch (width) {
+ case 8:
+ imask = 0x7;
+ shift = 3;
+ omask = 0xff;
+ if (left) {
+ tabl = 0x80c0e0f0f8fcfeffULL;
+ tabr = 0xff7f3f1f0f070301ULL;
+ } else {
+ tabl = 0x0103070f1f3f7fffULL;
+ tabr = 0xfffefcf8f0e0c080ULL;
+ }
+ break;
+ case 16:
+ imask = 0x6;
+ shift = 1;
+ omask = 0xf;
+ if (left) {
+ tabl = 0x8cef;
+ tabr = 0xf731;
+ } else {
+ tabl = 0x137f;
+ tabr = 0xfec8;
+ }
+ break;
+ case 32:
+ imask = 0x4;
+ shift = 0;
+ omask = 0x3;
+ if (left) {
+ tabl = (2 << 2) | 3;
+ tabr = (3 << 2) | 1;
+ } else {
+ tabl = (1 << 2) | 3;
+ tabr = (3 << 2) | 2;
+ }
+ break;
+ default:
+ abort();
+ }
+
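+ /*
+ * Index TABL/TABR by the low bits of each address and
+ * extract the edge mask for each operand.
+ */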
+ lo1 = tcg_temp_new();
+ lo2 = tcg_temp_new();
+ tcg_gen_andi_tl(lo1, s1, imask);
+ tcg_gen_andi_tl(lo2, s2, imask);
+ tcg_gen_shli_tl(lo1, lo1, shift);
+ tcg_gen_shli_tl(lo2, lo2, shift);
+
+ tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
+ tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
+ tcg_gen_andi_tl(lo1, lo1, omask);
+ tcg_gen_andi_tl(lo2, lo2, omask);
+
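+ /* Compare the two addresses with the low 3 bits masked off. */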
+ amask = address_mask_i(dc, -8);
+ tcg_gen_andi_tl(s1, s1, amask);
+ tcg_gen_andi_tl(s2, s2, amask);
+
+ /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
+ tcg_gen_and_tl(lo2, lo2, lo1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
+
+ gen_store_gpr(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
+TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
+TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
+TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
+TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
+TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
+
+TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
+TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
+TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
+TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
+TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
+TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
+
+static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
+ void (*func)(TCGv, TCGv, TCGv))
+{
+ TCGv dst = gen_dest_gpr(dc, a->rd);
+ TCGv src1 = gen_load_gpr(dc, a->rs1);
+ TCGv src2 = gen_load_gpr(dc, a->rs2);
+
+ func(dst, src1, src2);
+ gen_store_gpr(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
+TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
+TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
+
+static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
+{
+#ifdef TARGET_SPARC64
+ TCGv tmp = tcg_temp_new();
+
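+ /* dst = (s1 + s2) & ~7; the low 3 bits of the sum go to GSR.align. */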
+ tcg_gen_add_tl(tmp, s1, s2);
+ tcg_gen_andi_tl(dst, tmp, -8);
+ tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
- goto ncp_insn;
+ g_assert_not_reached();
#endif
- } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
+}
+
+static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
+{
#ifdef TARGET_SPARC64
- goto illegal_insn;
+ TCGv tmp = tcg_temp_new();
+
+ tcg_gen_add_tl(tmp, s1, s2);
+ tcg_gen_andi_tl(dst, tmp, -8);
+ tcg_gen_neg_tl(tmp, tmp);
+ tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
- goto ncp_insn;
+ g_assert_not_reached();
#endif
+}
+
+TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
+TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
+
+static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
+{
#ifdef TARGET_SPARC64
- } else if (xop == 0x39) { /* V9 return */
- save_state(dc);
- cpu_src1 = get_src1(dc, insn);
- cpu_tmp0 = tcg_temp_new();
- if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 19, 31);
- tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
- } else { /* register */
- rs2 = GET_FIELD(insn, 27, 31);
- if (rs2) {
- cpu_src2 = gen_load_gpr(dc, rs2);
- tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
- } else {
- tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
- }
- }
- gen_check_align(dc, cpu_tmp0, 3);
- gen_helper_restore(tcg_env);
- gen_mov_pc_npc(dc);
- tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
- dc->npc = DYNAMIC_PC_LOOKUP;
- goto jmp_insn;
-#endif
- } else {
- cpu_src1 = get_src1(dc, insn);
- cpu_tmp0 = tcg_temp_new();
- if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 19, 31);
- tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
- } else { /* register */
- rs2 = GET_FIELD(insn, 27, 31);
- if (rs2) {
- cpu_src2 = gen_load_gpr(dc, rs2);
- tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
- } else {
- tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
- }
- }
- switch (xop) {
- case 0x38: /* jmpl */
- {
- gen_check_align(dc, cpu_tmp0, 3);
- gen_store_gpr(dc, rd, tcg_constant_tl(dc->pc));
- gen_mov_pc_npc(dc);
- gen_address_mask(dc, cpu_tmp0);
- tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
- dc->npc = DYNAMIC_PC_LOOKUP;
- }
- goto jmp_insn;
-#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
- case 0x39: /* rett, V9 return */
- {
- if (!supervisor(dc))
- goto priv_insn;
- gen_check_align(dc, cpu_tmp0, 3);
- gen_mov_pc_npc(dc);
- tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
- dc->npc = DYNAMIC_PC;
- gen_helper_rett(tcg_env);
- }
- goto jmp_insn;
-#endif
- case 0x3b: /* flush */
- /* nop */
- break;
- case 0x3c: /* save */
- gen_helper_save(tcg_env);
- gen_store_gpr(dc, rd, cpu_tmp0);
- break;
- case 0x3d: /* restore */
- gen_helper_restore(tcg_env);
- gen_store_gpr(dc, rd, cpu_tmp0);
- break;
-#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
- case 0x3e: /* V9 done/retry */
- {
- switch (rd) {
- case 0:
- if (!supervisor(dc))
- goto priv_insn;
- dc->npc = DYNAMIC_PC;
- dc->pc = DYNAMIC_PC;
- translator_io_start(&dc->base);
- gen_helper_done(tcg_env);
- goto jmp_insn;
- case 1:
- if (!supervisor(dc))
- goto priv_insn;
- dc->npc = DYNAMIC_PC;
- dc->pc = DYNAMIC_PC;
- translator_io_start(&dc->base);
- gen_helper_retry(tcg_env);
- goto jmp_insn;
- default:
- goto illegal_insn;
- }
- }
- break;
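+ /* dst = s1 + s2; the low 32 bits of the sum also become GSR.mask. */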
+ tcg_gen_add_tl(dst, s1, s2);
+ tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
+#else
+ g_assert_not_reached();
#endif
- default:
- goto illegal_insn;
- }
- }
- break;
+}
+
+TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
+
+static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
+{
+ TCGv dst, src1, src2;
+
+ /* Reject 64-bit shifts for sparc32. */
+ if (avail_32(dc) && a->x) {
+ return false;
+ }
+
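+ /* Only the low 5 (32-bit) or 6 (64-bit) bits of the count are used. */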
+ src2 = tcg_temp_new();
+ tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
+ src1 = gen_load_gpr(dc, a->rs1);
+ dst = gen_dest_gpr(dc, a->rd);
+
+ if (l) {
+ tcg_gen_shl_tl(dst, src1, src2);
+ if (!a->x) {
+ tcg_gen_ext32u_tl(dst, dst);
}
- break;
- case 3: /* load/store instructions */
- {
- unsigned int xop = GET_FIELD(insn, 7, 12);
- /* ??? gen_address_mask prevents us from using a source
- register directly. Always generate a temporary. */
- TCGv cpu_addr = tcg_temp_new();
-
- tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
- if (xop == 0x3c || xop == 0x3e) {
- /* V9 casa/casxa : no offset */
- } else if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 19, 31);
- if (simm != 0) {
- tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
- }
- } else { /* register */
- rs2 = GET_FIELD(insn, 27, 31);
- if (rs2 != 0) {
- tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
- }
- }
- if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
- (xop > 0x17 && xop <= 0x1d ) ||
- (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
- TCGv cpu_val = gen_dest_gpr(dc, rd);
-
- switch (xop) {
- case 0x0: /* ld, V9 lduw, load unsigned word */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TEUL | MO_ALIGN);
- break;
- case 0x1: /* ldub, load unsigned byte */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_UB);
- break;
- case 0x2: /* lduh, load unsigned halfword */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TEUW | MO_ALIGN);
- break;
- case 0x3: /* ldd, load double word */
- if (rd & 1)
- goto illegal_insn;
- else {
- TCGv_i64 t64;
-
- gen_address_mask(dc, cpu_addr);
- t64 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(t64, cpu_addr,
- dc->mem_idx, MO_TEUQ | MO_ALIGN);
- tcg_gen_trunc_i64_tl(cpu_val, t64);
- tcg_gen_ext32u_tl(cpu_val, cpu_val);
- gen_store_gpr(dc, rd + 1, cpu_val);
- tcg_gen_shri_i64(t64, t64, 32);
- tcg_gen_trunc_i64_tl(cpu_val, t64);
- tcg_gen_ext32u_tl(cpu_val, cpu_val);
- }
- break;
- case 0x9: /* ldsb, load signed byte */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, dc->mem_idx, MO_SB);
- break;
- case 0xa: /* ldsh, load signed halfword */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TESW | MO_ALIGN);
- break;
- case 0xd: /* ldstub */
- gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
- break;
- case 0x0f:
- /* swap, swap register with memory. Also atomically */
- cpu_src1 = gen_load_gpr(dc, rd);
- gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
- dc->mem_idx, MO_TEUL);
- break;
-#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
- case 0x10: /* lda, V9 lduwa, load word alternate */
- gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
- break;
- case 0x11: /* lduba, load unsigned byte alternate */
- gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
- break;
- case 0x12: /* lduha, load unsigned halfword alternate */
- gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
- break;
- case 0x13: /* ldda, load double word alternate */
- if (rd & 1) {
- goto illegal_insn;
- }
- gen_ldda_asi(dc, cpu_addr, insn, rd);
- goto skip_move;
- case 0x19: /* ldsba, load signed byte alternate */
- gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
- break;
- case 0x1a: /* ldsha, load signed halfword alternate */
- gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
- break;
- case 0x1d: /* ldstuba -- XXX: should be atomically */
- gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
- break;
- case 0x1f: /* swapa, swap reg with alt. memory. Also
- atomically */
- cpu_src1 = gen_load_gpr(dc, rd);
- gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
- break;
+ } else if (u) {
+ if (!a->x) {
+ tcg_gen_ext32u_tl(dst, src1);
+ src1 = dst;
+ }
+ tcg_gen_shr_tl(dst, src1, src2);
+ } else {
+ if (!a->x) {
+ tcg_gen_ext32s_tl(dst, src1);
+ src1 = dst;
+ }
+ tcg_gen_sar_tl(dst, src1, src2);
+ }
+ gen_store_gpr(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(SLL_r, ALL, do_shift_r, a, true, true)
+TRANS(SRL_r, ALL, do_shift_r, a, false, true)
+TRANS(SRA_r, ALL, do_shift_r, a, false, false)
+
+static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
+{
+ TCGv dst, src1;
+
+ /* Reject 64-bit shifts for sparc32. */
+ if (avail_32(dc) && (a->x || a->i >= 32)) {
+ return false;
+ }
+
+ src1 = gen_load_gpr(dc, a->rs1);
+ dst = gen_dest_gpr(dc, a->rd);
+
+ if (avail_32(dc) || a->x) {
+ if (l) {
+ tcg_gen_shli_tl(dst, src1, a->i);
+ } else if (u) {
+ tcg_gen_shri_tl(dst, src1, a->i);
+ } else {
+ tcg_gen_sari_tl(dst, src1, a->i);
+ }
+ } else {
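+ /* Fold the 32-bit shift and extension into one deposit/extract. */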
+ if (l) {
+ tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
+ } else if (u) {
+ tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
+ } else {
+ tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
+ }
+ }
+ gen_store_gpr(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(SLL_i, ALL, do_shift_i, a, true, true)
+TRANS(SRL_i, ALL, do_shift_i, a, false, true)
+TRANS(SRA_i, ALL, do_shift_i, a, false, false)
+
+static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
+{
+ /* For simplicity, we under-decoded the rs2 form. */
+ if (!imm && rs2_or_imm & ~0x1f) {
+ return NULL;
+ }
+ if (imm || rs2_or_imm == 0) {
+ return tcg_constant_tl(rs2_or_imm);
+ } else {
+ return cpu_regs[rs2_or_imm];
+ }
+}
+
+static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
+{
+ TCGv dst = gen_load_gpr(dc, rd);
+ TCGv c2 = tcg_constant_tl(cmp->c2);
+
+ tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
+ gen_store_gpr(dc, rd, dst);
+ return advance_pc(dc);
+}
+
+static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
+{
+ TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
+ DisasCompare cmp;
+
+ if (src2 == NULL) {
+ return false;
+ }
+ gen_compare(&cmp, a->cc, a->cond, dc);
+ return do_mov_cond(dc, &cmp, a->rd, src2);
+}
+
+static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
+{
+ TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
+ DisasCompare cmp;
+
+ if (src2 == NULL) {
+ return false;
+ }
+ gen_fcompare(&cmp, a->cc, a->cond);
+ return do_mov_cond(dc, &cmp, a->rd, src2);
+}
+
+static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
+{
+ TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
+ DisasCompare cmp;
+
+ if (src2 == NULL) {
+ return false;
+ }
+ if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
+ return false;
+ }
+ return do_mov_cond(dc, &cmp, a->rd, src2);
+}
+
+static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
+ bool (*func)(DisasContext *dc, int rd, TCGv src))
+{
+ TCGv src1, sum;
+
+ /* For simplicity, we under-decoded the rs2 form. */
+ if (!a->imm && a->rs2_or_imm & ~0x1f) {
+ return false;
+ }
+
+ /*
+ * Always load the sum into a new temporary.
+ * This is required to capture the value across a window change,
+ * e.g. SAVE and RESTORE, and may be optimized away otherwise.
+ */
+ sum = tcg_temp_new();
+ src1 = gen_load_gpr(dc, a->rs1);
+ if (a->imm || a->rs2_or_imm == 0) {
+ tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
+ } else {
+ tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
+ }
+ return func(dc, a->rd, sum);
+}
+
+static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
+{
+ /*
+ * Preserve pc across advance, so that we can delay
+ * the writeback to rd until after src is consumed.
+ */
+ target_ulong cur_pc = dc->pc;
+
+ gen_check_align(dc, src, 3);
+
+ gen_mov_pc_npc(dc);
+ tcg_gen_mov_tl(cpu_npc, src);
+ gen_address_mask(dc, cpu_npc);
+ gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
+
+ dc->npc = DYNAMIC_PC_LOOKUP;
+ return true;
+}
+
+TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
+
+static bool do_rett(DisasContext *dc, int rd, TCGv src)
+{
+ if (!supervisor(dc)) {
+ return raise_priv(dc);
+ }
+
+ gen_check_align(dc, src, 3);
+
+ gen_mov_pc_npc(dc);
+ tcg_gen_mov_tl(cpu_npc, src);
+ gen_helper_rett(tcg_env);
+
+ dc->npc = DYNAMIC_PC;
+ return true;
+}
+
+TRANS(RETT, 32, do_add_special, a, do_rett)
+
+static bool do_return(DisasContext *dc, int rd, TCGv src)
+{
+ gen_check_align(dc, src, 3);
+ gen_helper_restore(tcg_env);
+
+ gen_mov_pc_npc(dc);
+ tcg_gen_mov_tl(cpu_npc, src);
+ gen_address_mask(dc, cpu_npc);
+
+ dc->npc = DYNAMIC_PC_LOOKUP;
+ return true;
+}
+
+TRANS(RETURN, 64, do_add_special, a, do_return)
+
+static bool do_save(DisasContext *dc, int rd, TCGv src)
+{
+ gen_helper_save(tcg_env);
+ gen_store_gpr(dc, rd, src);
+ return advance_pc(dc);
+}
+
+TRANS(SAVE, ALL, do_add_special, a, do_save)
+
+static bool do_restore(DisasContext *dc, int rd, TCGv src)
+{
+ gen_helper_restore(tcg_env);
+ gen_store_gpr(dc, rd, src);
+ return advance_pc(dc);
+}
+
+TRANS(RESTORE, ALL, do_add_special, a, do_restore)
+
+static bool do_done_retry(DisasContext *dc, bool done)
+{
+ if (!supervisor(dc)) {
+ return raise_priv(dc);
+ }
+ dc->npc = DYNAMIC_PC;
+ dc->pc = DYNAMIC_PC;
+ translator_io_start(&dc->base);
+ if (done) {
+ gen_helper_done(tcg_env);
+ } else {
+ gen_helper_retry(tcg_env);
+ }
+ return true;
+}
+
+TRANS(DONE, 64, do_done_retry, true)
+TRANS(RETRY, 64, do_done_retry, false)
+
+/*
+ * Major opcode 11 -- load and store instructions
+ */
+
+static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
+{
+ TCGv addr, tmp = NULL;
+
+ /* For simplicity, we under-decoded the rs2 form. */
+ if (!imm && rs2_or_imm & ~0x1f) {
+ return NULL;
+ }
+
+ addr = gen_load_gpr(dc, rs1);
+ if (rs2_or_imm) {
+ tmp = tcg_temp_new();
+ if (imm) {
+ tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
+ } else {
+ tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
+ }
+ addr = tmp;
+ }
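+ /* Truncate to 32 bits when the address mask (PSTATE.AM) applies. */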
+ if (AM_CHECK(dc)) {
+ if (!tmp) {
+ tmp = tcg_temp_new();
+ }
+ tcg_gen_ext32u_tl(tmp, addr);
+ addr = tmp;
+ }
+ return addr;
+}
+
+static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
+{
+ TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ DisasASI da;
+
+ if (addr == NULL) {
+ return false;
+ }
+ da = resolve_asi(dc, a->asi, mop);
+
+ reg = gen_dest_gpr(dc, a->rd);
+ gen_ld_asi(dc, &da, reg, addr);
+ gen_store_gpr(dc, a->rd, reg);
+ return advance_pc(dc);
+}
+
+TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
+TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
+TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
+TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
+TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
+TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
+TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
+
+static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
+{
+ TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ DisasASI da;
+
+ if (addr == NULL) {
+ return false;
+ }
+ da = resolve_asi(dc, a->asi, mop);
+
+ reg = gen_load_gpr(dc, a->rd);
+ gen_st_asi(dc, &da, reg, addr);
+ return advance_pc(dc);
+}
+
+TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
+TRANS(STB, ALL, do_st_gpr, a, MO_UB)
+TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
+TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
+
+static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
+{
+ TCGv addr;
+ DisasASI da;
+
+ if (a->rd & 1) {
+ return false;
+ }
+ addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ if (addr == NULL) {
+ return false;
+ }
+ da = resolve_asi(dc, a->asi, MO_TEUQ);
+ gen_ldda_asi(dc, &da, addr, a->rd);
+ return advance_pc(dc);
+}
+
+static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
+{
+ TCGv addr;
+ DisasASI da;
+
+ if (a->rd & 1) {
+ return false;
+ }
+ addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ if (addr == NULL) {
+ return false;
+ }
+ da = resolve_asi(dc, a->asi, MO_TEUQ);
+ gen_stda_asi(dc, &da, addr, a->rd);
+ return advance_pc(dc);
+}
+
+static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
+{
+ TCGv addr, reg;
+ DisasASI da;
+
+ addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ if (addr == NULL) {
+ return false;
+ }
+ da = resolve_asi(dc, a->asi, MO_UB);
+
+ reg = gen_dest_gpr(dc, a->rd);
+ gen_ldstub_asi(dc, &da, reg, addr);
+ gen_store_gpr(dc, a->rd, reg);
+ return advance_pc(dc);
+}
+
+static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
+{
+ TCGv addr, dst, src;
+ DisasASI da;
+
+ addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ if (addr == NULL) {
+ return false;
+ }
+ da = resolve_asi(dc, a->asi, MO_TEUL);
+
+ dst = gen_dest_gpr(dc, a->rd);
+ src = gen_load_gpr(dc, a->rd);
+ gen_swap_asi(dc, &da, dst, src, addr);
+ gen_store_gpr(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
+{
+ TCGv addr, o, n, c;
+ DisasASI da;
+
+ addr = gen_ldst_addr(dc, a->rs1, true, 0);
+ if (addr == NULL) {
+ return false;
+ }
+ da = resolve_asi(dc, a->asi, mop);
+
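+ /* rd supplies the swap value and receives the old memory value. */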
+ o = gen_dest_gpr(dc, a->rd);
+ n = gen_load_gpr(dc, a->rd);
+ c = gen_load_gpr(dc, a->rs2_or_imm);
+ gen_cas_asi(dc, &da, o, n, c, addr);
+ gen_store_gpr(dc, a->rd, o);
+ return advance_pc(dc);
+}
+
+TRANS(CASA, CASA, do_casa, a, MO_TEUL)
+TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
+
+static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
+{
+ TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ DisasASI da;
+
+ if (addr == NULL) {
+ return false;
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (sz == MO_128 && gen_trap_float128(dc)) {
+ return true;
+ }
+ da = resolve_asi(dc, a->asi, MO_TE | sz);
+ gen_ldf_asi(dc, &da, sz, addr, a->rd);
+ gen_update_fprs_dirty(dc, a->rd);
+ return advance_pc(dc);
+}
+
+TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
+TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
+TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)
+
+TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
+TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
+TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
+
+static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
+{
+ TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ DisasASI da;
+
+ if (addr == NULL) {
+ return false;
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (sz == MO_128 && gen_trap_float128(dc)) {
+ return true;
+ }
+ da = resolve_asi(dc, a->asi, MO_TE | sz);
+ gen_stf_asi(dc, &da, sz, addr, a->rd);
+ return advance_pc(dc);
+}
+
+TRANS(STF, ALL, do_st_fpr, a, MO_32)
+TRANS(STDF, ALL, do_st_fpr, a, MO_64)
+TRANS(STQF, ALL, do_st_fpr, a, MO_128)
+
+TRANS(STFA, 64, do_st_fpr, a, MO_32)
+TRANS(STDFA, 64, do_st_fpr, a, MO_64)
+TRANS(STQFA, 64, do_st_fpr, a, MO_128)
+
+static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
+{
+ if (!avail_32(dc)) {
+ return false;
+ }
+ if (!supervisor(dc)) {
+ return raise_priv(dc);
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
+ return true;
+}
+
+static bool do_ldfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop,
+ target_ulong new_mask, target_ulong old_mask)
+{
+ TCGv tmp, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ if (addr == NULL) {
+ return false;
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
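+ /* Keep the OLD_MASK bits of FSR and merge in NEW_MASK bits from memory. */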
+ tmp = tcg_temp_new();
+ tcg_gen_qemu_ld_tl(tmp, addr, dc->mem_idx, mop | MO_ALIGN);
+ tcg_gen_andi_tl(tmp, tmp, new_mask);
+ tcg_gen_andi_tl(cpu_fsr, cpu_fsr, old_mask);
+ tcg_gen_or_tl(cpu_fsr, cpu_fsr, tmp);
+ gen_helper_set_fsr(tcg_env, cpu_fsr);
+ return advance_pc(dc);
+}
+
+TRANS(LDFSR, ALL, do_ldfsr, a, MO_TEUL, FSR_LDFSR_MASK, FSR_LDFSR_OLDMASK)
+TRANS(LDXFSR, 64, do_ldfsr, a, MO_TEUQ, FSR_LDXFSR_MASK, FSR_LDXFSR_OLDMASK)
+
+static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
+{
+ TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ if (addr == NULL) {
+ return false;
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ tcg_gen_qemu_st_tl(cpu_fsr, addr, dc->mem_idx, mop | MO_ALIGN);
+ return advance_pc(dc);
+}
+
+TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
+TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
+
+static bool do_fc(DisasContext *dc, int rd, bool c)
+{
+ uint64_t mask;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
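+ /* Each pair of single-precision registers shares one i64; odd rd is the low half. */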
+ if (rd & 1) {
+ mask = MAKE_64BIT_MASK(0, 32);
+ } else {
+ mask = MAKE_64BIT_MASK(32, 32);
+ }
+ if (c) {
+ tcg_gen_ori_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], mask);
+ } else {
+ tcg_gen_andi_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], ~mask);
+ }
+ gen_update_fprs_dirty(dc, rd);
+ return advance_pc(dc);
+}
+
+TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
+TRANS(FONEs, VIS1, do_fc, a->rd, 1)
+
+static bool do_dc(DisasContext *dc, int rd, int64_t c)
+{
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ tcg_gen_movi_i64(cpu_fpr[rd / 2], c);
+ gen_update_fprs_dirty(dc, rd);
+ return advance_pc(dc);
+}
+
+TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
+TRANS(FONEd, VIS1, do_dc, a->rd, -1)
+
+static bool do_ff(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 tmp;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ tmp = gen_load_fpr_F(dc, a->rs);
+ func(tmp, tmp);
+ gen_store_fpr_F(dc, a->rd, tmp);
+ return advance_pc(dc);
+}
+
+TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
+TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
+TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
+TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
+TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
+
+static bool do_fd(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_i32, TCGv_i64))
+{
+ TCGv_i32 dst;
+ TCGv_i64 src;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ dst = gen_dest_fpr_F(dc);
+ src = gen_load_fpr_D(dc, a->rs);
+ func(dst, src);
+ gen_store_fpr_F(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
+TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
+
+static bool do_env_ff(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
+{
+ TCGv_i32 tmp;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ tmp = gen_load_fpr_F(dc, a->rs);
+ func(tmp, tcg_env, tmp);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_store_fpr_F(dc, a->rd, tmp);
+ return advance_pc(dc);
+}
+
+TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
+TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
+TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
+
+static bool do_env_fd(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
+{
+ TCGv_i32 dst;
+ TCGv_i64 src;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ dst = gen_dest_fpr_F(dc);
+ src = gen_load_fpr_D(dc, a->rs);
+ func(dst, tcg_env, src);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_store_fpr_F(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
+TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
+TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
+
+static bool do_dd(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 dst, src;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ dst = gen_dest_fpr_D(dc, a->rd);
+ src = gen_load_fpr_D(dc, a->rs);
+ func(dst, src);
+ gen_store_fpr_D(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
+TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
+TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
+TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
+TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
+
+static bool do_env_dd(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
+{
+ TCGv_i64 dst, src;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ dst = gen_dest_fpr_D(dc, a->rd);
+ src = gen_load_fpr_D(dc, a->rs);
+ func(dst, tcg_env, src);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_store_fpr_D(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
+TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
+TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
+
+static bool do_env_df(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
+{
+ TCGv_i64 dst;
+ TCGv_i32 src;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ dst = gen_dest_fpr_D(dc, a->rd);
+ src = gen_load_fpr_F(dc, a->rs);
+ func(dst, tcg_env, src);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_store_fpr_D(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
+TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
+TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
+
+static bool trans_FMOVq(DisasContext *dc, arg_FMOVq *a)
+{
+ int rd, rs;
+
+ if (!avail_64(dc)) {
+ return false;
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ rd = QFPREG(a->rd);
+ rs = QFPREG(a->rs);
+ tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
+ tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
+ gen_update_fprs_dirty(dc, rd);
+ return advance_pc(dc);
+}
+
+static bool do_qq(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_env))
+{
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_op_load_fpr_QT1(QFPREG(a->rs));
+ func(tcg_env);
+ gen_op_store_QT0_fpr(QFPREG(a->rd));
+ gen_update_fprs_dirty(dc, QFPREG(a->rd));
+ return advance_pc(dc);
+}
+
+TRANS(FNEGq, 64, do_qq, a, gen_helper_fnegq)
+TRANS(FABSq, 64, do_qq, a, gen_helper_fabsq)
+
+static bool do_env_qq(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_env))
+{
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_op_load_fpr_QT1(QFPREG(a->rs));
+ func(tcg_env);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_op_store_QT0_fpr(QFPREG(a->rd));
+ gen_update_fprs_dirty(dc, QFPREG(a->rd));
+ return advance_pc(dc);
+}
+
+TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
+
+static bool do_env_fq(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_i32, TCGv_env))
+{
+ TCGv_i32 dst;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_op_load_fpr_QT1(QFPREG(a->rs));
+ dst = gen_dest_fpr_F(dc);
+ func(dst, tcg_env);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_store_fpr_F(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
+TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
+
+static bool do_env_dq(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_i64, TCGv_env))
+{
+ TCGv_i64 dst;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_op_load_fpr_QT1(QFPREG(a->rs));
+ dst = gen_dest_fpr_D(dc, a->rd);
+ func(dst, tcg_env);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_store_fpr_D(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
+TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
+
+static bool do_env_qf(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_env, TCGv_i32))
+{
+ TCGv_i32 src;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ src = gen_load_fpr_F(dc, a->rs);
+ func(tcg_env, src);
+ gen_op_store_QT0_fpr(QFPREG(a->rd));
+ gen_update_fprs_dirty(dc, QFPREG(a->rd));
+ return advance_pc(dc);
+}
+
+TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
+TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
+
+static bool do_env_qd(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_env, TCGv_i64))
+{
+ TCGv_i64 src;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ src = gen_load_fpr_D(dc, a->rs);
+ func(tcg_env, src);
+ gen_op_store_QT0_fpr(QFPREG(a->rd));
+ gen_update_fprs_dirty(dc, QFPREG(a->rd));
+ return advance_pc(dc);
+}
+
+TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
+TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
+
+static bool do_fff(DisasContext *dc, arg_r_r_r *a,
+ void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 src1, src2;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ src1 = gen_load_fpr_F(dc, a->rs1);
+ src2 = gen_load_fpr_F(dc, a->rs2);
+ func(src1, src1, src2);
+ gen_store_fpr_F(dc, a->rd, src1);
+ return advance_pc(dc);
+}
+
+TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
+TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
+TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
+TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
+TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
+TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
+TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
+TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
+TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
+TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
+TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
+TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
+
+static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
+ void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 src1, src2;
-#ifndef TARGET_SPARC64
- case 0x30: /* ldc */
- case 0x31: /* ldcsr */
- case 0x33: /* lddc */
- goto ncp_insn;
-#endif
-#endif
-#ifdef TARGET_SPARC64
- case 0x08: /* V9 ldsw */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TESL | MO_ALIGN);
- break;
- case 0x0b: /* V9 ldx */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TEUQ | MO_ALIGN);
- break;
- case 0x18: /* V9 ldswa */
- gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
- break;
- case 0x1b: /* V9 ldxa */
- gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
- break;
- case 0x2d: /* V9 prefetch, no effect */
- goto skip_move;
- case 0x30: /* V9 ldfa */
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
- gen_update_fprs_dirty(dc, rd);
- goto skip_move;
- case 0x33: /* V9 lddfa */
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
- gen_update_fprs_dirty(dc, DFPREG(rd));
- goto skip_move;
- case 0x3d: /* V9 prefetcha, no effect */
- goto skip_move;
- case 0x32: /* V9 ldqfa */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
- gen_update_fprs_dirty(dc, QFPREG(rd));
- goto skip_move;
-#endif
- default:
- goto illegal_insn;
- }
- gen_store_gpr(dc, rd, cpu_val);
-#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
- skip_move: ;
-#endif
- } else if (xop >= 0x20 && xop < 0x24) {
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- switch (xop) {
- case 0x20: /* ldf, load fpreg */
- gen_address_mask(dc, cpu_addr);
- cpu_dst_32 = gen_dest_fpr_F(dc);
- tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
- dc->mem_idx, MO_TEUL | MO_ALIGN);
- gen_store_fpr_F(dc, rd, cpu_dst_32);
- break;
- case 0x21: /* ldfsr, V9 ldxfsr */
-#ifdef TARGET_SPARC64
- gen_address_mask(dc, cpu_addr);
- if (rd == 1) {
- TCGv_i64 t64 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(t64, cpu_addr,
- dc->mem_idx, MO_TEUQ | MO_ALIGN);
- gen_helper_ldxfsr(cpu_fsr, tcg_env, cpu_fsr, t64);
- break;
- }
-#endif
- cpu_dst_32 = tcg_temp_new_i32();
- tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
- dc->mem_idx, MO_TEUL | MO_ALIGN);
- gen_helper_ldfsr(cpu_fsr, tcg_env, cpu_fsr, cpu_dst_32);
- break;
- case 0x22: /* ldqf, load quad fpreg */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_address_mask(dc, cpu_addr);
- cpu_src1_64 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
- MO_TEUQ | MO_ALIGN_4);
- tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
- cpu_src2_64 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
- MO_TEUQ | MO_ALIGN_4);
- gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
- break;
- case 0x23: /* lddf, load double fpreg */
- gen_address_mask(dc, cpu_addr);
- cpu_dst_64 = gen_dest_fpr_D(dc, rd);
- tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
- MO_TEUQ | MO_ALIGN_4);
- gen_store_fpr_D(dc, rd, cpu_dst_64);
- break;
- default:
- goto illegal_insn;
- }
- } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
- xop == 0xe || xop == 0x1e) {
- TCGv cpu_val = gen_load_gpr(dc, rd);
-
- switch (xop) {
- case 0x4: /* st, store word */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TEUL | MO_ALIGN);
- break;
- case 0x5: /* stb, store byte */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_st_tl(cpu_val, cpu_addr, dc->mem_idx, MO_UB);
- break;
- case 0x6: /* sth, store halfword */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TEUW | MO_ALIGN);
- break;
- case 0x7: /* std, store double word */
- if (rd & 1)
- goto illegal_insn;
- else {
- TCGv_i64 t64;
- TCGv lo;
-
- gen_address_mask(dc, cpu_addr);
- lo = gen_load_gpr(dc, rd + 1);
- t64 = tcg_temp_new_i64();
- tcg_gen_concat_tl_i64(t64, lo, cpu_val);
- tcg_gen_qemu_st_i64(t64, cpu_addr,
- dc->mem_idx, MO_TEUQ | MO_ALIGN);
- }
- break;
-#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
- case 0x14: /* sta, V9 stwa, store word alternate */
- gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
- break;
- case 0x15: /* stba, store byte alternate */
- gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
- break;
- case 0x16: /* stha, store halfword alternate */
- gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
- break;
- case 0x17: /* stda, store double word alternate */
- if (rd & 1) {
- goto illegal_insn;
- }
- gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
- break;
-#endif
-#ifdef TARGET_SPARC64
- case 0x0e: /* V9 stx */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TEUQ | MO_ALIGN);
- break;
- case 0x1e: /* V9 stxa */
- gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
- break;
-#endif
- default:
- goto illegal_insn;
- }
- } else if (xop > 0x23 && xop < 0x28) {
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- switch (xop) {
- case 0x24: /* stf, store fpreg */
- gen_address_mask(dc, cpu_addr);
- cpu_src1_32 = gen_load_fpr_F(dc, rd);
- tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
- dc->mem_idx, MO_TEUL | MO_ALIGN);
- break;
- case 0x25: /* stfsr, V9 stxfsr */
- {
-#ifdef TARGET_SPARC64
- gen_address_mask(dc, cpu_addr);
- if (rd == 1) {
- tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
- dc->mem_idx, MO_TEUQ | MO_ALIGN);
- break;
- }
-#endif
- tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
- dc->mem_idx, MO_TEUL | MO_ALIGN);
- }
- break;
- case 0x26:
-#ifdef TARGET_SPARC64
- /* V9 stqf, store quad fpreg */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_address_mask(dc, cpu_addr);
- /* ??? While stqf only requires 4-byte alignment, it is
- legal for the cpu to signal the unaligned exception.
- The OS trap handler is then required to fix it up.
- For qemu, this avoids having to probe the second page
- before performing the first write. */
- cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
- tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
- dc->mem_idx, MO_TEUQ | MO_ALIGN_16);
- tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
- cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
- tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
- dc->mem_idx, MO_TEUQ);
- break;
-#else /* !TARGET_SPARC64 */
- /* stdfq, store floating point queue */
-#if defined(CONFIG_USER_ONLY)
- goto illegal_insn;
-#else
- if (!supervisor(dc))
- goto priv_insn;
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- goto nfq_insn;
-#endif
-#endif
- case 0x27: /* stdf, store double fpreg */
- gen_address_mask(dc, cpu_addr);
- cpu_src1_64 = gen_load_fpr_D(dc, rd);
- tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
- MO_TEUQ | MO_ALIGN_4);
- break;
- default:
- goto illegal_insn;
- }
- } else if (xop > 0x33 && xop < 0x3f) {
- switch (xop) {
-#ifdef TARGET_SPARC64
- case 0x34: /* V9 stfa */
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- gen_stf_asi(dc, cpu_addr, insn, 4, rd);
- break;
- case 0x36: /* V9 stqfa */
- {
- CHECK_FPU_FEATURE(dc, FLOAT128);
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
- }
- break;
- case 0x37: /* V9 stdfa */
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
- break;
- case 0x3e: /* V9 casxa */
- rs2 = GET_FIELD(insn, 27, 31);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
- break;
-#else
- case 0x34: /* stc */
- case 0x35: /* stcsr */
- case 0x36: /* stdcq */
- case 0x37: /* stdc */
- goto ncp_insn;
-#endif
-#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
- case 0x3c: /* V9 or LEON3 casa */
-#ifndef TARGET_SPARC64
- CHECK_IU_FEATURE(dc, CASA);
-#endif
- rs2 = GET_FIELD(insn, 27, 31);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
- break;
-#endif
- default:
- goto illegal_insn;
- }
- } else {
- goto illegal_insn;
- }
- }
- break;
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
}
- advance_pc(dc);
- jmp_insn:
- return;
- illegal_insn:
- gen_exception(dc, TT_ILL_INSN);
- return;
-#if !defined(CONFIG_USER_ONLY)
- priv_insn:
- gen_exception(dc, TT_PRIV_INSN);
- return;
-#endif
- nfpu_insn:
- gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
- return;
-#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
- nfq_insn:
- gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
- return;
-#endif
-#ifndef TARGET_SPARC64
- ncp_insn:
- gen_exception(dc, TT_NCP_INSN);
- return;
-#endif
+
+ gen_op_clear_ieee_excp_and_FTT();
+ src1 = gen_load_fpr_F(dc, a->rs1);
+ src2 = gen_load_fpr_F(dc, a->rs2);
+ func(src1, tcg_env, src1, src2);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_store_fpr_F(dc, a->rd, src1);
+ return advance_pc(dc);
+}
+
+TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
+TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
+TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
+TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
+
+static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
+ void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 dst, src1, src2;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ dst = gen_dest_fpr_D(dc, a->rd);
+ src1 = gen_load_fpr_D(dc, a->rs1);
+ src2 = gen_load_fpr_D(dc, a->rs2);
+ func(dst, src1, src2);
+ gen_store_fpr_D(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(FMUL8x16, VIS1, do_ddd, a, gen_helper_fmul8x16)
+TRANS(FMUL8x16AU, VIS1, do_ddd, a, gen_helper_fmul8x16au)
+TRANS(FMUL8x16AL, VIS1, do_ddd, a, gen_helper_fmul8x16al)
+TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
+TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
+TRANS(FMULD8SUx16, VIS1, do_ddd, a, gen_helper_fmuld8sux16)
+TRANS(FMULD8ULx16, VIS1, do_ddd, a, gen_helper_fmuld8ulx16)
+TRANS(FPMERGE, VIS1, do_ddd, a, gen_helper_fpmerge)
+TRANS(FEXPAND, VIS1, do_ddd, a, gen_helper_fexpand)
+
+TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
+TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
+TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
+TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
+TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
+TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
+TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
+TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
+TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
+TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
+TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
+TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
+
+TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
+TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
+TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
+
+static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
+ void (*func)(TCGv, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 src1, src2;
+ TCGv dst;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ dst = gen_dest_gpr(dc, a->rd);
+ src1 = gen_load_fpr_D(dc, a->rs1);
+ src2 = gen_load_fpr_D(dc, a->rs2);
+ func(dst, src1, src2);
+ gen_store_gpr(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
+TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
+TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
+TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
+
+TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
+TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
+TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
+TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
+
+static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
+ void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 dst, src1, src2;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ dst = gen_dest_fpr_D(dc, a->rd);
+ src1 = gen_load_fpr_D(dc, a->rs1);
+ src2 = gen_load_fpr_D(dc, a->rs2);
+ func(dst, tcg_env, src1, src2);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_store_fpr_D(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
+TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
+TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
+TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
+
+static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
+{
+ TCGv_i64 dst;
+ TCGv_i32 src1, src2;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
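+ /* Without the FSMULD feature, FsMULd is an unimplemented FPop. */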
+ if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
+ return raise_unimpfpop(dc);
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ dst = gen_dest_fpr_D(dc, a->rd);
+ src1 = gen_load_fpr_F(dc, a->rs1);
+ src2 = gen_load_fpr_F(dc, a->rs2);
+ gen_helper_fsmuld(dst, tcg_env, src1, src2);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_store_fpr_D(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
+ void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 dst, src0, src1, src2;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
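+ /* PDIST accumulates into rd, so the old rd value is the first source. */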
+ dst = gen_dest_fpr_D(dc, a->rd);
+ src0 = gen_load_fpr_D(dc, a->rd);
+ src1 = gen_load_fpr_D(dc, a->rs1);
+ src2 = gen_load_fpr_D(dc, a->rs2);
+ func(dst, src0, src1, src2);
+ gen_store_fpr_D(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
+
+static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
+ void (*func)(TCGv_env))
+{
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_op_load_fpr_QT0(QFPREG(a->rs1));
+ gen_op_load_fpr_QT1(QFPREG(a->rs2));
+ func(tcg_env);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_op_store_QT0_fpr(QFPREG(a->rd));
+ gen_update_fprs_dirty(dc, QFPREG(a->rd));
+ return advance_pc(dc);
+}
+
+TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
+TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
+TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
+TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
+
+static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
+{
+ TCGv_i64 src1, src2;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ src1 = gen_load_fpr_D(dc, a->rs1);
+ src2 = gen_load_fpr_D(dc, a->rs2);
+ gen_helper_fdmulq(tcg_env, src1, src2);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_op_store_QT0_fpr(QFPREG(a->rd));
+ gen_update_fprs_dirty(dc, QFPREG(a->rd));
+ return advance_pc(dc);
+}
+
+static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
+ void (*func)(DisasContext *, DisasCompare *, int, int))
+{
+ DisasCompare cmp;
+
+ if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
+ return false;
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (is_128 && gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ func(dc, &cmp, a->rd, a->rs2);
+ return advance_pc(dc);
+}
+
+TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
+TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
+TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
+
+static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
+ void (*func)(DisasContext *, DisasCompare *, int, int))
+{
+ DisasCompare cmp;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (is_128 && gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_compare(&cmp, a->cc, a->cond, dc);
+ func(dc, &cmp, a->rd, a->rs2);
+ return advance_pc(dc);
+}
+
+TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
+TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
+TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
+
+static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
+ void (*func)(DisasContext *, DisasCompare *, int, int))
+{
+ DisasCompare cmp;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (is_128 && gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_fcompare(&cmp, a->cc, a->cond);
+ func(dc, &cmp, a->rd, a->rs2);
+ return advance_pc(dc);
+}
+
+TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
+TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
+TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
+
+static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
+{
+ TCGv_i32 src1, src2;
+
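+ /* Only sparc64 has %fcc1..%fcc3; sparc32 must use %fcc0. */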
+ if (avail_32(dc) && a->cc != 0) {
+ return false;
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ src1 = gen_load_fpr_F(dc, a->rs1);
+ src2 = gen_load_fpr_F(dc, a->rs2);
+ if (e) {
+ gen_op_fcmpes(a->cc, src1, src2);
+ } else {
+ gen_op_fcmps(a->cc, src1, src2);
+ }
+ return advance_pc(dc);
+}
+
+TRANS(FCMPs, ALL, do_fcmps, a, false)
+TRANS(FCMPEs, ALL, do_fcmps, a, true)
+
+static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
+{
+ TCGv_i64 src1, src2;
+
+ if (avail_32(dc) && a->cc != 0) {
+ return false;
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ src1 = gen_load_fpr_D(dc, a->rs1);
+ src2 = gen_load_fpr_D(dc, a->rs2);
+ if (e) {
+ gen_op_fcmped(a->cc, src1, src2);
+ } else {
+ gen_op_fcmpd(a->cc, src1, src2);
+ }
+ return advance_pc(dc);
+}
+
+TRANS(FCMPd, ALL, do_fcmpd, a, false)
+TRANS(FCMPEd, ALL, do_fcmpd, a, true)
+
+static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
+{
+ if (avail_32(dc) && a->cc != 0) {
+ return false;
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_op_load_fpr_QT0(QFPREG(a->rs1));
+ gen_op_load_fpr_QT1(QFPREG(a->rs2));
+ if (e) {
+ gen_op_fcmpeq(a->cc);
+ } else {
+ gen_op_fcmpq(a->cc);
+ }
+ return advance_pc(dc);
}
+
+TRANS(FCMPq, ALL, do_fcmpq, a, false)
+TRANS(FCMPEq, ALL, do_fcmpq, a, true)
+
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
dc->pc = dc->base.pc_first;
dc->npc = (target_ulong)dc->base.tb->cs_base;
- dc->cc_op = CC_OP_DYNAMIC;
dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
dc->def = &env->def;
dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
dc->base.pc_next += 4;
if (!decode(dc, insn)) {
- disas_sparc_legacy(dc, insn);
+ gen_exception(dc, TT_ILL_INSN);
}
if (dc->base.is_jmp == DISAS_NORETURN) {
DisasDelayException *e, *e_next;
bool may_lookup;
+ finishing_insn(dc);
+
switch (dc->base.is_jmp) {
case DISAS_NEXT:
case DISAS_TOO_MANY:
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- target_ulong pc, void *host_pc)
+ vaddr pc, void *host_pc)
{
DisasContext dc = {};
"f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
};
- static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
-#ifdef TARGET_SPARC64
- { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
- { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
-#endif
- { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
- { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
- };
-
static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
{ &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
+ { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
+ { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
+ { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
+ { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
+ { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
+ { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
{ &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
- { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
- { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
- { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
{ &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
{ &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
{ &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
offsetof(CPUSPARCState, regwptr),
"regwptr");
- for (i = 0; i < ARRAY_SIZE(r32); ++i) {
- *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
- }
-
for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
*rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
}
offsetof(CPUSPARCState, fpr[i]),
fregnames[i]);
}
+
+#ifdef TARGET_SPARC64
+ cpu_fprs = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUSPARCState, fprs), "fprs");
+#endif
}
void sparc_restore_state_to_opc(CPUState *cs,