/* Routine used to access memory */
bool pr, hv, dr, le_mode;
bool lazy_tlb_flush;
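+ /* When false (64-bit hash MMUs), gen_set_access_type skips updating cpu_access_type */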
+ bool need_access_type;
int mem_idx;
int access_type;
/* Translation flags */
static inline void gen_set_access_type(DisasContext *ctx, int access_type)
{
- if (ctx->access_type != access_type) {
+ if (ctx->need_access_type && ctx->access_type != access_type) {
tcg_gen_movi_i32(cpu_access_type, access_type);
ctx->access_type = access_type;
}
EXTRACT_HELPER(SIMM5, 16, 5);
/* 5 bits unsigned immediate value */
EXTRACT_HELPER(UIMM5, 16, 5);
+/* 4 bits unsigned immediate value */
+EXTRACT_HELPER(UIMM4, 16, 4);
/* Bit count */
EXTRACT_HELPER(NB, 11, 5);
/* Shift count */
/* addpcis */
EXTRACT_HELPER_DXFORM(DX, 10, 6, 6, 5, 16, 1, 1, 0, 0)
+#if defined(TARGET_PPC64)
+/* darn */
+EXTRACT_HELPER(L, 16, 2);
+#endif
/*** Jump target decoding ***/
/* Immediate address */
EXTRACT_HELPER(UIM, 16, 2);
EXTRACT_HELPER(SHW, 8, 2);
EXTRACT_HELPER(SP, 19, 2);
+EXTRACT_HELPER(IMM8, 11, 8);
+
/*****************************************************************************/
/* PowerPC instructions table */
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
TCGv arg2, int sign, int compute_ov)
{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- TCGv_i32 t0 = tcg_temp_local_new_i32();
- TCGv_i32 t1 = tcg_temp_local_new_i32();
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(t0, arg1);
tcg_gen_trunc_tl_i32(t1, arg2);
- tcg_gen_brcondi_i32(TCG_COND_EQ, t1, 0, l1);
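+ /* Compute the invalid cases (INT_MIN / -1 overflow, divide by zero) into t2
+  * and force the divisor to 1 for them so the host division cannot trap;
+  * t2 also feeds the OV update below. */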
if (sign) {
- TCGLabel *l3 = gen_new_label();
- tcg_gen_brcondi_i32(TCG_COND_NE, t1, -1, l3);
- tcg_gen_brcondi_i32(TCG_COND_EQ, t0, INT32_MIN, l1);
- gen_set_label(l3);
- tcg_gen_div_i32(t0, t0, t1);
- } else {
- tcg_gen_divu_i32(t0, t0, t1);
- }
- if (compute_ov) {
- tcg_gen_movi_tl(cpu_ov, 0);
- }
- tcg_gen_br(l2);
- gen_set_label(l1);
- if (sign) {
- tcg_gen_sari_i32(t0, t0, 31);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_i32(t2, t2, t3);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_i32(t2, t2, t3);
+ tcg_gen_movi_i32(t3, 0);
+ tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_i32(t3, t0, t1);
+ tcg_gen_extu_i32_tl(ret, t3);
} else {
- tcg_gen_movi_i32(t0, 0);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t1, 0);
+ tcg_gen_movi_i32(t3, 0);
+ tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_divu_i32(t3, t0, t1);
+ tcg_gen_extu_i32_tl(ret, t3);
}
if (compute_ov) {
- tcg_gen_movi_tl(cpu_ov, 1);
- tcg_gen_movi_tl(cpu_so, 1);
+ tcg_gen_extu_i32_tl(cpu_ov, t2);
+ tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
- gen_set_label(l2);
- tcg_gen_extu_i32_tl(ret, t0);
tcg_temp_free_i32(t0);
tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+
if (unlikely(Rc(ctx->opcode) != 0))
gen_set_Rc0(ctx, ret);
}
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
TCGv arg2, int sign, int compute_ov)
{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
- tcg_gen_brcondi_i64(TCG_COND_EQ, arg2, 0, l1);
- if (sign) {
- TCGLabel *l3 = gen_new_label();
- tcg_gen_brcondi_i64(TCG_COND_NE, arg2, -1, l3);
- tcg_gen_brcondi_i64(TCG_COND_EQ, arg1, INT64_MIN, l1);
- gen_set_label(l3);
- tcg_gen_div_i64(ret, arg1, arg2);
- } else {
- tcg_gen_divu_i64(ret, arg1, arg2);
- }
- if (compute_ov) {
- tcg_gen_movi_tl(cpu_ov, 0);
- }
- tcg_gen_br(l2);
- gen_set_label(l1);
+ tcg_gen_mov_i64(t0, arg1);
+ tcg_gen_mov_i64(t1, arg2);
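+ /* Same branchless scheme as gen_op_arith_divw: t2 flags the invalid cases
+  * and the divisor is forced to 1 for them. */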
if (sign) {
- tcg_gen_sari_i64(ret, arg1, 63);
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_i64(t2, t2, t3);
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_i64(t2, t2, t3);
+ tcg_gen_movi_i64(t3, 0);
+ tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_i64(ret, t0, t1);
} else {
- tcg_gen_movi_i64(ret, 0);
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t1, 0);
+ tcg_gen_movi_i64(t3, 0);
+ tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_divu_i64(ret, t0, t1);
}
if (compute_ov) {
- tcg_gen_movi_tl(cpu_ov, 1);
- tcg_gen_movi_tl(cpu_so, 1);
+ tcg_gen_mov_tl(cpu_ov, t2);
+ tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
- gen_set_label(l2);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+
if (unlikely(Rc(ctx->opcode) != 0))
gen_set_Rc0(ctx, ret);
}
+
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
}
+
+/* darn */
+static void gen_darn(DisasContext *ctx)
+{
+ int l = L(ctx->opcode);
+
+ if (l == 0) {
+ gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]);
+ } else if (l <= 2) {
+ /* Return 64-bit random for both CRN and RRN */
+ gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]);
+ } else {
+ tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1);
+ }
+}
#endif
/*** Integer rotate ***/
gen_sradi(ctx, 1);
}
+/* extswsli & extswsli. */
+static inline void gen_extswsli(DisasContext *ctx, int n)
+{
+ int sh = SH(ctx->opcode) + (n << 5);
+ TCGv dst = cpu_gpr[rA(ctx->opcode)];
+ TCGv src = cpu_gpr[rS(ctx->opcode)];
+
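+ /* As with sradi, bit 5 of the shift count comes from a separate opcode bit,
+  * passed in as n by the extswsli0/extswsli1 wrappers below. */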
+ tcg_gen_ext32s_tl(dst, src);
+ tcg_gen_shli_tl(dst, dst, sh);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, dst);
+ }
+}
+
+static void gen_extswsli0(DisasContext *ctx)
+{
+ gen_extswsli(ctx, 0);
+}
+
+static void gen_extswsli1(DisasContext *ctx)
+{
+ gen_extswsli(ctx, 1);
+}
+
/* srd & srd. */
static void gen_srd(DisasContext *ctx)
{
TCGLabel *l1 = gen_new_label();
TCGv t0 = tcg_temp_new();
TCGv_i32 t1, t2;
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
tcg_gen_andi_tl(t0, EA, mask);
tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
t1 = tcg_const_i32(POWERPC_EXCP_ALIGN);
- t2 = tcg_const_i32(0);
+ t2 = tcg_const_i32(ctx->opcode & 0x03FF0000);
+ gen_update_nip(ctx, ctx->nip - 4);
gen_helper_raise_exception_err(cpu_env, t1, t2);
tcg_temp_free_i32(t1);
tcg_temp_free_i32(t2);
tcg_temp_free(t0);
}
-/*** Integer load ***/
-static inline void gen_qemu_ld8u(DisasContext *ctx, TCGv arg1, TCGv arg2)
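+/* Raise an alignment interrupt flagged as a little-endian fault, for
+ * instructions whose little-endian form is not supported. */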
+static inline void gen_align_no_le(DisasContext *ctx)
{
- tcg_gen_qemu_ld8u(arg1, arg2, ctx->mem_idx);
+ gen_exception_err(ctx, POWERPC_EXCP_ALIGN,
+ (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE);
}
-static inline void gen_qemu_ld16u(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
-}
+/*** Integer load ***/
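+/* DEF_MEMOP applies the guest's default endianness to a memop; BSWAP_MEMOP
+ * applies the opposite one, for the byte-reversed load/store instructions. */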
+#define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
+#define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))
-static inline void gen_qemu_ld16s(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_SW | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
+#define GEN_QEMU_LOAD_TL(ldop, op) \
+static void glue(gen_qemu_, ldop)(DisasContext *ctx, \
+ TCGv val, \
+ TCGv addr) \
+{ \
+ tcg_gen_qemu_ld_tl(val, addr, ctx->mem_idx, op); \
}
-static inline void gen_qemu_ld32u(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
-}
+GEN_QEMU_LOAD_TL(ld8u, DEF_MEMOP(MO_UB))
+GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW))
+GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW))
+GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL))
+GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL))
-static void gen_qemu_ld32u_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
-{
- TCGv tmp = tcg_temp_new();
- gen_qemu_ld32u(ctx, tmp, addr);
- tcg_gen_extu_tl_i64(val, tmp);
- tcg_temp_free(tmp);
-}
+GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW))
+GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL))
-static inline void gen_qemu_ld32s(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_SL | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
+#define GEN_QEMU_LOAD_64(ldop, op) \
+static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx, \
+ TCGv_i64 val, \
+ TCGv addr) \
+{ \
+ tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, op); \
}
-static void gen_qemu_ld32s_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
-{
- TCGv tmp = tcg_temp_new();
- gen_qemu_ld32s(ctx, tmp, addr);
- tcg_gen_ext_tl_i64(val, tmp);
- tcg_temp_free(tmp);
-}
+GEN_QEMU_LOAD_64(ld8u, DEF_MEMOP(MO_UB))
+GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
+GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
+GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
+GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_Q))
-static inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
-{
- TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
-}
+#if defined(TARGET_PPC64)
+GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_Q))
+#endif
-static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- tcg_gen_qemu_st8(arg1, arg2, ctx->mem_idx);
+#define GEN_QEMU_STORE_TL(stop, op) \
+static void glue(gen_qemu_, stop)(DisasContext *ctx, \
+ TCGv val, \
+ TCGv addr) \
+{ \
+ tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, op); \
}
-static inline void gen_qemu_st16(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
-}
+GEN_QEMU_STORE_TL(st8, DEF_MEMOP(MO_UB))
+GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW))
+GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL))
-static inline void gen_qemu_st32(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
-}
+GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW))
+GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL))
-static void gen_qemu_st32_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
-{
- TCGv tmp = tcg_temp_new();
- tcg_gen_trunc_i64_tl(tmp, val);
- gen_qemu_st32(ctx, tmp, addr);
- tcg_temp_free(tmp);
+#define GEN_QEMU_STORE_64(stop, op) \
+static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx, \
+ TCGv_i64 val, \
+ TCGv addr) \
+{ \
+ tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op); \
}
-static inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
-{
- TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op);
-}
+GEN_QEMU_STORE_64(st8, DEF_MEMOP(MO_UB))
+GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
+GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
+GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_Q))
+
+#if defined(TARGET_PPC64)
+GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_Q))
+#endif
#define GEN_LD(name, ldop, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
/* lwax */
GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
/* ldux */
-GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B);
+GEN_LDUX(ld, ld64_i64, 0x15, 0x01, PPC_64B);
/* ldx */
-GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B);
+GEN_LDX(ld, ld64_i64, 0x15, 0x00, PPC_64B);
/* CI load/store variants */
-GEN_LDX_HVRM(ldcix, ld64, 0x15, 0x1b, PPC_CILDST)
+GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
} else {
/* ld - ldu */
- gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
+ gen_qemu_ld64_i64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
}
if (Rc(ctx->opcode))
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
}
if (!le_is_supported && ctx->le_mode) {
- gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
+ gen_align_no_le(ctx);
return;
}
-
ra = rA(ctx->opcode);
rd = rD(ctx->opcode);
if (unlikely((rd & 1) || rd == ra)) {
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x0F);
- /* We only need to swap high and low halves. gen_qemu_ld64 does necessary
- 64-bit byteswap already. */
+ /* We only need to swap high and low halves. gen_qemu_ld64_i64 does the
+ necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) {
- gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
+ gen_qemu_ld64_i64(ctx, cpu_gpr[rd + 1], EA);
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
+ gen_qemu_ld64_i64(ctx, cpu_gpr[rd], EA);
} else {
- gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
+ gen_qemu_ld64_i64(ctx, cpu_gpr[rd], EA);
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
+ gen_qemu_ld64_i64(ctx, cpu_gpr[rd + 1], EA);
}
tcg_temp_free(EA);
}
/* stw stwu stwux stwx */
GEN_STS(stw, st32, 0x04, PPC_INTEGER);
#if defined(TARGET_PPC64)
-GEN_STUX(std, st64, 0x15, 0x05, PPC_64B);
-GEN_STX(std, st64, 0x15, 0x04, PPC_64B);
-GEN_STX_HVRM(stdcix, st64, 0x15, 0x1f, PPC_CILDST)
+GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B);
+GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B);
+GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
}
if (!le_is_supported && ctx->le_mode) {
- gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
+ gen_align_no_le(ctx);
return;
}
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x03);
- /* We only need to swap high and low halves. gen_qemu_st64 does
+ /* We only need to swap high and low halves. gen_qemu_st64_i64 does the
necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) {
- gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs + 1], EA);
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_st64(ctx, cpu_gpr[rs], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
} else {
- gen_qemu_st64(ctx, cpu_gpr[rs], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs + 1], EA);
}
tcg_temp_free(EA);
} else {
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x03);
- gen_qemu_st64(ctx, cpu_gpr[rs], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
if (Rc(ctx->opcode))
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
tcg_temp_free(EA);
/*** Integer load and store with byte reverse ***/
/* lhbrx */
-static inline void gen_qemu_ld16ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UW | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
- tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
-}
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
/* lwbrx */
-static inline void gen_qemu_ld32ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UL | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
- tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
-}
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
#if defined(TARGET_PPC64)
/* ldbrx */
-static inline void gen_qemu_ld64ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_Q | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
- tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
-}
-GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
+GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
+/* stdbrx */
+GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
#endif /* TARGET_PPC64 */
/* sthbrx */
-static inline void gen_qemu_st16r(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UW | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
- tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
-}
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
-
/* stwbrx */
-static inline void gen_qemu_st32r(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UL | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
- tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
-}
GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
-#if defined(TARGET_PPC64)
-/* stdbrx */
-static inline void gen_qemu_st64r(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_Q | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
- tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op);
-}
-GEN_STX_E(stdbr, st64r, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
-#endif /* TARGET_PPC64 */
-
/*** Integer load and store multiple ***/
/* lmw */
{
TCGv t0;
TCGv_i32 t1;
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
gen_set_access_type(ctx, ACCESS_INT);
t0 = tcg_temp_new();
t1 = tcg_const_i32(rD(ctx->opcode));
{
TCGv t0;
TCGv_i32 t1;
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
gen_set_access_type(ctx, ACCESS_INT);
t0 = tcg_temp_new();
t1 = tcg_const_i32(rS(ctx->opcode));
int ra = rA(ctx->opcode);
int nr;
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
if (nb == 0)
nb = 32;
nr = (nb + 3) / 4;
{
TCGv t0;
TCGv_i32 t1, t2, t3;
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
gen_set_access_type(ctx, ACCESS_INT);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
TCGv t0;
TCGv_i32 t1, t2;
int nb = NB(ctx->opcode);
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
gen_set_access_type(ctx, ACCESS_INT);
t0 = tcg_temp_new();
gen_addr_register(ctx, t0);
{
TCGv t0;
TCGv_i32 t1, t2;
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
gen_set_access_type(ctx, ACCESS_INT);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
}
#if !defined(CONFIG_USER_ONLY)
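+/* Emit a check for pending TLB flushes; 'global' selects the helper that also
+ * handles the global flush left pending by tlbie (needed at ptesync/tlbsync),
+ * while isync only needs the local variant. */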
-static inline void gen_check_tlb_flush(DisasContext *ctx)
+static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
{
TCGv_i32 t;
TCGLabel *l;
t = tcg_temp_new_i32();
tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
- gen_helper_check_tlb_flush(cpu_env);
+ if (global) {
+ gen_helper_check_tlb_flush_global(cpu_env);
+ } else {
+ gen_helper_check_tlb_flush_local(cpu_env);
+ }
gen_set_label(l);
tcg_temp_free_i32(t);
}
#else
-static inline void gen_check_tlb_flush(DisasContext *ctx) { }
+static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
#endif
/* isync */
* kernel mode however so check MSR_PR
*/
if (!ctx->pr) {
- gen_check_tlb_flush(ctx);
+ gen_check_tlb_flush(ctx, false);
}
gen_stop_exception(ctx);
}
-#define LARX(name, len, loadop) \
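+/* Access size in bytes encoded by a TCGMemOp */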
+#define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE))
+
+#define LARX(name, memop) \
static void gen_##name(DisasContext *ctx) \
{ \
TCGv t0; \
TCGv gpr = cpu_gpr[rD(ctx->opcode)]; \
+ int len = MEMOP_GET_SIZE(memop); \
gen_set_access_type(ctx, ACCESS_RES); \
t0 = tcg_temp_local_new(); \
gen_addr_reg_index(ctx, t0); \
if ((len) > 1) { \
gen_check_align(ctx, t0, (len)-1); \
} \
- gen_qemu_##loadop(ctx, gpr, t0); \
+ tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop); \
tcg_gen_mov_tl(cpu_reserve, t0); \
tcg_gen_st_tl(gpr, cpu_env, offsetof(CPUPPCState, reserve_val)); \
tcg_temp_free(t0); \
}
/* lwarx */
-LARX(lbarx, 1, ld8u);
-LARX(lharx, 2, ld16u);
-LARX(lwarx, 4, ld32u);
-
+LARX(lbarx, DEF_MEMOP(MO_UB))
+LARX(lharx, DEF_MEMOP(MO_UW))
+LARX(lwarx, DEF_MEMOP(MO_UL))
#if defined(CONFIG_USER_ONLY)
static void gen_conditional_store(DisasContext *ctx, TCGv EA,
- int reg, int size)
+ int reg, int memop)
{
TCGv t0 = tcg_temp_new();
tcg_gen_st_tl(EA, cpu_env, offsetof(CPUPPCState, reserve_ea));
- tcg_gen_movi_tl(t0, (size << 5) | reg);
+ tcg_gen_movi_tl(t0, (MEMOP_GET_SIZE(memop) << 5) | reg);
tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, reserve_info));
tcg_temp_free(t0);
gen_exception_err(ctx, POWERPC_EXCP_STCX, 0);
}
#else
static void gen_conditional_store(DisasContext *ctx, TCGv EA,
- int reg, int size)
+ int reg, int memop)
{
TCGLabel *l1;
l1 = gen_new_label();
tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, l1);
tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
-#if defined(TARGET_PPC64)
- if (size == 8) {
- gen_qemu_st64(ctx, cpu_gpr[reg], EA);
- } else
-#endif
- if (size == 4) {
- gen_qemu_st32(ctx, cpu_gpr[reg], EA);
- } else if (size == 2) {
- gen_qemu_st16(ctx, cpu_gpr[reg], EA);
-#if defined(TARGET_PPC64)
- } else if (size == 16) {
- TCGv gpr1, gpr2 , EA8;
- if (unlikely(ctx->le_mode)) {
- gpr1 = cpu_gpr[reg+1];
- gpr2 = cpu_gpr[reg];
- } else {
- gpr1 = cpu_gpr[reg];
- gpr2 = cpu_gpr[reg+1];
- }
- gen_qemu_st64(ctx, gpr1, EA);
- EA8 = tcg_temp_local_new();
- gen_addr_add(ctx, EA8, EA, 8);
- gen_qemu_st64(ctx, gpr2, EA8);
- tcg_temp_free(EA8);
-#endif
- } else {
- gen_qemu_st8(ctx, cpu_gpr[reg], EA);
- }
+ tcg_gen_qemu_st_tl(cpu_gpr[reg], EA, ctx->mem_idx, memop);
gen_set_label(l1);
tcg_gen_movi_tl(cpu_reserve, -1);
}
#endif
-#define STCX(name, len) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv t0; \
- if (unlikely((len == 16) && (rD(ctx->opcode) & 1))) { \
- gen_inval_exception(ctx, \
- POWERPC_EXCP_INVAL_INVAL); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_RES); \
- t0 = tcg_temp_local_new(); \
- gen_addr_reg_index(ctx, t0); \
- if (len > 1) { \
- gen_check_align(ctx, t0, (len)-1); \
- } \
- gen_conditional_store(ctx, t0, rS(ctx->opcode), len); \
- tcg_temp_free(t0); \
-}
-
-STCX(stbcx_, 1);
-STCX(sthcx_, 2);
-STCX(stwcx_, 4);
+#define STCX(name, memop) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv t0; \
+ int len = MEMOP_GET_SIZE(memop); \
+ gen_set_access_type(ctx, ACCESS_RES); \
+ t0 = tcg_temp_local_new(); \
+ gen_addr_reg_index(ctx, t0); \
+ if (len > 1) { \
+ gen_check_align(ctx, t0, (len) - 1); \
+ } \
+ gen_conditional_store(ctx, t0, rS(ctx->opcode), memop); \
+ tcg_temp_free(t0); \
+}
+
+STCX(stbcx_, DEF_MEMOP(MO_UB))
+STCX(sthcx_, DEF_MEMOP(MO_UW))
+STCX(stwcx_, DEF_MEMOP(MO_UL))
#if defined(TARGET_PPC64)
/* ldarx */
-LARX(ldarx, 8, ld64);
+LARX(ldarx, DEF_MEMOP(MO_Q))
+/* stdcx. */
+STCX(stdcx_, DEF_MEMOP(MO_Q))
/* lqarx */
static void gen_lqarx(DisasContext *ctx)
gpr1 = cpu_gpr[rd];
gpr2 = cpu_gpr[rd+1];
}
- gen_qemu_ld64(ctx, gpr1, EA);
+ tcg_gen_qemu_ld_i64(gpr1, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
tcg_gen_mov_tl(cpu_reserve, EA);
-
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_ld64(ctx, gpr2, EA);
+ tcg_gen_qemu_ld_i64(gpr2, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
tcg_gen_st_tl(gpr1, cpu_env, offsetof(CPUPPCState, reserve_val));
tcg_gen_st_tl(gpr2, cpu_env, offsetof(CPUPPCState, reserve_val2));
+ tcg_temp_free(EA);
+}
+/* stqcx. */
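+/* No TCGMemOp encodes a 16-byte access, so the quadword case is open-coded
+ * here instead of going through the STCX() macro. */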
+static void gen_stqcx_(DisasContext *ctx)
+{
+ TCGv EA;
+ int reg = rS(ctx->opcode);
+ int len = 16;
+#if !defined(CONFIG_USER_ONLY)
+ TCGLabel *l1;
+ TCGv gpr1, gpr2;
+#endif
+
+ if (unlikely((rD(ctx->opcode) & 1))) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_RES);
+ EA = tcg_temp_local_new();
+ gen_addr_reg_index(ctx, EA);
+ if (len > 1) {
+ gen_check_align(ctx, EA, (len) - 1);
+ }
+
+#if defined(CONFIG_USER_ONLY)
+ gen_conditional_store(ctx, EA, reg, 16);
+#else
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+ l1 = gen_new_label();
+ tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
+
+ if (unlikely(ctx->le_mode)) {
+ gpr1 = cpu_gpr[reg + 1];
+ gpr2 = cpu_gpr[reg];
+ } else {
+ gpr1 = cpu_gpr[reg];
+ gpr2 = cpu_gpr[reg + 1];
+ }
+ tcg_gen_qemu_st_tl(gpr1, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
+ gen_addr_add(ctx, EA, EA, 8);
+ tcg_gen_qemu_st_tl(gpr2, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
+
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_reserve, -1);
+#endif
tcg_temp_free(EA);
}
-/* stdcx. */
-STCX(stdcx_, 8);
-STCX(stqcx_, 16);
#endif /* defined(TARGET_PPC64) */
/* sync */
* check MSR_PR as well.
*/
if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) {
- gen_check_tlb_flush(ctx);
+ gen_check_tlb_flush(ctx, true);
}
}
if (LK(ctx->opcode)) {
gen_setlr(ctx, ctx->nip);
}
- gen_update_cfar(ctx, ctx->nip);
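+ /* CFAR holds the address of the branch instruction itself; ctx->nip has
+  * already advanced past it, hence nip - 4. */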
+ gen_update_cfar(ctx, ctx->nip - 4);
gen_goto_tb(ctx, 0, target);
}
}
tcg_temp_free_i32(temp);
}
- gen_update_cfar(ctx, ctx->nip);
+ gen_update_cfar(ctx, ctx->nip - 4);
if (type == BCOND_IM) {
target_ulong li = (target_long)((int16_t)(BD(ctx->opcode)));
if (likely(AA(ctx->opcode) == 0)) {
} else {
gen_goto_tb(ctx, 0, li);
}
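+ /* BO with both the 'ignore CR' (0x10) and 'ignore CTR' (0x04) bits set is
+  * an unconditional branch, so there is no fall-through path to emit. */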
- gen_set_label(l1);
- gen_goto_tb(ctx, 1, ctx->nip);
+ if ((bo & 0x14) != 0x14) {
+ gen_set_label(l1);
+ gen_goto_tb(ctx, 1, ctx->nip);
+ }
} else {
if (NARROW_MODE(ctx)) {
tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3);
tcg_gen_andi_tl(cpu_nip, target, ~3);
}
tcg_gen_exit_tb(0);
- gen_set_label(l1);
- gen_update_nip(ctx, ctx->nip);
- tcg_gen_exit_tb(0);
+ if ((bo & 0x14) != 0x14) {
+ gen_set_label(l1);
+ gen_update_nip(ctx, ctx->nip);
+ tcg_gen_exit_tb(0);
+ }
}
if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
tcg_temp_free(target);
#if defined(CONFIG_USER_ONLY)
GEN_PRIV;
#else
- /* FIXME: This instruction doesn't exist anymore on 64-bit server
- * processors compliant with arch 2.x, we should remove it there,
- * but we need to fix OpenBIOS not to use it on 970 first
+ /* This instruction doesn't exist anymore on 64-bit server
+ * processors compliant with arch 2.x
*/
+ if (ctx->insns_flags & PPC_SEGMENT_64B) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
/* Restore CPU state */
CHK_SV;
- gen_update_cfar(ctx, ctx->nip);
+ gen_update_cfar(ctx, ctx->nip - 4);
gen_helper_rfi(cpu_env);
gen_sync_exception(ctx);
#endif
#else
/* Restore CPU state */
CHK_SV;
- gen_update_cfar(ctx, ctx->nip);
+ gen_update_cfar(ctx, ctx->nip - 4);
gen_helper_rfid(cpu_env);
gen_sync_exception(ctx);
#endif
/*** Trap ***/
+/* Check for unconditional traps (always or never) */
+static bool check_unconditional_trap(DisasContext *ctx)
+{
+ /* Trap never */
+ if (TO(ctx->opcode) == 0) {
+ return true;
+ }
+ /* Trap always */
+ if (TO(ctx->opcode) == 31) {
+ gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
+ return true;
+ }
+ return false;
+}
+
/* tw */
static void gen_tw(DisasContext *ctx)
{
- TCGv_i32 t0 = tcg_const_i32(TO(ctx->opcode));
- /* Update the nip since this might generate a trap exception */
- gen_update_nip(ctx, ctx->nip - 4);
+ TCGv_i32 t0;
+
+ if (check_unconditional_trap(ctx)) {
+ return;
+ }
+ t0 = tcg_const_i32(TO(ctx->opcode));
gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
t0);
tcg_temp_free_i32(t0);
/* twi */
static void gen_twi(DisasContext *ctx)
{
- TCGv t0 = tcg_const_tl(SIMM(ctx->opcode));
- TCGv_i32 t1 = tcg_const_i32(TO(ctx->opcode));
- /* Update the nip since this might generate a trap exception */
- gen_update_nip(ctx, ctx->nip - 4);
+ TCGv t0;
+ TCGv_i32 t1;
+
+ if (check_unconditional_trap(ctx)) {
+ return;
+ }
+ t0 = tcg_const_tl(SIMM(ctx->opcode));
+ t1 = tcg_const_i32(TO(ctx->opcode));
gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
/* td */
static void gen_td(DisasContext *ctx)
{
- TCGv_i32 t0 = tcg_const_i32(TO(ctx->opcode));
- /* Update the nip since this might generate a trap exception */
- gen_update_nip(ctx, ctx->nip - 4);
+ TCGv_i32 t0;
+
+ if (check_unconditional_trap(ctx)) {
+ return;
+ }
+ t0 = tcg_const_i32(TO(ctx->opcode));
gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
t0);
tcg_temp_free_i32(t0);
/* tdi */
static void gen_tdi(DisasContext *ctx)
{
- TCGv t0 = tcg_const_tl(SIMM(ctx->opcode));
- TCGv_i32 t1 = tcg_const_i32(TO(ctx->opcode));
- /* Update the nip since this might generate a trap exception */
- gen_update_nip(ctx, ctx->nip - 4);
+ TCGv t0;
+ TCGv_i32 t1;
+
+ if (check_unconditional_trap(ctx)) {
+ return;
+ }
+ t0 = tcg_const_tl(SIMM(ctx->opcode));
+ t1 = tcg_const_i32(TO(ctx->opcode));
gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
static void gen_dcbz(DisasContext *ctx)
{
TCGv tcgv_addr;
- TCGv_i32 tcgv_is_dcbzl;
- int is_dcbzl = ctx->opcode & 0x00200000 ? 1 : 0;
+ TCGv_i32 tcgv_op;
gen_set_access_type(ctx, ACCESS_CACHE);
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
tcgv_addr = tcg_temp_new();
- tcgv_is_dcbzl = tcg_const_i32(is_dcbzl);
-
+ tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000);
gen_addr_reg_index(ctx, tcgv_addr);
- gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_is_dcbzl);
-
+ gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_op);
tcg_temp_free(tcgv_addr);
- tcg_temp_free_i32(tcgv_is_dcbzl);
+ tcg_temp_free_i32(tcgv_op);
}
/* dst / dstt */
#if defined(CONFIG_USER_ONLY)
GEN_PRIV;
#else
+ TCGv_i32 t1;
CHK_HV;
if (NARROW_MODE(ctx)) {
} else {
gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
}
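+ /* tlbie invalidations must be completed by a later ptesync/tlbsync, so
+  * record that a global flush is pending. */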
+ t1 = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
+ tcg_gen_ori_i32(t1, t1, TLB_NEED_GLOBAL_FLUSH);
+ tcg_gen_st_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
+ tcg_temp_free_i32(t1);
#endif /* defined(CONFIG_USER_ONLY) */
}
#else
CHK_HV;
- /* tlbsync is a nop for server, ptesync handles delayed tlb flush,
- * embedded however needs to deal with tlbsync. We don't try to be
- * fancy and swallow the overhead of checking for both.
- */
- gen_check_tlb_flush(ctx);
+ /* On BookS, ptesync already performs the delayed TLB flush, so tlbsync is
+ * a nop for server; only BookE needs to act on it. */
+ if (ctx->insns_flags & PPC_BOOKE) {
+ gen_check_tlb_flush(ctx, true);
+ }
#endif /* defined(CONFIG_USER_ONLY) */
}
TCGv_i32 t3 = tcg_const_i32(rB(ctx->opcode));
gen_addr_reg_index(ctx, t0);
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
gen_helper_lscbx(t0, cpu_env, t0, t1, t2, t3);
tcg_temp_free_i32(t1);
tcg_temp_free_i32(t2);
#endif /* defined(CONFIG_USER_ONLY) */
}
-#include "translate/fp-impl.c"
-
-#include "translate/vmx-impl.c"
-
-#include "translate/vsx-impl.c"
-
/* svc is not implemented for now */
/* BookE specific instructions */
GEN_PRIV;
#else
CHK_SV;
- gen_update_nip(ctx, ctx->nip - 4);
gen_helper_booke206_tlbwe(cpu_env);
#endif /* defined(CONFIG_USER_ONLY) */
}
}
#endif /* defined(TARGET_PPC64) */
-#include "translate/dfp-impl.c"
-
-#include "translate/spe-impl.c"
-
static void gen_tbegin(DisasContext *ctx)
{
if (unlikely(!ctx->tm_enabled)) {
GEN_TM_PRIV_NOOP(treclaim);
GEN_TM_PRIV_NOOP(trechkpt);
+#include "translate/fp-impl.inc.c"
+
+#include "translate/vmx-impl.inc.c"
+
+#include "translate/vsx-impl.inc.c"
+
+#include "translate/dfp-impl.inc.c"
+
+#include "translate/spe-impl.inc.c"
+
static opcode_t opcodes[] = {
GEN_HANDLER(invalid, 0x00, 0x00, 0x00, 0xFFFFFFFF, PPC_NONE),
GEN_HANDLER(cmp, 0x1F, 0x00, 0x00, 0x00400000, PPC_INTEGER),
GEN_HANDLER(cmpi, 0x0B, 0xFF, 0xFF, 0x00400000, PPC_INTEGER),
-GEN_HANDLER(cmpl, 0x1F, 0x00, 0x01, 0x00400000, PPC_INTEGER),
+GEN_HANDLER(cmpl, 0x1F, 0x00, 0x01, 0x00400001, PPC_INTEGER),
GEN_HANDLER(cmpli, 0x0A, 0xFF, 0xFF, 0x00400000, PPC_INTEGER),
#if defined(TARGET_PPC64)
GEN_HANDLER_E(cmpeqb, 0x1F, 0x00, 0x07, 0x00600000, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(popcntd, 0x1F, 0x1A, 0x0F, 0x0000F801, PPC_POPCNTWD),
GEN_HANDLER(cntlzd, 0x1F, 0x1A, 0x01, 0x00000000, PPC_64B),
GEN_HANDLER_E(cnttzd, 0x1F, 0x1A, 0x11, 0x00000000, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(darn, 0x1F, 0x13, 0x17, 0x001CF801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(prtyd, 0x1F, 0x1A, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA205),
GEN_HANDLER_E(bpermd, 0x1F, 0x1C, 0x07, 0x00000001, PPC_NONE, PPC2_PERM_ISA206),
#endif
GEN_HANDLER2(sradi0, "sradi", 0x1F, 0x1A, 0x19, 0x00000000, PPC_64B),
GEN_HANDLER2(sradi1, "sradi", 0x1F, 0x1B, 0x19, 0x00000000, PPC_64B),
GEN_HANDLER(srd, 0x1F, 0x1B, 0x10, 0x00000000, PPC_64B),
+GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000,
+ PPC_NONE, PPC2_ISA300),
+GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 0x00000000,
+ PPC_NONE, PPC2_ISA300),
#endif
#if defined(TARGET_PPC64)
GEN_HANDLER(ld, 0x3A, 0xFF, 0xFF, 0x00000000, PPC_64B),
GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW),
GEN_HANDLER(bclr, 0x13, 0x10, 0x00, 0x00000000, PPC_FLOW),
-GEN_HANDLER_E(bctar, 0x13, 0x10, 0x11, 0, PPC_NONE, PPC2_BCTAR_ISA207),
+GEN_HANDLER_E(bctar, 0x13, 0x10, 0x11, 0x0000E000, PPC_NONE, PPC2_BCTAR_ISA207),
GEN_HANDLER(mcrf, 0x13, 0x00, 0xFF, 0x00000001, PPC_INTEGER),
GEN_HANDLER(rfi, 0x13, 0x12, 0x01, 0x03FF8001, PPC_FLOW),
#if defined(TARGET_PPC64)
#if defined(TARGET_PPC64)
GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B)
GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B)
-GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B)
-GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B)
-GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE)
+GEN_LDUX(ld, ld64_i64, 0x15, 0x01, PPC_64B)
+GEN_LDX(ld, ld64_i64, 0x15, 0x00, PPC_64B)
+GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE)
/* HV/P7 and later only */
-GEN_LDX_HVRM(ldcix, ld64, 0x15, 0x1b, PPC_CILDST)
+GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x18, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
GEN_STS(sth, st16, 0x0C, PPC_INTEGER)
GEN_STS(stw, st32, 0x04, PPC_INTEGER)
#if defined(TARGET_PPC64)
-GEN_STUX(std, st64, 0x15, 0x05, PPC_64B)
-GEN_STX(std, st64, 0x15, 0x04, PPC_64B)
-GEN_STX_E(stdbr, st64r, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE)
-GEN_STX_HVRM(stdcix, st64, 0x15, 0x1f, PPC_CILDST)
+GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B)
+GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B)
+GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE)
+GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
GEN_MAC_HANDLER(mullhw, 0x08, 0x0D),
GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C),
-#include "translate/fp-ops.c"
-
-#include "translate/vmx-ops.c"
-
-#include "translate/vsx-ops.c"
-
-#include "translate/dfp-ops.c"
-
-#include "translate/spe-ops.c"
-
GEN_HANDLER2_E(tbegin, "tbegin", 0x1F, 0x0E, 0x14, 0x01DFF800, \
PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tend, "tend", 0x1F, 0x0E, 0x15, 0x01FFF800, \
PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \
PPC_NONE, PPC2_TM),
+
+#include "translate/fp-ops.inc.c"
+
+#include "translate/vmx-ops.inc.c"
+
+#include "translate/vsx-ops.inc.c"
+
+#include "translate/dfp-ops.inc.c"
+
+#include "translate/spe-ops.inc.c"
};
#include "helper_regs.h"
ctx.insns_flags = env->insns_flags;
ctx.insns_flags2 = env->insns_flags2;
ctx.access_type = -1;
+ ctx.need_access_type = !(env->mmu_model & POWERPC_MMU_64B);
ctx.le_mode = !!(env->hflags & (1 << MSR_LE));
ctx.default_tcg_memop_mask = ctx.le_mode ? MO_LE : MO_BE;
#if defined(TARGET_PPC64)