#include "exec/log.h"
#include "asi.h"
-
-#define DEBUG_DISAS
+#define HELPER_H "helper.h"
+#include "exec/helper-info.c.inc"
+#undef HELPER_H
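/*
 * Context for the hunk above: "helper.h" declares each helper once
 * via DEF_HELPER_* macros, and exec/helper-info.c.inc re-expands the
 * same declarations into the TCGHelperInfo records that TCG needs at
 * translation time, which is why the include is bracketed by defining
 * and undefining HELPER_H. A sketch of the pairing (the exact
 * declaration in helper.h is assumed here):
 *
 *     DEF_HELPER_2(raise_exception, noreturn, env, int)
 *
 * which makes gen_helper_raise_exception(cpu_env, trap) available to
 * the translator, as used by gen_exception() below.
 */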
#define DYNAMIC_PC 1 /* dynamic pc value */
#define JUMP_PC 2 /* dynamic pc value which takes only two values
according to jump_pc[T2] */
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
-#include "exec/gen-icount.h"
-
typedef struct DisasContext {
DisasContextBase base;
target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
tcg_gen_movi_tl(cpu_npc, npc);
tcg_gen_exit_tb(s->base.tb, tb_num);
} else {
- /* jump to another page: currently not optimized */
+ /* jump to another page: we can use an indirect jump */
tcg_gen_movi_tl(cpu_pc, pc);
tcg_gen_movi_tl(cpu_npc, npc);
- tcg_gen_exit_tb(NULL, 0);
+ tcg_gen_lookup_and_goto_ptr();
}
}
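/*
 * A minimal sketch (not the function above; names assumed from this
 * file) of the block-chaining shape this hunk completes: direct
 * tcg_gen_goto_tb()/tcg_gen_exit_tb() chaining is only valid when the
 * target stays on the same guest page, while cross-page targets now
 * use tcg_gen_lookup_and_goto_ptr() to look the next TB up at run
 * time instead of forcing a full exit to the main loop.
 */
static void sketch_goto_tb(DisasContext *s, int tb_num,
                           target_ulong pc, target_ulong npc)
{
    if (translator_use_goto_tb(&s->base, pc)) {
        /* Same page: patchable direct jump to the chained TB. */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* Cross page: indirect jump via the run-time TB lookup. */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}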
/* old op:
if (!(env->y & 1))
T1 = 0;
*/
- zero = tcg_const_tl(0);
+ zero = tcg_constant_tl(0);
tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
tcg_gen_mov_tl(cpu_pc, cpu_npc);
tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
- t = tcg_const_tl(pc1);
- z = tcg_const_tl(0);
+ t = tcg_constant_tl(pc1);
+ z = tcg_constant_tl(0);
tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
dc->pc = DYNAMIC_PC;
static inline void gen_generic_branch(DisasContext *dc)
{
- TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
- TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
- TCGv zero = tcg_const_tl(0);
+ TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
+ TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
+ TCGv zero = tcg_constant_tl(0);
tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
}
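/*
 * Why tcg_constant_*() throughout this patch: tcg_constant_tl()
 * returns a cached, read-only value owned by the TCG context, whereas
 * the old tcg_const_tl() allocated a temporary that each caller had
 * to release with tcg_temp_free(). A sketch with a hypothetical
 * function name, mirroring gen_generic_branch() above:
 */
static void sketch_constant_use(TCGv dst, TCGv cond_val, TCGv v1, TCGv v2)
{
    TCGv zero = tcg_constant_tl(0);    /* shared constant, never freed */
    tcg_gen_movcond_tl(TCG_COND_NE, dst, cond_val, zero, v1, v2);
    /* No tcg_temp_free(zero): constants must not be written or freed. */
}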
static void gen_exception(DisasContext *dc, int which)
{
- TCGv_i32 t;
-
save_state(dc);
- t = tcg_const_i32(which);
- gen_helper_raise_exception(cpu_env, t);
+ gen_helper_raise_exception(cpu_env, tcg_constant_i32(which));
dc->base.is_jmp = DISAS_NORETURN;
}
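/*
 * Usage sketch for the pattern above (wrapper name hypothetical):
 * the raise-exception helper does not return, so the translator
 * marks the block DISAS_NORETURN and emits no fallthrough code.
 * TT_ILL_INSN is the trap number this file uses for illegal opcodes.
 */
static void sketch_illegal_insn(DisasContext *dc)
{
    gen_exception(dc, TT_ILL_INSN);  /* ends the TB via DISAS_NORETURN */
}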
static void gen_check_align(TCGv addr, int mask)
{
- TCGv_i32 r_mask = tcg_const_i32(mask);
- gen_helper_check_align(cpu_env, addr, r_mask);
+ gen_helper_check_align(cpu_env, addr, tcg_constant_i32(mask));
}
static inline void gen_mov_pc_npc(DisasContext *dc)
cmp->cond = logic_cond[cond];
do_compare_dst_0:
cmp->is_bool = false;
- cmp->c2 = tcg_const_tl(0);
+ cmp->c2 = tcg_constant_tl(0);
#ifdef TARGET_SPARC64
if (!xcc) {
cmp->c1 = tcg_temp_new();
cmp->cond = TCG_COND_NE;
cmp->is_bool = true;
cmp->c1 = r_dst = tcg_temp_new();
- cmp->c2 = tcg_const_tl(0);
+ cmp->c2 = tcg_constant_tl(0);
switch (cond) {
case 0x0:
cmp->cond = TCG_COND_NE;
cmp->is_bool = true;
cmp->c1 = r_dst = tcg_temp_new();
- cmp->c2 = tcg_const_tl(0);
+ cmp->c2 = tcg_constant_tl(0);
switch (cc) {
default:
cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
cmp->is_bool = false;
cmp->c1 = r_src;
- cmp->c2 = tcg_const_tl(0);
+ cmp->c2 = tcg_constant_tl(0);
}
static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
TCGv addr, int mmu_idx, MemOp memop)
{
gen_address_mask(dc, addr);
- tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
+ tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop | MO_ALIGN);
}
static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
{
- TCGv m1 = tcg_const_tl(0xff);
+ TCGv m1 = tcg_constant_tl(0xff);
gen_address_mask(dc, addr);
tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
}
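/*
 * On the MO_ALIGN additions throughout this patch: SPARC requires
 * natural alignment for ordinary loads and stores, so the translator
 * now asks the memory subsystem to enforce it (raising the target's
 * unaligned-access trap on failure) rather than relying on separate
 * checks. A sketch, with an assumed helper name, using names from
 * this file:
 */
static void sketch_aligned_ld32(DisasContext *dc, TCGv dst, TCGv addr)
{
    gen_address_mask(dc, addr);
    /* MO_TEUL: 32-bit, guest endianness; MO_ALIGN: fault if unaligned. */
    tcg_gen_qemu_ld_tl(dst, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);
}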
break;
case GET_ASI_DIRECT:
gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop | MO_ALIGN);
break;
default:
{
- TCGv_i32 r_asi = tcg_const_i32(da.asi);
- TCGv_i32 r_mop = tcg_const_i32(memop);
+ TCGv_i32 r_asi = tcg_constant_i32(da.asi);
+ TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
save_state(dc);
#ifdef TARGET_SPARC64
/* fall through */
case GET_ASI_DIRECT:
gen_address_mask(dc, addr);
- tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop | MO_ALIGN);
break;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
case GET_ASI_BCOPY:
{
TCGv saddr = tcg_temp_new();
TCGv daddr = tcg_temp_new();
- TCGv four = tcg_const_tl(4);
+ TCGv four = tcg_constant_tl(4);
TCGv_i32 tmp = tcg_temp_new_i32();
int i;
#endif
default:
{
- TCGv_i32 r_asi = tcg_const_i32(da.asi);
- TCGv_i32 r_mop = tcg_const_i32(memop & MO_SIZE);
+ TCGv_i32 r_asi = tcg_constant_i32(da.asi);
+ TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
save_state(dc);
#ifdef TARGET_SPARC64
case GET_ASI_DIRECT:
oldv = tcg_temp_new();
tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
- da.mem_idx, da.memop);
+ da.mem_idx, da.memop | MO_ALIGN);
gen_store_gpr(dc, rd, oldv);
break;
default:
if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
gen_helper_exit_atomic(cpu_env);
} else {
- TCGv_i32 r_asi = tcg_const_i32(da.asi);
- TCGv_i32 r_mop = tcg_const_i32(MO_UB);
+ TCGv_i32 r_asi = tcg_constant_i32(da.asi);
+ TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
TCGv_i64 s64, t64;
save_state(dc);
t64 = tcg_temp_new_i64();
gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
- s64 = tcg_const_i64(0xff);
+ s64 = tcg_constant_i64(0xff);
gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_mop);
tcg_gen_trunc_i64_tl(dst, t64);
switch (size) {
case 4:
d32 = gen_dest_fpr_F(dc);
- tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
gen_store_fpr_F(dc, rd, d32);
break;
case 8:
/* The first operation checks required alignment. */
memop = da.memop | MO_ALIGN_64;
- eight = tcg_const_tl(8);
+ eight = tcg_constant_tl(8);
for (i = 0; ; ++i) {
tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
da.mem_idx, memop);
/* Valid for lddfa only. */
if (size == 8) {
gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+ da.memop | MO_ALIGN);
} else {
gen_exception(dc, TT_ILL_INSN);
}
default:
{
- TCGv_i32 r_asi = tcg_const_i32(da.asi);
- TCGv_i32 r_mop = tcg_const_i32(da.memop);
+ TCGv_i32 r_asi = tcg_constant_i32(da.asi);
+ TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN);
save_state(dc);
/* According to the table in the UA2011 manual, the only
other asis that are valid for ldfa/lddfa/ldqfa are
the NO_FAULT asis.  We still need a helper for these,
but we can just use the integer asi helper for them.  */
switch (size) {
case 4:
d32 = gen_load_fpr_F(dc, rd);
- tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
break;
case 8:
tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
/* The first operation checks required alignment. */
memop = da.memop | MO_ALIGN_64;
- eight = tcg_const_tl(8);
+ eight = tcg_constant_tl(8);
for (i = 0; ; ++i) {
tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
da.mem_idx, memop);
/* Valid for stdfa only. */
if (size == 8) {
gen_address_mask(dc, addr);
- tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+ da.memop | MO_ALIGN);
} else {
gen_exception(dc, TT_ILL_INSN);
}
TCGv_i64 tmp = tcg_temp_new_i64();
gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop | MO_ALIGN);
/* Note that LE ldda acts as if each 32-bit register
result is byte swapped.  Having just performed one
64-bit bswap, we need now to swap the writebacks.  */
/* ??? In theory we've handled all of the ASIs that are valid
for ldda, and this should raise DAE_invalid_asi.  However,
real hardware allows others.  This can be seen with e.g.
FreeBSD 10.3 wrt ASI_IC_TAG.  */
{
- TCGv_i32 r_asi = tcg_const_i32(da.asi);
- TCGv_i32 r_mop = tcg_const_i32(da.memop);
+ TCGv_i32 r_asi = tcg_constant_i32(da.asi);
+ TCGv_i32 r_mop = tcg_constant_i32(da.memop);
TCGv_i64 tmp = tcg_temp_new_i64();
save_state(dc);
tcg_gen_concat32_i64(t64, hi, lo);
}
gen_address_mask(dc, addr);
- tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
}
break;
/* ??? In theory we've handled all of the ASIs that are valid
for stda, and this should raise DAE_invalid_asi. */
{
- TCGv_i32 r_asi = tcg_const_i32(da.asi);
- TCGv_i32 r_mop = tcg_const_i32(da.memop);
+ TCGv_i32 r_asi = tcg_constant_i32(da.asi);
+ TCGv_i32 r_mop = tcg_constant_i32(da.memop);
TCGv_i64 t64 = tcg_temp_new_i64();
/* See above. */
case GET_ASI_DIRECT:
oldv = tcg_temp_new();
tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
- da.mem_idx, da.memop);
+ da.mem_idx, da.memop | MO_ALIGN);
gen_store_gpr(dc, rd, oldv);
break;
default:
return;
case GET_ASI_DIRECT:
gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
break;
default:
{
- TCGv_i32 r_asi = tcg_const_i32(da.asi);
- TCGv_i32 r_mop = tcg_const_i32(MO_UQ);
+ TCGv_i32 r_asi = tcg_constant_i32(da.asi);
+ TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);
save_state(dc);
gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
break;
case GET_ASI_DIRECT:
gen_address_mask(dc, addr);
- tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
break;
case GET_ASI_BFILL:
/* Store 32 bytes of T64 to ADDR. */
/* ??? The original qemu code suggests 8-byte alignment, dropping
the low bits, but the only place I can see this used is in the
Linux kernel with 32 byte alignment, which would make more sense
as a cacheline-style operation.  */
{
TCGv d_addr = tcg_temp_new();
- TCGv eight = tcg_const_tl(8);
+ TCGv eight = tcg_constant_tl(8);
int i;
tcg_gen_andi_tl(d_addr, addr, -8);
break;
default:
{
- TCGv_i32 r_asi = tcg_const_i32(da.asi);
- TCGv_i32 r_mop = tcg_const_i32(MO_UQ);
+ TCGv_i32 r_asi = tcg_constant_i32(da.asi);
+ TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);
save_state(dc);
gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
s1 = gen_load_fpr_F(dc, rs);
s2 = gen_load_fpr_F(dc, rd);
dst = gen_dest_fpr_F(dc);
- zero = tcg_const_i32(0);
+ zero = tcg_constant_i32(0);
tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
int width, bool cc, bool left)
{
- TCGv lo1, lo2, t1, t2;
+ TCGv lo1, lo2;
uint64_t amask, tabl, tabr;
int shift, imask, omask;
tcg_gen_shli_tl(lo1, lo1, shift);
tcg_gen_shli_tl(lo2, lo2, shift);
- t1 = tcg_const_tl(tabl);
- t2 = tcg_const_tl(tabr);
- tcg_gen_shr_tl(lo1, t1, lo1);
- tcg_gen_shr_tl(lo2, t2, lo2);
+ tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
+ tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
tcg_gen_andi_tl(dst, lo1, omask);
tcg_gen_andi_tl(lo2, lo2, omask);
/* We want to compute
dst = (s1 == s2 ? lo1 : lo1 & lo2).
We've already done dst = lo1, so this reduces to
dst &= (s1 == s2 ? -1 : lo2)
Which we perform by
lo2 |= -(s1 == s2)
dst &= lo2
*/
- tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
- tcg_gen_neg_tl(t1, t1);
- tcg_gen_or_tl(lo2, lo2, t1);
+ tcg_gen_setcond_tl(TCG_COND_EQ, lo1, s1, s2);
+ tcg_gen_neg_tl(lo1, lo1);
+ tcg_gen_or_tl(lo2, lo2, lo1);
tcg_gen_and_tl(dst, dst, lo2);
}
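/*
 * The setcond/neg/or sequence above implements the branch-free idiom
 * from the comment: -(s1 == s2) is all-ones when the addresses match
 * and zero otherwise, so OR-ing it into lo2 turns
 * "dst &= (s1 == s2 ? -1 : lo2)" into a plain AND. A standalone
 * sketch with hypothetical operands:
 */
static void sketch_select_mask(TCGv dst, TCGv lo2, TCGv a, TCGv b)
{
    TCGv t = tcg_temp_new();
    tcg_gen_setcond_tl(TCG_COND_EQ, t, a, b); /* t = (a == b) ? 1 : 0  */
    tcg_gen_neg_tl(t, t);                     /* t = (a == b) ? -1 : 0 */
    tcg_gen_or_tl(lo2, lo2, t);               /* lo2 |= -(a == b)      */
    tcg_gen_and_tl(dst, dst, lo2);            /* dst &= lo2            */
}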
TCGv_i32 r_const;
r_tickptr = tcg_temp_new_ptr();
- r_const = tcg_const_i32(dc->mem_idx);
+ r_const = tcg_constant_i32(dc->mem_idx);
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, tick));
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
- gen_io_start();
+ if (translator_io_start(&dc->base)) {
+ dc->base.is_jmp = DISAS_EXIT;
}
gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
r_const);
gen_store_gpr(dc, rd, cpu_dst);
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
- /* I/O operations in icount mode must end the TB */
- dc->base.is_jmp = DISAS_EXIT;
- }
}
break;
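/*
 * On the translator_io_start() conversions in this and the later
 * hunks: the helper subsumes the old open-coded icount test. A sketch
 * of the equivalence, assuming the pre-conversion semantics:
 *
 *     if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
 *         gen_io_start();
 *     }
 *     ... emit the I/O operation ...
 *     if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
 *         dc->base.is_jmp = DISAS_EXIT;   // I/O must end the TB
 *     }
 *
 * becomes
 *
 *     if (translator_io_start(&dc->base)) {
 *         dc->base.is_jmp = DISAS_EXIT;
 *     }
 *     ... emit the I/O operation ...
 *
 * The return value reports whether icount is active, so the
 * end-the-TB decision moves next to the call; sites that end the TB
 * unconditionally anyway can ignore it.
 */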
case 0x5: /* V9 rdpc */
TCGv_i32 r_const;
r_tickptr = tcg_temp_new_ptr();
- r_const = tcg_const_i32(dc->mem_idx);
+ r_const = tcg_constant_i32(dc->mem_idx);
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, stick));
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
- gen_io_start();
+ if (translator_io_start(&dc->base)) {
+ dc->base.is_jmp = DISAS_EXIT;
}
gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
r_const);
gen_store_gpr(dc, rd, cpu_dst);
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
- /* I/O operations in icount mode must end the TB */
- dc->base.is_jmp = DISAS_EXIT;
- }
}
break;
case 0x19: /* System tick compare */
TCGv_i32 r_const;
r_tickptr = tcg_temp_new_ptr();
- r_const = tcg_const_i32(dc->mem_idx);
+ r_const = tcg_constant_i32(dc->mem_idx);
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, tick));
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
- gen_io_start();
+ if (translator_io_start(&dc->base)) {
+ dc->base.is_jmp = DISAS_EXIT;
}
gen_helper_tick_get_count(cpu_tmp0, cpu_env,
r_tickptr, r_const);
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
- /* I/O operations in icount mode must end the TB */
- dc->base.is_jmp = DISAS_EXIT;
- }
}
break;
case 5: // tba
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, tick));
- if (tb_cflags(dc->base.tb) &
- CF_USE_ICOUNT) {
- gen_io_start();
- }
+ translator_io_start(&dc->base);
gen_helper_tick_set_limit(r_tickptr,
cpu_tick_cmpr);
/* End TB to handle timer interrupt */
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, stick));
- if (tb_cflags(dc->base.tb) &
- CF_USE_ICOUNT) {
- gen_io_start();
- }
+ translator_io_start(&dc->base);
gen_helper_tick_set_count(r_tickptr,
cpu_tmp0);
/* End TB to handle timer interrupt */
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, stick));
- if (tb_cflags(dc->base.tb) &
- CF_USE_ICOUNT) {
- gen_io_start();
- }
+ translator_io_start(&dc->base);
gen_helper_tick_set_limit(r_tickptr,
cpu_stick_cmpr);
/* End TB to handle timer interrupt */
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, tick));
- if (tb_cflags(dc->base.tb) &
- CF_USE_ICOUNT) {
- gen_io_start();
- }
+ translator_io_start(&dc->base);
gen_helper_tick_set_count(r_tickptr,
cpu_tmp0);
/* End TB to handle timer interrupt */
break;
case 6: // pstate
save_state(dc);
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
- gen_io_start();
- }
- gen_helper_wrpstate(cpu_env, cpu_tmp0);
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
- /* I/O ops in icount mode must end the TB */
+ if (translator_io_start(&dc->base)) {
dc->base.is_jmp = DISAS_EXIT;
}
+ gen_helper_wrpstate(cpu_env, cpu_tmp0);
dc->npc = DYNAMIC_PC;
break;
case 7: // tl
dc->npc = DYNAMIC_PC;
break;
case 8: // pil
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
- gen_io_start();
- }
- gen_helper_wrpil(cpu_env, cpu_tmp0);
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
- /* I/O ops in icount mode must end the TB */
+ if (translator_io_start(&dc->base)) {
dc->base.is_jmp = DISAS_EXIT;
}
+ gen_helper_wrpil(cpu_env, cpu_tmp0);
break;
case 9: // cwp
gen_helper_wrcwp(cpu_env, cpu_tmp0);
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, hstick));
- if (tb_cflags(dc->base.tb) &
- CF_USE_ICOUNT) {
- gen_io_start();
- }
+ translator_io_start(&dc->base);
gen_helper_tick_set_limit(r_tickptr,
cpu_hstick_cmpr);
/* End TB to handle timer interrupt */
goto priv_insn;
dc->npc = DYNAMIC_PC;
dc->pc = DYNAMIC_PC;
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
- gen_io_start();
- }
+ translator_io_start(&dc->base);
gen_helper_done(cpu_env);
goto jmp_insn;
case 1:
goto priv_insn;
dc->npc = DYNAMIC_PC;
dc->pc = DYNAMIC_PC;
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
- gen_io_start();
- }
+ translator_io_start(&dc->base);
gen_helper_retry(cpu_env);
goto jmp_insn;
default:
switch (xop) {
case 0x0: /* ld, V9 lduw, load unsigned word */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TEUL | MO_ALIGN);
break;
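/*
 * The load/store hunks in this region (starting with the lduw case
 * above) move from the legacy size-suffixed helpers to the
 * explicit-MemOp API, making both the access size and the alignment
 * requirement visible at the call site:
 *
 *     tcg_gen_qemu_ld8u/ld8s    ->  tcg_gen_qemu_ld_tl(..., MO_UB / MO_SB)
 *     tcg_gen_qemu_ld16u/ld16s  ->  tcg_gen_qemu_ld_tl(..., MO_TEUW / MO_TESW | MO_ALIGN)
 *     tcg_gen_qemu_ld32u/ld32s  ->  tcg_gen_qemu_ld_tl(..., MO_TEUL / MO_TESL | MO_ALIGN)
 *     tcg_gen_qemu_ld64         ->  tcg_gen_qemu_ld_i64(..., MO_TEUQ | MO_ALIGN)
 *
 * and likewise for the tcg_gen_qemu_st_* forms. Byte accesses cannot
 * be misaligned, so MO_UB/MO_SB carry no MO_ALIGN flag.
 */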
case 0x1: /* ldub, load unsigned byte */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_UB);
break;
case 0x2: /* lduh, load unsigned halfword */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TEUW | MO_ALIGN);
break;
case 0x3: /* ldd, load double word */
if (rd & 1)
gen_address_mask(dc, cpu_addr);
t64 = tcg_temp_new_i64();
- tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_i64(t64, cpu_addr,
+ dc->mem_idx, MO_TEUQ | MO_ALIGN);
tcg_gen_trunc_i64_tl(cpu_val, t64);
tcg_gen_ext32u_tl(cpu_val, cpu_val);
gen_store_gpr(dc, rd + 1, cpu_val);
break;
case 0x9: /* ldsb, load signed byte */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, dc->mem_idx, MO_SB);
break;
case 0xa: /* ldsh, load signed halfword */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TESW | MO_ALIGN);
break;
case 0xd: /* ldstub */
gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
#ifdef TARGET_SPARC64
case 0x08: /* V9 ldsw */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TESL | MO_ALIGN);
break;
case 0x0b: /* V9 ldx */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TEUQ | MO_ALIGN);
break;
case 0x18: /* V9 ldswa */
gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
gen_address_mask(dc, cpu_addr);
cpu_dst_32 = gen_dest_fpr_F(dc);
tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
- dc->mem_idx, MO_TEUL);
+ dc->mem_idx, MO_TEUL | MO_ALIGN);
gen_store_fpr_F(dc, rd, cpu_dst_32);
break;
case 0x21: /* ldfsr, V9 ldxfsr */
if (rd == 1) {
TCGv_i64 t64 = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(t64, cpu_addr,
- dc->mem_idx, MO_TEUQ);
+ dc->mem_idx, MO_TEUQ | MO_ALIGN);
gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
break;
}
#endif
cpu_dst_32 = tcg_temp_new_i32();
tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
- dc->mem_idx, MO_TEUL);
+ dc->mem_idx, MO_TEUL | MO_ALIGN);
gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
break;
case 0x22: /* ldqf, load quad fpreg */
switch (xop) {
case 0x4: /* st, store word */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TEUL | MO_ALIGN);
break;
case 0x5: /* stb, store byte */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_tl(cpu_val, cpu_addr, dc->mem_idx, MO_UB);
break;
case 0x6: /* sth, store halfword */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TEUW | MO_ALIGN);
break;
case 0x7: /* std, store double word */
if (rd & 1)
lo = gen_load_gpr(dc, rd + 1);
t64 = tcg_temp_new_i64();
tcg_gen_concat_tl_i64(t64, lo, cpu_val);
- tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_i64(t64, cpu_addr,
+ dc->mem_idx, MO_TEUQ | MO_ALIGN);
}
break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
#ifdef TARGET_SPARC64
case 0x0e: /* V9 stx */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TEUQ | MO_ALIGN);
break;
case 0x1e: /* V9 stxa */
gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
gen_address_mask(dc, cpu_addr);
cpu_src1_32 = gen_load_fpr_F(dc, rd);
tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
- dc->mem_idx, MO_TEUL);
+ dc->mem_idx, MO_TEUL | MO_ALIGN);
break;
case 0x25: /* stfsr, V9 stxfsr */
{
#ifdef TARGET_SPARC64
gen_address_mask(dc, cpu_addr);
if (rd == 1) {
- tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
+ dc->mem_idx, MO_TEUQ | MO_ALIGN);
break;
}
#endif
- tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
+ dc->mem_idx, MO_TEUL | MO_ALIGN);
}
break;
case 0x26: