#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/log.h"
-#include "exec/cpu_ldst.h"
#include "exec/translator.h"
#include "qemu/qemu-print.h"
-#include "exec/gen-icount.h"
+#include "semihosting/semihost.h"
+
+#define HELPER_H "helper.h"
+#include "exec/helper-info.c.inc"
+#undef HELPER_H
+
/* is_jmp field values */
-#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
#define INSTRUCTION_FLG(func, flags) { (func), (flags) }
#define INSN_R_TYPE 0x3A
/* I-Type instruction parsing */
+/* Decoded fields of an I-Type instruction: op | imm16 | B | A. */
+typedef struct {
+    uint8_t op;
+    union {
+        uint16_t u;   /* immediate viewed zero-extended */
+        int16_t s;    /* immediate viewed sign-extended */
+    } imm16;
+    uint8_t b;
+    uint8_t a;
+} InstrIType;
+
#define I_TYPE(instr, code) \
-    struct { \
-        uint8_t op; \
-        union { \
-            uint16_t u; \
-            int16_t s; \
-        } imm16; \
-        uint8_t b; \
-        uint8_t a; \
-    } (instr) = { \
+    InstrIType (instr) = { \
        .op = extract32((code), 0, 6), \
        .imm16.u = extract32((code), 6, 16), \
        .b = extract32((code), 22, 5), \
        .a = extract32((code), 27, 5), \
    }
+/* Callback type: pull the 16-bit immediate out of a decoded I-Type insn. */
+typedef target_ulong ImmFromIType(const InstrIType *);
+
+/* imm16 zero-extended (logical ops, unsigned compares). */
+static target_ulong imm_unsigned(const InstrIType *i)
+{
+    return i->imm16.u;
+}
+
+/* imm16 sign-extended (arithmetic ops, signed compares). */
+static target_ulong imm_signed(const InstrIType *i)
+{
+    return i->imm16.s;
+}
+
+/* imm16 placed in the upper half-word (andhi/orhi/xorhi). */
+static target_ulong imm_shifted(const InstrIType *i)
+{
+    return i->imm16.u << 16;
+}
+
/* R-Type instruction parsing */
+/* Decoded fields of an R-Type instruction: op | imm5 | opx | C | B | A. */
+typedef struct {
+    uint8_t op;
+    uint8_t imm5;
+    uint8_t opx;
+    uint8_t c;
+    uint8_t b;
+    uint8_t a;
+} InstrRType;
+
#define R_TYPE(instr, code) \
- struct { \
- uint8_t op; \
- uint8_t imm5; \
- uint8_t opx; \
- uint8_t c; \
- uint8_t b; \
- uint8_t a; \
- } (instr) = { \
+ InstrRType (instr) = { \
.op = extract32((code), 0, 6), \
.imm5 = extract32((code), 6, 5), \
.opx = extract32((code), 11, 6), \
}
/* J-Type instruction parsing */
+/* Decoded fields of a J-Type instruction: op | imm26. */
+typedef struct {
+    uint8_t op;
+    uint32_t imm26;
+} InstrJType;
+
#define J_TYPE(instr, code) \
-    struct { \
-        uint8_t op; \
-        uint32_t imm26; \
-    } (instr) = { \
+    InstrJType (instr) = { \
        .op = extract32((code), 0, 6), \
        .imm26 = extract32((code), 6, 26), \
    }
+/* Generator callback shapes used by the macro-instantiated emitters below. */
+typedef void GenFn2i(TCGv, TCGv, target_long);   /* dest, src, immediate */
+typedef void GenFn3(TCGv, TCGv, TCGv);           /* dest, src1, src2 */
+typedef void GenFn4(TCGv, TCGv, TCGv, TCGv);     /* two outputs, two inputs */
+
typedef struct DisasContext {
    DisasContextBase base;
-    TCGv_i32 zero;
    target_ulong pc;
    int mem_idx;
+    uint32_t tb_flags;               /* cached copy of base.tb->flags */
+    TCGv sink;                       /* lazily-made discard temp for r0 writes */
+    const ControlRegState *cr_state; /* per-CPU control register descriptors */
+    bool eic_present;                /* external interrupt controller present */
} DisasContext;
static TCGv cpu_R[NUM_GP_REGS];
static TCGv cpu_pc;
+#ifndef CONFIG_USER_ONLY
+/* Window onto the current register set, for shadow-register-set support. */
+static TCGv cpu_crs_R[NUM_GP_REGS];
+#endif
typedef struct Nios2Instruction {
void (*handler)(DisasContext *dc, uint32_t code, uint32_t flags);
return instr.opx;
}
-static TCGv load_zero(DisasContext *dc)
+/*
+ * Return a TCGv from which GPR "reg" of the current register set is read.
+ * Reads of r0 fold to constant 0 only when TBFLAGS.R0_0 guarantees it.
+ */
+static TCGv load_gpr(DisasContext *dc, unsigned reg)
{
-    if (!dc->zero) {
-        dc->zero = tcg_const_i32(0);
+    assert(reg < NUM_GP_REGS);
+
+    /*
+     * With shadow register sets, register r0 does not necessarily contain 0,
+     * but it is overwhelmingly likely that it does -- software is supposed
+     * to have set r0 to 0 in every shadow register set before use.
+     */
+    if (unlikely(reg == R_ZERO) && FIELD_EX32(dc->tb_flags, TBFLAGS, R0_0)) {
+        return tcg_constant_tl(0);
+    }
+    /* CRS0: the current register set is the primary bank, cpu_R. */
+    if (FIELD_EX32(dc->tb_flags, TBFLAGS, CRS0)) {
+        return cpu_R[reg];
    }
-    return dc->zero;
+#ifdef CONFIG_USER_ONLY
+    /* User-only emulation always runs in register set 0. */
+    g_assert_not_reached();
+#else
+    return cpu_crs_R[reg];
+#endif
}
-static TCGv load_gpr(DisasContext *dc, uint8_t reg)
+/*
+ * Return a TCGv into which GPR "reg" of the current register set is written.
+ * Writes to r0 are routed to a throw-away temp and thus discarded.
+ */
+static TCGv dest_gpr(DisasContext *dc, unsigned reg)
{
-    if (likely(reg != R_ZERO)) {
+    assert(reg < NUM_GP_REGS);
+
+    /*
+     * The spec for shadow register sets isn't clear, but we assume that
+     * writes to r0 are discarded regardless of CRS.
+     */
+    if (unlikely(reg == R_ZERO)) {
+        /* Allocate the discard temp lazily. */
+        if (dc->sink == NULL) {
+            dc->sink = tcg_temp_new();
+        }
+        return dc->sink;
+    }
+    if (FIELD_EX32(dc->tb_flags, TBFLAGS, CRS0)) {
        return cpu_R[reg];
-    } else {
-        return load_zero(dc);
    }
+#ifdef CONFIG_USER_ONLY
+    /* User-only emulation always runs in register set 0. */
+    g_assert_not_reached();
+#else
+    return cpu_crs_R[reg];
+#endif
}
-static void t_gen_helper_raise_exception(DisasContext *dc,
-                                         uint32_t index)
+/* Raise exception "index"; ends the TB (DISAS_NORETURN). */
+static void t_gen_helper_raise_exception(DisasContext *dc, uint32_t index)
{
-    TCGv_i32 tmp = tcg_const_i32(index);
-
-    tcg_gen_movi_tl(cpu_pc, dc->pc);
-    gen_helper_raise_exception(cpu_env, tmp);
-    tcg_temp_free_i32(tmp);
+    /* Note that PC is advanced for all hardware exceptions. */
+    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
+    gen_helper_raise_exception(tcg_env, tcg_constant_i32(index));
    dc->base.is_jmp = DISAS_NORETURN;
}
tcg_gen_exit_tb(tb, n);
} else {
tcg_gen_movi_tl(cpu_pc, dest);
- tcg_gen_exit_tb(NULL, 0);
+ tcg_gen_lookup_and_goto_ptr();
}
+ dc->base.is_jmp = DISAS_NORETURN;
+}
+
+/*
+ * Emit an indirect jump to the address held in GPR "regno", with a
+ * run-time 4-byte alignment check.  A misaligned target stores the bad
+ * address into CR_BADADDR and raises EXCP_UNALIGND; for calls, RA is
+ * written only on the aligned path.
+ */
+static void gen_jumpr(DisasContext *dc, int regno, bool is_call)
+{
+    TCGLabel *l = gen_new_label();
+    TCGv test = tcg_temp_new();
+    TCGv dest = load_gpr(dc, regno);
+
+    /* Take the out-of-line path if either low bit of the target is set. */
+    tcg_gen_andi_tl(test, dest, 3);
+    tcg_gen_brcondi_tl(TCG_COND_NE, test, 0, l);
+
+    tcg_gen_mov_tl(cpu_pc, dest);
+    if (is_call) {
+        tcg_gen_movi_tl(dest_gpr(dc, R_RA), dc->base.pc_next);
+    }
+    tcg_gen_lookup_and_goto_ptr();
+
+    /* Misaligned target: record it and trap. */
+    gen_set_label(l);
+    tcg_gen_st_tl(dest, tcg_env, offsetof(CPUNios2State, ctrl[CR_BADADDR]));
+    t_gen_helper_raise_exception(dc, EXCP_UNALIGND);
+
+    dc->base.is_jmp = DISAS_NORETURN;
}
static void gen_excp(DisasContext *dc, uint32_t code, uint32_t flags)
static bool gen_check_supervisor(DisasContext *dc)
{
- if (dc->base.tb->flags & CR_STATUS_U) {
+ if (FIELD_EX32(dc->tb_flags, TBFLAGS, U)) {
/* CPU in user mode, privileged instruction called, stop. */
t_gen_helper_raise_exception(dc, EXCP_SUPERI);
return false;
{
J_TYPE(instr, code);
gen_goto_tb(dc, 0, (dc->pc & 0xF0000000) | (instr.imm26 << 2));
- dc->base.is_jmp = DISAS_NORETURN;
}
static void call(DisasContext *dc, uint32_t code, uint32_t flags)
{
+    /* ra <- address of the following insn, then branch like jmpi. */
-    tcg_gen_movi_tl(cpu_R[R_RA], dc->base.pc_next);
+    tcg_gen_movi_tl(dest_gpr(dc, R_RA), dc->base.pc_next);
    jmpi(dc, code, flags);
}
I_TYPE(instr, code);
TCGv addr = tcg_temp_new();
- TCGv data;
-
- /*
- * WARNING: Loads into R_ZERO are ignored, but we must generate the
- * memory access itself to emulate the CPU precisely. Load
- * from a protected page to R_ZERO will cause SIGSEGV on
- * the Nios2 CPU.
- */
- if (likely(instr.b != R_ZERO)) {
- data = cpu_R[instr.b];
- } else {
- data = tcg_temp_new();
- }
+ TCGv data = dest_gpr(dc, instr.b);
tcg_gen_addi_tl(addr, load_gpr(dc, instr.a), instr.imm16.s);
+#ifdef CONFIG_USER_ONLY
+ flags |= MO_UNALN;
+#else
+ flags |= MO_ALIGN;
+#endif
tcg_gen_qemu_ld_tl(data, addr, dc->mem_idx, flags);
-
- if (unlikely(instr.b == R_ZERO)) {
- tcg_temp_free(data);
- }
-
- tcg_temp_free(addr);
}
/* Store instructions */
TCGv addr = tcg_temp_new();
tcg_gen_addi_tl(addr, load_gpr(dc, instr.a), instr.imm16.s);
+#ifdef CONFIG_USER_ONLY
+ flags |= MO_UNALN;
+#else
+ flags |= MO_ALIGN;
+#endif
tcg_gen_qemu_st_tl(val, addr, dc->mem_idx, flags);
- tcg_temp_free(addr);
}
/* Branch instructions */
I_TYPE(instr, code);
gen_goto_tb(dc, 0, dc->base.pc_next + (instr.imm16.s & -4));
- dc->base.is_jmp = DISAS_NORETURN;
}
static void gen_bxx(DisasContext *dc, uint32_t code, uint32_t flags)
I_TYPE(instr, code);
TCGLabel *l1 = gen_new_label();
- tcg_gen_brcond_tl(flags, cpu_R[instr.a], cpu_R[instr.b], l1);
+ tcg_gen_brcond_tl(flags, load_gpr(dc, instr.a), load_gpr(dc, instr.b), l1);
gen_goto_tb(dc, 0, dc->base.pc_next);
gen_set_label(l1);
gen_goto_tb(dc, 1, dc->base.pc_next + (instr.imm16.s & -4));
- dc->base.is_jmp = DISAS_NORETURN;
}
/* Comparison instructions */
-#define gen_i_cmpxx(fname, op3) \
-static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
-{ \
-    I_TYPE(instr, (code)); \
-    tcg_gen_setcondi_tl(flags, cpu_R[instr.b], cpu_R[instr.a], (op3)); \
+/* rB <- (rA cond imm16), for the compare-with-immediate insns. */
+static void do_i_cmpxx(DisasContext *dc, uint32_t insn,
+                       TCGCond cond, ImmFromIType *imm)
+{
+    I_TYPE(instr, insn);
+    tcg_gen_setcondi_tl(cond, dest_gpr(dc, instr.b),
+                        load_gpr(dc, instr.a), imm(&instr));
}
-gen_i_cmpxx(gen_cmpxxsi, instr.imm16.s)
-gen_i_cmpxx(gen_cmpxxui, instr.imm16.u)
+/* Instantiate a decode-table handler; the condition arrives via "flags". */
+#define gen_i_cmpxx(fname, imm) \
+    static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
+    { do_i_cmpxx(dc, code, flags, imm); }
+
+gen_i_cmpxx(gen_cmpxxsi, imm_signed)
+gen_i_cmpxx(gen_cmpxxui, imm_unsigned)
/* Math/logic instructions */
-#define gen_i_math_logic(fname, insn, resimm, op3) \
-static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
-{ \
-    I_TYPE(instr, (code)); \
-    if (unlikely(instr.b == R_ZERO)) { /* Store to R_ZERO is ignored */ \
-        return; \
-    } else if (instr.a == R_ZERO) { /* MOVxI optimizations */ \
-        tcg_gen_movi_tl(cpu_R[instr.b], (resimm) ? (op3) : 0); \
-    } else { \
-        tcg_gen_##insn##_tl(cpu_R[instr.b], cpu_R[instr.a], (op3)); \
-    } \
-}
-
-gen_i_math_logic(addi, addi, 1, instr.imm16.s)
-gen_i_math_logic(muli, muli, 0, instr.imm16.s)
-
-gen_i_math_logic(andi, andi, 0, instr.imm16.u)
-gen_i_math_logic(ori, ori, 1, instr.imm16.u)
-gen_i_math_logic(xori, xori, 1, instr.imm16.u)
-
-gen_i_math_logic(andhi, andi, 0, instr.imm16.u << 16)
-gen_i_math_logic(orhi , ori, 1, instr.imm16.u << 16)
-gen_i_math_logic(xorhi, xori, 1, instr.imm16.u << 16)
+/*
+ * Shared emitter for the I-type ALU ops: rB <- rA <op> imm.
+ * x_op_0_eq_x is true when (x op 0) == x, which licenses the constant
+ * shortcut used by the canonical movi/movhi expansions.
+ */
+static void do_i_math_logic(DisasContext *dc, uint32_t insn,
+                            GenFn2i *fn, ImmFromIType *imm,
+                            bool x_op_0_eq_x)
+{
+    I_TYPE(instr, insn);
+    target_ulong val;
+
+    if (unlikely(instr.b == R_ZERO)) {
+        /* Store to R_ZERO is ignored -- this catches the canonical NOP. */
+        return;
+    }
+
+    val = imm(&instr);
+
+    if (instr.a == R_ZERO && FIELD_EX32(dc->tb_flags, TBFLAGS, R0_0)) {
+        /* This catches the canonical expansions of movi and movhi. */
+        tcg_gen_movi_tl(dest_gpr(dc, instr.b), x_op_0_eq_x ? val : 0);
+    } else {
+        fn(dest_gpr(dc, instr.b), load_gpr(dc, instr.a), val);
+    }
+}
+
+#define gen_i_math_logic(fname, insn, x_op_0, imm) \
+    static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
+    { do_i_math_logic(dc, code, tcg_gen_##insn##_tl, imm, x_op_0); }
+
+gen_i_math_logic(addi, addi, 1, imm_signed)
+gen_i_math_logic(muli, muli, 0, imm_signed)
+
+gen_i_math_logic(andi, andi, 0, imm_unsigned)
+gen_i_math_logic(ori, ori, 1, imm_unsigned)
+gen_i_math_logic(xori, xori, 1, imm_unsigned)
+
+gen_i_math_logic(andhi, andi, 0, imm_shifted)
+gen_i_math_logic(orhi , ori, 1, imm_shifted)
+gen_i_math_logic(xorhi, xori, 1, imm_shifted)
+
+/* rB <- prs.rA + sigma(IMM16) */
+static void rdprs(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+    /* rdprs exists only with the external interrupt controller. */
+    if (!dc->eic_present) {
+        t_gen_helper_raise_exception(dc, EXCP_ILLEGAL);
+        return;
+    }
+    /* Supervisor-only: raises EXCP_SUPERI in user mode. */
+    if (!gen_check_supervisor(dc)) {
+        return;
+    }
+
+#ifdef CONFIG_USER_ONLY
+    g_assert_not_reached();
+#else
+    I_TYPE(instr, code);
+    TCGv dest = dest_gpr(dc, instr.b);
+    /* Fetch rA from the previous register set, then add the immediate. */
+    gen_helper_rdprs(dest, tcg_env, tcg_constant_i32(instr.a));
+    tcg_gen_addi_tl(dest, dest, instr.imm16.s);
+#endif
+}
/* Prototype only, defined below */
static void handle_r_type_instr(DisasContext *dc, uint32_t code,
INSTRUCTION_FLG(gen_cmpxxsi, TCG_COND_GE), /* cmpgei */
INSTRUCTION_ILLEGAL(),
INSTRUCTION_ILLEGAL(),
- INSTRUCTION_FLG(gen_ldx, MO_UW), /* ldhu */
+ INSTRUCTION_FLG(gen_ldx, MO_TEUW), /* ldhu */
INSTRUCTION(andi), /* andi */
- INSTRUCTION_FLG(gen_stx, MO_UW), /* sth */
+ INSTRUCTION_FLG(gen_stx, MO_TEUW), /* sth */
INSTRUCTION_FLG(gen_bxx, TCG_COND_GE), /* bge */
- INSTRUCTION_FLG(gen_ldx, MO_SW), /* ldh */
+ INSTRUCTION_FLG(gen_ldx, MO_TESW), /* ldh */
INSTRUCTION_FLG(gen_cmpxxsi, TCG_COND_LT), /* cmplti */
INSTRUCTION_ILLEGAL(),
INSTRUCTION_ILLEGAL(),
INSTRUCTION_NOP(), /* initda */
INSTRUCTION(ori), /* ori */
- INSTRUCTION_FLG(gen_stx, MO_UL), /* stw */
+ INSTRUCTION_FLG(gen_stx, MO_TEUL), /* stw */
INSTRUCTION_FLG(gen_bxx, TCG_COND_LT), /* blt */
- INSTRUCTION_FLG(gen_ldx, MO_UL), /* ldw */
+ INSTRUCTION_FLG(gen_ldx, MO_TEUL), /* ldw */
INSTRUCTION_FLG(gen_cmpxxsi, TCG_COND_NE), /* cmpnei */
INSTRUCTION_ILLEGAL(),
INSTRUCTION_ILLEGAL(),
INSTRUCTION_FLG(gen_cmpxxui, TCG_COND_GEU), /* cmpgeui */
INSTRUCTION_ILLEGAL(),
INSTRUCTION_ILLEGAL(),
- INSTRUCTION_FLG(gen_ldx, MO_UW), /* ldhuio */
+ INSTRUCTION_FLG(gen_ldx, MO_TEUW), /* ldhuio */
INSTRUCTION(andhi), /* andhi */
- INSTRUCTION_FLG(gen_stx, MO_UW), /* sthio */
+ INSTRUCTION_FLG(gen_stx, MO_TEUW), /* sthio */
INSTRUCTION_FLG(gen_bxx, TCG_COND_GEU), /* bgeu */
- INSTRUCTION_FLG(gen_ldx, MO_SW), /* ldhio */
+ INSTRUCTION_FLG(gen_ldx, MO_TESW), /* ldhio */
INSTRUCTION_FLG(gen_cmpxxui, TCG_COND_LTU), /* cmpltui */
INSTRUCTION_ILLEGAL(),
INSTRUCTION_UNIMPLEMENTED(), /* custom */
INSTRUCTION_NOP(), /* initd */
INSTRUCTION(orhi), /* orhi */
- INSTRUCTION_FLG(gen_stx, MO_SL), /* stwio */
+ INSTRUCTION_FLG(gen_stx, MO_TESL), /* stwio */
INSTRUCTION_FLG(gen_bxx, TCG_COND_LTU), /* bltu */
- INSTRUCTION_FLG(gen_ldx, MO_UL), /* ldwio */
- INSTRUCTION_UNIMPLEMENTED(), /* rdprs */
+ INSTRUCTION_FLG(gen_ldx, MO_TEUL), /* ldwio */
+ INSTRUCTION(rdprs), /* rdprs */
INSTRUCTION_ILLEGAL(),
INSTRUCTION_FLG(handle_r_type_instr, 0), /* R-Type */
INSTRUCTION_NOP(), /* flushd */
#ifdef CONFIG_USER_ONLY
g_assert_not_reached();
#else
- TCGv tmp = tcg_temp_new();
- tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPUNios2State, ctrl[CR_ESTATUS]));
- gen_helper_eret(cpu_env, tmp, cpu_R[R_EA]);
- tcg_temp_free(tmp);
-
+ if (FIELD_EX32(dc->tb_flags, TBFLAGS, CRS0)) {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPUNios2State, ctrl[CR_ESTATUS]));
+ gen_helper_eret(tcg_env, tmp, load_gpr(dc, R_EA));
+ } else {
+ gen_helper_eret(tcg_env, load_gpr(dc, R_SSTATUS), load_gpr(dc, R_EA));
+ }
dc->base.is_jmp = DISAS_NORETURN;
#endif
}
/* PC <- ra */
static void ret(DisasContext *dc, uint32_t code, uint32_t flags)
{
+    /* Indirect jump through ra, with alignment check; no link. */
-    tcg_gen_mov_tl(cpu_pc, cpu_R[R_RA]);
-
-    dc->base.is_jmp = DISAS_JUMP;
+    gen_jumpr(dc, R_RA, false);
}
/*
g_assert_not_reached();
#else
TCGv tmp = tcg_temp_new();
- tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPUNios2State, ctrl[CR_BSTATUS]));
- gen_helper_eret(cpu_env, tmp, cpu_R[R_BA]);
- tcg_temp_free(tmp);
+ tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPUNios2State, ctrl[CR_BSTATUS]));
+ gen_helper_eret(tcg_env, tmp, load_gpr(dc, R_BA));
dc->base.is_jmp = DISAS_NORETURN;
#endif
{
R_TYPE(instr, code);
- tcg_gen_mov_tl(cpu_pc, load_gpr(dc, instr.a));
-
- dc->base.is_jmp = DISAS_JUMP;
+ gen_jumpr(dc, instr.a, false);
}
/* rC <- PC + 4 */
{
R_TYPE(instr, code);
- if (likely(instr.c != R_ZERO)) {
- tcg_gen_movi_tl(cpu_R[instr.c], dc->base.pc_next);
- }
+ tcg_gen_movi_tl(dest_gpr(dc, instr.c), dc->base.pc_next);
}
/*
{
R_TYPE(instr, code);
- tcg_gen_mov_tl(cpu_pc, load_gpr(dc, instr.a));
- tcg_gen_movi_tl(cpu_R[R_RA], dc->base.pc_next);
-
- dc->base.is_jmp = DISAS_JUMP;
+ gen_jumpr(dc, instr.a, true);
}
/* rC <- ctlN */
static void rdctl(DisasContext *dc, uint32_t code, uint32_t flags)
{
- R_TYPE(instr, code);
- TCGv t1, t2;
-
if (!gen_check_supervisor(dc)) {
return;
}
- if (unlikely(instr.c == R_ZERO)) {
+#ifdef CONFIG_USER_ONLY
+ g_assert_not_reached();
+#else
+ R_TYPE(instr, code);
+ TCGv t1, t2, dest = dest_gpr(dc, instr.c);
+
+ /* Reserved registers read as zero. */
+ if (nios2_cr_reserved(&dc->cr_state[instr.imm5])) {
+ tcg_gen_movi_tl(dest, 0);
return;
}
*/
t1 = tcg_temp_new();
t2 = tcg_temp_new();
- tcg_gen_ld_tl(t1, cpu_env, offsetof(CPUNios2State, ctrl[CR_IPENDING]));
- tcg_gen_ld_tl(t2, cpu_env, offsetof(CPUNios2State, ctrl[CR_IENABLE]));
- tcg_gen_and_tl(cpu_R[instr.c], t1, t2);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
+ tcg_gen_ld_tl(t1, tcg_env, offsetof(CPUNios2State, ctrl[CR_IPENDING]));
+ tcg_gen_ld_tl(t2, tcg_env, offsetof(CPUNios2State, ctrl[CR_IENABLE]));
+ tcg_gen_and_tl(dest, t1, t2);
break;
default:
- tcg_gen_ld_tl(cpu_R[instr.c], cpu_env,
+ tcg_gen_ld_tl(dest, tcg_env,
offsetof(CPUNios2State, ctrl[instr.imm5]));
break;
}
+#endif
}
/* ctlN <- rA */
#else
R_TYPE(instr, code);
TCGv v = load_gpr(dc, instr.a);
+ uint32_t ofs = offsetof(CPUNios2State, ctrl[instr.imm5]);
+ uint32_t wr = dc->cr_state[instr.imm5].writable;
+ uint32_t ro = dc->cr_state[instr.imm5].readonly;
+
+ /* Skip reserved or readonly registers. */
+ if (wr == 0) {
+ return;
+ }
switch (instr.imm5) {
case CR_PTEADDR:
- gen_helper_mmu_write_pteaddr(cpu_env, v);
+ gen_helper_mmu_write_pteaddr(tcg_env, v);
break;
case CR_TLBACC:
- gen_helper_mmu_write_tlbacc(cpu_env, v);
+ gen_helper_mmu_write_tlbacc(tcg_env, v);
break;
case CR_TLBMISC:
- gen_helper_mmu_write_tlbmisc(cpu_env, v);
- break;
- case CR_IPENDING:
- /* ipending is read only, writes ignored. */
+ gen_helper_mmu_write_tlbmisc(tcg_env, v);
break;
case CR_STATUS:
case CR_IENABLE:
dc->base.is_jmp = DISAS_UPDATE;
/* fall through */
default:
- tcg_gen_st_tl(v, cpu_env,
- offsetof(CPUNios2State, ctrl[instr.imm5]));
+ if (wr == -1) {
+ /* The register is entirely writable. */
+ tcg_gen_st_tl(v, tcg_env, ofs);
+ } else {
+ /*
+ * The register is partially read-only or reserved:
+ * merge the value.
+ */
+ TCGv n = tcg_temp_new();
+
+ tcg_gen_andi_tl(n, v, wr);
+
+ if (ro != 0) {
+ TCGv o = tcg_temp_new();
+ tcg_gen_ld_tl(o, tcg_env, ofs);
+ tcg_gen_andi_tl(o, o, ro);
+ tcg_gen_or_tl(n, n, o);
+ }
+
+ tcg_gen_st_tl(n, tcg_env, ofs);
+ }
break;
}
#endif
}
+/* prs.rC <- rA */
+static void wrprs(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+    /* wrprs exists only with the external interrupt controller. */
+    if (!dc->eic_present) {
+        t_gen_helper_raise_exception(dc, EXCP_ILLEGAL);
+        return;
+    }
+    /* Supervisor-only: raises EXCP_SUPERI in user mode. */
+    if (!gen_check_supervisor(dc)) {
+        return;
+    }
+
+#ifdef CONFIG_USER_ONLY
+    g_assert_not_reached();
+#else
+    R_TYPE(instr, code);
+    gen_helper_wrprs(tcg_env, tcg_constant_i32(instr.c),
+                     load_gpr(dc, instr.a));
+    /*
+     * The expected write to PRS[r0] is 0, from CRS[r0].
+     * If not, and CRS == PRS (which we cannot tell from here),
+     * we may now have a non-zero value in our current r0.
+     * By ending the TB, we re-evaluate tb_flags and find out.
+     */
+    if (instr.c == 0
+        && (instr.a != 0 || !FIELD_EX32(dc->tb_flags, TBFLAGS, R0_0))) {
+        dc->base.is_jmp = DISAS_UPDATE;
+    }
+#endif
+}
+
/* Comparison instructions */
static void gen_cmpxx(DisasContext *dc, uint32_t code, uint32_t flags)
{
    R_TYPE(instr, code);
-    if (likely(instr.c != R_ZERO)) {
-        tcg_gen_setcond_tl(flags, cpu_R[instr.c], cpu_R[instr.a],
-                           cpu_R[instr.b]);
-    }
+    /* rC <- (rA cond rB); a write to r0 is discarded by dest_gpr(). */
+    tcg_gen_setcond_tl(flags, dest_gpr(dc, instr.c),
+                       load_gpr(dc, instr.a), load_gpr(dc, instr.b));
}
/* Math/logic instructions */
-#define gen_r_math_logic(fname, insn, op3) \
-static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
-{ \
- R_TYPE(instr, (code)); \
- if (likely(instr.c != R_ZERO)) { \
- tcg_gen_##insn(cpu_R[instr.c], load_gpr((dc), instr.a), (op3)); \
- } \
-}
-
-gen_r_math_logic(add, add_tl, load_gpr(dc, instr.b))
-gen_r_math_logic(sub, sub_tl, load_gpr(dc, instr.b))
-gen_r_math_logic(mul, mul_tl, load_gpr(dc, instr.b))
-
-gen_r_math_logic(and, and_tl, load_gpr(dc, instr.b))
-gen_r_math_logic(or, or_tl, load_gpr(dc, instr.b))
-gen_r_math_logic(xor, xor_tl, load_gpr(dc, instr.b))
-gen_r_math_logic(nor, nor_tl, load_gpr(dc, instr.b))
-
-gen_r_math_logic(srai, sari_tl, instr.imm5)
-gen_r_math_logic(srli, shri_tl, instr.imm5)
-gen_r_math_logic(slli, shli_tl, instr.imm5)
-gen_r_math_logic(roli, rotli_tl, instr.imm5)
-
-#define gen_r_mul(fname, insn) \
-static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
-{ \
- R_TYPE(instr, (code)); \
- if (likely(instr.c != R_ZERO)) { \
- TCGv t0 = tcg_temp_new(); \
- tcg_gen_##insn(t0, cpu_R[instr.c], \
- load_gpr(dc, instr.a), load_gpr(dc, instr.b)); \
- tcg_temp_free(t0); \
- } \
-}
-
-gen_r_mul(mulxss, muls2_tl)
-gen_r_mul(mulxuu, mulu2_tl)
-gen_r_mul(mulxsu, mulsu2_tl)
-
-#define gen_r_shift_s(fname, insn) \
-static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
-{ \
- R_TYPE(instr, (code)); \
- if (likely(instr.c != R_ZERO)) { \
- TCGv t0 = tcg_temp_new(); \
- tcg_gen_andi_tl(t0, load_gpr((dc), instr.b), 31); \
- tcg_gen_##insn(cpu_R[instr.c], load_gpr((dc), instr.a), t0); \
- tcg_temp_free(t0); \
- } \
-}
-
-gen_r_shift_s(sra, sar_tl)
-gen_r_shift_s(srl, shr_tl)
-gen_r_shift_s(sll, shl_tl)
-gen_r_shift_s(rol, rotl_tl)
-gen_r_shift_s(ror, rotr_tl)
+/* rC <- rA <op> IMM5, for the shift/rotate-by-immediate insns. */
+static void do_ri_math_logic(DisasContext *dc, uint32_t insn, GenFn2i *fn)
+{
+    R_TYPE(instr, insn);
+    fn(dest_gpr(dc, instr.c), load_gpr(dc, instr.a), instr.imm5);
+}
-static void divs(DisasContext *dc, uint32_t code, uint32_t flags)
+/* rC <- rA <op> rB, for the register-register ALU insns. */
+static void do_rr_math_logic(DisasContext *dc, uint32_t insn, GenFn3 *fn)
{
-    R_TYPE(instr, (code));
+    R_TYPE(instr, insn);
+    fn(dest_gpr(dc, instr.c), load_gpr(dc, instr.a), load_gpr(dc, instr.b));
+}
-    /* Stores into R_ZERO are ignored */
-    if (unlikely(instr.c == R_ZERO)) {
-        return;
-    }
+/* Instantiate a decode-table handler around one tcg_gen_* expander. */
+#define gen_ri_math_logic(fname, insn) \
+    static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
+    { do_ri_math_logic(dc, code, tcg_gen_##insn##_tl); }
+
+#define gen_rr_math_logic(fname, insn) \
+    static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
+    { do_rr_math_logic(dc, code, tcg_gen_##insn##_tl); }
+
+gen_rr_math_logic(add, add)
+gen_rr_math_logic(sub, sub)
+gen_rr_math_logic(mul, mul)
-    TCGv t0 = tcg_temp_new();
-    TCGv t1 = tcg_temp_new();
-    TCGv t2 = tcg_temp_new();
-    TCGv t3 = tcg_temp_new();
+gen_rr_math_logic(and, and)
+gen_rr_math_logic(or, or)
+gen_rr_math_logic(xor, xor)
+gen_rr_math_logic(nor, nor)
-    tcg_gen_ext32s_tl(t0, load_gpr(dc, instr.a));
-    tcg_gen_ext32s_tl(t1, load_gpr(dc, instr.b));
-    tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, INT_MIN);
-    tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1);
-    tcg_gen_and_tl(t2, t2, t3);
-    tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
-    tcg_gen_or_tl(t2, t2, t3);
-    tcg_gen_movi_tl(t3, 0);
-    tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
-    tcg_gen_div_tl(cpu_R[instr.c], t0, t1);
-    tcg_gen_ext32s_tl(cpu_R[instr.c], cpu_R[instr.c]);
+gen_ri_math_logic(srai, sari)
+gen_ri_math_logic(srli, shri)
+gen_ri_math_logic(slli, shli)
+gen_ri_math_logic(roli, rotli)
-    tcg_temp_free(t3);
-    tcg_temp_free(t2);
-    tcg_temp_free(t1);
-    tcg_temp_free(t0);
+/*
+ * rC <- high half of rA * rB.  The tcg_gen_mul*2 expanders always produce
+ * both halves, so the low half goes into a discard temp.
+ */
+static void do_rr_mul_high(DisasContext *dc, uint32_t insn, GenFn4 *fn)
+{
+    R_TYPE(instr, insn);
+    TCGv discard = tcg_temp_new();
+
+    fn(discard, dest_gpr(dc, instr.c),
+       load_gpr(dc, instr.a), load_gpr(dc, instr.b));
}
-static void divu(DisasContext *dc, uint32_t code, uint32_t flags)
+#define gen_rr_mul_high(fname, insn) \
+    static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
+    { do_rr_mul_high(dc, code, tcg_gen_##insn##_tl); }
+
+gen_rr_mul_high(mulxss, muls2)
+gen_rr_mul_high(mulxuu, mulu2)
+gen_rr_mul_high(mulxsu, mulsu2)
+
+/* rC <- rA shifted/rotated by (rB & 31). */
+static void do_rr_shift(DisasContext *dc, uint32_t insn, GenFn3 *fn)
{
-    R_TYPE(instr, (code));
+    R_TYPE(instr, insn);
+    TCGv sh = tcg_temp_new();
-    /* Stores into R_ZERO are ignored */
-    if (unlikely(instr.c == R_ZERO)) {
-        return;
-    }
+    tcg_gen_andi_tl(sh, load_gpr(dc, instr.b), 31);
+    fn(dest_gpr(dc, instr.c), load_gpr(dc, instr.a), sh);
+}
+
+#define gen_rr_shift(fname, insn) \
+    static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
+    { do_rr_shift(dc, code, tcg_gen_##insn##_tl); }
-    TCGv t0 = tcg_temp_new();
-    TCGv t1 = tcg_temp_new();
-    TCGv t2 = tcg_const_tl(0);
-    TCGv t3 = tcg_const_tl(1);
+gen_rr_shift(sra, sar)
+gen_rr_shift(srl, shr)
+gen_rr_shift(sll, shl)
+gen_rr_shift(rol, rotl)
+gen_rr_shift(ror, rotr)
-    tcg_gen_ext32u_tl(t0, load_gpr(dc, instr.a));
-    tcg_gen_ext32u_tl(t1, load_gpr(dc, instr.b));
-    tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
-    tcg_gen_divu_tl(cpu_R[instr.c], t0, t1);
-    tcg_gen_ext32s_tl(cpu_R[instr.c], cpu_R[instr.c]);
+/* rC <- rA / rB (signed), now performed in a helper. */
+static void divs(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+    R_TYPE(instr, (code));
+    gen_helper_divs(dest_gpr(dc, instr.c), tcg_env,
+                    load_gpr(dc, instr.a), load_gpr(dc, instr.b));
+}
-    tcg_temp_free(t3);
-    tcg_temp_free(t2);
-    tcg_temp_free(t1);
-    tcg_temp_free(t0);
+/* rC <- rA / rB (unsigned), now performed in a helper. */
+static void divu(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+    R_TYPE(instr, (code));
+    gen_helper_divu(dest_gpr(dc, instr.c), tcg_env,
+                    load_gpr(dc, instr.a), load_gpr(dc, instr.b));
}
static void trap(DisasContext *dc, uint32_t code, uint32_t flags)
* things easier for cpu_loop if we pop this into env->error_code.
*/
R_TYPE(instr, code);
- tcg_gen_st_i32(tcg_constant_i32(instr.imm5), cpu_env,
+ tcg_gen_st_i32(tcg_constant_i32(instr.imm5), tcg_env,
offsetof(CPUNios2State, error_code));
#endif
t_gen_helper_raise_exception(dc, EXCP_TRAP);
}
+/*
+ * break imm5: raise EXCP_BREAK, except that "break 1" raises the
+ * semihosting trap when semihosting is enabled for the current mode.
+ */
+static void gen_break(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+#ifndef CONFIG_USER_ONLY
+    /* The semihosting instruction is "break 1". */
+    bool is_user = FIELD_EX32(dc->tb_flags, TBFLAGS, U);
+    R_TYPE(instr, code);
+    if (semihosting_enabled(is_user) && instr.imm5 == 1) {
+        t_gen_helper_raise_exception(dc, EXCP_SEMIHOST);
+        return;
+    }
+#endif
+
+    t_gen_helper_raise_exception(dc, EXCP_BREAK);
+}
+
static const Nios2Instruction r_type_instructions[] = {
INSTRUCTION_ILLEGAL(),
INSTRUCTION(eret), /* eret */
INSTRUCTION_ILLEGAL(),
INSTRUCTION(slli), /* slli */
INSTRUCTION(sll), /* sll */
- INSTRUCTION_UNIMPLEMENTED(), /* wrprs */
+ INSTRUCTION(wrprs), /* wrprs */
INSTRUCTION_ILLEGAL(),
INSTRUCTION(or), /* or */
INSTRUCTION(mulxsu), /* mulxsu */
INSTRUCTION(add), /* add */
INSTRUCTION_ILLEGAL(),
INSTRUCTION_ILLEGAL(),
- INSTRUCTION_FLG(gen_excp, EXCP_BREAK), /* break */
+ INSTRUCTION(gen_break), /* break */
INSTRUCTION_ILLEGAL(),
INSTRUCTION(nop), /* nop */
INSTRUCTION_ILLEGAL(),
};
#endif
-#include "exec/gen-icount.h"
-
/* generate intermediate code for basic block 'tb'. */
static void nios2_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
CPUNios2State *env = cs->env_ptr;
+ Nios2CPU *cpu = env_archcpu(env);
int page_insns;
dc->mem_idx = cpu_mmu_index(env, false);
+ dc->cr_state = cpu->cr_state;
+ dc->tb_flags = dc->base.tb->flags;
+ dc->eic_present = cpu->eic_present;
/* Bound the number of insns to execute to those left on the page. */
page_insns = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
return;
}
- dc->zero = NULL;
+ dc->sink = NULL;
instr = &i_type_instructions[op];
instr->handler(dc, code, instr->flags);
-
- if (dc->zero) {
- tcg_temp_free(dc->zero);
- }
}
static void nios2_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
/* Indicate where the next block should start */
switch (dc->base.is_jmp) {
case DISAS_TOO_MANY:
- case DISAS_UPDATE:
- /* Save the current PC back into the CPU register */
- tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
- tcg_gen_exit_tb(NULL, 0);
+ gen_goto_tb(dc, 0, dc->base.pc_next);
break;
- case DISAS_JUMP:
- /* The jump will already have updated the PC register */
+ case DISAS_UPDATE:
+ /* Save the current PC, and return to the main loop. */
+ tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
tcg_gen_exit_tb(NULL, 0);
break;
.disas_log = nios2_tr_disas_log,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+/* Translate one TB: thin wrapper around the common translator loop. */
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
+                           target_ulong pc, void *host_pc)
{
    DisasContext dc;
-    translator_loop(&nios2_tr_ops, &dc.base, cs, tb, max_insns);
+    translator_loop(cs, tb, max_insns, pc, host_pc, &nios2_tr_ops, &dc.base);
}
void nios2_cpu_dump_state(CPUState *cs, FILE *f, int flags)
}
#if !defined(CONFIG_USER_ONLY)
- for (i = 0; i < NUM_CR_REGS; i++) {
- qemu_fprintf(f, "%9s=%8.8x ", cr_regnames[i], env->ctrl[i]);
- if ((i + 1) % 4 == 0) {
- qemu_fprintf(f, "\n");
+ int j;
+
+ for (i = j = 0; i < NUM_CR_REGS; i++) {
+ if (!nios2_cr_reserved(&cpu->cr_state[i])) {
+ qemu_fprintf(f, "%9s=%8.8x ", cr_regnames[i], env->ctrl[i]);
+ if (++j % 4 == 0) {
+ qemu_fprintf(f, "\n");
+ }
}
}
- qemu_fprintf(f, " mmu write: VPN=%05X PID %02X TLBACC %08X\n",
- env->mmu.pteaddr_wr & R_CR_PTEADDR_VPN_MASK,
- (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK) >> 4,
- env->mmu.tlbacc_wr);
+ if (j % 4 != 0) {
+ qemu_fprintf(f, "\n");
+ }
+ if (cpu->mmu_present) {
+ qemu_fprintf(f, " mmu write: VPN=%05X PID %02X TLBACC %08X\n",
+ env->mmu.pteaddr_wr & R_CR_PTEADDR_VPN_MASK,
+ FIELD_EX32(env->mmu.tlbmisc_wr, CR_TLBMISC, PID),
+ env->mmu.tlbacc_wr);
+ }
#endif
qemu_fprintf(f, "\n\n");
}
+/*
+ * Allocate the TCG globals: the register bank, the current-register-set
+ * window (system mode only), and the pc.
+ */
void nios2_tcg_init(void)
{
-    int i;
+#ifndef CONFIG_USER_ONLY
+    /* "crs" points at the base of the current register set within env. */
+    TCGv_ptr crs = tcg_global_mem_new_ptr(tcg_env,
+                                          offsetof(CPUNios2State, regs), "crs");
-    for (i = 0; i < NUM_GP_REGS; i++) {
-        cpu_R[i] = tcg_global_mem_new(cpu_env,
-                                      offsetof(CPUNios2State, regs[i]),
+    for (int i = 0; i < NUM_GP_REGS; i++) {
+        cpu_crs_R[i] = tcg_global_mem_new(crs, 4 * i, gr_regnames[i]);
+    }
+
+/* cpu_R always names register set 0; pick its env offset per config. */
+#define offsetof_regs0(N) offsetof(CPUNios2State, shadow_regs[0][N])
+#else
+#define offsetof_regs0(N) offsetof(CPUNios2State, regs[N])
+#endif
+
+    for (int i = 0; i < NUM_GP_REGS; i++) {
+        cpu_R[i] = tcg_global_mem_new(tcg_env, offsetof_regs0(i),
                                      gr_regnames[i]);
    }
-    cpu_pc = tcg_global_mem_new(cpu_env,
-                                offsetof(CPUNios2State, pc), "pc");
-}
-void restore_state_to_opc(CPUNios2State *env, TranslationBlock *tb,
-                          target_ulong *data)
-{
-    env->pc = data[0];
+#undef offsetof_regs0
+
+    cpu_pc = tcg_global_mem_new(tcg_env,
+                                offsetof(CPUNios2State, pc), "pc");
}