/*
* m68k translation
- *
+ *
* Copyright (c) 2005-2007 CodeSourcery
* Written by Paul Brook
*
uint32_t fpcr;
struct TranslationBlock *tb;
int singlestep_enabled;
+ int is_mem;
} DisasContext;
#define DISAS_JUMP_NEXT 4
static inline int gen_load(DisasContext * s, int opsize, int addr, int sign)
{
int tmp;
+ s->is_mem = 1;
switch(opsize) {
case OS_BYTE:
tmp = gen_new_qreg(QMODE_I32);
/* Generate a store. */
static inline void gen_store(DisasContext *s, int opsize, int addr, int val)
{
+ s->is_mem = 1;
switch(opsize) {
case OS_BYTE:
gen_st(s, 8, addr, val);
{
if (s->cc_op == CC_OP_FLAGS)
return;
- gen_op_flush_flags(s->cc_op);
+ gen_flush_cc_op(s);
+ gen_op_flush_flags();
s->cc_op = CC_OP_FLAGS;
}
gen_op_logic_cc(gen_im32(val & 0xf));
gen_op_update_xflag_tst(gen_im32((val & 0x10) >> 4));
if (!ccr_only) {
- gen_op_mov32(QREG_SR, gen_im32(val & 0xff00));
+ gen_op_set_sr(gen_im32(val & 0xff00));
}
}
gen_op_and32(src1, src1, gen_im32(1));
gen_op_update_xflag_tst(src1);
if (!ccr_only) {
- gen_op_and32(QREG_SR, reg, gen_im32(0xff00));
+ gen_op_set_sr(reg);
}
}
else if ((insn & 0x3f) == 0x3c)
uint32_t base;
int op;
int l1;
-
+
base = s->pc;
op = (insn >> 8) & 0xf;
offset = (int8_t)insn;
DISAS_INSN(ff1)
{
-    cpu_abort(NULL, "Unimplemented insn: ff1");
+    /* Implement FF1 instead of aborting: set the condition codes from
+       the source data register, then apply the ff1 op to it in place.  */
+    int reg;
+    reg = DREG(insn, 0);
+    gen_logic_cc(s, reg);
+    gen_op_ff1(reg, reg);
}
static int gen_get_sr(DisasContext *s)
}
DEST_EA(insn, OS_LONG, res, NULL);
break;
- case 6: /* fmovem */
+ case 6: /* fmovem */
case 7:
{
int addr;
dest = QREG_F0;
while (mask) {
if (ext & mask) {
+ s->is_mem = 1;
if (ext & (1 << 13)) {
/* store */
gen_st(s, f64, addr, dest);
tmp = gen_new_qreg(QMODE_F32);
gen_op_f64_to_f32(tmp, res);
gen_op_f32_to_f64(res, tmp);
- }
+ }
gen_op_fp_result(res);
if (dest) {
gen_op_movf64(dest, res);
qemu_assert(0, "FSAVE not implemented");
}
+/* Emit code extracting one 16-bit half of VAL as a MAC multiply operand.
+   UPPER selects which half; its placement and extension follow the
+   MACSR mode bits sampled at translation time:
+     MACSR_FI (fractional): operand left in bits [31:16];
+     MACSR_SU (signed):     operand sign-extended to 32 bits;
+     otherwise (unsigned):  operand zero-extended to 32 bits.
+   Returns a freshly allocated 32-bit qreg; VAL is not modified.  */
+static inline int gen_mac_extract_word(DisasContext *s, int val, int upper)
+{
+    int tmp = gen_new_qreg(QMODE_I32);
+    if (s->env->macsr & MACSR_FI) {
+        if (upper)
+            gen_op_and32(tmp, val, gen_im32(0xffff0000));
+        else
+            gen_op_shl32(tmp, val, gen_im32(16));
+    } else if (s->env->macsr & MACSR_SU) {
+        if (upper)
+            gen_op_sar32(tmp, val, gen_im32(16));
+        else
+            gen_op_ext16s32(tmp, val);
+    } else {
+        if (upper)
+            gen_op_shr32(tmp, val, gen_im32(16));
+        else
+            gen_op_ext16u32(tmp, val);
+    }
+    return tmp;
+}
+
+/* ColdFire MAC/EMAC multiply-accumulate, optionally combined with an
+   operand load and address-register writeback (MAC-with-load form).
+   The extension word selects the operand halves, an optional product
+   shift and, on EMAC_B parts, a second (dual) accumulation.  The
+   multiply variant and saturation behaviour depend on the MACSR mode
+   bits sampled at translation time.  */
+DISAS_INSN(mac)
+{
+    int rx;
+    int ry;
+    uint16_t ext;
+    int acc;
+    int l1;
+    int tmp;
+    int addr;
+    int loadval;
+    int dual;
+    int saved_flags = -1;
+
+    ext = lduw_code(s->pc);
+    s->pc += 2;
+
+    /* Accumulator index: insn bit 7 supplies bit 0, ext bit 4 bit 1.  */
+    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
+    /* Dual accumulate = MAC-with-load form with a second accumulator
+       selected in the low bits of the extension word (EMAC_B only).  */
+    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
+    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
+        disas_undef(s, insn);
+        return;
+    }
+    if (insn & 0x30) {
+        /* MAC with load: effective address is masked by MAC_MASK.  */
+        tmp = gen_lea(s, insn, OS_LONG);
+        addr = gen_new_qreg(QMODE_I32);
+        gen_op_and32(addr, tmp, QREG_MAC_MASK);
+        /* Load the value now to ensure correct exception behavior.
+           Perform writeback after reading the MAC inputs.  */
+        loadval = gen_load(s, OS_LONG, addr, 0);
+
+        /* NOTE(review): the load form inverts the accumulator select
+           bit -- confirm against the EMAC instruction encoding.  */
+        acc ^= 1;
+        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
+        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
+    } else {
+        loadval = addr = -1;
+        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
+        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
+    }
+
+    gen_op_mac_clear_flags();
+    l1 = -1;
+    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
+        /* Skip the multiply if we know we will ignore it.  */
+        l1 = gen_new_label();
+        tmp = gen_new_qreg(QMODE_I32);
+        gen_op_and32(tmp, QREG_MACSR, gen_im32(1 << (acc + 8)));
+        gen_op_jmp_nz32(tmp, l1);
+    }
+
+    if ((ext & 0x0800) == 0) {
+        /* Word-sized operands: use the 16-bit halves selected by
+           ext bits 7 and 6.  */
+        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
+        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
+    }
+    if (s->env->macsr & MACSR_FI) {
+        gen_op_macmulf(rx, ry);
+    } else {
+        if (s->env->macsr & MACSR_SU)
+            gen_op_macmuls(rx, ry);
+        else
+            gen_op_macmulu(rx, ry);
+        /* Optional product shift, selected by ext bits 10:9.  */
+        switch ((ext >> 9) & 3) {
+        case 1:
+            gen_op_macshl();
+            break;
+        case 3:
+            gen_op_macshr();
+            break;
+        }
+    }
+
+    if (dual) {
+        /* Save the overflow flag from the multiply.  */
+        saved_flags = gen_new_qreg(QMODE_I32);
+        gen_op_mov32(saved_flags, QREG_MACSR);
+    }
+
+    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
+        /* Skip the accumulate if the value is already saturated.  */
+        l1 = gen_new_label();
+        tmp = gen_new_qreg(QMODE_I32);
+        gen_op_and32(tmp, QREG_MACSR, gen_im32(MACSR_PAV0 << acc));
+        gen_op_jmp_nz32(tmp, l1);
+    }
+
+    /* Insn bit 8 selects subtract vs add into the accumulator.  */
+    if (insn & 0x100)
+        gen_op_macsub(acc);
+    else
+        gen_op_macadd(acc);
+
+    /* Saturate according to the current MACSR mode.  */
+    if (s->env->macsr & MACSR_FI)
+        gen_op_macsatf(acc);
+    else if (s->env->macsr & MACSR_SU)
+        gen_op_macsats(acc);
+    else
+        gen_op_macsatu(acc);
+
+    if (l1 != -1)
+        gen_set_label(l1);
+
+    if (dual) {
+        /* Dual accumulate variant: repeat the accumulate/saturate
+           sequence for the second accumulator from ext bits 3:2.  */
+        acc = (ext >> 2) & 3;
+        /* Restore the overflow flag from the multiplier.  */
+        gen_op_mov32(QREG_MACSR, saved_flags);
+        if ((s->env->macsr & MACSR_OMC) != 0) {
+            /* Skip the accumulate if the value is already saturated.  */
+            l1 = gen_new_label();
+            tmp = gen_new_qreg(QMODE_I32);
+            gen_op_and32(tmp, QREG_MACSR, gen_im32(MACSR_PAV0 << acc));
+            gen_op_jmp_nz32(tmp, l1);
+        }
+        if (ext & 2)
+            gen_op_macsub(acc);
+        else
+            gen_op_macadd(acc);
+        if (s->env->macsr & MACSR_FI)
+            gen_op_macsatf(acc);
+        else if (s->env->macsr & MACSR_SU)
+            gen_op_macsats(acc);
+        else
+            gen_op_macsatu(acc);
+        if (l1 != -1)
+            gen_set_label(l1);
+    }
+    gen_op_mac_set_flags(acc);
+
+    if (insn & 0x30) {
+        /* Register and address writeback for the MAC-with-load form.  */
+        int rw;
+        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
+        gen_op_mov32(rw, loadval);
+        /* FIXME: Should address writeback happen with the masked or
+           unmasked value?  */
+        switch ((insn >> 3) & 7) {
+        case 3: /* Post-increment.  */
+            gen_op_add32(AREG(insn, 0), addr, gen_im32(4));
+            break;
+        case 4: /* Pre-decrement.  */
+            gen_op_mov32(AREG(insn, 0), addr);
+        }
+    }
+}
+
+/* MOVE from an accumulator to a data/address register.  The readback
+   format follows the MACSR mode: fractional, raw integer when
+   overflow/saturation checking (OMC) is off, otherwise saturated
+   signed or unsigned.  Insn bit 6 additionally clears the accumulator
+   after the move.  */
+DISAS_INSN(from_mac)
+{
+    int rx;
+    int acc;
+
+    rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
+    acc = (insn >> 9) & 3;
+    if (s->env->macsr & MACSR_FI) {
+        gen_op_get_macf(rx, acc);
+    } else if ((s->env->macsr & MACSR_OMC) == 0) {
+        gen_op_get_maci(rx, acc);
+    } else if (s->env->macsr & MACSR_SU) {
+        gen_op_get_macs(rx, acc);
+    } else {
+        gen_op_get_macu(rx, acc);
+    }
+    if (insn & 0x40)
+        gen_op_clear_mac(acc);
+}
+
+/* Copy accumulator SRC to accumulator DEST, then recompute the MAC
+   status flags for the destination.  */
+DISAS_INSN(move_mac)
+{
+    int src;
+    int dest;
+    src = insn & 3;
+    dest = (insn >> 9) & 3;
+    gen_op_move_mac(dest, src);
+    gen_op_mac_clear_flags();
+    gen_op_mac_set_flags(dest);
+}
+
+/* MOVE from the MAC status register to a data/address register.  */
+DISAS_INSN(from_macsr)
+{
+    int reg;
+
+    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
+    gen_op_mov32(reg, QREG_MACSR);
+}
+
+/* MOVE from the MAC address mask register to a data/address register.  */
+DISAS_INSN(from_mask)
+{
+    int reg;
+    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
+    gen_op_mov32(reg, QREG_MAC_MASK);
+}
+
+/* MOVE from an accumulator extension pair (insn bit 10 selects the
+   pair for accumulators 0/1 vs 2/3) to a register, read in fractional
+   or integer form per MACSR.  */
+DISAS_INSN(from_mext)
+{
+    int reg;
+    int acc;
+    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
+    acc = (insn & 0x400) ? 2 : 0;
+    if (s->env->macsr & MACSR_FI)
+        gen_op_get_mac_extf(reg, acc);
+    else
+        gen_op_get_mac_exti(reg, acc);
+}
+
+/* MOVE from MACSR to CCR: clear X and copy the low four MACSR bits
+   into the condition-code flags.  */
+DISAS_INSN(macsr_to_ccr)
+{
+    gen_op_mov32(QREG_CC_X, gen_im32(0));
+    gen_op_and32(QREG_CC_DEST, QREG_MACSR, gen_im32(0xf));
+    s->cc_op = CC_OP_FLAGS;
+}
+
+/* MOVE to an accumulator from an EA operand, stored according to the
+   MACSR mode (fractional / signed / unsigned), then recompute the MAC
+   flags for that accumulator.  */
+DISAS_INSN(to_mac)
+{
+    int acc;
+    int val;
+    acc = (insn >> 9) & 3;
+    SRC_EA(val, OS_LONG, 0, NULL);
+    if (s->env->macsr & MACSR_FI) {
+        gen_op_set_macf(val, acc);
+    } else if (s->env->macsr & MACSR_SU) {
+        gen_op_set_macs(val, acc);
+    } else {
+        gen_op_set_macu(val, acc);
+    }
+    gen_op_mac_clear_flags();
+    gen_op_mac_set_flags(acc);
+}
+
+/* MOVE to MACSR.  The MACSR mode bits are consulted at translation
+   time by the other MAC handlers, so end the TB with a lookup to
+   retranslate subsequent code against the new value.  */
+DISAS_INSN(to_macsr)
+{
+    int val;
+    SRC_EA(val, OS_LONG, 0, NULL);
+    gen_op_set_macsr(val);
+    gen_lookup_tb(s);
+}
+
+/* MOVE to the MAC address mask register; the upper 16 bits are forced
+   to all-ones.  */
+DISAS_INSN(to_mask)
+{
+    int val;
+    SRC_EA(val, OS_LONG, 0, NULL);
+    gen_op_or32(QREG_MAC_MASK, val, gen_im32(0xffff0000));
+}
+
+/* MOVE to an accumulator extension pair (insn bit 10 selects the pair
+   for accumulators 0/1 vs 2/3), stored in fractional, signed or
+   unsigned form per MACSR.  */
+DISAS_INSN(to_mext)
+{
+    int val;
+    int acc;
+    SRC_EA(val, OS_LONG, 0, NULL);
+    acc = (insn & 0x400) ? 2 : 0;
+    if (s->env->macsr & MACSR_FI)
+        gen_op_set_mac_extf(val, acc);
+    else if (s->env->macsr & MACSR_SU)
+        gen_op_set_mac_exts(val, acc);
+    else
+        gen_op_set_mac_extu(val, acc);
+}
+
static disas_proc opcode_table[65536];
static void
Later insn override earlier ones. */
void register_m68k_insns (CPUM68KState *env)
{
-#define INSN(name, opcode, mask, feature) \
+#define INSN(name, opcode, mask, feature) do { \
if (m68k_feature(env, M68K_FEATURE_##feature)) \
- register_opcode(disas_##name, 0x##opcode, 0x##mask)
+ register_opcode(disas_##name, 0x##opcode, 0x##mask); \
+ } while(0)
INSN(undef, 0000, 0000, CF_ISA_A);
INSN(arith_im, 0080, fff8, CF_ISA_A);
- INSN(bitrev, 00c0, fff8, CF_ISA_C);
+ INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC);
INSN(bitop_reg, 0100, f1c0, CF_ISA_A);
INSN(bitop_reg, 0140, f1c0, CF_ISA_A);
INSN(bitop_reg, 0180, f1c0, CF_ISA_A);
INSN(bitop_reg, 01c0, f1c0, CF_ISA_A);
INSN(arith_im, 0280, fff8, CF_ISA_A);
- INSN(byterev, 02c0, fff8, CF_ISA_A);
+ INSN(byterev, 02c0, fff8, CF_ISA_APLUSC);
INSN(arith_im, 0480, fff8, CF_ISA_A);
- INSN(ff1, 04c0, fff8, CF_ISA_C);
+ INSN(ff1, 04c0, fff8, CF_ISA_APLUSC);
INSN(arith_im, 0680, fff8, CF_ISA_A);
INSN(bitop_im, 0800, ffc0, CF_ISA_A);
INSN(bitop_im, 0840, ffc0, CF_ISA_A);
INSN(move, 1000, f000, CF_ISA_A);
INSN(move, 2000, f000, CF_ISA_A);
INSN(move, 3000, f000, CF_ISA_A);
- INSN(strldsr, 40e7, ffff, CF_ISA_A);
+ INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC);
INSN(negx, 4080, fff8, CF_ISA_A);
INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
INSN(lea, 41c0, f1c0, CF_ISA_A);
INSN(trap, 4e40, fff0, CF_ISA_A);
INSN(link, 4e50, fff8, CF_ISA_A);
INSN(unlk, 4e58, fff8, CF_ISA_A);
- INSN(move_to_usp, 4e60, fff8, CF_ISA_B);
- INSN(move_from_usp, 4e68, fff8, CF_ISA_B);
+ INSN(move_to_usp, 4e60, fff8, USP);
+ INSN(move_from_usp, 4e68, fff8, USP);
INSN(nop, 4e71, ffff, CF_ISA_A);
INSN(stop, 4e72, ffff, CF_ISA_A);
INSN(rte, 4e73, ffff, CF_ISA_A);
INSN(scc, 50c0, f0f8, CF_ISA_A);
INSN(addsubq, 5080, f1c0, CF_ISA_A);
INSN(tpf, 51f8, fff8, CF_ISA_A);
+
+ /* Branch instructions. */
INSN(branch, 6000, f000, CF_ISA_A);
+ /* Disable long branch instructions, then add back the ones we want. */
+ INSN(undef, 60ff, f0ff, CF_ISA_A); /* All long branches. */
+ INSN(branch, 60ff, f0ff, CF_ISA_B);
+ INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */
+ INSN(branch, 60ff, ffff, BRAL);
+
INSN(moveq, 7000, f100, CF_ISA_A);
INSN(mvzs, 7100, f100, CF_ISA_B);
INSN(or, 8000, f000, CF_ISA_A);
INSN(addsub, 9000, f000, CF_ISA_A);
INSN(subx, 9180, f1f8, CF_ISA_A);
INSN(suba, 91c0, f1c0, CF_ISA_A);
+
INSN(undef_mac, a000, f000, CF_ISA_A);
+ INSN(mac, a000, f100, CF_EMAC);
+ INSN(from_mac, a180, f9b0, CF_EMAC);
+ INSN(move_mac, a110, f9fc, CF_EMAC);
+ INSN(from_macsr,a980, f9f0, CF_EMAC);
+ INSN(from_mask, ad80, fff0, CF_EMAC);
+ INSN(from_mext, ab80, fbf0, CF_EMAC);
+ INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
+ INSN(to_mac, a100, f9c0, CF_EMAC);
+ INSN(to_macsr, a900, ffc0, CF_EMAC);
+ INSN(to_mext, ab00, fbc0, CF_EMAC);
+ INSN(to_mask, ad00, ffc0, CF_EMAC);
+
INSN(mov3q, a140, f1c0, CF_ISA_B);
INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */
INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */
int arg0 = qop->args[0];
int arg1 = qop->args[1];
int l1, l2;
-
+
gen_op_add32 (arg0, arg0, arg1);
l1 = gen_new_label();
l2 = gen_new_label();
/* generate intermediate code */
pc_start = tb->pc;
-
+
dc->tb = tb;
gen_opc_ptr = gen_opc_buf;
dc->singlestep_enabled = env->singlestep_enabled;
dc->fpcr = env->fpcr;
dc->user = (env->sr & SR_S) == 0;
+ dc->is_mem = 0;
nb_gen_labels = 0;
lj = -1;
do {
last_cc_op = dc->cc_op;
dc->insn_pc = dc->pc;
disas_m68k_insn(env, dc);
+
+ /* Terminate the TB on memory ops if watchpoints are present. */
+    /* FIXME: This should be replaced by the deterministic execution
+     * IRQ raising bits. */
+ if (dc->is_mem && env->nb_watchpoints)
+ break;
} while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
!env->singlestep_enabled &&
(pc_offset) < (TARGET_PAGE_SIZE - 32));
lj++;
while (lj <= j)
gen_opc_instr_start[lj++] = 0;
- tb->size = 0;
} else {
tb->size = dc->pc - pc_start;
}
#if !defined (CONFIG_USER_ONLY)
env->sr = 0x2700;
#endif
+ m68k_switch_sp(env);
/* ??? FP regs should be initialized to NaN. */
env->cc_op = CC_OP_FLAGS;
/* TODO: We should set PC from the interrupt vector. */
free(env);
}
-void cpu_dump_state(CPUState *env, FILE *f,
+void cpu_dump_state(CPUState *env, FILE *f,
int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
int flags)
{