#include "cpu.h"
#include "internals.h"
#include "disas/disas.h"
+#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "arm_ldst.h"
+#include "exec/semihost.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;
-#ifdef CONFIG_USER_ONLY
-TCGv_i64 cpu_exclusive_test;
-TCGv_i32 cpu_exclusive_info;
-#endif
/* FIXME: These should be removed. */
static TCGv_i32 cpu_F0s, cpu_F1s;
int i;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
offsetof(CPUARMState, exclusive_val), "exclusive_val");
-#ifdef CONFIG_USER_ONLY
- cpu_exclusive_test = tcg_global_mem_new_i64(cpu_env,
- offsetof(CPUARMState, exclusive_test), "exclusive_test");
- cpu_exclusive_info = tcg_global_mem_new_i32(cpu_env,
- offsetof(CPUARMState, exclusive_info), "exclusive_info");
-#endif
a64_translate_init();
}
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
if (reg == 15) {
- tcg_gen_andi_i32(var, var, ~1);
+ /* In Thumb mode, we must ignore bit 0.
+ * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
+ * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
+ * We choose to ignore [1:0] in ARM mode for all architecture versions.
+ */
+ tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
s->is_jmp = DISAS_JUMP;
}
tcg_gen_mov_i32(cpu_R[reg], var);
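[Editorial note: the PC-write masking above reduces to a one-line rule in plain C. A minimal sketch, not part of the patch; mask_pc_write is a hypothetical name:

    #include <stdint.h>
    #include <stdbool.h>

    /* Clear the bits a direct write to the PC must ignore:
     * bit 0 in Thumb state, bits [1:0] in ARM state. */
    static uint32_t mask_pc_write(uint32_t value, bool thumb)
    {
        return value & (thumb ? ~1u : ~3u);
    }
]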
* These functions work like tcg_gen_qemu_{ld,st}* except
* that the address argument is TCGv_i32 rather than TCGv.
*/
-#if TARGET_LONG_BITS == 32
-
-#define DO_GEN_LD(SUFF, OPC, BE32_XOR) \
-static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
- TCGv_i32 addr, int index) \
-{ \
- TCGMemOp opc = (OPC) | s->be_data; \
- /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
- if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
- TCGv addr_be = tcg_temp_new(); \
- tcg_gen_xori_i32(addr_be, addr, BE32_XOR); \
- tcg_gen_qemu_ld_i32(val, addr_be, index, opc); \
- tcg_temp_free(addr_be); \
- return; \
- } \
- tcg_gen_qemu_ld_i32(val, addr, index, opc); \
-}
-
-#define DO_GEN_ST(SUFF, OPC, BE32_XOR) \
-static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
- TCGv_i32 addr, int index) \
-{ \
- TCGMemOp opc = (OPC) | s->be_data; \
- /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
- if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
- TCGv addr_be = tcg_temp_new(); \
- tcg_gen_xori_i32(addr_be, addr, BE32_XOR); \
- tcg_gen_qemu_st_i32(val, addr_be, index, opc); \
- tcg_temp_free(addr_be); \
- return; \
- } \
- tcg_gen_qemu_st_i32(val, addr, index, opc); \
-}
-static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
- TCGv_i32 addr, int index)
+static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
{
- TCGMemOp opc = MO_Q | s->be_data;
- tcg_gen_qemu_ld_i64(val, addr, index, opc);
+ TCGv addr = tcg_temp_new();
+ tcg_gen_extu_i32_tl(addr, a32);
+
/* Not needed for user-mode BE32, where we use MO_BE instead. */
- if (!IS_USER_ONLY && s->sctlr_b) {
- tcg_gen_rotri_i64(val, val, 32);
+ if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
+ tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
}
+ return addr;
}
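[Editorial note: the XOR in gen_aa32_addr implements the BE32 byte-lane swap arithmetically. A worked sketch, not part of the patch, assuming the usual MO_SIZE encoding (0 = byte, 1 = halfword); be32_adjust is a hypothetical name:

    #include <stdint.h>

    static uint32_t be32_adjust(uint32_t addr, unsigned log2_size)
    {
        if (log2_size < 2) {               /* only byte and halfword accesses */
            addr ^= 4 - (1u << log2_size); /* byte: addr ^ 3, halfword: addr ^ 2 */
        }
        return addr;
    }

So a byte access to 0x1000 under SCTLR.B actually touches 0x1003, which is where a big-endian word layout places that byte.]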
-static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
- TCGv_i32 addr, int index)
+static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
+ int index, TCGMemOp opc)
{
- TCGMemOp opc = MO_Q | s->be_data;
- /* Not needed for user-mode BE32, where we use MO_BE instead. */
- if (!IS_USER_ONLY && s->sctlr_b) {
- TCGv_i64 tmp = tcg_temp_new_i64();
- tcg_gen_rotri_i64(tmp, val, 32);
- tcg_gen_qemu_st_i64(tmp, addr, index, opc);
- tcg_temp_free_i64(tmp);
- return;
- }
- tcg_gen_qemu_st_i64(val, addr, index, opc);
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+ tcg_gen_qemu_ld_i32(val, addr, index, opc);
+ tcg_temp_free(addr);
}
-#else
+static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
+ int index, TCGMemOp opc)
+{
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+ tcg_gen_qemu_st_i32(val, addr, index, opc);
+ tcg_temp_free(addr);
+}
-#define DO_GEN_LD(SUFF, OPC, BE32_XOR) \
+#define DO_GEN_LD(SUFF, OPC) \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
- TCGv_i32 addr, int index) \
+ TCGv_i32 a32, int index) \
{ \
- TCGMemOp opc = (OPC) | s->be_data; \
- TCGv addr64 = tcg_temp_new(); \
- tcg_gen_extu_i32_i64(addr64, addr); \
- /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
- if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
- tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
- } \
- tcg_gen_qemu_ld_i32(val, addr64, index, opc); \
- tcg_temp_free(addr64); \
-}
-
-#define DO_GEN_ST(SUFF, OPC, BE32_XOR) \
+ gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
+}
+
+#define DO_GEN_ST(SUFF, OPC) \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
- TCGv_i32 addr, int index) \
+ TCGv_i32 a32, int index) \
{ \
- TCGMemOp opc = (OPC) | s->be_data; \
- TCGv addr64 = tcg_temp_new(); \
- tcg_gen_extu_i32_i64(addr64, addr); \
- /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
- if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
- tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
- } \
- tcg_gen_qemu_st_i32(val, addr64, index, opc); \
- tcg_temp_free(addr64); \
+ gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
}
-static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
- TCGv_i32 addr, int index)
+static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
- TCGMemOp opc = MO_Q | s->be_data;
- TCGv addr64 = tcg_temp_new();
- tcg_gen_extu_i32_i64(addr64, addr);
- tcg_gen_qemu_ld_i64(val, addr64, index, opc);
-
/* Not needed for user-mode BE32, where we use MO_BE instead. */
if (!IS_USER_ONLY && s->sctlr_b) {
tcg_gen_rotri_i64(val, val, 32);
}
- tcg_temp_free(addr64);
}
-static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
- TCGv_i32 addr, int index)
+static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
+ int index, TCGMemOp opc)
{
- TCGMemOp opc = MO_Q | s->be_data;
- TCGv addr64 = tcg_temp_new();
- tcg_gen_extu_i32_i64(addr64, addr);
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+ tcg_gen_qemu_ld_i64(val, addr, index, opc);
+ gen_aa32_frob64(s, val);
+ tcg_temp_free(addr);
+}
+
+static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 a32, int index)
+{
+ gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
+}
+
+static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
+ int index, TCGMemOp opc)
+{
+ TCGv addr = gen_aa32_addr(s, a32, opc);
/* Not needed for user-mode BE32, where we use MO_BE instead. */
if (!IS_USER_ONLY && s->sctlr_b) {
- TCGv tmp = tcg_temp_new();
+ TCGv_i64 tmp = tcg_temp_new_i64();
tcg_gen_rotri_i64(tmp, val, 32);
- tcg_gen_qemu_st_i64(tmp, addr64, index, opc);
- tcg_temp_free(tmp);
+ tcg_gen_qemu_st_i64(tmp, addr, index, opc);
+ tcg_temp_free_i64(tmp);
} else {
- tcg_gen_qemu_st_i64(val, addr64, index, opc);
+ tcg_gen_qemu_st_i64(val, addr, index, opc);
}
- tcg_temp_free(addr64);
+ tcg_temp_free(addr);
}
-#endif
+static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 a32, int index)
+{
+ gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
+}
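[Editorial note: the rotate-by-32 used by gen_aa32_frob64 and gen_aa32_st64 simply swaps the two words of a doubleword, since BE32 only rearranges bytes within each 32-bit word. A sketch, not part of the patch:

    #include <stdint.h>

    /* Rotating a 64-bit value by 32 bits exchanges its word halves. */
    static uint64_t frob64_sketch(uint64_t v)
    {
        return (v >> 32) | (v << 32);
    }
]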
-DO_GEN_LD(8s, MO_SB, 3)
-DO_GEN_LD(8u, MO_UB, 3)
-DO_GEN_LD(16s, MO_SW, 2)
-DO_GEN_LD(16u, MO_UW, 2)
-DO_GEN_LD(32u, MO_UL, 0)
-/* 'a' variants include an alignment check */
-DO_GEN_LD(16ua, MO_UW | MO_ALIGN, 2)
-DO_GEN_LD(32ua, MO_UL | MO_ALIGN, 0)
-DO_GEN_ST(8, MO_UB, 3)
-DO_GEN_ST(16, MO_UW, 2)
-DO_GEN_ST(32, MO_UL, 0)
+DO_GEN_LD(8s, MO_SB)
+DO_GEN_LD(8u, MO_UB)
+DO_GEN_LD(16s, MO_SW)
+DO_GEN_LD(16u, MO_UW)
+DO_GEN_LD(32u, MO_UL)
+DO_GEN_ST(8, MO_UB)
+DO_GEN_ST(16, MO_UW)
+DO_GEN_ST(32, MO_UL)
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
s->is_jmp = DISAS_JUMP;
}
+static inline void gen_hlt(DisasContext *s, int imm)
+{
+ /* HLT. This has two purposes.
+ * Architecturally, it is an external halting debug instruction.
+     * Since QEMU doesn't implement external debug, we treat this as
+     * the architecture requires when halting debug is disabled: it
+     * will UNDEF.
+     * Secondly, "HLT 0x3C" is the T32 semihosting trap instruction,
+     * and "HLT 0xF000" is the A32 semihosting trap. These traps
+ * must trigger semihosting even for ARMv7 and earlier, where
+ * HLT was an undefined encoding.
+ * In system mode, we don't allow userspace access to
+ * semihosting, to provide some semblance of security
+ * (and for consistency with our 32-bit semihosting).
+ */
+ if (semihosting_enabled() &&
+#ifndef CONFIG_USER_ONLY
+ s->current_el != 0 &&
+#endif
+ (imm == (s->thumb ? 0x3c : 0xf000))) {
+ gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
+ return;
+ }
+
+ gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
+}
+
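[Editorial note: stripped of the CONFIG_USER_ONLY guard, the semihosting test in gen_hlt reduces to a check on the immediate. A hedged sketch, not part of the patch; is_semihosting_hlt is a hypothetical name:

    #include <stdbool.h>

    /* "HLT 0x3C" (Thumb) and "HLT 0xF000" (ARM) are the semihosting
     * encodings; any other immediate falls through to UNDEF. */
    static bool is_semihosting_hlt(int imm, bool thumb)
    {
        return imm == (thumb ? 0x3c : 0xf000);
    }
]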
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
TCGv_i32 var)
{
return 0;
}
-static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
+static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
- TranslationBlock *tb;
+#ifndef CONFIG_USER_ONLY
+ return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
+ ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
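[Editorial note: use_goto_tb compares against two pages because a TB can extend onto a second page; s->pc - 1 is a byte inside the instruction just translated, so the target is checked against that page as well as the TB start's page. The underlying test is just a mask comparison; a sketch assuming 4 KiB target pages:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_MASK_SKETCH (~(uint32_t)0xfff)  /* assumed 4 KiB pages */

    static bool same_page(uint32_t a, uint32_t b)
    {
        return (a & PAGE_MASK_SKETCH) == (b & PAGE_MASK_SKETCH);
    }
]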
- tb = s->tb;
- if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
+static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
+{
+ if (use_goto_tb(s, dest)) {
tcg_gen_goto_tb(n);
gen_set_pc_im(s, dest);
- tcg_gen_exit_tb((uintptr_t)tb + n);
+ tcg_gen_exit_tb((uintptr_t)s->tb + n);
} else {
gen_set_pc_im(s, dest);
tcg_gen_exit_tb(0);
s->is_jmp = DISAS_UPDATE;
}
-/* Generate an old-style exception return. Marks pc as dead. */
-static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
+/* Store value to PC as for an exception return (i.e. don't
+ * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
+ * will do the masking based on the new value of the Thumb bit.
+ */
+static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
- TCGv_i32 tmp;
- store_reg(s, 15, pc);
- tmp = load_cpu_field(spsr);
- gen_helper_cpsr_write_eret(cpu_env, tmp);
- tcg_temp_free_i32(tmp);
- s->is_jmp = DISAS_JUMP;
+ tcg_gen_mov_i32(cpu_R[15], pc);
+ tcg_temp_free_i32(pc);
}
/* Generate a v6 exception return. Marks both values as dead. */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
+ store_pc_exc_ret(s, pc);
+ /* The cpsr_write_eret helper will mask the low bits of PC
+ * appropriately depending on the new Thumb bit, so it must
+ * be called after storing the new PC.
+ */
gen_helper_cpsr_write_eret(cpu_env, cpsr);
tcg_temp_free_i32(cpsr);
- store_reg(s, 15, pc);
s->is_jmp = DISAS_JUMP;
}
+/* Generate an old-style exception return. Marks pc as dead. */
+static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
+{
+ gen_rfe(s, pc, load_cpu_field(spsr));
+}
+
static void gen_nop_hint(DisasContext *s, int val)
{
switch (val) {
op >= NEON_2RM_VRECPE_F);
}
+static bool neon_2rm_is_v8_op(int op)
+{
+ /* Return true if this neon 2reg-misc op is ARMv8 and up */
+ switch (op) {
+ case NEON_2RM_VRINTN:
+ case NEON_2RM_VRINTA:
+ case NEON_2RM_VRINTM:
+ case NEON_2RM_VRINTP:
+ case NEON_2RM_VRINTZ:
+ case NEON_2RM_VRINTX:
+ case NEON_2RM_VCVTAU:
+ case NEON_2RM_VCVTAS:
+ case NEON_2RM_VCVTNU:
+ case NEON_2RM_VCVTNS:
+ case NEON_2RM_VCVTPU:
+ case NEON_2RM_VCVTPS:
+ case NEON_2RM_VCVTMU:
+ case NEON_2RM_VCVTMS:
+ return true;
+ default:
+ return false;
+ }
+}
+
/* Each entry in this array has bit n set if the insn allows
* size value n (otherwise it will UNDEF). Since unallocated
* op values will have no bits set they always UNDEF.
if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
return 1;
}
+ if (neon_2rm_is_v8_op(op) &&
+ !arm_dc_feature(s, ARM_FEATURE_V8)) {
+ return 1;
+ }
if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
q && ((rm | rd) & 1)) {
return 1;
/* Load/Store exclusive instructions are implemented by remembering
the value/address loaded, and seeing if these are the same
- when the store is performed. This should be sufficient to implement
+ when the store is performed. This should be sufficient to implement
the architecturally mandated semantics, and avoids having to monitor
- regular stores.
-
- In system emulation mode only one CPU will be running at once, so
- this sequence is effectively atomic. In user emulation mode we
- throw an exception and handle the atomic operation elsewhere. */
+   regular stores.  The compare against the remembered value is done
+   during the cmpxchg operation, but we must compare the addresses
+   manually. */
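[Editorial note: the strategy described above can be sketched in plain C with GCC/Clang atomic builtins. Not part of the patch; the _sketch names are hypothetical and the real code also handles widths, endianness, and clearing the monitor:

    #include <stdint.h>

    static uint32_t excl_addr_sketch, excl_val_sketch;

    static uint32_t ldrex_sketch(uint32_t *p)
    {
        /* Remember the address and the loaded value. */
        excl_addr_sketch = (uint32_t)(uintptr_t)p;
        excl_val_sketch = __atomic_load_n(p, __ATOMIC_ACQUIRE);
        return excl_val_sketch;
    }

    static int strex_sketch(uint32_t *p, uint32_t newval)
    {
        uint32_t expected = excl_val_sketch;

        /* The address comparison must be done manually... */
        if (excl_addr_sketch != (uint32_t)(uintptr_t)p) {
            return 1;                       /* fail, as STREX returns 1 */
        }
        /* ...while the value comparison rides on the cmpxchg itself. */
        return __atomic_compare_exchange_n(p, &expected, newval, false,
                                           __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
               ? 0 : 1;
    }
]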
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
TCGv_i32 addr, int size)
{
TCGv_i32 tmp = tcg_temp_new_i32();
+ TCGMemOp opc = size | MO_ALIGN | s->be_data;
s->is_ldex = true;
- switch (size) {
- case 0:
- gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
- break;
- case 1:
- gen_aa32_ld16ua(s, tmp, addr, get_mem_index(s));
- break;
- case 2:
- case 3:
- gen_aa32_ld32ua(s, tmp, addr, get_mem_index(s));
- break;
- default:
- abort();
- }
-
if (size == 3) {
TCGv_i32 tmp2 = tcg_temp_new_i32();
- TCGv_i32 tmp3 = tcg_temp_new_i32();
+ TCGv_i64 t64 = tcg_temp_new_i64();
- tcg_gen_addi_i32(tmp2, addr, 4);
- gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
- tcg_temp_free_i32(tmp2);
- tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
- store_reg(s, rt2, tmp3);
+ gen_aa32_ld_i64(s, t64, addr, get_mem_index(s), opc);
+ tcg_gen_mov_i64(cpu_exclusive_val, t64);
+ tcg_gen_extr_i64_i32(tmp, tmp2, t64);
+ tcg_temp_free_i64(t64);
+
+ store_reg(s, rt2, tmp2);
} else {
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
}
tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
-#ifdef CONFIG_USER_ONLY
-static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
- TCGv_i32 addr, int size)
-{
- tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
- tcg_gen_movi_i32(cpu_exclusive_info,
- size | (rd << 4) | (rt << 8) | (rt2 << 12));
- gen_exception_internal_insn(s, 4, EXCP_STREX);
-}
-#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
TCGv_i32 addr, int size)
{
- TCGv_i32 tmp;
- TCGv_i64 val64, extaddr;
+ TCGv_i32 t0, t1, t2;
+ TCGv_i64 extaddr;
+ TCGv taddr;
TCGLabel *done_label;
TCGLabel *fail_label;
+ TCGMemOp opc = size | MO_ALIGN | s->be_data;
/* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
[addr] = {Rt};
tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
tcg_temp_free_i64(extaddr);
- tmp = tcg_temp_new_i32();
- switch (size) {
- case 0:
- gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
- break;
- case 1:
- gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
- break;
- case 2:
- case 3:
- gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
- break;
- default:
- abort();
- }
-
- val64 = tcg_temp_new_i64();
+ taddr = gen_aa32_addr(s, addr, opc);
+ t0 = tcg_temp_new_i32();
+ t1 = load_reg(s, rt);
if (size == 3) {
- TCGv_i32 tmp2 = tcg_temp_new_i32();
- TCGv_i32 tmp3 = tcg_temp_new_i32();
- tcg_gen_addi_i32(tmp2, addr, 4);
- gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
- tcg_temp_free_i32(tmp2);
- tcg_gen_concat_i32_i64(val64, tmp, tmp3);
- tcg_temp_free_i32(tmp3);
- } else {
- tcg_gen_extu_i32_i64(val64, tmp);
- }
- tcg_temp_free_i32(tmp);
+ TCGv_i64 o64 = tcg_temp_new_i64();
+ TCGv_i64 n64 = tcg_temp_new_i64();
- tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
- tcg_temp_free_i64(val64);
+ t2 = load_reg(s, rt2);
+ tcg_gen_concat_i32_i64(n64, t1, t2);
+ tcg_temp_free_i32(t2);
+ gen_aa32_frob64(s, n64);
- tmp = load_reg(s, rt);
- switch (size) {
- case 0:
- gen_aa32_st8(s, tmp, addr, get_mem_index(s));
- break;
- case 1:
- gen_aa32_st16(s, tmp, addr, get_mem_index(s));
- break;
- case 2:
- case 3:
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
- break;
- default:
- abort();
- }
- tcg_temp_free_i32(tmp);
- if (size == 3) {
- tcg_gen_addi_i32(addr, addr, 4);
- tmp = load_reg(s, rt2);
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
- tcg_temp_free_i32(tmp);
+ tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
+ get_mem_index(s), opc);
+ tcg_temp_free_i64(n64);
+
+ gen_aa32_frob64(s, o64);
+ tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
+ tcg_gen_extrl_i64_i32(t0, o64);
+
+ tcg_temp_free_i64(o64);
+ } else {
+ t2 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
+ tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
+ tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
+ tcg_temp_free_i32(t2);
}
- tcg_gen_movi_i32(cpu_R[rd], 0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free(taddr);
+ tcg_gen_mov_i32(cpu_R[rd], t0);
+ tcg_temp_free_i32(t0);
tcg_gen_br(done_label);
+
gen_set_label(fail_label);
tcg_gen_movi_i32(cpu_R[rd], 1);
gen_set_label(done_label);
tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
-#endif
/* gen_srs:
* @env: CPUARMState
case 4: /* dsb */
case 5: /* dmb */
ARCH(7);
- /* We don't emulate caches so these are a no-op. */
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
return;
case 6: /* isb */
/* We need to break the TB after this insn to execute
{
int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
switch (op1) {
+ case 0:
+ /* HLT */
+ gen_hlt(s, imm16);
+ break;
case 1:
/* bkpt */
ARCH(5);
gen_smc(s);
break;
default:
- goto illegal_op;
+ g_assert_not_reached();
}
break;
}
}
tcg_temp_free_i32(addr);
} else {
+ TCGv taddr;
+ TCGMemOp opc = s->be_data;
+
/* SWP instruction */
rm = (insn) & 0xf;
- /* ??? This is not really atomic. However we know
- we never have multiple CPUs running in parallel,
- so it is good enough. */
- addr = load_reg(s, rn);
- tmp = load_reg(s, rm);
- tmp2 = tcg_temp_new_i32();
if (insn & (1 << 22)) {
- gen_aa32_ld8u(s, tmp2, addr, get_mem_index(s));
- gen_aa32_st8(s, tmp, addr, get_mem_index(s));
+ opc |= MO_UB;
} else {
- gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ opc |= MO_UL | MO_ALIGN;
}
- tcg_temp_free_i32(tmp);
+
+ addr = load_reg(s, rn);
+ taddr = gen_aa32_addr(s, addr, opc);
tcg_temp_free_i32(addr);
- store_reg(s, rd, tmp2);
+
+ tmp = load_reg(s, rm);
+ tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
+ get_mem_index(s), opc);
+ tcg_temp_free(taddr);
+ store_reg(s, rd, tmp);
}
}
} else {
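[Editorial note: in plain C, the new SWP lowering corresponds to a single atomic exchange rather than the old separate load and store. A sketch using the GCC/Clang builtin, not part of the patch:

    #include <stdint.h>

    /* Atomically store newval and return the previous memory contents,
     * which is exactly SWP's register/memory swap. */
    static uint32_t swp_sketch(uint32_t *mem, uint32_t newval)
    {
        return __atomic_exchange_n(mem, newval, __ATOMIC_SEQ_CST);
    }
]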
} else if (i == rn) {
loaded_var = tmp;
loaded_base = 1;
+ } else if (rn == 15 && exc_return) {
+ store_pc_exc_ret(s, tmp);
} else {
store_reg_from_load(s, i, tmp);
}
break;
case 4: /* dsb */
case 5: /* dmb */
- /* These execute as NOPs. */
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
break;
case 6: /* isb */
/* We need to break the TB after this insn
break;
}
- case 0xa: /* rev */
+ case 0xa: /* rev, and hlt */
+ {
+ int op1 = extract32(insn, 6, 2);
+
+ if (op1 == 2) {
+ /* HLT */
+ int imm6 = extract32(insn, 0, 6);
+
+ gen_hlt(s, imm6);
+ break;
+ }
+
+ /* Otherwise this is rev */
ARCH(6);
rn = (insn >> 3) & 0x7;
rd = insn & 0x7;
tmp = load_reg(s, rn);
- switch ((insn >> 6) & 3) {
+ switch (op1) {
case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
case 1: gen_rev16(tmp); break;
case 3: gen_revsh(tmp); break;
- default: goto illegal_op;
+ default:
+ g_assert_not_reached();
}
store_reg(s, rd, tmp);
break;
+ }
case 6:
switch ((insn >> 5) & 7) {
}
do {
tcg_gen_insn_start(dc->pc,
- (dc->condexec_cond << 4) | (dc->condexec_mask >> 1));
+ (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
+ 0);
num_insns++;
#ifdef CONFIG_USER_ONLY
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
qemu_log("----------------\n");
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, dc->pc - pc_start,
dc->thumb | (dc->sctlr_b << 1));
qemu_log("\n");
+ qemu_log_unlock();
}
#endif
tb->size = dc->pc - pc_start;
if (is_a64(env)) {
env->pc = data[0];
env->condexec_bits = 0;
+ env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
} else {
env->regs[15] = data[0];
env->condexec_bits = data[1];
+ env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
}
}
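[Editorial note: the second insn_start word holds the IT-state packed exactly as env->condexec_bits stores it, and the new third word carries the syndrome, shifted on restore as shown above. A sketch of the translate-time packing, matching the tcg_gen_insn_start() call earlier in the patch; pack_condexec is a hypothetical name:

    #include <stdint.h>

    /* data[1]: IT-state in the env->condexec_bits layout. */
    static uint32_t pack_condexec(uint32_t cond, uint32_t mask)
    {
        return (cond << 4) | (mask >> 1);
    }
]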