tcg_temp_free_i32(tcg_excp);
}
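+/* Raise an exception with the given syndrome, routed to target_el */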
-static void gen_exception(int excp, uint32_t syndrome)
+static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
TCGv_i32 tcg_excp = tcg_const_i32(excp);
TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
+ TCGv_i32 tcg_el = tcg_const_i32(target_el);
- gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn);
+ gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
+ tcg_syn, tcg_el);
+
+ tcg_temp_free_i32(tcg_el);
tcg_temp_free_i32(tcg_syn);
tcg_temp_free_i32(tcg_excp);
}
* of the exception, and our syndrome information is always correct.
*/
gen_ss_advance(s);
- gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex));
+ gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
+ default_exception_el(s));
s->is_jmp = DISAS_EXC;
}
* generate a conditional branch based on ARM condition code cc.
 * This is common between ARM and AArch64 targets.
*/
-void arm_gen_test_cc(int cc, int label)
+void arm_gen_test_cc(int cc, TCGLabel *label)
{
TCGv_i32 tmp;
- int inv;
+ TCGLabel *inv;
switch (cc) {
case 0: /* eq: Z */
s->is_jmp = DISAS_JUMP;
}
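+/* Restore the insn's condexec state and PC, then raise the exception at target_el */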
-static void gen_exception_insn(DisasContext *s, int offset, int excp, int syn)
+static void gen_exception_insn(DisasContext *s, int offset, int excp,
+ int syn, uint32_t target_el)
{
gen_set_condexec(s);
gen_set_pc_im(s, s->pc - offset);
- gen_exception(excp, syn);
+ gen_exception(excp, syn, target_el);
s->is_jmp = DISAS_JUMP;
}
} else {
tmp = tcg_temp_new_i32();
iwmmxt_load_reg(cpu_V0, rd);
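+ /* extrl_i64_i32 takes the low 32 bits of the 64-bit source; it is the new name for the op formerly called trunc_i64_i32 */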
- tcg_gen_trunc_i64_i32(tmp, cpu_V0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_V0);
}
tcg_gen_andi_i32(tmp, tmp, mask);
tcg_gen_mov_i32(dest, tmp);
rdhi = (insn >> 16) & 0xf;
if (insn & ARM_CP_RW_BIT) { /* TMRRC */
iwmmxt_load_reg(cpu_V0, wrd);
- tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
+ tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
- tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
+ tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
} else { /* TMCRR */
tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
iwmmxt_store_reg(cpu_V0, wrd);
if (insn & (1 << 22)) { /* WSTRD */
gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
} else { /* WSTRW wRd */
- tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
gen_aa32_st32(tmp, addr, get_mem_index(s));
}
} else {
if (insn & (1 << 22)) { /* WSTRH */
- tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
gen_aa32_st16(tmp, addr, get_mem_index(s));
} else { /* WSTRB */
- tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
gen_aa32_st8(tmp, addr, get_mem_index(s));
}
}
switch ((insn >> 22) & 3) {
case 0:
tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
- tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
if (insn & 8) {
tcg_gen_ext8s_i32(tmp, tmp);
} else {
break;
case 1:
tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
- tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
if (insn & 8) {
tcg_gen_ext16s_i32(tmp, tmp);
} else {
break;
case 2:
tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
- tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
break;
}
store_reg(s, rd, tmp);
if (insn & ARM_CP_RW_BIT) { /* MRA */
iwmmxt_load_reg(cpu_V0, acc);
- tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
+ tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
- tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
+ tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
} else { /* MAR */
tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
} else {
gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
}
- tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
+ tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
tcg_temp_free_i32(tcg_tmp);
tcg_temp_free_i64(tcg_res);
* for invalid encodings; we will generate incorrect syndrome information
* for attempts to execute invalid vfp/neon encodings with FP disabled.
*/
- if (!s->cpacr_fpen) {
+ if (s->fp_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, s->thumb));
+ syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
return 0;
}
static void gen_nop_hint(DisasContext *s, int val)
{
switch (val) {
+ case 1: /* yield */
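+ /* YIELD hints that nothing useful is happening on this vCPU; end the TB so the main loop can schedule another one */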
+ gen_set_pc_im(s, s->pc);
+ s->is_jmp = DISAS_YIELD;
+ break;
case 3: /* wfi */
gen_set_pc_im(s, s->pc);
s->is_jmp = DISAS_WFI;
* for invalid encodings; we will generate incorrect syndrome information
* for attempts to execute invalid vfp/neon encodings with FP disabled.
*/
- if (!s->cpacr_fpen) {
+ if (s->fp_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, s->thumb));
+ syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
return 0;
}
switch (size) {
case 0: gen_helper_neon_narrow_u8(dest, src); break;
case 1: gen_helper_neon_narrow_u16(dest, src); break;
- case 2: tcg_gen_trunc_i64_i32(dest, src); break;
+ case 2: tcg_gen_extrl_i64_i32(dest, src); break;
default: abort();
}
}
* for invalid encodings; we will generate incorrect syndrome information
* for attempts to execute invalid vfp/neon encodings with FP disabled.
*/
- if (!s->cpacr_fpen) {
+ if (s->fp_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, s->thumb));
+ syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
return 0;
}
break;
case 2:
tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
- tcg_gen_trunc_i64_i32(tmp, cpu_V0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_V0);
break;
default: abort();
}
case 2:
tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
- tcg_gen_trunc_i64_i32(tmp, cpu_V0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_V0);
break;
default: abort();
}
break;
}
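+ /* the access check helper can raise an exception; s->pc has already advanced past this insn, so wind it back by 4 so the exception sees the coprocessor insn's own address */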
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->pc - 4);
tmpptr = tcg_const_ptr(ri);
tcg_syn = tcg_const_i32(syndrome);
gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
}
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
store_reg(s, rt, tmp);
tcg_gen_shri_i64(tmp64, tmp64, 32);
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
store_reg(s, rt2, tmp);
} else {
{
TCGv_i32 tmp;
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, val);
+ tcg_gen_extrl_i64_i32(tmp, val);
store_reg(s, rlow, tmp);
tmp = tcg_temp_new_i32();
tcg_gen_shri_i64(val, val, 32);
- tcg_gen_trunc_i64_i32(tmp, val);
+ tcg_gen_extrl_i64_i32(tmp, val);
store_reg(s, rhigh, tmp);
}
{
TCGv_i32 tmp;
TCGv_i64 val64, extaddr;
- int done_label;
- int fail_label;
+ TCGLabel *done_label;
+ TCGLabel *fail_label;
/* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
[addr] = {Rt};
/* bkpt */
ARCH(5);
gen_exception_insn(s, 4, EXCP_BKPT,
- syn_aa32_bkpt(imm16, false));
+ syn_aa32_bkpt(imm16, false),
+ default_exception_el(s));
break;
case 2:
/* Hypervisor call (v7) */
tmp64 = gen_muls_i64_i32(tmp, tmp2);
tcg_gen_shri_i64(tmp64, tmp64, 16);
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
if ((sh & 2) == 0) {
tmp2 = load_reg(s, rn);
}
} else {
int address_offset;
- int load;
+ bool load = insn & (1 << 20);
+ bool doubleword = false;
/* Misc load/store */
rn = (insn >> 16) & 0xf;
rd = (insn >> 12) & 0xf;
+
+ if (!load && (sh & 2)) {
+ /* doubleword */
+ ARCH(5TE);
+ if (rd & 1) {
+ /* UNPREDICTABLE; we choose to UNDEF */
+ goto illegal_op;
+ }
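+ /* sh == 2 is LDRD, sh == 3 is STRD */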
+ load = (sh & 1) == 0;
+ doubleword = true;
+ }
+
addr = load_reg(s, rn);
if (insn & (1 << 24))
gen_add_datah_offset(s, insn, 0, addr);
address_offset = 0;
- if (insn & (1 << 20)) {
- /* load */
- tmp = tcg_temp_new_i32();
- switch(sh) {
- case 1:
- gen_aa32_ld16u(tmp, addr, get_mem_index(s));
- break;
- case 2:
- gen_aa32_ld8s(tmp, addr, get_mem_index(s));
- break;
- default:
- case 3:
- gen_aa32_ld16s(tmp, addr, get_mem_index(s));
- break;
- }
- load = 1;
- } else if (sh & 2) {
- ARCH(5TE);
- /* doubleword */
- if (sh & 1) {
+
+ if (doubleword) {
+ if (!load) {
/* store */
tmp = load_reg(s, rd);
gen_aa32_st32(tmp, addr, get_mem_index(s));
tmp = load_reg(s, rd + 1);
gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
- load = 0;
} else {
/* load */
tmp = tcg_temp_new_i32();
tmp = tcg_temp_new_i32();
gen_aa32_ld32u(tmp, addr, get_mem_index(s));
rd++;
- load = 1;
}
address_offset = -4;
+ } else if (load) {
+ /* load */
+ tmp = tcg_temp_new_i32();
+ switch (sh) {
+ case 1:
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
+ break;
+ case 2:
+ gen_aa32_ld8s(tmp, addr, get_mem_index(s));
+ break;
+ default:
+ case 3:
+ gen_aa32_ld16s(tmp, addr, get_mem_index(s));
+ break;
+ }
} else {
/* store */
tmp = load_reg(s, rd);
gen_aa32_st16(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
- load = 0;
}
/* Perform base writeback before the loaded value to
ensure correct behavior with overlapping index registers.
- ldrd with base writeback is is undefined if the
+ ldrd with base writeback is undefined if the
destination and index registers overlap. */
if (!(insn & (1 << 24))) {
gen_add_datah_offset(s, insn, address_offset, addr);
}
tcg_gen_shri_i64(tmp64, tmp64, 32);
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
store_reg(s, rn, tmp);
break;
case 0x08:
case 0x09:
{
- int j, n, user, loaded_base;
+ int j, n, loaded_base;
+ bool exc_return = false;
+ bool is_load = extract32(insn, 20, 1);
+ bool user = false;
TCGv_i32 loaded_var;
/* load/store multiple words */
/* XXX: store correct base if write back */
- user = 0;
if (insn & (1 << 22)) {
+ /* LDM (user), LDM (exception return) and STM (user) */
if (IS_USER(s))
goto illegal_op; /* only usable in supervisor mode */
- if ((insn & (1 << 15)) == 0)
- user = 1;
+ if (is_load && extract32(insn, 15, 1)) {
+ exc_return = true;
+ } else {
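+ /* S bit without a PC load: transfer the user-mode registers */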
+ user = true;
+ }
}
rn = (insn >> 16) & 0xf;
addr = load_reg(s, rn);
j = 0;
for(i=0;i<16;i++) {
if (insn & (1 << i)) {
- if (insn & (1 << 20)) {
+ if (is_load) {
/* load */
tmp = tcg_temp_new_i32();
gen_aa32_ld32u(tmp, addr, get_mem_index(s));
if (loaded_base) {
store_reg(s, rn, loaded_var);
}
- if ((insn & (1 << 22)) && !user) {
+ if (exc_return) {
/* Restore CPSR from SPSR. */
tmp = load_cpu_field(spsr);
gen_set_cpsr(tmp, CPSR_ERET_MASK);
break;
default:
illegal_op:
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
break;
}
}
op = (insn >> 21) & 0xf;
if (op == 6) {
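+ /* PKHBT/PKHTB are UNDEFINED without the DSP extension */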
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ goto illegal_op;
+ }
/* Halfword pack. */
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
store_reg_bx(s, rd, tmp);
break;
case 1: /* Sign/zero extend. */
+ op = (insn >> 20) & 7;
+ switch (op) {
+ case 0: /* SXTAH, SXTH */
+ case 1: /* UXTAH, UXTH */
+ case 4: /* SXTAB, SXTB */
+ case 5: /* UXTAB, UXTB */
+ break;
+ case 2: /* SXTAB16, SXTB16 */
+ case 3: /* UXTAB16, UXTB16 */
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ goto illegal_op;
+ }
+ break;
+ default:
+ goto illegal_op;
+ }
+ if (rn != 15) {
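+ /* the accumulate forms (SXTAB etc.) also require the DSP extension */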
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ goto illegal_op;
+ }
+ }
tmp = load_reg(s, rm);
shift = (insn >> 4) & 3;
/* ??? In many cases it's not necessary to do a
case 3: gen_uxtb16(tmp); break;
case 4: gen_sxtb(tmp); break;
case 5: gen_uxtb(tmp); break;
- default: goto illegal_op;
+ default:
+ g_assert_not_reached();
}
if (rn != 15) {
tmp2 = load_reg(s, rn);
store_reg(s, rd, tmp);
break;
case 2: /* SIMD add/subtract. */
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ goto illegal_op;
+ }
op = (insn >> 20) & 7;
shift = (insn >> 4) & 7;
if ((op & 3) == 3 || (shift & 3) == 3)
op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
if (op < 4) {
/* Saturating add/subtract. */
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ goto illegal_op;
+ }
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
if (op & 1)
gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
} else {
+ switch (op) {
+ case 0x0a: /* rbit */
+ case 0x08: /* rev */
+ case 0x09: /* rev16 */
+ case 0x0b: /* revsh */
+ case 0x18: /* clz */
+ break;
+ case 0x10: /* sel */
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ goto illegal_op;
+ }
+ break;
+ case 0x20: /* crc32/crc32c */
+ case 0x21:
+ case 0x22:
+ case 0x28:
+ case 0x29:
+ case 0x2a:
+ if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
+ goto illegal_op;
+ }
+ break;
+ default:
+ goto illegal_op;
+ }
tmp = load_reg(s, rn);
switch (op) {
case 0x0a: /* rbit */
uint32_t sz = op & 0x3;
uint32_t c = op & 0x8;
- if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
- goto illegal_op;
- }
-
tmp2 = load_reg(s, rm);
if (sz == 0) {
tcg_gen_andi_i32(tmp2, tmp2, 0xff);
break;
}
default:
- goto illegal_op;
+ g_assert_not_reached();
}
}
store_reg(s, rd, tmp);
break;
case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
+ switch ((insn >> 20) & 7) {
+ case 0: /* 32 x 32 -> 32 */
+ case 7: /* Unsigned sum of absolute differences. */
+ break;
+ case 1: /* 16 x 16 -> 32 */
+ case 2: /* Dual multiply add. */
+ case 3: /* 32 * 16 -> 32msb */
+ case 4: /* Dual multiply subtract. */
+ case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ goto illegal_op;
+ }
+ break;
+ }
op = (insn >> 4) & 0xf;
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
tmp64 = gen_muls_i64_i32(tmp, tmp2);
tcg_gen_shri_i64(tmp64, tmp64, 16);
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
if (rs != 15)
{
}
tcg_gen_shri_i64(tmp64, tmp64, 32);
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
break;
case 7: /* Unsigned sum of absolute differences. */
store_reg(s, rd, tmp);
} else if ((op & 0xe) == 0xc) {
/* Dual multiply accumulate long. */
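+ /* UNDEF without the DSP extension; tmp and tmp2 are live, so free them before the goto */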
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ goto illegal_op;
+ }
if (op & 1)
gen_swap_half(tmp2);
gen_smul_dual(tmp, tmp2);
} else {
if (op & 8) {
/* smlalxy */
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
+ goto illegal_op;
+ }
gen_mulxy(tmp, tmp2, op & 2, op & 1);
tcg_temp_free_i32(tmp2);
tmp64 = tcg_temp_new_i64();
}
if (op & 4) {
/* umaal */
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ tcg_temp_free_i64(tmp64);
+ goto illegal_op;
+ }
gen_addq_lo(s, tmp64, rs);
gen_addq_lo(s, tmp64, rd);
} else if (op & 0x40) {
tmp2 = tcg_const_i32(imm);
if (op & 4) {
/* Unsigned. */
- if ((op & 1) && shift == 0)
+ if ((op & 1) && shift == 0) {
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ goto illegal_op;
+ }
gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
- else
+ } else {
gen_helper_usat(tmp, cpu_env, tmp, tmp2);
+ }
} else {
/* Signed. */
- if ((op & 1) && shift == 0)
+ if ((op & 1) && shift == 0) {
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ goto illegal_op;
+ }
gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
- else
+ } else {
gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
+ }
}
tcg_temp_free_i32(tmp2);
break;
{
int imm8 = extract32(insn, 0, 8);
ARCH(5);
- gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true));
+ gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
+ default_exception_el(s));
break;
}
}
return;
undef32:
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
return;
illegal_op:
undef:
- gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized());
+ gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
CPUARMState *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
CPUBreakpoint *bp;
- uint16_t *gen_opc_end;
int j, lj;
target_ulong pc_start;
target_ulong next_page_start;
dc->tb = tb;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
-
dc->is_jmp = DISAS_NEXT;
dc->pc = pc_start;
dc->singlestep_enabled = cs->singlestep_enabled;
dc->condjmp = 0;
dc->aarch64 = 0;
+ /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
+ * there is no secure EL1, so we route exceptions to EL3.
+ */
+ dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_el_is_aa64(env, 3);
dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
dc->user = (dc->current_el == 0);
#endif
dc->ns = ARM_TBFLAG_NS(tb->flags);
- dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
+ dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
}
}
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j)
* bits should be zero.
*/
assert(num_insns == 0);
- gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0));
+ gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
+ default_exception_el(dc));
goto done_generating;
}
* Also stop translation when a page boundary is reached. This
* ensures prefetch aborts occur at the right place. */
num_insns ++;
- } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
+ } while (!dc->is_jmp && !tcg_op_buf_full() &&
!cs->singlestep_enabled &&
!singlestep &&
!dc->ss_active &&
gen_set_condexec(dc);
if (dc->is_jmp == DISAS_SWI) {
gen_ss_advance(dc);
- gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
+ gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
+ default_exception_el(dc));
} else if (dc->is_jmp == DISAS_HVC) {
gen_ss_advance(dc);
- gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
+ gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
} else if (dc->is_jmp == DISAS_SMC) {
gen_ss_advance(dc);
- gen_exception(EXCP_SMC, syn_aa32_smc());
+ gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
} else if (dc->ss_active) {
gen_step_complete_exception(dc);
} else {
gen_set_condexec(dc);
if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
gen_ss_advance(dc);
- gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
+ gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
+ default_exception_el(dc));
} else if (dc->is_jmp == DISAS_HVC && !dc->condjmp) {
gen_ss_advance(dc);
- gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
+ gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
} else if (dc->is_jmp == DISAS_SMC && !dc->condjmp) {
gen_ss_advance(dc);
- gen_exception(EXCP_SMC, syn_aa32_smc());
+ gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
} else if (dc->ss_active) {
gen_step_complete_exception(dc);
} else {
break;
case DISAS_WFI:
gen_helper_wfi(cpu_env);
+ /* The helper doesn't necessarily throw an exception, but we
+ * must go back to the main loop to check for interrupts anyway.
+ */
+ tcg_gen_exit_tb(0);
break;
case DISAS_WFE:
gen_helper_wfe(cpu_env);
break;
+ case DISAS_YIELD:
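+ /* helper_yield leaves the cpu loop with EXCP_YIELD so other vCPUs get a turn */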
+ gen_helper_yield(cpu_env);
+ break;
case DISAS_SWI:
- gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
+ gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
+ default_exception_el(dc));
break;
case DISAS_HVC:
- gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
+ gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
break;
case DISAS_SMC:
- gen_exception(EXCP_SMC, syn_aa32_smc());
+ gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
break;
}
if (dc->condjmp) {
done_generating:
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
qemu_log("----------------\n");
qemu_log("IN: %s\n", lookup_symbol(pc_start));
- log_target_disas(env, pc_start, dc->pc - pc_start,
+ log_target_disas(cs, pc_start, dc->pc - pc_start,
dc->thumb | (dc->bswap_code << 1));
qemu_log("\n");
}
#endif
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;