X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=target%2Farm%2Ftranslate.c;h=7873c03ae8f074b894b8688eb4f7c49e199bc7be;hb=b1311c4acf503dc9c1a310cc40b64f05b08833dc;hp=5c6f9fea1ba3e3a506388c5190791fea90f4ecde;hpb=d02a8698d7ae2bfed3b11fe5b064cb0aa406863b;p=mirror_qemu.git

diff --git a/target/arm/translate.c b/target/arm/translate.c
index 5c6f9fea1b..7873c03ae8 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -82,7 +82,7 @@ void arm_translate_init(void)
     int i;
 
     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
-    tcg_ctx.tcg_env = cpu_env;
+    tcg_ctx->tcg_env = cpu_env;
 
     for (i = 0; i < 16; i++) {
         cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
@@ -4546,8 +4546,13 @@ static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
 static void gen_nop_hint(DisasContext *s, int val)
 {
     switch (val) {
+        /* When running in MTTCG we don't generate jumps to the yield and
+         * WFE helpers as it won't affect the scheduling of other vCPUs.
+         * If we wanted to more completely model WFE/SEV so we don't busy
+         * spin unnecessarily we would need to do something more involved.
+         */
     case 1: /* yield */
-        if (!parallel_cpus) {
+        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
             gen_set_pc_im(s, s->pc);
             s->base.is_jmp = DISAS_YIELD;
         }
@@ -4557,7 +4562,7 @@ static void gen_nop_hint(DisasContext *s, int val)
         s->base.is_jmp = DISAS_WFI;
         break;
     case 2: /* wfe */
-        if (!parallel_cpus) {
+        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
             gen_set_pc_im(s, s->pc);
             s->base.is_jmp = DISAS_WFE;
         }
@@ -7704,7 +7709,7 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
                 break;
             }
 
-            if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+            if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
                 gen_io_start();
             }
 
@@ -7795,7 +7800,7 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
                 }
             }
 
-            if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+            if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
                 /* I/O operations must end the TB here (whether read or write) */
                 gen_io_end();
                 gen_lookup_tb(s);
@@ -9620,6 +9625,44 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
     }
 }
 
+static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
+{
+    /* Return true if this is a 16 bit instruction. We must be precise
+     * about this (matching the decode). We assume that s->pc still
+     * points to the first 16 bits of the insn.
+     */
+    if ((insn >> 11) < 0x1d) {
+        /* Definitely a 16-bit instruction */
+        return true;
+    }
+
+    /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
+     * first half of a 32-bit Thumb insn. Thumb-1 cores might
+     * end up actually treating this as two 16-bit insns, though,
+     * if it's half of a bl/blx pair that might span a page boundary.
+     */
+    if (arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
+        /* Thumb2 cores (including all M profile ones) always treat
+         * 32-bit insns as 32-bit.
+         */
+        return false;
+    }
+
+    if ((insn >> 11) == 0x1e && (s->pc < s->next_page_start - 3)) {
+        /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
+         * is not on the next page; we merge this into a 32-bit
+         * insn.
+         */
+        return false;
+    }
+    /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
+     * 0b1111_1xxx_xxxx_xxxx : BL suffix;
+     * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
+     * -- handle as single 16 bit insn
+     */
+    return true;
+}
+
 /* Return true if this is a Thumb-2 logical op.  */
 static int thumb2_logic_op(int op)
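
An aside on the decode logic: the new thumb_insn_is_16bit() above keys entirely off the top five bits of the first halfword, with anything below 0b11101 being a complete 16-bit insn. A minimal standalone sketch of that first test, with a helper name and example encodings of our own (not part of the patch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper restating the first-halfword test used by
     * thumb_insn_is_16bit(): top five bits 0b11101 / 0b11110 / 0b11111
     * (i.e. >= 0x1d) open a 32-bit Thumb insn; anything else is 16-bit.
     */
    static bool halfword_opens_32bit_insn(uint16_t hw1)
    {
        return (hw1 >> 11) >= 0x1d;
    }

    int main(void)
    {
        printf("%d\n", halfword_opens_32bit_insn(0x4770)); /* BX lr: 16-bit -> 0 */
        printf("%d\n", halfword_opens_32bit_insn(0xf000)); /* BL/BLX prefix -> 1 */
        printf("%d\n", halfword_opens_32bit_insn(0xe97f)); /* first half of a
                                                              32-bit insn (e.g. SG) -> 1 */
        return 0;
    }
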
@@ -9705,9 +9748,9 @@ gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
 
 /* Translate a 32-bit thumb instruction.  Returns nonzero if the instruction
    is not legal.  */
-static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
+static int disas_thumb2_insn(DisasContext *s, uint32_t insn)
 {
-    uint32_t insn, imm, shift, offset;
+    uint32_t imm, shift, offset;
     uint32_t rd, rn, rm, rs;
     TCGv_i32 tmp;
     TCGv_i32 tmp2;
@@ -9719,52 +9762,9 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
     int conds;
     int logic_cc;
 
-    if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
-          || arm_dc_feature(s, ARM_FEATURE_M))) {
-        /* Thumb-1 cores may need to treat bl and blx as a pair of
-           16-bit instructions to get correct prefetch abort behavior.  */
-        insn = insn_hw1;
-        if ((insn & (1 << 12)) == 0) {
-            ARCH(5);
-            /* Second half of blx.  */
-            offset = ((insn & 0x7ff) << 1);
-            tmp = load_reg(s, 14);
-            tcg_gen_addi_i32(tmp, tmp, offset);
-            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
-
-            tmp2 = tcg_temp_new_i32();
-            tcg_gen_movi_i32(tmp2, s->pc | 1);
-            store_reg(s, 14, tmp2);
-            gen_bx(s, tmp);
-            return 0;
-        }
-        if (insn & (1 << 11)) {
-            /* Second half of bl.  */
-            offset = ((insn & 0x7ff) << 1) | 1;
-            tmp = load_reg(s, 14);
-            tcg_gen_addi_i32(tmp, tmp, offset);
-
-            tmp2 = tcg_temp_new_i32();
-            tcg_gen_movi_i32(tmp2, s->pc | 1);
-            store_reg(s, 14, tmp2);
-            gen_bx(s, tmp);
-            return 0;
-        }
-        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
-            /* Instruction spans a page boundary.  Implement it as two
-               16-bit instructions in case the second half causes an
-               prefetch abort.  */
-            offset = ((int32_t)insn << 21) >> 9;
-            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
-            return 0;
-        }
-        /* Fall through to 32-bit decode.  */
-    }
-
-    insn = arm_lduw_code(env, s->pc, s->sctlr_b);
-    s->pc += 2;
-    insn |= (uint32_t)insn_hw1 << 16;
-
+    /* The only 32 bit insn that's allowed for Thumb1 is the combined
+     * BL/BLX prefix and suffix.
+     */
     if ((insn & 0xf800e800) != 0xf000e800) {
         ARCH(6T2);
     }
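
An aside on the arithmetic in the block removed above (re-added in 16-bit form further down in this patch): a split Thumb-1 BL/BLX target is assembled from two halfwords, the prefix supplying a sign-extended offset[22:12] via LR and the suffix supplying offset[11:1]. A standalone sketch of that arithmetic with made-up example values (all names are ours, not the patch's):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t prefix = 0xf7ff;  /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
        uint16_t suffix = 0xfffe;  /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
        uint32_t pc = 0x8004;      /* prefix address + 4, the Thumb "PC" the
                                      offset is relative to */

        /* Prefix: bits [10:0] become offset[22:12], sign-extended.  This is
         * the patch's ((int32_t)insn << 21) >> 9, done via uint32_t here to
         * avoid signed-overflow UB in a standalone program.
         */
        int32_t hi = (int32_t)((uint32_t)prefix << 21) >> 9;
        /* Suffix: bits [10:0] become offset[11:1]. */
        int32_t lo = (suffix & 0x7ff) << 1;

        /* Prints 0x8000: this BL at 0x8000 branches back to itself. */
        printf("target = 0x%x\n", pc + hi + lo);
        return 0;
    }
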
+ */ + if (s->v8m_secure) { + /* Like the IT insn, we don't need to generate any code */ + s->condexec_cond = 0; + s->condexec_mask = 0; + } + } else if (insn & 0x01200000) { /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx * - load/store dual (post-indexed) * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx @@ -11079,27 +11100,15 @@ illegal_op: return 1; } -static void disas_thumb_insn(CPUARMState *env, DisasContext *s) +static void disas_thumb_insn(DisasContext *s, uint32_t insn) { - uint32_t val, insn, op, rm, rn, rd, shift, cond; + uint32_t val, op, rm, rn, rd, shift, cond; int32_t offset; int i; TCGv_i32 tmp; TCGv_i32 tmp2; TCGv_i32 addr; - if (s->condexec_mask) { - cond = s->condexec_cond; - if (cond != 0x0e) { /* Skip conditional when condition is AL. */ - s->condlabel = gen_new_label(); - arm_gen_test_cc(cond ^ 1, s->condlabel); - s->condjmp = 1; - } - } - - insn = arm_lduw_code(env, s->pc, s->sctlr_b); - s->pc += 2; - switch (insn >> 12) { case 0: case 1: @@ -11830,8 +11839,21 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) case 14: if (insn & (1 << 11)) { - if (disas_thumb2_insn(env, s, insn)) - goto undef32; + /* thumb_insn_is_16bit() ensures we can't get here for + * a Thumb2 CPU, so this must be a thumb1 split BL/BLX: + * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF) + */ + assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2)); + ARCH(5); + offset = ((insn & 0x7ff) << 1); + tmp = load_reg(s, 14); + tcg_gen_addi_i32(tmp, tmp, offset); + tcg_gen_andi_i32(tmp, tmp, 0xfffffffc); + + tmp2 = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp2, s->pc | 1); + store_reg(s, 14, tmp2); + gen_bx(s, tmp); break; } /* unconditional branch */ @@ -11842,15 +11864,30 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) break; case 15: - if (disas_thumb2_insn(env, s, insn)) - goto undef32; + /* thumb_insn_is_16bit() ensures we can't get here for + * a Thumb2 CPU, so this must be a thumb1 split BL/BLX. + */ + assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2)); + + if (insn & (1 << 11)) { + /* 0b1111_1xxx_xxxx_xxxx : BL suffix */ + offset = ((insn & 0x7ff) << 1) | 1; + tmp = load_reg(s, 14); + tcg_gen_addi_i32(tmp, tmp, offset); + + tmp2 = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp2, s->pc | 1); + store_reg(s, 14, tmp2); + gen_bx(s, tmp); + } else { + /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */ + uint32_t uoffset = ((int32_t)insn << 21) >> 9; + + tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset); + } break; } return; -undef32: - gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), - default_exception_el(s)); - return; illegal_op: undef: gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(), @@ -11861,29 +11898,14 @@ static bool insn_crosses_page(CPUARMState *env, DisasContext *s) { /* Return true if the insn at dc->pc might cross a page boundary. * (False positives are OK, false negatives are not.) + * We know this is a Thumb insn, and our caller ensures we are + * only called if dc->pc is less than 4 bytes from the page + * boundary, so we cross the page if the first 16 bits indicate + * that this is a 32 bit insn. */ - uint16_t insn; + uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b); - if ((s->pc & 3) == 0) { - /* At a 4-aligned address we can't be crossing a page */ - return false; - } - - /* This must be a Thumb insn */ - insn = arm_lduw_code(env, s->pc, s->sctlr_b); - - if ((insn >> 11) >= 0x1d) { - /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the - * First half of a 32-bit Thumb insn. 
@@ -11861,29 +11898,14 @@ static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
 {
     /* Return true if the insn at dc->pc might cross a page boundary.
      * (False positives are OK, false negatives are not.)
+     * We know this is a Thumb insn, and our caller ensures we are
+     * only called if dc->pc is less than 4 bytes from the page
+     * boundary, so we cross the page if the first 16 bits indicate
+     * that this is a 32 bit insn.
      */
-    uint16_t insn;
+    uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
 
-    if ((s->pc & 3) == 0) {
-        /* At a 4-aligned address we can't be crossing a page */
-        return false;
-    }
-
-    /* This must be a Thumb insn */
-    insn = arm_lduw_code(env, s->pc, s->sctlr_b);
-
-    if ((insn >> 11) >= 0x1d) {
-        /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
-         * First half of a 32-bit Thumb insn.  Thumb-1 cores might
-         * end up actually treating this as two 16-bit insns (see the
-         * code at the start of disas_thumb2_insn()) but we don't bother
-         * to check for that as it is unlikely, and false positives here
-         * are harmless.
-         */
-        return true;
-    }
-    /* Definitely a 16-bit insn, can't be crossing a page. */
-    return false;
+    return !thumb_insn_is_16bit(s, insn);
 }
 
 static int arm_tr_init_disas_context(DisasContextBase *dcbase,
@@ -12116,16 +12138,88 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
        in init_disas_context by adjusting max_insns.  */
 }
 
+static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
+{
+    /* Return true if this Thumb insn is always unconditional,
+     * even inside an IT block. This is true of only a very few
+     * instructions: BKPT, HLT, and SG.
+     *
+     * A larger class of instructions are UNPREDICTABLE if used
+     * inside an IT block; we do not need to detect those here, because
+     * what we do by default (perform the cc check and update the IT
+     * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
+     * choice for those situations.
+     *
+     * insn is either a 16-bit or a 32-bit instruction; the two are
+     * distinguishable because for the 16-bit case the top 16 bits
+     * are zeroes, and that isn't a valid 32-bit encoding.
+     */
+    if ((insn & 0xffffff00) == 0xbe00) {
+        /* BKPT */
+        return true;
+    }
+
+    if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
+        !arm_dc_feature(s, ARM_FEATURE_M)) {
+        /* HLT: v8A only. This is unconditional even when it is going to
+         * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
+         * For v7 cores this was a plain old undefined encoding and so
+         * honours its cc check. (We might be using the encoding as
+         * a semihosting trap, but we don't change the cc check behaviour
+         * on that account, because a debugger connected to a real v7A
+         * core and emulating semihosting traps by catching the UNDEF
+         * exception would also only see cases where the cc check passed.
+         * No guest code should be trying to do a HLT semihosting trap
+         * in an IT block anyway.
+         */
+        return true;
+    }
+
+    if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
+        arm_dc_feature(s, ARM_FEATURE_M)) {
+        /* SG: v8M only */
+        return true;
+    }
+
+    return false;
+}
+
 static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
     CPUARMState *env = cpu->env_ptr;
+    uint32_t insn;
+    bool is_16bit;
 
     if (arm_pre_translate_insn(dc)) {
         return;
     }
 
-    disas_thumb_insn(env, dc);
+    insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
+    is_16bit = thumb_insn_is_16bit(dc, insn);
+    dc->pc += 2;
+    if (!is_16bit) {
+        uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);
+
+        insn = insn << 16 | insn2;
+        dc->pc += 2;
+    }
+
+    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
+        uint32_t cond = dc->condexec_cond;
+
+        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
+            dc->condlabel = gen_new_label();
+            arm_gen_test_cc(cond ^ 1, dc->condlabel);
+            dc->condjmp = 1;
+        }
+    }
+
+    if (is_16bit) {
+        disas_thumb_insn(dc, insn);
+    } else {
+        disas_thumb2_insn(dc, insn);
+    }
 
     /* Advance the Thumb condexec condition.  */
     if (dc->condexec_mask) {
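
An aside on the masks in thumb_insn_is_unconditional() above: because 16-bit insns arrive with the top 16 bits clear, full 32-bit masks can match them without ever colliding with genuine 32-bit encodings. A standalone restatement with example encodings (the helper names are ours, not the patch's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical restatements of the patch's masks.  BKPT is
     * 0xbe00-0xbeff (imm8 in bits [7:0]); HLT is 0xba80-0xbabf
     * (imm6 in bits [5:0]).
     */
    static bool is_bkpt(uint32_t insn) { return (insn & 0xffffff00) == 0xbe00; }
    static bool is_hlt(uint32_t insn)  { return (insn & 0xffffffc0) == 0xba80; }

    int main(void)
    {
        printf("BKPT #0   : %d\n", is_bkpt(0xbe00)); /* 1 */
        printf("BKPT #255 : %d\n", is_bkpt(0xbeff)); /* 1 */
        printf("HLT #0    : %d\n", is_hlt(0xba80));  /* 1 */
        printf("CPSIE i   : %d\n", is_bkpt(0xb662)); /* 0: stays conditional */
        return 0;
    }
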
@@ -12164,7 +12258,7 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
 
-    if (dc->base.tb->cflags & CF_LAST_IO && dc->condjmp) {
+    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
         /* FIXME: This can theoretically happen with self-modifying code. */
         cpu_abort(cpu, "IO on conditional branch instruction");
     }
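
A closing aside on the recurring tb_cflags(...) & CF_PARALLEL / CF_USE_ICOUNT / CF_LAST_IO changes throughout this patch: generated code must depend only on state captured in the translation block itself, not on a mutable global like the old parallel_cpus, otherwise a cached TB translated in one mode could be reused after the mode changes. A toy model of that hazard in plain C (all names are ours; nothing here is QEMU API beyond what the comments reference):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy translation block: bakes in the flags that were valid when it
     * was generated, the way QEMU keys cached TBs on tb->cflags.
     */
    struct toy_tb {
        uint32_t cflags;            /* captured at translation time */
    };

    #define TOY_CF_PARALLEL 0x1     /* stand-in for CF_PARALLEL */

    static bool global_parallel;    /* stand-in for the old parallel_cpus */

    static void run_tb(const struct toy_tb *tb)
    {
        /* Right: decide from the flags captured in the TB. */
        printf("per-TB view: %s\n",
               (tb->cflags & TOY_CF_PARALLEL) ? "parallel" : "serial");
        /* Wrong: the global may have changed since translation. */
        printf("global view: %s\n", global_parallel ? "parallel" : "serial");
    }

    int main(void)
    {
        struct toy_tb tb = { .cflags = 0 }; /* "translated" in serial mode */
        global_parallel = true;             /* guest later switches to MTTCG */
        run_tb(&tb);                        /* the two views now disagree */
        return 0;
    }
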