/* Accept I/O on the last instruction. */
gen_io_start();
ops->translate_insn(db, cpu);
- gen_io_end();
} else {
ops->translate_insn(db, cpu);
}
tcg_temp_free_i32(tmp);
}

+/*
+ * cpu->can_do_io is cleared automatically at the beginning of
+ * each translation block. The cost is minimal and only paid
+ * for -icount, plus it would be very easy to forget doing it
+ * in the translator. Therefore, backends only need to call
+ * gen_io_start.
+ */
static inline void gen_io_end(void)
{
TCGv_i32 tmp = tcg_const_i32(0);
tcg_gen_st16_i32(count, cpu_env,
offsetof(ArchCPU, neg.icount_decr.u16.low) -
offsetof(ArchCPU, env));
- /* Disable I/O by default */
gen_io_end();
}
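For illustration, the pattern this change leaves behind in a target translator
(mirroring the hppa interval-timer hunk further down, with the names taken
from that hunk) is a bare gen_io_start() with no matching gen_io_end():

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
        gen_helper_read_interval_timer(tmp);
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        gen_helper_read_interval_timer(tmp);
    }

can_do_io is cleared again automatically before the next block executes, so
the translator no longer has to store zero back itself.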
if (use_icount) {
gen_io_start();
helper(va);
- gen_io_end();
return DISAS_PC_STALE;
} else {
helper(va);
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
gen_helper_load_pcc(va, cpu_env);
- gen_io_end();
ret = DISAS_PC_STALE;
} else {
gen_helper_load_pcc(va, cpu_env);
if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
/* I/O operations must end the TB here (whether read or write) */
- gen_io_end();
s->base.is_jmp = DISAS_UPDATE;
} else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
/* We default to ending the TB on a coprocessor register write,
gen_helper_exception_return(cpu_env, dst);
tcg_temp_free_i64(dst);
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
/* Must exit loop to check un-masked IRQs */
s->base.is_jmp = DISAS_EXIT;
return;
gen_io_start();
}
gen_helper_cpsr_write_eret(cpu_env, cpsr);
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
tcg_temp_free_i32(cpsr);
/* Must exit loop to check un-masked IRQs */
s->base.is_jmp = DISAS_EXIT;
if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
/* I/O operations must end the TB here (whether read or write) */
- gen_io_end();
gen_lookup_tb(s);
} else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
/* We default to ending the TB on a coprocessor register write,
gen_io_start();
}
gen_helper_cpsr_write_eret(cpu_env, tmp);
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
tcg_temp_free_i32(tmp);
/* Must exit loop to check un-masked IRQs */
s->base.is_jmp = DISAS_EXIT;
npc = dc->pc;
- if (tb_cflags(tb) & CF_LAST_IO)
- gen_io_end();
/* Force an update if the per-tb cpu state has changed. */
if (dc->is_jmp == DISAS_NEXT
&& (dc->cpustate_changed || !dc->flagx_known
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
gen_helper_read_interval_timer(tmp);
- gen_io_end();
ctx->base.is_jmp = DISAS_IAQ_N_STALE;
} else {
gen_helper_read_interval_timer(tmp);
gen_op_mov_reg_v(s, dflag, rm, s->T0);
set_cc_op(s, CC_OP_EFLAGS);
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
gen_bpt_io(s, s->tmp2_i32, ot);
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
gen_bpt_io(s, s->tmp2_i32, ot);
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
gen_bpt_io(s, s->tmp2_i32, ot);
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
gen_bpt_io(s, s->tmp2_i32, ot);
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
}
gen_helper_rdtsc(cpu_env);
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
}
gen_helper_rdtscp(cpu_env);
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
gen_op_mov_v_reg(s, ot, s->T0, rm);
gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
s->T0);
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
gen_jmp_im(s, s->pc - s->cs_base);
gen_eob(s);
} else {
}
gen_helper_wcsr_im(cpu_env, cpu_R[dc->r1]);
tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
- if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
dc->is_jmp = DISAS_UPDATE;
break;
case CSR_IP:
}
gen_helper_wcsr_ip(cpu_env, cpu_R[dc->r1]);
tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
- if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
dc->is_jmp = DISAS_UPDATE;
break;
case CSR_ICC:
&& (dc->pc - page_start < TARGET_PAGE_SIZE)
&& num_insns < max_insns);
- if (tb_cflags(tb) & CF_LAST_IO) {
- gen_io_end();
- }
if (unlikely(cs->singlestep_enabled)) {
if (dc->is_jmp == DISAS_NEXT) {
npc = dc->jmp_pc;
}
- if (tb_cflags(tb) & CF_LAST_IO)
- gen_io_end();
/* Force an update if the per-tb cpu state has changed. */
if (dc->is_jmp == DISAS_NEXT
&& (dc->cpustate_changed || org_flags != dc->tb_flags)) {
gen_io_start();
}
gen_helper_mfc0_count(arg, cpu_env);
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
/*
* Break the TB to be able to take timer interrupts immediately
* after reading count. DISAS_STOP isn't sufficient, we need to
/* For simplicity assume that all writes can cause interrupts. */
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
/*
* DISAS_STOP isn't sufficient, we need to ensure we break out of
* translated code to check for pending interrupts.
gen_io_start();
}
gen_helper_mfc0_count(arg, cpu_env);
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
/*
* Break the TB to be able to take timer interrupts immediately
* after reading count. DISAS_STOP isn't sufficient, we need to
/* For simplicity assume that all writes can cause interrupts. */
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
/*
* DISAS_STOP isn't sufficient, we need to ensure we break out of
* translated code to check for pending interrupts.
gen_io_start();
}
gen_helper_rdhwr_cc(t0, cpu_env);
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
gen_store_gpr(t0, rt);
/*
* Break the TB to be able to take timer interrupts immediately
!tcg_op_buf_full() &&
num_insns < max_insns);
- if (tb_cflags(tb) & CF_LAST_IO) {
- gen_io_end();
- }
-
/* Indicate where the next block should start */
switch (dc->is_jmp) {
case DISAS_NEXT:
gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]);
}
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
gen_stop_exception(ctx);
}
}
gen_update_cfar(ctx, ctx->base.pc_next - 4);
gen_helper_rfi(cpu_env);
gen_sync_exception(ctx);
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
#endif
}
gen_update_cfar(ctx, ctx->base.pc_next - 4);
gen_helper_rfid(cpu_env);
gen_sync_exception(ctx);
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
#endif
}
/* Must stop the translation as machine state (may have) changed */
/* Note that mtmsr is not always defined as context-synchronizing */
gen_stop_exception(ctx);
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
}
#endif /* !defined(CONFIG_USER_ONLY) */
}
tcg_gen_mov_tl(msr, cpu_gpr[rS(ctx->opcode)]);
#endif
gen_helper_store_msr(cpu_env, msr);
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
tcg_temp_free(msr);
/* Must stop the translation as machine state (may have) changed */
/* Note that mtmsr is not always defined as context-synchronizing */
}
gen_helper_load_decr(cpu_gpr[gprn], cpu_env);
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
gen_stop_exception(ctx);
}
}
}
gen_helper_store_decr(cpu_env, cpu_gpr[gprn]);
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
gen_stop_exception(ctx);
}
}
} while (0)
#define RISCV_OP_CSR_POST do {\
- gen_io_end(); \
gen_set_gpr(a->rd, dest); \
tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn); \
exit_tb(ctx); \
gen_helper_tick_set_limit(r_tickptr,
cpu_tick_cmpr);
tcg_temp_free_ptr(r_tickptr);
- if (tb_cflags(dc->base.tb) &
- CF_USE_ICOUNT) {
- gen_io_end();
- }
/* End TB to handle timer interrupt */
dc->base.is_jmp = DISAS_EXIT;
}
gen_helper_tick_set_count(r_tickptr,
cpu_tmp0);
tcg_temp_free_ptr(r_tickptr);
- if (tb_cflags(dc->base.tb) &
- CF_USE_ICOUNT) {
- gen_io_end();
- }
/* End TB to handle timer interrupt */
dc->base.is_jmp = DISAS_EXIT;
}
gen_helper_tick_set_limit(r_tickptr,
cpu_stick_cmpr);
tcg_temp_free_ptr(r_tickptr);
- if (tb_cflags(dc->base.tb) &
- CF_USE_ICOUNT) {
- gen_io_end();
- }
/* End TB to handle timer interrupt */
dc->base.is_jmp = DISAS_EXIT;
}
gen_helper_tick_set_count(r_tickptr,
cpu_tmp0);
tcg_temp_free_ptr(r_tickptr);
- if (tb_cflags(dc->base.tb) &
- CF_USE_ICOUNT) {
- gen_io_end();
- }
/* End TB to handle timer interrupt */
dc->base.is_jmp = DISAS_EXIT;
}
code. */
cpu_abort(cs, "IO on conditional branch instruction");
}
- gen_io_end();
}
/* At this stage dc->condjmp will only be set when the skipped
gen_io_start();
}
gen_helper_waiti(cpu_env, pc, intlevel);
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
tcg_temp_free(pc);
tcg_temp_free(intlevel);
}
}
gen_helper_update_ccount(cpu_env);
tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]);
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
#endif
}
tcg_gen_mov_i32(cpu_SR[par[0]], arg[0].in);
gen_helper_update_ccompare(cpu_env, tmp);
tcg_temp_free(tmp);
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
#endif
}
gen_io_start();
}
gen_helper_wsr_ccount(cpu_env, arg[0].in);
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
#endif
}
tcg_gen_mov_i32(arg[0].out, tmp);
tcg_temp_free(tmp);
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
#endif
}