#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
-#include "internal.h"
+#include "internal-common.h"
+#include "internal-target.h"
/* -icount align implementation. */
return;
}
- cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
+ cpu_icount = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
sc->last_cpu_icount = cpu_icount;
sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
sc->last_cpu_icount
- = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
+ = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
if (sc->diff_clk < max_delay) {
max_delay = sc->diff_clk;
}
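/*
 * Note: cpu->neg.icount_decr.u16.low counts down per executed insn and
 * cpu->icount_extra holds the not-yet-loaded remainder of the budget, so
 * their sum is the number of guest instructions still to run; the
 * difference last - current is the number executed since the previous
 * alignment.  A minimal sketch of the conversion used above, assuming
 * the usual power-of-two icount shift (the real helper lives in the
 * icount code):
 *
 *     static int64_t icount_to_ns_sketch(int64_t icount, int shift)
 *     {
 *         return icount << shift;   // 2^shift ns per guest instruction
 *     }
 *
 * With shift == 4, executing 100 insns advances the virtual clock by
 * 1600 ns, which sc->diff_clk accumulates against real time.
 */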
struct tb_desc desc;
uint32_t h;
- desc.env = cpu->env_ptr;
+ desc.env = cpu_env(cpu);
desc.cs_base = cs_base;
desc.flags = flags;
desc.cflags = cflags;
hash = tb_jmp_cache_hash_func(pc);
jc = cpu->tb_jmp_cache;
- if (cflags & CF_PCREL) {
- /* Use acquire to ensure current load of pc from jc. */
- tb = qatomic_load_acquire(&jc->array[hash].tb);
+ tb = qatomic_read(&jc->array[hash].tb);
+ if (likely(tb &&
+ jc->array[hash].pc == pc &&
+ tb->cs_base == cs_base &&
+ tb->flags == flags &&
+ tb_cflags(tb) == cflags)) {
+ goto hit;
+ }
- if (likely(tb &&
- jc->array[hash].pc == pc &&
- tb->cs_base == cs_base &&
- tb->flags == flags &&
- tb_cflags(tb) == cflags)) {
- return tb;
- }
- tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
- if (tb == NULL) {
- return NULL;
- }
- jc->array[hash].pc = pc;
- /* Ensure pc is written first. */
- qatomic_store_release(&jc->array[hash].tb, tb);
- } else {
- /* Use rcu_read to ensure current load of pc from *tb. */
- tb = qatomic_rcu_read(&jc->array[hash].tb);
-
- if (likely(tb &&
- tb->pc == pc &&
- tb->cs_base == cs_base &&
- tb->flags == flags &&
- tb_cflags(tb) == cflags)) {
- return tb;
- }
- tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
- if (tb == NULL) {
- return NULL;
- }
- /* Use the pc value already stored in tb->pc. */
- qatomic_set(&jc->array[hash].tb, tb);
+ tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
+ if (tb == NULL) {
+ return NULL;
}
+ jc->array[hash].pc = pc;
+ qatomic_set(&jc->array[hash].tb, tb);
+
+hit:
+ /*
+ * As long as tb is not NULL, the contents are consistent. Therefore,
+ * the virtual PC has to match for non-CF_PCREL translations.
+ */
+ assert((tb_cflags(tb) & CF_PCREL) || tb->pc == pc);
return tb;
}
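/*
 * The lookup above is now uniform: jc->array[hash].pc is kept valid for
 * every cached TB, CF_PCREL or not, so the old acquire/release vs.
 * rcu-read split is gone.  The cache entry this relies on, as assumed
 * from tb-jmp-cache.h:
 *
 *     struct CPUJumpCache {
 *         struct {
 *             TranslationBlock *tb;   // reset to NULL by other threads
 *             vaddr pc;               // written only by the owning vCPU
 *         } array[TB_JMP_CACHE_SIZE];
 *     };
 *
 * Only the owning vCPU stores pc/tb pairs; other threads at most clear
 * tb to NULL when flushing the cache.  Hence a non-NULL tb implies a pc
 * written consistently by this same thread, which is what the assertion
 * at "hit:" verifies for non-CF_PCREL translations.
 */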
#ifdef CONFIG_USER_ONLY
g_assert_not_reached();
#else
- CPUClass *cc = CPU_GET_CLASS(cpu);
- assert(cc->tcg_ops->debug_check_breakpoint);
- match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
+ assert(tcg_ops->debug_check_breakpoint);
+ match_bp = tcg_ops->debug_check_breakpoint(cpu);
#endif
}
static inline TranslationBlock * QEMU_DISABLE_CFI
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
{
- CPUArchState *env = cpu->env_ptr;
+ CPUArchState *env = cpu_env(cpu);
uintptr_t ret;
TranslationBlock *last_tb;
const void *tb_ptr = itb->tc.ptr;
qemu_thread_jit_execute();
ret = tcg_qemu_tb_exec(env, tb_ptr);
- cpu->can_do_io = 1;
+ cpu->neg.can_do_io = true;
qemu_plugin_disable_mem_helpers(cpu);
/*
* TODO: Delay swapping back to the read-write region of the TB
* counter hit zero); we must restore the guest PC to the address
* of the start of the TB.
*/
- CPUClass *cc = CPU_GET_CLASS(cpu);
+ CPUClass *cc = cpu->cc;
+ const TCGCPUOps *tcg_ops = cc->tcg_ops;
- if (cc->tcg_ops->synchronize_from_tb) {
- cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
+ if (tcg_ops->synchronize_from_tb) {
+ tcg_ops->synchronize_from_tb(cpu, last_tb);
} else {
tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL));
assert(cc->set_pc);
static void cpu_exec_enter(CPUState *cpu)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
- if (cc->tcg_ops->cpu_exec_enter) {
- cc->tcg_ops->cpu_exec_enter(cpu);
+ if (tcg_ops->cpu_exec_enter) {
+ tcg_ops->cpu_exec_enter(cpu);
}
}
static void cpu_exec_exit(CPUState *cpu)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
- if (cc->tcg_ops->cpu_exec_exit) {
- cc->tcg_ops->cpu_exec_exit(cpu);
+ if (tcg_ops->cpu_exec_exit) {
+ tcg_ops->cpu_exec_exit(cpu);
}
}
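/*
 * cpu_exec_enter()/cpu_exec_exit() show the pattern applied throughout
 * this patch: the QOM cast CPU_GET_CLASS(cpu) is replaced by the class
 * pointer cached in cpu->cc, and the TCG hook table is loaded once into
 * a const local.  Hedged before/after sketch:
 *
 *     CPUClass *cc = CPU_GET_CLASS(cpu);            // dynamic cast
 *     if (cc->tcg_ops->cpu_exec_enter) ...
 *
 *     const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;  // cached at realize
 *     if (tcg_ops->cpu_exec_enter) ...
 *
 * Behaviour is unchanged; the rewrite removes a QOM cast from hot paths.
 */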
tcg_ctx->gen_tb = NULL;
}
#endif
- if (qemu_mutex_iothread_locked()) {
- qemu_mutex_unlock_iothread();
+ if (bql_locked()) {
+ bql_unlock();
}
assert_no_pages_locked();
}
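/*
 * Mechanical rename applied across this patch, following the
 * qemu/main-loop.h update that introduced the bql_* names:
 *
 *     qemu_mutex_lock_iothread()    ->  bql_lock()
 *     qemu_mutex_unlock_iothread()  ->  bql_unlock()
 *     qemu_mutex_iothread_locked()  ->  bql_locked()
 *
 * The lock taken is the same Big QEMU Lock; only the API name changes,
 * so unlock-if-locked sequences like the one above keep their semantics.
 */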
void cpu_exec_step_atomic(CPUState *cpu)
{
- CPUArchState *env = cpu->env_ptr;
+ CPUArchState *env = cpu_env(cpu);
TranslationBlock *tb;
vaddr pc;
uint64_t cs_base;
#if defined(TARGET_I386)
if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
X86CPU *x86_cpu = X86_CPU(cpu);
- qemu_mutex_lock_iothread();
+ bql_lock();
apic_poll_irq(x86_cpu->apic_state);
cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}
#endif /* TARGET_I386 */
if (!cpu_has_work(cpu)) {
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
CPUWatchpoint *wp;
if (!cpu->watchpoint_hit) {
}
}
- if (cc->tcg_ops->debug_excp_handler) {
- cc->tcg_ops->debug_excp_handler(cpu);
+ if (tcg_ops->debug_excp_handler) {
+ tcg_ops->debug_excp_handler(cpu);
}
}
if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
if (replay_has_exception()
- && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
+ && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) {
            /* Execute just one insn to trigger the exception pending in the log */
cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
- | CF_LAST_IO | CF_NOIRQ | 1;
+ | CF_NOIRQ | 1;
}
#endif
return false;
}
+
if (cpu->exception_index >= EXCP_INTERRUPT) {
/* exit request from the cpu execution loop */
*ret = cpu->exception_index;
}
cpu->exception_index = -1;
return true;
- } else {
+ }
+
#if defined(CONFIG_USER_ONLY)
- /* if user mode only, we simulate a fake exception
- which will be handled outside the cpu execution
- loop */
+ /*
+ * If user mode only, we simulate a fake exception which will be
+ * handled outside the cpu execution loop.
+ */
#if defined(TARGET_I386)
- CPUClass *cc = CPU_GET_CLASS(cpu);
- cc->tcg_ops->fake_user_interrupt(cpu);
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
+ tcg_ops->fake_user_interrupt(cpu);
#endif /* TARGET_I386 */
- *ret = cpu->exception_index;
- cpu->exception_index = -1;
- return true;
+ *ret = cpu->exception_index;
+ cpu->exception_index = -1;
+ return true;
#else
- if (replay_exception()) {
- CPUClass *cc = CPU_GET_CLASS(cpu);
- qemu_mutex_lock_iothread();
- cc->tcg_ops->do_interrupt(cpu);
- qemu_mutex_unlock_iothread();
- cpu->exception_index = -1;
-
- if (unlikely(cpu->singlestep_enabled)) {
- /*
- * After processing the exception, ensure an EXCP_DEBUG is
- * raised when single-stepping so that GDB doesn't miss the
- * next instruction.
- */
- *ret = EXCP_DEBUG;
- cpu_handle_debug_exception(cpu);
- return true;
- }
- } else if (!replay_has_interrupt()) {
- /* give a chance to iothread in replay mode */
- *ret = EXCP_INTERRUPT;
+ if (replay_exception()) {
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
+
+ bql_lock();
+ tcg_ops->do_interrupt(cpu);
+ bql_unlock();
+ cpu->exception_index = -1;
+
+ if (unlikely(cpu->singlestep_enabled)) {
+ /*
+ * After processing the exception, ensure an EXCP_DEBUG is
+ * raised when single-stepping so that GDB doesn't miss the
+ * next instruction.
+ */
+ *ret = EXCP_DEBUG;
+ cpu_handle_debug_exception(cpu);
return true;
}
-#endif
+ } else if (!replay_has_interrupt()) {
+        /* Give the iothread a chance to run in replay mode. */
+ *ret = EXCP_INTERRUPT;
+ return true;
}
+#endif
return false;
}
* "real" interrupt event later. It does not need to be recorded for
* replay purposes.
*/
-static inline bool need_replay_interrupt(int interrupt_request)
+static inline bool need_replay_interrupt(CPUState *cpu, int interrupt_request)
{
#if defined(TARGET_I386)
return !(interrupt_request & CPU_INTERRUPT_POLL);
#else
- return true;
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
+ return !tcg_ops->need_replay_interrupt
+ || tcg_ops->need_replay_interrupt(interrupt_request);
#endif
}
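/*
 * With the hook in place, a target that needs to suppress recording
 * opts in via TCGCPUOps; every target without the hook keeps the old
 * unconditional-true behaviour.  A hypothetical target-side sketch,
 * mirroring the #if TARGET_I386 branch above (name and registration
 * shown here are illustrative):
 *
 *     static bool x86_need_replay_interrupt(int interrupt_request)
 *     {
 *         // APIC polling is a host-side artifact, not a guest event
 *         return !(interrupt_request & CPU_INTERRUPT_POLL);
 *     }
 *     ...
 *     .need_replay_interrupt = x86_need_replay_interrupt,
 */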
#endif /* !CONFIG_USER_ONLY */
+static inline bool icount_exit_request(CPUState *cpu)
+{
+ if (!icount_enabled()) {
+ return false;
+ }
+ if (cpu->cflags_next_tb != -1 && !(cpu->cflags_next_tb & CF_USE_ICOUNT)) {
+ return false;
+ }
+ return cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0;
+}
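/*
 * icount_exit_request() extracts the exit condition previously inlined
 * in cpu_handle_interrupt() (see the hunk below).  Its two early
 * "return false" legs are the negation of the old guard:
 *
 *     icount_enabled()
 *     && (cpu->cflags_next_tb == -1
 *         || (cpu->cflags_next_tb & CF_USE_ICOUNT))
 *     && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0
 *
 * i.e. exit to the main loop only when icount is active, the pending TB
 * (if one was requested) is icount-aware, and the budget is exhausted.
 */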
+
static inline bool cpu_handle_interrupt(CPUState *cpu,
TranslationBlock **last_tb)
{
* Ensure zeroing happens before reading cpu->exit_request or
* cpu->interrupt_request (see also smp_wmb in cpu_exit())
*/
- qatomic_set_mb(&cpu_neg(cpu)->icount_decr.u16.high, 0);
+ qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);
if (unlikely(qatomic_read(&cpu->interrupt_request))) {
int interrupt_request;
- qemu_mutex_lock_iothread();
+ bql_lock();
interrupt_request = cpu->interrupt_request;
if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
/* Mask out external interrupts for this step. */
if (interrupt_request & CPU_INTERRUPT_DEBUG) {
cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
cpu->exception_index = EXCP_DEBUG;
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return true;
}
#if !defined(CONFIG_USER_ONLY)
cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
cpu->halted = 1;
cpu->exception_index = EXCP_HLT;
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return true;
}
#if defined(TARGET_I386)
cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
do_cpu_init(x86_cpu);
cpu->exception_index = EXCP_HALTED;
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return true;
}
#else
else if (interrupt_request & CPU_INTERRUPT_RESET) {
replay_interrupt();
cpu_reset(cpu);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return true;
}
#endif /* !TARGET_I386 */
True when it is, and we should restart on a new TB,
       or it may exit via longjmp through cpu_loop_exit. */
else {
- CPUClass *cc = CPU_GET_CLASS(cpu);
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
- if (cc->tcg_ops->cpu_exec_interrupt &&
- cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
- if (need_replay_interrupt(interrupt_request)) {
+ if (tcg_ops->cpu_exec_interrupt &&
+ tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
+ if (need_replay_interrupt(cpu, interrupt_request)) {
replay_interrupt();
}
/*
*/
if (unlikely(cpu->singlestep_enabled)) {
cpu->exception_index = EXCP_DEBUG;
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return true;
}
cpu->exception_index = -1;
}
/* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}
/* Finally, check if we need to exit to the main loop. */
- if (unlikely(qatomic_read(&cpu->exit_request))
- || (icount_enabled()
- && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
- && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
+ if (unlikely(qatomic_read(&cpu->exit_request)) || icount_exit_request(cpu)) {
qatomic_set(&cpu->exit_request, 0);
if (cpu->exception_index == -1) {
cpu->exception_index = EXCP_INTERRUPT;
}
*last_tb = NULL;
- insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
+ insns_left = qatomic_read(&cpu->neg.icount_decr.u32);
if (insns_left < 0) {
/* Something asked us to stop executing chained TBs; just
* continue round the main loop. Whatever requested the exit
icount_update(cpu);
/* Refill decrementer and continue execution. */
insns_left = MIN(0xffff, cpu->icount_budget);
- cpu_neg(cpu)->icount_decr.u16.low = insns_left;
+ cpu->neg.icount_decr.u16.low = insns_left;
cpu->icount_extra = cpu->icount_budget - insns_left;
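/*
 * Worked example of the refill above: with cpu->icount_budget == 100000,
 * insns_left = MIN(0xffff, 100000) = 65535 goes into the 16-bit
 * decrementer and icount_extra = 100000 - 65535 = 34465 is held back;
 * once the decrementer underflows, the next refill draws from
 * icount_extra until the whole budget is consumed.
 */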
/*
uint64_t cs_base;
uint32_t flags, cflags;
- cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
+ cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags);
/*
* When requested, use an exact setting for cflags for the next
*/
h = tb_jmp_cache_hash_func(pc);
jc = cpu->tb_jmp_cache;
- if (cflags & CF_PCREL) {
- jc->array[h].pc = pc;
- /* Ensure pc is written first. */
- qatomic_store_release(&jc->array[h].tb, tb);
- } else {
- /* Use the pc value already stored in tb->pc. */
- qatomic_set(&jc->array[h].tb, tb);
- }
+ jc->array[h].pc = pc;
+ qatomic_set(&jc->array[h].tb, tb);
}
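/*
 * Same unification as in tb_lookup() above: pc is stored unconditionally
 * next to the tb pointer, so the CF_PCREL-only store-release ordering
 * (tb published after pc) is no longer needed.  A plain qatomic_set()
 * suffices because the entry is only ever written by its own vCPU
 * thread; other threads at most clear tb to NULL.
 */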
#ifndef CONFIG_USER_ONLY
return EXCP_HALTED;
}
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
cpu_exec_enter(cpu);
/*
ret = cpu_exec_setjmp(cpu, &sc);
cpu_exec_exit(cpu);
- rcu_read_unlock();
-
return ret;
}
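/*
 * RCU_READ_LOCK_GUARD() replaces the explicit rcu_read_lock()/
 * rcu_read_unlock() pair with a scope guard: its cleanup handler runs
 * rcu_read_unlock() on every path out of the enclosing block, which is
 * why the manual unlock before "return ret" could be deleted without
 * risking a leaked read-side critical section on early returns.
 */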
-void tcg_exec_realizefn(CPUState *cpu, Error **errp)
+bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
{
static bool tcg_target_initialized;
- CPUClass *cc = CPU_GET_CLASS(cpu);
if (!tcg_target_initialized) {
- cc->tcg_ops->initialize();
+ cpu->cc->tcg_ops->initialize();
tcg_target_initialized = true;
}
tcg_iommu_init_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */
/* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */
+
+ return true;
}
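/*
 * Returning bool lets callers use the common "return ok" convention
 * instead of probing *errp.  A hedged caller-side sketch:
 *
 *     if (!tcg_exec_realizefn(cpu, errp)) {
 *         return false;   // errp already set by the callee
 *     }
 *
 * As of this patch the function always returns true; the value exists
 * so later failure paths can propagate errors without an Error check.
 */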
/* undo the initializations in reverse order */