*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu/qemu-print.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-machine.h"
+#include "qapi/type-helpers.h"
#include "hw/core/tcg-cpu-ops.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/replay.h"
+#include "sysemu/tcg.h"
#include "exec/helper-proto.h"
+#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal.h"
uint32_t cflags = cpu->tcg_cflags;
/*
+ * Record gdb single-step. We should be exiting the TB by raising
+ * EXCP_DEBUG, but to simplify other tests, disable chaining too.
+ *
* For singlestep and -d nochain, suppress goto_tb so that
* we can log -d cpu,exec after every TB.
*/
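+    /*
+     * Note: the low CF_COUNT_MASK bits of cflags hold the insn count
+     * limit, so "| 1" below restricts each TB to a single guest insn.
+     */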
- if (singlestep) {
+ if (unlikely(cpu->singlestep_enabled)) {
+ cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
+ } else if (singlestep) {
cflags |= CF_NO_GOTO_TB | 1;
} else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
cflags |= CF_NO_GOTO_TB;
return cflags;
}
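+/* Lookup key for the physical TB hash table (tb_ctx.htable). */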
+struct tb_desc {
+ target_ulong pc;
+ target_ulong cs_base;
+ CPUArchState *env;
+ tb_page_addr_t page_addr0;
+ uint32_t flags;
+ uint32_t cflags;
+ uint32_t trace_vcpu_dstate;
+};
+
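+/* qht comparison callback: does TranslationBlock @p match the key @d? */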
+static bool tb_lookup_cmp(const void *p, const void *d)
+{
+ const TranslationBlock *tb = p;
+ const struct tb_desc *desc = d;
+
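+    /* With TARGET_TB_PCREL, the virtual pc is not part of a TB's identity. */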
+ if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) &&
+ tb->page_addr[0] == desc->page_addr0 &&
+ tb->cs_base == desc->cs_base &&
+ tb->flags == desc->flags &&
+ tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
+ tb_cflags(tb) == desc->cflags) {
+ /* check next page if needed */
+ if (tb->page_addr[1] == -1) {
+ return true;
+ } else {
+ tb_page_addr_t phys_page1;
+ target_ulong virt_page1;
+
+ /*
+ * We know that the first page matched, and an otherwise valid TB
+ * encountered an incomplete instruction at the end of that page,
+             * so generating a new TB from the current PC must also require
+             * reading from the next page -- even if the two second pages do
+             * not match, in which case the resulting insn differs for the
+             * new TB. Therefore any exception raised here by the faulting
+             * lookup is not premature.
+ */
+ virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
+ phys_page1 = get_page_addr_code(desc->env, virt_page1);
+ if (tb->page_addr[1] == phys_page1) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
+ target_ulong cs_base, uint32_t flags,
+ uint32_t cflags)
+{
+ tb_page_addr_t phys_pc;
+ struct tb_desc desc;
+ uint32_t h;
+
+ desc.env = cpu->env_ptr;
+ desc.cs_base = cs_base;
+ desc.flags = flags;
+ desc.cflags = cflags;
+ desc.trace_vcpu_dstate = *cpu->trace_dstate;
+ desc.pc = pc;
+ phys_pc = get_page_addr_code(desc.env, pc);
+ if (phys_pc == -1) {
+ return NULL;
+ }
+ desc.page_addr0 = phys_pc;
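+    /* With TARGET_TB_PCREL, the virtual pc does not contribute to the hash. */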
+ h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : pc),
+ flags, cflags, *cpu->trace_dstate);
+ return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
+}
+
/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
target_ulong cs_base,
uint32_t flags, uint32_t cflags)
{
TranslationBlock *tb;
+ CPUJumpCache *jc;
uint32_t hash;
/* we should never be trying to look up an INVALID tb */
tcg_debug_assert(!(cflags & CF_INVALID));
hash = tb_jmp_cache_hash_func(pc);
- tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);
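+    /* First level: per-vCPU, direct-mapped jump cache keyed on virtual pc. */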
+ jc = cpu->tb_jmp_cache;
+ tb = tb_jmp_cache_get_tb(jc, hash);
if (likely(tb &&
- tb->pc == pc &&
+ tb_jmp_cache_get_pc(jc, hash, tb) == pc &&
tb->cs_base == cs_base &&
tb->flags == flags &&
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
if (tb == NULL) {
return NULL;
}
- qatomic_set(&cpu->tb_jmp_cache[hash], tb);
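+    /* Promote the slow-path htable hit into the fast jump cache. */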
+ tb_jmp_cache_set(jc, hash, tb, pc);
return tb;
}
-static inline void log_cpu_exec(target_ulong pc, CPUState *cpu,
- const TranslationBlock *tb)
+static void log_cpu_exec(target_ulong pc, CPUState *cpu,
+ const TranslationBlock *tb)
{
- if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC))
- && qemu_log_in_addr_range(pc)) {
-
+ if (qemu_log_in_addr_range(pc)) {
qemu_log_mask(CPU_LOG_EXEC,
"Trace %d: %p [" TARGET_FMT_lx
"/" TARGET_FMT_lx "/%08x/%08x] %s\n",
#if defined(DEBUG_DISAS)
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
- FILE *logfile = qemu_log_lock();
- int flags = 0;
+ FILE *logfile = qemu_log_trylock();
+ if (logfile) {
+ int flags = 0;
- if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
- flags |= CPU_DUMP_FPU;
- }
+ if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
+ flags |= CPU_DUMP_FPU;
+ }
#if defined(TARGET_I386)
- flags |= CPU_DUMP_CCOP;
+ flags |= CPU_DUMP_CCOP;
#endif
- log_cpu_state(cpu, flags);
- qemu_log_unlock(logfile);
+ cpu_dump_state(cpu, logfile, flags);
+ qemu_log_unlock(logfile);
+ }
}
#endif /* DEBUG_DISAS */
}
}
+static bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
+ uint32_t *cflags)
+{
+ CPUBreakpoint *bp;
+ bool match_page = false;
+
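+    /* Fast path: no breakpoints on this vCPU at all. */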
+ if (likely(QTAILQ_EMPTY(&cpu->breakpoints))) {
+ return false;
+ }
+
+ /*
+ * Singlestep overrides breakpoints.
+ * This requirement is visible in the record-replay tests, where
+ * we would fail to make forward progress in reverse-continue.
+ *
+ * TODO: gdb singlestep should only override gdb breakpoints,
+ * so that one could (gdb) singlestep into the guest kernel's
+ * architectural breakpoint handler.
+ */
+ if (cpu->singlestep_enabled) {
+ return false;
+ }
+
+ QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
+ /*
+ * If we have an exact pc match, trigger the breakpoint.
+ * Otherwise, note matches within the page.
+ */
+ if (pc == bp->pc) {
+ bool match_bp = false;
+
+ if (bp->flags & BP_GDB) {
+ match_bp = true;
+ } else if (bp->flags & BP_CPU) {
+#ifdef CONFIG_USER_ONLY
+ g_assert_not_reached();
+#else
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+ assert(cc->tcg_ops->debug_check_breakpoint);
+ match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
+#endif
+ }
+
+ if (match_bp) {
+ cpu->exception_index = EXCP_DEBUG;
+ return true;
+ }
+ } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
+ match_page = true;
+ }
+ }
+
+ /*
+     * Within the same page as a breakpoint, single-step:
+     * return to helper_lookup_tb_ptr after each insn, looking
+     * for the actual breakpoint.
+ *
+ * TODO: Perhaps better to record all of the TBs associated
+ * with a given virtual page that contains a breakpoint, and
+ * then invalidate them when a new overlapping breakpoint is
+ * set on the page. Non-overlapping TBs would not be
+ * invalidated, nor would any TB need to be invalidated as
+ * breakpoints are removed.
+ */
+ if (match_page) {
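+        /* Limit the TB to one insn (count = 1) and disable goto_tb chaining. */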
+ *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
+ }
+ return false;
+}
+
/**
* helper_lookup_tb_ptr: quick check for next tb
* @env: current cpu state
CPUState *cpu = env_cpu(env);
TranslationBlock *tb;
target_ulong cs_base, pc;
- uint32_t flags;
+ uint32_t flags, cflags;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
- tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags(cpu));
+ cflags = curr_cflags(cpu);
+ if (check_for_breakpoints(cpu, pc, &cflags)) {
+ cpu_loop_exit(cpu);
+ }
+
+ tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
return tcg_code_gen_epilogue;
}
- log_cpu_exec(pc, cpu, tb);
+ if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
+ log_cpu_exec(pc, cpu, tb);
+ }
return tb->tc.ptr;
}
TranslationBlock *last_tb;
const void *tb_ptr = itb->tc.ptr;
- log_cpu_exec(itb->pc, cpu, itb);
+ if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
+ log_cpu_exec(log_pc(cpu, itb), cpu, itb);
+ }
qemu_thread_jit_execute();
ret = tcg_qemu_tb_exec(env, tb_ptr);
* of the start of the TB.
*/
CPUClass *cc = CPU_GET_CLASS(cpu);
- qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
- "Stopped execution of TB chain before %p ["
- TARGET_FMT_lx "] %s\n",
- last_tb->tc.ptr, last_tb->pc,
- lookup_symbol(last_tb->pc));
+
if (cc->tcg_ops->synchronize_from_tb) {
cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
} else {
+ assert(!TARGET_TB_PCREL);
assert(cc->set_pc);
- cc->set_pc(cpu, last_tb->pc);
+ cc->set_pc(cpu, tb_pc(last_tb));
}
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+ target_ulong pc = log_pc(cpu, last_tb);
+ if (qemu_log_in_addr_range(pc)) {
+ qemu_log("Stopped execution of TB chain before %p ["
+ TARGET_FMT_lx "] %s\n",
+ last_tb->tc.ptr, pc, lookup_symbol(pc));
+ }
+ }
+ }
+
+ /*
+ * If gdb single-step, and we haven't raised another exception,
+ * raise a debug exception. Single-step with another exception
+ * is handled in cpu_handle_exception.
+ */
+ if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
+ cpu->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(cpu);
}
+
return last_tb;
}
void cpu_exec_step_atomic(CPUState *cpu)
{
- CPUArchState *env = (CPUArchState *)cpu->env_ptr;
+ CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb;
target_ulong cs_base, pc;
- uint32_t flags;
- uint32_t cflags = (curr_cflags(cpu) & ~CF_PARALLEL) | 1;
+ uint32_t flags, cflags;
int tb_exit;
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
cpu->running = true;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
- tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
+ cflags = curr_cflags(cpu);
+ /* Execute in a serial context. */
+ cflags &= ~CF_PARALLEL;
+ /* After 1 insn, return and release the exclusive lock. */
+ cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
+ /*
+ * No need to check_for_breakpoints here.
+ * We only arrive in cpu_exec_step_atomic after beginning execution
+ * of an insn that includes an atomic operation we can't handle.
+ * Any breakpoint for this insn will have been recognized earlier.
+ */
+
+ tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
mmap_lock();
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
cpu_tb_exec(cpu, tb, &tb_exit);
cpu_exec_exit(cpu);
} else {
- /*
- * The mmap_lock is dropped by tb_gen_code if it runs out of
- * memory.
- */
#ifndef CONFIG_SOFTMMU
- tcg_debug_assert(!have_mmap_lock());
+ clear_helper_retaddr();
+ if (have_mmap_lock()) {
+ mmap_unlock();
+ }
#endif
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
qemu_plugin_disable_mem_helpers(cpu);
}
-
/*
* As we start the exclusive region before codegen we must still
* be in the region if we longjump out of either the codegen or
end_exclusive();
}
-struct tb_desc {
- target_ulong pc;
- target_ulong cs_base;
- CPUArchState *env;
- tb_page_addr_t phys_page1;
- uint32_t flags;
- uint32_t cflags;
- uint32_t trace_vcpu_dstate;
-};
-
-static bool tb_lookup_cmp(const void *p, const void *d)
-{
- const TranslationBlock *tb = p;
- const struct tb_desc *desc = d;
-
- if (tb->pc == desc->pc &&
- tb->page_addr[0] == desc->phys_page1 &&
- tb->cs_base == desc->cs_base &&
- tb->flags == desc->flags &&
- tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
- tb_cflags(tb) == desc->cflags) {
- /* check next page if needed */
- if (tb->page_addr[1] == -1) {
- return true;
- } else {
- tb_page_addr_t phys_page2;
- target_ulong virt_page2;
-
- virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
- phys_page2 = get_page_addr_code(desc->env, virt_page2);
- if (tb->page_addr[1] == phys_page2) {
- return true;
- }
- }
- }
- return false;
-}
-
-TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
- target_ulong cs_base, uint32_t flags,
- uint32_t cflags)
-{
- tb_page_addr_t phys_pc;
- struct tb_desc desc;
- uint32_t h;
-
- desc.env = (CPUArchState *)cpu->env_ptr;
- desc.cs_base = cs_base;
- desc.flags = flags;
- desc.cflags = cflags;
- desc.trace_vcpu_dstate = *cpu->trace_dstate;
- desc.pc = pc;
- phys_pc = get_page_addr_code(desc.env, pc);
- if (phys_pc == -1) {
- return NULL;
- }
- desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
- h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
- return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
-}
-
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
if (TCG_TARGET_HAS_direct_jump) {
qemu_spin_unlock(&tb_next->jmp_lock);
- qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
- "Linking TBs %p [" TARGET_FMT_lx
- "] index %d -> %p [" TARGET_FMT_lx "]\n",
- tb->tc.ptr, tb->pc, n,
- tb_next->tc.ptr, tb_next->pc);
+ qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n",
+ tb->tc.ptr, n, tb_next->tc.ptr);
return;
out_unlock_next:
return;
}
-static inline TranslationBlock *tb_find(CPUState *cpu,
- TranslationBlock *last_tb,
- int tb_exit, uint32_t cflags)
-{
- CPUArchState *env = (CPUArchState *)cpu->env_ptr;
- TranslationBlock *tb;
- target_ulong cs_base, pc;
- uint32_t flags;
-
- cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
-
- tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
- if (tb == NULL) {
- mmap_lock();
- tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
- mmap_unlock();
- /* We add the TB in the virtual pc hash table for the fast lookup */
- qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
- }
-#ifndef CONFIG_USER_ONLY
- /* We don't take care of direct jumps when address mapping changes in
- * system emulation. So it's not safe to make a direct jump to a TB
- * spanning two pages because the mapping for the second page can change.
- */
- if (tb->page_addr[1] != -1) {
- last_tb = NULL;
- }
-#endif
- /* See if we can patch the calling TB. */
- if (last_tb) {
- tb_add_jump(last_tb, tb_exit, tb);
- }
- return tb;
-}
-
static inline bool cpu_handle_halt(CPUState *cpu)
{
+#ifndef CONFIG_USER_ONLY
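+    /* Halt handling is only relevant for system emulation. */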
if (cpu->halted) {
-#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
+#if defined(TARGET_I386)
if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
X86CPU *x86_cpu = X86_CPU(cpu);
qemu_mutex_lock_iothread();
cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
qemu_mutex_unlock_iothread();
}
-#endif
+#endif /* TARGET_I386 */
if (!cpu_has_work(cpu)) {
return true;
}
cpu->halted = 0;
}
+#endif /* !CONFIG_USER_ONLY */
return false;
}
if (replay_has_exception()
&& cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
/* Execute just one insn to trigger exception pending in the log */
- cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) | 1;
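+        /* CF_NOIRQ: don't let an interrupt preempt the replayed exception. */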
+ cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
+ | CF_NOIRQ | 1;
}
#endif
return false;
loop */
#if defined(TARGET_I386)
CPUClass *cc = CPU_GET_CLASS(cpu);
- cc->tcg_ops->do_interrupt(cpu);
-#endif
+ cc->tcg_ops->fake_user_interrupt(cpu);
+#endif /* TARGET_I386 */
*ret = cpu->exception_index;
cpu->exception_index = -1;
return true;
return false;
}
+#ifndef CONFIG_USER_ONLY
/*
* CPU_INTERRUPT_POLL is a virtual event which gets converted into a
* "real" interrupt event later. It does not need to be recorded for
return true;
#endif
}
+#endif /* !CONFIG_USER_ONLY */
static inline bool cpu_handle_interrupt(CPUState *cpu,
TranslationBlock **last_tb)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
+ /*
+ * If we have requested custom cflags with CF_NOIRQ we should
+ * skip checking here. Any pending interrupts will get picked up
+ * by the next TB we execute under normal cflags.
+ */
+ if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) {
+ return false;
+ }
/* Clear the interrupt flag now since we're processing
* cpu->interrupt_request and cpu->exit_request.
qemu_mutex_unlock_iothread();
return true;
}
+#if !defined(CONFIG_USER_ONLY)
if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
/* Do nothing */
} else if (interrupt_request & CPU_INTERRUPT_HALT) {
qemu_mutex_unlock_iothread();
return true;
}
-#endif
+#endif /* !TARGET_I386 */
/* The target hook has 3 exit conditions:
False when the interrupt isn't processed,
True when it is, and we should restart on a new TB,
           and exit via longjmp through cpu_loop_exit. */
else {
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
if (cc->tcg_ops->cpu_exec_interrupt &&
cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
if (need_replay_interrupt(interrupt_request)) {
* raised when single-stepping so that GDB doesn't miss the
* next instruction.
*/
- cpu->exception_index =
- (cpu->singlestep_enabled ? EXCP_DEBUG : -1);
+ if (unlikely(cpu->singlestep_enabled)) {
+ cpu->exception_index = EXCP_DEBUG;
+ qemu_mutex_unlock_iothread();
+ return true;
+ }
+ cpu->exception_index = -1;
*last_tb = NULL;
}
/* The target hook may have updated the 'cpu->interrupt_request';
* reload the 'interrupt_request' value */
interrupt_request = cpu->interrupt_request;
}
+#endif /* !CONFIG_USER_ONLY */
if (interrupt_request & CPU_INTERRUPT_EXITTB) {
cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
/* ensure that no TB jump will be modified as
}
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
+ target_ulong pc,
TranslationBlock **last_tb, int *tb_exit)
{
int32_t insns_left;
- trace_exec_tb(tb, tb->pc);
+ trace_exec_tb(tb, pc);
tb = cpu_tb_exec(cpu, tb, tb_exit);
if (*tb_exit != TB_EXIT_REQUESTED) {
*last_tb = tb;
/* Ensure global icount has gone forward */
icount_update(cpu);
/* Refill decrementer and continue execution. */
- insns_left = MIN(CF_COUNT_MASK, cpu->icount_budget);
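+        /* icount_decr.u16.low is only 16 bits wide, hence the 0xffff cap. */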
+ insns_left = MIN(0xffff, cpu->icount_budget);
cpu_neg(cpu)->icount_decr.u16.low = insns_left;
cpu->icount_extra = cpu->icount_budget - insns_left;
* execute we need to ensure we find/generate a TB with exactly
* insns_left instructions in it.
*/
- if (!cpu->icount_extra && insns_left > 0 && insns_left < tb->icount) {
+ if (insns_left > 0 && insns_left < tb->icount) {
+ assert(insns_left <= CF_COUNT_MASK);
+ assert(cpu->icount_extra == 0);
cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left;
}
#endif
int cpu_exec(CPUState *cpu)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
int ret;
SyncClocks sc = { 0 };
* that we support, but is still unfixed in clang:
* https://bugs.llvm.org/show_bug.cgi?id=21183
*
- * Reload essential local variables here for those compilers.
+ * Reload an essential local variable here for those compilers.
* Newer versions of gcc would complain about this code (-Wclobbered),
* so we only perform the workaround for clang.
*/
cpu = current_cpu;
- cc = CPU_GET_CLASS(cpu);
#else
- /*
- * Non-buggy compilers preserve these locals; assert that
- * they have the correct value.
- */
+ /* Non-buggy compilers preserve this; assert the correct value. */
g_assert(cpu == current_cpu);
- g_assert(cc == CPU_GET_CLASS(cpu));
#endif
#ifndef CONFIG_SOFTMMU
- tcg_debug_assert(!have_mmap_lock());
+ clear_helper_retaddr();
+ if (have_mmap_lock()) {
+ mmap_unlock();
+ }
#endif
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
int tb_exit = 0;
while (!cpu_handle_interrupt(cpu, &last_tb)) {
- uint32_t cflags = cpu->cflags_next_tb;
TranslationBlock *tb;
-
- /* When requested, use an exact setting for cflags for the next
- execution. This is used for icount, precise smc, and stop-
- after-access watchpoints. Since this request should never
- have CF_INVALID set, -1 is a convenient invalid value that
- does not require tcg headers for cpu_common_reset. */
+ target_ulong cs_base, pc;
+ uint32_t flags, cflags;
+
+ cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
+
+ /*
+ * When requested, use an exact setting for cflags for the next
+ * execution. This is used for icount, precise smc, and stop-
+ * after-access watchpoints. Since this request should never
+ * have CF_INVALID set, -1 is a convenient invalid value that
+ * does not require tcg headers for cpu_common_reset.
+ */
+ cflags = cpu->cflags_next_tb;
if (cflags == -1) {
cflags = curr_cflags(cpu);
} else {
cpu->cflags_next_tb = -1;
}
- tb = tb_find(cpu, last_tb, tb_exit, cflags);
- cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
+ if (check_for_breakpoints(cpu, pc, &cflags)) {
+ break;
+ }
+
+ tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
+ if (tb == NULL) {
+ uint32_t h;
+
+ mmap_lock();
+ tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+ mmap_unlock();
+ /*
+             * Add the TB to the virtual-pc hash table
+             * for fast lookup.
+ */
+ h = tb_jmp_cache_hash_func(pc);
+ tb_jmp_cache_set(cpu->tb_jmp_cache, h, tb, pc);
+ }
+
+#ifndef CONFIG_USER_ONLY
+ /*
+ * We don't take care of direct jumps when address mapping
+ * changes in system emulation. So it's not safe to make a
+ * direct jump to a TB spanning two pages because the mapping
+ * for the second page can change.
+ */
+ if (tb->page_addr[1] != -1) {
+ last_tb = NULL;
+ }
+#endif
+ /* See if we can patch the calling TB. */
+ if (last_tb) {
+ tb_add_jump(last_tb, tb_exit, tb);
+ }
+
+ cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);
+
/* Try to align the host and virtual clocks
       if the guest is running ahead */
align_clocks(&sc, cpu);
#ifndef CONFIG_USER_ONLY
-void dump_drift_info(void)
+static void dump_drift_info(GString *buf)
{
if (!icount_enabled()) {
return;
}
- qemu_printf("Host - Guest clock %"PRIi64" ms\n",
- (cpu_get_clock() - icount_get()) / SCALE_MS);
+ g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n",
+ (cpu_get_clock() - icount_get()) / SCALE_MS);
if (icount_align_option) {
- qemu_printf("Max guest delay %"PRIi64" ms\n",
- -max_delay / SCALE_MS);
- qemu_printf("Max guest advance %"PRIi64" ms\n",
- max_advance / SCALE_MS);
+ g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n",
+ -max_delay / SCALE_MS);
+ g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n",
+ max_advance / SCALE_MS);
} else {
- qemu_printf("Max guest delay NA\n");
- qemu_printf("Max guest advance NA\n");
+ g_string_append_printf(buf, "Max guest delay NA\n");
+ g_string_append_printf(buf, "Max guest advance NA\n");
+ }
+}
+
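+/* QMP: x-query-jit -- report TCG translation statistics as text. */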
+HumanReadableText *qmp_x_query_jit(Error **errp)
+{
+ g_autoptr(GString) buf = g_string_new("");
+
+ if (!tcg_enabled()) {
+ error_setg(errp, "JIT information is only available with accel=tcg");
+ return NULL;
+ }
+
+ dump_exec_info(buf);
+ dump_drift_info(buf);
+
+ return human_readable_text_from_str(buf);
+}
+
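+/* QMP: x-query-opcount -- report TCG opcode counters as text. */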
+HumanReadableText *qmp_x_query_opcount(Error **errp)
+{
+ g_autoptr(GString) buf = g_string_new("");
+
+ if (!tcg_enabled()) {
+ error_setg(errp, "Opcode count information is only available with accel=tcg");
+ return NULL;
}
+
+ tcg_dump_op_count(buf);
+
+ return human_readable_text_from_str(buf);
+}
+
+#ifdef CONFIG_PROFILER
+
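+/* Updated in the main loop under CONFIG_PROFILER; reset by each query. */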
+int64_t dev_time;
+
+HumanReadableText *qmp_x_query_profile(Error **errp)
+{
+ g_autoptr(GString) buf = g_string_new("");
+ static int64_t last_cpu_exec_time;
+ int64_t cpu_exec_time;
+ int64_t delta;
+
+ cpu_exec_time = tcg_cpu_exec_time();
+ delta = cpu_exec_time - last_cpu_exec_time;
+
+ g_string_append_printf(buf, "async time %" PRId64 " (%0.3f)\n",
+ dev_time, dev_time / (double)NANOSECONDS_PER_SECOND);
+ g_string_append_printf(buf, "qemu time %" PRId64 " (%0.3f)\n",
+ delta, delta / (double)NANOSECONDS_PER_SECOND);
+ last_cpu_exec_time = cpu_exec_time;
+ dev_time = 0;
+
+ return human_readable_text_from_str(buf);
}
+#else
+HumanReadableText *qmp_x_query_profile(Error **errp)
+{
+ error_setg(errp, "Internal profiler not compiled");
+ return NULL;
+}
+#endif
#endif /* !CONFIG_USER_ONLY */