diff --git a/cpus.c b/cpus.c
index c857ad2957..68fdbc40b9 100644
--- a/cpus.c
+++ b/cpus.c
@@ -181,10 +181,7 @@ static bool check_tcg_memory_orders_compatible(void)
 
 static bool default_mttcg_enabled(void)
 {
-    QemuOpts *icount_opts = qemu_find_opts_singleton("icount");
-    const char *rr = qemu_opt_get(icount_opts, "rr");
-
-    if (rr || TCG_OVERSIZED_GUEST) {
+    if (use_icount || TCG_OVERSIZED_GUEST) {
         return false;
     } else {
 #ifdef TARGET_SUPPORTS_MTTCG
@@ -202,11 +199,17 @@ void qemu_tcg_configure(QemuOpts *opts, Error **errp)
         if (strcmp(t, "multi") == 0) {
             if (TCG_OVERSIZED_GUEST) {
                 error_setg(errp, "No MTTCG when guest word size > hosts");
+            } else if (use_icount) {
+                error_setg(errp, "No MTTCG when icount is enabled");
             } else {
+#ifndef TARGET_SUPPORTS_MTTCG
+                error_report("Guest not yet converted to MTTCG - "
+                             "you may get unexpected results");
+#endif
                 if (!check_tcg_memory_orders_compatible()) {
                     error_report("Guest expects a stronger memory ordering "
                                  "than the host provides");
-                    error_printf("This may cause strange/hard to debug errors");
+                    error_printf("This may cause strange/hard to debug errors\n");
                 }
                 mttcg_enabled = true;
             }
@@ -797,6 +800,27 @@ static void qemu_cpu_kick_rr_cpu(void)
     } while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
 }
 
+static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
+{
+}
+
+void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
+{
+    if (!use_icount || type != QEMU_CLOCK_VIRTUAL) {
+        qemu_notify_event();
+        return;
+    }
+
+    if (!qemu_in_vcpu_thread() && first_cpu) {
+        /* qemu_cpu_kick is not enough to kick a halted CPU out of
+         * qemu_tcg_wait_io_event.  async_run_on_cpu, instead,
+         * causes cpu_thread_is_idle to return false.  This way,
+         * handle_icount_deadline can run.
+         */
+        async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
+    }
+}
+
 static void kick_tcg_thread(void *opaque)
 {
     timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
@@ -1142,12 +1166,15 @@ static int64_t tcg_get_icount_limit(void)
 
 static void handle_icount_deadline(void)
 {
+    assert(qemu_in_vcpu_thread());
     if (use_icount) {
         int64_t deadline =
             qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
 
         if (deadline == 0) {
+            /* Wake up other AioContexts.  */
             qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
+            qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
         }
     }
 }
@@ -1260,6 +1287,11 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
         /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
         qemu_account_warp_timer();
 
+        /* Run the timers here.  This is much more efficient than
+         * waking up the I/O thread and waiting for completion.
+         */
+        handle_icount_deadline();
+
         if (!cpu) {
             cpu = first_cpu;
         }
@@ -1301,8 +1333,6 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
             atomic_mb_set(&cpu->exit_request, 0);
         }
 
-        handle_icount_deadline();
-
         qemu_tcg_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
         deal_with_unplugged_cpus();
     }
@@ -1314,8 +1344,9 @@ static void *qemu_hax_cpu_thread_fn(void *arg)
 {
     CPUState *cpu = arg;
     int r;
+
+    qemu_mutex_lock_iothread();
     qemu_thread_get_self(cpu->thread);
-    qemu_mutex_lock(&qemu_global_mutex);
 
     cpu->thread_id = qemu_get_thread_id();
     cpu->created = true;
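
For context on the qemu_timer_notify_cb hunk above: the reason the patch queues a do-nothing
work item instead of merely signalling the vCPU thread is that the thread re-evaluates
cpu_thread_is_idle() after every wakeup, and with no pending work it would go straight back
to sleep. The standalone pthreads sketch below illustrates only that general pattern; it is
not QEMU code, and the names thread_is_idle, kick_worker, and pending_work are hypothetical
stand-ins for cpu_thread_is_idle(), the async_run_on_cpu(first_cpu, do_nothing,
RUN_ON_CPU_NULL) call, and the queued work list.

/*
 * Sketch (not QEMU code): waking a worker that sleeps on a condition
 * variable requires changing its idle predicate, not just signalling.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int pending_work;         /* stand-in for queued run_on_cpu items */
static bool quit;

static bool thread_is_idle(void) /* analogue of cpu_thread_is_idle() */
{
    return pending_work == 0 && !quit;
}

static void *worker(void *arg)
{
    pthread_mutex_lock(&lock);
    while (!quit) {
        while (thread_is_idle()) {
            /* analogue of qemu_tcg_wait_io_event(): a bare signal wakes
             * us, but we re-check the predicate and sleep again. */
            pthread_cond_wait(&cond, &lock);
        }
        if (pending_work) {
            pending_work--;      /* "run" the queued no-op work item */
            printf("worker: kicked, deadline handling can run here\n");
        }
    }
    pthread_mutex_unlock(&lock);
    return NULL;
}

/* analogue of async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL) */
static void kick_worker(void)
{
    pthread_mutex_lock(&lock);
    pending_work++;              /* make thread_is_idle() return false */
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, worker, NULL);
    sleep(1);
    kick_worker();               /* actually gets the worker past its wait */
    sleep(1);

    pthread_mutex_lock(&lock);
    quit = true;
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&lock);
    pthread_join(t, NULL);
    return 0;
}

Without the pending_work increment, the broadcast would wake the worker only for it to
re-block immediately; that is the behaviour the patch comment describes for a plain
qemu_cpu_kick on a halted CPU.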