diff --git a/cpus.c b/cpus.c
index 3978f63d8f..e58e7ab0f6 100644
--- a/cpus.c
+++ b/cpus.c
@@ -31,6 +31,7 @@
 #include "qapi/qapi-events-run-state.h"
 #include "qapi/qmp/qerror.h"
 #include "qemu/error-report.h"
+#include "qemu/qemu-print.h"
 #include "sysemu/sysemu.h"
 #include "sysemu/block-backend.h"
 #include "exec/gdbstub.h"
@@ -1009,7 +1010,7 @@ void hw_error(const char *fmt, ...)
     fprintf(stderr, "\n");
     CPU_FOREACH(cpu) {
         fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
-        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
+        cpu_dump_state(cpu, stderr, CPU_DUMP_FPU);
     }
     va_end(ap);
     abort();
@@ -1220,16 +1221,20 @@ static void qemu_wait_io_event_common(CPUState *cpu)
     process_queued_cpu_work(cpu);
 }
 
-static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
+static void qemu_tcg_rr_wait_io_event(void)
 {
+    CPUState *cpu;
+
     while (all_cpu_threads_idle()) {
         stop_tcg_kick_timer();
-        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
+        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
     }
 
     start_tcg_kick_timer();
 
-    qemu_wait_io_event_common(cpu);
+    CPU_FOREACH(cpu) {
+        qemu_wait_io_event_common(cpu);
+    }
 }
 
 static void qemu_wait_io_event(CPUState *cpu)
@@ -1329,6 +1334,7 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
         qemu_wait_io_event(cpu);
     } while (!cpu->unplug);
 
+    qemu_mutex_unlock_iothread();
     rcu_unregister_thread();
     return NULL;
 #endif
@@ -1554,7 +1560,15 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
             atomic_mb_set(&cpu->exit_request, 0);
         }
 
-        qemu_tcg_rr_wait_io_event(cpu ? cpu : first_cpu);
+        if (use_icount && all_cpu_threads_idle()) {
+            /*
+             * When all cpus are sleeping (e.g in WFI), to avoid a deadlock
+             * in the main_loop, wake it up in order to start the warp timer.
+             */
+            qemu_notify_event();
+        }
+
+        qemu_tcg_rr_wait_io_event();
         deal_with_unplugged_cpus();
     }
 
@@ -1766,7 +1780,7 @@ static void qemu_cpu_kick_thread(CPUState *cpu)
     }
     cpu->thread_kicked = true;
    err = pthread_kill(cpu->thread->thread, SIG_IPI);
-    if (err) {
+    if (err && err != ESRCH) {
         fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
         exit(1);
     }
@@ -2088,7 +2102,8 @@ void qemu_init_vcpu(CPUState *cpu)
 void cpu_stop_current(void)
 {
     if (current_cpu) {
-        qemu_cpu_stop(current_cpu, true);
+        current_cpu->stop = true;
+        cpu_exit(current_cpu);
     }
 }
 
@@ -2166,11 +2181,11 @@ int vm_stop_force_state(RunState state)
     }
 }
 
-void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
+void list_cpus(const char *optarg)
 {
     /* XXX: implement xxx_cpu_list for targets that still miss it */
 #if defined(cpu_list)
-    cpu_list(f, cpu_fprintf);
+    cpu_list();
 #endif
 }
 
@@ -2440,19 +2455,21 @@ void qmp_inject_nmi(Error **errp)
     nmi_monitor_handle(monitor_get_cpu_index(), errp);
 }
 
-void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
+void dump_drift_info(void)
 {
     if (!use_icount) {
         return;
     }
 
-    cpu_fprintf(f, "Host - Guest clock %"PRIi64" ms\n",
+    qemu_printf("Host - Guest clock %"PRIi64" ms\n",
                 (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
     if (icount_align_option) {
-        cpu_fprintf(f, "Max guest delay %"PRIi64" ms\n", -max_delay/SCALE_MS);
-        cpu_fprintf(f, "Max guest advance %"PRIi64" ms\n", max_advance/SCALE_MS);
+        qemu_printf("Max guest delay %"PRIi64" ms\n",
+                    -max_delay / SCALE_MS);
+        qemu_printf("Max guest advance %"PRIi64" ms\n",
+                    max_advance / SCALE_MS);
     } else {
-        cpu_fprintf(f, "Max guest delay NA\n");
-        cpu_fprintf(f, "Max guest advance NA\n");
+        qemu_printf("Max guest delay NA\n");
+        qemu_printf("Max guest advance NA\n");
    }
 }
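
For context, several hunks above follow the same conversion pattern: cpu_dump_state(), list_cpus()/cpu_list(), and dump_drift_info() drop their FILE */fprintf_function parameters, and output goes through qemu_printf() from "qemu/qemu-print.h", which writes to the current monitor when one is active and to stdout otherwise. Below is a minimal sketch of that before/after call-site pattern; show_status_old(), show_status_new(), and get_status() are made-up names for illustration and are not part of this patch, and the "qemu/fprintf-fn.h" include reflects the pre-conversion API as I understand it.

    /*
     * Hypothetical sketch only, not from this patch: the call-site
     * pattern that the qemu_printf() conversion replaces.
     */
    #include "qemu/osdep.h"
    #include "qemu/fprintf-fn.h"    /* fprintf_function (old-style API) */
    #include "qemu/qemu-print.h"    /* qemu_printf() */

    static int get_status(void)
    {
        return 0;   /* placeholder value for the sketch */
    }

    /* Before: every printer is handed a stream plus a printf-like callback. */
    static void show_status_old(FILE *f, fprintf_function cpu_fprintf)
    {
        cpu_fprintf(f, "status: %d\n", get_status());
    }

    /* After: the destination is implicit; qemu_printf() routes output to
     * the current monitor if one is active, otherwise to stdout. */
    static void show_status_new(void)
    {
        qemu_printf("status: %d\n", get_status());
    }

The design benefit is that callers no longer need to thread a stream and callback through every call chain that might print, which is what allows signatures such as dump_drift_info(void) above to lose their parameters entirely.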