*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
+#include "qemu/lockable.h"
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
+#include "sysemu/cpu-timers.h"
#include "qemu/main-loop.h"
+#include "qemu/notify.h"
#include "qemu/guest-random.h"
#include "exec/exec-all.h"
-#include "hw/boards.h"
-
+#include "tcg/startup.h"
#include "tcg-accel-ops.h"
#include "tcg-accel-ops-rr.h"
#include "tcg-accel-ops-icount.h"
*
* The kick timer is responsible for moving single threaded vCPU
* emulation on to the next vCPU. If more than one vCPU is running a
- * timer event with force a cpu->exit so the next vCPU can get
+ * timer event will force a cpu->exit so the next vCPU can get
* scheduled.
*
 * The timer is removed if all vCPUs are idle and restarted again once
 * idleness is complete.
 */
static QEMUTimer *rr_kick_vcpu_timer;
static CPUState *rr_current_cpu;
#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)

static inline int64_t rr_next_kick_time(void)
{
return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
}

/* Kick the currently round-robin scheduled vCPU to next */
static void rr_kick_next_cpu(void)
{
CPUState *cpu;
do {
- cpu = qatomic_mb_read(&rr_current_cpu);
+ cpu = qatomic_read(&rr_current_cpu);
if (cpu) {
cpu_exit(cpu);
}
- } while (cpu != qatomic_mb_read(&rr_current_cpu));
+ /* Finish kicking this cpu before reading again. */
+ smp_mb();
+ } while (cpu != qatomic_read(&rr_current_cpu));
}
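
rr_stop_kick_timer() and rr_start_kick_timer() are used below but fall outside these hunks. For orientation, here is a minimal sketch of how that timer machinery fits together, assuming QEMU's QEMU_CLOCK_VIRTUAL timer API (timer_new_ns/timer_mod/timer_pending/timer_del); the upstream bodies may differ in detail:

/* Timer callback (sketch): kick the current vCPU, then re-arm. */
static void rr_kick_thread(void *opaque)
{
    rr_kick_next_cpu();
    timer_mod(rr_kick_vcpu_timer, rr_next_kick_time());
}

static void rr_start_kick_timer(void)
{
    /* Only needed when more than one vCPU shares this thread. */
    if (!rr_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
        rr_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          rr_kick_thread, NULL);
    }
    if (rr_kick_vcpu_timer && !timer_pending(rr_kick_vcpu_timer)) {
        timer_mod(rr_kick_vcpu_timer, rr_next_kick_time());
    }
}

static void rr_stop_kick_timer(void)
{
    if (rr_kick_vcpu_timer && timer_pending(rr_kick_vcpu_timer)) {
        timer_del(rr_kick_vcpu_timer);
    }
}
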
static void rr_wait_io_event(void)
{
CPUState *cpu;
- while (all_cpu_threads_idle()) {
+ while (all_cpu_threads_idle() && replay_can_wait()) {
rr_stop_kick_timer();
- qemu_cond_wait_iothread(first_cpu->halt_cond);
+ qemu_cond_wait_bql(first_cpu->halt_cond);
}
rr_start_kick_timer();
    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

/*
 * Destroy any remaining vCPUs which have been unplugged and have
 * finished running
 */
static void rr_deal_with_unplugged_cpus(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
if (cpu->unplug && !cpu_can_run(cpu)) {
- tcg_cpus_destroy(cpu);
+ tcg_cpu_destroy(cpu);
break;
}
}
}
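+/*
+ * Force-RCU notifier: kick the vCPU currently scheduled on this thread
+ * so it drops out of the execution loop and a stalled RCU grace period
+ * can make progress.
+ */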
+static void rr_force_rcu(Notifier *notify, void *data)
+{
+ rr_kick_next_cpu();
+}
+
+/*
+ * Calculate the number of CPUs that we will process in a single iteration of
+ * the main CPU thread loop so that we can fairly distribute the instruction
+ * count across CPUs.
+ *
+ * The CPU count is cached based on the CPU list generation ID to avoid
+ * iterating the list every time.
+ */
+static int rr_cpu_count(void)
+{
+ static unsigned int last_gen_id = ~0;
+ static int cpu_count;
+ CPUState *cpu;
+
+ QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
+
+ if (cpu_list_generation_id_get() != last_gen_id) {
+ cpu_count = 0;
+ CPU_FOREACH(cpu) {
+ ++cpu_count;
+ }
+ last_gen_id = cpu_list_generation_id_get();
+ }
+
+ return cpu_count;
+}
+
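The cached count feeds icount_percpu_budget() in the main loop below, so the instructions allowed before the next QEMU_CLOCK_VIRTUAL deadline are shared evenly between vCPUs. Purely as an illustration of that split (a hypothetical helper, not the upstream implementation):

/* Hypothetical sketch: divide an instruction budget evenly, never zero. */
static int64_t example_percpu_budget(int64_t total_insns, int cpu_count)
{
    int64_t slice = total_insns / cpu_count;

    /* A zero slice would starve every vCPU; fall back to the full budget. */
    return slice ? slice : total_insns;
}
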
/*
* In the single-threaded case each vCPU is simulated in turn. If
 * there is more than a single vCPU we create a simple timer to kick
 * the vCPU on to the next one, as described for the kick timer above.
 */
static void *rr_cpu_thread_fn(void *arg)
{
+ Notifier force_rcu;
CPUState *cpu = arg;
assert(tcg_enabled());
rcu_register_thread();
+ force_rcu.notify = rr_force_rcu;
+ rcu_add_force_rcu_notifier(&force_rcu);
tcg_register_thread();
- qemu_mutex_lock_iothread();
+ bql_lock();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
- cpu->can_do_io = 1;
+ cpu->neg.can_do_io = true;
cpu_thread_signal_created(cpu);
qemu_guest_random_seed_thread_part2(cpu->random_seed);
/* wait for initial kick-off after machine start */
while (first_cpu->stopped) {
- qemu_cond_wait_iothread(first_cpu->halt_cond);
+ qemu_cond_wait_bql(first_cpu->halt_cond);
/* process any pending work */
CPU_FOREACH(cpu) {
            current_cpu = cpu;
            qemu_wait_io_event_common(cpu);
        }
    }

    rr_start_kick_timer();

    cpu = first_cpu;

    /* process any pending work */
    cpu->exit_request = 1;
while (1) {
- qemu_mutex_unlock_iothread();
+ /* Only used for icount_enabled() */
+ int64_t cpu_budget = 0;
+
+ bql_unlock();
replay_mutex_lock();
- qemu_mutex_lock_iothread();
+ bql_lock();
if (icount_enabled()) {
+ int cpu_count = rr_cpu_count();
+
/* Account partial waits to QEMU_CLOCK_VIRTUAL. */
icount_account_warp_timer();
/*
* waking up the I/O thread and waiting for completion.
*/
icount_handle_deadline();
+
+ cpu_budget = icount_percpu_budget(cpu_count);
}
replay_mutex_unlock();
        if (!cpu) {
            cpu = first_cpu;
        }
while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
-            qatomic_mb_set(&rr_current_cpu, cpu);
+            /* Store rr_current_cpu before evaluating cpu_can_run(). */
+            qatomic_set_mb(&rr_current_cpu, cpu);
current_cpu = cpu;
            qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                              (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

if (cpu_can_run(cpu)) {
int r;
- qemu_mutex_unlock_iothread();
+ bql_unlock();
if (icount_enabled()) {
- icount_prepare_for_run(cpu);
+ icount_prepare_for_run(cpu, cpu_budget);
}
r = tcg_cpus_exec(cpu);
if (icount_enabled()) {
icount_process_data(cpu);
}
- qemu_mutex_lock_iothread();
+ bql_lock();
if (r == EXCP_DEBUG) {
cpu_handle_guest_debug(cpu);
break;
} else if (r == EXCP_ATOMIC) {
- qemu_mutex_unlock_iothread();
+ bql_unlock();
cpu_exec_step_atomic(cpu);
- qemu_mutex_lock_iothread();
+ bql_lock();
break;
}
            } else if (cpu->stop) {
                if (cpu->unplug) {
                    cpu = CPU_NEXT(cpu);
                }
                break;
            }

            cpu = CPU_NEXT(cpu);
} /* while (cpu && !cpu->exit_request).. */
- /* Does not need qatomic_mb_set because a spurious wakeup is okay. */
+ /* Does not need a memory barrier because a spurious wakeup is okay. */
qatomic_set(&rr_current_cpu, NULL);
if (cpu && cpu->exit_request) {
- qatomic_mb_set(&cpu->exit_request, 0);
+ qatomic_set_mb(&cpu->exit_request, 0);
}
        if (icount_enabled() && all_cpu_threads_idle()) {
            /*
             * When all cpus are sleeping (e.g. in WFI), wake the main
             * loop so the warp timer can be started; otherwise we could
             * deadlock here.
             */
            qemu_notify_event();
        }

        rr_wait_io_event();
        rr_deal_with_unplugged_cpus();
}
+ rcu_remove_force_rcu_notifier(&force_rcu);
rcu_unregister_thread();
return NULL;
}
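
For context, the single round-robin thread running rr_cpu_thread_fn() is created only once, by rr_start_vcpu_thread() below; every vCPU after the first simply shares it. The creation call, elided by the hunk boundaries, typically looks like this (a sketch, assuming the usual QemuThread helpers):

        /* Sketch: one joinable thread runs every vCPU in turn. */
        snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
        qemu_thread_create(cpu->thread, thread_name, rr_cpu_thread_fn,
                           cpu, QEMU_THREAD_JOINABLE);
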
void rr_start_vcpu_thread(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];
    static QemuThread *single_tcg_cpu_thread;
    static QemuCond *single_tcg_halt_cond;
g_assert(tcg_enabled());
- parallel_cpus = false;
+ tcg_cpu_init_cflags(cpu, false);
if (!single_tcg_cpu_thread) {
- cpu->thread = g_malloc0(sizeof(QemuThread));
- cpu->halt_cond = g_malloc0(sizeof(QemuCond));
+ cpu->thread = g_new0(QemuThread, 1);
+ cpu->halt_cond = g_new0(QemuCond, 1);
qemu_cond_init(cpu->halt_cond);
/* share a single thread for all cpus with TCG */
single_tcg_halt_cond = cpu->halt_cond;
single_tcg_cpu_thread = cpu->thread;
-#ifdef _WIN32
- cpu->hThread = qemu_thread_get_handle(cpu->thread);
-#endif
} else {
/* we share the thread */
cpu->thread = single_tcg_cpu_thread;
cpu->halt_cond = single_tcg_halt_cond;
cpu->thread_id = first_cpu->thread_id;
- cpu->can_do_io = 1;
+        cpu->neg.can_do_io = true;
cpu->created = true;
}
}