S: Maintained
F: softmmu/cpus.c
F: softmmu/watchpoint.c
-F: cpus-common.c
+F: cpu-common.c
+F: cpu-target.c
F: page-vary.c
F: page-vary-common.c
F: accel/tcg/
R: Philippe Mathieu-Daudé <philmd@linaro.org>
R: Yanan Wang <wangyanan55@huawei.com>
S: Supported
-F: cpu.c
F: hw/core/cpu.c
F: hw/core/machine-qmp-cmds.c
F: hw/core/machine.c
--- /dev/null
+++ b/cpu-common.c
+/*
+ * CPU thread main loop - common bits for user and system mode emulation
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "exec/cpu-common.h"
+#include "hw/core/cpu.h"
+#include "sysemu/cpus.h"
+#include "qemu/lockable.h"
+#include "trace/trace-root.h"
+
+QemuMutex qemu_cpu_list_lock;
+static QemuCond exclusive_cond;
+static QemuCond exclusive_resume;
+static QemuCond qemu_work_cond;
+
+/* >= 1 if a thread is inside start_exclusive/end_exclusive. Written
+ * under qemu_cpu_list_lock, read with atomic operations.
+ */
+static int pending_cpus;
+
+void qemu_init_cpu_list(void)
+{
+ /* This is needed because qemu_init_cpu_list is also called by the
+ * child process in a fork. */
+ pending_cpus = 0;
+
+ qemu_mutex_init(&qemu_cpu_list_lock);
+ qemu_cond_init(&exclusive_cond);
+ qemu_cond_init(&exclusive_resume);
+ qemu_cond_init(&qemu_work_cond);
+}
+
+void cpu_list_lock(void)
+{
+ qemu_mutex_lock(&qemu_cpu_list_lock);
+}
+
+void cpu_list_unlock(void)
+{
+ qemu_mutex_unlock(&qemu_cpu_list_lock);
+}
+
+static bool cpu_index_auto_assigned;
+
+static int cpu_get_free_index(void)
+{
+ CPUState *some_cpu;
+ int max_cpu_index = 0;
+
+ cpu_index_auto_assigned = true;
+ CPU_FOREACH(some_cpu) {
+ if (some_cpu->cpu_index >= max_cpu_index) {
+ max_cpu_index = some_cpu->cpu_index + 1;
+ }
+ }
+ return max_cpu_index;
+}
+
+CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
+static unsigned int cpu_list_generation_id;
+
+unsigned int cpu_list_generation_id_get(void)
+{
+ return cpu_list_generation_id;
+}
+
+void cpu_list_add(CPUState *cpu)
+{
+ QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
+ if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
+ cpu->cpu_index = cpu_get_free_index();
+ assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
+ } else {
+ assert(!cpu_index_auto_assigned);
+ }
+ QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node);
+ cpu_list_generation_id++;
+}
+
+void cpu_list_remove(CPUState *cpu)
+{
+ QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
+ if (!QTAILQ_IN_USE(cpu, node)) {
+ /* there is nothing to undo since cpu_exec_init() hasn't been called */
+ return;
+ }
+
+ QTAILQ_REMOVE_RCU(&cpus, cpu, node);
+ cpu->cpu_index = UNASSIGNED_CPU_INDEX;
+ cpu_list_generation_id++;
+}
+
+CPUState *qemu_get_cpu(int index)
+{
+ CPUState *cpu;
+
+ CPU_FOREACH(cpu) {
+ if (cpu->cpu_index == index) {
+ return cpu;
+ }
+ }
+
+ return NULL;
+}
+
+/* current CPU in the current thread. It is only valid inside cpu_exec() */
+__thread CPUState *current_cpu;
+
+struct qemu_work_item {
+ QSIMPLEQ_ENTRY(qemu_work_item) node;
+ run_on_cpu_func func;
+ run_on_cpu_data data;
+ bool free, exclusive, done;
+};
+
+static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
+{
+ qemu_mutex_lock(&cpu->work_mutex);
+ QSIMPLEQ_INSERT_TAIL(&cpu->work_list, wi, node);
+ wi->done = false;
+ qemu_mutex_unlock(&cpu->work_mutex);
+
+ qemu_cpu_kick(cpu);
+}
+
+void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
+ QemuMutex *mutex)
+{
+ struct qemu_work_item wi;
+
+ if (qemu_cpu_is_self(cpu)) {
+ func(cpu, data);
+ return;
+ }
+
+ wi.func = func;
+ wi.data = data;
+ wi.done = false;
+ wi.free = false;
+ wi.exclusive = false;
+
+ queue_work_on_cpu(cpu, &wi);
+ while (!qatomic_load_acquire(&wi.done)) {
+ CPUState *self_cpu = current_cpu;
+
+ qemu_cond_wait(&qemu_work_cond, mutex);
+ current_cpu = self_cpu;
+ }
+}
+
+void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
+{
+ struct qemu_work_item *wi;
+
+ wi = g_new0(struct qemu_work_item, 1);
+ wi->func = func;
+ wi->data = data;
+ wi->free = true;
+
+ queue_work_on_cpu(cpu, wi);
+}
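+
+/*
+ * Usage sketch (illustrative; the callback name is hypothetical).
+ * Callers normally go through the run_on_cpu()/async_run_on_cpu()
+ * wrappers rather than calling do_run_on_cpu() directly:
+ *
+ *     static void set_halted(CPUState *cpu, run_on_cpu_data data)
+ *     {
+ *         cpu->halted = data.host_int;
+ *     }
+ *
+ *     run_on_cpu(cpu, set_halted, RUN_ON_CPU_HOST_INT(1));
+ *     async_run_on_cpu(cpu, set_halted, RUN_ON_CPU_HOST_INT(0));
+ *
+ * run_on_cpu() blocks until the item has run on the target vCPU,
+ * while async_run_on_cpu() queues the item and returns immediately.
+ */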
+
+/* Wait for pending exclusive operations to complete. The CPU list lock
+ must be held. */
+static inline void exclusive_idle(void)
+{
+ while (pending_cpus) {
+ qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_lock);
+ }
+}
+
+/* Start an exclusive operation.
+ Must only be called from outside cpu_exec. */
+void start_exclusive(void)
+{
+ CPUState *other_cpu;
+ int running_cpus;
+
+ if (current_cpu->exclusive_context_count) {
+ current_cpu->exclusive_context_count++;
+ return;
+ }
+
+ qemu_mutex_lock(&qemu_cpu_list_lock);
+ exclusive_idle();
+
+ /* Make all other cpus stop executing. */
+ qatomic_set(&pending_cpus, 1);
+
+ /* Write pending_cpus before reading other_cpu->running. */
+ smp_mb();
+ running_cpus = 0;
+ CPU_FOREACH(other_cpu) {
+ if (qatomic_read(&other_cpu->running)) {
+ other_cpu->has_waiter = true;
+ running_cpus++;
+ qemu_cpu_kick(other_cpu);
+ }
+ }
+
+ qatomic_set(&pending_cpus, running_cpus + 1);
+ while (pending_cpus > 1) {
+ qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
+ }
+
+    /* We can release the mutex; no one will enter another exclusive
+ * section until end_exclusive resets pending_cpus to 0.
+ */
+ qemu_mutex_unlock(&qemu_cpu_list_lock);
+
+ current_cpu->exclusive_context_count = 1;
+}
+
+/* Finish an exclusive operation. */
+void end_exclusive(void)
+{
+ current_cpu->exclusive_context_count--;
+ if (current_cpu->exclusive_context_count) {
+ return;
+ }
+
+ qemu_mutex_lock(&qemu_cpu_list_lock);
+ qatomic_set(&pending_cpus, 0);
+ qemu_cond_broadcast(&exclusive_resume);
+ qemu_mutex_unlock(&qemu_cpu_list_lock);
+}
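+
+/*
+ * Typical usage pattern (a sketch, not a prescription): quiesce all
+ * other vCPUs, mutate state that every vCPU must observe atomically,
+ * then let them resume.
+ *
+ *     start_exclusive();
+ *     ... update state shared with other vCPUs ...
+ *     end_exclusive();
+ *
+ * async_safe_run_on_cpu() below packages this pattern as a queued
+ * work item.
+ */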
+
+/* Wait for exclusive ops to finish, and begin cpu execution. */
+void cpu_exec_start(CPUState *cpu)
+{
+ qatomic_set(&cpu->running, true);
+
+ /* Write cpu->running before reading pending_cpus. */
+ smp_mb();
+
+ /* 1. start_exclusive saw cpu->running == true and pending_cpus >= 1.
+ * After taking the lock we'll see cpu->has_waiter == true and run---not
+ * for long because start_exclusive kicked us. cpu_exec_end will
+ * decrement pending_cpus and signal the waiter.
+ *
+ * 2. start_exclusive saw cpu->running == false but pending_cpus >= 1.
+ * This includes the case when an exclusive item is running now.
+ * Then we'll see cpu->has_waiter == false and wait for the item to
+ * complete.
+ *
+ * 3. pending_cpus == 0. Then start_exclusive is definitely going to
+ * see cpu->running == true, and it will kick the CPU.
+ */
+ if (unlikely(qatomic_read(&pending_cpus))) {
+ QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
+ if (!cpu->has_waiter) {
+ /* Not counted in pending_cpus, let the exclusive item
+ * run. Since we have the lock, just set cpu->running to true
+ * while holding it; no need to check pending_cpus again.
+ */
+ qatomic_set(&cpu->running, false);
+ exclusive_idle();
+ /* Now pending_cpus is zero. */
+ qatomic_set(&cpu->running, true);
+ } else {
+ /* Counted in pending_cpus, go ahead and release the
+ * waiter at cpu_exec_end.
+ */
+ }
+ }
+}
+
+/* Mark cpu as not executing, and release pending exclusive ops. */
+void cpu_exec_end(CPUState *cpu)
+{
+ qatomic_set(&cpu->running, false);
+
+ /* Write cpu->running before reading pending_cpus. */
+ smp_mb();
+
+ /* 1. start_exclusive saw cpu->running == true. Then it will increment
+ * pending_cpus and wait for exclusive_cond. After taking the lock
+ * we'll see cpu->has_waiter == true.
+ *
+ * 2. start_exclusive saw cpu->running == false but here pending_cpus >= 1.
+ * This includes the case when an exclusive item started after setting
+ * cpu->running to false and before we read pending_cpus. Then we'll see
+ * cpu->has_waiter == false and not touch pending_cpus. The next call to
+ * cpu_exec_start will run exclusive_idle if still necessary, thus waiting
+ * for the item to complete.
+ *
+ * 3. pending_cpus == 0. Then start_exclusive is definitely going to
+ * see cpu->running == false, and it can ignore this CPU until the
+ * next cpu_exec_start.
+ */
+ if (unlikely(qatomic_read(&pending_cpus))) {
+ QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
+ if (cpu->has_waiter) {
+ cpu->has_waiter = false;
+ qatomic_set(&pending_cpus, pending_cpus - 1);
+ if (pending_cpus == 1) {
+ qemu_cond_signal(&exclusive_cond);
+ }
+ }
+ }
+}
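+
+/*
+ * cpu_exec_start() and cpu_exec_end() bracket the window in which a
+ * vCPU thread executes guest code, roughly (simplified sketch of a
+ * TCG vCPU loop):
+ *
+ *     cpu_exec_start(cpu);
+ *     ret = cpu_exec(cpu);
+ *     cpu_exec_end(cpu);
+ *
+ * start_exclusive() only has to wait for vCPUs inside this window.
+ */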
+
+void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
+ run_on_cpu_data data)
+{
+ struct qemu_work_item *wi;
+
+ wi = g_new0(struct qemu_work_item, 1);
+ wi->func = func;
+ wi->data = data;
+ wi->free = true;
+ wi->exclusive = true;
+
+ queue_work_on_cpu(cpu, wi);
+}
+
+void process_queued_cpu_work(CPUState *cpu)
+{
+ struct qemu_work_item *wi;
+
+ qemu_mutex_lock(&cpu->work_mutex);
+ if (QSIMPLEQ_EMPTY(&cpu->work_list)) {
+ qemu_mutex_unlock(&cpu->work_mutex);
+ return;
+ }
+ while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {
+ wi = QSIMPLEQ_FIRST(&cpu->work_list);
+ QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
+ qemu_mutex_unlock(&cpu->work_mutex);
+ if (wi->exclusive) {
+ /* Running work items outside the BQL avoids the following deadlock:
+ * 1) start_exclusive() is called with the BQL taken while another
+             * CPU is running; 2) cpu_exec in the other CPU tries to take the
+ * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
+ * neither CPU can proceed.
+ */
+ qemu_mutex_unlock_iothread();
+ start_exclusive();
+ wi->func(cpu, wi->data);
+ end_exclusive();
+ qemu_mutex_lock_iothread();
+ } else {
+ wi->func(cpu, wi->data);
+ }
+ qemu_mutex_lock(&cpu->work_mutex);
+ if (wi->free) {
+ g_free(wi);
+ } else {
+ qatomic_store_release(&wi->done, true);
+ }
+ }
+ qemu_mutex_unlock(&cpu->work_mutex);
+ qemu_cond_broadcast(&qemu_work_cond);
+}
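+
+/*
+ * Sketch of the exclusive work path (the callback name is
+ * hypothetical): the item runs between start_exclusive() and
+ * end_exclusive(), so it may safely modify state that other vCPUs
+ * could otherwise be reading or executing from.
+ *
+ *     static void do_flush(CPUState *cpu, run_on_cpu_data data)
+ *     {
+ *         ... every other vCPU is stopped while this runs ...
+ *     }
+ *
+ *     async_safe_run_on_cpu(cpu, do_flush, RUN_ON_CPU_NULL);
+ */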
+
+/* Add a breakpoint. */
+int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
+ CPUBreakpoint **breakpoint)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+ CPUBreakpoint *bp;
+
+ if (cc->gdb_adjust_breakpoint) {
+ pc = cc->gdb_adjust_breakpoint(cpu, pc);
+ }
+
+ bp = g_malloc(sizeof(*bp));
+
+ bp->pc = pc;
+ bp->flags = flags;
+
+ /* keep all GDB-injected breakpoints in front */
+ if (flags & BP_GDB) {
+ QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
+ } else {
+ QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
+ }
+
+ if (breakpoint) {
+ *breakpoint = bp;
+ }
+
+ trace_breakpoint_insert(cpu->cpu_index, pc, flags);
+ return 0;
+}
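+
+/*
+ * Example (illustrative): the gdbstub inserts breakpoints with
+ * BP_GDB, which keeps them ahead of guest-defined breakpoints in
+ * the list, and pairs this with cpu_breakpoint_remove() below:
+ *
+ *     cpu_breakpoint_insert(cpu, pc, BP_GDB, NULL);
+ *     ...
+ *     cpu_breakpoint_remove(cpu, pc, BP_GDB);
+ */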
+
+/* Remove a specific breakpoint. */
+int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+ CPUBreakpoint *bp;
+
+ if (cc->gdb_adjust_breakpoint) {
+ pc = cc->gdb_adjust_breakpoint(cpu, pc);
+ }
+
+ QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
+ if (bp->pc == pc && bp->flags == flags) {
+ cpu_breakpoint_remove_by_ref(cpu, bp);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+/* Remove a specific breakpoint by reference. */
+void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *bp)
+{
+ QTAILQ_REMOVE(&cpu->breakpoints, bp, entry);
+
+ trace_breakpoint_remove(cpu->cpu_index, bp->pc, bp->flags);
+ g_free(bp);
+}
+
+/* Remove all matching breakpoints. */
+void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
+{
+ CPUBreakpoint *bp, *next;
+
+ QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
+ if (bp->flags & mask) {
+ cpu_breakpoint_remove_by_ref(cpu, bp);
+ }
+ }
+}
--- /dev/null
+++ b/cpu-target.c
+/*
+ * Target-specific parts of the CPU object
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+
+#include "exec/target_page.h"
+#include "hw/qdev-core.h"
+#include "hw/qdev-properties.h"
+#include "qemu/error-report.h"
+#include "migration/vmstate.h"
+#ifdef CONFIG_USER_ONLY
+#include "qemu.h"
+#else
+#include "hw/core/sysemu-cpu-ops.h"
+#include "exec/address-spaces.h"
+#endif
+#include "sysemu/cpus.h"
+#include "sysemu/tcg.h"
+#include "exec/replay-core.h"
+#include "exec/cpu-common.h"
+#include "exec/exec-all.h"
+#include "exec/tb-flush.h"
+#include "exec/translate-all.h"
+#include "exec/log.h"
+#include "hw/core/accel-cpu.h"
+#include "trace/trace-root.h"
+#include "qemu/accel.h"
+#include "qemu/plugin.h"
+
+uintptr_t qemu_host_page_size;
+intptr_t qemu_host_page_mask;
+
+#ifndef CONFIG_USER_ONLY
+static int cpu_common_post_load(void *opaque, int version_id)
+{
+ CPUState *cpu = opaque;
+
+ /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
+ version_id is increased. */
+ cpu->interrupt_request &= ~0x01;
+ tlb_flush(cpu);
+
+ /* loadvm has just updated the content of RAM, bypassing the
+ * usual mechanisms that ensure we flush TBs for writes to
+ * memory we've translated code from. So we must flush all TBs,
+ * which will now be stale.
+ */
+ tb_flush(cpu);
+
+ return 0;
+}
+
+static int cpu_common_pre_load(void *opaque)
+{
+ CPUState *cpu = opaque;
+
+ cpu->exception_index = -1;
+
+ return 0;
+}
+
+static bool cpu_common_exception_index_needed(void *opaque)
+{
+ CPUState *cpu = opaque;
+
+ return tcg_enabled() && cpu->exception_index != -1;
+}
+
+static const VMStateDescription vmstate_cpu_common_exception_index = {
+ .name = "cpu_common/exception_index",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = cpu_common_exception_index_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32(exception_index, CPUState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool cpu_common_crash_occurred_needed(void *opaque)
+{
+ CPUState *cpu = opaque;
+
+ return cpu->crash_occurred;
+}
+
+static const VMStateDescription vmstate_cpu_common_crash_occurred = {
+ .name = "cpu_common/crash_occurred",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = cpu_common_crash_occurred_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_BOOL(crash_occurred, CPUState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+const VMStateDescription vmstate_cpu_common = {
+ .name = "cpu_common",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .pre_load = cpu_common_pre_load,
+ .post_load = cpu_common_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(halted, CPUState),
+ VMSTATE_UINT32(interrupt_request, CPUState),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_cpu_common_exception_index,
+ &vmstate_cpu_common_crash_occurred,
+ NULL
+ }
+};
+#endif
+
+void cpu_exec_realizefn(CPUState *cpu, Error **errp)
+{
+ /* cache the cpu class for the hotpath */
+ cpu->cc = CPU_GET_CLASS(cpu);
+
+ if (!accel_cpu_common_realize(cpu, errp)) {
+ return;
+ }
+
+    /* Wait until cpu initialization is complete before exposing the cpu. */
+ cpu_list_add(cpu);
+
+    /* Plugin initialization must wait until the cpu_index is assigned. */
+ if (tcg_enabled()) {
+ qemu_plugin_vcpu_init_hook(cpu);
+ }
+
+#ifdef CONFIG_USER_ONLY
+ assert(qdev_get_vmsd(DEVICE(cpu)) == NULL ||
+ qdev_get_vmsd(DEVICE(cpu))->unmigratable);
+#else
+ if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
+ vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
+ }
+ if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) {
+ vmstate_register(NULL, cpu->cpu_index, cpu->cc->sysemu_ops->legacy_vmsd, cpu);
+ }
+#endif /* CONFIG_USER_ONLY */
+}
+
+void cpu_exec_unrealizefn(CPUState *cpu)
+{
+#ifndef CONFIG_USER_ONLY
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
+ if (cc->sysemu_ops->legacy_vmsd != NULL) {
+ vmstate_unregister(NULL, cc->sysemu_ops->legacy_vmsd, cpu);
+ }
+ if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
+ vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
+ }
+#endif
+
+ /* Call the plugin hook before clearing cpu->cpu_index in cpu_list_remove */
+ if (tcg_enabled()) {
+ qemu_plugin_vcpu_exit_hook(cpu);
+ }
+
+ cpu_list_remove(cpu);
+ /*
+ * Now that the vCPU has been removed from the RCU list, we can call
+ * accel_cpu_common_unrealize, which may free fields using call_rcu.
+ */
+ accel_cpu_common_unrealize(cpu);
+}
+
+/*
+ * This can't go in hw/core/cpu.c because that file is compiled only
+ * once for both user-mode and system builds.
+ */
+static Property cpu_common_props[] = {
+#ifdef CONFIG_USER_ONLY
+ /*
+ * Create a property for the user-only object, so users can
+ * adjust prctl(PR_SET_UNALIGN) from the command-line.
+ * Has no effect if the target does not support the feature.
+ */
+ DEFINE_PROP_BOOL("prctl-unalign-sigbus", CPUState,
+ prctl_unalign_sigbus, false),
+#else
+ /*
+     * Create a memory property for the softmmu CPU object, so users can
+ * wire up its memory. The default if no link is set up is to use
+ * the system address space.
+ */
+ DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
+ MemoryRegion *),
+#endif
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static bool cpu_get_start_powered_off(Object *obj, Error **errp)
+{
+ CPUState *cpu = CPU(obj);
+ return cpu->start_powered_off;
+}
+
+static void cpu_set_start_powered_off(Object *obj, bool value, Error **errp)
+{
+ CPUState *cpu = CPU(obj);
+ cpu->start_powered_off = value;
+}
+
+void cpu_class_init_props(DeviceClass *dc)
+{
+ ObjectClass *oc = OBJECT_CLASS(dc);
+
+ device_class_set_props(dc, cpu_common_props);
+ /*
+ * We can't use DEFINE_PROP_BOOL in the Property array for this
+ * property, because we want this to be settable after realize.
+ */
+ object_class_property_add_bool(oc, "start-powered-off",
+ cpu_get_start_powered_off,
+ cpu_set_start_powered_off);
+}
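+
+/*
+ * Board code can then toggle the property, even after realize,
+ * e.g. (sketch):
+ *
+ *     object_property_set_bool(OBJECT(cpu), "start-powered-off",
+ *                              true, &error_abort);
+ */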
+
+void cpu_exec_initfn(CPUState *cpu)
+{
+ cpu->as = NULL;
+ cpu->num_ases = 0;
+
+#ifndef CONFIG_USER_ONLY
+ cpu->thread_id = qemu_get_thread_id();
+ cpu->memory = get_system_memory();
+ object_ref(OBJECT(cpu->memory));
+#endif
+}
+
+const char *parse_cpu_option(const char *cpu_option)
+{
+ ObjectClass *oc;
+ CPUClass *cc;
+ gchar **model_pieces;
+ const char *cpu_type;
+
+ model_pieces = g_strsplit(cpu_option, ",", 2);
+ if (!model_pieces[0]) {
+ error_report("-cpu option cannot be empty");
+ exit(1);
+ }
+
+ oc = cpu_class_by_name(CPU_RESOLVING_TYPE, model_pieces[0]);
+ if (oc == NULL) {
+ error_report("unable to find CPU model '%s'", model_pieces[0]);
+ g_strfreev(model_pieces);
+ exit(EXIT_FAILURE);
+ }
+
+ cpu_type = object_class_get_name(oc);
+ cc = CPU_CLASS(oc);
+ cc->parse_features(cpu_type, model_pieces[1], &error_fatal);
+ g_strfreev(model_pieces);
+ return cpu_type;
+}
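+
+/*
+ * For example (hypothetical model name), "-cpu foo,feature=on" is
+ * split into the model "foo", which is resolved to its QOM type
+ * name, while the remaining "feature=on" string is handed to the
+ * class's parse_features() hook.
+ */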
+
+void list_cpus(void)
+{
+    /* XXX: implement xxx_cpu_list for targets that still lack it */
+#if defined(cpu_list)
+ cpu_list();
+#endif
+}
+
+#if defined(CONFIG_USER_ONLY)
+void tb_invalidate_phys_addr(hwaddr addr)
+{
+ mmap_lock();
+ tb_invalidate_phys_page(addr);
+ mmap_unlock();
+}
+#else
+void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
+{
+ ram_addr_t ram_addr;
+ MemoryRegion *mr;
+ hwaddr l = 1;
+
+ if (!tcg_enabled()) {
+ return;
+ }
+
+ RCU_READ_LOCK_GUARD();
+ mr = address_space_translate(as, addr, &addr, &l, false, attrs);
+ if (!(memory_region_is_ram(mr)
+ || memory_region_is_romd(mr))) {
+ return;
+ }
+ ram_addr = memory_region_get_ram_addr(mr) + addr;
+ tb_invalidate_phys_page(ram_addr);
+}
+#endif
+
+/* Enable or disable single-step mode. EXCP_DEBUG is returned by the
+   CPU loop after each instruction. */
+void cpu_single_step(CPUState *cpu, int enabled)
+{
+ if (cpu->singlestep_enabled != enabled) {
+ cpu->singlestep_enabled = enabled;
+
+#if !defined(CONFIG_USER_ONLY)
+ const AccelOpsClass *ops = cpus_get_accel();
+ if (ops->update_guest_debug) {
+ ops->update_guest_debug(cpu);
+ }
+#endif
+
+ trace_breakpoint_singlestep(cpu->cpu_index, enabled);
+ }
+}
+
+void cpu_abort(CPUState *cpu, const char *fmt, ...)
+{
+ va_list ap;
+ va_list ap2;
+
+ va_start(ap, fmt);
+ va_copy(ap2, ap);
+ fprintf(stderr, "qemu: fatal: ");
+ vfprintf(stderr, fmt, ap);
+ fprintf(stderr, "\n");
+ cpu_dump_state(cpu, stderr, CPU_DUMP_FPU | CPU_DUMP_CCOP);
+ if (qemu_log_separate()) {
+ FILE *logfile = qemu_log_trylock();
+ if (logfile) {
+ fprintf(logfile, "qemu: fatal: ");
+ vfprintf(logfile, fmt, ap2);
+ fprintf(logfile, "\n");
+ cpu_dump_state(cpu, logfile, CPU_DUMP_FPU | CPU_DUMP_CCOP);
+ qemu_log_unlock(logfile);
+ }
+ }
+ va_end(ap2);
+ va_end(ap);
+ replay_finish();
+#if defined(CONFIG_USER_ONLY)
+ {
+ struct sigaction act;
+ sigfillset(&act.sa_mask);
+ act.sa_handler = SIG_DFL;
+ act.sa_flags = 0;
+ sigaction(SIGABRT, &act, NULL);
+ }
+#endif
+ abort();
+}
+
+/* physical memory access (slow version, mainly for debug) */
+#if defined(CONFIG_USER_ONLY)
+int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
+ void *ptr, size_t len, bool is_write)
+{
+ int flags;
+ vaddr l, page;
+    void *p;
+ uint8_t *buf = ptr;
+
+ while (len > 0) {
+ page = addr & TARGET_PAGE_MASK;
+ l = (page + TARGET_PAGE_SIZE) - addr;
+        if (l > len) {
+            l = len;
+        }
+        flags = page_get_flags(page);
+        if (!(flags & PAGE_VALID)) {
+            return -1;
+        }
+        if (is_write) {
+            if (!(flags & PAGE_WRITE)) {
+                return -1;
+            }
+            /* XXX: this code should not depend on lock_user */
+            p = lock_user(VERIFY_WRITE, addr, l, 0);
+            if (!p) {
+                return -1;
+            }
+            memcpy(p, buf, l);
+            unlock_user(p, addr, l);
+        } else {
+            if (!(flags & PAGE_READ)) {
+                return -1;
+            }
+            /* XXX: this code should not depend on lock_user */
+            p = lock_user(VERIFY_READ, addr, l, 1);
+            if (!p) {
+                return -1;
+            }
+            memcpy(buf, p, l);
+            unlock_user(p, addr, 0);
+ }
+ len -= l;
+ buf += l;
+ addr += l;
+ }
+ return 0;
+}
+#endif
+
+bool target_words_bigendian(void)
+{
+ return TARGET_BIG_ENDIAN;
+}
+
+const char *target_name(void)
+{
+ return TARGET_NAME;
+}
+
+void page_size_init(void)
+{
+    /* NOTE: we can always assume that qemu_host_page_size >=
+ TARGET_PAGE_SIZE */
+ if (qemu_host_page_size == 0) {
+ qemu_host_page_size = qemu_real_host_page_size();
+ }
+ if (qemu_host_page_size < TARGET_PAGE_SIZE) {
+ qemu_host_page_size = TARGET_PAGE_SIZE;
+ }
+ qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
+}
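+
+/*
+ * For example, a host with 16 KiB pages and a target whose
+ * TARGET_PAGE_SIZE is 4 KiB ends up with qemu_host_page_size == 16 KiB
+ * and qemu_host_page_mask == -16384.
+ */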
--- a/cpu.c
+++ /dev/null
-/*
- * Target-specific parts of the CPU object
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-
-#include "exec/target_page.h"
-#include "hw/qdev-core.h"
-#include "hw/qdev-properties.h"
-#include "qemu/error-report.h"
-#include "migration/vmstate.h"
-#ifdef CONFIG_USER_ONLY
-#include "qemu.h"
-#else
-#include "hw/core/sysemu-cpu-ops.h"
-#include "exec/address-spaces.h"
-#endif
-#include "sysemu/cpus.h"
-#include "sysemu/tcg.h"
-#include "exec/replay-core.h"
-#include "exec/cpu-common.h"
-#include "exec/exec-all.h"
-#include "exec/tb-flush.h"
-#include "exec/translate-all.h"
-#include "exec/log.h"
-#include "hw/core/accel-cpu.h"
-#include "trace/trace-root.h"
-#include "qemu/accel.h"
-#include "qemu/plugin.h"
-
-uintptr_t qemu_host_page_size;
-intptr_t qemu_host_page_mask;
-
-#ifndef CONFIG_USER_ONLY
-static int cpu_common_post_load(void *opaque, int version_id)
-{
- CPUState *cpu = opaque;
-
- /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
- version_id is increased. */
- cpu->interrupt_request &= ~0x01;
- tlb_flush(cpu);
-
- /* loadvm has just updated the content of RAM, bypassing the
- * usual mechanisms that ensure we flush TBs for writes to
- * memory we've translated code from. So we must flush all TBs,
- * which will now be stale.
- */
- tb_flush(cpu);
-
- return 0;
-}
-
-static int cpu_common_pre_load(void *opaque)
-{
- CPUState *cpu = opaque;
-
- cpu->exception_index = -1;
-
- return 0;
-}
-
-static bool cpu_common_exception_index_needed(void *opaque)
-{
- CPUState *cpu = opaque;
-
- return tcg_enabled() && cpu->exception_index != -1;
-}
-
-static const VMStateDescription vmstate_cpu_common_exception_index = {
- .name = "cpu_common/exception_index",
- .version_id = 1,
- .minimum_version_id = 1,
- .needed = cpu_common_exception_index_needed,
- .fields = (VMStateField[]) {
- VMSTATE_INT32(exception_index, CPUState),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static bool cpu_common_crash_occurred_needed(void *opaque)
-{
- CPUState *cpu = opaque;
-
- return cpu->crash_occurred;
-}
-
-static const VMStateDescription vmstate_cpu_common_crash_occurred = {
- .name = "cpu_common/crash_occurred",
- .version_id = 1,
- .minimum_version_id = 1,
- .needed = cpu_common_crash_occurred_needed,
- .fields = (VMStateField[]) {
- VMSTATE_BOOL(crash_occurred, CPUState),
- VMSTATE_END_OF_LIST()
- }
-};
-
-const VMStateDescription vmstate_cpu_common = {
- .name = "cpu_common",
- .version_id = 1,
- .minimum_version_id = 1,
- .pre_load = cpu_common_pre_load,
- .post_load = cpu_common_post_load,
- .fields = (VMStateField[]) {
- VMSTATE_UINT32(halted, CPUState),
- VMSTATE_UINT32(interrupt_request, CPUState),
- VMSTATE_END_OF_LIST()
- },
- .subsections = (const VMStateDescription*[]) {
- &vmstate_cpu_common_exception_index,
- &vmstate_cpu_common_crash_occurred,
- NULL
- }
-};
-#endif
-
-void cpu_exec_realizefn(CPUState *cpu, Error **errp)
-{
- /* cache the cpu class for the hotpath */
- cpu->cc = CPU_GET_CLASS(cpu);
-
- if (!accel_cpu_common_realize(cpu, errp)) {
- return;
- }
-
- /* Wait until cpu initialization complete before exposing cpu. */
- cpu_list_add(cpu);
-
- /* Plugin initialization must wait until cpu_index assigned. */
- if (tcg_enabled()) {
- qemu_plugin_vcpu_init_hook(cpu);
- }
-
-#ifdef CONFIG_USER_ONLY
- assert(qdev_get_vmsd(DEVICE(cpu)) == NULL ||
- qdev_get_vmsd(DEVICE(cpu))->unmigratable);
-#else
- if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
- vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
- }
- if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) {
- vmstate_register(NULL, cpu->cpu_index, cpu->cc->sysemu_ops->legacy_vmsd, cpu);
- }
-#endif /* CONFIG_USER_ONLY */
-}
-
-void cpu_exec_unrealizefn(CPUState *cpu)
-{
-#ifndef CONFIG_USER_ONLY
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (cc->sysemu_ops->legacy_vmsd != NULL) {
- vmstate_unregister(NULL, cc->sysemu_ops->legacy_vmsd, cpu);
- }
- if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
- vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
- }
-#endif
-
- /* Call the plugin hook before clearing cpu->cpu_index in cpu_list_remove */
- if (tcg_enabled()) {
- qemu_plugin_vcpu_exit_hook(cpu);
- }
-
- cpu_list_remove(cpu);
- /*
- * Now that the vCPU has been removed from the RCU list, we can call
- * accel_cpu_common_unrealize, which may free fields using call_rcu.
- */
- accel_cpu_common_unrealize(cpu);
-}
-
-/*
- * This can't go in hw/core/cpu.c because that file is compiled only
- * once for both user-mode and system builds.
- */
-static Property cpu_common_props[] = {
-#ifdef CONFIG_USER_ONLY
- /*
- * Create a property for the user-only object, so users can
- * adjust prctl(PR_SET_UNALIGN) from the command-line.
- * Has no effect if the target does not support the feature.
- */
- DEFINE_PROP_BOOL("prctl-unalign-sigbus", CPUState,
- prctl_unalign_sigbus, false),
-#else
- /*
- * Create a memory property for softmmu CPU object, so users can
- * wire up its memory. The default if no link is set up is to use
- * the system address space.
- */
- DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
- MemoryRegion *),
-#endif
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static bool cpu_get_start_powered_off(Object *obj, Error **errp)
-{
- CPUState *cpu = CPU(obj);
- return cpu->start_powered_off;
-}
-
-static void cpu_set_start_powered_off(Object *obj, bool value, Error **errp)
-{
- CPUState *cpu = CPU(obj);
- cpu->start_powered_off = value;
-}
-
-void cpu_class_init_props(DeviceClass *dc)
-{
- ObjectClass *oc = OBJECT_CLASS(dc);
-
- device_class_set_props(dc, cpu_common_props);
- /*
- * We can't use DEFINE_PROP_BOOL in the Property array for this
- * property, because we want this to be settable after realize.
- */
- object_class_property_add_bool(oc, "start-powered-off",
- cpu_get_start_powered_off,
- cpu_set_start_powered_off);
-}
-
-void cpu_exec_initfn(CPUState *cpu)
-{
- cpu->as = NULL;
- cpu->num_ases = 0;
-
-#ifndef CONFIG_USER_ONLY
- cpu->thread_id = qemu_get_thread_id();
- cpu->memory = get_system_memory();
- object_ref(OBJECT(cpu->memory));
-#endif
-}
-
-const char *parse_cpu_option(const char *cpu_option)
-{
- ObjectClass *oc;
- CPUClass *cc;
- gchar **model_pieces;
- const char *cpu_type;
-
- model_pieces = g_strsplit(cpu_option, ",", 2);
- if (!model_pieces[0]) {
- error_report("-cpu option cannot be empty");
- exit(1);
- }
-
- oc = cpu_class_by_name(CPU_RESOLVING_TYPE, model_pieces[0]);
- if (oc == NULL) {
- error_report("unable to find CPU model '%s'", model_pieces[0]);
- g_strfreev(model_pieces);
- exit(EXIT_FAILURE);
- }
-
- cpu_type = object_class_get_name(oc);
- cc = CPU_CLASS(oc);
- cc->parse_features(cpu_type, model_pieces[1], &error_fatal);
- g_strfreev(model_pieces);
- return cpu_type;
-}
-
-void list_cpus(void)
-{
- /* XXX: implement xxx_cpu_list for targets that still miss it */
-#if defined(cpu_list)
- cpu_list();
-#endif
-}
-
-#if defined(CONFIG_USER_ONLY)
-void tb_invalidate_phys_addr(hwaddr addr)
-{
- mmap_lock();
- tb_invalidate_phys_page(addr);
- mmap_unlock();
-}
-#else
-void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
-{
- ram_addr_t ram_addr;
- MemoryRegion *mr;
- hwaddr l = 1;
-
- if (!tcg_enabled()) {
- return;
- }
-
- RCU_READ_LOCK_GUARD();
- mr = address_space_translate(as, addr, &addr, &l, false, attrs);
- if (!(memory_region_is_ram(mr)
- || memory_region_is_romd(mr))) {
- return;
- }
- ram_addr = memory_region_get_ram_addr(mr) + addr;
- tb_invalidate_phys_page(ram_addr);
-}
-#endif
-
-/* enable or disable single step mode. EXCP_DEBUG is returned by the
- CPU loop after each instruction */
-void cpu_single_step(CPUState *cpu, int enabled)
-{
- if (cpu->singlestep_enabled != enabled) {
- cpu->singlestep_enabled = enabled;
-
-#if !defined(CONFIG_USER_ONLY)
- const AccelOpsClass *ops = cpus_get_accel();
- if (ops->update_guest_debug) {
- ops->update_guest_debug(cpu);
- }
-#endif
-
- trace_breakpoint_singlestep(cpu->cpu_index, enabled);
- }
-}
-
-void cpu_abort(CPUState *cpu, const char *fmt, ...)
-{
- va_list ap;
- va_list ap2;
-
- va_start(ap, fmt);
- va_copy(ap2, ap);
- fprintf(stderr, "qemu: fatal: ");
- vfprintf(stderr, fmt, ap);
- fprintf(stderr, "\n");
- cpu_dump_state(cpu, stderr, CPU_DUMP_FPU | CPU_DUMP_CCOP);
- if (qemu_log_separate()) {
- FILE *logfile = qemu_log_trylock();
- if (logfile) {
- fprintf(logfile, "qemu: fatal: ");
- vfprintf(logfile, fmt, ap2);
- fprintf(logfile, "\n");
- cpu_dump_state(cpu, logfile, CPU_DUMP_FPU | CPU_DUMP_CCOP);
- qemu_log_unlock(logfile);
- }
- }
- va_end(ap2);
- va_end(ap);
- replay_finish();
-#if defined(CONFIG_USER_ONLY)
- {
- struct sigaction act;
- sigfillset(&act.sa_mask);
- act.sa_handler = SIG_DFL;
- act.sa_flags = 0;
- sigaction(SIGABRT, &act, NULL);
- }
-#endif
- abort();
-}
-
-/* physical memory access (slow version, mainly for debug) */
-#if defined(CONFIG_USER_ONLY)
-int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
- void *ptr, size_t len, bool is_write)
-{
- int flags;
- vaddr l, page;
- void * p;
- uint8_t *buf = ptr;
-
- while (len > 0) {
- page = addr & TARGET_PAGE_MASK;
- l = (page + TARGET_PAGE_SIZE) - addr;
- if (l > len)
- l = len;
- flags = page_get_flags(page);
- if (!(flags & PAGE_VALID))
- return -1;
- if (is_write) {
- if (!(flags & PAGE_WRITE))
- return -1;
- /* XXX: this code should not depend on lock_user */
- if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
- return -1;
- memcpy(p, buf, l);
- unlock_user(p, addr, l);
- } else {
- if (!(flags & PAGE_READ))
- return -1;
- /* XXX: this code should not depend on lock_user */
- if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
- return -1;
- memcpy(buf, p, l);
- unlock_user(p, addr, 0);
- }
- len -= l;
- buf += l;
- addr += l;
- }
- return 0;
-}
-#endif
-
-bool target_words_bigendian(void)
-{
- return TARGET_BIG_ENDIAN;
-}
-
-const char *target_name(void)
-{
- return TARGET_NAME;
-}
-
-void page_size_init(void)
-{
- /* NOTE: we can always suppose that qemu_host_page_size >=
- TARGET_PAGE_SIZE */
- if (qemu_host_page_size == 0) {
- qemu_host_page_size = qemu_real_host_page_size();
- }
- if (qemu_host_page_size < TARGET_PAGE_SIZE) {
- qemu_host_page_size = TARGET_PAGE_SIZE;
- }
- qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
-}
--- a/cpus-common.c
+++ /dev/null
-/*
- * CPU thread main loop - common bits for user and system mode emulation
- *
- * Copyright (c) 2003-2005 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/main-loop.h"
-#include "exec/cpu-common.h"
-#include "hw/core/cpu.h"
-#include "sysemu/cpus.h"
-#include "qemu/lockable.h"
-#include "trace/trace-root.h"
-
-QemuMutex qemu_cpu_list_lock;
-static QemuCond exclusive_cond;
-static QemuCond exclusive_resume;
-static QemuCond qemu_work_cond;
-
-/* >= 1 if a thread is inside start_exclusive/end_exclusive. Written
- * under qemu_cpu_list_lock, read with atomic operations.
- */
-static int pending_cpus;
-
-void qemu_init_cpu_list(void)
-{
- /* This is needed because qemu_init_cpu_list is also called by the
- * child process in a fork. */
- pending_cpus = 0;
-
- qemu_mutex_init(&qemu_cpu_list_lock);
- qemu_cond_init(&exclusive_cond);
- qemu_cond_init(&exclusive_resume);
- qemu_cond_init(&qemu_work_cond);
-}
-
-void cpu_list_lock(void)
-{
- qemu_mutex_lock(&qemu_cpu_list_lock);
-}
-
-void cpu_list_unlock(void)
-{
- qemu_mutex_unlock(&qemu_cpu_list_lock);
-}
-
-static bool cpu_index_auto_assigned;
-
-static int cpu_get_free_index(void)
-{
- CPUState *some_cpu;
- int max_cpu_index = 0;
-
- cpu_index_auto_assigned = true;
- CPU_FOREACH(some_cpu) {
- if (some_cpu->cpu_index >= max_cpu_index) {
- max_cpu_index = some_cpu->cpu_index + 1;
- }
- }
- return max_cpu_index;
-}
-
-CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
-static unsigned int cpu_list_generation_id;
-
-unsigned int cpu_list_generation_id_get(void)
-{
- return cpu_list_generation_id;
-}
-
-void cpu_list_add(CPUState *cpu)
-{
- QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
- if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
- cpu->cpu_index = cpu_get_free_index();
- assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
- } else {
- assert(!cpu_index_auto_assigned);
- }
- QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node);
- cpu_list_generation_id++;
-}
-
-void cpu_list_remove(CPUState *cpu)
-{
- QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
- if (!QTAILQ_IN_USE(cpu, node)) {
- /* there is nothing to undo since cpu_exec_init() hasn't been called */
- return;
- }
-
- QTAILQ_REMOVE_RCU(&cpus, cpu, node);
- cpu->cpu_index = UNASSIGNED_CPU_INDEX;
- cpu_list_generation_id++;
-}
-
-CPUState *qemu_get_cpu(int index)
-{
- CPUState *cpu;
-
- CPU_FOREACH(cpu) {
- if (cpu->cpu_index == index) {
- return cpu;
- }
- }
-
- return NULL;
-}
-
-/* current CPU in the current thread. It is only valid inside cpu_exec() */
-__thread CPUState *current_cpu;
-
-struct qemu_work_item {
- QSIMPLEQ_ENTRY(qemu_work_item) node;
- run_on_cpu_func func;
- run_on_cpu_data data;
- bool free, exclusive, done;
-};
-
-static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
-{
- qemu_mutex_lock(&cpu->work_mutex);
- QSIMPLEQ_INSERT_TAIL(&cpu->work_list, wi, node);
- wi->done = false;
- qemu_mutex_unlock(&cpu->work_mutex);
-
- qemu_cpu_kick(cpu);
-}
-
-void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
- QemuMutex *mutex)
-{
- struct qemu_work_item wi;
-
- if (qemu_cpu_is_self(cpu)) {
- func(cpu, data);
- return;
- }
-
- wi.func = func;
- wi.data = data;
- wi.done = false;
- wi.free = false;
- wi.exclusive = false;
-
- queue_work_on_cpu(cpu, &wi);
- while (!qatomic_load_acquire(&wi.done)) {
- CPUState *self_cpu = current_cpu;
-
- qemu_cond_wait(&qemu_work_cond, mutex);
- current_cpu = self_cpu;
- }
-}
-
-void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
-{
- struct qemu_work_item *wi;
-
- wi = g_new0(struct qemu_work_item, 1);
- wi->func = func;
- wi->data = data;
- wi->free = true;
-
- queue_work_on_cpu(cpu, wi);
-}
-
-/* Wait for pending exclusive operations to complete. The CPU list lock
- must be held. */
-static inline void exclusive_idle(void)
-{
- while (pending_cpus) {
- qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_lock);
- }
-}
-
-/* Start an exclusive operation.
- Must only be called from outside cpu_exec. */
-void start_exclusive(void)
-{
- CPUState *other_cpu;
- int running_cpus;
-
- if (current_cpu->exclusive_context_count) {
- current_cpu->exclusive_context_count++;
- return;
- }
-
- qemu_mutex_lock(&qemu_cpu_list_lock);
- exclusive_idle();
-
- /* Make all other cpus stop executing. */
- qatomic_set(&pending_cpus, 1);
-
- /* Write pending_cpus before reading other_cpu->running. */
- smp_mb();
- running_cpus = 0;
- CPU_FOREACH(other_cpu) {
- if (qatomic_read(&other_cpu->running)) {
- other_cpu->has_waiter = true;
- running_cpus++;
- qemu_cpu_kick(other_cpu);
- }
- }
-
- qatomic_set(&pending_cpus, running_cpus + 1);
- while (pending_cpus > 1) {
- qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
- }
-
- /* Can release mutex, no one will enter another exclusive
- * section until end_exclusive resets pending_cpus to 0.
- */
- qemu_mutex_unlock(&qemu_cpu_list_lock);
-
- current_cpu->exclusive_context_count = 1;
-}
-
-/* Finish an exclusive operation. */
-void end_exclusive(void)
-{
- current_cpu->exclusive_context_count--;
- if (current_cpu->exclusive_context_count) {
- return;
- }
-
- qemu_mutex_lock(&qemu_cpu_list_lock);
- qatomic_set(&pending_cpus, 0);
- qemu_cond_broadcast(&exclusive_resume);
- qemu_mutex_unlock(&qemu_cpu_list_lock);
-}
-
-/* Wait for exclusive ops to finish, and begin cpu execution. */
-void cpu_exec_start(CPUState *cpu)
-{
- qatomic_set(&cpu->running, true);
-
- /* Write cpu->running before reading pending_cpus. */
- smp_mb();
-
- /* 1. start_exclusive saw cpu->running == true and pending_cpus >= 1.
- * After taking the lock we'll see cpu->has_waiter == true and run---not
- * for long because start_exclusive kicked us. cpu_exec_end will
- * decrement pending_cpus and signal the waiter.
- *
- * 2. start_exclusive saw cpu->running == false but pending_cpus >= 1.
- * This includes the case when an exclusive item is running now.
- * Then we'll see cpu->has_waiter == false and wait for the item to
- * complete.
- *
- * 3. pending_cpus == 0. Then start_exclusive is definitely going to
- * see cpu->running == true, and it will kick the CPU.
- */
- if (unlikely(qatomic_read(&pending_cpus))) {
- QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
- if (!cpu->has_waiter) {
- /* Not counted in pending_cpus, let the exclusive item
- * run. Since we have the lock, just set cpu->running to true
- * while holding it; no need to check pending_cpus again.
- */
- qatomic_set(&cpu->running, false);
- exclusive_idle();
- /* Now pending_cpus is zero. */
- qatomic_set(&cpu->running, true);
- } else {
- /* Counted in pending_cpus, go ahead and release the
- * waiter at cpu_exec_end.
- */
- }
- }
-}
-
-/* Mark cpu as not executing, and release pending exclusive ops. */
-void cpu_exec_end(CPUState *cpu)
-{
- qatomic_set(&cpu->running, false);
-
- /* Write cpu->running before reading pending_cpus. */
- smp_mb();
-
- /* 1. start_exclusive saw cpu->running == true. Then it will increment
- * pending_cpus and wait for exclusive_cond. After taking the lock
- * we'll see cpu->has_waiter == true.
- *
- * 2. start_exclusive saw cpu->running == false but here pending_cpus >= 1.
- * This includes the case when an exclusive item started after setting
- * cpu->running to false and before we read pending_cpus. Then we'll see
- * cpu->has_waiter == false and not touch pending_cpus. The next call to
- * cpu_exec_start will run exclusive_idle if still necessary, thus waiting
- * for the item to complete.
- *
- * 3. pending_cpus == 0. Then start_exclusive is definitely going to
- * see cpu->running == false, and it can ignore this CPU until the
- * next cpu_exec_start.
- */
- if (unlikely(qatomic_read(&pending_cpus))) {
- QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
- if (cpu->has_waiter) {
- cpu->has_waiter = false;
- qatomic_set(&pending_cpus, pending_cpus - 1);
- if (pending_cpus == 1) {
- qemu_cond_signal(&exclusive_cond);
- }
- }
- }
-}
-
-void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
- run_on_cpu_data data)
-{
- struct qemu_work_item *wi;
-
- wi = g_new0(struct qemu_work_item, 1);
- wi->func = func;
- wi->data = data;
- wi->free = true;
- wi->exclusive = true;
-
- queue_work_on_cpu(cpu, wi);
-}
-
-void process_queued_cpu_work(CPUState *cpu)
-{
- struct qemu_work_item *wi;
-
- qemu_mutex_lock(&cpu->work_mutex);
- if (QSIMPLEQ_EMPTY(&cpu->work_list)) {
- qemu_mutex_unlock(&cpu->work_mutex);
- return;
- }
- while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {
- wi = QSIMPLEQ_FIRST(&cpu->work_list);
- QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
- qemu_mutex_unlock(&cpu->work_mutex);
- if (wi->exclusive) {
- /* Running work items outside the BQL avoids the following deadlock:
- * 1) start_exclusive() is called with the BQL taken while another
- * CPU is running; 2) cpu_exec in the other CPU tries to takes the
- * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
- * neither CPU can proceed.
- */
- qemu_mutex_unlock_iothread();
- start_exclusive();
- wi->func(cpu, wi->data);
- end_exclusive();
- qemu_mutex_lock_iothread();
- } else {
- wi->func(cpu, wi->data);
- }
- qemu_mutex_lock(&cpu->work_mutex);
- if (wi->free) {
- g_free(wi);
- } else {
- qatomic_store_release(&wi->done, true);
- }
- }
- qemu_mutex_unlock(&cpu->work_mutex);
- qemu_cond_broadcast(&qemu_work_cond);
-}
-
-/* Add a breakpoint. */
-int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
- CPUBreakpoint **breakpoint)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
- CPUBreakpoint *bp;
-
- if (cc->gdb_adjust_breakpoint) {
- pc = cc->gdb_adjust_breakpoint(cpu, pc);
- }
-
- bp = g_malloc(sizeof(*bp));
-
- bp->pc = pc;
- bp->flags = flags;
-
- /* keep all GDB-injected breakpoints in front */
- if (flags & BP_GDB) {
- QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
- } else {
- QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
- }
-
- if (breakpoint) {
- *breakpoint = bp;
- }
-
- trace_breakpoint_insert(cpu->cpu_index, pc, flags);
- return 0;
-}
-
-/* Remove a specific breakpoint. */
-int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
- CPUBreakpoint *bp;
-
- if (cc->gdb_adjust_breakpoint) {
- pc = cc->gdb_adjust_breakpoint(cpu, pc);
- }
-
- QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
- if (bp->pc == pc && bp->flags == flags) {
- cpu_breakpoint_remove_by_ref(cpu, bp);
- return 0;
- }
- }
- return -ENOENT;
-}
-
-/* Remove a specific breakpoint by reference. */
-void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *bp)
-{
- QTAILQ_REMOVE(&cpu->breakpoints, bp, entry);
-
- trace_breakpoint_remove(cpu->cpu_index, bp->pc, bp->flags);
- g_free(bp);
-}
-
-/* Remove all matching breakpoints. */
-void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
-{
- CPUBreakpoint *bp, *next;
-
- QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
- if (bp->flags & mask) {
- cpu_breakpoint_remove_by_ref(cpu, bp);
- }
- }
-}
system_ss.add(when: 'CONFIG_WIN32', if_true: [files('os-win32.c')])
endif
-common_ss.add(files('cpus-common.c'))
-specific_ss.add(files('cpu.c'))
+common_ss.add(files('cpu-common.c'))
+specific_ss.add(files('cpu-target.c'))
subdir('softmmu')