X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=cpus-common.c;h=45c745ecf69a3e8af08de4177234247238186ca3;hb=91e0127087257048d2eb98b5b1a5671f53c3a36d;hp=af3385a296848ec3d9fe36183cc75cfa8a7159ef;hpb=d86766a9d0e5c4d9cfb2186bf0b7a6f0f17e1831;p=mirror_qemu.git diff --git a/cpus-common.c b/cpus-common.c index af3385a296..45c745ecf6 100644 --- a/cpus-common.c +++ b/cpus-common.c @@ -6,7 +6,7 @@ * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. + * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -22,8 +22,10 @@ #include "exec/cpu-common.h" #include "hw/core/cpu.h" #include "sysemu/cpus.h" +#include "qemu/lockable.h" +#include "trace/trace-root.h" -static QemuMutex qemu_cpu_list_lock; +QemuMutex qemu_cpu_list_lock; static QemuCond exclusive_cond; static QemuCond exclusive_resume; static QemuCond qemu_work_cond; @@ -60,18 +62,28 @@ static bool cpu_index_auto_assigned; static int cpu_get_free_index(void) { CPUState *some_cpu; - int cpu_index = 0; + int max_cpu_index = 0; cpu_index_auto_assigned = true; CPU_FOREACH(some_cpu) { - cpu_index++; + if (some_cpu->cpu_index >= max_cpu_index) { + max_cpu_index = some_cpu->cpu_index + 1; + } } - return cpu_index; + return max_cpu_index; +} + +CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus); +static unsigned int cpu_list_generation_id; + +unsigned int cpu_list_generation_id_get(void) +{ + return cpu_list_generation_id; } void cpu_list_add(CPUState *cpu) { - qemu_mutex_lock(&qemu_cpu_list_lock); + QEMU_LOCK_GUARD(&qemu_cpu_list_lock); if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) { cpu->cpu_index = cpu_get_free_index(); assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX); @@ -79,27 +91,40 @@ void cpu_list_add(CPUState *cpu) assert(!cpu_index_auto_assigned); } QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node); - qemu_mutex_unlock(&qemu_cpu_list_lock); + cpu_list_generation_id++; } void cpu_list_remove(CPUState *cpu) { - qemu_mutex_lock(&qemu_cpu_list_lock); + QEMU_LOCK_GUARD(&qemu_cpu_list_lock); if (!QTAILQ_IN_USE(cpu, node)) { /* there is nothing to undo since cpu_exec_init() hasn't been called */ - qemu_mutex_unlock(&qemu_cpu_list_lock); return; } - assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus))); - QTAILQ_REMOVE_RCU(&cpus, cpu, node); cpu->cpu_index = UNASSIGNED_CPU_INDEX; - qemu_mutex_unlock(&qemu_cpu_list_lock); + cpu_list_generation_id++; +} + +CPUState *qemu_get_cpu(int index) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + if (cpu->cpu_index == index) { + return cpu; + } + } + + return NULL; } +/* current CPU in the current thread. 
It is only valid inside cpu_exec() */ +__thread CPUState *current_cpu; + struct qemu_work_item { - struct qemu_work_item *next; + QSIMPLEQ_ENTRY(qemu_work_item) node; run_on_cpu_func func; run_on_cpu_data data; bool free, exclusive, done; @@ -108,13 +133,7 @@ struct qemu_work_item { static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi) { qemu_mutex_lock(&cpu->work_mutex); - if (cpu->queued_work_first == NULL) { - cpu->queued_work_first = wi; - } else { - cpu->queued_work_last->next = wi; - } - cpu->queued_work_last = wi; - wi->next = NULL; + QSIMPLEQ_INSERT_TAIL(&cpu->work_list, wi, node); wi->done = false; qemu_mutex_unlock(&cpu->work_mutex); @@ -138,7 +157,7 @@ void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data, wi.exclusive = false; queue_work_on_cpu(cpu, &wi); - while (!atomic_mb_read(&wi.done)) { + while (!qatomic_load_acquire(&wi.done)) { CPUState *self_cpu = current_cpu; qemu_cond_wait(&qemu_work_cond, mutex); @@ -150,7 +169,7 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data) { struct qemu_work_item *wi; - wi = g_malloc0(sizeof(struct qemu_work_item)); + wi = g_new0(struct qemu_work_item, 1); wi->func = func; wi->data = data; wi->free = true; @@ -174,24 +193,29 @@ void start_exclusive(void) CPUState *other_cpu; int running_cpus; + if (current_cpu->exclusive_context_count) { + current_cpu->exclusive_context_count++; + return; + } + qemu_mutex_lock(&qemu_cpu_list_lock); exclusive_idle(); /* Make all other cpus stop executing. */ - atomic_set(&pending_cpus, 1); + qatomic_set(&pending_cpus, 1); /* Write pending_cpus before reading other_cpu->running. */ smp_mb(); running_cpus = 0; CPU_FOREACH(other_cpu) { - if (atomic_read(&other_cpu->running)) { + if (qatomic_read(&other_cpu->running)) { other_cpu->has_waiter = true; running_cpus++; qemu_cpu_kick(other_cpu); } } - atomic_set(&pending_cpus, running_cpus + 1); + qatomic_set(&pending_cpus, running_cpus + 1); while (pending_cpus > 1) { qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock); } @@ -200,13 +224,20 @@ void start_exclusive(void) * section until end_exclusive resets pending_cpus to 0. */ qemu_mutex_unlock(&qemu_cpu_list_lock); + + current_cpu->exclusive_context_count = 1; } /* Finish an exclusive operation. */ void end_exclusive(void) { + current_cpu->exclusive_context_count--; + if (current_cpu->exclusive_context_count) { + return; + } + qemu_mutex_lock(&qemu_cpu_list_lock); - atomic_set(&pending_cpus, 0); + qatomic_set(&pending_cpus, 0); qemu_cond_broadcast(&exclusive_resume); qemu_mutex_unlock(&qemu_cpu_list_lock); } @@ -214,7 +245,7 @@ void end_exclusive(void) /* Wait for exclusive ops to finish, and begin cpu execution. */ void cpu_exec_start(CPUState *cpu) { - atomic_set(&cpu->running, true); + qatomic_set(&cpu->running, true); /* Write cpu->running before reading pending_cpus. */ smp_mb(); @@ -232,30 +263,29 @@ void cpu_exec_start(CPUState *cpu) * 3. pending_cpus == 0. Then start_exclusive is definitely going to * see cpu->running == true, and it will kick the CPU. */ - if (unlikely(atomic_read(&pending_cpus))) { - qemu_mutex_lock(&qemu_cpu_list_lock); + if (unlikely(qatomic_read(&pending_cpus))) { + QEMU_LOCK_GUARD(&qemu_cpu_list_lock); if (!cpu->has_waiter) { /* Not counted in pending_cpus, let the exclusive item * run. Since we have the lock, just set cpu->running to true * while holding it; no need to check pending_cpus again. 
*/ - atomic_set(&cpu->running, false); + qatomic_set(&cpu->running, false); exclusive_idle(); /* Now pending_cpus is zero. */ - atomic_set(&cpu->running, true); + qatomic_set(&cpu->running, true); } else { /* Counted in pending_cpus, go ahead and release the * waiter at cpu_exec_end. */ } - qemu_mutex_unlock(&qemu_cpu_list_lock); } } /* Mark cpu as not executing, and release pending exclusive ops. */ void cpu_exec_end(CPUState *cpu) { - atomic_set(&cpu->running, false); + qatomic_set(&cpu->running, false); /* Write cpu->running before reading pending_cpus. */ smp_mb(); @@ -275,16 +305,15 @@ void cpu_exec_end(CPUState *cpu) * see cpu->running == false, and it can ignore this CPU until the * next cpu_exec_start. */ - if (unlikely(atomic_read(&pending_cpus))) { - qemu_mutex_lock(&qemu_cpu_list_lock); + if (unlikely(qatomic_read(&pending_cpus))) { + QEMU_LOCK_GUARD(&qemu_cpu_list_lock); if (cpu->has_waiter) { cpu->has_waiter = false; - atomic_set(&pending_cpus, pending_cpus - 1); + qatomic_set(&pending_cpus, pending_cpus - 1); if (pending_cpus == 1) { qemu_cond_signal(&exclusive_cond); } } - qemu_mutex_unlock(&qemu_cpu_list_lock); } } @@ -293,7 +322,7 @@ void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, { struct qemu_work_item *wi; - wi = g_malloc0(sizeof(struct qemu_work_item)); + wi = g_new0(struct qemu_work_item, 1); wi->func = func; wi->data = data; wi->free = true; @@ -306,17 +335,14 @@ void process_queued_cpu_work(CPUState *cpu) { struct qemu_work_item *wi; - if (cpu->queued_work_first == NULL) { + qemu_mutex_lock(&cpu->work_mutex); + if (QSIMPLEQ_EMPTY(&cpu->work_list)) { + qemu_mutex_unlock(&cpu->work_mutex); return; } - - qemu_mutex_lock(&cpu->work_mutex); - while (cpu->queued_work_first != NULL) { - wi = cpu->queued_work_first; - cpu->queued_work_first = wi->next; - if (!cpu->queued_work_first) { - cpu->queued_work_last = NULL; - } + while (!QSIMPLEQ_EMPTY(&cpu->work_list)) { + wi = QSIMPLEQ_FIRST(&cpu->work_list); + QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node); qemu_mutex_unlock(&cpu->work_mutex); if (wi->exclusive) { /* Running work items outside the BQL avoids the following deadlock: @@ -337,9 +363,80 @@ void process_queued_cpu_work(CPUState *cpu) if (wi->free) { g_free(wi); } else { - atomic_mb_set(&wi->done, true); + qatomic_store_release(&wi->done, true); } } qemu_mutex_unlock(&cpu->work_mutex); qemu_cond_broadcast(&qemu_work_cond); } + +/* Add a breakpoint. */ +int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, + CPUBreakpoint **breakpoint) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + CPUBreakpoint *bp; + + if (cc->gdb_adjust_breakpoint) { + pc = cc->gdb_adjust_breakpoint(cpu, pc); + } + + bp = g_malloc(sizeof(*bp)); + + bp->pc = pc; + bp->flags = flags; + + /* keep all GDB-injected breakpoints in front */ + if (flags & BP_GDB) { + QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry); + } else { + QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry); + } + + if (breakpoint) { + *breakpoint = bp; + } + + trace_breakpoint_insert(cpu->cpu_index, pc, flags); + return 0; +} + +/* Remove a specific breakpoint. */ +int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + CPUBreakpoint *bp; + + if (cc->gdb_adjust_breakpoint) { + pc = cc->gdb_adjust_breakpoint(cpu, pc); + } + + QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { + if (bp->pc == pc && bp->flags == flags) { + cpu_breakpoint_remove_by_ref(cpu, bp); + return 0; + } + } + return -ENOENT; +} + +/* Remove a specific breakpoint by reference. 
*/ +void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *bp) +{ + QTAILQ_REMOVE(&cpu->breakpoints, bp, entry); + + trace_breakpoint_remove(cpu->cpu_index, bp->pc, bp->flags); + g_free(bp); +} + +/* Remove all matching breakpoints. */ +void cpu_breakpoint_remove_all(CPUState *cpu, int mask) +{ + CPUBreakpoint *bp, *next; + + QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) { + if (bp->flags & mask) { + cpu_breakpoint_remove_by_ref(cpu, bp); + } + } +}
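
The hunks above replace atomic_mb_read()/atomic_mb_set() on wi.done with qatomic_load_acquire()/qatomic_store_release(): the thread calling do_run_on_cpu() sleeps on qemu_work_cond until the target CPU's thread, in process_queued_cpu_work(), runs the item and publishes completion with a release store that the waiter re-checks with an acquire load. The following is a minimal, self-contained sketch of that handshake, not QEMU code: pthreads and C11 atomics stand in for QemuMutex/QemuCond and the qatomic_* helpers, a single pending slot stands in for the per-CPU QSIMPLEQ work_list, and every name in it (work_item, worker, submit_and_wait, say_hello, greeting) is hypothetical.

/*
 * Hypothetical demo of the run-on-cpu completion pattern; pthreads and
 * C11 atomics substitute for QEMU's QemuMutex/QemuCond and qatomic_*.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct work_item {
    void (*func)(void *data);
    void *data;
    atomic_bool done;
};

static pthread_mutex_t work_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t work_cond = PTHREAD_COND_INITIALIZER;
static struct work_item *pending;   /* one slot instead of QEMU's QSIMPLEQ list */

/* Worker side: roughly the shape of process_queued_cpu_work(). */
static void *worker(void *arg)
{
    struct work_item *wi;

    (void)arg;
    pthread_mutex_lock(&work_mutex);
    while (pending == NULL) {
        pthread_cond_wait(&work_cond, &work_mutex);
    }
    wi = pending;
    pending = NULL;
    pthread_mutex_unlock(&work_mutex);

    /* Run the callback without holding the lock, then publish completion;
     * the release store pairs with the acquire load in submit_and_wait(). */
    wi->func(wi->data);
    atomic_store_explicit(&wi->done, true, memory_order_release);

    pthread_mutex_lock(&work_mutex);
    pthread_cond_broadcast(&work_cond);
    pthread_mutex_unlock(&work_mutex);
    return NULL;
}

/* Submitter side: roughly the shape of do_run_on_cpu(). */
static void submit_and_wait(struct work_item *wi)
{
    pthread_mutex_lock(&work_mutex);
    pending = wi;
    pthread_cond_broadcast(&work_cond);
    while (!atomic_load_explicit(&wi->done, memory_order_acquire)) {
        pthread_cond_wait(&work_cond, &work_mutex);
    }
    pthread_mutex_unlock(&work_mutex);
}

static void say_hello(void *data)
{
    printf("work item ran: %s\n", (char *)data);
}

int main(void)
{
    static char greeting[] = "hello from a queued work item";
    struct work_item wi = { .func = say_hello, .data = greeting, .done = false };
    pthread_t tid;

    pthread_create(&tid, NULL, worker, NULL);
    submit_and_wait(&wi);
    pthread_join(tid, NULL);
    return 0;
}

As in the real code, the worker drops the lock around the callback and only signals after the release store, so a waiter that observes done == false is guaranteed to be woken; the sketch just collapses QEMU's per-CPU work_list and the exclusive-work special case into one slot for brevity.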