diff --git a/cpus-common.c b/cpus-common.c
index e742658e75..39f355de98 100644
--- a/cpus-common.c
+++ b/cpus-common.c
@@ -6,7 +6,7 @@
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
  *
  * This library is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -22,6 +22,7 @@
 #include "exec/cpu-common.h"
 #include "hw/core/cpu.h"
 #include "sysemu/cpus.h"
+#include "qemu/lockable.h"

 static QemuMutex qemu_cpu_list_lock;
 static QemuCond exclusive_cond;
@@ -60,24 +61,28 @@ static bool cpu_index_auto_assigned;
 static int cpu_get_free_index(void)
 {
     CPUState *some_cpu;
-    int cpu_index = 0;
+    int max_cpu_index = 0;

     cpu_index_auto_assigned = true;
     CPU_FOREACH(some_cpu) {
-        cpu_index++;
+        if (some_cpu->cpu_index >= max_cpu_index) {
+            max_cpu_index = some_cpu->cpu_index + 1;
+        }
     }
-    return cpu_index;
+    return max_cpu_index;
 }

-static void finish_safe_work(CPUState *cpu)
+CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
+static unsigned int cpu_list_generation_id;
+
+unsigned int cpu_list_generation_id_get(void)
 {
-    cpu_exec_start(cpu);
-    cpu_exec_end(cpu);
+    return cpu_list_generation_id;
 }

 void cpu_list_add(CPUState *cpu)
 {
-    qemu_mutex_lock(&qemu_cpu_list_lock);
+    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
     if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
         cpu->cpu_index = cpu_get_free_index();
         assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
@@ -85,29 +90,40 @@ void cpu_list_add(CPUState *cpu)
         assert(!cpu_index_auto_assigned);
     }
     QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node);
-    qemu_mutex_unlock(&qemu_cpu_list_lock);
-
-    finish_safe_work(cpu);
+    cpu_list_generation_id++;
 }

 void cpu_list_remove(CPUState *cpu)
 {
-    qemu_mutex_lock(&qemu_cpu_list_lock);
+    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
     if (!QTAILQ_IN_USE(cpu, node)) {
         /* there is nothing to undo since cpu_exec_init() hasn't been called */
-        qemu_mutex_unlock(&qemu_cpu_list_lock);
         return;
     }

-    assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus)));
-
     QTAILQ_REMOVE_RCU(&cpus, cpu, node);
     cpu->cpu_index = UNASSIGNED_CPU_INDEX;
-    qemu_mutex_unlock(&qemu_cpu_list_lock);
+    cpu_list_generation_id++;
+}
+
+CPUState *qemu_get_cpu(int index)
+{
+    CPUState *cpu;
+
+    CPU_FOREACH(cpu) {
+        if (cpu->cpu_index == index) {
+            return cpu;
+        }
+    }
+
+    return NULL;
 }

+/* current CPU in the current thread. It is only valid inside cpu_exec() */
+__thread CPUState *current_cpu;
+
 struct qemu_work_item {
-    struct qemu_work_item *next;
+    QSIMPLEQ_ENTRY(qemu_work_item) node;
     run_on_cpu_func func;
     run_on_cpu_data data;
     bool free, exclusive, done;
@@ -116,13 +132,7 @@ struct qemu_work_item {
 static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
 {
     qemu_mutex_lock(&cpu->work_mutex);
-    if (cpu->queued_work_first == NULL) {
-        cpu->queued_work_first = wi;
-    } else {
-        cpu->queued_work_last->next = wi;
-    }
-    cpu->queued_work_last = wi;
-    wi->next = NULL;
+    QSIMPLEQ_INSERT_TAIL(&cpu->work_list, wi, node);
     wi->done = false;
     qemu_mutex_unlock(&cpu->work_mutex);

@@ -146,7 +156,7 @@ void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
     wi.exclusive = false;

     queue_work_on_cpu(cpu, &wi);
-    while (!atomic_mb_read(&wi.done)) {
+    while (!qatomic_mb_read(&wi.done)) {
         CPUState *self_cpu = current_cpu;

         qemu_cond_wait(&qemu_work_cond, mutex);
@@ -158,7 +168,7 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
 {
     struct qemu_work_item *wi;

-    wi = g_malloc0(sizeof(struct qemu_work_item));
+    wi = g_new0(struct qemu_work_item, 1);
     wi->func = func;
     wi->data = data;
     wi->free = true;
@@ -182,24 +192,29 @@ void start_exclusive(void)
     CPUState *other_cpu;
     int running_cpus;

+    if (current_cpu->exclusive_context_count) {
+        current_cpu->exclusive_context_count++;
+        return;
+    }
+
     qemu_mutex_lock(&qemu_cpu_list_lock);
     exclusive_idle();

     /* Make all other cpus stop executing.  */
-    atomic_set(&pending_cpus, 1);
+    qatomic_set(&pending_cpus, 1);

     /* Write pending_cpus before reading other_cpu->running.  */
     smp_mb();
     running_cpus = 0;
     CPU_FOREACH(other_cpu) {
-        if (atomic_read(&other_cpu->running)) {
+        if (qatomic_read(&other_cpu->running)) {
             other_cpu->has_waiter = true;
             running_cpus++;
             qemu_cpu_kick(other_cpu);
         }
     }

-    atomic_set(&pending_cpus, running_cpus + 1);
+    qatomic_set(&pending_cpus, running_cpus + 1);
     while (pending_cpus > 1) {
         qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
     }
@@ -208,13 +223,20 @@ void start_exclusive(void)
      * section until end_exclusive resets pending_cpus to 0.
      */
     qemu_mutex_unlock(&qemu_cpu_list_lock);
+
+    current_cpu->exclusive_context_count = 1;
 }

 /* Finish an exclusive operation.  */
 void end_exclusive(void)
 {
+    current_cpu->exclusive_context_count--;
+    if (current_cpu->exclusive_context_count) {
+        return;
+    }
+
     qemu_mutex_lock(&qemu_cpu_list_lock);
-    atomic_set(&pending_cpus, 0);
+    qatomic_set(&pending_cpus, 0);
     qemu_cond_broadcast(&exclusive_resume);
     qemu_mutex_unlock(&qemu_cpu_list_lock);
 }
@@ -222,7 +244,7 @@ void end_exclusive(void)
 /* Wait for exclusive ops to finish, and begin cpu execution.  */
 void cpu_exec_start(CPUState *cpu)
 {
-    atomic_set(&cpu->running, true);
+    qatomic_set(&cpu->running, true);

     /* Write cpu->running before reading pending_cpus.  */
     smp_mb();
@@ -240,30 +262,29 @@ void cpu_exec_start(CPUState *cpu)
      * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
      *    see cpu->running == true, and it will kick the CPU.
      */
-    if (unlikely(atomic_read(&pending_cpus))) {
-        qemu_mutex_lock(&qemu_cpu_list_lock);
+    if (unlikely(qatomic_read(&pending_cpus))) {
+        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
         if (!cpu->has_waiter) {
             /* Not counted in pending_cpus, let the exclusive item
              * run.  Since we have the lock, just set cpu->running to true
              * while holding it; no need to check pending_cpus again.
              */
-            atomic_set(&cpu->running, false);
+            qatomic_set(&cpu->running, false);
             exclusive_idle();
             /* Now pending_cpus is zero.  */
-            atomic_set(&cpu->running, true);
+            qatomic_set(&cpu->running, true);
         } else {
             /* Counted in pending_cpus, go ahead and release the
              * waiter at cpu_exec_end.
              */
         }
-        qemu_mutex_unlock(&qemu_cpu_list_lock);
     }
 }

 /* Mark cpu as not executing, and release pending exclusive ops.  */
 void cpu_exec_end(CPUState *cpu)
 {
-    atomic_set(&cpu->running, false);
+    qatomic_set(&cpu->running, false);

     /* Write cpu->running before reading pending_cpus.  */
     smp_mb();
@@ -283,16 +304,15 @@ void cpu_exec_end(CPUState *cpu)
      * see cpu->running == false, and it can ignore this CPU until the
      * next cpu_exec_start.
      */
-    if (unlikely(atomic_read(&pending_cpus))) {
-        qemu_mutex_lock(&qemu_cpu_list_lock);
+    if (unlikely(qatomic_read(&pending_cpus))) {
+        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
         if (cpu->has_waiter) {
             cpu->has_waiter = false;
-            atomic_set(&pending_cpus, pending_cpus - 1);
+            qatomic_set(&pending_cpus, pending_cpus - 1);
             if (pending_cpus == 1) {
                 qemu_cond_signal(&exclusive_cond);
             }
         }
-        qemu_mutex_unlock(&qemu_cpu_list_lock);
     }
 }

@@ -301,7 +321,7 @@ void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
 {
     struct qemu_work_item *wi;

-    wi = g_malloc0(sizeof(struct qemu_work_item));
+    wi = g_new0(struct qemu_work_item, 1);
     wi->func = func;
     wi->data = data;
     wi->free = true;
@@ -314,17 +334,14 @@ void process_queued_cpu_work(CPUState *cpu)
 {
     struct qemu_work_item *wi;

-    if (cpu->queued_work_first == NULL) {
+    qemu_mutex_lock(&cpu->work_mutex);
+    if (QSIMPLEQ_EMPTY(&cpu->work_list)) {
+        qemu_mutex_unlock(&cpu->work_mutex);
         return;
     }
-
-    qemu_mutex_lock(&cpu->work_mutex);
-    while (cpu->queued_work_first != NULL) {
-        wi = cpu->queued_work_first;
-        cpu->queued_work_first = wi->next;
-        if (!cpu->queued_work_first) {
-            cpu->queued_work_last = NULL;
-        }
+    while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {
+        wi = QSIMPLEQ_FIRST(&cpu->work_list);
+        QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
         qemu_mutex_unlock(&cpu->work_mutex);
         if (wi->exclusive) {
             /* Running work items outside the BQL avoids the following deadlock:
@@ -345,7 +362,7 @@ void process_queued_cpu_work(CPUState *cpu)
         if (wi->free) {
             g_free(wi);
         } else {
-            atomic_mb_set(&wi->done, true);
+            qatomic_mb_set(&wi->done, true);
         }
     }
     qemu_mutex_unlock(&cpu->work_mutex);
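
Notes on the patch follow; the sketches are standalone illustrations, not QEMU code.

Why cpu_get_free_index() now scans for the maximum existing cpu_index instead of counting list entries: once vCPUs can be unplugged in any order the list may contain holes, and "next index = number of CPUs" can hand out an index that is still in use. A hypothetical before/after comparison, with a plain array standing in for the CPU list:

/* Standalone illustration of the cpu_get_free_index() change; the array
 * "present" stands in for QEMU's CPU list. */
#include <assert.h>
#include <stdio.h>

static int free_index_by_count(const int *idx, int n)
{
    (void)idx;              /* the old code only looked at the list length */
    return n;
}

static int free_index_by_max(const int *idx, int n)
{
    int max = 0;
    for (int i = 0; i < n; i++) {
        if (idx[i] >= max) {
            max = idx[i] + 1;   /* new behaviour: highest existing index + 1 */
        }
    }
    return max;
}

int main(void)
{
    /* CPUs 0 and 2 are present; CPU 1 was unplugged. */
    int present[] = { 0, 2 };

    printf("by count: %d (collides with CPU 2)\n", free_index_by_count(present, 2));
    printf("by max:   %d (safe)\n", free_index_by_max(present, 2));
    assert(free_index_by_max(present, 2) == 3);
    return 0;
}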
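
QEMU_LOCK_GUARD (pulled in through the new "qemu/lockable.h" include) releases the mutex automatically when the enclosing scope is left, which is why the early return in cpu_list_remove() and the tails of cpu_exec_start()/cpu_exec_end() no longer need explicit unlock calls. Below is a stripped-down pthread sketch of the same scope-guard idea, built on the GCC/Clang cleanup attribute; QEMU's real macro is more general, this only shows the shape:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

/* Called automatically when the guarded variable goes out of scope. */
static void unlock_cleanup(pthread_mutex_t **m)
{
    pthread_mutex_unlock(*m);
}

/* Hypothetical stand-in for QEMU_LOCK_GUARD: lock now, unlock at scope exit. */
#define LOCK_GUARD(m) \
    pthread_mutex_t *guard_ __attribute__((cleanup(unlock_cleanup), unused)) = \
        (pthread_mutex_lock(m), (m))

static void bump(int bail_out_early)
{
    LOCK_GUARD(&lock);
    if (bail_out_early) {
        return;             /* unlock_cleanup() runs here, no explicit unlock */
    }
    counter++;
}                           /* ...and here on the normal path */

int main(void)
{
    bump(1);
    bump(0);
    printf("counter = %d\n", counter);  /* prints 1 */
    return 0;
}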
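
The queue_work_on_cpu()/process_queued_cpu_work() hunks replace the hand-rolled queued_work_first/queued_work_last pointer pair with QEMU's QSIMPLEQ macros from "qemu/queue.h", which follow the BSD <sys/queue.h> style. A self-contained sketch of the same insert-at-tail/pop-from-head pattern, using STAILQ as the closest libc-available equivalent since QSIMPLEQ only exists inside the QEMU tree:

/* Standalone illustration of the work-list change, using <sys/queue.h>. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct work_item {
    STAILQ_ENTRY(work_item) node;   /* replaces the hand-written ->next */
    int payload;
};

STAILQ_HEAD(work_list, work_item);

int main(void)
{
    struct work_list list = STAILQ_HEAD_INITIALIZER(list);

    for (int i = 0; i < 3; i++) {
        struct work_item *wi = calloc(1, sizeof(*wi));
        wi->payload = i;
        STAILQ_INSERT_TAIL(&list, wi, node);  /* was: queued_work_last->next = wi */
    }
    while (!STAILQ_EMPTY(&list)) {
        struct work_item *wi = STAILQ_FIRST(&list);
        STAILQ_REMOVE_HEAD(&list, node);      /* no queued_work_last bookkeeping */
        printf("run item %d\n", wi->payload);
        free(wi);
    }
    return 0;
}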
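
Finally, the start_exclusive()/end_exclusive() hunks make exclusive sections re-entrant: a per-CPU exclusive_context_count lets only the outermost call pay for the rendezvous that stops the other vCPUs, and only the matching outermost end_exclusive() releases them. The sketch below reduces this to a thread-local nesting counter; the rendezvous itself is elided and the names are illustrative:

/* Standalone illustration of the re-entrant exclusive-section counting. */
#include <assert.h>
#include <stdio.h>

static __thread int exclusive_context_count;

static void begin_exclusive(void)
{
    if (exclusive_context_count) {
        /* Already exclusive: just nest, skip the expensive rendezvous. */
        exclusive_context_count++;
        return;
    }
    /* ... stop all other vCPUs here (elided) ... */
    exclusive_context_count = 1;
}

static void finish_exclusive(void)
{
    exclusive_context_count--;
    if (exclusive_context_count) {
        return;             /* still nested, keep exclusivity */
    }
    /* ... release the other vCPUs here (elided) ... */
}

int main(void)
{
    begin_exclusive();
    begin_exclusive();      /* nested call is now safe */
    finish_exclusive();
    assert(exclusive_context_count == 1);
    finish_exclusive();
    assert(exclusive_context_count == 0);
    printf("nesting balanced\n");
    return 0;
}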