]> git.proxmox.com Git - mirror_qemu.git/blame - softmmu/cpus.c
rcu: use coroutine TLS macros
[mirror_qemu.git] / softmmu / cpus.c
CommitLineData
296af7c9
BS
1/*
2 * QEMU System Emulator
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
7b31bbc2 25#include "qemu/osdep.h"
a8d25326 26#include "qemu-common.h"
83c9089e 27#include "monitor/monitor.h"
e688df6b 28#include "qapi/error.h"
df7a1f48 29#include "qapi/qapi-commands-machine.h"
112ed241 30#include "qapi/qapi-commands-misc.h"
9af23989 31#include "qapi/qapi-events-run-state.h"
a4e15de9 32#include "qapi/qmp/qerror.h"
022c62cb 33#include "exec/gdbstub.h"
b3946626 34#include "sysemu/hw_accel.h"
63c91552 35#include "exec/exec-all.h"
1de7afc9 36#include "qemu/thread.h"
30865f31 37#include "qemu/plugin.h"
9c17d615 38#include "sysemu/cpus.h"
9c09a251 39#include "qemu/guest-random.h"
9cb805fd 40#include "hw/nmi.h"
8b427044 41#include "sysemu/replay.h"
54d31236 42#include "sysemu/runstate.h"
740b1759 43#include "sysemu/cpu-timers.h"
faf20793 44#include "sysemu/whpx.h"
5cc8767d 45#include "hw/boards.h"
650d103d 46#include "hw/hw.h"
8af3f5c6 47#include "trace.h"
0ff0fc19 48
6d9cb73c
JK
#ifdef CONFIG_LINUX

#include <sys/prctl.h>

/*
 * Fallback definitions for the memory-failure prctl() interface, for
 * builds against kernel headers that predate it.  Values mirror
 * <linux/prctl.h>.
 */
#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */
66
/* The Big QEMU Lock (BQL): serializes device emulation and vCPU/iothread
 * interaction.  Initialized in qemu_init_cpu_loop(). */
static QemuMutex qemu_global_mutex;

/*
 * Return true if @cpu is not executing guest code: either the vCPU itself
 * has stopped, or the whole VM is not in the "running" runstate.
 */
bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}
73
/*
 * Return true if @cpu has no queued run_on_cpu() work items.
 * Uses the atomic variant so it may be called without holding
 * the work-list lock.
 */
bool cpu_work_list_empty(CPUState *cpu)
{
    return QSIMPLEQ_EMPTY_ATOMIC(&cpu->work_list);
}
78
/*
 * Return true if the vCPU thread for @cpu has nothing to do and may
 * block on its halt condition variable.
 *
 * The order of the checks matters: a pending stop request or queued
 * work always wins over halting; an already-stopped CPU is idle by
 * definition; otherwise the CPU is busy unless it is halted with no
 * pending work (and the accelerator does not handle halt in-kernel,
 * e.g. KVM, or route APIC events in the platform, e.g. WHPX).
 */
bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || !cpu_work_list_empty(cpu)) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel() || whpx_apic_in_platform()) {
        return false;
    }
    return true;
}
93
/*
 * Return true only if every vCPU thread is idle per cpu_thread_is_idle().
 * Used (e.g. by icount) to decide whether virtual time may be warped.
 */
bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}
105
296af7c9
BS
/***********************************************************/
/*
 * Report an unrecoverable hardware-emulation error: print the
 * printf-style message to stderr, dump the FPU-inclusive register
 * state of every CPU, then abort().  Does not return.
 */
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}
123
430065da
CF
/*
 * The chosen accelerator is supposed to register this.
 * Set exactly once via cpus_register_accel() before any vCPU is
 * created; most callers below assume it is non-NULL.
 */
static const AccelOpsClass *cpus_accel;
296af7c9
BS
/* Sync the register state of every CPU from the accelerator. */
void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

/* Push every CPU's state back to the accelerator after a system reset. */
void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

/* Push every CPU's state to the accelerator after machine init / loadvm. */
void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

/* Notify the accelerator for every CPU that a loadvm is about to begin. */
void cpu_synchronize_all_pre_loadvm(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_pre_loadvm(cpu);
    }
}
164
430065da
CF
165void cpu_synchronize_state(CPUState *cpu)
166{
994aa172 167 if (cpus_accel->synchronize_state) {
430065da
CF
168 cpus_accel->synchronize_state(cpu);
169 }
430065da
CF
170}
171
172void cpu_synchronize_post_reset(CPUState *cpu)
173{
994aa172 174 if (cpus_accel->synchronize_post_reset) {
430065da
CF
175 cpus_accel->synchronize_post_reset(cpu);
176 }
430065da
CF
177}
178
179void cpu_synchronize_post_init(CPUState *cpu)
180{
994aa172 181 if (cpus_accel->synchronize_post_init) {
430065da
CF
182 cpus_accel->synchronize_post_init(cpu);
183 }
430065da
CF
184}
185
186void cpu_synchronize_pre_loadvm(CPUState *cpu)
187{
994aa172 188 if (cpus_accel->synchronize_pre_loadvm) {
430065da
CF
189 cpus_accel->synchronize_pre_loadvm(cpu);
190 }
430065da
CF
191}
192
92a5199b
TL
/* Return whether all CPUs can currently be reset (delegates to the
 * accel/target-level check). */
bool cpus_are_resettable(void)
{
    return cpu_check_are_resettable();
}
197
430065da
CF
/*
 * Return the current value of QEMU_CLOCK_VIRTUAL in ns, using the
 * accelerator's notion of virtual time when it provides one (e.g.
 * icount), otherwise falling back to the host-derived cpu_get_clock().
 */
int64_t cpus_get_virtual_clock(void)
{
    /*
     * XXX
     *
     * need to check that cpus_accel is not NULL, because qcow2 calls
     * qemu_get_clock_ns(CLOCK_VIRTUAL) without any accel initialized and
     * with ticks disabled in some io-tests:
     * 030 040 041 060 099 120 127 140 156 161 172 181 191 192 195 203 229 249 256 267
     *
     * is this expected?
     *
     * XXX
     */
    if (cpus_accel && cpus_accel->get_virtual_clock) {
        return cpus_accel->get_virtual_clock();
    }
    return cpu_get_clock();
}
217
218/*
219 * return the time elapsed in VM between vm_start and vm_stop. Unless
220 * icount is active, cpus_get_elapsed_ticks() uses units of the host CPU cycle
221 * counter.
222 */
223int64_t cpus_get_elapsed_ticks(void)
224{
994aa172 225 if (cpus_accel->get_elapsed_ticks) {
430065da
CF
226 return cpus_accel->get_elapsed_ticks();
227 }
430065da
CF
228 return cpu_get_ticks();
229}
230
bb4776be
CF
/*
 * Default interrupt delivery: raise the request bits on @cpu and, when
 * called from a different thread, kick the vCPU so it notices them.
 */
static void generic_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

/* Deliver interrupt bits @mask to @cpu via the accelerator's hook when
 * it has one, else via the generic path above. */
void cpu_interrupt(CPUState *cpu, int mask)
{
    if (cpus_accel->handle_interrupt) {
        cpus_accel->handle_interrupt(cpu, mask);
    } else {
        generic_handle_interrupt(cpu, mask);
    }
}
248
/*
 * Stop the VM and move it to runstate @state.  The sequence is
 * deliberate: set the runstate first so cpu_thread_is_idle() sees the
 * stop, freeze the tick counters, pause the vCPUs, then notify vmstate
 * listeners.  @send_stop controls whether a QMP STOP event is emitted.
 *
 * The block layer is always drained and flushed, even if the VM was
 * already stopped; returns the result of bdrv_flush_all().
 */
static int do_vm_stop(RunState state, bool send_stop)
{
    int ret = 0;

    if (runstate_is_running()) {
        runstate_set(state);
        cpu_disable_ticks();
        pause_all_vcpus();
        vm_state_notify(0, state);
        if (send_stop) {
            qapi_event_send_stop();
        }
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();
    trace_vm_stop_flush_all(ret);

    return ret;
}

/* Special vm_stop() variant for terminating the process. Historically clients
 * did not expect a QMP STOP event and so we need to retain compatibility.
 */
int vm_shutdown(void)
{
    return do_vm_stop(RUN_STATE_SHUTDOWN, false);
}
277
430065da 278bool cpu_can_run(CPUState *cpu)
296af7c9 279{
4fdeee7c 280 if (cpu->stop) {
a1fcaa73 281 return false;
0ab07c62 282 }
321bc0b2 283 if (cpu_is_stopped(cpu)) {
a1fcaa73 284 return false;
0ab07c62 285 }
a1fcaa73 286 return true;
296af7c9
BS
287}
288
/*
 * React to a guest debug exception on @cpu.  During record/replay debug
 * runs a breakpoint must not stop the deterministic execution, so it is
 * logged and stepped over instead; otherwise control is handed to the
 * gdbstub and the vCPU is parked.
 */
void cpu_handle_guest_debug(CPUState *cpu)
{
    if (replay_running_debug()) {
        if (!cpu->singlestep_enabled) {
            /*
             * Report about the breakpoint and
             * make a single step to skip it
             */
            replay_breakpoint();
            cpu_single_step(cpu, SSTEP_ENABLE);
        } else {
            /* The skip-step is done; restore normal execution. */
            cpu_single_step(cpu, 0);
        }
    } else {
        gdb_set_stop_cpu(cpu);
        qemu_system_debug_request();
        cpu->stopped = true;
    }
}
308
6d9cb73c
JK
#ifdef CONFIG_LINUX
/*
 * Re-deliver a fatal SIGBUS with the default disposition so the process
 * dies with the proper signal status.  Restores SIG_DFL, re-raises, and
 * unblocks SIGBUS; only reached past the raise() if sigaction() failed,
 * in which case we abort().  Does not return.
 */
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        /* The signal was blocked for the handler; unblock so it fires. */
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!");
    abort();
}
326
/*
 * SIGBUS handler: forward hardware memory-error signals (BUS_MCEERR_AO /
 * BUS_MCEERR_AR) to KVM; anything else — or a KVM refusal — is fatal via
 * sigbus_reraise().
 */
static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx)
{
    if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) {
        sigbus_reraise();
    }

    if (current_cpu) {
        /* Called asynchronously in VCPU thread.  */
        if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    } else {
        /* Called synchronously (via signalfd) in main thread.  */
        if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    }
}
345
/*
 * Install the SIGBUS handler and ask the kernel (via prctl) for early
 * machine-check notification so memory errors are reported while still
 * recoverable.  Linux-only; the non-Linux stub below does nothing.
 */
static void qemu_init_sigbus(void)
{
    struct sigaction action;

    /*
     * ALERT: when modifying this, take care that SIGBUS forwarding in
     * os_mem_prealloc() will continue working as expected.
     */
    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
#else /* !CONFIG_LINUX */
static void qemu_init_sigbus(void)
{
}
#endif /* !CONFIG_LINUX */
ff48eb5f 366
296af7c9
BS
/* Identity of the main loop (I/O) thread, recorded at init time. */
static QemuThread io_thread;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;

/*
 * One-time setup of the vCPU machinery: SIGBUS handling, the condition
 * variables above, and the BQL.  Must be called from the main thread
 * before any vCPU is created (it records the caller as io_thread).
 */
void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}
383
/*
 * Run @func(@data) on @cpu's thread and wait for completion, dropping
 * the BQL (qemu_global_mutex) while waiting so the vCPU can make
 * progress.
 */
void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
}
388
/*
 * Mark the calling vCPU as stopped (must be invoked on @cpu's own
 * thread), optionally forcing it out of its execution loop via
 * cpu_exit(), and wake any thread waiting in pause_all_vcpus().
 */
static void qemu_cpu_stop(CPUState *cpu, bool exit)
{
    g_assert(qemu_cpu_is_self(cpu));
    cpu->stop = false;
    cpu->stopped = true;
    if (exit) {
        cpu_exit(cpu);
    }
    qemu_cond_broadcast(&qemu_pause_cond);
}
399
/*
 * Per-iteration housekeeping for a vCPU thread: re-arm the kick flag,
 * honor a pending stop request, and drain queued run_on_cpu() work.
 */
void qemu_wait_io_event_common(CPUState *cpu)
{
    /* mb pairs with the kick path so a new kick is not lost */
    qatomic_mb_set(&cpu->thread_kicked, false);
    if (cpu->stop) {
        qemu_cpu_stop(cpu, false);
    }
    process_queued_cpu_work(cpu);
}
408
/*
 * Block the vCPU thread (BQL held) on its halt condition variable while
 * it is idle, notifying the plugin layer exactly once per idle/resume
 * transition, then perform the common wakeup housekeeping.
 */
void qemu_wait_io_event(CPUState *cpu)
{
    bool slept = false;

    while (cpu_thread_is_idle(cpu)) {
        if (!slept) {
            slept = true;
            qemu_plugin_vcpu_idle_cb(cpu);
        }
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }
    if (slept) {
        qemu_plugin_vcpu_resume_cb(cpu);
    }

#ifdef _WIN32
    /* Eat dummy APC queued by cpus_kick_thread. */
    if (hax_enabled()) {
        SleepEx(0, TRUE);
    }
#endif
    qemu_wait_io_event_common(cpu);
}
432
/*
 * Interrupt @cpu's thread with SIG_IPI so it re-examines its state.
 * thread_kicked debounces repeat kicks until the vCPU clears it in
 * qemu_wait_io_event_common().  ESRCH (thread already gone) is
 * tolerated; any other pthread_kill failure is fatal.  No-op on
 * Windows, where kicking is accel-specific.
 */
void cpus_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    if (cpu->thread_kicked) {
        return;
    }
    cpu->thread_kicked = true;
    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err && err != ESRCH) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#endif
}
ed9164a3 449
/*
 * Wake @cpu: broadcast its halt condvar in case it is sleeping idle,
 * then kick the thread out of guest execution — via the accelerator's
 * hook when present, else the generic signal-based path.
 */
void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (cpus_accel->kick_vcpu_thread) {
        cpus_accel->kick_vcpu_thread(cpu);
    } else { /* default */
        cpus_kick_thread(cpu);
    }
}

/* Kick the calling vCPU thread itself (must run on a vCPU thread). */
void qemu_cpu_kick_self(void)
{
    assert(current_cpu);
    cpus_kick_thread(current_cpu);
}
465
/* Return true if the calling thread is @cpu's vCPU thread. */
bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

/* Return true if the calling thread is any vCPU thread (current_cpu is
 * a per-thread variable, non-NULL only on vCPU threads). */
bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}
475
/* Per-thread flag tracking whether this thread currently holds the BQL. */
static __thread bool iothread_locked = false;

/* Return true if the calling thread holds the BQL. */
bool qemu_mutex_iothread_locked(void)
{
    return iothread_locked;
}
482
/*
 * The BQL is taken from so many places that it is worth profiling the
 * callers directly, instead of funneling them all through a single function.
 */
void qemu_mutex_lock_iothread_impl(const char *file, int line)
{
    /* Indirect lock function: may be swapped for a profiling variant. */
    QemuMutexLockFunc bql_lock = qatomic_read(&qemu_bql_mutex_lock_func);

    g_assert(!qemu_mutex_iothread_locked());   /* BQL is not recursive */
    bql_lock(&qemu_global_mutex, file, line);
    /* Flag set only after the lock is held, while still owning it. */
    iothread_locked = true;
}

/* Release the BQL; the flag is cleared before unlocking, while the lock
 * still protects it from this thread's perspective. */
void qemu_mutex_unlock_iothread(void)
{
    g_assert(qemu_mutex_iothread_locked());
    iothread_locked = false;
    qemu_mutex_unlock(&qemu_global_mutex);
}
502
/* Wait on @cond using the BQL as the associated mutex (caller holds it). */
void qemu_cond_wait_iothread(QemuCond *cond)
{
    qemu_cond_wait(cond, &qemu_global_mutex);
}

/* As above, but give up after @ms milliseconds. */
void qemu_cond_timedwait_iothread(QemuCond *cond, int ms)
{
    qemu_cond_timedwait(cond, &qemu_global_mutex, ms);
}
512
/* signal CPU creation — unblocks qemu_init_vcpu()'s wait loop. */
void cpu_thread_signal_created(CPUState *cpu)
{
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
}

/* signal CPU destruction — counterpart used on vCPU thread teardown. */
void cpu_thread_signal_destroyed(CPUState *cpu)
{
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
}

527
/* Return true once every vCPU has acknowledged a stop request. */
static bool all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return false;
        }
    }

    return true;
}
540
/*
 * Stop every vCPU and wait until all have acknowledged.  Called with
 * the BQL held.  The calling thread may itself be a vCPU thread, in
 * which case it is stopped directly via qemu_cpu_stop(); all others are
 * flagged and kicked, then waited for on qemu_pause_cond (kicking again
 * on each wakeup in case a kick was consumed before the flag was seen).
 */
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        if (qemu_cpu_is_self(cpu)) {
            qemu_cpu_stop(cpu, true);
        } else {
            cpu->stop = true;
            qemu_cpu_kick(cpu);
        }
    }

    /* We need to drop the replay_lock so any vCPU threads woken up
     * can finish their replay tasks
     */
    replay_mutex_unlock();

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }

    /* Re-take the replay lock outside the BQL to respect lock ordering
     * (replay lock before BQL). */
    qemu_mutex_unlock_iothread();
    replay_mutex_lock();
    qemu_mutex_lock_iothread();
}
571
/* Clear @cpu's stop state and kick its thread back into execution. */
void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}
578
/*
 * Resume every vCPU.  Bails out unless the VM runstate is already
 * "running" (set by vm_prepare_start()); re-enables the virtual clock
 * that pause_all_vcpus() disabled.
 */
void resume_all_vcpus(void)
{
    CPUState *cpu;

    if (!runstate_is_running()) {
        return;
    }

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}
592
/*
 * Synchronously unplug @cpu: request stop+unplug, kick the thread, and
 * join it.  The BQL is dropped around the join so the vCPU thread can
 * acquire it while winding down, then re-taken for the caller.
 */
void cpu_remove_sync(CPUState *cpu)
{
    cpu->stop = true;
    cpu->unplug = true;
    qemu_cpu_kick(cpu);
    qemu_mutex_unlock_iothread();
    qemu_thread_join(cpu->thread);
    qemu_mutex_lock_iothread();
}
602
/*
 * Register the accelerator's vCPU ops.  create_vcpu_thread is the only
 * mandatory hook; everything else in AccelOpsClass is optional.
 */
void cpus_register_accel(const AccelOpsClass *ops)
{
    assert(ops != NULL);
    assert(ops->create_vcpu_thread != NULL); /* mandatory */
    cpus_accel = ops;
}
609
/*
 * Create and start the thread for @cpu via the registered accelerator,
 * then block (BQL held, dropped while waiting on qemu_cpu_cond) until
 * the new thread signals creation via cpu_thread_signal_created().
 * Also seeds SMP topology fields, the stopped flag, and the guest RNG.
 */
void qemu_init_vcpu(CPUState *cpu)
{
    MachineState *ms = MACHINE(qdev_get_machine());

    cpu->nr_cores = ms->smp.cores;
    cpu->nr_threads = ms->smp.threads;
    cpu->stopped = true;   /* remains stopped until resume_all_vcpus() */
    cpu->random_seed = qemu_guest_random_seed_thread_part1();

    if (!cpu->as) {
        /* If the target cpu hasn't set up any address spaces itself,
         * give it the default one.
         */
        cpu->num_ases = 1;
        cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
    }

    /* accelerators all implement the AccelOpsClass */
    g_assert(cpus_accel != NULL && cpus_accel->create_vcpu_thread != NULL);
    cpus_accel->create_vcpu_thread(cpu);

    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
635
/* Request a stop of the calling vCPU, if any; no-op on non-vCPU threads. */
void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = true;
        cpu_exit(current_cpu);
    }
}
643
/*
 * Stop the VM, moving it to runstate @state.  From a vCPU thread the
 * stop cannot be performed inline (the thread must leave guest code
 * first), so a vmstop request is queued for the main loop and only the
 * calling vCPU is halted here; returns 0 in that case.
 */
int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state, true);
}
659
/**
 * Prepare for (re)starting the VM.
 * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
 * running or in case of an error condition), 0 otherwise.
 */
int vm_prepare_start(void)
{
    RunState requested;

    qemu_vmstop_requested(&requested);
    if (runstate_is_running() && requested == RUN_STATE__MAX) {
        /* Already running and no stop pending: nothing to do. */
        return -1;
    }

    /* Ensure that a STOP/RESUME pair of events is emitted if a
     * vmstop request was pending. The BLOCK_IO_ERROR event, for
     * example, according to documentation is always followed by
     * the STOP event.
     */
    if (runstate_is_running()) {
        qapi_event_send_stop();
        qapi_event_send_resume();
        return -1;
    }

    /* We are sending this now, but the CPUs will be resumed shortly later */
    qapi_event_send_resume();

    cpu_enable_ticks();
    runstate_set(RUN_STATE_RUNNING);
    vm_state_notify(1, RUN_STATE_RUNNING);
    return 0;
}

/* Start (or restart) the VM; resumes the vCPUs only when preparation
 * succeeded (vm_prepare_start() returned 0). */
void vm_start(void)
{
    if (!vm_prepare_start()) {
        resume_all_vcpus();
    }
}
700
8a9236f1
LC
/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        int ret;
        runstate_set(state);

        bdrv_drain_all();
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        ret = bdrv_flush_all();
        trace_vm_stop_flush_all(ret);
        return ret;
    }
}
719
/* Print the list of CPU models supported by this target (e.g. for
 * "-cpu help"); no-op for targets without a cpu_list implementation. */
void list_cpus(const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list();
#endif
}
de0b36b6 727
0cfd6a9a
LC
728void qmp_memsave(int64_t addr, int64_t size, const char *filename,
729 bool has_cpu, int64_t cpu_index, Error **errp)
730{
731 FILE *f;
732 uint32_t l;
55e5c285 733 CPUState *cpu;
0cfd6a9a 734 uint8_t buf[1024];
0dc9daf0 735 int64_t orig_addr = addr, orig_size = size;
0cfd6a9a
LC
736
737 if (!has_cpu) {
738 cpu_index = 0;
739 }
740
151d1322
AF
741 cpu = qemu_get_cpu(cpu_index);
742 if (cpu == NULL) {
c6bd8c70
MA
743 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
744 "a CPU number");
0cfd6a9a
LC
745 return;
746 }
747
748 f = fopen(filename, "wb");
749 if (!f) {
618da851 750 error_setg_file_open(errp, errno, filename);
0cfd6a9a
LC
751 return;
752 }
753
754 while (size != 0) {
755 l = sizeof(buf);
756 if (l > size)
757 l = size;
2f4d0f59 758 if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
0dc9daf0
BP
759 error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
760 " specified", orig_addr, orig_size);
2f4d0f59
AK
761 goto exit;
762 }
0cfd6a9a 763 if (fwrite(buf, 1, l, f) != l) {
c6bd8c70 764 error_setg(errp, QERR_IO_ERROR);
0cfd6a9a
LC
765 goto exit;
766 }
767 addr += l;
768 size -= l;
769 }
770
771exit:
772 fclose(f);
773}
6d3962bf
LC
774
775void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
776 Error **errp)
777{
778 FILE *f;
779 uint32_t l;
780 uint8_t buf[1024];
781
782 f = fopen(filename, "wb");
783 if (!f) {
618da851 784 error_setg_file_open(errp, errno, filename);
6d3962bf
LC
785 return;
786 }
787
788 while (size != 0) {
789 l = sizeof(buf);
790 if (l > size)
791 l = size;
eb6282f2 792 cpu_physical_memory_read(addr, buf, l);
6d3962bf 793 if (fwrite(buf, 1, l, f) != l) {
c6bd8c70 794 error_setg(errp, QERR_IO_ERROR);
6d3962bf
LC
795 goto exit;
796 }
797 addr += l;
798 size -= l;
799 }
800
801exit:
802 fclose(f);
803}
ab49ab5c
LC
804
/* QMP 'inject-nmi': deliver an NMI to the monitor's currently selected
 * CPU; errors propagate through @errp. */
void qmp_inject_nmi(Error **errp)
{
    nmi_monitor_handle(monitor_get_cpu_index(monitor_cur()), errp);
}
27498bef 809