/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "monitor/monitor.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-machine.h"
#include "qapi/qapi-commands-misc.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/qmp/qerror.h"
#include "exec/gdbstub.h"
#include "sysemu/hw_accel.h"
#include "exec/exec-all.h"
#include "qemu/thread.h"
#include "qemu/plugin.h"
#include "sysemu/cpus.h"
#include "qemu/guest-random.h"
#include "hw/nmi.h"
#include "sysemu/replay.h"
#include "sysemu/runstate.h"
#include "sysemu/cpu-timers.h"
#include "hw/boards.h"
#include "hw/hw.h"

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static QemuMutex qemu_global_mutex;

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

bool cpu_work_list_empty(CPUState *cpu)
{
    bool ret;

    qemu_mutex_lock(&cpu->work_mutex);
    ret = QSIMPLEQ_EMPTY(&cpu->work_list);
    qemu_mutex_unlock(&cpu->work_mutex);
    return ret;
}

bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || !cpu_work_list_empty(cpu)) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

/*
 * The chosen accelerator is supposed to register this.
 */
static const CpusAccel *cpus_accel;

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

void cpu_synchronize_all_pre_loadvm(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_pre_loadvm(cpu);
    }
}

void cpu_synchronize_state(CPUState *cpu)
{
    if (cpus_accel->synchronize_state) {
        cpus_accel->synchronize_state(cpu);
    }
}

void cpu_synchronize_post_reset(CPUState *cpu)
{
    if (cpus_accel->synchronize_post_reset) {
        cpus_accel->synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_post_init(CPUState *cpu)
{
    if (cpus_accel->synchronize_post_init) {
        cpus_accel->synchronize_post_init(cpu);
    }
}

void cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    if (cpus_accel->synchronize_pre_loadvm) {
        cpus_accel->synchronize_pre_loadvm(cpu);
    }
}

int64_t cpus_get_virtual_clock(void)
{
    /*
     * XXX
     *
     * Need to check that cpus_accel is not NULL, because qcow2 calls
     * qemu_get_clock_ns(CLOCK_VIRTUAL) without any accel initialized and
     * with ticks disabled in some io-tests:
     * 030 040 041 060 099 120 127 140 156 161 172 181 191 192 195 203 229 249 256 267
     *
     * Is this expected?
     *
     * XXX
     */
    if (cpus_accel && cpus_accel->get_virtual_clock) {
        return cpus_accel->get_virtual_clock();
    }
    return cpu_get_clock();
}

/*
 * Return the time elapsed in the VM between vm_start and vm_stop.  Unless
 * icount is active, cpus_get_elapsed_ticks() uses units of the host CPU
 * cycle counter.
 */
int64_t cpus_get_elapsed_ticks(void)
{
    if (cpus_accel->get_elapsed_ticks) {
        return cpus_accel->get_elapsed_ticks();
    }
    return cpu_get_ticks();
}

static void generic_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

void cpu_interrupt(CPUState *cpu, int mask)
{
    if (cpus_accel->handle_interrupt) {
        cpus_accel->handle_interrupt(cpu, mask);
    } else {
        generic_handle_interrupt(cpu, mask);
    }
}

static int do_vm_stop(RunState state, bool send_stop)
{
    int ret = 0;

    if (runstate_is_running()) {
        runstate_set(state);
        cpu_disable_ticks();
        pause_all_vcpus();
        vm_state_notify(0, state);
        if (send_stop) {
            qapi_event_send_stop();
        }
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

/* Special vm_stop() variant for terminating the process.  Historically clients
 * did not expect a QMP STOP event and so we need to retain compatibility.
 */
int vm_shutdown(void)
{
    return do_vm_stop(RUN_STATE_SHUTDOWN, false);
}

bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

void cpu_handle_guest_debug(CPUState *cpu)
{
    if (replay_running_debug()) {
        if (!cpu->singlestep_enabled) {
            /*
             * Report the breakpoint and do a single step to skip over it.
             */
            replay_breakpoint();
            cpu_single_step(cpu, SSTEP_ENABLE);
        } else {
            cpu_single_step(cpu, 0);
        }
    } else {
        gdb_set_stop_cpu(cpu);
        qemu_system_debug_request();
        cpu->stopped = true;
    }
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx)
{
    if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) {
        sigbus_reraise();
    }

    if (current_cpu) {
        /* Called asynchronously in VCPU thread.  */
        if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    } else {
        /* Called synchronously (via signalfd) in main thread.  */
        if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
#else /* !CONFIG_LINUX */
static void qemu_init_sigbus(void)
{
}
#endif /* !CONFIG_LINUX */

static QemuThread io_thread;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
}

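/*
 * Illustrative sketch, not part of this file: how a caller typically queues
 * work through run_on_cpu() above.  The callback and flag names are invented
 * for the example; run_on_cpu_func and RUN_ON_CPU_HOST_PTR come from
 * "hw/core/cpu.h".  run_on_cpu() blocks (releasing the BQL while it waits)
 * until the callback has executed on the target vCPU's thread.
 *
 *     static void set_flag_on_vcpu(CPUState *cs, run_on_cpu_data data)
 *     {
 *         bool *flag = data.host_ptr;
 *         *flag = true;
 *     }
 *
 *     run_on_cpu(cs, set_flag_on_vcpu, RUN_ON_CPU_HOST_PTR(&flag));
 */
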
static void qemu_cpu_stop(CPUState *cpu, bool exit)
{
    g_assert(qemu_cpu_is_self(cpu));
    cpu->stop = false;
    cpu->stopped = true;
    if (exit) {
        cpu_exit(cpu);
    }
    qemu_cond_broadcast(&qemu_pause_cond);
}

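/*
 * The helpers below run in vCPU thread context with the BQL held:
 * qemu_wait_io_event_common() acknowledges a kick, honours a pending stop
 * request and drains the per-CPU work queue; qemu_wait_io_event()
 * additionally sleeps on the CPU's halt condition while the thread is idle.
 */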
void qemu_wait_io_event_common(CPUState *cpu)
{
    qatomic_mb_set(&cpu->thread_kicked, false);
    if (cpu->stop) {
        qemu_cpu_stop(cpu, false);
    }
    process_queued_cpu_work(cpu);
}

void qemu_wait_io_event(CPUState *cpu)
{
    bool slept = false;

    while (cpu_thread_is_idle(cpu)) {
        if (!slept) {
            slept = true;
            qemu_plugin_vcpu_idle_cb(cpu);
        }
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }
    if (slept) {
        qemu_plugin_vcpu_resume_cb(cpu);
    }

#ifdef _WIN32
    /* Eat dummy APC queued by cpus_kick_thread.  */
    if (hax_enabled()) {
        SleepEx(0, TRUE);
    }
#endif
    qemu_wait_io_event_common(cpu);
}

void cpus_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    if (cpu->thread_kicked) {
        return;
    }
    cpu->thread_kicked = true;
    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err && err != ESRCH) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (cpus_accel->kick_vcpu_thread) {
        cpus_accel->kick_vcpu_thread(cpu);
    } else { /* default */
        cpus_kick_thread(cpu);
    }
}

void qemu_cpu_kick_self(void)
{
    assert(current_cpu);
    cpus_kick_thread(current_cpu);
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

static __thread bool iothread_locked = false;

bool qemu_mutex_iothread_locked(void)
{
    return iothread_locked;
}

/*
 * The BQL is taken from so many places that it is worth profiling the
 * callers directly, instead of funneling them all through a single function.
 */
void qemu_mutex_lock_iothread_impl(const char *file, int line)
{
    QemuMutexLockFunc bql_lock = qatomic_read(&qemu_bql_mutex_lock_func);

    g_assert(!qemu_mutex_iothread_locked());
    bql_lock(&qemu_global_mutex, file, line);
    iothread_locked = true;
}

void qemu_mutex_unlock_iothread(void)
{
    g_assert(qemu_mutex_iothread_locked());
    iothread_locked = false;
    qemu_mutex_unlock(&qemu_global_mutex);
}

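/*
 * Illustrative sketch, not part of this file: callers normally take the BQL
 * through the qemu_mutex_lock_iothread() macro from "qemu/main-loop.h", which
 * supplies __FILE__/__LINE__ to the _impl function above for lock profiling:
 *
 *     if (!qemu_mutex_iothread_locked()) {
 *         qemu_mutex_lock_iothread();
 *         ... touch device or other global state ...
 *         qemu_mutex_unlock_iothread();
 *     }
 */
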
void qemu_cond_wait_iothread(QemuCond *cond)
{
    qemu_cond_wait(cond, &qemu_global_mutex);
}

void qemu_cond_timedwait_iothread(QemuCond *cond, int ms)
{
    qemu_cond_timedwait(cond, &qemu_global_mutex, ms);
}

/* signal CPU creation */
void cpu_thread_signal_created(CPUState *cpu)
{
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
}

/* signal CPU destruction */
void cpu_thread_signal_destroyed(CPUState *cpu)
{
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
}


static bool all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return false;
        }
    }

    return true;
}

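/*
 * Request every vCPU to stop, then wait (dropping the BQL inside
 * qemu_cond_wait) until all of them report themselves as stopped.
 */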
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        if (qemu_cpu_is_self(cpu)) {
            qemu_cpu_stop(cpu, true);
        } else {
            cpu->stop = true;
            qemu_cpu_kick(cpu);
        }
    }

    /* We need to drop the replay_lock so any vCPU threads woken up
     * can finish their replay tasks
     */
    replay_mutex_unlock();

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }

    qemu_mutex_unlock_iothread();
    replay_mutex_lock();
    qemu_mutex_lock_iothread();
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    if (!runstate_is_running()) {
        return;
    }

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}

void cpu_remove_sync(CPUState *cpu)
{
    cpu->stop = true;
    cpu->unplug = true;
    qemu_cpu_kick(cpu);
    qemu_mutex_unlock_iothread();
    qemu_thread_join(cpu->thread);
    qemu_mutex_lock_iothread();
}

void cpus_register_accel(const CpusAccel *ca)
{
    assert(ca != NULL);
    assert(ca->create_vcpu_thread != NULL); /* mandatory */
    cpus_accel = ca;
}

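/*
 * Illustrative sketch, not part of this file: an accelerator registers its
 * CpusAccel hooks once during accel setup.  "foo" and its callbacks are
 * invented names; the fields shown are ones this file dispatches on, with
 * create_vcpu_thread being the only mandatory hook.
 *
 *     static const CpusAccel foo_cpus_accel = {
 *         .create_vcpu_thread = foo_start_vcpu_thread,
 *         .kick_vcpu_thread   = foo_kick_vcpu_thread,
 *         .synchronize_state  = foo_cpu_synchronize_state,
 *     };
 *
 *     cpus_register_accel(&foo_cpus_accel);
 */
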
void qemu_init_vcpu(CPUState *cpu)
{
    MachineState *ms = MACHINE(qdev_get_machine());

    cpu->nr_cores = ms->smp.cores;
    cpu->nr_threads = ms->smp.threads;
    cpu->stopped = true;
    cpu->random_seed = qemu_guest_random_seed_thread_part1();

    if (!cpu->as) {
        /* If the target cpu hasn't set up any address spaces itself,
         * give it the default one.
         */
        cpu->num_ases = 1;
        cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
    }

    /* accelerators all implement the CpusAccel interface */
    g_assert(cpus_accel != NULL && cpus_accel->create_vcpu_thread != NULL);
    cpus_accel->create_vcpu_thread(cpu);

    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = true;
        cpu_exit(current_cpu);
    }
}

int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state, true);
}

/**
 * Prepare for (re)starting the VM.
 * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
 * running or in case of an error condition), 0 otherwise.
 */
int vm_prepare_start(void)
{
    RunState requested;

    qemu_vmstop_requested(&requested);
    if (runstate_is_running() && requested == RUN_STATE__MAX) {
        return -1;
    }

    /* Ensure that a STOP/RESUME pair of events is emitted if a
     * vmstop request was pending.  The BLOCK_IO_ERROR event, for
     * example, according to documentation is always followed by
     * the STOP event.
     */
    if (runstate_is_running()) {
        qapi_event_send_stop();
        qapi_event_send_resume();
        return -1;
    }

    /* We are sending this now, but the CPUs will be resumed shortly later */
    qapi_event_send_resume();

    cpu_enable_ticks();
    runstate_set(RUN_STATE_RUNNING);
    vm_state_notify(1, RUN_STATE_RUNNING);
    return 0;
}

void vm_start(void)
{
    if (!vm_prepare_start()) {
        resume_all_vcpus();
    }
}

/* Does a state transition even if the VM is already stopped;
 * the current state is forgotten forever. */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);

        bdrv_drain_all();
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}

void list_cpus(const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list();
#endif
}

void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];
    int64_t orig_addr = addr, orig_size = size;

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                   "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
            error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
                             " specified", orig_addr, orig_size);
            goto exit;
        }
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

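/*
 * For reference, qmp_memsave() above backs the QMP "memsave" command; the
 * argument names below come from the QAPI schema rather than this file, so
 * treat this as an illustrative sketch:
 *
 *     { "execute": "memsave",
 *       "arguments": { "val": 4096, "size": 8192, "filename": "/tmp/vmem" } }
 *
 * The optional "cpu-index" argument selects whose virtual address space is
 * read; when it is absent the code defaults to CPU 0.
 */
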
void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        cpu_physical_memory_read(addr, buf, l);
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
    nmi_monitor_handle(monitor_get_cpu_index(monitor_cur()), errp);
}

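/*
 * For reference, qmp_inject_nmi() above backs the QMP "inject-nmi" command,
 * which takes no arguments and targets the monitor's current CPU:
 *
 *     { "execute": "inject-nmi" }
 */
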