]>
Commit | Line | Data |
---|---|---|
296af7c9 BS |
1 | /* |
2 | * QEMU System Emulator | |
3 | * | |
4 | * Copyright (c) 2003-2008 Fabrice Bellard | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a copy | |
7 | * of this software and associated documentation files (the "Software"), to deal | |
8 | * in the Software without restriction, including without limitation the rights | |
9 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
10 | * copies of the Software, and to permit persons to whom the Software is | |
11 | * furnished to do so, subject to the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice shall be included in | |
14 | * all copies or substantial portions of the Software. | |
15 | * | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
22 | * THE SOFTWARE. | |
23 | */ | |
24 | ||
7b31bbc2 | 25 | #include "qemu/osdep.h" |
a8d25326 | 26 | #include "qemu-common.h" |
83c9089e | 27 | #include "monitor/monitor.h" |
e688df6b | 28 | #include "qapi/error.h" |
112ed241 | 29 | #include "qapi/qapi-commands-misc.h" |
9af23989 | 30 | #include "qapi/qapi-events-run-state.h" |
a4e15de9 | 31 | #include "qapi/qmp/qerror.h" |
022c62cb | 32 | #include "exec/gdbstub.h" |
b3946626 | 33 | #include "sysemu/hw_accel.h" |
63c91552 | 34 | #include "exec/exec-all.h" |
1de7afc9 | 35 | #include "qemu/thread.h" |
30865f31 | 36 | #include "qemu/plugin.h" |
9c17d615 | 37 | #include "sysemu/cpus.h" |
9c09a251 | 38 | #include "qemu/guest-random.h" |
9cb805fd | 39 | #include "hw/nmi.h" |
8b427044 | 40 | #include "sysemu/replay.h" |
54d31236 | 41 | #include "sysemu/runstate.h" |
740b1759 | 42 | #include "sysemu/cpu-timers.h" |
5cc8767d | 43 | #include "hw/boards.h" |
650d103d | 44 | #include "hw/hw.h" |
0ff0fc19 | 45 | |
#ifdef CONFIG_LINUX

#include <sys/prctl.h>

/*
 * Fallback definitions in case the build host's <sys/prctl.h> lacks the
 * machine-check kill-policy constants used by qemu_init_sigbus().
 */
#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */
/* The Big QEMU Lock (BQL); taken via qemu_mutex_lock_iothread_impl(). */
static QemuMutex qemu_global_mutex;
321bc0b2 TC |
66 | bool cpu_is_stopped(CPUState *cpu) |
67 | { | |
68 | return cpu->stopped || !runstate_is_running(); | |
69 | } | |
70 | ||
430065da | 71 | bool cpu_work_list_empty(CPUState *cpu) |
0c0fcc20 EC |
72 | { |
73 | bool ret; | |
74 | ||
75 | qemu_mutex_lock(&cpu->work_mutex); | |
76 | ret = QSIMPLEQ_EMPTY(&cpu->work_list); | |
77 | qemu_mutex_unlock(&cpu->work_mutex); | |
78 | return ret; | |
79 | } | |
80 | ||
/*
 * Return true if @cpu's thread has nothing to do and may go to sleep.
 *
 * Not idle while a stop is pending or queued work exists; idle once the
 * CPU is stopped; otherwise idle only when halted with no pending work
 * and the halt is not being handled in-kernel (KVM).
 */
bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || !cpu_work_list_empty(cpu)) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}
740b1759 | 96 | bool all_cpu_threads_idle(void) |
ac873f1e | 97 | { |
182735ef | 98 | CPUState *cpu; |
ac873f1e | 99 | |
bdc44640 | 100 | CPU_FOREACH(cpu) { |
182735ef | 101 | if (!cpu_thread_is_idle(cpu)) { |
ac873f1e PM |
102 | return false; |
103 | } | |
104 | } | |
105 | return true; | |
106 | } | |
107 | ||
/***********************************************************/
/*
 * Report a fatal hardware-emulation error: print the formatted message
 * and every CPU's register state (including FPU) to stderr, then
 * abort().  Does not return.
 */
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}
/*
 * The chosen accelerator is supposed to register this.
 * Set once via cpus_register_accel(); most callers below assume it is
 * non-NULL (cpus_get_virtual_clock() is the documented exception).
 */
static const CpusAccel *cpus_accel;
296af7c9 BS |
131 | void cpu_synchronize_all_states(void) |
132 | { | |
182735ef | 133 | CPUState *cpu; |
296af7c9 | 134 | |
bdc44640 | 135 | CPU_FOREACH(cpu) { |
182735ef | 136 | cpu_synchronize_state(cpu); |
296af7c9 BS |
137 | } |
138 | } | |
139 | ||
140 | void cpu_synchronize_all_post_reset(void) | |
141 | { | |
182735ef | 142 | CPUState *cpu; |
296af7c9 | 143 | |
bdc44640 | 144 | CPU_FOREACH(cpu) { |
182735ef | 145 | cpu_synchronize_post_reset(cpu); |
296af7c9 BS |
146 | } |
147 | } | |
148 | ||
149 | void cpu_synchronize_all_post_init(void) | |
150 | { | |
182735ef | 151 | CPUState *cpu; |
296af7c9 | 152 | |
bdc44640 | 153 | CPU_FOREACH(cpu) { |
182735ef | 154 | cpu_synchronize_post_init(cpu); |
296af7c9 BS |
155 | } |
156 | } | |
157 | ||
75e972da DG |
158 | void cpu_synchronize_all_pre_loadvm(void) |
159 | { | |
160 | CPUState *cpu; | |
161 | ||
162 | CPU_FOREACH(cpu) { | |
163 | cpu_synchronize_pre_loadvm(cpu); | |
164 | } | |
165 | } | |
166 | ||
/* Sync @cpu's QEMU-side state from the accelerator, if it supports it. */
void cpu_synchronize_state(CPUState *cpu)
{
    if (cpus_accel->synchronize_state) {
        cpus_accel->synchronize_state(cpu);
    }
}
173 | ||
/* Push @cpu's state to the accelerator after a reset, if supported. */
void cpu_synchronize_post_reset(CPUState *cpu)
{
    if (cpus_accel->synchronize_post_reset) {
        cpus_accel->synchronize_post_reset(cpu);
    }
}
180 | ||
/* Push @cpu's state to the accelerator after init, if supported. */
void cpu_synchronize_post_init(CPUState *cpu)
{
    if (cpus_accel->synchronize_post_init) {
        cpus_accel->synchronize_post_init(cpu);
    }
}
187 | ||
/* Notify the accelerator that @cpu's state is about to be overwritten by loadvm. */
void cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    if (cpus_accel->synchronize_pre_loadvm) {
        cpus_accel->synchronize_pre_loadvm(cpu);
    }
}
194 | ||
/*
 * Return the current virtual clock value, delegating to the accelerator
 * when it provides one, otherwise falling back to cpu_get_clock().
 */
int64_t cpus_get_virtual_clock(void)
{
    /*
     * XXX
     *
     * need to check that cpus_accel is not NULL, because qcow2 calls
     * qemu_get_clock_ns(CLOCK_VIRTUAL) without any accel initialized and
     * with ticks disabled in some io-tests:
     * 030 040 041 060 099 120 127 140 156 161 172 181 191 192 195 203 229 249 256 267
     *
     * is this expected?
     *
     * XXX
     */
    if (cpus_accel && cpus_accel->get_virtual_clock) {
        return cpus_accel->get_virtual_clock();
    }
    return cpu_get_clock();
}
214 | ||
/*
 * return the time elapsed in VM between vm_start and vm_stop. Unless
 * icount is active, cpus_get_elapsed_ticks() uses units of the host CPU cycle
 * counter.
 *
 * Unlike cpus_get_virtual_clock(), this assumes an accelerator has
 * already been registered (cpus_accel != NULL).
 */
int64_t cpus_get_elapsed_ticks(void)
{
    if (cpus_accel->get_elapsed_ticks) {
        return cpus_accel->get_elapsed_ticks();
    }
    return cpu_get_ticks();
}
227 | ||
bb4776be CF |
228 | static void generic_handle_interrupt(CPUState *cpu, int mask) |
229 | { | |
230 | cpu->interrupt_request |= mask; | |
231 | ||
232 | if (!qemu_cpu_is_self(cpu)) { | |
233 | qemu_cpu_kick(cpu); | |
234 | } | |
235 | } | |
236 | ||
237 | void cpu_interrupt(CPUState *cpu, int mask) | |
238 | { | |
239 | if (cpus_accel->handle_interrupt) { | |
240 | cpus_accel->handle_interrupt(cpu, mask); | |
241 | } else { | |
242 | generic_handle_interrupt(cpu, mask); | |
243 | } | |
244 | } | |
245 | ||
/*
 * Stop the VM and enter @state: pause all vCPUs, disable ticks and
 * notify vm-state listeners, optionally emitting the QMP STOP event.
 * Block devices are always drained and flushed so the stop point is
 * consistent on disk.  Returns the result of bdrv_flush_all().
 */
static int do_vm_stop(RunState state, bool send_stop)
{
    int ret = 0;

    if (runstate_is_running()) {
        runstate_set(state);
        cpu_disable_ticks();
        pause_all_vcpus();
        vm_state_notify(0, state);
        if (send_stop) {
            qapi_event_send_stop();
        }
    }

    /* Flush even if we were already stopped, so a prior failure surfaces. */
    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}
265 | ||
/* Special vm_stop() variant for terminating the process. Historically clients
 * did not expect a QMP STOP event and so we need to retain compatibility.
 */
int vm_shutdown(void)
{
    return do_vm_stop(RUN_STATE_SHUTDOWN, false);
}
273 | ||
430065da | 274 | bool cpu_can_run(CPUState *cpu) |
296af7c9 | 275 | { |
4fdeee7c | 276 | if (cpu->stop) { |
a1fcaa73 | 277 | return false; |
0ab07c62 | 278 | } |
321bc0b2 | 279 | if (cpu_is_stopped(cpu)) { |
a1fcaa73 | 280 | return false; |
0ab07c62 | 281 | } |
a1fcaa73 | 282 | return true; |
296af7c9 BS |
283 | } |
284 | ||
/*
 * React to a guest debug event on @cpu.  In record/replay debug mode the
 * breakpoint is recorded and single-stepped over so deterministic replay
 * can continue; otherwise the event is handed to the gdbstub and the CPU
 * is stopped for the debugger.
 */
void cpu_handle_guest_debug(CPUState *cpu)
{
    if (replay_running_debug()) {
        if (!cpu->singlestep_enabled) {
            /*
             * Report about the breakpoint and
             * make a single step to skip it
             */
            replay_breakpoint();
            cpu_single_step(cpu, SSTEP_ENABLE);
        } else {
            /* The skipping step is done; drop single-step mode again. */
            cpu_single_step(cpu, 0);
        }
    } else {
        gdb_set_stop_cpu(cpu);
        qemu_system_debug_request();
        cpu->stopped = true;
    }
}
304 | ||
6d9cb73c JK |
305 | #ifdef CONFIG_LINUX |
306 | static void sigbus_reraise(void) | |
307 | { | |
308 | sigset_t set; | |
309 | struct sigaction action; | |
310 | ||
311 | memset(&action, 0, sizeof(action)); | |
312 | action.sa_handler = SIG_DFL; | |
313 | if (!sigaction(SIGBUS, &action, NULL)) { | |
314 | raise(SIGBUS); | |
315 | sigemptyset(&set); | |
316 | sigaddset(&set, SIGBUS); | |
a2d1761d | 317 | pthread_sigmask(SIG_UNBLOCK, &set, NULL); |
6d9cb73c JK |
318 | } |
319 | perror("Failed to re-raise SIGBUS!\n"); | |
320 | abort(); | |
321 | } | |
322 | ||
/*
 * SIGBUS handler: hardware memory errors (BUS_MCEERR_AO/AR) are handed
 * to KVM — per-vCPU when raised asynchronously in a vCPU thread, or
 * globally when delivered synchronously via signalfd in the main
 * thread.  Any SIGBUS that cannot be consumed is re-raised fatally.
 */
static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx)
{
    if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) {
        sigbus_reraise();
    }

    if (current_cpu) {
        /* Called asynchronously in VCPU thread. */
        if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    } else {
        /* Called synchronously (via signalfd) in main thread. */
        if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    }
}
341 | ||
/*
 * Install the SIGBUS handler and ask the kernel (via prctl) to signal
 * machine-check memory errors early, so they can be forwarded to KVM.
 */
static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
6d9cb73c | 353 | #else /* !CONFIG_LINUX */ |
6d9cb73c JK |
/* Non-Linux hosts: no SIGBUS/MCE forwarding to set up. */
static void qemu_init_sigbus(void)
{
}
a16fc07e | 357 | #endif /* !CONFIG_LINUX */ |
ff48eb5f | 358 | |
/* The main-loop (I/O) thread, recorded by qemu_init_cpu_loop(). */
static QemuThread io_thread;

/* cpu creation: signalled by cpu_thread_signal_created()/_destroyed() */
static QemuCond qemu_cpu_cond;
/* system init: broadcast by qemu_cpu_stop(), waited on in pause_all_vcpus() */
static QemuCond qemu_pause_cond;
/*
 * One-time initialisation of the vCPU infrastructure: SIGBUS handling,
 * the cpu/pause condition variables, the BQL, and recording the calling
 * (main/io) thread.  Must run before any vCPU thread is created.
 */
void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}
375 | ||
/*
 * Run @func(@data) on @cpu's thread; passes the BQL so do_run_on_cpu()
 * can drop it while waiting for completion.
 */
void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
}
380 | ||
/*
 * Mark the calling vCPU stopped and wake waiters on qemu_pause_cond
 * (e.g. pause_all_vcpus()).  If @exit, also request that the CPU leave
 * its execution loop.  Must run on @cpu's own thread.
 */
static void qemu_cpu_stop(CPUState *cpu, bool exit)
{
    g_assert(qemu_cpu_is_self(cpu));
    cpu->stop = false;
    cpu->stopped = true;
    if (exit) {
        cpu_exit(cpu);
    }
    qemu_cond_broadcast(&qemu_pause_cond);
}
391 | ||
/*
 * Per-iteration event processing for a vCPU thread: clear the kick
 * debounce flag, honour a pending stop request, and run queued work.
 */
void qemu_wait_io_event_common(CPUState *cpu)
{
    qatomic_mb_set(&cpu->thread_kicked, false);
    if (cpu->stop) {
        qemu_cpu_stop(cpu, false);
    }
    process_queued_cpu_work(cpu);
}
400 | ||
/*
 * Sleep on @cpu->halt_cond (releasing the BQL) while the vCPU is idle,
 * notifying plugins of the idle/resume transition exactly once, then
 * process pending events.  Called with the BQL held.
 */
void qemu_wait_io_event(CPUState *cpu)
{
    bool slept = false;

    while (cpu_thread_is_idle(cpu)) {
        if (!slept) {
            slept = true;
            qemu_plugin_vcpu_idle_cb(cpu);
        }
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }
    if (slept) {
        qemu_plugin_vcpu_resume_cb(cpu);
    }

#ifdef _WIN32
    /* Eat dummy APC queued by cpus_kick_thread. */
    if (hax_enabled()) {
        SleepEx(0, TRUE);
    }
#endif
    qemu_wait_io_event_common(cpu);
}
424 | ||
/*
 * Default vCPU kick: deliver SIG_IPI to @cpu's thread (POSIX hosts only;
 * a no-op on Windows).  thread_kicked debounces repeated kicks; ESRCH is
 * tolerated because the target thread may already have exited.
 */
void cpus_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    if (cpu->thread_kicked) {
        return;
    }
    cpu->thread_kicked = true;
    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err && err != ESRCH) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#endif
}
ed9164a3 | 441 | |
/*
 * Wake @cpu: broadcast its halt condition (in case it sleeps in
 * qemu_wait_io_event()) and kick it out of guest execution via the
 * accelerator hook, falling back to the generic signal-based kick.
 */
void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (cpus_accel->kick_vcpu_thread) {
        cpus_accel->kick_vcpu_thread(cpu);
    } else { /* default */
        cpus_kick_thread(cpu);
    }
}
451 | ||
/* Kick the calling vCPU thread itself; must be called from a vCPU thread. */
void qemu_cpu_kick_self(void)
{
    assert(current_cpu);
    cpus_kick_thread(current_cpu);
}
457 | ||
/* True if the calling thread is @cpu's vCPU thread. */
bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}
462 | ||
79e2b9ae | 463 | bool qemu_in_vcpu_thread(void) |
aa723c23 | 464 | { |
4917cf44 | 465 | return current_cpu && qemu_cpu_is_self(current_cpu); |
aa723c23 JQ |
466 | } |
467 | ||
/* Per-thread flag: does this thread currently hold the BQL?
 * Maintained by qemu_mutex_lock_iothread_impl()/qemu_mutex_unlock_iothread(). */
static __thread bool iothread_locked = false;
/* True if the calling thread holds the BQL (reads the per-thread flag). */
bool qemu_mutex_iothread_locked(void)
{
    return iothread_locked;
}
474 | ||
/*
 * The BQL is taken from so many places that it is worth profiling the
 * callers directly, instead of funneling them all through a single function.
 */
void qemu_mutex_lock_iothread_impl(const char *file, int line)
{
    /* Pluggable lock function so tracing/profiling can intercept it. */
    QemuMutexLockFunc bql_lock = qatomic_read(&qemu_bql_mutex_lock_func);

    /* Recursive BQL acquisition is a bug; catch it here. */
    g_assert(!qemu_mutex_iothread_locked());
    bql_lock(&qemu_global_mutex, file, line);
    iothread_locked = true;
}
487 | ||
/* Release the BQL; caller must currently hold it.  Clear the per-thread
 * flag before unlocking so the flag never claims a lock we no longer own. */
void qemu_mutex_unlock_iothread(void)
{
    g_assert(qemu_mutex_iothread_locked());
    iothread_locked = false;
    qemu_mutex_unlock(&qemu_global_mutex);
}
494 | ||
/* Wait on @cond with the BQL as the associated mutex. */
void qemu_cond_wait_iothread(QemuCond *cond)
{
    qemu_cond_wait(cond, &qemu_global_mutex);
}
499 | ||
/* Wait on @cond with the BQL, giving up after @ms milliseconds. */
void qemu_cond_timedwait_iothread(QemuCond *cond, int ms)
{
    qemu_cond_timedwait(cond, &qemu_global_mutex, ms);
}
504 | ||
/* signal CPU creation: called by the accelerator's vCPU thread once it
 * is up, releasing the wait loop in qemu_init_vcpu(). */
void cpu_thread_signal_created(CPUState *cpu)
{
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
}
511 | ||
/* signal CPU destruction: called by the vCPU thread on its way out. */
void cpu_thread_signal_destroyed(CPUState *cpu)
{
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
}
518 | ||
519 | ||
e8faee06 | 520 | static bool all_vcpus_paused(void) |
296af7c9 | 521 | { |
bdc44640 | 522 | CPUState *cpu; |
296af7c9 | 523 | |
bdc44640 | 524 | CPU_FOREACH(cpu) { |
182735ef | 525 | if (!cpu->stopped) { |
e8faee06 | 526 | return false; |
0ab07c62 | 527 | } |
296af7c9 BS |
528 | } |
529 | ||
e8faee06 | 530 | return true; |
296af7c9 BS |
531 | } |
532 | ||
/*
 * Stop every vCPU and wait until all report stopped.  A calling vCPU
 * stops itself directly; others are flagged and kicked.  The replay
 * lock is dropped while waiting, then retaken in lock order (replay
 * lock before BQL).
 */
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        if (qemu_cpu_is_self(cpu)) {
            qemu_cpu_stop(cpu, true);
        } else {
            cpu->stop = true;
            qemu_cpu_kick(cpu);
        }
    }

    /* We need to drop the replay_lock so any vCPU threads woken up
     * can finish their replay tasks
     */
    replay_mutex_unlock();

    while (!all_vcpus_paused()) {
        /* The wait drops the BQL, letting vCPU threads stop themselves. */
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }

    /* Retake the replay lock in the correct order w.r.t. the BQL. */
    qemu_mutex_unlock_iothread();
    replay_mutex_lock();
    qemu_mutex_lock_iothread();
}
563 | ||
/* Clear @cpu's stop flags and kick it back into execution. */
void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}
570 | ||
296af7c9 BS |
/*
 * Resume every vCPU; a no-op unless the VM run state is "running".
 * Re-enables the virtual clock before kicking the CPUs.
 */
void resume_all_vcpus(void)
{
    CPUState *cpu;

    if (!runstate_is_running()) {
        return;
    }

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}
584 | ||
/*
 * Request @cpu's thread to exit (unplug) and join it.  The BQL is
 * dropped around the join so the exiting vCPU thread can acquire it to
 * finish its shutdown.
 */
void cpu_remove_sync(CPUState *cpu)
{
    cpu->stop = true;
    cpu->unplug = true;
    qemu_cpu_kick(cpu);
    qemu_mutex_unlock_iothread();
    qemu_thread_join(cpu->thread);
    qemu_mutex_lock_iothread();
}
594 | ||
/*
 * Register the accelerator's vCPU handling hooks.  create_vcpu_thread
 * is the only mandatory hook; all others are optional.
 */
void cpus_register_accel(const CpusAccel *ca)
{
    assert(ca != NULL);
    assert(ca->create_vcpu_thread != NULL); /* mandatory */
    cpus_accel = ca;
}
601 | ||
/*
 * Set up @cpu's topology, random seed and address space, then have the
 * registered accelerator create its vCPU thread.  Blocks (dropping the
 * BQL via qemu_cond_wait) until the thread signals creation through
 * cpu_thread_signal_created().
 */
void qemu_init_vcpu(CPUState *cpu)
{
    MachineState *ms = MACHINE(qdev_get_machine());

    cpu->nr_cores = ms->smp.cores;
    cpu->nr_threads = ms->smp.threads;
    cpu->stopped = true;
    cpu->random_seed = qemu_guest_random_seed_thread_part1();

    if (!cpu->as) {
        /* If the target cpu hasn't set up any address spaces itself,
         * give it the default one.
         */
        cpu->num_ases = 1;
        cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
    }

    /* accelerators all implement the CpusAccel interface */
    g_assert(cpus_accel != NULL && cpus_accel->create_vcpu_thread != NULL);
    cpus_accel->create_vcpu_thread(cpu);

    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
627 | ||
/* Flag the calling vCPU (if any) to stop and force it out of execution. */
void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = true;
        cpu_exit(current_cpu);
    }
}
635 | ||
/*
 * Stop the VM into @state.  From a vCPU thread this only queues a
 * vmstop request and stops the current CPU — the main loop completes
 * the stop later.  From the main thread the stop happens synchronously
 * via do_vm_stop() (with a QMP STOP event).
 */
int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state, true);
}
651 | ||
/**
 * Prepare for (re)starting the VM.
 * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
 * running or in case of an error condition), 0 otherwise.
 *
 * On success, enables ticks, sets the RUNNING run state, emits the QMP
 * RESUME event and notifies vm-state listeners; the caller then resumes
 * the vCPUs.
 */
int vm_prepare_start(void)
{
    RunState requested;

    qemu_vmstop_requested(&requested);
    if (runstate_is_running() && requested == RUN_STATE__MAX) {
        return -1;
    }

    /* Ensure that a STOP/RESUME pair of events is emitted if a
     * vmstop request was pending.  The BLOCK_IO_ERROR event, for
     * example, according to documentation is always followed by
     * the STOP event.
     */
    if (runstate_is_running()) {
        qapi_event_send_stop();
        qapi_event_send_resume();
        return -1;
    }

    /* We are sending this now, but the CPUs will be resumed shortly later */
    qapi_event_send_resume();

    cpu_enable_ticks();
    runstate_set(RUN_STATE_RUNNING);
    vm_state_notify(1, RUN_STATE_RUNNING);
    return 0;
}
685 | ||
/* Start (or restart) the VM, resuming the vCPUs only when preparation
 * succeeded (vm_prepare_start() returns 0). */
void vm_start(void)
{
    if (vm_prepare_start() == 0) {
        resume_all_vcpus();
    }
}
692 | ||
/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);

        bdrv_drain_all();
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}
708 | ||
/* Print the target's list of supported CPU models, where implemented. */
void list_cpus(const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list();
#endif
}
de0b36b6 | 716 | |
0cfd6a9a LC |
717 | void qmp_memsave(int64_t addr, int64_t size, const char *filename, |
718 | bool has_cpu, int64_t cpu_index, Error **errp) | |
719 | { | |
720 | FILE *f; | |
721 | uint32_t l; | |
55e5c285 | 722 | CPUState *cpu; |
0cfd6a9a | 723 | uint8_t buf[1024]; |
0dc9daf0 | 724 | int64_t orig_addr = addr, orig_size = size; |
0cfd6a9a LC |
725 | |
726 | if (!has_cpu) { | |
727 | cpu_index = 0; | |
728 | } | |
729 | ||
151d1322 AF |
730 | cpu = qemu_get_cpu(cpu_index); |
731 | if (cpu == NULL) { | |
c6bd8c70 MA |
732 | error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index", |
733 | "a CPU number"); | |
0cfd6a9a LC |
734 | return; |
735 | } | |
736 | ||
737 | f = fopen(filename, "wb"); | |
738 | if (!f) { | |
618da851 | 739 | error_setg_file_open(errp, errno, filename); |
0cfd6a9a LC |
740 | return; |
741 | } | |
742 | ||
743 | while (size != 0) { | |
744 | l = sizeof(buf); | |
745 | if (l > size) | |
746 | l = size; | |
2f4d0f59 | 747 | if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) { |
0dc9daf0 BP |
748 | error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64 |
749 | " specified", orig_addr, orig_size); | |
2f4d0f59 AK |
750 | goto exit; |
751 | } | |
0cfd6a9a | 752 | if (fwrite(buf, 1, l, f) != l) { |
c6bd8c70 | 753 | error_setg(errp, QERR_IO_ERROR); |
0cfd6a9a LC |
754 | goto exit; |
755 | } | |
756 | addr += l; | |
757 | size -= l; | |
758 | } | |
759 | ||
760 | exit: | |
761 | fclose(f); | |
762 | } | |
6d3962bf LC |
763 | |
764 | void qmp_pmemsave(int64_t addr, int64_t size, const char *filename, | |
765 | Error **errp) | |
766 | { | |
767 | FILE *f; | |
768 | uint32_t l; | |
769 | uint8_t buf[1024]; | |
770 | ||
771 | f = fopen(filename, "wb"); | |
772 | if (!f) { | |
618da851 | 773 | error_setg_file_open(errp, errno, filename); |
6d3962bf LC |
774 | return; |
775 | } | |
776 | ||
777 | while (size != 0) { | |
778 | l = sizeof(buf); | |
779 | if (l > size) | |
780 | l = size; | |
eb6282f2 | 781 | cpu_physical_memory_read(addr, buf, l); |
6d3962bf | 782 | if (fwrite(buf, 1, l, f) != l) { |
c6bd8c70 | 783 | error_setg(errp, QERR_IO_ERROR); |
6d3962bf LC |
784 | goto exit; |
785 | } | |
786 | addr += l; | |
787 | size -= l; | |
788 | } | |
789 | ||
790 | exit: | |
791 | fclose(f); | |
792 | } | |
/* QMP 'inject-nmi': deliver an NMI targeting the monitor's current CPU. */
void qmp_inject_nmi(Error **errp)
{
    nmi_monitor_handle(monitor_get_cpu_index(monitor_cur()), errp);
}
27498bef | 798 |