]> git.proxmox.com Git - mirror_qemu.git/blame - cpus.c
Rename target_phys_addr_t to hwaddr
[mirror_qemu.git] / cpus.c
CommitLineData
296af7c9
BS
1/*
2 * QEMU System Emulator
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25/* Needed early for CONFIG_BSD etc. */
26#include "config-host.h"
27
28#include "monitor.h"
29#include "sysemu.h"
30#include "gdbstub.h"
31#include "dma.h"
32#include "kvm.h"
de0b36b6 33#include "qmp-commands.h"
296af7c9 34
96284e89 35#include "qemu-thread.h"
296af7c9 36#include "cpus.h"
8156be56 37#include "qtest.h"
44a9b356 38#include "main-loop.h"
ee785fed 39#include "bitmap.h"
0ff0fc19
JK
40
41#ifndef _WIN32
a8486bc9 42#include "compatfd.h"
0ff0fc19 43#endif
296af7c9 44
6d9cb73c
JK
45#ifdef CONFIG_LINUX
46
47#include <sys/prctl.h>
48
c0532a76
MT
49#ifndef PR_MCE_KILL
50#define PR_MCE_KILL 33
51#endif
52
6d9cb73c
JK
53#ifndef PR_MCE_KILL_SET
54#define PR_MCE_KILL_SET 1
55#endif
56
57#ifndef PR_MCE_KILL_EARLY
58#define PR_MCE_KILL_EARLY 1
59#endif
60
61#endif /* CONFIG_LINUX */
62
9349b4f9 63static CPUArchState *next_cpu;
296af7c9 64
ac873f1e
PM
65static bool cpu_thread_is_idle(CPUArchState *env)
66{
67 if (env->stop || env->queued_work_first) {
68 return false;
69 }
70 if (env->stopped || !runstate_is_running()) {
71 return true;
72 }
7ae26bd4
PM
73 if (!env->halted || qemu_cpu_has_work(env) ||
74 kvm_async_interrupts_enabled()) {
ac873f1e
PM
75 return false;
76 }
77 return true;
78}
79
80static bool all_cpu_threads_idle(void)
81{
82 CPUArchState *env;
83
84 for (env = first_cpu; env != NULL; env = env->next_cpu) {
85 if (!cpu_thread_is_idle(env)) {
86 return false;
87 }
88 }
89 return true;
90}
91
946fb27c
PB
92/***********************************************************/
93/* guest cycle counter */
94
95/* Conversion factor from emulated instructions to virtual clock ticks. */
96static int icount_time_shift;
97/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
98#define MAX_ICOUNT_SHIFT 10
99/* Compensate for varying guest execution speed. */
100static int64_t qemu_icount_bias;
101static QEMUTimer *icount_rt_timer;
102static QEMUTimer *icount_vm_timer;
103static QEMUTimer *icount_warp_timer;
104static int64_t vm_clock_warp_start;
105static int64_t qemu_icount;
106
107typedef struct TimersState {
108 int64_t cpu_ticks_prev;
109 int64_t cpu_ticks_offset;
110 int64_t cpu_clock_offset;
111 int32_t cpu_ticks_enabled;
112 int64_t dummy;
113} TimersState;
114
115TimersState timers_state;
116
117/* Return the virtual CPU time, based on the instruction counter. */
118int64_t cpu_get_icount(void)
119{
120 int64_t icount;
9349b4f9 121 CPUArchState *env = cpu_single_env;
946fb27c
PB
122
123 icount = qemu_icount;
124 if (env) {
125 if (!can_do_io(env)) {
126 fprintf(stderr, "Bad clock read\n");
127 }
128 icount -= (env->icount_decr.u16.low + env->icount_extra);
129 }
130 return qemu_icount_bias + (icount << icount_time_shift);
131}
132
133/* return the host CPU cycle counter and handle stop/restart */
134int64_t cpu_get_ticks(void)
135{
136 if (use_icount) {
137 return cpu_get_icount();
138 }
139 if (!timers_state.cpu_ticks_enabled) {
140 return timers_state.cpu_ticks_offset;
141 } else {
142 int64_t ticks;
143 ticks = cpu_get_real_ticks();
144 if (timers_state.cpu_ticks_prev > ticks) {
145 /* Note: non increasing ticks may happen if the host uses
146 software suspend */
147 timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
148 }
149 timers_state.cpu_ticks_prev = ticks;
150 return ticks + timers_state.cpu_ticks_offset;
151 }
152}
153
154/* return the host CPU monotonic timer and handle stop/restart */
155int64_t cpu_get_clock(void)
156{
157 int64_t ti;
158 if (!timers_state.cpu_ticks_enabled) {
159 return timers_state.cpu_clock_offset;
160 } else {
161 ti = get_clock();
162 return ti + timers_state.cpu_clock_offset;
163 }
164}
165
166/* enable cpu_get_ticks() */
167void cpu_enable_ticks(void)
168{
169 if (!timers_state.cpu_ticks_enabled) {
170 timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
171 timers_state.cpu_clock_offset -= get_clock();
172 timers_state.cpu_ticks_enabled = 1;
173 }
174}
175
176/* disable cpu_get_ticks() : the clock is stopped. You must not call
177 cpu_get_ticks() after that. */
178void cpu_disable_ticks(void)
179{
180 if (timers_state.cpu_ticks_enabled) {
181 timers_state.cpu_ticks_offset = cpu_get_ticks();
182 timers_state.cpu_clock_offset = cpu_get_clock();
183 timers_state.cpu_ticks_enabled = 0;
184 }
185}
186
187/* Correlation between real and virtual time is always going to be
188 fairly approximate, so ignore small variation.
189 When the guest is idle real and virtual time will be aligned in
190 the IO wait loop. */
191#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)
192
193static void icount_adjust(void)
194{
195 int64_t cur_time;
196 int64_t cur_icount;
197 int64_t delta;
198 static int64_t last_delta;
199 /* If the VM is not running, then do nothing. */
200 if (!runstate_is_running()) {
201 return;
202 }
203 cur_time = cpu_get_clock();
204 cur_icount = qemu_get_clock_ns(vm_clock);
205 delta = cur_icount - cur_time;
206 /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
207 if (delta > 0
208 && last_delta + ICOUNT_WOBBLE < delta * 2
209 && icount_time_shift > 0) {
210 /* The guest is getting too far ahead. Slow time down. */
211 icount_time_shift--;
212 }
213 if (delta < 0
214 && last_delta - ICOUNT_WOBBLE > delta * 2
215 && icount_time_shift < MAX_ICOUNT_SHIFT) {
216 /* The guest is getting too far behind. Speed time up. */
217 icount_time_shift++;
218 }
219 last_delta = delta;
220 qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
221}
222
223static void icount_adjust_rt(void *opaque)
224{
225 qemu_mod_timer(icount_rt_timer,
226 qemu_get_clock_ms(rt_clock) + 1000);
227 icount_adjust();
228}
229
230static void icount_adjust_vm(void *opaque)
231{
232 qemu_mod_timer(icount_vm_timer,
233 qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
234 icount_adjust();
235}
236
237static int64_t qemu_icount_round(int64_t count)
238{
239 return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
240}
241
242static void icount_warp_rt(void *opaque)
243{
244 if (vm_clock_warp_start == -1) {
245 return;
246 }
247
248 if (runstate_is_running()) {
249 int64_t clock = qemu_get_clock_ns(rt_clock);
250 int64_t warp_delta = clock - vm_clock_warp_start;
251 if (use_icount == 1) {
252 qemu_icount_bias += warp_delta;
253 } else {
254 /*
255 * In adaptive mode, do not let the vm_clock run too
256 * far ahead of real time.
257 */
258 int64_t cur_time = cpu_get_clock();
259 int64_t cur_icount = qemu_get_clock_ns(vm_clock);
260 int64_t delta = cur_time - cur_icount;
261 qemu_icount_bias += MIN(warp_delta, delta);
262 }
263 if (qemu_clock_expired(vm_clock)) {
264 qemu_notify_event();
265 }
266 }
267 vm_clock_warp_start = -1;
268}
269
8156be56
PB
270void qtest_clock_warp(int64_t dest)
271{
272 int64_t clock = qemu_get_clock_ns(vm_clock);
273 assert(qtest_enabled());
274 while (clock < dest) {
275 int64_t deadline = qemu_clock_deadline(vm_clock);
276 int64_t warp = MIN(dest - clock, deadline);
277 qemu_icount_bias += warp;
278 qemu_run_timers(vm_clock);
279 clock = qemu_get_clock_ns(vm_clock);
280 }
281 qemu_notify_event();
282}
283
946fb27c
PB
284void qemu_clock_warp(QEMUClock *clock)
285{
286 int64_t deadline;
287
288 /*
289 * There are too many global variables to make the "warp" behavior
290 * applicable to other clocks. But a clock argument removes the
291 * need for if statements all over the place.
292 */
293 if (clock != vm_clock || !use_icount) {
294 return;
295 }
296
297 /*
298 * If the CPUs have been sleeping, advance the vm_clock timer now. This
299 * ensures that the deadline for the timer is computed correctly below.
300 * This also makes sure that the insn counter is synchronized before the
301 * CPU starts running, in case the CPU is woken by an event other than
302 * the earliest vm_clock timer.
303 */
304 icount_warp_rt(NULL);
305 if (!all_cpu_threads_idle() || !qemu_clock_has_timers(vm_clock)) {
306 qemu_del_timer(icount_warp_timer);
307 return;
308 }
309
8156be56
PB
310 if (qtest_enabled()) {
311 /* When testing, qtest commands advance icount. */
312 return;
313 }
314
946fb27c
PB
315 vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
316 deadline = qemu_clock_deadline(vm_clock);
317 if (deadline > 0) {
318 /*
319 * Ensure the vm_clock proceeds even when the virtual CPU goes to
320 * sleep. Otherwise, the CPU might be waiting for a future timer
321 * interrupt to wake it up, but the interrupt never comes because
322 * the vCPU isn't running any insns and thus doesn't advance the
323 * vm_clock.
324 *
325 * An extreme solution for this problem would be to never let VCPUs
326 * sleep in icount mode if there is a pending vm_clock timer; rather
327 * time could just advance to the next vm_clock event. Instead, we
328 * do stop VCPUs and only advance vm_clock after some "real" time,
329 * (related to the time left until the next event) has passed. This
330 * rt_clock timer will do this. This avoids that the warps are too
331 * visible externally---for example, you will not be sending network
07f35073 332 * packets continuously instead of every 100ms.
946fb27c
PB
333 */
334 qemu_mod_timer(icount_warp_timer, vm_clock_warp_start + deadline);
335 } else {
336 qemu_notify_event();
337 }
338}
339
340static const VMStateDescription vmstate_timers = {
341 .name = "timer",
342 .version_id = 2,
343 .minimum_version_id = 1,
344 .minimum_version_id_old = 1,
345 .fields = (VMStateField[]) {
346 VMSTATE_INT64(cpu_ticks_offset, TimersState),
347 VMSTATE_INT64(dummy, TimersState),
348 VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
349 VMSTATE_END_OF_LIST()
350 }
351};
352
353void configure_icount(const char *option)
354{
355 vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
356 if (!option) {
357 return;
358 }
359
360 icount_warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
361 if (strcmp(option, "auto") != 0) {
362 icount_time_shift = strtol(option, NULL, 0);
363 use_icount = 1;
364 return;
365 }
366
367 use_icount = 2;
368
369 /* 125MIPS seems a reasonable initial guess at the guest speed.
370 It will be corrected fairly quickly anyway. */
371 icount_time_shift = 3;
372
373 /* Have both realtime and virtual time triggers for speed adjustment.
374 The realtime trigger catches emulated time passing too slowly,
375 the virtual time trigger catches emulated time passing too fast.
376 Realtime triggers occur even when idle, so use them less frequently
377 than VM triggers. */
378 icount_rt_timer = qemu_new_timer_ms(rt_clock, icount_adjust_rt, NULL);
379 qemu_mod_timer(icount_rt_timer,
380 qemu_get_clock_ms(rt_clock) + 1000);
381 icount_vm_timer = qemu_new_timer_ns(vm_clock, icount_adjust_vm, NULL);
382 qemu_mod_timer(icount_vm_timer,
383 qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
384}
385
296af7c9
BS
386/***********************************************************/
387void hw_error(const char *fmt, ...)
388{
389 va_list ap;
9349b4f9 390 CPUArchState *env;
296af7c9
BS
391
392 va_start(ap, fmt);
393 fprintf(stderr, "qemu: hardware error: ");
394 vfprintf(stderr, fmt, ap);
395 fprintf(stderr, "\n");
396 for(env = first_cpu; env != NULL; env = env->next_cpu) {
397 fprintf(stderr, "CPU #%d:\n", env->cpu_index);
6fd2a026 398 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU);
296af7c9
BS
399 }
400 va_end(ap);
401 abort();
402}
403
404void cpu_synchronize_all_states(void)
405{
9349b4f9 406 CPUArchState *cpu;
296af7c9
BS
407
408 for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
409 cpu_synchronize_state(cpu);
410 }
411}
412
413void cpu_synchronize_all_post_reset(void)
414{
9349b4f9 415 CPUArchState *cpu;
296af7c9
BS
416
417 for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
418 cpu_synchronize_post_reset(cpu);
419 }
420}
421
422void cpu_synchronize_all_post_init(void)
423{
9349b4f9 424 CPUArchState *cpu;
296af7c9
BS
425
426 for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
427 cpu_synchronize_post_init(cpu);
428 }
429}
430
9349b4f9 431int cpu_is_stopped(CPUArchState *env)
3ae9501c 432{
1354869c 433 return !runstate_is_running() || env->stopped;
3ae9501c
MT
434}
435
1dfb4dd9 436static void do_vm_stop(RunState state)
296af7c9 437{
1354869c 438 if (runstate_is_running()) {
296af7c9 439 cpu_disable_ticks();
296af7c9 440 pause_all_vcpus();
f5bbfba1 441 runstate_set(state);
1dfb4dd9 442 vm_state_notify(0, state);
922453bc 443 bdrv_drain_all();
55df6f33 444 bdrv_flush_all();
296af7c9
BS
445 monitor_protocol_event(QEVENT_STOP, NULL);
446 }
447}
448
9349b4f9 449static int cpu_can_run(CPUArchState *env)
296af7c9 450{
0ab07c62 451 if (env->stop) {
296af7c9 452 return 0;
0ab07c62 453 }
1354869c 454 if (env->stopped || !runstate_is_running()) {
296af7c9 455 return 0;
0ab07c62 456 }
296af7c9
BS
457 return 1;
458}
459
9349b4f9 460static void cpu_handle_guest_debug(CPUArchState *env)
83f338f7 461{
3c638d06 462 gdb_set_stop_cpu(env);
8cf71710 463 qemu_system_debug_request();
83f338f7 464 env->stopped = 1;
3c638d06
JK
465}
466
714bd040
PB
467static void cpu_signal(int sig)
468{
469 if (cpu_single_env) {
470 cpu_exit(cpu_single_env);
471 }
472 exit_request = 1;
473}
714bd040 474
6d9cb73c
JK
475#ifdef CONFIG_LINUX
476static void sigbus_reraise(void)
477{
478 sigset_t set;
479 struct sigaction action;
480
481 memset(&action, 0, sizeof(action));
482 action.sa_handler = SIG_DFL;
483 if (!sigaction(SIGBUS, &action, NULL)) {
484 raise(SIGBUS);
485 sigemptyset(&set);
486 sigaddset(&set, SIGBUS);
487 sigprocmask(SIG_UNBLOCK, &set, NULL);
488 }
489 perror("Failed to re-raise SIGBUS!\n");
490 abort();
491}
492
493static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
494 void *ctx)
495{
496 if (kvm_on_sigbus(siginfo->ssi_code,
497 (void *)(intptr_t)siginfo->ssi_addr)) {
498 sigbus_reraise();
499 }
500}
501
502static void qemu_init_sigbus(void)
503{
504 struct sigaction action;
505
506 memset(&action, 0, sizeof(action));
507 action.sa_flags = SA_SIGINFO;
508 action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
509 sigaction(SIGBUS, &action, NULL);
510
511 prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
512}
513
9349b4f9 514static void qemu_kvm_eat_signals(CPUArchState *env)
1ab3c6c0
JK
515{
516 struct timespec ts = { 0, 0 };
517 siginfo_t siginfo;
518 sigset_t waitset;
519 sigset_t chkset;
520 int r;
521
522 sigemptyset(&waitset);
523 sigaddset(&waitset, SIG_IPI);
524 sigaddset(&waitset, SIGBUS);
525
526 do {
527 r = sigtimedwait(&waitset, &siginfo, &ts);
528 if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
529 perror("sigtimedwait");
530 exit(1);
531 }
532
533 switch (r) {
534 case SIGBUS:
535 if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr)) {
536 sigbus_reraise();
537 }
538 break;
539 default:
540 break;
541 }
542
543 r = sigpending(&chkset);
544 if (r == -1) {
545 perror("sigpending");
546 exit(1);
547 }
548 } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
1ab3c6c0
JK
549}
550
6d9cb73c
JK
551#else /* !CONFIG_LINUX */
552
553static void qemu_init_sigbus(void)
554{
555}
1ab3c6c0 556
9349b4f9 557static void qemu_kvm_eat_signals(CPUArchState *env)
1ab3c6c0
JK
558{
559}
6d9cb73c
JK
560#endif /* !CONFIG_LINUX */
561
296af7c9 562#ifndef _WIN32
55f8d6ac
JK
/* Intentionally empty: SIG_IPI only needs to interrupt blocking calls. */
static void dummy_signal(int sig)
{
}
55f8d6ac 566
9349b4f9 567static void qemu_kvm_init_cpu_signals(CPUArchState *env)
714bd040
PB
568{
569 int r;
570 sigset_t set;
571 struct sigaction sigact;
572
573 memset(&sigact, 0, sizeof(sigact));
574 sigact.sa_handler = dummy_signal;
575 sigaction(SIG_IPI, &sigact, NULL);
576
714bd040
PB
577 pthread_sigmask(SIG_BLOCK, NULL, &set);
578 sigdelset(&set, SIG_IPI);
714bd040
PB
579 sigdelset(&set, SIGBUS);
580 r = kvm_set_signal_mask(env, &set);
581 if (r) {
582 fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
583 exit(1);
584 }
585}
586
587static void qemu_tcg_init_cpu_signals(void)
588{
714bd040
PB
589 sigset_t set;
590 struct sigaction sigact;
591
592 memset(&sigact, 0, sizeof(sigact));
593 sigact.sa_handler = cpu_signal;
594 sigaction(SIG_IPI, &sigact, NULL);
595
596 sigemptyset(&set);
597 sigaddset(&set, SIG_IPI);
598 pthread_sigmask(SIG_UNBLOCK, &set, NULL);
714bd040
PB
599}
600
55f8d6ac 601#else /* _WIN32 */
9349b4f9 602static void qemu_kvm_init_cpu_signals(CPUArchState *env)
ff48eb5f 603{
714bd040
PB
604 abort();
605}
ff48eb5f 606
714bd040
PB
607static void qemu_tcg_init_cpu_signals(void)
608{
ff48eb5f 609}
714bd040 610#endif /* _WIN32 */
ff48eb5f 611
b2532d88 612static QemuMutex qemu_global_mutex;
46daff13
PB
613static QemuCond qemu_io_proceeded_cond;
614static bool iothread_requesting_mutex;
296af7c9
BS
615
616static QemuThread io_thread;
617
618static QemuThread *tcg_cpu_thread;
619static QemuCond *tcg_halt_cond;
620
296af7c9
BS
621/* cpu creation */
622static QemuCond qemu_cpu_cond;
623/* system init */
296af7c9 624static QemuCond qemu_pause_cond;
e82bcec2 625static QemuCond qemu_work_cond;
296af7c9 626
d3b12f5d 627void qemu_init_cpu_loop(void)
296af7c9 628{
6d9cb73c 629 qemu_init_sigbus();
ed94592b 630 qemu_cond_init(&qemu_cpu_cond);
ed94592b
AL
631 qemu_cond_init(&qemu_pause_cond);
632 qemu_cond_init(&qemu_work_cond);
46daff13 633 qemu_cond_init(&qemu_io_proceeded_cond);
296af7c9 634 qemu_mutex_init(&qemu_global_mutex);
296af7c9 635
b7680cb6 636 qemu_thread_get_self(&io_thread);
296af7c9
BS
637}
638
9349b4f9 639void run_on_cpu(CPUArchState *env, void (*func)(void *data), void *data)
e82bcec2
MT
640{
641 struct qemu_work_item wi;
642
b7680cb6 643 if (qemu_cpu_is_self(env)) {
e82bcec2
MT
644 func(data);
645 return;
646 }
647
648 wi.func = func;
649 wi.data = data;
0ab07c62 650 if (!env->queued_work_first) {
e82bcec2 651 env->queued_work_first = &wi;
0ab07c62 652 } else {
e82bcec2 653 env->queued_work_last->next = &wi;
0ab07c62 654 }
e82bcec2
MT
655 env->queued_work_last = &wi;
656 wi.next = NULL;
657 wi.done = false;
658
659 qemu_cpu_kick(env);
660 while (!wi.done) {
9349b4f9 661 CPUArchState *self_env = cpu_single_env;
e82bcec2
MT
662
663 qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
664 cpu_single_env = self_env;
665 }
666}
667
9349b4f9 668static void flush_queued_work(CPUArchState *env)
e82bcec2
MT
669{
670 struct qemu_work_item *wi;
671
0ab07c62 672 if (!env->queued_work_first) {
e82bcec2 673 return;
0ab07c62 674 }
e82bcec2
MT
675
676 while ((wi = env->queued_work_first)) {
677 env->queued_work_first = wi->next;
678 wi->func(wi->data);
679 wi->done = true;
680 }
681 env->queued_work_last = NULL;
682 qemu_cond_broadcast(&qemu_work_cond);
683}
684
9349b4f9 685static void qemu_wait_io_event_common(CPUArchState *env)
296af7c9 686{
216fc9a4
AF
687 CPUState *cpu = ENV_GET_CPU(env);
688
296af7c9
BS
689 if (env->stop) {
690 env->stop = 0;
691 env->stopped = 1;
692 qemu_cond_signal(&qemu_pause_cond);
693 }
e82bcec2 694 flush_queued_work(env);
216fc9a4 695 cpu->thread_kicked = false;
296af7c9
BS
696}
697
6cabe1f3 698static void qemu_tcg_wait_io_event(void)
296af7c9 699{
9349b4f9 700 CPUArchState *env;
6cabe1f3 701
16400322 702 while (all_cpu_threads_idle()) {
ab33fcda
PB
703 /* Start accounting real time to the virtual clock if the CPUs
704 are idle. */
705 qemu_clock_warp(vm_clock);
9705fbb5 706 qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
16400322 707 }
296af7c9 708
46daff13
PB
709 while (iothread_requesting_mutex) {
710 qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
711 }
6cabe1f3
JK
712
713 for (env = first_cpu; env != NULL; env = env->next_cpu) {
714 qemu_wait_io_event_common(env);
715 }
296af7c9
BS
716}
717
9349b4f9 718static void qemu_kvm_wait_io_event(CPUArchState *env)
296af7c9 719{
16400322 720 while (cpu_thread_is_idle(env)) {
9705fbb5 721 qemu_cond_wait(env->halt_cond, &qemu_global_mutex);
16400322 722 }
296af7c9 723
5db5bdac 724 qemu_kvm_eat_signals(env);
296af7c9
BS
725 qemu_wait_io_event_common(env);
726}
727
7e97cd88 728static void *qemu_kvm_cpu_thread_fn(void *arg)
296af7c9 729{
9349b4f9 730 CPUArchState *env = arg;
814e612e 731 CPUState *cpu = ENV_GET_CPU(env);
84b4915d 732 int r;
296af7c9 733
6164e6d6 734 qemu_mutex_lock(&qemu_global_mutex);
814e612e 735 qemu_thread_get_self(cpu->thread);
dc7a09cf 736 env->thread_id = qemu_get_thread_id();
e479c207 737 cpu_single_env = env;
296af7c9 738
84b4915d
JK
739 r = kvm_init_vcpu(env);
740 if (r < 0) {
741 fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
742 exit(1);
743 }
296af7c9 744
55f8d6ac 745 qemu_kvm_init_cpu_signals(env);
296af7c9
BS
746
747 /* signal CPU creation */
296af7c9
BS
748 env->created = 1;
749 qemu_cond_signal(&qemu_cpu_cond);
750
296af7c9 751 while (1) {
0ab07c62 752 if (cpu_can_run(env)) {
6792a57b 753 r = kvm_cpu_exec(env);
83f338f7 754 if (r == EXCP_DEBUG) {
1009d2ed 755 cpu_handle_guest_debug(env);
83f338f7 756 }
0ab07c62 757 }
296af7c9
BS
758 qemu_kvm_wait_io_event(env);
759 }
760
761 return NULL;
762}
763
c7f0f3b1
AL
764static void *qemu_dummy_cpu_thread_fn(void *arg)
765{
766#ifdef _WIN32
767 fprintf(stderr, "qtest is not supported under Windows\n");
768 exit(1);
769#else
770 CPUArchState *env = arg;
814e612e 771 CPUState *cpu = ENV_GET_CPU(env);
c7f0f3b1
AL
772 sigset_t waitset;
773 int r;
774
775 qemu_mutex_lock_iothread();
814e612e 776 qemu_thread_get_self(cpu->thread);
c7f0f3b1
AL
777 env->thread_id = qemu_get_thread_id();
778
779 sigemptyset(&waitset);
780 sigaddset(&waitset, SIG_IPI);
781
782 /* signal CPU creation */
783 env->created = 1;
784 qemu_cond_signal(&qemu_cpu_cond);
785
786 cpu_single_env = env;
787 while (1) {
788 cpu_single_env = NULL;
789 qemu_mutex_unlock_iothread();
790 do {
791 int sig;
792 r = sigwait(&waitset, &sig);
793 } while (r == -1 && (errno == EAGAIN || errno == EINTR));
794 if (r == -1) {
795 perror("sigwait");
796 exit(1);
797 }
798 qemu_mutex_lock_iothread();
799 cpu_single_env = env;
800 qemu_wait_io_event_common(env);
801 }
802
803 return NULL;
804#endif
805}
806
bdb7ca67
JK
807static void tcg_exec_all(void);
808
7e97cd88 809static void *qemu_tcg_cpu_thread_fn(void *arg)
296af7c9 810{
9349b4f9 811 CPUArchState *env = arg;
814e612e 812 CPUState *cpu = ENV_GET_CPU(env);
296af7c9 813
55f8d6ac 814 qemu_tcg_init_cpu_signals();
814e612e 815 qemu_thread_get_self(cpu->thread);
296af7c9
BS
816
817 /* signal CPU creation */
818 qemu_mutex_lock(&qemu_global_mutex);
0ab07c62 819 for (env = first_cpu; env != NULL; env = env->next_cpu) {
dc7a09cf 820 env->thread_id = qemu_get_thread_id();
296af7c9 821 env->created = 1;
0ab07c62 822 }
296af7c9
BS
823 qemu_cond_signal(&qemu_cpu_cond);
824
fa7d1867
JK
825 /* wait for initial kick-off after machine start */
826 while (first_cpu->stopped) {
827 qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
8e564b4e
JK
828
829 /* process any pending work */
830 for (env = first_cpu; env != NULL; env = env->next_cpu) {
831 qemu_wait_io_event_common(env);
832 }
0ab07c62 833 }
296af7c9
BS
834
835 while (1) {
bdb7ca67 836 tcg_exec_all();
946fb27c 837 if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
3b2319a3
PB
838 qemu_notify_event();
839 }
6cabe1f3 840 qemu_tcg_wait_io_event();
296af7c9
BS
841 }
842
843 return NULL;
844}
845
9349b4f9 846static void qemu_cpu_kick_thread(CPUArchState *env)
cc015e9a 847{
814e612e 848 CPUState *cpu = ENV_GET_CPU(env);
cc015e9a
PB
849#ifndef _WIN32
850 int err;
851
814e612e 852 err = pthread_kill(cpu->thread->thread, SIG_IPI);
cc015e9a
PB
853 if (err) {
854 fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
855 exit(1);
856 }
857#else /* _WIN32 */
858 if (!qemu_cpu_is_self(env)) {
bcba2a72 859 SuspendThread(cpu->hThread);
cc015e9a 860 cpu_signal(0);
bcba2a72 861 ResumeThread(cpu->hThread);
cc015e9a
PB
862 }
863#endif
864}
865
296af7c9
BS
866void qemu_cpu_kick(void *_env)
867{
9349b4f9 868 CPUArchState *env = _env;
216fc9a4 869 CPUState *cpu = ENV_GET_CPU(env);
296af7c9 870
296af7c9 871 qemu_cond_broadcast(env->halt_cond);
216fc9a4 872 if (!tcg_enabled() && !cpu->thread_kicked) {
cc015e9a 873 qemu_cpu_kick_thread(env);
216fc9a4 874 cpu->thread_kicked = true;
aa2c364b 875 }
296af7c9
BS
876}
877
46d62fac 878void qemu_cpu_kick_self(void)
296af7c9 879{
b55c22c6 880#ifndef _WIN32
46d62fac 881 assert(cpu_single_env);
216fc9a4 882 CPUState *cpu_single_cpu = ENV_GET_CPU(cpu_single_env);
296af7c9 883
216fc9a4 884 if (!cpu_single_cpu->thread_kicked) {
cc015e9a 885 qemu_cpu_kick_thread(cpu_single_env);
216fc9a4 886 cpu_single_cpu->thread_kicked = true;
296af7c9 887 }
b55c22c6
PB
888#else
889 abort();
890#endif
296af7c9
BS
891}
892
b7680cb6 893int qemu_cpu_is_self(void *_env)
296af7c9 894{
9349b4f9 895 CPUArchState *env = _env;
814e612e 896 CPUState *cpu = ENV_GET_CPU(env);
a8486bc9 897
814e612e 898 return qemu_thread_is_self(cpu->thread);
296af7c9
BS
899}
900
aa723c23
JQ
901static bool qemu_in_vcpu_thread(void)
902{
903 return cpu_single_env && qemu_cpu_is_self(cpu_single_env);
904}
905
296af7c9
BS
906void qemu_mutex_lock_iothread(void)
907{
c7f0f3b1 908 if (!tcg_enabled()) {
296af7c9 909 qemu_mutex_lock(&qemu_global_mutex);
1a28cac3 910 } else {
46daff13 911 iothread_requesting_mutex = true;
1a28cac3 912 if (qemu_mutex_trylock(&qemu_global_mutex)) {
cc015e9a 913 qemu_cpu_kick_thread(first_cpu);
1a28cac3
MT
914 qemu_mutex_lock(&qemu_global_mutex);
915 }
46daff13
PB
916 iothread_requesting_mutex = false;
917 qemu_cond_broadcast(&qemu_io_proceeded_cond);
1a28cac3 918 }
296af7c9
BS
919}
920
921void qemu_mutex_unlock_iothread(void)
922{
923 qemu_mutex_unlock(&qemu_global_mutex);
924}
925
926static int all_vcpus_paused(void)
927{
9349b4f9 928 CPUArchState *penv = first_cpu;
296af7c9
BS
929
930 while (penv) {
0ab07c62 931 if (!penv->stopped) {
296af7c9 932 return 0;
0ab07c62 933 }
5207a5e0 934 penv = penv->next_cpu;
296af7c9
BS
935 }
936
937 return 1;
938}
939
940void pause_all_vcpus(void)
941{
9349b4f9 942 CPUArchState *penv = first_cpu;
296af7c9 943
a5c57d64 944 qemu_clock_enable(vm_clock, false);
296af7c9
BS
945 while (penv) {
946 penv->stop = 1;
296af7c9 947 qemu_cpu_kick(penv);
5207a5e0 948 penv = penv->next_cpu;
296af7c9
BS
949 }
950
aa723c23 951 if (qemu_in_vcpu_thread()) {
d798e974
JK
952 cpu_stop_current();
953 if (!kvm_enabled()) {
954 while (penv) {
955 penv->stop = 0;
956 penv->stopped = 1;
957 penv = penv->next_cpu;
958 }
959 return;
960 }
961 }
962
296af7c9 963 while (!all_vcpus_paused()) {
be7d6c57 964 qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
296af7c9
BS
965 penv = first_cpu;
966 while (penv) {
1fbb22e5 967 qemu_cpu_kick(penv);
5207a5e0 968 penv = penv->next_cpu;
296af7c9
BS
969 }
970 }
971}
972
973void resume_all_vcpus(void)
974{
9349b4f9 975 CPUArchState *penv = first_cpu;
296af7c9 976
47113ab6 977 qemu_clock_enable(vm_clock, true);
296af7c9
BS
978 while (penv) {
979 penv->stop = 0;
980 penv->stopped = 0;
296af7c9 981 qemu_cpu_kick(penv);
5207a5e0 982 penv = penv->next_cpu;
296af7c9
BS
983 }
984}
985
7e97cd88 986static void qemu_tcg_init_vcpu(void *_env)
296af7c9 987{
9349b4f9 988 CPUArchState *env = _env;
bcba2a72 989 CPUState *cpu = ENV_GET_CPU(env);
0ab07c62 990
296af7c9
BS
991 /* share a single thread for all cpus with TCG */
992 if (!tcg_cpu_thread) {
814e612e 993 cpu->thread = g_malloc0(sizeof(QemuThread));
7267c094 994 env->halt_cond = g_malloc0(sizeof(QemuCond));
296af7c9 995 qemu_cond_init(env->halt_cond);
fa7d1867 996 tcg_halt_cond = env->halt_cond;
814e612e 997 qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, env,
1ecf47bf
PB
998 QEMU_THREAD_JOINABLE);
999#ifdef _WIN32
814e612e 1000 cpu->hThread = qemu_thread_get_handle(cpu->thread);
1ecf47bf 1001#endif
0ab07c62 1002 while (env->created == 0) {
18a85728 1003 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
0ab07c62 1004 }
814e612e 1005 tcg_cpu_thread = cpu->thread;
296af7c9 1006 } else {
814e612e 1007 cpu->thread = tcg_cpu_thread;
296af7c9
BS
1008 env->halt_cond = tcg_halt_cond;
1009 }
1010}
1011
9349b4f9 1012static void qemu_kvm_start_vcpu(CPUArchState *env)
296af7c9 1013{
814e612e
AF
1014 CPUState *cpu = ENV_GET_CPU(env);
1015
1016 cpu->thread = g_malloc0(sizeof(QemuThread));
7267c094 1017 env->halt_cond = g_malloc0(sizeof(QemuCond));
296af7c9 1018 qemu_cond_init(env->halt_cond);
814e612e 1019 qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, env,
1ecf47bf 1020 QEMU_THREAD_JOINABLE);
0ab07c62 1021 while (env->created == 0) {
18a85728 1022 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
0ab07c62 1023 }
296af7c9
BS
1024}
1025
c7f0f3b1
AL
1026static void qemu_dummy_start_vcpu(CPUArchState *env)
1027{
814e612e
AF
1028 CPUState *cpu = ENV_GET_CPU(env);
1029
1030 cpu->thread = g_malloc0(sizeof(QemuThread));
c7f0f3b1
AL
1031 env->halt_cond = g_malloc0(sizeof(QemuCond));
1032 qemu_cond_init(env->halt_cond);
814e612e 1033 qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, env,
c7f0f3b1
AL
1034 QEMU_THREAD_JOINABLE);
1035 while (env->created == 0) {
1036 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1037 }
1038}
1039
296af7c9
BS
1040void qemu_init_vcpu(void *_env)
1041{
9349b4f9 1042 CPUArchState *env = _env;
296af7c9
BS
1043
1044 env->nr_cores = smp_cores;
1045 env->nr_threads = smp_threads;
fa7d1867 1046 env->stopped = 1;
0ab07c62 1047 if (kvm_enabled()) {
7e97cd88 1048 qemu_kvm_start_vcpu(env);
c7f0f3b1 1049 } else if (tcg_enabled()) {
7e97cd88 1050 qemu_tcg_init_vcpu(env);
c7f0f3b1
AL
1051 } else {
1052 qemu_dummy_start_vcpu(env);
0ab07c62 1053 }
296af7c9
BS
1054}
1055
b4a3d965 1056void cpu_stop_current(void)
296af7c9 1057{
b4a3d965 1058 if (cpu_single_env) {
67bb172f 1059 cpu_single_env->stop = 0;
b4a3d965
JK
1060 cpu_single_env->stopped = 1;
1061 cpu_exit(cpu_single_env);
67bb172f 1062 qemu_cond_signal(&qemu_pause_cond);
b4a3d965 1063 }
296af7c9
BS
1064}
1065
1dfb4dd9 1066void vm_stop(RunState state)
296af7c9 1067{
aa723c23 1068 if (qemu_in_vcpu_thread()) {
1dfb4dd9 1069 qemu_system_vmstop_request(state);
296af7c9
BS
1070 /*
1071 * FIXME: should not return to device code in case
1072 * vm_stop() has been requested.
1073 */
b4a3d965 1074 cpu_stop_current();
296af7c9
BS
1075 return;
1076 }
1dfb4dd9 1077 do_vm_stop(state);
296af7c9
BS
1078}
1079
8a9236f1
LC
1080/* does a state transition even if the VM is already stopped,
1081 current state is forgotten forever */
1082void vm_stop_force_state(RunState state)
1083{
1084 if (runstate_is_running()) {
1085 vm_stop(state);
1086 } else {
1087 runstate_set(state);
1088 }
1089}
1090
/* Run one TCG execution slice on @env and return the cpu_exec() result
 * (e.g. EXCP_DEBUG).  When icount is active, budgets the number of
 * instructions the CPU may execute before the next vm_clock deadline,
 * and afterwards folds any unexecuted budget back into qemu_icount.
 */
static int tcg_cpu_exec(CPUArchState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        /* Retire the budget left over from the previous slice before
         * computing a fresh one. */
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        /* Budget = instructions until the next vm_clock deadline. */
        count = qemu_icount_round(qemu_clock_deadline(vm_clock));
        qemu_icount += count;
        /* u16.low holds at most 0xffff; the remainder goes in
         * icount_extra and is refilled by the execution loop. */
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}
1128
/* Round-robin scheduler for the single-threaded TCG mode: give each CPU
 * one execution slice, starting from the global cursor @next_cpu so the
 * rotation resumes where the previous call left off.  Stops early when
 * exit_request is raised, a CPU hits a debug exception, or a CPU is
 * stopping/stopped.
 */
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock.  */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUArchState *env = next_cpu;

        /* Keep vm_clock frozen while single-stepping with NOTIMER set,
         * so timers do not fire between steps. */
        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(env)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (env->stop || env->stopped) {
            /* A stop request pauses the whole round. */
            break;
        }
    }
    /* Request serviced; clear it for the next round. */
    exit_request = 0;
}
1157
1158void set_numa_modes(void)
1159{
9349b4f9 1160 CPUArchState *env;
296af7c9
BS
1161 int i;
1162
1163 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1164 for (i = 0; i < nb_numa_nodes; i++) {
ee785fed 1165 if (test_bit(env->cpu_index, node_cpumask[i])) {
296af7c9
BS
1166 env->numa_node = i;
1167 }
1168 }
1169 }
1170}
1171
1172void set_cpu_log(const char *optarg)
1173{
1174 int mask;
1175 const CPULogItem *item;
1176
1177 mask = cpu_str_to_log_mask(optarg);
1178 if (!mask) {
1179 printf("Log items (comma separated):\n");
1180 for (item = cpu_log_items; item->mask != 0; item++) {
1181 printf("%-10s %s\n", item->name, item->help);
1182 }
1183 exit(1);
1184 }
1185 cpu_set_log(mask);
1186}
29e922b6 1187
/* Redirect the CPU log output to @optarg (thin wrapper around
 * cpu_set_log_filename(), kept here for the -D command-line option). */
void set_cpu_log_filename(const char *optarg)
{
    cpu_set_log_filename(optarg);
}
1192
/* Print the list of CPU models supported by the current target to @f,
 * using @cpu_fprintf.  @optarg is currently unused.  Targets that do not
 * define cpu_list() print nothing. */
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}
/* QMP 'query-cpus' handler: build a CpuInfoList describing every CPU.
 * Each entry carries the CPU index, whether it is the current (first)
 * CPU, its halted flag and host thread id, plus a target-specific
 * program-counter field.  Caller owns the returned list.
 */
CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUArchState *env;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        CpuInfoList *info;

        /* Pull the latest register state in from the accelerator
         * before reporting it. */
        cpu_synchronize_state(env);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = env->cpu_index;
        info->value->current = (env == first_cpu);
        info->value->halted = env->halted;
        info->value->thread_id = env->thread_id;
        /* Target-specific program counter field(s). */
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
0cfd6a9a
LC
1244
1245void qmp_memsave(int64_t addr, int64_t size, const char *filename,
1246 bool has_cpu, int64_t cpu_index, Error **errp)
1247{
1248 FILE *f;
1249 uint32_t l;
9349b4f9 1250 CPUArchState *env;
0cfd6a9a
LC
1251 uint8_t buf[1024];
1252
1253 if (!has_cpu) {
1254 cpu_index = 0;
1255 }
1256
1257 for (env = first_cpu; env; env = env->next_cpu) {
1258 if (cpu_index == env->cpu_index) {
1259 break;
1260 }
1261 }
1262
1263 if (env == NULL) {
1264 error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
1265 "a CPU number");
1266 return;
1267 }
1268
1269 f = fopen(filename, "wb");
1270 if (!f) {
1271 error_set(errp, QERR_OPEN_FILE_FAILED, filename);
1272 return;
1273 }
1274
1275 while (size != 0) {
1276 l = sizeof(buf);
1277 if (l > size)
1278 l = size;
1279 cpu_memory_rw_debug(env, addr, buf, l, 0);
1280 if (fwrite(buf, 1, l, f) != l) {
1281 error_set(errp, QERR_IO_ERROR);
1282 goto exit;
1283 }
1284 addr += l;
1285 size -= l;
1286 }
1287
1288exit:
1289 fclose(f);
1290}
6d3962bf
LC
1291
1292void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
1293 Error **errp)
1294{
1295 FILE *f;
1296 uint32_t l;
1297 uint8_t buf[1024];
1298
1299 f = fopen(filename, "wb");
1300 if (!f) {
1301 error_set(errp, QERR_OPEN_FILE_FAILED, filename);
1302 return;
1303 }
1304
1305 while (size != 0) {
1306 l = sizeof(buf);
1307 if (l > size)
1308 l = size;
1309 cpu_physical_memory_rw(addr, buf, l, 0);
1310 if (fwrite(buf, 1, l, f) != l) {
1311 error_set(errp, QERR_IO_ERROR);
1312 goto exit;
1313 }
1314 addr += l;
1315 size -= l;
1316 }
1317
1318exit:
1319 fclose(f);
1320}
/* QMP 'inject-nmi' handler: deliver an NMI to every CPU.
 * Only implemented for x86 targets; elsewhere it reports
 * QERR_UNSUPPORTED via @errp.
 */
void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUArchState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        /* Without an APIC, raise the NMI directly on the CPU core;
         * otherwise route it through the (L)APIC. */
        if (!env->apic_state) {
            cpu_interrupt(env, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(env->apic_state);
        }
    }
#else
    error_set(errp, QERR_UNSUPPORTED);
#endif
}