mirror_qemu.git / cpus.c
icount: print a warning if there is no more deadline in sleep=no mode
1 /*
2 * QEMU System Emulator
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 /* Needed early for CONFIG_BSD etc. */
26 #include "config-host.h"
27
28 #include "monitor/monitor.h"
29 #include "qapi/qmp/qerror.h"
30 #include "sysemu/sysemu.h"
31 #include "exec/gdbstub.h"
32 #include "sysemu/dma.h"
33 #include "sysemu/kvm.h"
34 #include "qmp-commands.h"
35
36 #include "qemu/thread.h"
37 #include "sysemu/cpus.h"
38 #include "sysemu/qtest.h"
39 #include "qemu/main-loop.h"
40 #include "qemu/bitmap.h"
41 #include "qemu/seqlock.h"
42 #include "qapi-event.h"
43 #include "hw/nmi.h"
44
45 #ifndef _WIN32
46 #include "qemu/compatfd.h"
47 #endif
48
49 #ifdef CONFIG_LINUX
50
51 #include <sys/prctl.h>
52
53 #ifndef PR_MCE_KILL
54 #define PR_MCE_KILL 33
55 #endif
56
57 #ifndef PR_MCE_KILL_SET
58 #define PR_MCE_KILL_SET 1
59 #endif
60
61 #ifndef PR_MCE_KILL_EARLY
62 #define PR_MCE_KILL_EARLY 1
63 #endif
64
65 #endif /* CONFIG_LINUX */
66
67 static CPUState *next_cpu;
68 int64_t max_delay;
69 int64_t max_advance;
70
71 bool cpu_is_stopped(CPUState *cpu)
72 {
73 return cpu->stopped || !runstate_is_running();
74 }
75
76 static bool cpu_thread_is_idle(CPUState *cpu)
77 {
78 if (cpu->stop || cpu->queued_work_first) {
79 return false;
80 }
81 if (cpu_is_stopped(cpu)) {
82 return true;
83 }
84 if (!cpu->halted || cpu_has_work(cpu) ||
85 kvm_halt_in_kernel()) {
86 return false;
87 }
88 return true;
89 }
90
91 static bool all_cpu_threads_idle(void)
92 {
93 CPUState *cpu;
94
95 CPU_FOREACH(cpu) {
96 if (!cpu_thread_is_idle(cpu)) {
97 return false;
98 }
99 }
100 return true;
101 }
102
103 /***********************************************************/
104 /* guest cycle counter */
105
106 /* Protected by TimersState seqlock */
107
108 static bool icount_sleep = true;
109 static int64_t vm_clock_warp_start = -1;
110 /* Conversion factor from emulated instructions to virtual clock ticks. */
111 static int icount_time_shift;
112 /* Arbitrarily pick 1MIPS as the minimum allowable speed. */
113 #define MAX_ICOUNT_SHIFT 10
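/* Each executed guest instruction advances QEMU_CLOCK_VIRTUAL by
 * 2^icount_time_shift nanoseconds (see cpu_icount_to_ns() below), so a
 * shift of 10 models roughly one instruction per microsecond, i.e. about
 * 1 MIPS, which is why it is used as the cap; a shift of 0 models about
 * 1000 MIPS.
 */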
114
115 static QEMUTimer *icount_rt_timer;
116 static QEMUTimer *icount_vm_timer;
117 static QEMUTimer *icount_warp_timer;
118
119 typedef struct TimersState {
120 /* Protected by BQL. */
121 int64_t cpu_ticks_prev;
122 int64_t cpu_ticks_offset;
123
124 /* cpu_clock_offset can be read out of BQL, so protect it with
125 * this lock.
126 */
127 QemuSeqLock vm_clock_seqlock;
128 int64_t cpu_clock_offset;
129 int32_t cpu_ticks_enabled;
130 int64_t dummy;
131
132 /* Compensate for varying guest execution speed. */
133 int64_t qemu_icount_bias;
134 /* Only written by TCG thread */
135 int64_t qemu_icount;
136 } TimersState;
137
138 static TimersState timers_state;
139
140 int64_t cpu_get_icount_raw(void)
141 {
142 int64_t icount;
143 CPUState *cpu = current_cpu;
144
145 icount = timers_state.qemu_icount;
146 if (cpu) {
147 if (!cpu_can_do_io(cpu)) {
148 fprintf(stderr, "Bad icount read\n");
149 exit(1);
150 }
151 icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
152 }
153 return icount;
154 }
155
156 /* Return the virtual CPU time, based on the instruction counter. */
157 static int64_t cpu_get_icount_locked(void)
158 {
159 int64_t icount = cpu_get_icount_raw();
160 return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
161 }
162
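/* Lock-free reader for the icount-based virtual clock.  Writers update
 * qemu_icount_bias and icount_time_shift under vm_clock_seqlock while
 * holding the BQL (see cpu_enable_ticks() below), so retrying the read
 * whenever a concurrent write is detected guarantees a consistent pair.
 */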
163 int64_t cpu_get_icount(void)
164 {
165 int64_t icount;
166 unsigned start;
167
168 do {
169 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
170 icount = cpu_get_icount_locked();
171 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
172
173 return icount;
174 }
175
176 int64_t cpu_icount_to_ns(int64_t icount)
177 {
178 return icount << icount_time_shift;
179 }
180
181 /* return the host CPU cycle counter and handle stop/restart */
182 /* Caller must hold the BQL */
183 int64_t cpu_get_ticks(void)
184 {
185 int64_t ticks;
186
187 if (use_icount) {
188 return cpu_get_icount();
189 }
190
191 ticks = timers_state.cpu_ticks_offset;
192 if (timers_state.cpu_ticks_enabled) {
193 ticks += cpu_get_real_ticks();
194 }
195
196 if (timers_state.cpu_ticks_prev > ticks) {
197 /* Note: non-increasing ticks may happen if the host uses
198 software suspend */
199 timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
200 ticks = timers_state.cpu_ticks_prev;
201 }
202
203 timers_state.cpu_ticks_prev = ticks;
204 return ticks;
205 }
206
207 static int64_t cpu_get_clock_locked(void)
208 {
209 int64_t ticks;
210
211 ticks = timers_state.cpu_clock_offset;
212 if (timers_state.cpu_ticks_enabled) {
213 ticks += get_clock();
214 }
215
216 return ticks;
217 }
218
219 /* return the host CPU monotonic timer and handle stop/restart */
220 int64_t cpu_get_clock(void)
221 {
222 int64_t ti;
223 unsigned start;
224
225 do {
226 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
227 ti = cpu_get_clock_locked();
228 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
229
230 return ti;
231 }
232
233 /* enable cpu_get_ticks()
234 * Caller must hold the BQL, which serves as the mutex for vm_clock_seqlock.
235 */
236 void cpu_enable_ticks(void)
237 {
238 /* Here, the thing really protected by the seqlock is cpu_clock_offset. */
239 seqlock_write_lock(&timers_state.vm_clock_seqlock);
240 if (!timers_state.cpu_ticks_enabled) {
241 timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
242 timers_state.cpu_clock_offset -= get_clock();
243 timers_state.cpu_ticks_enabled = 1;
244 }
245 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
246 }
247
248 /* disable cpu_get_ticks(): the clock is stopped. You must not call
249 * cpu_get_ticks() after that.
250 * Caller must hold the BQL, which serves as the mutex for vm_clock_seqlock.
251 */
252 void cpu_disable_ticks(void)
253 {
254 /* Here, the thing really protected by the seqlock is cpu_clock_offset. */
255 seqlock_write_lock(&timers_state.vm_clock_seqlock);
256 if (timers_state.cpu_ticks_enabled) {
257 timers_state.cpu_ticks_offset += cpu_get_real_ticks();
258 timers_state.cpu_clock_offset = cpu_get_clock_locked();
259 timers_state.cpu_ticks_enabled = 0;
260 }
261 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
262 }
263
264 /* Correlation between real and virtual time is always going to be
265 fairly approximate, so ignore small variation.
266 When the guest is idle, real and virtual time will be aligned in
267 the IO wait loop. */
268 #define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)
269
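/* Feedback loop for adaptive icount: 'delta' measures how far the virtual
 * clock has drifted ahead of (positive) or behind (negative) real time.
 * Decrementing icount_time_shift charges fewer nanoseconds per instruction
 * and so slows virtual time down; incrementing it speeds virtual time up.
 * qemu_icount_bias is then rebased so the shift change does not make the
 * virtual clock jump.
 */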
270 static void icount_adjust(void)
271 {
272 int64_t cur_time;
273 int64_t cur_icount;
274 int64_t delta;
275
276 /* Protected by TimersState mutex. */
277 static int64_t last_delta;
278
279 /* If the VM is not running, then do nothing. */
280 if (!runstate_is_running()) {
281 return;
282 }
283
284 seqlock_write_lock(&timers_state.vm_clock_seqlock);
285 cur_time = cpu_get_clock_locked();
286 cur_icount = cpu_get_icount_locked();
287
288 delta = cur_icount - cur_time;
289 /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
290 if (delta > 0
291 && last_delta + ICOUNT_WOBBLE < delta * 2
292 && icount_time_shift > 0) {
293 /* The guest is getting too far ahead. Slow time down. */
294 icount_time_shift--;
295 }
296 if (delta < 0
297 && last_delta - ICOUNT_WOBBLE > delta * 2
298 && icount_time_shift < MAX_ICOUNT_SHIFT) {
299 /* The guest is getting too far behind. Speed time up. */
300 icount_time_shift++;
301 }
302 last_delta = delta;
303 timers_state.qemu_icount_bias = cur_icount
304 - (timers_state.qemu_icount << icount_time_shift);
305 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
306 }
307
308 static void icount_adjust_rt(void *opaque)
309 {
310 timer_mod(icount_rt_timer,
311 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
312 icount_adjust();
313 }
314
315 static void icount_adjust_vm(void *opaque)
316 {
317 timer_mod(icount_vm_timer,
318 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
319 get_ticks_per_sec() / 10);
320 icount_adjust();
321 }
322
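/* Convert a deadline in nanoseconds into an instruction budget, rounding up
 * so that executing the whole budget advances the virtual clock at least to
 * the deadline.  For example, with icount_time_shift == 3 (8 ns per
 * instruction) a 100 ns deadline becomes a budget of 13 instructions.
 */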
323 static int64_t qemu_icount_round(int64_t count)
324 {
325 return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
326 }
327
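/* Runs when icount_warp_timer fires (and directly from qemu_clock_warp()).
 * It ends a pending warp: the real time that elapsed since the VCPUs went
 * idle at vm_clock_warp_start is credited to qemu_icount_bias so that
 * QEMU_CLOCK_VIRTUAL catches up, and any timers that became due as a result
 * are notified.
 */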
328 static void icount_warp_rt(void *opaque)
329 {
330 /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
331 * changes from -1 to another value, so the race here is okay.
332 */
333 if (atomic_read(&vm_clock_warp_start) == -1) {
334 return;
335 }
336
337 seqlock_write_lock(&timers_state.vm_clock_seqlock);
338 if (runstate_is_running()) {
339 int64_t clock = cpu_get_clock_locked();
340 int64_t warp_delta;
341
342 warp_delta = clock - vm_clock_warp_start;
343 if (use_icount == 2) {
344 /*
345 * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
346 * far ahead of real time.
347 */
348 int64_t cur_icount = cpu_get_icount_locked();
349 int64_t delta = clock - cur_icount;
350 warp_delta = MIN(warp_delta, delta);
351 }
352 timers_state.qemu_icount_bias += warp_delta;
353 }
354 vm_clock_warp_start = -1;
355 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
356
357 if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
358 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
359 }
360 }
361
362 void qtest_clock_warp(int64_t dest)
363 {
364 int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
365 AioContext *aio_context;
366 assert(qtest_enabled());
367 aio_context = qemu_get_aio_context();
368 while (clock < dest) {
369 int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
370 int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
371
372 seqlock_write_lock(&timers_state.vm_clock_seqlock);
373 timers_state.qemu_icount_bias += warp;
374 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
375
376 qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
377 timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
378 clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
379 }
380 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
381 }
382
383 void qemu_clock_warp(QEMUClockType type)
384 {
385 int64_t clock;
386 int64_t deadline;
387
388 /*
389 * There are too many global variables to make the "warp" behavior
390 * applicable to other clocks. But a clock argument removes the
391 * need for if statements all over the place.
392 */
393 if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
394 return;
395 }
396
397 if (icount_sleep) {
398 /*
399 * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
400 * This ensures that the deadline for the timer is computed correctly
401 * below.
402 * This also makes sure that the insn counter is synchronized before
403 * the CPU starts running, in case the CPU is woken by an event other
404 * than the earliest QEMU_CLOCK_VIRTUAL timer.
405 */
406 icount_warp_rt(NULL);
407 timer_del(icount_warp_timer);
408 }
409 if (!all_cpu_threads_idle()) {
410 return;
411 }
412
413 if (qtest_enabled()) {
414 /* When testing, qtest commands advance icount. */
415 return;
416 }
417
418 /* We want to use the earliest deadline from ALL vm_clocks */
419 clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
420 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
421 if (deadline < 0) {
422 static bool notified;
423 if (!icount_sleep && !notified) {
424 error_report("WARNING: icount sleep disabled and no active timers");
425 notified = true;
426 }
427 return;
428 }
429
430 if (deadline > 0) {
431 /*
432 * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
433 * sleep. Otherwise, the CPU might be waiting for a future timer
434 * interrupt to wake it up, but the interrupt never comes because
435 * the vCPU isn't running any insns and thus doesn't advance the
436 * QEMU_CLOCK_VIRTUAL.
437 */
438 if (!icount_sleep) {
439 /*
440 * We never let VCPUs sleep in no-sleep icount mode.
441 * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
442 * to the next QEMU_CLOCK_VIRTUAL event and notify it.
443 * It is useful when we want a deterministic execution time,
444 * isolated from host latencies.
445 */
446 seqlock_write_lock(&timers_state.vm_clock_seqlock);
447 timers_state.qemu_icount_bias += deadline;
448 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
449 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
450 } else {
451 /*
452 * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
453 * "real" time (related to the time left until the next event) has
454 * passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
455 * This keeps the warps from being visible externally; for example,
456 * you will not be sending network packets continuously instead of
457 * every 100ms.
458 */
459 seqlock_write_lock(&timers_state.vm_clock_seqlock);
460 if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
461 vm_clock_warp_start = clock;
462 }
463 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
464 timer_mod_anticipate(icount_warp_timer, clock + deadline);
465 }
466 } else if (deadline == 0) {
467 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
468 }
469 }
470
471 static bool icount_state_needed(void *opaque)
472 {
473 return use_icount;
474 }
475
476 /*
477 * This is a subsection for icount migration.
478 */
479 static const VMStateDescription icount_vmstate_timers = {
480 .name = "timer/icount",
481 .version_id = 1,
482 .minimum_version_id = 1,
483 .fields = (VMStateField[]) {
484 VMSTATE_INT64(qemu_icount_bias, TimersState),
485 VMSTATE_INT64(qemu_icount, TimersState),
486 VMSTATE_END_OF_LIST()
487 }
488 };
489
490 static const VMStateDescription vmstate_timers = {
491 .name = "timer",
492 .version_id = 2,
493 .minimum_version_id = 1,
494 .fields = (VMStateField[]) {
495 VMSTATE_INT64(cpu_ticks_offset, TimersState),
496 VMSTATE_INT64(dummy, TimersState),
497 VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
498 VMSTATE_END_OF_LIST()
499 },
500 .subsections = (VMStateSubsection[]) {
501 {
502 .vmsd = &icount_vmstate_timers,
503 .needed = icount_state_needed,
504 }, {
505 /* empty */
506 }
507 }
508 };
509
510 void cpu_ticks_init(void)
511 {
512 seqlock_init(&timers_state.vm_clock_seqlock, NULL);
513 vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
514 }
515
516 void configure_icount(QemuOpts *opts, Error **errp)
517 {
518 const char *option;
519 char *rem_str = NULL;
520
521 option = qemu_opt_get(opts, "shift");
522 if (!option) {
523 if (qemu_opt_get(opts, "align") != NULL) {
524 error_setg(errp, "Please specify shift option when using align");
525 }
526 return;
527 }
528
529 icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
530 if (icount_sleep) {
531 icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
532 icount_warp_rt, NULL);
533 }
534
535 icount_align_option = qemu_opt_get_bool(opts, "align", false);
536
537 if (icount_align_option && !icount_sleep) {
538 error_setg(errp, "align=on and sleep=no are incompatible");
539 }
540 if (strcmp(option, "auto") != 0) {
541 errno = 0;
542 icount_time_shift = strtol(option, &rem_str, 0);
543 if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
544 error_setg(errp, "icount: Invalid shift value");
545 }
546 use_icount = 1;
547 return;
548 } else if (icount_align_option) {
549 error_setg(errp, "shift=auto and align=on are incompatible");
550 } else if (!icount_sleep) {
551 error_setg(errp, "shift=auto and sleep=no are incompatible");
552 }
553
554 use_icount = 2;
555
556 /* 125MIPS seems a reasonable initial guess at the guest speed.
557 It will be corrected fairly quickly anyway. */
558 icount_time_shift = 3;
559
560 /* Have both realtime and virtual time triggers for speed adjustment.
561 The realtime trigger catches emulated time passing too slowly,
562 the virtual time trigger catches emulated time passing too fast.
563 Realtime triggers occur even when idle, so use them less frequently
564 than VM triggers. */
565 icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
566 icount_adjust_rt, NULL);
567 timer_mod(icount_rt_timer,
568 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
569 icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
570 icount_adjust_vm, NULL);
571 timer_mod(icount_vm_timer,
572 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
573 get_ticks_per_sec() / 10);
574 }
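/* The options parsed above come from the -icount command line argument.
 * Some example invocations (for illustration only):
 *
 *   -icount shift=7              fixed rate: 2^7 ns of virtual time per
 *                                executed instruction (use_icount == 1)
 *   -icount shift=auto           adaptive rate (use_icount == 2), tuned
 *                                periodically by icount_adjust()
 *   -icount shift=7,sleep=off    idle VCPUs never sleep; virtual time warps
 *                                straight to the next pending timer
 *   -icount shift=7,align=on     additionally record max_delay/max_advance
 *                                for dump_drift_info()
 */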
575
576 /***********************************************************/
577 void hw_error(const char *fmt, ...)
578 {
579 va_list ap;
580 CPUState *cpu;
581
582 va_start(ap, fmt);
583 fprintf(stderr, "qemu: hardware error: ");
584 vfprintf(stderr, fmt, ap);
585 fprintf(stderr, "\n");
586 CPU_FOREACH(cpu) {
587 fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
588 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
589 }
590 va_end(ap);
591 abort();
592 }
593
594 void cpu_synchronize_all_states(void)
595 {
596 CPUState *cpu;
597
598 CPU_FOREACH(cpu) {
599 cpu_synchronize_state(cpu);
600 }
601 }
602
603 void cpu_synchronize_all_post_reset(void)
604 {
605 CPUState *cpu;
606
607 CPU_FOREACH(cpu) {
608 cpu_synchronize_post_reset(cpu);
609 }
610 }
611
612 void cpu_synchronize_all_post_init(void)
613 {
614 CPUState *cpu;
615
616 CPU_FOREACH(cpu) {
617 cpu_synchronize_post_init(cpu);
618 }
619 }
620
621 void cpu_clean_all_dirty(void)
622 {
623 CPUState *cpu;
624
625 CPU_FOREACH(cpu) {
626 cpu_clean_state(cpu);
627 }
628 }
629
630 static int do_vm_stop(RunState state)
631 {
632 int ret = 0;
633
634 if (runstate_is_running()) {
635 cpu_disable_ticks();
636 pause_all_vcpus();
637 runstate_set(state);
638 vm_state_notify(0, state);
639 qapi_event_send_stop(&error_abort);
640 }
641
642 bdrv_drain_all();
643 ret = bdrv_flush_all();
644
645 return ret;
646 }
647
648 static bool cpu_can_run(CPUState *cpu)
649 {
650 if (cpu->stop) {
651 return false;
652 }
653 if (cpu_is_stopped(cpu)) {
654 return false;
655 }
656 return true;
657 }
658
659 static void cpu_handle_guest_debug(CPUState *cpu)
660 {
661 gdb_set_stop_cpu(cpu);
662 qemu_system_debug_request();
663 cpu->stopped = true;
664 }
665
666 static void cpu_signal(int sig)
667 {
668 if (current_cpu) {
669 cpu_exit(current_cpu);
670 }
671 exit_request = 1;
672 }
673
674 #ifdef CONFIG_LINUX
675 static void sigbus_reraise(void)
676 {
677 sigset_t set;
678 struct sigaction action;
679
680 memset(&action, 0, sizeof(action));
681 action.sa_handler = SIG_DFL;
682 if (!sigaction(SIGBUS, &action, NULL)) {
683 raise(SIGBUS);
684 sigemptyset(&set);
685 sigaddset(&set, SIGBUS);
686 sigprocmask(SIG_UNBLOCK, &set, NULL);
687 }
688 perror("Failed to re-raise SIGBUS!\n");
689 abort();
690 }
691
692 static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
693 void *ctx)
694 {
695 if (kvm_on_sigbus(siginfo->ssi_code,
696 (void *)(intptr_t)siginfo->ssi_addr)) {
697 sigbus_reraise();
698 }
699 }
700
701 static void qemu_init_sigbus(void)
702 {
703 struct sigaction action;
704
705 memset(&action, 0, sizeof(action));
706 action.sa_flags = SA_SIGINFO;
707 action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
708 sigaction(SIGBUS, &action, NULL);
709
710 prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
711 }
712
713 static void qemu_kvm_eat_signals(CPUState *cpu)
714 {
715 struct timespec ts = { 0, 0 };
716 siginfo_t siginfo;
717 sigset_t waitset;
718 sigset_t chkset;
719 int r;
720
721 sigemptyset(&waitset);
722 sigaddset(&waitset, SIG_IPI);
723 sigaddset(&waitset, SIGBUS);
724
725 do {
726 r = sigtimedwait(&waitset, &siginfo, &ts);
727 if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
728 perror("sigtimedwait");
729 exit(1);
730 }
731
732 switch (r) {
733 case SIGBUS:
734 if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
735 sigbus_reraise();
736 }
737 break;
738 default:
739 break;
740 }
741
742 r = sigpending(&chkset);
743 if (r == -1) {
744 perror("sigpending");
745 exit(1);
746 }
747 } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
748 }
749
750 #else /* !CONFIG_LINUX */
751
752 static void qemu_init_sigbus(void)
753 {
754 }
755
756 static void qemu_kvm_eat_signals(CPUState *cpu)
757 {
758 }
759 #endif /* !CONFIG_LINUX */
760
761 #ifndef _WIN32
762 static void dummy_signal(int sig)
763 {
764 }
765
766 static void qemu_kvm_init_cpu_signals(CPUState *cpu)
767 {
768 int r;
769 sigset_t set;
770 struct sigaction sigact;
771
772 memset(&sigact, 0, sizeof(sigact));
773 sigact.sa_handler = dummy_signal;
774 sigaction(SIG_IPI, &sigact, NULL);
775
776 pthread_sigmask(SIG_BLOCK, NULL, &set);
777 sigdelset(&set, SIG_IPI);
778 sigdelset(&set, SIGBUS);
779 r = kvm_set_signal_mask(cpu, &set);
780 if (r) {
781 fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
782 exit(1);
783 }
784 }
785
786 static void qemu_tcg_init_cpu_signals(void)
787 {
788 sigset_t set;
789 struct sigaction sigact;
790
791 memset(&sigact, 0, sizeof(sigact));
792 sigact.sa_handler = cpu_signal;
793 sigaction(SIG_IPI, &sigact, NULL);
794
795 sigemptyset(&set);
796 sigaddset(&set, SIG_IPI);
797 pthread_sigmask(SIG_UNBLOCK, &set, NULL);
798 }
799
800 #else /* _WIN32 */
801 static void qemu_kvm_init_cpu_signals(CPUState *cpu)
802 {
803 abort();
804 }
805
806 static void qemu_tcg_init_cpu_signals(void)
807 {
808 }
809 #endif /* _WIN32 */
810
811 static QemuMutex qemu_global_mutex;
812 static QemuCond qemu_io_proceeded_cond;
813 static unsigned iothread_requesting_mutex;
814
815 static QemuThread io_thread;
816
817 static QemuThread *tcg_cpu_thread;
818 static QemuCond *tcg_halt_cond;
819
820 /* cpu creation */
821 static QemuCond qemu_cpu_cond;
822 /* system init */
823 static QemuCond qemu_pause_cond;
824 static QemuCond qemu_work_cond;
825
826 void qemu_init_cpu_loop(void)
827 {
828 qemu_init_sigbus();
829 qemu_cond_init(&qemu_cpu_cond);
830 qemu_cond_init(&qemu_pause_cond);
831 qemu_cond_init(&qemu_work_cond);
832 qemu_cond_init(&qemu_io_proceeded_cond);
833 qemu_mutex_init(&qemu_global_mutex);
834
835 qemu_thread_get_self(&io_thread);
836 }
837
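/* Run func(data) in the context of the given VCPU.  When called from that
 * VCPU's own thread the function runs immediately; otherwise a work item is
 * queued on the CPU, the CPU is kicked, and the caller sleeps on
 * qemu_work_cond (dropping the BQL inside qemu_cond_wait) until the VCPU
 * thread has run the item from flush_queued_work().  async_run_on_cpu()
 * below is the fire-and-forget variant: the heap-allocated item is freed by
 * the consumer, so the caller does not wait for completion.
 */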
838 void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
839 {
840 struct qemu_work_item wi;
841
842 if (qemu_cpu_is_self(cpu)) {
843 func(data);
844 return;
845 }
846
847 wi.func = func;
848 wi.data = data;
849 wi.free = false;
850 if (cpu->queued_work_first == NULL) {
851 cpu->queued_work_first = &wi;
852 } else {
853 cpu->queued_work_last->next = &wi;
854 }
855 cpu->queued_work_last = &wi;
856 wi.next = NULL;
857 wi.done = false;
858
859 qemu_cpu_kick(cpu);
860 while (!wi.done) {
861 CPUState *self_cpu = current_cpu;
862
863 qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
864 current_cpu = self_cpu;
865 }
866 }
867
868 void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
869 {
870 struct qemu_work_item *wi;
871
872 if (qemu_cpu_is_self(cpu)) {
873 func(data);
874 return;
875 }
876
877 wi = g_malloc0(sizeof(struct qemu_work_item));
878 wi->func = func;
879 wi->data = data;
880 wi->free = true;
881 if (cpu->queued_work_first == NULL) {
882 cpu->queued_work_first = wi;
883 } else {
884 cpu->queued_work_last->next = wi;
885 }
886 cpu->queued_work_last = wi;
887 wi->next = NULL;
888 wi->done = false;
889
890 qemu_cpu_kick(cpu);
891 }
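/* A minimal, hypothetical usage sketch (do_work() is not part of this file):
 *
 *     static void do_work(void *data)
 *     {
 *         CPUState *cs = data;
 *         cpu_synchronize_state(cs);        /- runs in cs's own VCPU thread
 *     }
 *
 *     run_on_cpu(cpu, do_work, cpu);        blocks until do_work() has run
 *     async_run_on_cpu(cpu, do_work, cpu);  queues the item and returns
 */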
892
893 static void flush_queued_work(CPUState *cpu)
894 {
895 struct qemu_work_item *wi;
896
897 if (cpu->queued_work_first == NULL) {
898 return;
899 }
900
901 while ((wi = cpu->queued_work_first)) {
902 cpu->queued_work_first = wi->next;
903 wi->func(wi->data);
904 wi->done = true;
905 if (wi->free) {
906 g_free(wi);
907 }
908 }
909 cpu->queued_work_last = NULL;
910 qemu_cond_broadcast(&qemu_work_cond);
911 }
912
913 static void qemu_wait_io_event_common(CPUState *cpu)
914 {
915 if (cpu->stop) {
916 cpu->stop = false;
917 cpu->stopped = true;
918 qemu_cond_signal(&qemu_pause_cond);
919 }
920 flush_queued_work(cpu);
921 cpu->thread_kicked = false;
922 }
923
924 static void qemu_tcg_wait_io_event(void)
925 {
926 CPUState *cpu;
927
928 while (all_cpu_threads_idle()) {
929 /* Start accounting real time to the virtual clock if the CPUs
930 are idle. */
931 qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
932 qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
933 }
934
935 while (iothread_requesting_mutex) {
936 qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
937 }
938
939 CPU_FOREACH(cpu) {
940 qemu_wait_io_event_common(cpu);
941 }
942 }
943
944 static void qemu_kvm_wait_io_event(CPUState *cpu)
945 {
946 while (cpu_thread_is_idle(cpu)) {
947 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
948 }
949
950 qemu_kvm_eat_signals(cpu);
951 qemu_wait_io_event_common(cpu);
952 }
953
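/* Per-VCPU thread body used with KVM: create the in-kernel VCPU, install the
 * signal setup that lets SIG_IPI interrupt a blocking KVM_RUN, signal
 * qemu_init_vcpu() that creation finished, then loop between kvm_cpu_exec()
 * and qemu_kvm_wait_io_event() for the lifetime of the VM.
 */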
954 static void *qemu_kvm_cpu_thread_fn(void *arg)
955 {
956 CPUState *cpu = arg;
957 int r;
958
959 qemu_mutex_lock(&qemu_global_mutex);
960 qemu_thread_get_self(cpu->thread);
961 cpu->thread_id = qemu_get_thread_id();
962 cpu->can_do_io = 1;
963 current_cpu = cpu;
964
965 r = kvm_init_vcpu(cpu);
966 if (r < 0) {
967 fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
968 exit(1);
969 }
970
971 qemu_kvm_init_cpu_signals(cpu);
972
973 /* signal CPU creation */
974 cpu->created = true;
975 qemu_cond_signal(&qemu_cpu_cond);
976
977 while (1) {
978 if (cpu_can_run(cpu)) {
979 r = kvm_cpu_exec(cpu);
980 if (r == EXCP_DEBUG) {
981 cpu_handle_guest_debug(cpu);
982 }
983 }
984 qemu_kvm_wait_io_event(cpu);
985 }
986
987 return NULL;
988 }
989
990 static void *qemu_dummy_cpu_thread_fn(void *arg)
991 {
992 #ifdef _WIN32
993 fprintf(stderr, "qtest is not supported under Windows\n");
994 exit(1);
995 #else
996 CPUState *cpu = arg;
997 sigset_t waitset;
998 int r;
999
1000 qemu_mutex_lock_iothread();
1001 qemu_thread_get_self(cpu->thread);
1002 cpu->thread_id = qemu_get_thread_id();
1003 cpu->can_do_io = 1;
1004
1005 sigemptyset(&waitset);
1006 sigaddset(&waitset, SIG_IPI);
1007
1008 /* signal CPU creation */
1009 cpu->created = true;
1010 qemu_cond_signal(&qemu_cpu_cond);
1011
1012 current_cpu = cpu;
1013 while (1) {
1014 current_cpu = NULL;
1015 qemu_mutex_unlock_iothread();
1016 do {
1017 int sig;
1018 r = sigwait(&waitset, &sig);
1019 } while (r == -1 && (errno == EAGAIN || errno == EINTR));
1020 if (r == -1) {
1021 perror("sigwait");
1022 exit(1);
1023 }
1024 qemu_mutex_lock_iothread();
1025 current_cpu = cpu;
1026 qemu_wait_io_event_common(cpu);
1027 }
1028
1029 return NULL;
1030 #endif
1031 }
1032
1033 static void tcg_exec_all(void);
1034
1035 static void *qemu_tcg_cpu_thread_fn(void *arg)
1036 {
1037 CPUState *cpu = arg;
1038
1039 qemu_tcg_init_cpu_signals();
1040 qemu_thread_get_self(cpu->thread);
1041
1042 qemu_mutex_lock(&qemu_global_mutex);
1043 CPU_FOREACH(cpu) {
1044 cpu->thread_id = qemu_get_thread_id();
1045 cpu->created = true;
1046 cpu->can_do_io = 1;
1047 }
1048 qemu_cond_signal(&qemu_cpu_cond);
1049
1050 /* wait for initial kick-off after machine start */
1051 while (first_cpu->stopped) {
1052 qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
1053
1054 /* process any pending work */
1055 CPU_FOREACH(cpu) {
1056 qemu_wait_io_event_common(cpu);
1057 }
1058 }
1059
1060 /* process any pending work */
1061 exit_request = 1;
1062
1063 while (1) {
1064 tcg_exec_all();
1065
1066 if (use_icount) {
1067 int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1068
1069 if (deadline == 0) {
1070 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
1071 }
1072 }
1073 qemu_tcg_wait_io_event();
1074 }
1075
1076 return NULL;
1077 }
1078
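/* Force a VCPU thread out of guest execution.  On POSIX hosts this delivers
 * SIG_IPI to the thread (handled by cpu_signal() for TCG, or interrupting
 * KVM_RUN for KVM).  Windows has no thread-directed signals, so the TCG
 * thread is suspended, its context is fetched to make sure it has really
 * stopped, cpu_signal() is invoked on its behalf, and it is resumed.
 */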
1079 static void qemu_cpu_kick_thread(CPUState *cpu)
1080 {
1081 #ifndef _WIN32
1082 int err;
1083
1084 err = pthread_kill(cpu->thread->thread, SIG_IPI);
1085 if (err) {
1086 fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
1087 exit(1);
1088 }
1089 #else /* _WIN32 */
1090 if (!qemu_cpu_is_self(cpu)) {
1091 CONTEXT tcgContext;
1092
1093 if (SuspendThread(cpu->hThread) == (DWORD)-1) {
1094 fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
1095 GetLastError());
1096 exit(1);
1097 }
1098
1099 /* On multi-core systems, we are not sure that the thread is actually
1100 * suspended until we can get the context.
1101 */
1102 tcgContext.ContextFlags = CONTEXT_CONTROL;
1103 while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
1104 continue;
1105 }
1106
1107 cpu_signal(0);
1108
1109 if (ResumeThread(cpu->hThread) == (DWORD)-1) {
1110 fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
1111 GetLastError());
1112 exit(1);
1113 }
1114 }
1115 #endif
1116 }
1117
1118 void qemu_cpu_kick(CPUState *cpu)
1119 {
1120 qemu_cond_broadcast(cpu->halt_cond);
1121 if (!tcg_enabled() && !cpu->thread_kicked) {
1122 qemu_cpu_kick_thread(cpu);
1123 cpu->thread_kicked = true;
1124 }
1125 }
1126
1127 void qemu_cpu_kick_self(void)
1128 {
1129 #ifndef _WIN32
1130 assert(current_cpu);
1131
1132 if (!current_cpu->thread_kicked) {
1133 qemu_cpu_kick_thread(current_cpu);
1134 current_cpu->thread_kicked = true;
1135 }
1136 #else
1137 abort();
1138 #endif
1139 }
1140
1141 bool qemu_cpu_is_self(CPUState *cpu)
1142 {
1143 return qemu_thread_is_self(cpu->thread);
1144 }
1145
1146 bool qemu_in_vcpu_thread(void)
1147 {
1148 return current_cpu && qemu_cpu_is_self(current_cpu);
1149 }
1150
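/* The I/O thread competes with the TCG thread for the BQL.  Incrementing
 * iothread_requesting_mutex first and kicking first_cpu when the trylock
 * fails pushes the TCG thread out of its execution loop; it then waits in
 * qemu_tcg_wait_io_event() on qemu_io_proceeded_cond until the counter drops
 * back to zero, so the VCPU thread cannot immediately re-take the lock and
 * starve the I/O thread.
 */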
1151 void qemu_mutex_lock_iothread(void)
1152 {
1153 atomic_inc(&iothread_requesting_mutex);
1154 if (!tcg_enabled() || !first_cpu || !first_cpu->thread) {
1155 qemu_mutex_lock(&qemu_global_mutex);
1156 atomic_dec(&iothread_requesting_mutex);
1157 } else {
1158 if (qemu_mutex_trylock(&qemu_global_mutex)) {
1159 qemu_cpu_kick_thread(first_cpu);
1160 qemu_mutex_lock(&qemu_global_mutex);
1161 }
1162 atomic_dec(&iothread_requesting_mutex);
1163 qemu_cond_broadcast(&qemu_io_proceeded_cond);
1164 }
1165 }
1166
1167 void qemu_mutex_unlock_iothread(void)
1168 {
1169 qemu_mutex_unlock(&qemu_global_mutex);
1170 }
1171
1172 static int all_vcpus_paused(void)
1173 {
1174 CPUState *cpu;
1175
1176 CPU_FOREACH(cpu) {
1177 if (!cpu->stopped) {
1178 return 0;
1179 }
1180 }
1181
1182 return 1;
1183 }
1184
1185 void pause_all_vcpus(void)
1186 {
1187 CPUState *cpu;
1188
1189 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
1190 CPU_FOREACH(cpu) {
1191 cpu->stop = true;
1192 qemu_cpu_kick(cpu);
1193 }
1194
1195 if (qemu_in_vcpu_thread()) {
1196 cpu_stop_current();
1197 if (!kvm_enabled()) {
1198 CPU_FOREACH(cpu) {
1199 cpu->stop = false;
1200 cpu->stopped = true;
1201 }
1202 return;
1203 }
1204 }
1205
1206 while (!all_vcpus_paused()) {
1207 qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
1208 CPU_FOREACH(cpu) {
1209 qemu_cpu_kick(cpu);
1210 }
1211 }
1212 }
1213
1214 void cpu_resume(CPUState *cpu)
1215 {
1216 cpu->stop = false;
1217 cpu->stopped = false;
1218 qemu_cpu_kick(cpu);
1219 }
1220
1221 void resume_all_vcpus(void)
1222 {
1223 CPUState *cpu;
1224
1225 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
1226 CPU_FOREACH(cpu) {
1227 cpu_resume(cpu);
1228 }
1229 }
1230
1231 /* For temporary buffers for forming a name */
1232 #define VCPU_THREAD_NAME_SIZE 16
1233
1234 static void qemu_tcg_init_vcpu(CPUState *cpu)
1235 {
1236 char thread_name[VCPU_THREAD_NAME_SIZE];
1237
1238 tcg_cpu_address_space_init(cpu, cpu->as);
1239
1240 /* share a single thread for all cpus with TCG */
1241 if (!tcg_cpu_thread) {
1242 cpu->thread = g_malloc0(sizeof(QemuThread));
1243 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1244 qemu_cond_init(cpu->halt_cond);
1245 tcg_halt_cond = cpu->halt_cond;
1246 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
1247 cpu->cpu_index);
1248 qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
1249 cpu, QEMU_THREAD_JOINABLE);
1250 #ifdef _WIN32
1251 cpu->hThread = qemu_thread_get_handle(cpu->thread);
1252 #endif
1253 while (!cpu->created) {
1254 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1255 }
1256 tcg_cpu_thread = cpu->thread;
1257 } else {
1258 cpu->thread = tcg_cpu_thread;
1259 cpu->halt_cond = tcg_halt_cond;
1260 }
1261 }
1262
1263 static void qemu_kvm_start_vcpu(CPUState *cpu)
1264 {
1265 char thread_name[VCPU_THREAD_NAME_SIZE];
1266
1267 cpu->thread = g_malloc0(sizeof(QemuThread));
1268 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1269 qemu_cond_init(cpu->halt_cond);
1270 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
1271 cpu->cpu_index);
1272 qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
1273 cpu, QEMU_THREAD_JOINABLE);
1274 while (!cpu->created) {
1275 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1276 }
1277 }
1278
1279 static void qemu_dummy_start_vcpu(CPUState *cpu)
1280 {
1281 char thread_name[VCPU_THREAD_NAME_SIZE];
1282
1283 cpu->thread = g_malloc0(sizeof(QemuThread));
1284 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1285 qemu_cond_init(cpu->halt_cond);
1286 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
1287 cpu->cpu_index);
1288 qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
1289 QEMU_THREAD_JOINABLE);
1290 while (!cpu->created) {
1291 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1292 }
1293 }
1294
1295 void qemu_init_vcpu(CPUState *cpu)
1296 {
1297 cpu->nr_cores = smp_cores;
1298 cpu->nr_threads = smp_threads;
1299 cpu->stopped = true;
1300 if (kvm_enabled()) {
1301 qemu_kvm_start_vcpu(cpu);
1302 } else if (tcg_enabled()) {
1303 qemu_tcg_init_vcpu(cpu);
1304 } else {
1305 qemu_dummy_start_vcpu(cpu);
1306 }
1307 }
1308
1309 void cpu_stop_current(void)
1310 {
1311 if (current_cpu) {
1312 current_cpu->stop = false;
1313 current_cpu->stopped = true;
1314 cpu_exit(current_cpu);
1315 qemu_cond_signal(&qemu_pause_cond);
1316 }
1317 }
1318
1319 int vm_stop(RunState state)
1320 {
1321 if (qemu_in_vcpu_thread()) {
1322 qemu_system_vmstop_request_prepare();
1323 qemu_system_vmstop_request(state);
1324 /*
1325 * FIXME: should not return to device code in case
1326 * vm_stop() has been requested.
1327 */
1328 cpu_stop_current();
1329 return 0;
1330 }
1331
1332 return do_vm_stop(state);
1333 }
1334
1335 /* does a state transition even if the VM is already stopped;
1336 the current state is forgotten forever */
1337 int vm_stop_force_state(RunState state)
1338 {
1339 if (runstate_is_running()) {
1340 return vm_stop(state);
1341 } else {
1342 runstate_set(state);
1343 /* Make sure to return an error if the flush in a previous vm_stop()
1344 * failed. */
1345 return bdrv_flush_all();
1346 }
1347 }
1348
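/* Execute guest code for one CPU under TCG.  With icount enabled the number
 * of instructions the translated code may run is budgeted up front: the low
 * 16 bits of the budget go into icount_decr.u16.low (decremented by the
 * generated code) and the remainder is parked in icount_extra; a budget of
 * 70000 instructions, for example, is split into 0xffff plus 4465.  On
 * return, whatever was not executed is folded back into
 * timers_state.qemu_icount.
 */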
1349 static int tcg_cpu_exec(CPUArchState *env)
1350 {
1351 CPUState *cpu = ENV_GET_CPU(env);
1352 int ret;
1353 #ifdef CONFIG_PROFILER
1354 int64_t ti;
1355 #endif
1356
1357 #ifdef CONFIG_PROFILER
1358 ti = profile_getclock();
1359 #endif
1360 if (use_icount) {
1361 int64_t count;
1362 int64_t deadline;
1363 int decr;
1364 timers_state.qemu_icount -= (cpu->icount_decr.u16.low
1365 + cpu->icount_extra);
1366 cpu->icount_decr.u16.low = 0;
1367 cpu->icount_extra = 0;
1368 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1369
1370 /* Maintain prior (possibly buggy) behaviour where if no deadline
1371 * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
1372 * INT32_MAX nanoseconds ahead, we still use INT32_MAX
1373 * nanoseconds.
1374 */
1375 if ((deadline < 0) || (deadline > INT32_MAX)) {
1376 deadline = INT32_MAX;
1377 }
1378
1379 count = qemu_icount_round(deadline);
1380 timers_state.qemu_icount += count;
1381 decr = (count > 0xffff) ? 0xffff : count;
1382 count -= decr;
1383 cpu->icount_decr.u16.low = decr;
1384 cpu->icount_extra = count;
1385 }
1386 ret = cpu_exec(env);
1387 #ifdef CONFIG_PROFILER
1388 tcg_time += profile_getclock() - ti;
1389 #endif
1390 if (use_icount) {
1391 /* Fold pending instructions back into the
1392 instruction counter, and clear the interrupt flag. */
1393 timers_state.qemu_icount -= (cpu->icount_decr.u16.low
1394 + cpu->icount_extra);
1395 cpu->icount_decr.u32 = 0;
1396 cpu->icount_extra = 0;
1397 }
1398 return ret;
1399 }
1400
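/* Round-robin scheduler for the single TCG thread: run each VCPU in turn
 * until it stops, hits a debug exception, or exit_request is raised (for
 * example by the I/O thread wanting the BQL).  next_cpu remembers where an
 * interrupted pass stopped, so the next call resumes from there rather than
 * restarting from first_cpu.
 */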
1401 static void tcg_exec_all(void)
1402 {
1403 int r;
1404
1405 /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
1406 qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
1407
1408 if (next_cpu == NULL) {
1409 next_cpu = first_cpu;
1410 }
1411 for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
1412 CPUState *cpu = next_cpu;
1413 CPUArchState *env = cpu->env_ptr;
1414
1415 qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
1416 (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
1417
1418 if (cpu_can_run(cpu)) {
1419 r = tcg_cpu_exec(env);
1420 if (r == EXCP_DEBUG) {
1421 cpu_handle_guest_debug(cpu);
1422 break;
1423 }
1424 } else if (cpu->stop || cpu->stopped) {
1425 break;
1426 }
1427 }
1428 exit_request = 0;
1429 }
1430
1431 void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
1432 {
1433 /* XXX: implement xxx_cpu_list for targets that still miss it */
1434 #if defined(cpu_list)
1435 cpu_list(f, cpu_fprintf);
1436 #endif
1437 }
1438
1439 CpuInfoList *qmp_query_cpus(Error **errp)
1440 {
1441 CpuInfoList *head = NULL, *cur_item = NULL;
1442 CPUState *cpu;
1443
1444 CPU_FOREACH(cpu) {
1445 CpuInfoList *info;
1446 #if defined(TARGET_I386)
1447 X86CPU *x86_cpu = X86_CPU(cpu);
1448 CPUX86State *env = &x86_cpu->env;
1449 #elif defined(TARGET_PPC)
1450 PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
1451 CPUPPCState *env = &ppc_cpu->env;
1452 #elif defined(TARGET_SPARC)
1453 SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
1454 CPUSPARCState *env = &sparc_cpu->env;
1455 #elif defined(TARGET_MIPS)
1456 MIPSCPU *mips_cpu = MIPS_CPU(cpu);
1457 CPUMIPSState *env = &mips_cpu->env;
1458 #elif defined(TARGET_TRICORE)
1459 TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
1460 CPUTriCoreState *env = &tricore_cpu->env;
1461 #endif
1462
1463 cpu_synchronize_state(cpu);
1464
1465 info = g_malloc0(sizeof(*info));
1466 info->value = g_malloc0(sizeof(*info->value));
1467 info->value->CPU = cpu->cpu_index;
1468 info->value->current = (cpu == first_cpu);
1469 info->value->halted = cpu->halted;
1470 info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
1471 info->value->thread_id = cpu->thread_id;
1472 #if defined(TARGET_I386)
1473 info->value->has_pc = true;
1474 info->value->pc = env->eip + env->segs[R_CS].base;
1475 #elif defined(TARGET_PPC)
1476 info->value->has_nip = true;
1477 info->value->nip = env->nip;
1478 #elif defined(TARGET_SPARC)
1479 info->value->has_pc = true;
1480 info->value->pc = env->pc;
1481 info->value->has_npc = true;
1482 info->value->npc = env->npc;
1483 #elif defined(TARGET_MIPS)
1484 info->value->has_PC = true;
1485 info->value->PC = env->active_tc.PC;
1486 #elif defined(TARGET_TRICORE)
1487 info->value->has_PC = true;
1488 info->value->PC = env->PC;
1489 #endif
1490
1491 /* XXX: waiting for the qapi to support GSList */
1492 if (!cur_item) {
1493 head = cur_item = info;
1494 } else {
1495 cur_item->next = info;
1496 cur_item = info;
1497 }
1498 }
1499
1500 return head;
1501 }
1502
1503 void qmp_memsave(int64_t addr, int64_t size, const char *filename,
1504 bool has_cpu, int64_t cpu_index, Error **errp)
1505 {
1506 FILE *f;
1507 uint32_t l;
1508 CPUState *cpu;
1509 uint8_t buf[1024];
1510 int64_t orig_addr = addr, orig_size = size;
1511
1512 if (!has_cpu) {
1513 cpu_index = 0;
1514 }
1515
1516 cpu = qemu_get_cpu(cpu_index);
1517 if (cpu == NULL) {
1518 error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
1519 "a CPU number");
1520 return;
1521 }
1522
1523 f = fopen(filename, "wb");
1524 if (!f) {
1525 error_setg_file_open(errp, errno, filename);
1526 return;
1527 }
1528
1529 while (size != 0) {
1530 l = sizeof(buf);
1531 if (l > size)
1532 l = size;
1533 if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
1534 error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
1535 " specified", orig_addr, orig_size);
1536 goto exit;
1537 }
1538 if (fwrite(buf, 1, l, f) != l) {
1539 error_set(errp, QERR_IO_ERROR);
1540 goto exit;
1541 }
1542 addr += l;
1543 size -= l;
1544 }
1545
1546 exit:
1547 fclose(f);
1548 }
1549
1550 void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
1551 Error **errp)
1552 {
1553 FILE *f;
1554 uint32_t l;
1555 uint8_t buf[1024];
1556
1557 f = fopen(filename, "wb");
1558 if (!f) {
1559 error_setg_file_open(errp, errno, filename);
1560 return;
1561 }
1562
1563 while (size != 0) {
1564 l = sizeof(buf);
1565 if (l > size)
1566 l = size;
1567 cpu_physical_memory_read(addr, buf, l);
1568 if (fwrite(buf, 1, l, f) != l) {
1569 error_set(errp, QERR_IO_ERROR);
1570 goto exit;
1571 }
1572 addr += l;
1573 size -= l;
1574 }
1575
1576 exit:
1577 fclose(f);
1578 }
1579
1580 void qmp_inject_nmi(Error **errp)
1581 {
1582 #if defined(TARGET_I386)
1583 CPUState *cs;
1584
1585 CPU_FOREACH(cs) {
1586 X86CPU *cpu = X86_CPU(cs);
1587
1588 if (!cpu->apic_state) {
1589 cpu_interrupt(cs, CPU_INTERRUPT_NMI);
1590 } else {
1591 apic_deliver_nmi(cpu->apic_state);
1592 }
1593 }
1594 #else
1595 nmi_monitor_handle(monitor_get_cpu_index(), errp);
1596 #endif
1597 }
1598
1599 void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
1600 {
1601 if (!use_icount) {
1602 return;
1603 }
1604
1605 cpu_fprintf(f, "Host - Guest clock %"PRIi64" ms\n",
1606 (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
1607 if (icount_align_option) {
1608 cpu_fprintf(f, "Max guest delay %"PRIi64" ms\n", -max_delay/SCALE_MS);
1609 cpu_fprintf(f, "Max guest advance %"PRIi64" ms\n", max_advance/SCALE_MS);
1610 } else {
1611 cpu_fprintf(f, "Max guest delay NA\n");
1612 cpu_fprintf(f, "Max guest advance NA\n");
1613 }
1614 }