/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "qapi/qmp/qerror.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
#include "qapi-event.h"
#include "hw/nmi.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;
int64_t max_delay;
int64_t max_advance;

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

static int64_t vm_clock_warp_start = -1;
/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
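
/*
 * Worked example (illustrative numbers): with icount enabled, one guest
 * instruction advances QEMU_CLOCK_VIRTUAL by (1 << icount_time_shift)
 * nanoseconds.  At the MAX_ICOUNT_SHIFT cap of 10 that is 1024 ns per
 * instruction, roughly 10^9 / 1024 ~ 976k instructions per emulated
 * second -- the "1MIPS minimum" mentioned above.  The default shift of 3
 * chosen in configure_icount() below gives 8 ns per instruction,
 * about 125 MIPS.
 */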

static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;

typedef struct TimersState {
    /* Protected by BQL. */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read out of BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;

    /* Compensate for varying guest execution speed. */
    int64_t qemu_icount_bias;
    /* Only written by TCG thread */
    int64_t qemu_icount;
} TimersState;

static TimersState timers_state;

int64_t cpu_get_icount_raw(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = timers_state.qemu_icount;
    if (cpu) {
        if (!cpu_can_do_io(cpu)) {
            fprintf(stderr, "Bad icount read\n");
            exit(1);
        }
        icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
    }
    return icount;
}

/* Return the virtual CPU time, based on the instruction counter. */
static int64_t cpu_get_icount_locked(void)
{
    int64_t icount = cpu_get_icount_raw();
    return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
}

int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}
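
/*
 * The loop above is the read side of the seqlock idiom: a reader retries
 * whenever a writer held vm_clock_seqlock during its critical section.
 * A minimal sketch of the same pattern for any 64-bit field guarded by
 * this seqlock (hypothetical helper, kept out of the build):
 */
#if 0
static int64_t read_vm_clock_field(const int64_t *field)
{
    int64_t val;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        val = *field;   /* may observe a torn write; the retry below
                         * detects and discards such a read */
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
    return val;
}
#endif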

int64_t cpu_icount_to_ns(int64_t icount)
{
    return icount << icount_time_shift;
}

/* return the host CPU cycle counter and handle stop/restart */
/* Caller must hold the BQL */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_real_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Note: non-increasing ticks may happen if the host uses
           software suspend. */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}

static int64_t cpu_get_clock_locked(void)
{
    int64_t ticks;

    ticks = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += get_clock();
    }

    return ticks;
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

/* return the offset between the host clock and virtual CPU clock */
int64_t cpu_get_clock_offset(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = timers_state.cpu_clock_offset;
        if (!timers_state.cpu_ticks_enabled) {
            ti -= get_clock();
        }
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return -ti;
}

/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, the only thing actually protected by the seqlock is
     * cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, the only thing actually protected by the seqlock is
     * cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
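
/*
 * Illustrative call sequence (the callers live in the vm start/stop paths;
 * this is only a sketch): the two helpers are expected to bracket guest
 * execution, so the offsets accumulate only time the VM actually ran:
 *
 *     cpu_enable_ticks();     // on vm start: clocks resume from offsets
 *     // ... guest runs; cpu_get_ticks()/cpu_get_clock() advance ...
 *     cpu_disable_ticks();    // in do_vm_stop(): offsets frozen
 */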

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle, real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex. */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing. */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down. */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up. */
        icount_time_shift++;
    }
    last_delta = delta;
    timers_state.qemu_icount_bias = cur_icount
                              - (timers_state.qemu_icount << icount_time_shift);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
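
/*
 * Worked example with illustrative numbers: get_ticks_per_sec() is 10^9,
 * so ICOUNT_WOBBLE is 100 ms.  If the guest clock (cur_icount) is 300 ms
 * ahead of real time (delta == 300 ms) and last_delta was 80 ms, then
 * last_delta + ICOUNT_WOBBLE == 180 ms < 2 * delta == 600 ms, so
 * icount_time_shift is decremented and each instruction now costs half
 * as many virtual nanoseconds.  qemu_icount_bias is recomputed at the
 * same time so that cpu_get_icount_locked() stays continuous across the
 * change of shift.
 */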

static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}
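
/*
 * This is a ceiling division by the per-instruction cost: with
 * icount_time_shift == 3 (8 ns per instruction), a 100 ns deadline gives
 * (100 + 7) >> 3 == 13 instructions, so a nonzero deadline never rounds
 * down to an empty execution budget.
 */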

static void icount_warp_rt(void *opaque)
{
    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    if (atomic_read(&vm_clock_warp_start) == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = cpu_get_clock_locked();
        int64_t warp_delta;

        warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = clock - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        timers_state.qemu_icount_bias += warp_delta;
    }
    vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        timers_state.qemu_icount_bias += warp;
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}

void qemu_clock_warp(QEMUClockType type)
{
    int64_t clock;
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
     * This ensures that the deadline for the timer is computed correctly
     * below.  This also makes sure that the insn counter is synchronized
     * before the CPU starts running, in case the CPU is woken by an event
     * other than the earliest QEMU_CLOCK_VIRTUAL timer.
     */
    icount_warp_rt(NULL);
    timer_del(icount_warp_timer);
    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount. */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance
         * QEMU_CLOCK_VIRTUAL.
         *
         * An extreme solution to this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
         * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
         * event.  Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
         * after some "real" time (related to the time left until the next
         * event) has passed.  The QEMU_CLOCK_VIRTUAL_RT clock will do this.
         * This keeps the warps from being visible externally; for example,
         * you will not be sending network packets continuously instead of
         * every 100ms.
         */
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
            vm_clock_warp_start = clock;
        }
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);
        timer_mod_anticipate(icount_warp_timer, clock + deadline);
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static bool icount_state_needed(void *opaque)
{
    return use_icount;
}

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &icount_vmstate_timers,
            .needed = icount_state_needed,
        }, {
            /* empty */
        }
    }
};

void cpu_ticks_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock, NULL);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
}

void configure_icount(QemuOpts *opts, Error **errp)
{
    const char *option;
    char *rem_str = NULL;

    option = qemu_opt_get(opts, "shift");
    if (!option) {
        if (qemu_opt_get(opts, "align") != NULL) {
            error_setg(errp, "Please specify shift option when using align");
        }
        return;
    }
    icount_align_option = qemu_opt_get_bool(opts, "align", false);
    icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                     icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        errno = 0;
        icount_time_shift = strtol(option, &rem_str, 0);
        if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
            error_setg(errp, "icount: Invalid shift value");
        }
        use_icount = 1;
        return;
    } else if (icount_align_option) {
        error_setg(errp, "shift=auto and align=on are incompatible");
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway. */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers. */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                                   icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
}
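
/*
 * Command-line usage, for illustration: "-icount shift=3" selects the fixed
 * 8 ns/insn mode above (use_icount == 1); "-icount shift=auto" selects the
 * self-tuning mode (use_icount == 2); "-icount shift=3,align=on" also
 * enables the align option, while "-icount shift=auto,align=on" and
 * "-icount align=on" without a shift are rejected by the checks above.
 */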

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

void cpu_clean_all_dirty(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_clean_state(cpu);
    }
}

static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        qapi_event_send_stop(&error_abort);
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

static void cpu_signal(int sig)
{
    if (current_cpu) {
        cpu_exit(current_cpu);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}

void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}
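
/*
 * Usage sketch (hypothetical callback, kept out of the build): run_on_cpu()
 * blocks until flush_queued_work() on the target vCPU thread has marked the
 * stack-allocated item done; async_run_on_cpu() queues a heap-allocated item
 * and returns at once, the item freeing itself (wi->free) after running.
 */
#if 0
static void do_reset_one_cpu(void *data)
{
    CPUState *cpu = data;

    cpu_reset(cpu);     /* executes on cpu's own thread, with the BQL held */
}

static void reset_cpu_synchronously(CPUState *cpu)
{
    run_on_cpu(cpu, do_reset_one_cpu, cpu);     /* returns after the reset */
}
#endif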

static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle. */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->exception_index = -1;
    cpu->can_do_io = 1;
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->exception_index = -1;
    cpu->can_do_io = 1;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}

static void tcg_exec_all(void);

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
        cpu->exception_index = -1;
        cpu->can_do_io = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (QTAILQ_FIRST(&cpus)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);

    if (!current_cpu->thread_kicked) {
        qemu_cpu_kick_thread(current_cpu);
        current_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return 0;
        }
    }

    return 1;
}

void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}

/* For temporary buffers for forming a name */
#define VCPU_THREAD_NAME_SIZE 16

static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    tcg_cpu_address_space_init(cpu, cpu->as);

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                 cpu->cpu_index);
        qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
                           cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}

static int tcg_cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int64_t deadline;
        int decr;
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                     + cpu->icount_extra);
        cpu->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        count = qemu_icount_round(deadline);
        timers_state.qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        cpu->icount_decr.u16.low = decr;
        cpu->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag. */
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                     + cpu->icount_extra);
        cpu->icount_decr.u32 = 0;
        cpu->icount_extra = 0;
    }
    return ret;
}
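
/*
 * For illustration: a 1 ms deadline with icount_time_shift == 3 yields a
 * budget of qemu_icount_round(1000000) == 125000 instructions.  It is
 * split into icount_decr.u16.low == 0xffff (65535, the 16-bit down counter
 * checked by generated code) and icount_extra == 59465; after cpu_exec()
 * returns, any unexecuted remainder is folded back out of
 * timers_state.qemu_icount, so only instructions that actually ran are
 * accounted.
 */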

static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;
        CPUArchState *env = cpu->env_ptr;

        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}

CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#elif defined(TARGET_TRICORE)
        TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
        CPUTriCoreState *env = &tricore_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#elif defined(TARGET_TRICORE)
        info->value->has_PC = true;
        info->value->PC = env->PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
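
/*
 * QMP usage, for illustration (the program-counter field name varies by
 * target, as selected above; the values here are made up):
 *
 * -> { "execute": "query-cpus" }
 * <- { "return": [ { "CPU": 0, "current": true, "halted": false,
 *                    "pc": 1048576, "thread_id": 3134 } ] }
 */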

void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                  "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
            error_setg(errp, "Invalid addr 0x%016" PRIx64 " specified", addr);
            goto exit;
        }
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}
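
/*
 * QMP usage, for illustration (path and sizes are made up): save 4 KiB of
 * guest virtual memory at address 0x1000, as seen by the default CPU 0:
 *
 * -> { "execute": "memsave",
 *      "arguments": { "val": 4096, "size": 4096,
 *                     "filename": "/tmp/virtual-mem-dump" } }
 * <- { "return": {} }
 */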

void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_physical_memory_read(addr, buf, l);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);

        if (!cpu->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(cpu->apic_state);
        }
    }
#else
    nmi_monitor_handle(monitor_get_cpu_index(), errp);
#endif
}

void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
{
    if (!use_icount) {
        return;
    }

    cpu_fprintf(f, "Host - Guest clock %"PRIi64" ms\n",
                (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
    if (icount_align_option) {
        cpu_fprintf(f, "Max guest delay %"PRIi64" ms\n", -max_delay/SCALE_MS);
        cpu_fprintf(f, "Max guest advance %"PRIi64" ms\n",
                    max_advance/SCALE_MS);
    } else {
        cpu_fprintf(f, "Max guest delay NA\n");
        cpu_fprintf(f, "Max guest advance NA\n");
    }
}