/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "qapi/qmp/qerror.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
#include "qapi-event.h"
#include "hw/nmi.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;
int64_t max_delay;
int64_t max_advance;

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

static int64_t vm_clock_warp_start = -1;
/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10

static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;

typedef struct TimersState {
    /* Protected by BQL. */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read out of BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;

    /* Compensate for varying guest execution speed. */
    int64_t qemu_icount_bias;
    /* Only written by TCG thread */
    int64_t qemu_icount;
} TimersState;

static TimersState timers_state;

int64_t cpu_get_icount_raw(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = timers_state.qemu_icount;
    if (cpu) {
        if (!cpu_can_do_io(cpu)) {
            fprintf(stderr, "Bad icount read\n");
            exit(1);
        }
        icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
    }
    return icount;
}
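
/*
 * Worked example (illustrative, not from the original source): if the
 * TCG thread has banked qemu_icount = 1000 instructions but the current
 * CPU still holds icount_decr.u16.low = 5 and icount_extra = 20
 * instructions of unexecuted budget, the number of instructions
 * actually executed so far is 1000 - (5 + 20) = 975.
 */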

/* Return the virtual CPU time, based on the instruction counter. */
static int64_t cpu_get_icount_locked(void)
{
    int64_t icount = cpu_get_icount_raw();
    return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
}

int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}
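
/*
 * The loop above is the standard seqlock read pattern: retry until the
 * sequence count is unchanged across the critical section.  A minimal
 * sketch of a reader for any other seqlock-protected field would look
 * the same ("foo" is a hypothetical field, shown only for illustration):
 *
 *     int64_t read_foo(void)
 *     {
 *         int64_t val;
 *         unsigned start;
 *
 *         do {
 *             start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
 *             val = timers_state.foo;
 *         } while (seqlock_read_retry(&timers_state.vm_clock_seqlock,
 *                                     start));
 *         return val;
 *     }
 */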

int64_t cpu_icount_to_ns(int64_t icount)
{
    return icount << icount_time_shift;
}
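
/*
 * Example (illustrative): with icount_time_shift == 3, every emulated
 * instruction accounts for 1 << 3 = 8 ns of virtual time, i.e. a nominal
 * 125 MIPS guest, and cpu_icount_to_ns(1000) == 8000.  With
 * "-icount shift=0", one instruction equals one nanosecond (1000 MIPS).
 */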

/* return the host CPU cycle counter and handle stop/restart */
/* Caller must hold the BQL */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_real_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Note: non-increasing ticks may happen if the host uses
           software suspend.  */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}

static int64_t cpu_get_clock_locked(void)
{
    int64_t ticks;

    ticks = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += get_clock();
    }

    return ticks;
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

/* return the offset between the host clock and virtual CPU clock */
int64_t cpu_get_clock_offset(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = timers_state.cpu_clock_offset;
        if (!timers_state.cpu_ticks_enabled) {
            ti -= get_clock();
        }
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return -ti;
}

/* enable cpu_get_ticks()
 * Caller must hold the BQL, which serves as the mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, the thing really protected by the seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* disable cpu_get_ticks(): the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold the BQL, which serves as the mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, the thing really protected by the seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex.  */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    timers_state.qemu_icount_bias = cur_icount
                              - (timers_state.qemu_icount << icount_time_shift);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
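
/*
 * Worked example for the adjustment above (illustrative numbers): if
 * virtual time (cur_icount) is 300 ms ahead of real time (cur_time),
 * delta is +300 ms; as long as last_delta + ICOUNT_WOBBLE is still below
 * 2 * delta, icount_time_shift is decremented, halving the nanoseconds
 * credited per instruction.  Recomputing qemu_icount_bias from the
 * current values keeps QEMU_CLOCK_VIRTUAL continuous across the rescale.
 */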

static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}
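
/*
 * Example (illustrative): with icount_time_shift == 3, a 20 ns deadline
 * rounds up to qemu_icount_round(20) == (20 + 7) >> 3 == 3 instructions,
 * i.e. 24 ns of budget; rounding up guarantees the deadline is never
 * undershot.
 */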

static void icount_warp_rt(void *opaque)
{
    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    if (atomic_read(&vm_clock_warp_start) == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        int64_t warp_delta;

        warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock_locked();
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = cur_time - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        timers_state.qemu_icount_bias += warp_delta;
    }
    vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        timers_state.qemu_icount_bias += warp;
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}

void qemu_clock_warp(QEMUClockType type)
{
    int64_t clock;
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
     * This ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest QEMU_CLOCK_VIRTUAL timer.
     */
    icount_warp_rt(NULL);
    timer_del(icount_warp_timer);
    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
         * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
         * event.  Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
         * after some "real" time (related to the time left until the next
         * event) has passed.  The QEMU_CLOCK_REALTIME timer will do this.
         * This avoids that the warps are visible externally; for example,
         * you will not be sending network packets continuously instead of
         * every 100ms.
         */
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
            vm_clock_warp_start = clock;
        }
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);
        timer_mod_anticipate(icount_warp_timer, clock + deadline);
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
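
/*
 * Illustrative scenario for the warp machinery above: all vCPUs go idle
 * while the earliest QEMU_CLOCK_VIRTUAL timer is 10 ms away.  The
 * deadline is positive, so icount_warp_timer is armed 10 ms of real time
 * in the future; when it fires, icount_warp_rt() folds the elapsed real
 * time into qemu_icount_bias, advancing QEMU_CLOCK_VIRTUAL to the timer's
 * deadline without the vCPU executing a single instruction.
 */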

static bool icount_state_needed(void *opaque)
{
    return use_icount;
}

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &icount_vmstate_timers,
            .needed = icount_state_needed,
        }, {
            /* empty */
        }
    }
};
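
/*
 * With this layout the "timer/icount" subsection only appears on the
 * migration stream when icount_state_needed() returns true, i.e. when
 * -icount is in use; a destination started without -icount never sees
 * (and never has to parse) the icount fields.
 */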

void cpu_ticks_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock, NULL);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
}

void configure_icount(QemuOpts *opts, Error **errp)
{
    const char *option;
    char *rem_str = NULL;

    option = qemu_opt_get(opts, "shift");
    if (!option) {
        if (qemu_opt_get(opts, "align") != NULL) {
            error_setg(errp, "Please specify shift option when using align");
        }
        return;
    }
    icount_align_option = qemu_opt_get_bool(opts, "align", false);
    icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
                                     icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        errno = 0;
        icount_time_shift = strtol(option, &rem_str, 0);
        if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
            error_setg(errp, "icount: Invalid shift value");
        }
        use_icount = 1;
        return;
    } else if (icount_align_option) {
        error_setg(errp, "shift=auto and align=on are incompatible");
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
                                   icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
}
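
/*
 * Typical command lines handled by the parser above (illustrative):
 *
 *     qemu-system-... -icount shift=7            (fixed 2^7 ns per insn)
 *     qemu-system-... -icount shift=auto         (adaptive, use_icount == 2)
 *     qemu-system-... -icount shift=7,align=on   (fixed shift, with align)
 *
 * As enforced above, "align=on" together with "shift=auto" is rejected,
 * and "align" without "shift" is an error.
 */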

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

void cpu_clean_all_dirty(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_clean_state(cpu);
    }
}

static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        qapi_event_send_stop(&error_abort);
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

static void cpu_signal(int sig)
{
    if (current_cpu) {
        cpu_exit(current_cpu);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}

void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}
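
/*
 * Usage sketch (hypothetical caller, not part of this file): queue work
 * from the iothread and let the vCPU thread run it the next time it
 * drains its work list:
 *
 *     static void do_nothing_work(void *data)
 *     {
 *         CPUState *cpu = data;
 *         (void)cpu;               (e.g. touch per-CPU state here)
 *     }
 *
 *     async_run_on_cpu(cpu, do_nothing_work, cpu);
 *
 * run_on_cpu() is the synchronous variant: it blocks on qemu_work_cond
 * until the vCPU thread marks the item done.
 */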

static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->exception_index = -1;
    cpu->can_do_io = 1;
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->exception_index = -1;
    cpu->can_do_io = 1;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}

static void tcg_exec_all(void);

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
        cpu->exception_index = -1;
        cpu->can_do_io = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (QTAILQ_FIRST(&cpus)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.  Retry until
         * GetThreadContext() succeeds; it returns zero on failure.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) == 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);

    if (!current_cpu->thread_kicked) {
        qemu_cpu_kick_thread(current_cpu);
        current_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return 0;
        }
    }

    return 1;
}

void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}

/* For temporary buffers for forming a name */
#define VCPU_THREAD_NAME_SIZE 16

static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    tcg_cpu_address_space_init(cpu, cpu->as);

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                 cpu->cpu_index);
        qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
                           cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed.  */
        return bdrv_flush_all();
    }
}

static int tcg_cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int64_t deadline;
        int decr;
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                     + cpu->icount_extra);
        cpu->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        count = qemu_icount_round(deadline);
        timers_state.qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        cpu->icount_decr.u16.low = decr;
        cpu->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                     + cpu->icount_extra);
        cpu->icount_decr.u32 = 0;
        cpu->icount_extra = 0;
    }
    return ret;
}
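
/*
 * Worked example of the budget split above (illustrative): a deadline
 * that rounds to count == 0x12345 instructions is split into
 * decr == 0xffff, the 16-bit low part consumed directly by the
 * translated code, and icount_extra == 0x12345 - 0xffff == 0x2346,
 * which the execution loop refills into u16.low as it drains.
 */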

static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;
        CPUArchState *env = cpu->env_ptr;

        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}

CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#elif defined(TARGET_TRICORE)
        TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
        CPUTriCoreState *env = &tricore_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#elif defined(TARGET_TRICORE)
        info->value->has_PC = true;
        info->value->PC = env->PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
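
/*
 * Example QMP exchange served by the handler above (output values are
 * illustrative):
 *
 *     -> { "execute": "query-cpus" }
 *     <- { "return": [ { "CPU": 0, "current": true, "halted": false,
 *                        "pc": 1048576, "thread_id": 28439 } ] }
 *
 * The architecture-specific program-counter member (pc/nip/npc/PC)
 * follows the #if ladder above.
 */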

void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                  "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
            error_setg(errp, "Invalid addr 0x%016" PRIx64 " specified", addr);
            goto exit;
        }
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}
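
/*
 * Example QMP invocation (illustrative values):
 *
 *     -> { "execute": "memsave",
 *          "arguments": { "val": 4096, "size": 1024,
 *                         "filename": "/tmp/vm.mem" } }
 *     <- { "return": {} }
 *
 * "val" is a virtual address in the addressed CPU; qmp_pmemsave() below
 * is the physical-address counterpart.
 */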

void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_physical_memory_read(addr, buf, l);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);

        if (!cpu->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(cpu->apic_state);
        }
    }
#else
    nmi_monitor_handle(monitor_get_cpu_index(), errp);
#endif
}

void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
{
    if (!use_icount) {
        return;
    }

    cpu_fprintf(f, "Host - Guest clock %"PRIi64" ms\n",
                (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
    if (icount_align_option) {
        cpu_fprintf(f, "Max guest delay %"PRIi64" ms\n", -max_delay/SCALE_MS);
        cpu_fprintf(f, "Max guest advance %"PRIi64" ms\n",
                    max_advance/SCALE_MS);
    } else {
        cpu_fprintf(f, "Max guest delay NA\n");
        cpu_fprintf(f, "Max guest advance NA\n");
    }
}