1 /*
2 * QEMU System Emulator
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 /* Needed early for CONFIG_BSD etc. */
26 #include "config-host.h"
27
28 #include "monitor/monitor.h"
29 #include "qapi/qmp/qerror.h"
30 #include "qemu/error-report.h"
31 #include "sysemu/sysemu.h"
32 #include "exec/gdbstub.h"
33 #include "sysemu/dma.h"
34 #include "sysemu/kvm.h"
35 #include "qmp-commands.h"
36
37 #include "qemu/thread.h"
38 #include "sysemu/cpus.h"
39 #include "sysemu/qtest.h"
40 #include "qemu/main-loop.h"
41 #include "qemu/bitmap.h"
42 #include "qemu/seqlock.h"
43 #include "qapi-event.h"
44 #include "hw/nmi.h"
45
46 #ifndef _WIN32
47 #include "qemu/compatfd.h"
48 #endif
49
50 #ifdef CONFIG_LINUX
51
52 #include <sys/prctl.h>
53
54 #ifndef PR_MCE_KILL
55 #define PR_MCE_KILL 33
56 #endif
57
58 #ifndef PR_MCE_KILL_SET
59 #define PR_MCE_KILL_SET 1
60 #endif
61
62 #ifndef PR_MCE_KILL_EARLY
63 #define PR_MCE_KILL_EARLY 1
64 #endif
65
66 #endif /* CONFIG_LINUX */
67
68 static CPUState *next_cpu;
69 int64_t max_delay;
70 int64_t max_advance;
71
72 bool cpu_is_stopped(CPUState *cpu)
73 {
74 return cpu->stopped || !runstate_is_running();
75 }
76
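/* A vCPU thread is considered idle when it has no stop request or queued
   work and is either stopped, or halted with no pending work (and halts
   are not handled in-kernel by KVM). */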
77 static bool cpu_thread_is_idle(CPUState *cpu)
78 {
79 if (cpu->stop || cpu->queued_work_first) {
80 return false;
81 }
82 if (cpu_is_stopped(cpu)) {
83 return true;
84 }
85 if (!cpu->halted || cpu_has_work(cpu) ||
86 kvm_halt_in_kernel()) {
87 return false;
88 }
89 return true;
90 }
91
92 static bool all_cpu_threads_idle(void)
93 {
94 CPUState *cpu;
95
96 CPU_FOREACH(cpu) {
97 if (!cpu_thread_is_idle(cpu)) {
98 return false;
99 }
100 }
101 return true;
102 }
103
104 /***********************************************************/
105 /* guest cycle counter */
106
107 /* Protected by TimersState seqlock */
108
109 static bool icount_sleep = true;
110 static int64_t vm_clock_warp_start = -1;
111 /* Conversion factor from emulated instructions to virtual clock ticks. */
112 static int icount_time_shift;
113 /* Arbitrarily pick 1MIPS as the minimum allowable speed. */
114 #define MAX_ICOUNT_SHIFT 10
115
116 static QEMUTimer *icount_rt_timer;
117 static QEMUTimer *icount_vm_timer;
118 static QEMUTimer *icount_warp_timer;
119
120 typedef struct TimersState {
121 /* Protected by BQL. */
122 int64_t cpu_ticks_prev;
123 int64_t cpu_ticks_offset;
124
125 /* cpu_clock_offset can be read outside the BQL, so protect it with
126 * this lock.
127 */
128 QemuSeqLock vm_clock_seqlock;
129 int64_t cpu_clock_offset;
130 int32_t cpu_ticks_enabled;
131 int64_t dummy;
132
133 /* Compensate for varying guest execution speed. */
134 int64_t qemu_icount_bias;
135 /* Only written by TCG thread */
136 int64_t qemu_icount;
137 } TimersState;
138
139 static TimersState timers_state;
140
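/* Return the number of guest instructions executed so far; qemu_icount is
   incremented by the whole budget up front, so the part the running CPU
   has not yet consumed is subtracted back out. */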
141 int64_t cpu_get_icount_raw(void)
142 {
143 int64_t icount;
144 CPUState *cpu = current_cpu;
145
146 icount = timers_state.qemu_icount;
147 if (cpu) {
148 if (!cpu->can_do_io) {
149 fprintf(stderr, "Bad icount read\n");
150 exit(1);
151 }
152 icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
153 }
154 return icount;
155 }
156
157 /* Return the virtual CPU time, based on the instruction counter. */
158 static int64_t cpu_get_icount_locked(void)
159 {
160 int64_t icount = cpu_get_icount_raw();
161 return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
162 }
163
164 int64_t cpu_get_icount(void)
165 {
166 int64_t icount;
167 unsigned start;
168
169 do {
170 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
171 icount = cpu_get_icount_locked();
172 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
173
174 return icount;
175 }
176
177 int64_t cpu_icount_to_ns(int64_t icount)
178 {
179 return icount << icount_time_shift;
180 }
181
182 /* return the host CPU cycle counter and handle stop/restart */
183 /* Caller must hold the BQL */
184 int64_t cpu_get_ticks(void)
185 {
186 int64_t ticks;
187
188 if (use_icount) {
189 return cpu_get_icount();
190 }
191
192 ticks = timers_state.cpu_ticks_offset;
193 if (timers_state.cpu_ticks_enabled) {
194 ticks += cpu_get_real_ticks();
195 }
196
197 if (timers_state.cpu_ticks_prev > ticks) {
198 /* Note: non-increasing ticks may happen if the host uses
199 software suspend */
200 timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
201 ticks = timers_state.cpu_ticks_prev;
202 }
203
204 timers_state.cpu_ticks_prev = ticks;
205 return ticks;
206 }
207
208 static int64_t cpu_get_clock_locked(void)
209 {
210 int64_t ticks;
211
212 ticks = timers_state.cpu_clock_offset;
213 if (timers_state.cpu_ticks_enabled) {
214 ticks += get_clock();
215 }
216
217 return ticks;
218 }
219
220 /* return the host CPU monotonic time and handle stop/restart */
221 int64_t cpu_get_clock(void)
222 {
223 int64_t ti;
224 unsigned start;
225
226 do {
227 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
228 ti = cpu_get_clock_locked();
229 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
230
231 return ti;
232 }
233
234 /* enable cpu_get_ticks()
235 * Caller must hold the BQL, which serves as the mutex for vm_clock_seqlock.
236 */
237 void cpu_enable_ticks(void)
238 {
239 /* Here, the real thing protected by the seqlock is cpu_clock_offset. */
240 seqlock_write_lock(&timers_state.vm_clock_seqlock);
241 if (!timers_state.cpu_ticks_enabled) {
242 timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
243 timers_state.cpu_clock_offset -= get_clock();
244 timers_state.cpu_ticks_enabled = 1;
245 }
246 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
247 }
248
249 /* disable cpu_get_ticks(): the clock is stopped. You must not call
250 * cpu_get_ticks() after that.
251 * Caller must hold the BQL, which serves as the mutex for vm_clock_seqlock.
252 */
253 void cpu_disable_ticks(void)
254 {
255 /* Here, the real thing protected by the seqlock is cpu_clock_offset. */
256 seqlock_write_lock(&timers_state.vm_clock_seqlock);
257 if (timers_state.cpu_ticks_enabled) {
258 timers_state.cpu_ticks_offset += cpu_get_real_ticks();
259 timers_state.cpu_clock_offset = cpu_get_clock_locked();
260 timers_state.cpu_ticks_enabled = 0;
261 }
262 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
263 }
264
265 /* Correlation between real and virtual time is always going to be
266 fairly approximate, so ignore small variations.
267 When the guest is idle, real and virtual time will be aligned in
268 the IO wait loop. */
269 #define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)
270
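/* Nudge icount_time_shift so that emulated time tracks real time: slow the
   virtual clock down when it runs ahead and speed it up when it lags, then
   recompute qemu_icount_bias so QEMU_CLOCK_VIRTUAL stays continuous. */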
271 static void icount_adjust(void)
272 {
273 int64_t cur_time;
274 int64_t cur_icount;
275 int64_t delta;
276
277 /* Protected by TimersState mutex. */
278 static int64_t last_delta;
279
280 /* If the VM is not running, then do nothing. */
281 if (!runstate_is_running()) {
282 return;
283 }
284
285 seqlock_write_lock(&timers_state.vm_clock_seqlock);
286 cur_time = cpu_get_clock_locked();
287 cur_icount = cpu_get_icount_locked();
288
289 delta = cur_icount - cur_time;
290 /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
291 if (delta > 0
292 && last_delta + ICOUNT_WOBBLE < delta * 2
293 && icount_time_shift > 0) {
294 /* The guest is getting too far ahead. Slow time down. */
295 icount_time_shift--;
296 }
297 if (delta < 0
298 && last_delta - ICOUNT_WOBBLE > delta * 2
299 && icount_time_shift < MAX_ICOUNT_SHIFT) {
300 /* The guest is getting too far behind. Speed time up. */
301 icount_time_shift++;
302 }
303 last_delta = delta;
304 timers_state.qemu_icount_bias = cur_icount
305 - (timers_state.qemu_icount << icount_time_shift);
306 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
307 }
308
309 static void icount_adjust_rt(void *opaque)
310 {
311 timer_mod(icount_rt_timer,
312 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
313 icount_adjust();
314 }
315
316 static void icount_adjust_vm(void *opaque)
317 {
318 timer_mod(icount_vm_timer,
319 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
320 get_ticks_per_sec() / 10);
321 icount_adjust();
322 }
323
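/* Convert a nanosecond deadline into an instruction budget, rounding up. */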
324 static int64_t qemu_icount_round(int64_t count)
325 {
326 return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
327 }
328
329 static void icount_warp_rt(void *opaque)
330 {
331 /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
332 * changes from -1 to another value, so the race here is okay.
333 */
334 if (atomic_read(&vm_clock_warp_start) == -1) {
335 return;
336 }
337
338 seqlock_write_lock(&timers_state.vm_clock_seqlock);
339 if (runstate_is_running()) {
340 int64_t clock = cpu_get_clock_locked();
341 int64_t warp_delta;
342
343 warp_delta = clock - vm_clock_warp_start;
344 if (use_icount == 2) {
345 /*
346 * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
347 * far ahead of real time.
348 */
349 int64_t cur_icount = cpu_get_icount_locked();
350 int64_t delta = clock - cur_icount;
351 warp_delta = MIN(warp_delta, delta);
352 }
353 timers_state.qemu_icount_bias += warp_delta;
354 }
355 vm_clock_warp_start = -1;
356 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
357
358 if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
359 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
360 }
361 }
362
363 void qtest_clock_warp(int64_t dest)
364 {
365 int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
366 AioContext *aio_context;
367 assert(qtest_enabled());
368 aio_context = qemu_get_aio_context();
369 while (clock < dest) {
370 int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
371 int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
372
373 seqlock_write_lock(&timers_state.vm_clock_seqlock);
374 timers_state.qemu_icount_bias += warp;
375 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
376
377 qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
378 timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
379 clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
380 }
381 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
382 }
383
384 void qemu_clock_warp(QEMUClockType type)
385 {
386 int64_t clock;
387 int64_t deadline;
388
389 /*
390 * There are too many global variables to make the "warp" behavior
391 * applicable to other clocks. But a clock argument removes the
392 * need for if statements all over the place.
393 */
394 if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
395 return;
396 }
397
398 if (icount_sleep) {
399 /*
400 * If the CPUs have been sleeping, advance the QEMU_CLOCK_VIRTUAL timer now.
401 * This ensures that the deadline for the timer is computed correctly
402 * below.
403 * This also makes sure that the insn counter is synchronized before
404 * the CPU starts running, in case the CPU is woken by an event other
405 * than the earliest QEMU_CLOCK_VIRTUAL timer.
406 */
407 icount_warp_rt(NULL);
408 timer_del(icount_warp_timer);
409 }
410 if (!all_cpu_threads_idle()) {
411 return;
412 }
413
414 if (qtest_enabled()) {
415 /* When testing, qtest commands advance icount. */
416 return;
417 }
418
419 /* We want to use the earliest deadline from ALL vm_clocks */
420 clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
421 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
422 if (deadline < 0) {
423 static bool notified;
424 if (!icount_sleep && !notified) {
425 error_report("WARNING: icount sleep disabled and no active timers");
426 notified = true;
427 }
428 return;
429 }
430
431 if (deadline > 0) {
432 /*
433 * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
434 * sleep. Otherwise, the CPU might be waiting for a future timer
435 * interrupt to wake it up, but the interrupt never comes because
436 * the vCPU isn't running any insns and thus doesn't advance the
437 * QEMU_CLOCK_VIRTUAL.
438 */
439 if (!icount_sleep) {
440 /*
441 * We never let VCPUs sleep in no-sleep icount mode.
442 * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
443 * to the next QEMU_CLOCK_VIRTUAL event and notify it.
444 * This is useful when we want a deterministic execution time,
445 * isolated from host latencies.
446 */
447 seqlock_write_lock(&timers_state.vm_clock_seqlock);
448 timers_state.qemu_icount_bias += deadline;
449 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
450 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
451 } else {
452 /*
453 * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
454 * "real" time (related to the time left until the next event) has
455 * passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
456 * This keeps the warps from being visible externally; for example,
457 * you will not send a burst of network packets instead of one
458 * every 100ms.
459 */
460 seqlock_write_lock(&timers_state.vm_clock_seqlock);
461 if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
462 vm_clock_warp_start = clock;
463 }
464 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
465 timer_mod_anticipate(icount_warp_timer, clock + deadline);
466 }
467 } else if (deadline == 0) {
468 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
469 }
470 }
471
472 static bool icount_state_needed(void *opaque)
473 {
474 return use_icount;
475 }
476
477 /*
478 * This is a subsection for icount migration.
479 */
480 static const VMStateDescription icount_vmstate_timers = {
481 .name = "timer/icount",
482 .version_id = 1,
483 .minimum_version_id = 1,
484 .needed = icount_state_needed,
485 .fields = (VMStateField[]) {
486 VMSTATE_INT64(qemu_icount_bias, TimersState),
487 VMSTATE_INT64(qemu_icount, TimersState),
488 VMSTATE_END_OF_LIST()
489 }
490 };
491
492 static const VMStateDescription vmstate_timers = {
493 .name = "timer",
494 .version_id = 2,
495 .minimum_version_id = 1,
496 .fields = (VMStateField[]) {
497 VMSTATE_INT64(cpu_ticks_offset, TimersState),
498 VMSTATE_INT64(dummy, TimersState),
499 VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
500 VMSTATE_END_OF_LIST()
501 },
502 .subsections = (const VMStateDescription*[]) {
503 &icount_vmstate_timers,
504 NULL
505 }
506 };
507
508 void cpu_ticks_init(void)
509 {
510 seqlock_init(&timers_state.vm_clock_seqlock, NULL);
511 vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
512 }
513
514 void configure_icount(QemuOpts *opts, Error **errp)
515 {
516 const char *option;
517 char *rem_str = NULL;
518
519 option = qemu_opt_get(opts, "shift");
520 if (!option) {
521 if (qemu_opt_get(opts, "align") != NULL) {
522 error_setg(errp, "Please specify shift option when using align");
523 }
524 return;
525 }
526
527 icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
528 if (icount_sleep) {
529 icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
530 icount_warp_rt, NULL);
531 }
532
533 icount_align_option = qemu_opt_get_bool(opts, "align", false);
534
535 if (icount_align_option && !icount_sleep) {
536 error_setg(errp, "align=on and sleep=no are incompatible");
537 }
538 if (strcmp(option, "auto") != 0) {
539 errno = 0;
540 icount_time_shift = strtol(option, &rem_str, 0);
541 if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
542 error_setg(errp, "icount: Invalid shift value");
543 }
544 use_icount = 1;
545 return;
546 } else if (icount_align_option) {
547 error_setg(errp, "shift=auto and align=on are incompatible");
548 } else if (!icount_sleep) {
549 error_setg(errp, "shift=auto and sleep=no are incompatible");
550 }
551
552 use_icount = 2;
553
554 /* 125MIPS seems a reasonable initial guess at the guest speed.
555 It will be corrected fairly quickly anyway. */
556 icount_time_shift = 3;
557
558 /* Have both realtime and virtual time triggers for speed adjustment.
559 The realtime trigger catches emulated time passing too slowly,
560 the virtual time trigger catches emulated time passing too fast.
561 Realtime triggers occur even when idle, so use them less frequently
562 than VM triggers. */
563 icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
564 icount_adjust_rt, NULL);
565 timer_mod(icount_rt_timer,
566 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
567 icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
568 icount_adjust_vm, NULL);
569 timer_mod(icount_vm_timer,
570 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
571 get_ticks_per_sec() / 10);
572 }
573
574 /***********************************************************/
575 void hw_error(const char *fmt, ...)
576 {
577 va_list ap;
578 CPUState *cpu;
579
580 va_start(ap, fmt);
581 fprintf(stderr, "qemu: hardware error: ");
582 vfprintf(stderr, fmt, ap);
583 fprintf(stderr, "\n");
584 CPU_FOREACH(cpu) {
585 fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
586 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
587 }
588 va_end(ap);
589 abort();
590 }
591
592 void cpu_synchronize_all_states(void)
593 {
594 CPUState *cpu;
595
596 CPU_FOREACH(cpu) {
597 cpu_synchronize_state(cpu);
598 }
599 }
600
601 void cpu_synchronize_all_post_reset(void)
602 {
603 CPUState *cpu;
604
605 CPU_FOREACH(cpu) {
606 cpu_synchronize_post_reset(cpu);
607 }
608 }
609
610 void cpu_synchronize_all_post_init(void)
611 {
612 CPUState *cpu;
613
614 CPU_FOREACH(cpu) {
615 cpu_synchronize_post_init(cpu);
616 }
617 }
618
619 void cpu_clean_all_dirty(void)
620 {
621 CPUState *cpu;
622
623 CPU_FOREACH(cpu) {
624 cpu_clean_state(cpu);
625 }
626 }
627
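/* Stop the ticks and all vCPUs, switch to the new run state and notify
   listeners, then drain in-flight I/O and flush all block devices. */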
628 static int do_vm_stop(RunState state)
629 {
630 int ret = 0;
631
632 if (runstate_is_running()) {
633 cpu_disable_ticks();
634 pause_all_vcpus();
635 runstate_set(state);
636 vm_state_notify(0, state);
637 qapi_event_send_stop(&error_abort);
638 }
639
640 bdrv_drain_all();
641 ret = bdrv_flush_all();
642
643 return ret;
644 }
645
646 static bool cpu_can_run(CPUState *cpu)
647 {
648 if (cpu->stop) {
649 return false;
650 }
651 if (cpu_is_stopped(cpu)) {
652 return false;
653 }
654 return true;
655 }
656
657 static void cpu_handle_guest_debug(CPUState *cpu)
658 {
659 gdb_set_stop_cpu(cpu);
660 qemu_system_debug_request();
661 cpu->stopped = true;
662 }
663
664 #ifdef CONFIG_LINUX
665 static void sigbus_reraise(void)
666 {
667 sigset_t set;
668 struct sigaction action;
669
670 memset(&action, 0, sizeof(action));
671 action.sa_handler = SIG_DFL;
672 if (!sigaction(SIGBUS, &action, NULL)) {
673 raise(SIGBUS);
674 sigemptyset(&set);
675 sigaddset(&set, SIGBUS);
676 sigprocmask(SIG_UNBLOCK, &set, NULL);
677 }
678 perror("Failed to re-raise SIGBUS!\n");
679 abort();
680 }
681
682 static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
683 void *ctx)
684 {
685 if (kvm_on_sigbus(siginfo->ssi_code,
686 (void *)(intptr_t)siginfo->ssi_addr)) {
687 sigbus_reraise();
688 }
689 }
690
691 static void qemu_init_sigbus(void)
692 {
693 struct sigaction action;
694
695 memset(&action, 0, sizeof(action));
696 action.sa_flags = SA_SIGINFO;
697 action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
698 sigaction(SIGBUS, &action, NULL);
699
700 prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
701 }
702
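/* Drain any pending SIG_IPI and SIGBUS without blocking (zero timeout);
   SIGBUS is forwarded to kvm_on_sigbus_vcpu(). */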
703 static void qemu_kvm_eat_signals(CPUState *cpu)
704 {
705 struct timespec ts = { 0, 0 };
706 siginfo_t siginfo;
707 sigset_t waitset;
708 sigset_t chkset;
709 int r;
710
711 sigemptyset(&waitset);
712 sigaddset(&waitset, SIG_IPI);
713 sigaddset(&waitset, SIGBUS);
714
715 do {
716 r = sigtimedwait(&waitset, &siginfo, &ts);
717 if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
718 perror("sigtimedwait");
719 exit(1);
720 }
721
722 switch (r) {
723 case SIGBUS:
724 if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
725 sigbus_reraise();
726 }
727 break;
728 default:
729 break;
730 }
731
732 r = sigpending(&chkset);
733 if (r == -1) {
734 perror("sigpending");
735 exit(1);
736 }
737 } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
738 }
739
740 #else /* !CONFIG_LINUX */
741
742 static void qemu_init_sigbus(void)
743 {
744 }
745
746 static void qemu_kvm_eat_signals(CPUState *cpu)
747 {
748 }
749 #endif /* !CONFIG_LINUX */
750
751 #ifndef _WIN32
752 static void dummy_signal(int sig)
753 {
754 }
755
756 static void qemu_kvm_init_cpu_signals(CPUState *cpu)
757 {
758 int r;
759 sigset_t set;
760 struct sigaction sigact;
761
762 memset(&sigact, 0, sizeof(sigact));
763 sigact.sa_handler = dummy_signal;
764 sigaction(SIG_IPI, &sigact, NULL);
765
766 pthread_sigmask(SIG_BLOCK, NULL, &set);
767 sigdelset(&set, SIG_IPI);
768 sigdelset(&set, SIGBUS);
769 r = kvm_set_signal_mask(cpu, &set);
770 if (r) {
771 fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
772 exit(1);
773 }
774 }
775
776 #else /* _WIN32 */
777 static void qemu_kvm_init_cpu_signals(CPUState *cpu)
778 {
779 abort();
780 }
781 #endif /* _WIN32 */
782
783 static QemuMutex qemu_global_mutex;
784 static QemuCond qemu_io_proceeded_cond;
785 static unsigned iothread_requesting_mutex;
786
787 static QemuThread io_thread;
788
789 /* cpu creation */
790 static QemuCond qemu_cpu_cond;
791 /* system init */
792 static QemuCond qemu_pause_cond;
793 static QemuCond qemu_work_cond;
794
795 void qemu_init_cpu_loop(void)
796 {
797 qemu_init_sigbus();
798 qemu_cond_init(&qemu_cpu_cond);
799 qemu_cond_init(&qemu_pause_cond);
800 qemu_cond_init(&qemu_work_cond);
801 qemu_cond_init(&qemu_io_proceeded_cond);
802 qemu_mutex_init(&qemu_global_mutex);
803
804 qemu_thread_get_self(&io_thread);
805 }
806
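/* Run func(data) on the target vCPU's thread and wait for it to finish.
   The work item lives on this stack frame; we sleep on qemu_work_cond
   (dropping the BQL) until the target thread marks it done. */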
807 void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
808 {
809 struct qemu_work_item wi;
810
811 if (qemu_cpu_is_self(cpu)) {
812 func(data);
813 return;
814 }
815
816 wi.func = func;
817 wi.data = data;
818 wi.free = false;
819
820 qemu_mutex_lock(&cpu->work_mutex);
821 if (cpu->queued_work_first == NULL) {
822 cpu->queued_work_first = &wi;
823 } else {
824 cpu->queued_work_last->next = &wi;
825 }
826 cpu->queued_work_last = &wi;
827 wi.next = NULL;
828 wi.done = false;
829 qemu_mutex_unlock(&cpu->work_mutex);
830
831 qemu_cpu_kick(cpu);
832 while (!atomic_mb_read(&wi.done)) {
833 CPUState *self_cpu = current_cpu;
834
835 qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
836 current_cpu = self_cpu;
837 }
838 }
839
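/* Queue func(data) on the target vCPU without waiting; the heap-allocated
   work item is freed by flush_queued_work() after the callback runs. */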
840 void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
841 {
842 struct qemu_work_item *wi;
843
844 if (qemu_cpu_is_self(cpu)) {
845 func(data);
846 return;
847 }
848
849 wi = g_malloc0(sizeof(struct qemu_work_item));
850 wi->func = func;
851 wi->data = data;
852 wi->free = true;
853
854 qemu_mutex_lock(&cpu->work_mutex);
855 if (cpu->queued_work_first == NULL) {
856 cpu->queued_work_first = wi;
857 } else {
858 cpu->queued_work_last->next = wi;
859 }
860 cpu->queued_work_last = wi;
861 wi->next = NULL;
862 wi->done = false;
863 qemu_mutex_unlock(&cpu->work_mutex);
864
865 qemu_cpu_kick(cpu);
866 }
867
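/* Run every work item queued on this CPU, dropping work_mutex around each
   callback, then wake any run_on_cpu() callers waiting on qemu_work_cond. */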
868 static void flush_queued_work(CPUState *cpu)
869 {
870 struct qemu_work_item *wi;
871
872 if (cpu->queued_work_first == NULL) {
873 return;
874 }
875
876 qemu_mutex_lock(&cpu->work_mutex);
877 while (cpu->queued_work_first != NULL) {
878 wi = cpu->queued_work_first;
879 cpu->queued_work_first = wi->next;
880 if (!cpu->queued_work_first) {
881 cpu->queued_work_last = NULL;
882 }
883 qemu_mutex_unlock(&cpu->work_mutex);
884 wi->func(wi->data);
885 qemu_mutex_lock(&cpu->work_mutex);
886 if (wi->free) {
887 g_free(wi);
888 } else {
889 atomic_mb_set(&wi->done, true);
890 }
891 }
892 qemu_mutex_unlock(&cpu->work_mutex);
893 qemu_cond_broadcast(&qemu_work_cond);
894 }
895
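/* Housekeeping done by every vCPU thread after waking up: acknowledge a
   pending stop request, run queued work and clear the kick flag. */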
896 static void qemu_wait_io_event_common(CPUState *cpu)
897 {
898 if (cpu->stop) {
899 cpu->stop = false;
900 cpu->stopped = true;
901 qemu_cond_signal(&qemu_pause_cond);
902 }
903 flush_queued_work(cpu);
904 cpu->thread_kicked = false;
905 }
906
907 static void qemu_tcg_wait_io_event(CPUState *cpu)
908 {
909 while (all_cpu_threads_idle()) {
910 /* Start accounting real time to the virtual clock if the CPUs
911 are idle. */
912 qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
913 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
914 }
915
916 while (iothread_requesting_mutex) {
917 qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
918 }
919
920 CPU_FOREACH(cpu) {
921 qemu_wait_io_event_common(cpu);
922 }
923 }
924
925 static void qemu_kvm_wait_io_event(CPUState *cpu)
926 {
927 while (cpu_thread_is_idle(cpu)) {
928 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
929 }
930
931 qemu_kvm_eat_signals(cpu);
932 qemu_wait_io_event_common(cpu);
933 }
934
935 static void *qemu_kvm_cpu_thread_fn(void *arg)
936 {
937 CPUState *cpu = arg;
938 int r;
939
940 rcu_register_thread();
941
942 qemu_mutex_lock_iothread();
943 qemu_thread_get_self(cpu->thread);
944 cpu->thread_id = qemu_get_thread_id();
945 cpu->can_do_io = 1;
946 current_cpu = cpu;
947
948 r = kvm_init_vcpu(cpu);
949 if (r < 0) {
950 fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
951 exit(1);
952 }
953
954 qemu_kvm_init_cpu_signals(cpu);
955
956 /* signal CPU creation */
957 cpu->created = true;
958 qemu_cond_signal(&qemu_cpu_cond);
959
960 while (1) {
961 if (cpu_can_run(cpu)) {
962 r = kvm_cpu_exec(cpu);
963 if (r == EXCP_DEBUG) {
964 cpu_handle_guest_debug(cpu);
965 }
966 }
967 qemu_kvm_wait_io_event(cpu);
968 }
969
970 return NULL;
971 }
972
973 static void *qemu_dummy_cpu_thread_fn(void *arg)
974 {
975 #ifdef _WIN32
976 fprintf(stderr, "qtest is not supported under Windows\n");
977 exit(1);
978 #else
979 CPUState *cpu = arg;
980 sigset_t waitset;
981 int r;
982
983 rcu_register_thread();
984
985 qemu_mutex_lock_iothread();
986 qemu_thread_get_self(cpu->thread);
987 cpu->thread_id = qemu_get_thread_id();
988 cpu->can_do_io = 1;
989
990 sigemptyset(&waitset);
991 sigaddset(&waitset, SIG_IPI);
992
993 /* signal CPU creation */
994 cpu->created = true;
995 qemu_cond_signal(&qemu_cpu_cond);
996
997 current_cpu = cpu;
998 while (1) {
999 current_cpu = NULL;
1000 qemu_mutex_unlock_iothread();
1001 do {
1002 int sig;
1003 r = sigwait(&waitset, &sig);
1004 } while (r == -1 && (errno == EAGAIN || errno == EINTR));
1005 if (r == -1) {
1006 perror("sigwait");
1007 exit(1);
1008 }
1009 qemu_mutex_lock_iothread();
1010 current_cpu = cpu;
1011 qemu_wait_io_event_common(cpu);
1012 }
1013
1014 return NULL;
1015 #endif
1016 }
1017
1018 static void tcg_exec_all(void);
1019
1020 static void *qemu_tcg_cpu_thread_fn(void *arg)
1021 {
1022 CPUState *cpu = arg;
1023
1024 rcu_register_thread();
1025
1026 qemu_mutex_lock_iothread();
1027 qemu_thread_get_self(cpu->thread);
1028
1029 CPU_FOREACH(cpu) {
1030 cpu->thread_id = qemu_get_thread_id();
1031 cpu->created = true;
1032 cpu->can_do_io = 1;
1033 }
1034 qemu_cond_signal(&qemu_cpu_cond);
1035
1036 /* wait for initial kick-off after machine start */
1037 while (first_cpu->stopped) {
1038 qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
1039
1040 /* process any pending work */
1041 CPU_FOREACH(cpu) {
1042 qemu_wait_io_event_common(cpu);
1043 }
1044 }
1045
1046 /* process any pending work */
1047 atomic_mb_set(&exit_request, 1);
1048
1049 while (1) {
1050 tcg_exec_all();
1051
1052 if (use_icount) {
1053 int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1054
1055 if (deadline == 0) {
1056 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
1057 }
1058 }
1059 qemu_tcg_wait_io_event(QTAILQ_FIRST(&cpus));
1060 }
1061
1062 return NULL;
1063 }
1064
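/* Wake the vCPU thread by sending it SIG_IPI (POSIX hosts only);
   thread_kicked suppresses redundant signals until the thread clears it. */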
1065 static void qemu_cpu_kick_thread(CPUState *cpu)
1066 {
1067 #ifndef _WIN32
1068 int err;
1069
1070 if (cpu->thread_kicked) {
1071 return;
1072 }
1073 cpu->thread_kicked = true;
1074 err = pthread_kill(cpu->thread->thread, SIG_IPI);
1075 if (err) {
1076 fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
1077 exit(1);
1078 }
1079 #else /* _WIN32 */
1080 abort();
1081 #endif
1082 }
1083
1084 static void qemu_cpu_kick_no_halt(void)
1085 {
1086 CPUState *cpu;
1087 /* Ensure whatever caused the exit has reached the CPU threads before
1088 * writing exit_request.
1089 */
1090 atomic_mb_set(&exit_request, 1);
1091 cpu = atomic_mb_read(&tcg_current_cpu);
1092 if (cpu) {
1093 cpu_exit(cpu);
1094 }
1095 }
1096
1097 void qemu_cpu_kick(CPUState *cpu)
1098 {
1099 qemu_cond_broadcast(cpu->halt_cond);
1100 if (tcg_enabled()) {
1101 qemu_cpu_kick_no_halt();
1102 } else {
1103 qemu_cpu_kick_thread(cpu);
1104 }
1105 }
1106
1107 void qemu_cpu_kick_self(void)
1108 {
1109 assert(current_cpu);
1110 qemu_cpu_kick_thread(current_cpu);
1111 }
1112
1113 bool qemu_cpu_is_self(CPUState *cpu)
1114 {
1115 return qemu_thread_is_self(cpu->thread);
1116 }
1117
1118 bool qemu_in_vcpu_thread(void)
1119 {
1120 return current_cpu && qemu_cpu_is_self(current_cpu);
1121 }
1122
1123 static __thread bool iothread_locked = false;
1124
1125 bool qemu_mutex_iothread_locked(void)
1126 {
1127 return iothread_locked;
1128 }
1129
1130 void qemu_mutex_lock_iothread(void)
1131 {
1132 atomic_inc(&iothread_requesting_mutex);
1133 /* In the simple case there is no need to bump the VCPU thread out of
1134 * TCG code execution.
1135 */
1136 if (!tcg_enabled() || qemu_in_vcpu_thread() ||
1137 !first_cpu || !first_cpu->created) {
1138 qemu_mutex_lock(&qemu_global_mutex);
1139 atomic_dec(&iothread_requesting_mutex);
1140 } else {
1141 if (qemu_mutex_trylock(&qemu_global_mutex)) {
1142 qemu_cpu_kick_no_halt();
1143 qemu_mutex_lock(&qemu_global_mutex);
1144 }
1145 atomic_dec(&iothread_requesting_mutex);
1146 qemu_cond_broadcast(&qemu_io_proceeded_cond);
1147 }
1148 iothread_locked = true;
1149 }
1150
1151 void qemu_mutex_unlock_iothread(void)
1152 {
1153 iothread_locked = false;
1154 qemu_mutex_unlock(&qemu_global_mutex);
1155 }
1156
1157 static int all_vcpus_paused(void)
1158 {
1159 CPUState *cpu;
1160
1161 CPU_FOREACH(cpu) {
1162 if (!cpu->stopped) {
1163 return 0;
1164 }
1165 }
1166
1167 return 1;
1168 }
1169
1170 void pause_all_vcpus(void)
1171 {
1172 CPUState *cpu;
1173
1174 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
1175 CPU_FOREACH(cpu) {
1176 cpu->stop = true;
1177 qemu_cpu_kick(cpu);
1178 }
1179
1180 if (qemu_in_vcpu_thread()) {
1181 cpu_stop_current();
1182 if (!kvm_enabled()) {
1183 CPU_FOREACH(cpu) {
1184 cpu->stop = false;
1185 cpu->stopped = true;
1186 }
1187 return;
1188 }
1189 }
1190
1191 while (!all_vcpus_paused()) {
1192 qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
1193 CPU_FOREACH(cpu) {
1194 qemu_cpu_kick(cpu);
1195 }
1196 }
1197 }
1198
1199 void cpu_resume(CPUState *cpu)
1200 {
1201 cpu->stop = false;
1202 cpu->stopped = false;
1203 qemu_cpu_kick(cpu);
1204 }
1205
1206 void resume_all_vcpus(void)
1207 {
1208 CPUState *cpu;
1209
1210 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
1211 CPU_FOREACH(cpu) {
1212 cpu_resume(cpu);
1213 }
1214 }
1215
1216 /* Size of the temporary buffers used to build vCPU thread names */
1217 #define VCPU_THREAD_NAME_SIZE 16
1218
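/* All TCG vCPUs run in one shared thread: the first CPU initialized here
   spawns it, later CPUs simply reuse its thread and halt condition. */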
1219 static void qemu_tcg_init_vcpu(CPUState *cpu)
1220 {
1221 char thread_name[VCPU_THREAD_NAME_SIZE];
1222 static QemuCond *tcg_halt_cond;
1223 static QemuThread *tcg_cpu_thread;
1224
1225 tcg_cpu_address_space_init(cpu, cpu->as);
1226
1227 /* share a single thread for all cpus with TCG */
1228 if (!tcg_cpu_thread) {
1229 cpu->thread = g_malloc0(sizeof(QemuThread));
1230 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1231 qemu_cond_init(cpu->halt_cond);
1232 tcg_halt_cond = cpu->halt_cond;
1233 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
1234 cpu->cpu_index);
1235 qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
1236 cpu, QEMU_THREAD_JOINABLE);
1237 #ifdef _WIN32
1238 cpu->hThread = qemu_thread_get_handle(cpu->thread);
1239 #endif
1240 while (!cpu->created) {
1241 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1242 }
1243 tcg_cpu_thread = cpu->thread;
1244 } else {
1245 cpu->thread = tcg_cpu_thread;
1246 cpu->halt_cond = tcg_halt_cond;
1247 }
1248 }
1249
1250 static void qemu_kvm_start_vcpu(CPUState *cpu)
1251 {
1252 char thread_name[VCPU_THREAD_NAME_SIZE];
1253
1254 cpu->thread = g_malloc0(sizeof(QemuThread));
1255 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1256 qemu_cond_init(cpu->halt_cond);
1257 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
1258 cpu->cpu_index);
1259 qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
1260 cpu, QEMU_THREAD_JOINABLE);
1261 while (!cpu->created) {
1262 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1263 }
1264 }
1265
1266 static void qemu_dummy_start_vcpu(CPUState *cpu)
1267 {
1268 char thread_name[VCPU_THREAD_NAME_SIZE];
1269
1270 cpu->thread = g_malloc0(sizeof(QemuThread));
1271 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1272 qemu_cond_init(cpu->halt_cond);
1273 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
1274 cpu->cpu_index);
1275 qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
1276 QEMU_THREAD_JOINABLE);
1277 while (!cpu->created) {
1278 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1279 }
1280 }
1281
1282 void qemu_init_vcpu(CPUState *cpu)
1283 {
1284 cpu->nr_cores = smp_cores;
1285 cpu->nr_threads = smp_threads;
1286 cpu->stopped = true;
1287 if (kvm_enabled()) {
1288 qemu_kvm_start_vcpu(cpu);
1289 } else if (tcg_enabled()) {
1290 qemu_tcg_init_vcpu(cpu);
1291 } else {
1292 qemu_dummy_start_vcpu(cpu);
1293 }
1294 }
1295
1296 void cpu_stop_current(void)
1297 {
1298 if (current_cpu) {
1299 current_cpu->stop = false;
1300 current_cpu->stopped = true;
1301 cpu_exit(current_cpu);
1302 qemu_cond_signal(&qemu_pause_cond);
1303 }
1304 }
1305
1306 int vm_stop(RunState state)
1307 {
1308 if (qemu_in_vcpu_thread()) {
1309 qemu_system_vmstop_request_prepare();
1310 qemu_system_vmstop_request(state);
1311 /*
1312 * FIXME: should not return to device code in case
1313 * vm_stop() has been requested.
1314 */
1315 cpu_stop_current();
1316 return 0;
1317 }
1318
1319 return do_vm_stop(state);
1320 }
1321
1322 /* Does a state transition even if the VM is already stopped;
1323 the current state is forgotten forever. */
1324 int vm_stop_force_state(RunState state)
1325 {
1326 if (runstate_is_running()) {
1327 return vm_stop(state);
1328 } else {
1329 runstate_set(state);
1330 /* Make sure to return an error if the flush in a previous vm_stop()
1331 * failed. */
1332 return bdrv_flush_all();
1333 }
1334 }
1335
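/* Execute guest code for one vCPU.  With icount, hand the CPU an
   instruction budget derived from the next QEMU_CLOCK_VIRTUAL deadline
   (split between icount_decr.u16.low and icount_extra) and fold whatever
   is left back into the counter afterwards. */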
1336 static int tcg_cpu_exec(CPUState *cpu)
1337 {
1338 int ret;
1339 #ifdef CONFIG_PROFILER
1340 int64_t ti;
1341 #endif
1342
1343 #ifdef CONFIG_PROFILER
1344 ti = profile_getclock();
1345 #endif
1346 if (use_icount) {
1347 int64_t count;
1348 int64_t deadline;
1349 int decr;
1350 timers_state.qemu_icount -= (cpu->icount_decr.u16.low
1351 + cpu->icount_extra);
1352 cpu->icount_decr.u16.low = 0;
1353 cpu->icount_extra = 0;
1354 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1355
1356 /* Maintain prior (possibly buggy) behaviour where if no deadline
1357 * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
1358 * INT32_MAX nanoseconds ahead, we still use INT32_MAX
1359 * nanoseconds.
1360 */
1361 if ((deadline < 0) || (deadline > INT32_MAX)) {
1362 deadline = INT32_MAX;
1363 }
1364
1365 count = qemu_icount_round(deadline);
1366 timers_state.qemu_icount += count;
1367 decr = (count > 0xffff) ? 0xffff : count;
1368 count -= decr;
1369 cpu->icount_decr.u16.low = decr;
1370 cpu->icount_extra = count;
1371 }
1372 ret = cpu_exec(cpu);
1373 #ifdef CONFIG_PROFILER
1374 tcg_time += profile_getclock() - ti;
1375 #endif
1376 if (use_icount) {
1377 /* Fold pending instructions back into the
1378 instruction counter, and clear the interrupt flag. */
1379 timers_state.qemu_icount -= (cpu->icount_decr.u16.low
1380 + cpu->icount_extra);
1381 cpu->icount_decr.u32 = 0;
1382 cpu->icount_extra = 0;
1383 }
1384 return ret;
1385 }
1386
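/* Round-robin over all vCPUs in the single TCG thread, starting from
   next_cpu, until an exit is requested, a debug event occurs or a CPU
   is stopped. */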
1387 static void tcg_exec_all(void)
1388 {
1389 int r;
1390
1391 /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
1392 qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
1393
1394 if (next_cpu == NULL) {
1395 next_cpu = first_cpu;
1396 }
1397 for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
1398 CPUState *cpu = next_cpu;
1399
1400 qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
1401 (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
1402
1403 if (cpu_can_run(cpu)) {
1404 r = tcg_cpu_exec(cpu);
1405 if (r == EXCP_DEBUG) {
1406 cpu_handle_guest_debug(cpu);
1407 break;
1408 }
1409 } else if (cpu->stop || cpu->stopped) {
1410 break;
1411 }
1412 }
1413
1414 /* Pairs with smp_wmb in qemu_cpu_kick. */
1415 atomic_mb_set(&exit_request, 0);
1416 }
1417
1418 void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
1419 {
1420 /* XXX: implement xxx_cpu_list for targets that still lack it */
1421 #if defined(cpu_list)
1422 cpu_list(f, cpu_fprintf);
1423 #endif
1424 }
1425
1426 CpuInfoList *qmp_query_cpus(Error **errp)
1427 {
1428 CpuInfoList *head = NULL, *cur_item = NULL;
1429 CPUState *cpu;
1430
1431 CPU_FOREACH(cpu) {
1432 CpuInfoList *info;
1433 #if defined(TARGET_I386)
1434 X86CPU *x86_cpu = X86_CPU(cpu);
1435 CPUX86State *env = &x86_cpu->env;
1436 #elif defined(TARGET_PPC)
1437 PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
1438 CPUPPCState *env = &ppc_cpu->env;
1439 #elif defined(TARGET_SPARC)
1440 SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
1441 CPUSPARCState *env = &sparc_cpu->env;
1442 #elif defined(TARGET_MIPS)
1443 MIPSCPU *mips_cpu = MIPS_CPU(cpu);
1444 CPUMIPSState *env = &mips_cpu->env;
1445 #elif defined(TARGET_TRICORE)
1446 TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
1447 CPUTriCoreState *env = &tricore_cpu->env;
1448 #endif
1449
1450 cpu_synchronize_state(cpu);
1451
1452 info = g_malloc0(sizeof(*info));
1453 info->value = g_malloc0(sizeof(*info->value));
1454 info->value->CPU = cpu->cpu_index;
1455 info->value->current = (cpu == first_cpu);
1456 info->value->halted = cpu->halted;
1457 info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
1458 info->value->thread_id = cpu->thread_id;
1459 #if defined(TARGET_I386)
1460 info->value->has_pc = true;
1461 info->value->pc = env->eip + env->segs[R_CS].base;
1462 #elif defined(TARGET_PPC)
1463 info->value->has_nip = true;
1464 info->value->nip = env->nip;
1465 #elif defined(TARGET_SPARC)
1466 info->value->has_pc = true;
1467 info->value->pc = env->pc;
1468 info->value->has_npc = true;
1469 info->value->npc = env->npc;
1470 #elif defined(TARGET_MIPS)
1471 info->value->has_PC = true;
1472 info->value->PC = env->active_tc.PC;
1473 #elif defined(TARGET_TRICORE)
1474 info->value->has_PC = true;
1475 info->value->PC = env->PC;
1476 #endif
1477
1478 /* XXX: waiting for the qapi to support GSList */
1479 if (!cur_item) {
1480 head = cur_item = info;
1481 } else {
1482 cur_item->next = info;
1483 cur_item = info;
1484 }
1485 }
1486
1487 return head;
1488 }
1489
1490 void qmp_memsave(int64_t addr, int64_t size, const char *filename,
1491 bool has_cpu, int64_t cpu_index, Error **errp)
1492 {
1493 FILE *f;
1494 uint32_t l;
1495 CPUState *cpu;
1496 uint8_t buf[1024];
1497 int64_t orig_addr = addr, orig_size = size;
1498
1499 if (!has_cpu) {
1500 cpu_index = 0;
1501 }
1502
1503 cpu = qemu_get_cpu(cpu_index);
1504 if (cpu == NULL) {
1505 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
1506 "a CPU number");
1507 return;
1508 }
1509
1510 f = fopen(filename, "wb");
1511 if (!f) {
1512 error_setg_file_open(errp, errno, filename);
1513 return;
1514 }
1515
1516 while (size != 0) {
1517 l = sizeof(buf);
1518 if (l > size)
1519 l = size;
1520 if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
1521 error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
1522 " specified", orig_addr, orig_size);
1523 goto exit;
1524 }
1525 if (fwrite(buf, 1, l, f) != l) {
1526 error_setg(errp, QERR_IO_ERROR);
1527 goto exit;
1528 }
1529 addr += l;
1530 size -= l;
1531 }
1532
1533 exit:
1534 fclose(f);
1535 }
1536
1537 void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
1538 Error **errp)
1539 {
1540 FILE *f;
1541 uint32_t l;
1542 uint8_t buf[1024];
1543
1544 f = fopen(filename, "wb");
1545 if (!f) {
1546 error_setg_file_open(errp, errno, filename);
1547 return;
1548 }
1549
1550 while (size != 0) {
1551 l = sizeof(buf);
1552 if (l > size)
1553 l = size;
1554 cpu_physical_memory_read(addr, buf, l);
1555 if (fwrite(buf, 1, l, f) != l) {
1556 error_setg(errp, QERR_IO_ERROR);
1557 goto exit;
1558 }
1559 addr += l;
1560 size -= l;
1561 }
1562
1563 exit:
1564 fclose(f);
1565 }
1566
1567 void qmp_inject_nmi(Error **errp)
1568 {
1569 #if defined(TARGET_I386)
1570 CPUState *cs;
1571
1572 CPU_FOREACH(cs) {
1573 X86CPU *cpu = X86_CPU(cs);
1574
1575 if (!cpu->apic_state) {
1576 cpu_interrupt(cs, CPU_INTERRUPT_NMI);
1577 } else {
1578 apic_deliver_nmi(cpu->apic_state);
1579 }
1580 }
1581 #else
1582 nmi_monitor_handle(monitor_get_cpu_index(), errp);
1583 #endif
1584 }
1585
1586 void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
1587 {
1588 if (!use_icount) {
1589 return;
1590 }
1591
1592 cpu_fprintf(f, "Host - Guest clock %"PRIi64" ms\n",
1593 (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
1594 if (icount_align_option) {
1595 cpu_fprintf(f, "Max guest delay %"PRIi64" ms\n", -max_delay/SCALE_MS);
1596 cpu_fprintf(f, "Max guest advance %"PRIi64" ms\n", max_advance/SCALE_MS);
1597 } else {
1598 cpu_fprintf(f, "Max guest delay NA\n");
1599 cpu_fprintf(f, "Max guest advance NA\n");
1600 }
1601 }