/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "dma.h"
#include "kvm.h"
#include "qmp-commands.h"

#include "qemu-thread.h"
#include "cpus.h"
#include "main-loop.h"

#ifndef _WIN32
#include "compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUArchState *next_cpu;

/***********************************************************/
/* guest cycle counter */

/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed. */
static int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;
static int64_t vm_clock_warp_start;
static int64_t qemu_icount;

typedef struct TimersState {
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

TimersState timers_state;

/* Return the virtual CPU time, based on the instruction counter. */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUArchState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}
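
/*
 * In icount mode the virtual clock is derived from the number of executed
 * guest instructions: cpu_get_icount() returns
 * qemu_icount_bias + (instructions << icount_time_shift), so icount_time_shift
 * is the log2 of the virtual nanoseconds charged per instruction.  A shift of
 * 3 models 8 ns per instruction (~125 MIPS); MAX_ICOUNT_SHIFT of 10 models
 * 1024 ns per instruction, i.e. the ~1 MIPS floor noted above.
 */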

/* return the host CPU cycle counter and handle stop/restart */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non-increasing ticks may happen if the host uses
               software suspend. */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        return ti + timers_state.cpu_clock_offset;
    }
}

/* enable cpu_get_ticks() */
void cpu_enable_ticks(void)
{
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
}

/* disable cpu_get_ticks(): the clock is stopped.  You must not call
   cpu_get_ticks() after that. */
void cpu_disable_ticks(void)
{
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock();
        timers_state.cpu_ticks_enabled = 0;
    }
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;
    /* If the VM is not running, then do nothing. */
    if (!runstate_is_running()) {
        return;
    }
    cur_time = cpu_get_clock();
    cur_icount = qemu_get_clock_ns(vm_clock);
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down. */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up. */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}

static void icount_adjust_rt(void *opaque)
{
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}
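
/*
 * qemu_icount_round() turns a vm_clock deadline in nanoseconds into an
 * instruction budget by dividing by 2^icount_time_shift and rounding up,
 * so at least enough instructions are scheduled to reach the deadline.
 */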

static void icount_warp_rt(void *opaque)
{
    if (vm_clock_warp_start == -1) {
        return;
    }

    if (runstate_is_running()) {
        int64_t clock = qemu_get_clock_ns(rt_clock);
        int64_t warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 1) {
            qemu_icount_bias += warp_delta;
        } else {
            /*
             * In adaptive mode, do not let the vm_clock run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock();
            int64_t cur_icount = qemu_get_clock_ns(vm_clock);
            int64_t delta = cur_time - cur_icount;
            qemu_icount_bias += MIN(warp_delta, delta);
        }
        if (qemu_clock_expired(vm_clock)) {
            qemu_notify_event();
        }
    }
    vm_clock_warp_start = -1;
}

void qemu_clock_warp(QEMUClock *clock)
{
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (clock != vm_clock || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance the vm_clock timer now.  This
     * ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest vm_clock timer.
     */
    icount_warp_rt(NULL);
    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(vm_clock)) {
        qemu_del_timer(icount_warp_timer);
        return;
    }

    vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
    deadline = qemu_clock_deadline(vm_clock);
    if (deadline > 0) {
        /*
         * Ensure the vm_clock proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * vm_clock.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending vm_clock timer; rather,
         * time could just advance to the next vm_clock event.  Instead, we
         * stop VCPUs and only advance vm_clock after some "real" time
         * (related to the time left until the next event) has passed.  The
         * rt_clock timer armed below does this.  This keeps the warps from
         * being too visible externally: the guest will not, for example,
         * send network packets in one continuous burst instead of one every
         * 100ms.
         */
        qemu_mod_timer(icount_warp_timer, vm_clock_warp_start + deadline);
    } else {
        qemu_notify_event();
    }
}

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};

void configure_icount(const char *option)
{
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option) {
        return;
    }

    icount_warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway. */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers. */
    icount_rt_timer = qemu_new_timer_ms(rt_clock, icount_adjust_rt, NULL);
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_vm_timer = qemu_new_timer_ns(vm_clock, icount_adjust_vm, NULL);
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
}
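
/*
 * configure_icount() receives the argument of the -icount command-line
 * option: a fixed shift value (e.g. "-icount 3") selects the deterministic
 * mode (use_icount == 1), while "-icount auto" selects the adaptive mode
 * (use_icount == 2) that is re-tuned by the two timers armed above.
 */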

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUArchState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
#ifdef TARGET_I386
        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
#else
        cpu_dump_state(env, stderr, fprintf, 0);
#endif
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUArchState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUArchState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUArchState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

int cpu_is_stopped(CPUArchState *env)
{
    return !runstate_is_running() || env->stopped;
}

static void do_vm_stop(RunState state)
{
    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        bdrv_drain_all();
        bdrv_flush_all();
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}

static int cpu_can_run(CPUArchState *env)
{
    if (env->stop) {
        return 0;
    }
    if (env->stopped || !runstate_is_running()) {
        return 0;
    }
    return 1;
}

static bool cpu_thread_is_idle(CPUArchState *env)
{
    if (env->stop || env->queued_work_first) {
        return false;
    }
    if (env->stopped || !runstate_is_running()) {
        return true;
    }
    if (!env->halted || qemu_cpu_has_work(env) ||
        (kvm_enabled() && kvm_irqchip_in_kernel())) {
        return false;
    }
    return true;
}

bool all_cpu_threads_idle(void)
{
    CPUArchState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!cpu_thread_is_idle(env)) {
            return false;
        }
    }
    return true;
}

static void cpu_handle_guest_debug(CPUArchState *env)
{
    gdb_set_stop_cpu(env);
    qemu_system_debug_request();
    env->stopped = 1;
}

static void cpu_signal(int sig)
{
    if (cpu_single_env) {
        cpu_exit(cpu_single_env);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUArchState *env)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}
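
/*
 * qemu_kvm_eat_signals() drains any SIG_IPI or SIGBUS left pending for this
 * thread: sigtimedwait() with a zero timeout never blocks, and the loop
 * repeats until sigpending() reports neither signal as pending.  SIGBUS is
 * forwarded to KVM for MCE handling; SIG_IPI is simply consumed.
 */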

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUArchState *env)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUArchState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUArchState *env)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

void run_on_cpu(CPUArchState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(env)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->queued_work_first) {
        env->queued_work_first = &wi;
    } else {
        env->queued_work_last->next = &wi;
    }
    env->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(env);
    while (!wi.done) {
        CPUArchState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}
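
/*
 * run_on_cpu() executes func(data) on the target vCPU's thread.  The work
 * item lives on the caller's stack, which is safe because the caller blocks
 * on qemu_work_cond (dropping qemu_global_mutex while waiting) until the
 * target thread marks the item done in flush_queued_work().  Illustrative
 * call, with a hypothetical helper:
 *
 *     run_on_cpu(env, do_reset_one_cpu, env);
 */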

static void flush_queued_work(CPUArchState *env)
{
    struct qemu_work_item *wi;

    if (!env->queued_work_first) {
        return;
    }

    while ((wi = env->queued_work_first)) {
        env->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    env->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUArchState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
    env->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUArchState *env;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle. */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}

static void qemu_kvm_wait_io_event(CPUArchState *env)
{
    while (cpu_thread_is_idle(env)) {
        qemu_cond_wait(env->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(env);
    qemu_wait_io_event_common(env);
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUArchState *env = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(env->thread);
    env->thread_id = qemu_get_thread_id();
    cpu_single_env = env;

    r = kvm_init_vcpu(env);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(env)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
            }
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}

static void tcg_exec_all(void);

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUArchState *env = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        env->thread_id = qemu_get_thread_id();
        env->created = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            qemu_wait_io_event_common(env);
        }
    }

    while (1) {
        tcg_exec_all();
        if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

static void qemu_cpu_kick_thread(CPUArchState *env)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(env->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(env)) {
        SuspendThread(env->hThread);
        cpu_signal(0);
        ResumeThread(env->hThread);
    }
#endif
}
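
/*
 * On POSIX hosts the kick is a SIG_IPI sent to the vCPU thread: for KVM it
 * interrupts the blocking KVM_RUN ioctl, for TCG the cpu_signal() handler
 * calls cpu_exit() and raises exit_request.  Windows has no per-thread
 * signals, so the target thread is briefly suspended and cpu_signal(0) is
 * run on its behalf.
 */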

void qemu_cpu_kick(void *_env)
{
    CPUArchState *env = _env;

    qemu_cond_broadcast(env->halt_cond);
    if (kvm_enabled() && !env->thread_kicked) {
        qemu_cpu_kick_thread(env);
        env->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);

    if (!cpu_single_env->thread_kicked) {
        qemu_cpu_kick_thread(cpu_single_env);
        cpu_single_env->thread_kicked = true;
    }
#else
    abort();
#endif
}

int qemu_cpu_is_self(void *_env)
{
    CPUArchState *env = _env;

    return qemu_thread_is_self(env->thread);
}

void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}
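
/*
 * Under TCG the vCPU thread can hold qemu_global_mutex for a whole execution
 * slice, so if the trylock above fails the TCG thread is kicked first: the
 * resulting exit_request makes it leave cpu_exec() and give up the mutex in
 * qemu_tcg_wait_io_event(), while iothread_requesting_mutex keeps it parked
 * on qemu_io_proceeded_cond until the I/O thread is done.  KVM vCPU threads
 * drop the mutex around KVM_RUN, so a plain lock suffices for them.
 */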

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUArchState *penv = first_cpu;

    while (penv) {
        if (!penv->stopped) {
            return 0;
        }
        penv = penv->next_cpu;
    }

    return 1;
}

void pause_all_vcpus(void)
{
    CPUArchState *penv = first_cpu;

    qemu_clock_enable(vm_clock, false);
    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = penv->next_cpu;
    }

    if (!qemu_thread_is_self(&io_thread)) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            while (penv) {
                penv->stop = 0;
                penv->stopped = 1;
                penv = penv->next_cpu;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = penv->next_cpu;
        }
    }
}
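
/*
 * pause_all_vcpus() can be entered from a vCPU thread as well as from the
 * I/O thread.  A vCPU caller first parks itself with cpu_stop_current(); in
 * the TCG case, where every CPU runs on the calling thread, the function
 * then returns early because blocking on qemu_pause_cond here would deadlock.
 */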

void resume_all_vcpus(void)
{
    CPUArchState *penv = first_cpu;

    qemu_clock_enable(vm_clock, true);
    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_cpu_kick(penv);
        penv = penv->next_cpu;
    }
}

static void qemu_tcg_init_vcpu(void *_env)
{
    CPUArchState *env = _env;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = g_malloc0(sizeof(QemuThread));
        env->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        tcg_halt_cond = env->halt_cond;
        qemu_thread_create(env->thread, qemu_tcg_cpu_thread_fn, env,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        env->hThread = qemu_thread_get_handle(env->thread);
#endif
        while (env->created == 0) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = env->thread;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}
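
/*
 * All TCG CPUs are multiplexed onto one host thread and share tcg_halt_cond,
 * so only the first qemu_tcg_init_vcpu() call creates the thread; later CPUs
 * simply reuse it.  KVM, by contrast, gets one host thread per vCPU (see
 * qemu_kvm_start_vcpu() below).
 */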

static void qemu_kvm_start_vcpu(CPUArchState *env)
{
    env->thread = g_malloc0(sizeof(QemuThread));
    env->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, qemu_kvm_cpu_thread_fn, env,
                       QEMU_THREAD_JOINABLE);
    while (env->created == 0) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(void *_env)
{
    CPUArchState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    env->stopped = 1;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(env);
    } else {
        qemu_tcg_init_vcpu(env);
    }
}

void cpu_stop_current(void)
{
    if (cpu_single_env) {
        cpu_single_env->stop = 0;
        cpu_single_env->stopped = 1;
        cpu_exit(cpu_single_env);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

void vm_stop(RunState state)
{
    if (!qemu_thread_is_self(&io_thread)) {
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return;
    }
    do_vm_stop(state);
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
void vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        vm_stop(state);
    } else {
        runstate_set(state);
    }
}

static int tcg_cpu_exec(CPUArchState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round(qemu_clock_deadline(vm_clock));
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag. */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}
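
/*
 * The instruction budget computed above is split because icount_decr.u16.low
 * is only 16 bits wide: at most 0xffff instructions are scheduled per pass,
 * with the remainder parked in icount_extra.  After cpu_exec() returns, any
 * instructions that did not run are folded back out of qemu_icount so the
 * virtual clock only advances by what actually executed.
 */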

static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock. */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUArchState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(env)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (env->stop || env->stopped) {
            break;
        }
    }
    exit_request = 0;
}

void set_numa_modes(void)
{
    CPUArchState *env;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (node_cpumask[i] & (1 << env->cpu_index)) {
                env->numa_node = i;
            }
        }
    }
}

void set_cpu_log(const char *optarg)
{
    int mask;
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}

void set_cpu_log_filename(const char *optarg)
{
    cpu_set_log_filename(optarg);
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
    cpu_list_id(f, cpu_fprintf, optarg);
#elif defined(cpu_list)
    cpu_list(f, cpu_fprintf); /* deprecated */
#endif
}

CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUArchState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        CpuInfoList *info;

        cpu_synchronize_state(env);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = env->cpu_index;
        info->value->current = (env == first_cpu);
        info->value->halted = env->halted;
        info->value->thread_id = env->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}

void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUArchState *env;
    uint8_t buf[1024];

    if (!has_cpu) {
        cpu_index = 0;
    }

    for (env = first_cpu; env; env = env->next_cpu) {
        if (cpu_index == env->cpu_index) {
            break;
        }
    }

    if (env == NULL) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                  "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_set(errp, QERR_OPEN_FILE_FAILED, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_memory_rw_debug(env, addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_set(errp, QERR_OPEN_FILE_FAILED, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_physical_memory_rw(addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUArchState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!env->apic_state) {
            cpu_interrupt(env, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(env->apic_state);
        }
    }
#else
    error_set(errp, QERR_UNSUPPORTED);
#endif
}