/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUArchState *next_cpu;

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu->stopped || !runstate_is_running()) {
        return true;
    }
    if (!cpu->halted || qemu_cpu_has_work(cpu) ||
        kvm_async_interrupts_enabled()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUArchState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!cpu_thread_is_idle(ENV_GET_CPU(env))) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed. */
static int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;
static int64_t vm_clock_warp_start;
static int64_t qemu_icount;

typedef struct TimersState {
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

TimersState timers_state;

/* Return the virtual CPU time, based on the instruction counter. */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUArchState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}
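
/*
 * Worked example (illustrative numbers): with icount_time_shift == 3,
 * each completed instruction accounts for 2^3 = 8 ns of virtual time,
 * i.e. a nominal 125 MIPS.  If qemu_icount is 1000, no instructions are
 * in flight and qemu_icount_bias is 500, cpu_get_icount() returns
 * 500 + (1000 << 3) = 8500 ns.
 */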

/* return the host CPU cycle counter and handle stop/restart */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non-increasing ticks may happen if the host uses
               software suspend. */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        return ti + timers_state.cpu_clock_offset;
    }
}

/* enable cpu_get_ticks() */
void cpu_enable_ticks(void)
{
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
}

/* disable cpu_get_ticks(): the clock is stopped.  You must not call
   cpu_get_ticks() after that. */
void cpu_disable_ticks(void)
{
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock();
        timers_state.cpu_ticks_enabled = 0;
    }
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle, real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;
    /* If the VM is not running, then do nothing. */
    if (!runstate_is_running()) {
        return;
    }
    cur_time = cpu_get_clock();
    cur_icount = qemu_get_clock_ns(vm_clock);
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down. */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up. */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}
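
/*
 * Illustrative reading of icount_adjust(): delta is how far virtual time
 * (cur_icount) has drifted from real time.  A positive drift that keeps
 * growing beyond the allowed wobble decrements the shift, halving the ns
 * charged per instruction so virtual time advances more slowly; a growing
 * negative drift increments it.  Recomputing qemu_icount_bias afterwards
 * keeps cpu_get_icount() continuous across the change of shift.
 */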

static void icount_adjust_rt(void *opaque)
{
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}
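
/*
 * Example (illustrative): with icount_time_shift == 3, a 1000 ns deadline
 * rounds up to (1000 + 7) >> 3 = 125 instructions.
 */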

static void icount_warp_rt(void *opaque)
{
    if (vm_clock_warp_start == -1) {
        return;
    }

    if (runstate_is_running()) {
        int64_t clock = qemu_get_clock_ns(rt_clock);
        int64_t warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 1) {
            qemu_icount_bias += warp_delta;
        } else {
            /*
             * In adaptive mode, do not let the vm_clock run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock();
            int64_t cur_icount = qemu_get_clock_ns(vm_clock);
            int64_t delta = cur_time - cur_icount;
            qemu_icount_bias += MIN(warp_delta, delta);
        }
        if (qemu_clock_expired(vm_clock)) {
            qemu_notify_event();
        }
    }
    vm_clock_warp_start = -1;
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_get_clock_ns(vm_clock);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline(vm_clock);
        int64_t warp = MIN(dest - clock, deadline);
        qemu_icount_bias += warp;
        qemu_run_timers(vm_clock);
        clock = qemu_get_clock_ns(vm_clock);
    }
    qemu_notify_event();
}

void qemu_clock_warp(QEMUClock *clock)
{
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (clock != vm_clock || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance the vm_clock timer now.  This
     * ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest vm_clock timer.
     */
    icount_warp_rt(NULL);
    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(vm_clock)) {
        qemu_del_timer(icount_warp_timer);
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount. */
        return;
    }

    vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
    deadline = qemu_clock_deadline(vm_clock);
    if (deadline > 0) {
        /*
         * Ensure the vm_clock proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * vm_clock.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending vm_clock timer; rather,
         * time could just advance to the next vm_clock event.  Instead, we
         * do stop VCPUs and only advance vm_clock after some "real" time
         * (related to the time left until the next event) has passed.  The
         * rt_clock timer installed here does exactly that.  This keeps the
         * warps from being too visible externally---for example, you will
         * not be sending network packets continuously instead of every
         * 100ms.
         */
        qemu_mod_timer(icount_warp_timer, vm_clock_warp_start + deadline);
    } else {
        qemu_notify_event();
    }
}

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};

void configure_icount(const char *option)
{
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option) {
        return;
    }

    icount_warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway. */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers. */
    icount_rt_timer = qemu_new_timer_ms(rt_clock, icount_adjust_rt, NULL);
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_vm_timer = qemu_new_timer_ns(vm_clock, icount_adjust_vm, NULL);
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
}
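
/*
 * Usage sketch (assuming the standard -icount command-line option, whose
 * argument vl.c hands to configure_icount()):
 *
 *   qemu-system-x86_64 -icount 3      # fixed shift: 2^3 ns per insn
 *   qemu-system-x86_64 -icount auto   # adaptive shift (use_icount == 2),
 *                                     # tuned by the timers set up above
 */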

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUArchState *env;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu = ENV_GET_CPU(env);
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUArchState *env;

    for (env = first_cpu; env; env = env->next_cpu) {
        cpu_synchronize_state(ENV_GET_CPU(env));
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUArchState *env;

    for (env = first_cpu; env; env = env->next_cpu) {
        cpu_synchronize_post_reset(ENV_GET_CPU(env));
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUArchState *env;

    for (env = first_cpu; env; env = env->next_cpu) {
        cpu_synchronize_post_init(ENV_GET_CPU(env));
    }
}

bool cpu_is_stopped(CPUState *cpu)
{
    return !runstate_is_running() || cpu->stopped;
}

static void do_vm_stop(RunState state)
{
    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        bdrv_drain_all();
        bdrv_flush_all();
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu->stopped || !runstate_is_running()) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

static void cpu_signal(int sig)
{
    if (cpu_single_env) {
        cpu_exit(ENV_GET_CPU(cpu_single_env));
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUArchState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}
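
/*
 * Usage sketch (illustrative; do_work and its argument are made up):
 *
 *   static void do_work(void *data)
 *   {
 *       CPUState *cpu = data;
 *       ...   // runs on cpu's own thread, with the global mutex held
 *   }
 *
 *   run_on_cpu(cpu, do_work, cpu);   // returns only after do_work ran
 *
 * The work item can live on the caller's stack because the caller blocks
 * on qemu_work_cond until wi.done is set.
 */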

static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUArchState *env;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle. */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(ENV_GET_CPU(env));
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUArchState *env = arg;
    CPUState *cpu = ENV_GET_CPU(env);
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu_single_env = env;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUArchState *env = arg;
    CPUState *cpu = ENV_GET_CPU(env);
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    cpu_single_env = env;
    while (1) {
        cpu_single_env = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        cpu_single_env = env;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}

static void tcg_exec_all(void);

static void tcg_signal_cpu_creation(CPUState *cpu, void *data)
{
    cpu->thread_id = qemu_get_thread_id();
    cpu->created = true;
}

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    CPUArchState *env;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_for_each_cpu(tcg_signal_cpu_creation, NULL);
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (ENV_GET_CPU(first_cpu)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            qemu_wait_io_event_common(ENV_GET_CPU(env));
        }
    }

    while (1) {
        tcg_exec_all();
        if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.  GetThreadContext() returns
         * nonzero on success, so retry while it fails.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) == 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);
    CPUState *cpu_single_cpu = ENV_GET_CPU(cpu_single_env);

    if (!cpu_single_cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu_single_cpu);
        cpu_single_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

static bool qemu_in_vcpu_thread(void)
{
    return cpu_single_env && qemu_cpu_is_self(ENV_GET_CPU(cpu_single_env));
}

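/*
 * Note on the trylock/kick dance in qemu_mutex_lock_iothread(): TCG runs
 * guest code with qemu_global_mutex held, so a plain qemu_mutex_lock()
 * could make the iothread wait for a whole execution slice.  When the
 * trylock fails, the vCPU thread is kicked out of its loop;
 * iothread_requesting_mutex together with qemu_io_proceeded_cond (see
 * qemu_tcg_wait_io_event) keeps the vCPU from re-taking the mutex before
 * the iothread has made progress.
 */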
void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(ENV_GET_CPU(first_cpu));
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUArchState *penv = first_cpu;

    while (penv) {
        CPUState *pcpu = ENV_GET_CPU(penv);
        if (!pcpu->stopped) {
            return 0;
        }
        penv = penv->next_cpu;
    }

    return 1;
}

void pause_all_vcpus(void)
{
    CPUArchState *penv = first_cpu;

    qemu_clock_enable(vm_clock, false);
    while (penv) {
        CPUState *pcpu = ENV_GET_CPU(penv);
        pcpu->stop = true;
        qemu_cpu_kick(pcpu);
        penv = penv->next_cpu;
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            penv = first_cpu;
            while (penv) {
                CPUState *pcpu = ENV_GET_CPU(penv);
                pcpu->stop = false;
                pcpu->stopped = true;
                penv = penv->next_cpu;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(ENV_GET_CPU(penv));
            penv = penv->next_cpu;
        }
    }
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUArchState *penv = first_cpu;

    qemu_clock_enable(vm_clock, true);
    while (penv) {
        CPUState *pcpu = ENV_GET_CPU(penv);
        cpu_resume(pcpu);
        penv = penv->next_cpu;
    }
}

static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, cpu,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, env,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, env,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(void *_env)
{
    CPUArchState *env = _env;
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(env);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(env);
    }
}

void cpu_stop_current(void)
{
    if (cpu_single_env) {
        CPUState *cpu_single_cpu = ENV_GET_CPU(cpu_single_env);
        cpu_single_cpu->stop = false;
        cpu_single_cpu->stopped = true;
        cpu_exit(cpu_single_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

void vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return;
    }
    do_vm_stop(state);
}

/* Does a state transition even if the VM is already stopped; the current
   state is forgotten forever. */
void vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        vm_stop(state);
    } else {
        runstate_set(state);
    }
}

static int tcg_cpu_exec(CPUArchState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round(qemu_clock_deadline(vm_clock));
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag. */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}
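
/*
 * Worked example (illustrative): with icount_time_shift == 3 and a
 * vm_clock deadline 1000 ns away, qemu_icount_round() grants a budget of
 * 125 instructions.  125 fits in 16 bits, so icount_decr.u16.low = 125
 * and icount_extra = 0; cpu_exec() then exits after at most 125 guest
 * instructions, letting the timer fire on schedule.
 */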

static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock. */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUArchState *env = next_cpu;
        CPUState *cpu = ENV_GET_CPU(env);

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}

void set_numa_modes(void)
{
    CPUArchState *env;
    CPUState *cpu;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu = ENV_GET_CPU(env);
        for (i = 0; i < nb_numa_nodes; i++) {
            if (test_bit(cpu->cpu_index, node_cpumask[i])) {
                cpu->numa_node = i;
            }
        }
    }
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still lack it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}

CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUArchState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        CPUState *cpu = ENV_GET_CPU(env);
        CpuInfoList *info;

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (env == first_cpu);
        info->value->halted = cpu->halted;
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
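
/*
 * Example QMP exchange (illustrative values) for the command implemented
 * above:
 *
 *   -> { "execute": "query-cpus" }
 *   <- { "return": [ { "CPU": 0, "current": true, "halted": false,
 *                      "pc": 1048576, "thread_id": 3134 } ] }
 */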

void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUArchState *env;
    CPUState *cpu;
    uint8_t buf[1024];

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                  "a CPU number");
        return;
    }
    env = cpu->env_ptr;

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_memory_rw_debug(env, addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_physical_memory_rw(addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}
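
/*
 * Usage sketch (illustrative values): both commands stream guest memory
 * to a host file; memsave reads through the given vCPU's virtual address
 * space, pmemsave through physical addresses:
 *
 *   -> { "execute": "memsave",
 *        "arguments": { "val": 4096, "size": 1024,
 *                       "filename": "/tmp/virtual-mem-dump" } }
 *   -> { "execute": "pmemsave",
 *        "arguments": { "val": 4096, "size": 1024,
 *                       "filename": "/tmp/physical-mem-dump" } }
 */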

void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUArchState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!env->apic_state) {
            cpu_interrupt(CPU(x86_env_get_cpu(env)), CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(env->apic_state);
        }
    }
#else
    error_set(errp, QERR_UNSUPPORTED);
#endif
}