/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"

#ifdef CONFIG_LINUX

#include "qemu/compatfd.h"

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */
static CPUArchState *next_cpu;

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu->stopped || !runstate_is_running()) {
        return true;
    }
    if (!cpu->halted || qemu_cpu_has_work(cpu) ||
        kvm_async_interrupts_enabled()) {
        return false;
    }
    return true;
}
static bool all_cpu_threads_idle(void)
{
    CPUArchState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!cpu_thread_is_idle(ENV_GET_CPU(env))) {
            return false;
        }
    }
    return true;
}
/***********************************************************/
/* guest cycle counter */

/* Conversion factor from emulated instructions to virtual clock ticks.  */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed.  */
static int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;
static int64_t vm_clock_warp_start;
static int64_t qemu_icount;

typedef struct TimersState {
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

TimersState timers_state;
/* Return the virtual CPU time, based on the instruction counter.  */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUArchState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}
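
/*
 * Worked example (illustrative numbers only): with icount_time_shift == 3,
 * each emulated instruction accounts for 1 << 3 = 8 ns of vm_clock time,
 * i.e. roughly 125 MIPS.  With qemu_icount_bias == 1000 and 500 completed
 * instructions, the virtual clock reads 1000 + (500 << 3) = 5000 ns.
 */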
/* return the host CPU cycle counter and handle stop/restart */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non increasing ticks may happen if the host uses
               software suspend */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        return ti + timers_state.cpu_clock_offset;
    }
}
/* enable cpu_get_ticks() */
void cpu_enable_ticks(void)
{
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
   cpu_get_ticks() after that.  */
void cpu_disable_ticks(void)
{
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock();
        timers_state.cpu_ticks_enabled = 0;
    }
}
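
/*
 * Illustrative sketch (the helper below is hypothetical and never called):
 * the offsets kept in timers_state make the tick/clock values freeze while
 * ticks are disabled and resume without a jump when they are re-enabled.
 */
static void __attribute__((unused)) ticks_stop_resume_example(void)
{
    int64_t before, after;

    cpu_disable_ticks();          /* counting stops; offsets hold the value */
    before = cpu_get_ticks();     /* returns the saved offset directly */
    /* ... arbitrarily long pause: neither ticks nor clock advance ... */
    after = cpu_get_ticks();
    assert(after == before);      /* frozen while disabled */
    cpu_enable_ticks();           /* counting resumes from the same value */
}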
/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;
    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }
    cur_time = cpu_get_clock();
    cur_icount = qemu_get_clock_ns(vm_clock);
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}

static void icount_adjust_rt(void *opaque)
{
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
    icount_adjust();
}
static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}
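
/*
 * Worked example (illustrative): with icount_time_shift == 3, a deadline of
 * 100 ns rounds up to (100 + 7) >> 3 = 13 instructions, the smallest budget
 * whose virtual duration (13 << 3 = 104 ns) covers the deadline.
 */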
static void icount_warp_rt(void *opaque)
{
    if (vm_clock_warp_start == -1) {
        return;
    }

    if (runstate_is_running()) {
        int64_t clock = qemu_get_clock_ns(rt_clock);
        int64_t warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 1) {
            qemu_icount_bias += warp_delta;
        } else {
            /*
             * In adaptive mode, do not let the vm_clock run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock();
            int64_t cur_icount = qemu_get_clock_ns(vm_clock);
            int64_t delta = cur_time - cur_icount;
            qemu_icount_bias += MIN(warp_delta, delta);
        }
        if (qemu_clock_expired(vm_clock)) {
            qemu_notify_event();
        }
    }
    vm_clock_warp_start = -1;
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_get_clock_ns(vm_clock);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline(vm_clock);
        int64_t warp = MIN(dest - clock, deadline);
        qemu_icount_bias += warp;
        qemu_run_timers(vm_clock);
        clock = qemu_get_clock_ns(vm_clock);
    }
    qemu_notify_event();
}
void qemu_clock_warp(QEMUClock *clock)
{
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (clock != vm_clock || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance the vm_clock timer now.  This
     * ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest vm_clock timer.
     */
    icount_warp_rt(NULL);
    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(vm_clock)) {
        qemu_del_timer(icount_warp_timer);
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
    deadline = qemu_clock_deadline(vm_clock);
    if (deadline > 0) {
        /*
         * Ensure the vm_clock proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * vm_clock.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending vm_clock timer; rather
         * time could just advance to the next vm_clock event.  Instead, we
         * do stop VCPUs and only advance vm_clock after some "real" time,
         * (related to the time left until the next event) has passed.  This
         * rt_clock timer will do this.  This avoids that the warps are too
         * visible externally---for example, you will not be sending network
         * packets continuously instead of every 100ms.
         */
        qemu_mod_timer(icount_warp_timer, vm_clock_warp_start + deadline);
    } else {
        qemu_notify_event();
    }
}
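
/*
 * Illustrative timeline (assumed numbers, not taken from the code above):
 * all vCPUs go idle at rt_clock time T while the earliest vm_clock timer is
 * 10 ms away.  qemu_clock_warp() records vm_clock_warp_start = T and arms
 * icount_warp_timer for T + 10 ms.  When the guest wakes up, at the deadline
 * or earlier, icount_warp_rt() adds the real time actually slept to
 * qemu_icount_bias (capped by the icount/real-time delta in adaptive mode),
 * so idle periods pass at wall-clock speed instead of stalling vm_clock
 * until the vCPU executes instructions again.
 */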
static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields      = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};
void configure_icount(const char *option)
{
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option) {
        return;
    }

    icount_warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = qemu_new_timer_ms(rt_clock, icount_adjust_rt, NULL);
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_vm_timer = qemu_new_timer_ns(vm_clock, icount_adjust_vm, NULL);
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
}
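
/*
 * Usage sketch (illustrative; the caller is assumed to be the -icount
 * option handling in vl.c, which is not part of this file).  The accepted
 * forms of the option string are:
 *
 *   configure_icount(NULL)    - keep ticks-based timing, icount disabled
 *   configure_icount("auto")  - adaptive shift, retuned by icount_adjust()
 *   configure_icount("3")     - fixed shift: one insn counts as 2^3 ns, ~125 MIPS
 */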
/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUArchState *env;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu = ENV_GET_CPU(env);
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}
void cpu_synchronize_all_states(void)
{
    CPUArchState *env;

    for (env = first_cpu; env; env = env->next_cpu) {
        cpu_synchronize_state(ENV_GET_CPU(env));
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUArchState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(ENV_GET_CPU(cpu));
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUArchState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(ENV_GET_CPU(cpu));
    }
}
bool cpu_is_stopped(CPUState *cpu)
{
    return !runstate_is_running() || cpu->stopped;
}

static void do_vm_stop(RunState state)
{
    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        bdrv_drain_all();
        bdrv_flush_all();
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu->stopped || !runstate_is_running()) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    gdb_set_stop_cpu(env);
    qemu_system_debug_request();
    cpu->stopped = true;
}
static void cpu_signal(int sig)
{
    if (cpu_single_env) {
        cpu_exit(ENV_GET_CPU(cpu_single_env));
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}
static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}
#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */
#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUArchState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}
static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUArchState *env)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */
static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

static QemuCond qemu_cpu_cond;
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUArchState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}
static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}
static void qemu_tcg_wait_io_event(void)
{
    CPUArchState *env;

    while (all_cpu_threads_idle()) {
       /* Start accounting real time to the virtual clock if the CPUs
          are idle.  */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(ENV_GET_CPU(env));
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUArchState *env = arg;
    CPUState *cpu = ENV_GET_CPU(env);
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu_single_env = env;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}
static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUArchState *env = arg;
    CPUState *cpu = ENV_GET_CPU(env);
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    cpu_single_env = env;
    while (1) {
        cpu_single_env = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        cpu_single_env = env;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}
static void tcg_exec_all(void);

static void tcg_signal_cpu_creation(CPUState *cpu, void *data)
{
    cpu->thread_id = qemu_get_thread_id();
    cpu->created = true;
}

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    CPUArchState *env;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_for_each_cpu(tcg_signal_cpu_creation, NULL);
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (ENV_GET_CPU(first_cpu)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            qemu_wait_io_event_common(ENV_GET_CPU(env));
        }
    }

    while (1) {
        tcg_exec_all();
        if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}
void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);
    CPUState *cpu_single_cpu = ENV_GET_CPU(cpu_single_env);

    if (!cpu_single_cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu_single_cpu);
        cpu_single_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

static bool qemu_in_vcpu_thread(void)
{
    return cpu_single_env && qemu_cpu_is_self(ENV_GET_CPU(cpu_single_env));
}
void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(ENV_GET_CPU(first_cpu));
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}
static int all_vcpus_paused(void)
{
    CPUArchState *penv = first_cpu;

    while (penv) {
        CPUState *pcpu = ENV_GET_CPU(penv);
        if (!pcpu->stopped) {
            return 0;
        }
        penv = penv->next_cpu;
    }

    return 1;
}

void pause_all_vcpus(void)
{
    CPUArchState *penv = first_cpu;

    qemu_clock_enable(vm_clock, false);
    while (penv) {
        CPUState *pcpu = ENV_GET_CPU(penv);
        pcpu->stop = true;
        qemu_cpu_kick(pcpu);
        penv = penv->next_cpu;
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            penv = first_cpu;
            while (penv) {
                CPUState *pcpu = ENV_GET_CPU(penv);
                pcpu->stop = false;
                pcpu->stopped = true;
                penv = penv->next_cpu;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(ENV_GET_CPU(penv));
            penv = penv->next_cpu;
        }
    }
}
void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUArchState *penv = first_cpu;

    qemu_clock_enable(vm_clock, true);
    while (penv) {
        CPUState *pcpu = ENV_GET_CPU(penv);
        cpu_resume(pcpu);
        penv = penv->next_cpu;
    }
}
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, cpu,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}
static void qemu_kvm_start_vcpu(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, env,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, env,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
void qemu_init_vcpu(void *_env)
{
    CPUArchState *env = _env;
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(env);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(env);
    }
}
void cpu_stop_current(void)
{
    if (cpu_single_env) {
        CPUState *cpu_single_cpu = ENV_GET_CPU(cpu_single_env);
        cpu_single_cpu->stop = false;
        cpu_single_cpu->stopped = true;
        cpu_exit(cpu_single_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

void vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return;
    }
    do_vm_stop(state);
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
void vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        vm_stop(state);
    } else {
        runstate_set(state);
    }
}
static int tcg_cpu_exec(CPUArchState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round(qemu_clock_deadline(vm_clock));
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}
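
/*
 * Worked example for the budget split above (illustrative numbers): a
 * vm_clock deadline of 600000 ns with icount_time_shift == 3 rounds to
 * 75000 instructions.  decr = 0xffff (65535) goes into icount_decr.u16.low
 * and the remaining 75000 - 65535 = 9465 into icount_extra, so the
 * translated code stops for a timer check after at most 65535 instructions.
 */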
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock.  */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUArchState *env = next_cpu;
        CPUState *cpu = ENV_GET_CPU(env);

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}
void set_numa_modes(void)
{
    CPUArchState *env;
    CPUState *cpu;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu = ENV_GET_CPU(env);
        for (i = 0; i < nb_numa_nodes; i++) {
            if (test_bit(cpu->cpu_index, node_cpumask[i])) {
                cpu->numa_node = i;
            }
        }
    }
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}
CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUArchState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        CPUState *cpu = ENV_GET_CPU(env);
        CpuInfoList *info;

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (env == first_cpu);
        info->value->halted = cpu->halted;
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    CPUArchState *env;
    uint8_t buf[1024];

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                  "a CPU number");
        return;
    }
    env = cpu->env_ptr;

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_memory_rw_debug(env, addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_physical_memory_rw(addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}
void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUArchState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!env->apic_state) {
            cpu_interrupt(CPU(x86_env_get_cpu(env)), CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(env->apic_state);
        }
    }
#else
    error_set(errp, QERR_UNSUPPORTED);
#endif
}