/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || qemu_cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed. */
static int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;
static int64_t vm_clock_warp_start;
static int64_t qemu_icount;

typedef struct TimersState {
    /* Protected by BQL. */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read outside the BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

static TimersState timers_state;
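
/* Locking discipline for timers_state, as used by the accessors below:
 * writers (cpu_enable_ticks(), cpu_disable_ticks()) already hold the BQL,
 * which acts as the mutex half of the seqlock, and wrap their updates in
 * seqlock_write_lock()/seqlock_write_unlock().  Lock-free readers use the
 * usual seqlock retry loop, as cpu_get_clock() does:
 *
 *     do {
 *         start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
 *         ...read cpu_clock_offset...
 *     } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
 */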

/* Return the virtual CPU time, based on the instruction counter. */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = qemu_icount;
    if (cpu) {
        CPUArchState *env = cpu->env_ptr;
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

/* return the host CPU cycle counter and handle stop/restart */
/* Caller must hold the BQL */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non-increasing ticks may happen if the host uses
               software suspend. */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}

static int64_t cpu_get_clock_locked(void)
{
    int64_t ti;

    if (!timers_state.cpu_ticks_enabled) {
        ti = timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        ti += timers_state.cpu_clock_offset;
    }

    return ti;
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

/* enable cpu_get_ticks()
 * Caller must hold the BQL, which serves as the mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, the thing really protected by the seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* disable cpu_get_ticks(): the clock is stopped.  You must not call
 * cpu_get_ticks() after that.
 * Caller must hold the BQL, which serves as the mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, the thing really protected by the seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle, real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;
    /* If the VM is not running, then do nothing. */
    if (!runstate_is_running()) {
        return;
    }
    cur_time = cpu_get_clock();
    cur_icount = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down. */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up. */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}

static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
    icount_adjust();
}

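/* Convert a QEMU_CLOCK_VIRTUAL deadline in nanoseconds into an instruction
 * count, rounding up; with icount, one instruction stands for
 * 2^icount_time_shift nanoseconds of virtual time (see cpu_get_icount()). */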
static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

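/* Runs from icount_warp_timer: fold the real time elapsed since
 * qemu_clock_warp() recorded vm_clock_warp_start into qemu_icount_bias,
 * so QEMU_CLOCK_VIRTUAL keeps advancing while all vCPUs are idle. */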
static void icount_warp_rt(void *opaque)
{
    if (vm_clock_warp_start == -1) {
        return;
    }

    if (runstate_is_running()) {
        int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        int64_t warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 1) {
            qemu_icount_bias += warp_delta;
        } else {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock();
            int64_t cur_icount = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
            int64_t delta = cur_time - cur_icount;
            qemu_icount_bias += MIN(warp_delta, delta);
        }
        if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
        }
    }
    vm_clock_warp_start = -1;
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = MIN(dest - clock, deadline);
        qemu_icount_bias += warp;
        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}

void qemu_clock_warp(QEMUClockType type)
{
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
     * This ensures that the deadline for the timer is computed correctly
     * below.  This also makes sure that the insn counter is synchronized
     * before the CPU starts running, in case the CPU is woken by an event
     * other than the earliest QEMU_CLOCK_VIRTUAL timer.
     */
    icount_warp_rt(NULL);
    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(QEMU_CLOCK_VIRTUAL)) {
        timer_del(icount_warp_timer);
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount. */
        return;
    }

    vm_clock_warp_start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    /* We want to use the earliest deadline from ALL vm_clocks */
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

    /* Maintain prior (possibly buggy) behaviour where if no deadline
     * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
     * INT32_MAX nanoseconds ahead, we still use INT32_MAX
     * nanoseconds.
     */
    if ((deadline < 0) || (deadline > INT32_MAX)) {
        deadline = INT32_MAX;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
         * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
         * event.  Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
         * after some "real" time (related to the time left until the next
         * event) has passed.  The QEMU_CLOCK_REALTIME timer will do this.
         * This keeps the warps from being visible externally; for example,
         * you will not be sending network packets continuously instead of
         * every 100ms.
         */
        timer_mod(icount_warp_timer, vm_clock_warp_start + deadline);
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};

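/* Reached from the -icount command-line option.  option is NULL when
 * icount is disabled (the seqlock and vmstate are still initialized), a
 * fixed shift count such as "3" for use_icount == 1, or "auto" for
 * use_icount == 2, in which case the two timers armed below keep
 * re-tuning icount_time_shift at run time. */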
void configure_icount(const char *option)
{
    seqlock_init(&timers_state.vm_clock_seqlock, NULL);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option) {
        return;
    }

    icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
                                     icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway. */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers. */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
                                   icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        monitor_protocol_event(QEVENT_STOP, NULL);
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

static void cpu_signal(int sig)
{
    if (current_cpu) {
        cpu_exit(current_cpu);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

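/* Run func(data) on cpu's thread and wait until it has completed.  The
 * caller is expected to hold the BQL (qemu_global_mutex); waiting on
 * qemu_work_cond drops it while the target thread drains its queue in
 * flush_queued_work(). */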
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}

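/* As run_on_cpu(), but asynchronous: the work item is heap-allocated and,
 * since wi->free is set, released by flush_queued_work() after it runs. */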
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}

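/* Drain cpu's queued work items on the vCPU thread, with the BQL held;
 * called from qemu_wait_io_event_common(). */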
static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle. */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}

static void tcg_exec_all(void);

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (QTAILQ_FIRST(&cpus)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

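/* Force the vCPU thread out of guest code.  On POSIX hosts this delivers
 * SIG_IPI; on Windows, which has no signals, the thread is suspended,
 * cpu_signal() is run on its behalf, and the thread is resumed. */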
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);

    if (!current_cpu->thread_kicked) {
        qemu_cpu_kick_thread(current_cpu);
        current_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

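/* With TCG, the vCPU thread holds the BQL almost continuously, so a plain
 * qemu_mutex_lock() here could block for a long time.  If the trylock
 * fails, kick the vCPU thread out of guest code first;
 * iothread_requesting_mutex keeps qemu_tcg_wait_io_event() parked on
 * qemu_io_proceeded_cond until we have finished. */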
void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return 0;
        }
    }

    return 1;
}

void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}

static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, cpu,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}

/* Does a state transition even if the VM is already stopped;
   the current state is forgotten forever. */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}

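/* Run guest code on env's CPU.  Under icount, first grant an instruction
 * budget covering the time to the next QEMU_CLOCK_VIRTUAL deadline: the
 * low 16 bits go in env->icount_decr.u16.low, where the generated code
 * decrements them, and the remainder is parked in env->icount_extra. */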
static int tcg_cpu_exec(CPUArchState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int64_t deadline;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        count = qemu_icount_round(deadline);
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag. */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;
        CPUArchState *env = cpu->env_ptr;

        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}

void set_numa_modes(void)
{
    CPUState *cpu;
    int i;

    CPU_FOREACH(cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (test_bit(cpu->cpu_index, node_cpumask[i])) {
                cpu->numa_node = i;
            }
        }
    }
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}

CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}

void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                  "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_memory_rw_debug(cpu, addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_physical_memory_rw(addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);
        CPUX86State *env = &cpu->env;

        if (!env->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(env->apic_state);
        }
    }
#elif defined(TARGET_S390X)
    CPUState *cs;
    S390CPU *cpu;

    CPU_FOREACH(cs) {
        cpu = S390_CPU(cs);
        if (cpu->env.cpu_num == monitor_get_cpu_index()) {
            if (s390_cpu_restart(S390_CPU(cs)) == -1) {
                error_set(errp, QERR_UNSUPPORTED);
                return;
            }
            break;
        }
    }
#else
    error_set(errp, QERR_UNSUPPORTED);
#endif
}