/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/config-file.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm.h"
#include "sysemu/hax.h"
#include "sysemu/hvf.h"
#include "sysemu/whpx.h"
#include "exec/exec-all.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
#include "tcg.h"
#include "hw/nmi.h"
#include "sysemu/replay.h"
#include "hw/boards.h"

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

int64_t max_delay;
int64_t max_advance;

/* vcpu throttling controls */
static QEMUTimer *throttle_timer;
static unsigned int throttle_percentage;

#define CPU_THROTTLE_PCT_MIN 1
#define CPU_THROTTLE_PCT_MAX 99
#define CPU_THROTTLE_TIMESLICE_NS 10000000

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

static bool icount_sleep = true;
/* Conversion factor from emulated instructions to virtual clock ticks. */
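/* (e.g. with icount_time_shift == 3 each emulated instruction accounts
 * for 2^3 = 8 ns of virtual time, i.e. roughly 125 MIPS) */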
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10

typedef struct TimersState {
    /* Protected by BQL. */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read out of BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;

    /* Compensate for varying guest execution speed. */
    int64_t qemu_icount_bias;
    /* Only written by TCG thread */
    int64_t qemu_icount;
    /* for adjusting icount */
    int64_t vm_clock_warp_start;
    QEMUTimer *icount_rt_timer;
    QEMUTimer *icount_vm_timer;
    QEMUTimer *icount_warp_timer;
} TimersState;

static TimersState timers_state;
bool mttcg_enabled;

/*
 * We default to false if we know other options have been enabled
 * which are currently incompatible with MTTCG. Otherwise when each
 * guest (target) has been updated to support:
 *   - atomic instructions
 *   - memory ordering primitives (barriers)
 * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
 *
 * Once a guest architecture has been converted to the new primitives
 * there are two remaining limitations to check.
 *
 * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
 * - The host must have a stronger memory order than the guest
 *
 * It may be possible in future to support strong guests on weak hosts
 * but that will require tagging all load/stores in a guest with their
 * implicit memory order requirements which would likely slow things
 * down a lot.
 */

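/* The guest's default memory order must be no stronger than what the TCG
 * backend guarantees, i.e. every ordering bit set in TCG_GUEST_DEFAULT_MO
 * must also be set in TCG_TARGET_DEFAULT_MO.
 */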
static bool check_tcg_memory_orders_compatible(void)
{
#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
    return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
#else
    return false;
#endif
}

static bool default_mttcg_enabled(void)
{
    if (use_icount || TCG_OVERSIZED_GUEST) {
        return false;
    } else {
#ifdef TARGET_SUPPORTS_MTTCG
        return check_tcg_memory_orders_compatible();
#else
        return false;
#endif
    }
}

void qemu_tcg_configure(QemuOpts *opts, Error **errp)
{
    const char *t = qemu_opt_get(opts, "thread");
    if (t) {
        if (strcmp(t, "multi") == 0) {
            if (TCG_OVERSIZED_GUEST) {
                error_setg(errp, "No MTTCG when guest word size > hosts");
            } else if (use_icount) {
                error_setg(errp, "No MTTCG when icount is enabled");
            } else {
#ifndef TARGET_SUPPORTS_MTTCG
                error_report("Guest not yet converted to MTTCG - "
                             "you may get unexpected results");
#endif
                if (!check_tcg_memory_orders_compatible()) {
                    error_report("Guest expects a stronger memory ordering "
                                 "than the host provides");
                    error_printf("This may cause strange/hard to debug errors\n");
                }
                mttcg_enabled = true;
            }
        } else if (strcmp(t, "single") == 0) {
            mttcg_enabled = false;
        } else {
            error_setg(errp, "Invalid 'thread' setting %s", t);
        }
    } else {
        mttcg_enabled = default_mttcg_enabled();
    }
}

/* The current number of executed instructions is based on what we
 * originally budgeted minus the current state of the decrementing
 * icount counters in extra/u16.low.
 */
static int64_t cpu_get_icount_executed(CPUState *cpu)
{
    return cpu->icount_budget - (cpu->icount_decr.u16.low + cpu->icount_extra);
}

/*
 * Update the global shared timer_state.qemu_icount to take into
 * account executed instructions. This is done by the TCG vCPU
 * thread so the main-loop can see time has moved forward.
 */
void cpu_update_icount(CPUState *cpu)
{
    int64_t executed = cpu_get_icount_executed(cpu);
    cpu->icount_budget -= executed;

#ifdef CONFIG_ATOMIC64
    atomic_set__nocheck(&timers_state.qemu_icount,
                        atomic_read__nocheck(&timers_state.qemu_icount) +
                        executed);
#else /* FIXME: we need 64bit atomics to do this safely */
    timers_state.qemu_icount += executed;
#endif
}

int64_t cpu_get_icount_raw(void)
{
    CPUState *cpu = current_cpu;

    if (cpu && cpu->running) {
        if (!cpu->can_do_io) {
            error_report("Bad icount read");
            exit(1);
        }
        /* Take into account what has run */
        cpu_update_icount(cpu);
    }
#ifdef CONFIG_ATOMIC64
    return atomic_read__nocheck(&timers_state.qemu_icount);
#else /* FIXME: we need 64bit atomics to do this safely */
    return timers_state.qemu_icount;
#endif
}

/* Return the virtual CPU time, based on the instruction counter. */
static int64_t cpu_get_icount_locked(void)
{
    int64_t icount = cpu_get_icount_raw();
    return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
}

int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}

int64_t cpu_icount_to_ns(int64_t icount)
{
    return icount << icount_time_shift;
}

/* return the time elapsed in VM between vm_start and vm_stop. Unless
 * icount is active, cpu_get_ticks() uses units of the host CPU cycle
 * counter.
 *
 * Caller must hold the BQL
 */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_host_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Note: non increasing ticks may happen if the host uses
           software suspend */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}

static int64_t cpu_get_clock_locked(void)
{
    int64_t time;

    time = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        time += get_clock();
    }

    return time;
}

/* Return the monotonic time elapsed in VM, i.e.,
 * the time between vm_start and vm_stop
 */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, the real thing protected by the seqlock is cpu_clock_offset. */
    seqlock_write_begin(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_end(&timers_state.vm_clock_seqlock);
}

/* disable cpu_get_ticks(): the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, the real thing protected by the seqlock is cpu_clock_offset. */
    seqlock_write_begin(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_host_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_end(&timers_state.vm_clock_seqlock);
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)

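/* icount_adjust() compares virtual time (cur_icount) with real time
 * (cur_time): if the guest keeps running ahead, icount_time_shift is
 * decremented so each instruction accounts for fewer nanoseconds; if it
 * keeps falling behind, the shift is incremented.  qemu_icount_bias is then
 * recomputed so that cpu_get_icount_locked() stays continuous across the
 * change.
 */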
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex. */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing. */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_begin(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead. Slow time down. */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind. Speed time up. */
        icount_time_shift++;
    }
    last_delta = delta;
    timers_state.qemu_icount_bias = cur_icount
                              - (timers_state.qemu_icount << icount_time_shift);
    seqlock_write_end(&timers_state.vm_clock_seqlock);
}

static void icount_adjust_rt(void *opaque)
{
    timer_mod(timers_state.icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(timers_state.icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

static void icount_warp_rt(void)
{
    unsigned seq;
    int64_t warp_start;

    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    do {
        seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        warp_start = timers_state.vm_clock_warp_start;
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));

    if (warp_start == -1) {
        return;
    }

    seqlock_write_begin(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT,
                                     cpu_get_clock_locked());
        int64_t warp_delta;

        warp_delta = clock - timers_state.vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = clock - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        timers_state.qemu_icount_bias += warp_delta;
    }
    timers_state.vm_clock_warp_start = -1;
    seqlock_write_end(&timers_state.vm_clock_seqlock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static void icount_timer_cb(void *opaque)
{
    /* No need for a checkpoint because the timer already synchronizes
     * with CHECKPOINT_CLOCK_VIRTUAL_RT.
     */
    icount_warp_rt();
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    AioContext *aio_context;
    assert(qtest_enabled());
    aio_context = qemu_get_aio_context();
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);

        seqlock_write_begin(&timers_state.vm_clock_seqlock);
        timers_state.qemu_icount_bias += warp;
        seqlock_write_end(&timers_state.vm_clock_seqlock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}

void qemu_start_warp_timer(void)
{
    int64_t clock;
    int64_t deadline;

    if (!use_icount) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    /* warp clock deterministically in record/replay mode */
    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
        return;
    }

    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount. */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        static bool notified;
        if (!icount_sleep && !notified) {
            warn_report("icount sleep disabled and no active timers");
            notified = true;
        }
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         */
        if (!icount_sleep) {
            /*
             * We never let VCPUs sleep in no sleep icount mode.
             * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
             * to the next QEMU_CLOCK_VIRTUAL event and notify it.
             * It is useful when we want a deterministic execution time,
             * isolated from host latencies.
             */
            seqlock_write_begin(&timers_state.vm_clock_seqlock);
            timers_state.qemu_icount_bias += deadline;
            seqlock_write_end(&timers_state.vm_clock_seqlock);
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
        } else {
            /*
             * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
             * "real" time (related to the time left until the next event) has
             * passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
             * This avoids the warps being visible externally; for example,
             * you will not be sending network packets continuously instead of
             * every 100ms.
             */
            seqlock_write_begin(&timers_state.vm_clock_seqlock);
            if (timers_state.vm_clock_warp_start == -1
                || timers_state.vm_clock_warp_start > clock) {
                timers_state.vm_clock_warp_start = clock;
            }
            seqlock_write_end(&timers_state.vm_clock_seqlock);
            timer_mod_anticipate(timers_state.icount_warp_timer,
                                 clock + deadline);
        }
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static void qemu_account_warp_timer(void)
{
    if (!use_icount || !icount_sleep) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    /* warp clock deterministically in record/replay mode */
    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
        return;
    }

    timer_del(timers_state.icount_warp_timer);
    icount_warp_rt();
}

static bool icount_state_needed(void *opaque)
{
    return use_icount;
}

static bool warp_timer_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_warp_timer != NULL;
}

static bool adjust_timers_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_rt_timer != NULL;
}

/*
 * Subsection for warp timer migration is optional, because the timer
 * may not be created.
 */
static const VMStateDescription icount_vmstate_warp_timer = {
    .name = "timer/icount/warp_timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = warp_timer_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(vm_clock_warp_start, TimersState),
        VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription icount_vmstate_adjust_timers = {
    .name = "timer/icount/timers",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = adjust_timers_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
        VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icount_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_warp_timer,
        &icount_vmstate_adjust_timers,
        NULL
    }
};

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_timers,
        NULL
    }
};

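/* Make the vCPU sleep for part of each throttle timeslice.  With throttling
 * at pct, each CPU_THROTTLE_TIMESLICE_NS (10 ms) of execution is followed by
 * a sleep of pct / (1 - pct) timeslices: e.g. 50% sleeps 10 ms for every
 * 10 ms run, 99% sleeps 990 ms for every 10 ms run.
 */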
static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
{
    double pct;
    double throttle_ratio;
    long sleeptime_ns;

    if (!cpu_throttle_get_percentage()) {
        return;
    }

    pct = (double)cpu_throttle_get_percentage()/100;
    throttle_ratio = pct / (1 - pct);
    sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS);

    qemu_mutex_unlock_iothread();
    g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */
    qemu_mutex_lock_iothread();
    atomic_set(&cpu->throttle_thread_scheduled, 0);
}

static void cpu_throttle_timer_tick(void *opaque)
{
    CPUState *cpu;
    double pct;

    /* Stop the timer if needed */
    if (!cpu_throttle_get_percentage()) {
        return;
    }
    CPU_FOREACH(cpu) {
        if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
            async_run_on_cpu(cpu, cpu_throttle_thread,
                             RUN_ON_CPU_NULL);
        }
    }

    pct = (double)cpu_throttle_get_percentage()/100;
    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                                   CPU_THROTTLE_TIMESLICE_NS / (1-pct));
}

void cpu_throttle_set(int new_throttle_pct)
{
    /* Ensure throttle percentage is within valid range */
    new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
    new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);

    atomic_set(&throttle_percentage, new_throttle_pct);

    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                                       CPU_THROTTLE_TIMESLICE_NS);
}

void cpu_throttle_stop(void)
{
    atomic_set(&throttle_percentage, 0);
}

bool cpu_throttle_active(void)
{
    return (cpu_throttle_get_percentage() != 0);
}

int cpu_throttle_get_percentage(void)
{
    return atomic_read(&throttle_percentage);
}

void cpu_ticks_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                  cpu_throttle_timer_tick, NULL);
}

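/* Parse the -icount options: "shift" is either a fixed power-of-two scale
 * (use_icount == 1) or "auto" for the adaptive mode below (use_icount == 2);
 * "align" enables host/guest clock alignment and requires an explicit shift
 * and sleep=on; "sleep" controls whether idle vCPUs really sleep or the
 * virtual clock is warped straight to the next timer deadline.
 */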
void configure_icount(QemuOpts *opts, Error **errp)
{
    const char *option;
    char *rem_str = NULL;

    option = qemu_opt_get(opts, "shift");
    if (!option) {
        if (qemu_opt_get(opts, "align") != NULL) {
            error_setg(errp, "Please specify shift option when using align");
        }
        return;
    }

    icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
    if (icount_sleep) {
        timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                         icount_timer_cb, NULL);
    }

    icount_align_option = qemu_opt_get_bool(opts, "align", false);

    if (icount_align_option && !icount_sleep) {
        error_setg(errp, "align=on and sleep=off are incompatible");
    }
    if (strcmp(option, "auto") != 0) {
        errno = 0;
        icount_time_shift = strtol(option, &rem_str, 0);
        if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
            error_setg(errp, "icount: Invalid shift value");
        }
        use_icount = 1;
        return;
    } else if (icount_align_option) {
        error_setg(errp, "shift=auto and align=on are incompatible");
    } else if (!icount_sleep) {
        error_setg(errp, "shift=auto and sleep=off are incompatible");
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway. */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers. */
    timers_state.vm_clock_warp_start = -1;
    timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                                   icount_adjust_rt, NULL);
    timer_mod(timers_state.icount_rt_timer,
                   qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                        icount_adjust_vm, NULL);
    timer_mod(timers_state.icount_vm_timer,
                   qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   NANOSECONDS_PER_SECOND / 10);
}

/***********************************************************/
/* TCG vCPU kick timer
 *
 * The kick timer is responsible for moving single threaded vCPU
 * emulation on to the next vCPU. If more than one vCPU is running, a
 * timer event will force a cpu->exit so the next vCPU can get
 * scheduled.
 *
 * The timer is removed while all vCPUs are idle and restarted again
 * once there is work to do.
 */

static QEMUTimer *tcg_kick_vcpu_timer;
static CPUState *tcg_current_rr_cpu;

#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)

static inline int64_t qemu_tcg_next_kick(void)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
}

/* Kick the currently round-robin scheduled vCPU */
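/* The round-robin thread can move tcg_current_rr_cpu on to the next vCPU
 * while we are kicking, so re-read it and retry until the exit request has
 * reached whichever vCPU is currently executing.
 */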
static void qemu_cpu_kick_rr_cpu(void)
{
    CPUState *cpu;
    do {
        cpu = atomic_mb_read(&tcg_current_rr_cpu);
        if (cpu) {
            cpu_exit(cpu);
        }
    } while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
}

static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
{
}

void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
{
    if (!use_icount || type != QEMU_CLOCK_VIRTUAL) {
        qemu_notify_event();
        return;
    }

    if (!qemu_in_vcpu_thread() && first_cpu) {
        /* qemu_cpu_kick is not enough to kick a halted CPU out of
         * qemu_tcg_wait_io_event.  async_run_on_cpu, instead,
         * causes cpu_thread_is_idle to return false.  This way,
         * handle_icount_deadline can run.
         */
        async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
    }
}

static void kick_tcg_thread(void *opaque)
{
    timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    qemu_cpu_kick_rr_cpu();
}

static void start_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);
    if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
        tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                           kick_tcg_thread, NULL);
        timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    }
}

static void stop_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);
    if (tcg_kick_vcpu_timer) {
        timer_del(tcg_kick_vcpu_timer);
        tcg_kick_vcpu_timer = NULL;
    }
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
        /* TODO: move to cpu_synchronize_state() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_state(cpu);
        }
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
        /* TODO: move to cpu_synchronize_post_reset() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_post_reset(cpu);
        }
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
        /* TODO: move to cpu_synchronize_post_init() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_post_init(cpu);
        }
    }
}

void cpu_synchronize_all_pre_loadvm(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_pre_loadvm(cpu);
    }
}

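/* Stop the VM: disable the ticks/clock, pause every vCPU, move to the new
 * run state and notify listeners (optionally emitting the QMP STOP event),
 * then drain and flush all block devices.  Returns the result of
 * bdrv_flush_all().
 */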
static int do_vm_stop(RunState state, bool send_stop)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        if (send_stop) {
            qapi_event_send_stop(&error_abort);
        }
    }

    bdrv_drain_all();
    replay_disable_events();
    ret = bdrv_flush_all();

    return ret;
}

/* Special vm_stop() variant for terminating the process.  Historically clients
 * did not expect a QMP STOP event and so we need to retain compatibility.
 */
int vm_shutdown(void)
{
    return do_vm_stop(RUN_STATE_SHUTDOWN, false);
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx)
{
    if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) {
        sigbus_reraise();
    }

    if (current_cpu) {
        /* Called asynchronously in VCPU thread.  */
        if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    } else {
        /* Called synchronously (via signalfd) in main thread.  */
        if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
#else /* !CONFIG_LINUX */
static void qemu_init_sigbus(void)
{
}
#endif /* !CONFIG_LINUX */

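/* The Big QEMU Lock: qemu_mutex_lock_iothread()/qemu_mutex_unlock_iothread()
 * below operate on this single global mutex.
 */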
static QemuMutex qemu_global_mutex;

static QemuThread io_thread;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
}

static void qemu_kvm_destroy_vcpu(CPUState *cpu)
{
    if (kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}

static void qemu_tcg_destroy_vcpu(CPUState *cpu)
{
}

static void qemu_cpu_stop(CPUState *cpu, bool exit)
{
    g_assert(qemu_cpu_is_self(cpu));
    cpu->stop = false;
    cpu->stopped = true;
    if (exit) {
        cpu_exit(cpu);
    }
    qemu_cond_broadcast(&qemu_pause_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    atomic_mb_set(&cpu->thread_kicked, false);
    if (cpu->stop) {
        qemu_cpu_stop(cpu, false);
    }
    process_queued_cpu_work(cpu);
}

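/* With the single-threaded (round-robin) TCG all vCPUs are serviced by one
 * thread, so only go to sleep (and stop the kick timer) when every vCPU is
 * idle, and restart the timer before going back to work.
 */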
static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
{
    while (all_cpu_threads_idle()) {
        stop_tcg_kick_timer();
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    start_tcg_kick_timer();

    qemu_wait_io_event_common(cpu);
}

static void qemu_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

#ifdef _WIN32
    /* Eat dummy APC queued by qemu_cpu_kick_thread.  */
    if (!tcg_enabled()) {
        SleepEx(0, TRUE);
    }
#endif
    qemu_wait_io_event_common(cpu);
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        error_report("kvm_init_vcpu failed: %s", strerror(-r));
        exit(1);
    }

    kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    do {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    qemu_kvm_destroy_vcpu(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    error_report("qtest is not supported under Windows");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    do {
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug);

    rcu_unregister_thread();
    return NULL;
#endif
}

static int64_t tcg_get_icount_limit(void)
{
    int64_t deadline;

    if (replay_mode != REPLAY_MODE_PLAY) {
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        return qemu_icount_round(deadline);
    } else {
        return replay_get_instructions();
    }
}

static void handle_icount_deadline(void)
{
    assert(qemu_in_vcpu_thread());
    if (use_icount) {
        int64_t deadline =
            qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        if (deadline == 0) {
            /* Wake up other AioContexts.  */
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        }
    }
}

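/* Split the instruction budget for the next execution window between the
 * 16-bit countdown used by generated code (icount_decr.u16.low) and
 * icount_extra, e.g. a budget of 200000 instructions becomes
 * low = 65535 and icount_extra = 134465.
 */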
static void prepare_icount_for_run(CPUState *cpu)
{
    if (use_icount) {
        int insns_left;

        /* These should always be cleared by process_icount_data after
         * each vCPU execution. However u16.high can be raised
         * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
         */
        g_assert(cpu->icount_decr.u16.low == 0);
        g_assert(cpu->icount_extra == 0);

        cpu->icount_budget = tcg_get_icount_limit();
        insns_left = MIN(0xffff, cpu->icount_budget);
        cpu->icount_decr.u16.low = insns_left;
        cpu->icount_extra = cpu->icount_budget - insns_left;
    }
}

static void process_icount_data(CPUState *cpu)
{
    if (use_icount) {
        /* Account for executed instructions */
        cpu_update_icount(cpu);

        /* Reset the counters */
        cpu->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        cpu->icount_budget = 0;

        replay_account_executed_instructions();
    }
}


static int tcg_cpu_exec(CPUState *cpu)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    qemu_mutex_unlock_iothread();
    cpu_exec_start(cpu);
    ret = cpu_exec(cpu);
    cpu_exec_end(cpu);
    qemu_mutex_lock_iothread();
#ifdef CONFIG_PROFILER
    tcg_time += profile_getclock() - ti;
#endif
    return ret;
}

/* Destroy any remaining vCPUs which have been unplugged and have
 * finished running
 */
static void deal_with_unplugged_cpus(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->unplug && !cpu_can_run(cpu)) {
            qemu_tcg_destroy_vcpu(cpu);
            cpu->created = false;
            qemu_cond_signal(&qemu_cpu_cond);
            break;
        }
    }
}

/* Single-threaded TCG
 *
 * In the single-threaded case each vCPU is simulated in turn. If
 * there is more than a single vCPU we create a simple timer to kick
 * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
 * This is done explicitly rather than relying on side-effects
 * elsewhere.
 */

static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    rcu_register_thread();
    tcg_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->created = true;
    cpu->can_do_io = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            current_cpu = cpu;
            qemu_wait_io_event_common(cpu);
        }
    }

    start_tcg_kick_timer();

    cpu = first_cpu;

    /* process any pending work */
    cpu->exit_request = 1;

    while (1) {
        /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
        qemu_account_warp_timer();

        /* Run the timers here.  This is much more efficient than
         * waking up the I/O thread and waiting for completion.
         */
        handle_icount_deadline();

        if (!cpu) {
            cpu = first_cpu;
        }

        while (cpu && !cpu->queued_work_first && !cpu->exit_request) {

            atomic_mb_set(&tcg_current_rr_cpu, cpu);
            current_cpu = cpu;

            qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                              (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

            if (cpu_can_run(cpu)) {
                int r;

                prepare_icount_for_run(cpu);

                r = tcg_cpu_exec(cpu);

                process_icount_data(cpu);

                if (r == EXCP_DEBUG) {
                    cpu_handle_guest_debug(cpu);
                    break;
                } else if (r == EXCP_ATOMIC) {
                    qemu_mutex_unlock_iothread();
                    cpu_exec_step_atomic(cpu);
                    qemu_mutex_lock_iothread();
                    break;
                }
            } else if (cpu->stop) {
                if (cpu->unplug) {
                    cpu = CPU_NEXT(cpu);
                }
                break;
            }

            cpu = CPU_NEXT(cpu);
        } /* while (cpu && !cpu->exit_request).. */

        /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
        atomic_set(&tcg_current_rr_cpu, NULL);

        if (cpu && cpu->exit_request) {
            atomic_mb_set(&cpu->exit_request, 0);
        }

        qemu_tcg_rr_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
        deal_with_unplugged_cpus();
    }

    rcu_unregister_thread();
    return NULL;
}

static void *qemu_hax_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    rcu_register_thread();
    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->created = true;
    cpu->halted = 0;
    current_cpu = cpu;

    hax_init_vcpu(cpu);
    qemu_cond_signal(&qemu_cpu_cond);

    do {
        if (cpu_can_run(cpu)) {
            r = hax_smp_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }

        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));
    rcu_unregister_thread();
    return NULL;
}

/* The HVF-specific vCPU thread function. This one should only run when the host
 * CPU supports the VMX "unrestricted guest" feature. */
static void *qemu_hvf_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    int r;

    assert(hvf_enabled());

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    hvf_init_vcpu(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    do {
        if (cpu_can_run(cpu)) {
            r = hvf_vcpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    hvf_vcpu_destroy(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}

static void *qemu_whpx_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = whpx_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "whpx_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    do {
        if (cpu_can_run(cpu)) {
            r = whpx_vcpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        while (cpu_thread_is_idle(cpu)) {
            qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
        }
        qemu_wait_io_event_common(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    whpx_destroy_vcpu(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}

#ifdef _WIN32
static void CALLBACK dummy_apc_func(ULONG_PTR unused)
{
}
#endif

AB
1606/* Multi-threaded TCG
1607 *
1608 * In the multi-threaded case each vCPU has its own thread. The TLS
1609 * variable current_cpu can be used deep in the code to find the
1610 * current CPUState for a given thread.
1611 */
1612
1613static void *qemu_tcg_cpu_thread_fn(void *arg)
1614{
1615 CPUState *cpu = arg;
1616
bf51c720
AB
1617 g_assert(!use_icount);
1618
37257942 1619 rcu_register_thread();
3468b59e 1620 tcg_register_thread();
37257942
AB
1621
1622 qemu_mutex_lock_iothread();
1623 qemu_thread_get_self(cpu->thread);
1624
1625 cpu->thread_id = qemu_get_thread_id();
1626 cpu->created = true;
1627 cpu->can_do_io = 1;
1628 current_cpu = cpu;
1629 qemu_cond_signal(&qemu_cpu_cond);
1630
1631 /* process any pending work */
1632 cpu->exit_request = 1;
1633
1634 while (1) {
1635 if (cpu_can_run(cpu)) {
1636 int r;
1637 r = tcg_cpu_exec(cpu);
1638 switch (r) {
1639 case EXCP_DEBUG:
1640 cpu_handle_guest_debug(cpu);
1641 break;
1642 case EXCP_HALTED:
1643 /* during start-up the vCPU is reset and the thread is
1644 * kicked several times. If we don't ensure we go back
1645 * to sleep in the halted state we won't cleanly
1646 * start-up when the vCPU is enabled.
1647 *
1648 * cpu->halted should ensure we sleep in wait_io_event
1649 */
1650 g_assert(cpu->halted);
1651 break;
08e73c48
PK
1652 case EXCP_ATOMIC:
1653 qemu_mutex_unlock_iothread();
1654 cpu_exec_step_atomic(cpu);
1655 qemu_mutex_lock_iothread();
37257942
AB
1656 default:
1657 /* Ignore everything else? */
1658 break;
1659 }
1660 }
1661
37257942 1662 atomic_mb_set(&cpu->exit_request, 0);
db08b687 1663 qemu_wait_io_event(cpu);
9b0605f9 1664 } while (!cpu->unplug || cpu_can_run(cpu));
37257942 1665
9b0605f9
PB
1666 qemu_tcg_destroy_vcpu(cpu);
1667 cpu->created = false;
1668 qemu_cond_signal(&qemu_cpu_cond);
1669 qemu_mutex_unlock_iothread();
1670 rcu_unregister_thread();
37257942
AB
1671 return NULL;
1672}
1673
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    if (cpu->thread_kicked) {
        return;
    }
    cpu->thread_kicked = true;
    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        if (whpx_enabled()) {
            whpx_vcpu_kick(cpu);
        } else if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) {
            fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n",
                    __func__, GetLastError());
            exit(1);
        }
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (tcg_enabled()) {
        cpu_exit(cpu);
        /* NOP unless doing single-thread RR */
        qemu_cpu_kick_rr_cpu();
    } else {
        if (hax_enabled()) {
            /*
             * FIXME: race condition with the exit_request check in
             * hax_vcpu_hax_exec
             */
            cpu->exit_request = 1;
        }
        qemu_cpu_kick_thread(cpu);
    }
}

void qemu_cpu_kick_self(void)
{
    assert(current_cpu);
    qemu_cpu_kick_thread(current_cpu);
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

static __thread bool iothread_locked = false;

bool qemu_mutex_iothread_locked(void)
{
    return iothread_locked;
}

void qemu_mutex_lock_iothread(void)
{
    g_assert(!qemu_mutex_iothread_locked());
    qemu_mutex_lock(&qemu_global_mutex);
    iothread_locked = true;
}

void qemu_mutex_unlock_iothread(void)
{
    g_assert(qemu_mutex_iothread_locked());
    iothread_locked = false;
    qemu_mutex_unlock(&qemu_global_mutex);
}

static bool all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return false;
        }
    }

    return true;
}

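/* Must be called with the BQL held: vCPUs stopping signal qemu_pause_cond,
 * which is waited on here together with qemu_global_mutex.
 */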
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        if (qemu_cpu_is_self(cpu)) {
            qemu_cpu_stop(cpu, true);
        } else {
            cpu->stop = true;
            qemu_cpu_kick(cpu);
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}

void cpu_remove_sync(CPUState *cpu)
{
    cpu->stop = true;
    cpu->unplug = true;
    qemu_cpu_kick(cpu);
    qemu_mutex_unlock_iothread();
    qemu_thread_join(cpu->thread);
    qemu_mutex_lock_iothread();
}

4900116e
DDAG
1819/* For temporary buffers for forming a name */
1820#define VCPU_THREAD_NAME_SIZE 16
1821
e5ab30a2 1822static void qemu_tcg_init_vcpu(CPUState *cpu)
296af7c9 1823{
4900116e 1824 char thread_name[VCPU_THREAD_NAME_SIZE];
37257942
AB
1825 static QemuCond *single_tcg_halt_cond;
1826 static QemuThread *single_tcg_cpu_thread;
e8feb96f
EC
1827 static int tcg_region_inited;
1828
1829 /*
1830 * Initialize TCG regions--once. Now is a good time, because:
1831 * (1) TCG's init context, prologue and target globals have been set up.
1832 * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the
1833 * -accel flag is processed, so the check doesn't work then).
1834 */
1835 if (!tcg_region_inited) {
1836 tcg_region_inited = 1;
1837 tcg_region_init();
1838 }
4900116e 1839
37257942 1840 if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
814e612e 1841 cpu->thread = g_malloc0(sizeof(QemuThread));
f5c121b8
AF
1842 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1843 qemu_cond_init(cpu->halt_cond);
37257942
AB
1844
1845 if (qemu_tcg_mttcg_enabled()) {
1846 /* create a thread per vCPU with TCG (MTTCG) */
1847 parallel_cpus = true;
1848 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
4900116e 1849 cpu->cpu_index);
37257942
AB
1850
1851 qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
1852 cpu, QEMU_THREAD_JOINABLE);
1853
1854 } else {
1855 /* share a single thread for all cpus with TCG */
1856 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
1857 qemu_thread_create(cpu->thread, thread_name,
1858 qemu_tcg_rr_cpu_thread_fn,
1859 cpu, QEMU_THREAD_JOINABLE);
1860
1861 single_tcg_halt_cond = cpu->halt_cond;
1862 single_tcg_cpu_thread = cpu->thread;
1863 }
1ecf47bf 1864#ifdef _WIN32
814e612e 1865 cpu->hThread = qemu_thread_get_handle(cpu->thread);
1ecf47bf 1866#endif
296af7c9 1867 } else {
37257942
AB
1868 /* For non-MTTCG cases we share the thread */
1869 cpu->thread = single_tcg_cpu_thread;
1870 cpu->halt_cond = single_tcg_halt_cond;
a342173a
DH
1871 cpu->thread_id = first_cpu->thread_id;
1872 cpu->can_do_io = 1;
1873 cpu->created = true;
296af7c9
BS
1874 }
1875}
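
The choice between the two paths above is the TCG thread model: MTTCG gives each vCPU its own "CPU n/TCG" thread, while the round-robin fallback multiplexes all vCPUs onto one "ALL CPUs/TCG" thread. As a rough reference (option spelling may differ between QEMU versions), this is selected on the command line with:

    -accel tcg,thread=multi     # one thread per vCPU (MTTCG)
    -accel tcg,thread=single    # one shared round-robin thread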
1876
b0cb0a66
VP
1877static void qemu_hax_start_vcpu(CPUState *cpu)
1878{
1879 char thread_name[VCPU_THREAD_NAME_SIZE];
1880
1881 cpu->thread = g_malloc0(sizeof(QemuThread));
1882 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1883 qemu_cond_init(cpu->halt_cond);
1884
1885 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX",
1886 cpu->cpu_index);
1887 qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn,
1888 cpu, QEMU_THREAD_JOINABLE);
1889#ifdef _WIN32
1890 cpu->hThread = qemu_thread_get_handle(cpu->thread);
1891#endif
b0cb0a66
VP
1892}
1893
48a106bd 1894static void qemu_kvm_start_vcpu(CPUState *cpu)
296af7c9 1895{
4900116e
DDAG
1896 char thread_name[VCPU_THREAD_NAME_SIZE];
1897
814e612e 1898 cpu->thread = g_malloc0(sizeof(QemuThread));
f5c121b8
AF
1899 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1900 qemu_cond_init(cpu->halt_cond);
4900116e
DDAG
1901 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
1902 cpu->cpu_index);
1903 qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
1904 cpu, QEMU_THREAD_JOINABLE);
296af7c9
BS
1905}
1906
c97d6d2c
SAGDR
1907static void qemu_hvf_start_vcpu(CPUState *cpu)
1908{
1909 char thread_name[VCPU_THREAD_NAME_SIZE];
1910
 1911 /* HVF currently does not support TCG and only runs in
1912 * unrestricted-guest mode. */
1913 assert(hvf_enabled());
1914
1915 cpu->thread = g_malloc0(sizeof(QemuThread));
1916 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1917 qemu_cond_init(cpu->halt_cond);
1918
1919 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
1920 cpu->cpu_index);
1921 qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn,
1922 cpu, QEMU_THREAD_JOINABLE);
c97d6d2c
SAGDR
1923}
1924
19306806
JTV
1925static void qemu_whpx_start_vcpu(CPUState *cpu)
1926{
1927 char thread_name[VCPU_THREAD_NAME_SIZE];
1928
1929 cpu->thread = g_malloc0(sizeof(QemuThread));
1930 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1931 qemu_cond_init(cpu->halt_cond);
1932 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX",
1933 cpu->cpu_index);
1934 qemu_thread_create(cpu->thread, thread_name, qemu_whpx_cpu_thread_fn,
1935 cpu, QEMU_THREAD_JOINABLE);
1936#ifdef _WIN32
1937 cpu->hThread = qemu_thread_get_handle(cpu->thread);
1938#endif
19306806
JTV
1939}
1940
10a9021d 1941static void qemu_dummy_start_vcpu(CPUState *cpu)
c7f0f3b1 1942{
4900116e
DDAG
1943 char thread_name[VCPU_THREAD_NAME_SIZE];
1944
814e612e 1945 cpu->thread = g_malloc0(sizeof(QemuThread));
f5c121b8
AF
1946 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1947 qemu_cond_init(cpu->halt_cond);
4900116e
DDAG
1948 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
1949 cpu->cpu_index);
1950 qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
c7f0f3b1 1951 QEMU_THREAD_JOINABLE);
c7f0f3b1
AL
1952}
1953
c643bed9 1954void qemu_init_vcpu(CPUState *cpu)
296af7c9 1955{
ce3960eb
AF
1956 cpu->nr_cores = smp_cores;
1957 cpu->nr_threads = smp_threads;
f324e766 1958 cpu->stopped = true;
56943e8c
PM
1959
1960 if (!cpu->as) {
1961 /* If the target cpu hasn't set up any address spaces itself,
1962 * give it the default one.
1963 */
12ebc9a7 1964 cpu->num_ases = 1;
80ceb07a 1965 cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
56943e8c
PM
1966 }
1967
0ab07c62 1968 if (kvm_enabled()) {
48a106bd 1969 qemu_kvm_start_vcpu(cpu);
b0cb0a66
VP
1970 } else if (hax_enabled()) {
1971 qemu_hax_start_vcpu(cpu);
c97d6d2c
SAGDR
1972 } else if (hvf_enabled()) {
1973 qemu_hvf_start_vcpu(cpu);
c7f0f3b1 1974 } else if (tcg_enabled()) {
e5ab30a2 1975 qemu_tcg_init_vcpu(cpu);
19306806
JTV
1976 } else if (whpx_enabled()) {
1977 qemu_whpx_start_vcpu(cpu);
c7f0f3b1 1978 } else {
10a9021d 1979 qemu_dummy_start_vcpu(cpu);
0ab07c62 1980 }
81e96311
DH
1981
1982 while (!cpu->created) {
1983 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1984 }
296af7c9
BS
1985}
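
A simplified sketch (not part of cpus.c) of where qemu_init_vcpu() sits: each target's CPU realize function calls it after target-specific setup, and the call blocks on qemu_cpu_cond until the new vCPU thread sets cpu->created. The realize handler below is hypothetical and headers are omitted; a real target includes its own cpu.h.

static void example_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);   /* common realize: registers the CPU */
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* ... target-specific initialisation of the CPU state ... */

    qemu_init_vcpu(cs);   /* spawn or reuse a vCPU thread; waits for cpu->created */
    cpu_reset(cs);
}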
1986
b4a3d965 1987void cpu_stop_current(void)
296af7c9 1988{
4917cf44 1989 if (current_cpu) {
ebd05fea 1990 qemu_cpu_stop(current_cpu, true);
b4a3d965 1991 }
296af7c9
BS
1992}
1993
56983463 1994int vm_stop(RunState state)
296af7c9 1995{
aa723c23 1996 if (qemu_in_vcpu_thread()) {
74892d24 1997 qemu_system_vmstop_request_prepare();
1dfb4dd9 1998 qemu_system_vmstop_request(state);
296af7c9
BS
1999 /*
 2000 * FIXME: should not return to device code once
2001 * vm_stop() has been requested.
2002 */
b4a3d965 2003 cpu_stop_current();
56983463 2004 return 0;
296af7c9 2005 }
56983463 2006
4486e89c 2007 return do_vm_stop(state, true);
296af7c9
BS
2008}
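
A usage sketch (not part of cpus.c), modelled loosely on the QMP "stop" handler: from the main loop, vm_stop() synchronously stops the vCPUs and flushes block devices; from a vCPU thread it only files a stop request and returns, as the function above shows. The wrapper name is hypothetical.

static void example_pause_guest(Error **errp)
{
    if (vm_stop(RUN_STATE_PAUSED) < 0) {
        error_setg(errp, "failed to flush block devices while stopping the VM");
    }
}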
2009
2d76e823
CI
2010/**
2011 * Prepare for (re)starting the VM.
2012 * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
2013 * running or in case of an error condition), 0 otherwise.
2014 */
2015int vm_prepare_start(void)
2016{
2017 RunState requested;
2018 int res = 0;
2019
2020 qemu_vmstop_requested(&requested);
2021 if (runstate_is_running() && requested == RUN_STATE__MAX) {
2022 return -1;
2023 }
2024
2025 /* Ensure that a STOP/RESUME pair of events is emitted if a
 2026 * vmstop request was pending. The BLOCK_IO_ERROR event, for
 2027 * example, is documented as always being followed by
 2028 * the STOP event.
2029 */
2030 if (runstate_is_running()) {
2031 qapi_event_send_stop(&error_abort);
2032 res = -1;
2033 } else {
2034 replay_enable_events();
2035 cpu_enable_ticks();
2036 runstate_set(RUN_STATE_RUNNING);
2037 vm_state_notify(1, RUN_STATE_RUNNING);
2038 }
2039
 2040 /* We are sending this now, but the CPUs will be resumed shortly afterwards */
2041 qapi_event_send_resume(&error_abort);
2042 return res;
2043}
2044
2045void vm_start(void)
2046{
2047 if (!vm_prepare_start()) {
2048 resume_all_vcpus();
2049 }
2050}
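
A sketch (not part of cpus.c) of why vm_prepare_start() is split out of vm_start(): a caller can insert work between declaring the VM running and actually releasing the vCPUs, which is what the RESUME-before-resume comment above alludes to. The function name is hypothetical.

static void example_start_with_setup(void)
{
    if (vm_prepare_start() == 0) {
        /* ... work that must see RUN_STATE_RUNNING but run before any vCPU ... */
        resume_all_vcpus();
    }
}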
2051
8a9236f1
LC
 2052/* Does a state transition even if the VM is already stopped;
 2053 the current state is forgotten forever. */
56983463 2054int vm_stop_force_state(RunState state)
8a9236f1
LC
2055{
2056 if (runstate_is_running()) {
56983463 2057 return vm_stop(state);
8a9236f1
LC
2058 } else {
2059 runstate_set(state);
b2780d32
WC
2060
2061 bdrv_drain_all();
594a45ce
KW
2062 /* Make sure to return an error if the flush in a previous vm_stop()
2063 * failed. */
22af08ea 2064 return bdrv_flush_all();
8a9236f1
LC
2065 }
2066}
2067
9a78eead 2068void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
262353cb
BS
2069{
 2070 /* XXX: implement xxx_cpu_list for targets that still lack it */
e916cbf8
PM
2071#if defined(cpu_list)
2072 cpu_list(f, cpu_fprintf);
262353cb
BS
2073#endif
2074}
de0b36b6
LC
2075
2076CpuInfoList *qmp_query_cpus(Error **errp)
2077{
afed5a5a
IM
2078 MachineState *ms = MACHINE(qdev_get_machine());
2079 MachineClass *mc = MACHINE_GET_CLASS(ms);
de0b36b6 2080 CpuInfoList *head = NULL, *cur_item = NULL;
182735ef 2081 CPUState *cpu;
de0b36b6 2082
bdc44640 2083 CPU_FOREACH(cpu) {
de0b36b6 2084 CpuInfoList *info;
182735ef
AF
2085#if defined(TARGET_I386)
2086 X86CPU *x86_cpu = X86_CPU(cpu);
2087 CPUX86State *env = &x86_cpu->env;
2088#elif defined(TARGET_PPC)
2089 PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
2090 CPUPPCState *env = &ppc_cpu->env;
2091#elif defined(TARGET_SPARC)
2092 SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
2093 CPUSPARCState *env = &sparc_cpu->env;
25fa194b
MC
2094#elif defined(TARGET_RISCV)
2095 RISCVCPU *riscv_cpu = RISCV_CPU(cpu);
2096 CPURISCVState *env = &riscv_cpu->env;
182735ef
AF
2097#elif defined(TARGET_MIPS)
2098 MIPSCPU *mips_cpu = MIPS_CPU(cpu);
2099 CPUMIPSState *env = &mips_cpu->env;
48e06fe0
BK
2100#elif defined(TARGET_TRICORE)
2101 TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
2102 CPUTriCoreState *env = &tricore_cpu->env;
9d0306df
VM
2103#elif defined(TARGET_S390X)
2104 S390CPU *s390_cpu = S390_CPU(cpu);
2105 CPUS390XState *env = &s390_cpu->env;
182735ef 2106#endif
de0b36b6 2107
cb446eca 2108 cpu_synchronize_state(cpu);
de0b36b6
LC
2109
2110 info = g_malloc0(sizeof(*info));
2111 info->value = g_malloc0(sizeof(*info->value));
55e5c285 2112 info->value->CPU = cpu->cpu_index;
182735ef 2113 info->value->current = (cpu == first_cpu);
259186a7 2114 info->value->halted = cpu->halted;
58f88d4b 2115 info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
9f09e18a 2116 info->value->thread_id = cpu->thread_id;
de0b36b6 2117#if defined(TARGET_I386)
86f4b687 2118 info->value->arch = CPU_INFO_ARCH_X86;
544a3731 2119 info->value->u.x86.pc = env->eip + env->segs[R_CS].base;
de0b36b6 2120#elif defined(TARGET_PPC)
86f4b687 2121 info->value->arch = CPU_INFO_ARCH_PPC;
544a3731 2122 info->value->u.ppc.nip = env->nip;
de0b36b6 2123#elif defined(TARGET_SPARC)
86f4b687 2124 info->value->arch = CPU_INFO_ARCH_SPARC;
544a3731
EB
2125 info->value->u.q_sparc.pc = env->pc;
2126 info->value->u.q_sparc.npc = env->npc;
de0b36b6 2127#elif defined(TARGET_MIPS)
86f4b687 2128 info->value->arch = CPU_INFO_ARCH_MIPS;
544a3731 2129 info->value->u.q_mips.PC = env->active_tc.PC;
48e06fe0 2130#elif defined(TARGET_TRICORE)
86f4b687 2131 info->value->arch = CPU_INFO_ARCH_TRICORE;
544a3731 2132 info->value->u.tricore.PC = env->PC;
9d0306df
VM
2133#elif defined(TARGET_S390X)
2134 info->value->arch = CPU_INFO_ARCH_S390;
2135 info->value->u.s390.cpu_state = env->cpu_state;
25fa194b
MC
2136#elif defined(TARGET_RISCV)
2137 info->value->arch = CPU_INFO_ARCH_RISCV;
2138 info->value->u.riscv.pc = env->pc;
86f4b687
EB
2139#else
2140 info->value->arch = CPU_INFO_ARCH_OTHER;
de0b36b6 2141#endif
afed5a5a
IM
2142 info->value->has_props = !!mc->cpu_index_to_instance_props;
2143 if (info->value->has_props) {
2144 CpuInstanceProperties *props;
2145 props = g_malloc0(sizeof(*props));
2146 *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
2147 info->value->props = props;
2148 }
de0b36b6
LC
2149
2150 /* XXX: waiting for the qapi to support GSList */
2151 if (!cur_item) {
2152 head = cur_item = info;
2153 } else {
2154 cur_item->next = info;
2155 cur_item = info;
2156 }
2157 }
2158
2159 return head;
2160}
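
An illustrative QMP exchange for the handler above (the exact fields depend on the target and QEMU version; the values shown are made up):

-> { "execute": "query-cpus" }
<- { "return": [
       { "CPU": 0, "current": true, "halted": false,
         "qom_path": "/machine/unattached/device[0]",
         "thread_id": 12345, "arch": "x86", "pc": 1048576 } ] }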
0cfd6a9a 2161
ce74ee3d
LC
2162/*
2163 * fast means: we NEVER interrupt vCPU threads to retrieve
2164 * information from KVM.
2165 */
2166CpuInfoFastList *qmp_query_cpus_fast(Error **errp)
2167{
2168 MachineState *ms = MACHINE(qdev_get_machine());
2169 MachineClass *mc = MACHINE_GET_CLASS(ms);
2170 CpuInfoFastList *head = NULL, *cur_item = NULL;
2171 CPUState *cpu;
ca230ff3
VM
2172#if defined(TARGET_S390X)
2173 S390CPU *s390_cpu;
2174 CPUS390XState *env;
2175#endif
ce74ee3d
LC
2176
2177 CPU_FOREACH(cpu) {
2178 CpuInfoFastList *info = g_malloc0(sizeof(*info));
2179 info->value = g_malloc0(sizeof(*info->value));
2180
2181 info->value->cpu_index = cpu->cpu_index;
2182 info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
2183 info->value->thread_id = cpu->thread_id;
2184
2185 info->value->has_props = !!mc->cpu_index_to_instance_props;
2186 if (info->value->has_props) {
2187 CpuInstanceProperties *props;
2188 props = g_malloc0(sizeof(*props));
2189 *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
2190 info->value->props = props;
2191 }
2192
ca230ff3
VM
2193#if defined(TARGET_S390X)
2194 s390_cpu = S390_CPU(cpu);
2195 env = &s390_cpu->env;
2196 info->value->arch = CPU_INFO_ARCH_S390;
2197 info->value->u.s390.cpu_state = env->cpu_state;
2198#endif
ce74ee3d
LC
2199 if (!cur_item) {
2200 head = cur_item = info;
2201 } else {
2202 cur_item->next = info;
2203 cur_item = info;
2204 }
2205 }
2206
2207 return head;
2208}
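
The corresponding exchange for the fast variant, which never interrupts the vCPU threads (abridged and illustrative; fields depend on the target):

-> { "execute": "query-cpus-fast" }
<- { "return": [
       { "cpu-index": 0,
         "qom_path": "/machine/unattached/device[0]",
         "thread_id": 12345 } ] }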
2209
0cfd6a9a
LC
2210void qmp_memsave(int64_t addr, int64_t size, const char *filename,
2211 bool has_cpu, int64_t cpu_index, Error **errp)
2212{
2213 FILE *f;
2214 uint32_t l;
55e5c285 2215 CPUState *cpu;
0cfd6a9a 2216 uint8_t buf[1024];
0dc9daf0 2217 int64_t orig_addr = addr, orig_size = size;
0cfd6a9a
LC
2218
2219 if (!has_cpu) {
2220 cpu_index = 0;
2221 }
2222
151d1322
AF
2223 cpu = qemu_get_cpu(cpu_index);
2224 if (cpu == NULL) {
c6bd8c70
MA
2225 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
2226 "a CPU number");
0cfd6a9a
LC
2227 return;
2228 }
2229
2230 f = fopen(filename, "wb");
2231 if (!f) {
618da851 2232 error_setg_file_open(errp, errno, filename);
0cfd6a9a
LC
2233 return;
2234 }
2235
2236 while (size != 0) {
2237 l = sizeof(buf);
2238 if (l > size)
2239 l = size;
2f4d0f59 2240 if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
0dc9daf0
BP
2241 error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
2242 " specified", orig_addr, orig_size);
2f4d0f59
AK
2243 goto exit;
2244 }
0cfd6a9a 2245 if (fwrite(buf, 1, l, f) != l) {
c6bd8c70 2246 error_setg(errp, QERR_IO_ERROR);
0cfd6a9a
LC
2247 goto exit;
2248 }
2249 addr += l;
2250 size -= l;
2251 }
2252
2253exit:
2254 fclose(f);
2255}
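
An illustrative QMP invocation of the handler above; note that the address argument is historically named "val" in the schema (values made up):

-> { "execute": "memsave",
     "arguments": { "val": 4096, "size": 4096,
                    "filename": "/tmp/virtual-mem-dump", "cpu-index": 0 } }
<- { "return": {} }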
6d3962bf
LC
2256
2257void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
2258 Error **errp)
2259{
2260 FILE *f;
2261 uint32_t l;
2262 uint8_t buf[1024];
2263
2264 f = fopen(filename, "wb");
2265 if (!f) {
618da851 2266 error_setg_file_open(errp, errno, filename);
6d3962bf
LC
2267 return;
2268 }
2269
2270 while (size != 0) {
2271 l = sizeof(buf);
2272 if (l > size)
2273 l = size;
eb6282f2 2274 cpu_physical_memory_read(addr, buf, l);
6d3962bf 2275 if (fwrite(buf, 1, l, f) != l) {
c6bd8c70 2276 error_setg(errp, QERR_IO_ERROR);
6d3962bf
LC
2277 goto exit;
2278 }
2279 addr += l;
2280 size -= l;
2281 }
2282
2283exit:
2284 fclose(f);
2285}
ab49ab5c
LC
2286
2287void qmp_inject_nmi(Error **errp)
2288{
9cb805fd 2289 nmi_monitor_handle(monitor_get_cpu_index(), errp);
ab49ab5c 2290}
27498bef
ST
2291
2292void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
2293{
2294 if (!use_icount) {
2295 return;
2296 }
2297
2298 cpu_fprintf(f, "Host - Guest clock %"PRIi64" ms\n",
2299 (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
2300 if (icount_align_option) {
2301 cpu_fprintf(f, "Max guest delay %"PRIi64" ms\n", -max_delay/SCALE_MS);
2302 cpu_fprintf(f, "Max guest advance %"PRIi64" ms\n", max_advance/SCALE_MS);
2303 } else {
2304 cpu_fprintf(f, "Max guest delay NA\n");
2305 cpu_fprintf(f, "Max guest advance NA\n");
2306 }
2307}
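
These drift statistics are only meaningful with icount enabled, and the max delay/advance figures are only reported when the align suboption is set, e.g. with an illustrative command-line fragment such as:

    -icount shift=7,align=on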