]> git.proxmox.com Git - mirror_qemu.git/blame - cpus.c
minikconf: do not include variables from MINIKCONF_ARGS in config-all-devices.mak
[mirror_qemu.git] / cpus.c
CommitLineData
296af7c9
BS
1/*
2 * QEMU System Emulator
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
7b31bbc2 25#include "qemu/osdep.h"
a8d25326 26#include "qemu-common.h"
8d4e9146 27#include "qemu/config-file.h"
33c11879 28#include "cpu.h"
83c9089e 29#include "monitor/monitor.h"
e688df6b 30#include "qapi/error.h"
112ed241 31#include "qapi/qapi-commands-misc.h"
9af23989 32#include "qapi/qapi-events-run-state.h"
a4e15de9 33#include "qapi/qmp/qerror.h"
d49b6836 34#include "qemu/error-report.h"
76c86615 35#include "qemu/qemu-print.h"
9c17d615 36#include "sysemu/sysemu.h"
14a48c1d 37#include "sysemu/tcg.h"
da31d594 38#include "sysemu/block-backend.h"
022c62cb 39#include "exec/gdbstub.h"
9c17d615 40#include "sysemu/dma.h"
b3946626 41#include "sysemu/hw_accel.h"
9c17d615 42#include "sysemu/kvm.h"
b0cb0a66 43#include "sysemu/hax.h"
c97d6d2c 44#include "sysemu/hvf.h"
19306806 45#include "sysemu/whpx.h"
63c91552 46#include "exec/exec-all.h"
296af7c9 47
1de7afc9 48#include "qemu/thread.h"
9c17d615
PB
49#include "sysemu/cpus.h"
50#include "sysemu/qtest.h"
1de7afc9 51#include "qemu/main-loop.h"
922a01a0 52#include "qemu/option.h"
1de7afc9 53#include "qemu/bitmap.h"
cb365646 54#include "qemu/seqlock.h"
9c09a251 55#include "qemu/guest-random.h"
8d4e9146 56#include "tcg.h"
9cb805fd 57#include "hw/nmi.h"
8b427044 58#include "sysemu/replay.h"
afed5a5a 59#include "hw/boards.h"
0ff0fc19 60
6d9cb73c
JK
61#ifdef CONFIG_LINUX
62
63#include <sys/prctl.h>
64
c0532a76
MT
65#ifndef PR_MCE_KILL
66#define PR_MCE_KILL 33
67#endif
68
6d9cb73c
JK
69#ifndef PR_MCE_KILL_SET
70#define PR_MCE_KILL_SET 1
71#endif
72
73#ifndef PR_MCE_KILL_EARLY
74#define PR_MCE_KILL_EARLY 1
75#endif
76
77#endif /* CONFIG_LINUX */
78
27498bef
ST
79int64_t max_delay;
80int64_t max_advance;
296af7c9 81
2adcc85d
JH
82/* vcpu throttling controls */
83static QEMUTimer *throttle_timer;
84static unsigned int throttle_percentage;
85
86#define CPU_THROTTLE_PCT_MIN 1
87#define CPU_THROTTLE_PCT_MAX 99
88#define CPU_THROTTLE_TIMESLICE_NS 10000000
89
321bc0b2
TC
90bool cpu_is_stopped(CPUState *cpu)
91{
92 return cpu->stopped || !runstate_is_running();
93}
94
a98ae1d8 95static bool cpu_thread_is_idle(CPUState *cpu)
ac873f1e 96{
c64ca814 97 if (cpu->stop || cpu->queued_work_first) {
ac873f1e
PM
98 return false;
99 }
321bc0b2 100 if (cpu_is_stopped(cpu)) {
ac873f1e
PM
101 return true;
102 }
8c2e1b00 103 if (!cpu->halted || cpu_has_work(cpu) ||
215e79c0 104 kvm_halt_in_kernel()) {
ac873f1e
PM
105 return false;
106 }
107 return true;
108}
109
110static bool all_cpu_threads_idle(void)
111{
182735ef 112 CPUState *cpu;
ac873f1e 113
bdc44640 114 CPU_FOREACH(cpu) {
182735ef 115 if (!cpu_thread_is_idle(cpu)) {
ac873f1e
PM
116 return false;
117 }
118 }
119 return true;
120}
121
946fb27c
PB
122/***********************************************************/
123/* guest cycle counter */
124
a3270e19
PB
125/* Protected by TimersState seqlock */
126
5045e9d9 127static bool icount_sleep = true;
946fb27c
PB
128/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
129#define MAX_ICOUNT_SHIFT 10
a3270e19 130
946fb27c 131typedef struct TimersState {
cb365646 132 /* Protected by BQL. */
946fb27c
PB
133 int64_t cpu_ticks_prev;
134 int64_t cpu_ticks_offset;
cb365646 135
94377115
PB
136 /* Protect fields that can be respectively read outside the
137 * BQL, and written from multiple threads.
cb365646
LPF
138 */
139 QemuSeqLock vm_clock_seqlock;
94377115
PB
140 QemuSpin vm_clock_lock;
141
142 int16_t cpu_ticks_enabled;
c96778bb 143
c1ff073c 144 /* Conversion factor from emulated instructions to virtual clock ticks. */
94377115
PB
145 int16_t icount_time_shift;
146
c96778bb
FK
147 /* Compensate for varying guest execution speed. */
148 int64_t qemu_icount_bias;
94377115
PB
149
150 int64_t vm_clock_warp_start;
151 int64_t cpu_clock_offset;
152
c96778bb
FK
153 /* Only written by TCG thread */
154 int64_t qemu_icount;
94377115 155
b39e3f34 156 /* for adjusting icount */
b39e3f34
PD
157 QEMUTimer *icount_rt_timer;
158 QEMUTimer *icount_vm_timer;
159 QEMUTimer *icount_warp_timer;
946fb27c
PB
160} TimersState;
161
d9cd4007 162static TimersState timers_state;
8d4e9146
FK
163bool mttcg_enabled;
164
165/*
166 * We default to false if we know other options have been enabled
167 * which are currently incompatible with MTTCG. Otherwise when each
168 * guest (target) has been updated to support:
169 * - atomic instructions
170 * - memory ordering primitives (barriers)
171 * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
172 *
173 * Once a guest architecture has been converted to the new primitives
174 * there are two remaining limitations to check.
175 *
176 * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
177 * - The host must have a stronger memory order than the guest
178 *
179 * It may be possible in future to support strong guests on weak hosts
180 * but that will require tagging all load/stores in a guest with their
181 * implicit memory order requirements which would likely slow things
182 * down a lot.
183 */
184
185static bool check_tcg_memory_orders_compatible(void)
186{
187#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
188 return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
189#else
190 return false;
191#endif
192}
193
194static bool default_mttcg_enabled(void)
195{
83fd9629 196 if (use_icount || TCG_OVERSIZED_GUEST) {
8d4e9146
FK
197 return false;
198 } else {
199#ifdef TARGET_SUPPORTS_MTTCG
200 return check_tcg_memory_orders_compatible();
201#else
202 return false;
203#endif
204 }
205}
206
207void qemu_tcg_configure(QemuOpts *opts, Error **errp)
208{
209 const char *t = qemu_opt_get(opts, "thread");
210 if (t) {
211 if (strcmp(t, "multi") == 0) {
212 if (TCG_OVERSIZED_GUEST) {
213 error_setg(errp, "No MTTCG when guest word size > hosts");
83fd9629
AB
214 } else if (use_icount) {
215 error_setg(errp, "No MTTCG when icount is enabled");
8d4e9146 216 } else {
86953503 217#ifndef TARGET_SUPPORTS_MTTCG
0765691e
MA
218 warn_report("Guest not yet converted to MTTCG - "
219 "you may get unexpected results");
c34c7620 220#endif
8d4e9146 221 if (!check_tcg_memory_orders_compatible()) {
0765691e
MA
222 warn_report("Guest expects a stronger memory ordering "
223 "than the host provides");
8cfef892 224 error_printf("This may cause strange/hard to debug errors\n");
8d4e9146
FK
225 }
226 mttcg_enabled = true;
227 }
228 } else if (strcmp(t, "single") == 0) {
229 mttcg_enabled = false;
230 } else {
231 error_setg(errp, "Invalid 'thread' setting %s", t);
232 }
233 } else {
234 mttcg_enabled = default_mttcg_enabled();
235 }
236}
946fb27c 237
e4cd9657
AB
238/* The current number of executed instructions is based on what we
239 * originally budgeted minus the current state of the decrementing
240 * icount counters in extra/u16.low.
241 */
242static int64_t cpu_get_icount_executed(CPUState *cpu)
243{
5e140196
RH
244 return (cpu->icount_budget -
245 (cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra));
e4cd9657
AB
246}
247
512d3c80
AB
248/*
249 * Update the global shared timer_state.qemu_icount to take into
250 * account executed instructions. This is done by the TCG vCPU
251 * thread so the main-loop can see time has moved forward.
252 */
9b4e6f49 253static void cpu_update_icount_locked(CPUState *cpu)
512d3c80
AB
254{
255 int64_t executed = cpu_get_icount_executed(cpu);
256 cpu->icount_budget -= executed;
257
38adcb6e
EC
258 atomic_set_i64(&timers_state.qemu_icount,
259 timers_state.qemu_icount + executed);
9b4e6f49
PB
260}
261
262/*
263 * Update the global shared timer_state.qemu_icount to take into
264 * account executed instructions. This is done by the TCG vCPU
265 * thread so the main-loop can see time has moved forward.
266 */
267void cpu_update_icount(CPUState *cpu)
268{
269 seqlock_write_lock(&timers_state.vm_clock_seqlock,
270 &timers_state.vm_clock_lock);
271 cpu_update_icount_locked(cpu);
94377115
PB
272 seqlock_write_unlock(&timers_state.vm_clock_seqlock,
273 &timers_state.vm_clock_lock);
512d3c80
AB
274}
275
c1ff073c 276static int64_t cpu_get_icount_raw_locked(void)
946fb27c 277{
4917cf44 278 CPUState *cpu = current_cpu;
946fb27c 279
243c5f77 280 if (cpu && cpu->running) {
414b15c9 281 if (!cpu->can_do_io) {
493d89bf 282 error_report("Bad icount read");
2a62914b 283 exit(1);
946fb27c 284 }
e4cd9657 285 /* Take into account what has run */
9b4e6f49 286 cpu_update_icount_locked(cpu);
946fb27c 287 }
38adcb6e
EC
288 /* The read is protected by the seqlock, but needs atomic64 to avoid UB */
289 return atomic_read_i64(&timers_state.qemu_icount);
2a62914b
PD
290}
291
2a62914b
PD
292static int64_t cpu_get_icount_locked(void)
293{
c1ff073c 294 int64_t icount = cpu_get_icount_raw_locked();
c97595d1
EC
295 return atomic_read_i64(&timers_state.qemu_icount_bias) +
296 cpu_icount_to_ns(icount);
c1ff073c
PB
297}
298
299int64_t cpu_get_icount_raw(void)
300{
301 int64_t icount;
302 unsigned start;
303
304 do {
305 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
306 icount = cpu_get_icount_raw_locked();
307 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
308
309 return icount;
946fb27c
PB
310}
311
c1ff073c 312/* Return the virtual CPU time, based on the instruction counter. */
17a15f1b
PB
313int64_t cpu_get_icount(void)
314{
315 int64_t icount;
316 unsigned start;
317
318 do {
319 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
320 icount = cpu_get_icount_locked();
321 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
322
323 return icount;
324}
325
3f031313
FK
326int64_t cpu_icount_to_ns(int64_t icount)
327{
c1ff073c 328 return icount << atomic_read(&timers_state.icount_time_shift);
3f031313
FK
329}
330
f2a4ad6d
PB
331static int64_t cpu_get_ticks_locked(void)
332{
333 int64_t ticks = timers_state.cpu_ticks_offset;
334 if (timers_state.cpu_ticks_enabled) {
335 ticks += cpu_get_host_ticks();
336 }
337
338 if (timers_state.cpu_ticks_prev > ticks) {
339 /* Non increasing ticks may happen if the host uses software suspend. */
340 timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
341 ticks = timers_state.cpu_ticks_prev;
342 }
343
344 timers_state.cpu_ticks_prev = ticks;
345 return ticks;
346}
347
d90f3cca
C
348/* return the time elapsed in VM between vm_start and vm_stop. Unless
349 * icount is active, cpu_get_ticks() uses units of the host CPU cycle
350 * counter.
d90f3cca 351 */
946fb27c
PB
352int64_t cpu_get_ticks(void)
353{
5f3e3101
PB
354 int64_t ticks;
355
946fb27c
PB
356 if (use_icount) {
357 return cpu_get_icount();
358 }
5f3e3101 359
f2a4ad6d
PB
360 qemu_spin_lock(&timers_state.vm_clock_lock);
361 ticks = cpu_get_ticks_locked();
362 qemu_spin_unlock(&timers_state.vm_clock_lock);
5f3e3101 363 return ticks;
946fb27c
PB
364}
365
cb365646 366static int64_t cpu_get_clock_locked(void)
946fb27c 367{
1d45cea5 368 int64_t time;
cb365646 369
1d45cea5 370 time = timers_state.cpu_clock_offset;
5f3e3101 371 if (timers_state.cpu_ticks_enabled) {
1d45cea5 372 time += get_clock();
946fb27c 373 }
cb365646 374
1d45cea5 375 return time;
cb365646
LPF
376}
377
d90f3cca 378/* Return the monotonic time elapsed in VM, i.e.,
8212ff86
PM
379 * the time between vm_start and vm_stop
380 */
cb365646
LPF
381int64_t cpu_get_clock(void)
382{
383 int64_t ti;
384 unsigned start;
385
386 do {
387 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
388 ti = cpu_get_clock_locked();
389 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
390
391 return ti;
946fb27c
PB
392}
393
cb365646 394/* enable cpu_get_ticks()
3224e878 395 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
cb365646 396 */
946fb27c
PB
397void cpu_enable_ticks(void)
398{
94377115
PB
399 seqlock_write_lock(&timers_state.vm_clock_seqlock,
400 &timers_state.vm_clock_lock);
946fb27c 401 if (!timers_state.cpu_ticks_enabled) {
4a7428c5 402 timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
946fb27c
PB
403 timers_state.cpu_clock_offset -= get_clock();
404 timers_state.cpu_ticks_enabled = 1;
405 }
94377115
PB
406 seqlock_write_unlock(&timers_state.vm_clock_seqlock,
407 &timers_state.vm_clock_lock);
946fb27c
PB
408}
409
410/* disable cpu_get_ticks() : the clock is stopped. You must not call
cb365646 411 * cpu_get_ticks() after that.
3224e878 412 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
cb365646 413 */
946fb27c
PB
414void cpu_disable_ticks(void)
415{
94377115
PB
416 seqlock_write_lock(&timers_state.vm_clock_seqlock,
417 &timers_state.vm_clock_lock);
946fb27c 418 if (timers_state.cpu_ticks_enabled) {
4a7428c5 419 timers_state.cpu_ticks_offset += cpu_get_host_ticks();
cb365646 420 timers_state.cpu_clock_offset = cpu_get_clock_locked();
946fb27c
PB
421 timers_state.cpu_ticks_enabled = 0;
422 }
94377115
PB
423 seqlock_write_unlock(&timers_state.vm_clock_seqlock,
424 &timers_state.vm_clock_lock);
946fb27c
PB
425}
426
427/* Correlation between real and virtual time is always going to be
428 fairly approximate, so ignore small variation.
429 When the guest is idle real and virtual time will be aligned in
430 the IO wait loop. */
73bcb24d 431#define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)
946fb27c
PB
432
433static void icount_adjust(void)
434{
435 int64_t cur_time;
436 int64_t cur_icount;
437 int64_t delta;
a3270e19
PB
438
439 /* Protected by TimersState mutex. */
946fb27c 440 static int64_t last_delta;
468cc7cf 441
946fb27c
PB
442 /* If the VM is not running, then do nothing. */
443 if (!runstate_is_running()) {
444 return;
445 }
468cc7cf 446
94377115
PB
447 seqlock_write_lock(&timers_state.vm_clock_seqlock,
448 &timers_state.vm_clock_lock);
17a15f1b
PB
449 cur_time = cpu_get_clock_locked();
450 cur_icount = cpu_get_icount_locked();
468cc7cf 451
946fb27c
PB
452 delta = cur_icount - cur_time;
453 /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
454 if (delta > 0
455 && last_delta + ICOUNT_WOBBLE < delta * 2
c1ff073c 456 && timers_state.icount_time_shift > 0) {
946fb27c 457 /* The guest is getting too far ahead. Slow time down. */
c1ff073c
PB
458 atomic_set(&timers_state.icount_time_shift,
459 timers_state.icount_time_shift - 1);
946fb27c
PB
460 }
461 if (delta < 0
462 && last_delta - ICOUNT_WOBBLE > delta * 2
c1ff073c 463 && timers_state.icount_time_shift < MAX_ICOUNT_SHIFT) {
946fb27c 464 /* The guest is getting too far behind. Speed time up. */
c1ff073c
PB
465 atomic_set(&timers_state.icount_time_shift,
466 timers_state.icount_time_shift + 1);
946fb27c
PB
467 }
468 last_delta = delta;
c97595d1
EC
469 atomic_set_i64(&timers_state.qemu_icount_bias,
470 cur_icount - (timers_state.qemu_icount
471 << timers_state.icount_time_shift));
94377115
PB
472 seqlock_write_unlock(&timers_state.vm_clock_seqlock,
473 &timers_state.vm_clock_lock);
946fb27c
PB
474}
475
476static void icount_adjust_rt(void *opaque)
477{
b39e3f34 478 timer_mod(timers_state.icount_rt_timer,
1979b908 479 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
946fb27c
PB
480 icount_adjust();
481}
482
483static void icount_adjust_vm(void *opaque)
484{
b39e3f34 485 timer_mod(timers_state.icount_vm_timer,
40daca54 486 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
73bcb24d 487 NANOSECONDS_PER_SECOND / 10);
946fb27c
PB
488 icount_adjust();
489}
490
491static int64_t qemu_icount_round(int64_t count)
492{
c1ff073c
PB
493 int shift = atomic_read(&timers_state.icount_time_shift);
494 return (count + (1 << shift) - 1) >> shift;
946fb27c
PB
495}
496
efab87cf 497static void icount_warp_rt(void)
946fb27c 498{
ccffff48
AB
499 unsigned seq;
500 int64_t warp_start;
501
17a15f1b
PB
502 /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
503 * changes from -1 to another value, so the race here is okay.
504 */
ccffff48
AB
505 do {
506 seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
b39e3f34 507 warp_start = timers_state.vm_clock_warp_start;
ccffff48
AB
508 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));
509
510 if (warp_start == -1) {
946fb27c
PB
511 return;
512 }
513
94377115
PB
514 seqlock_write_lock(&timers_state.vm_clock_seqlock,
515 &timers_state.vm_clock_lock);
946fb27c 516 if (runstate_is_running()) {
74c0b816
PB
517 int64_t clock = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT,
518 cpu_get_clock_locked());
8ed961d9
PB
519 int64_t warp_delta;
520
b39e3f34 521 warp_delta = clock - timers_state.vm_clock_warp_start;
8ed961d9 522 if (use_icount == 2) {
946fb27c 523 /*
40daca54 524 * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
946fb27c
PB
525 * far ahead of real time.
526 */
17a15f1b 527 int64_t cur_icount = cpu_get_icount_locked();
bf2a7ddb 528 int64_t delta = clock - cur_icount;
8ed961d9 529 warp_delta = MIN(warp_delta, delta);
946fb27c 530 }
c97595d1
EC
531 atomic_set_i64(&timers_state.qemu_icount_bias,
532 timers_state.qemu_icount_bias + warp_delta);
946fb27c 533 }
b39e3f34 534 timers_state.vm_clock_warp_start = -1;
94377115
PB
535 seqlock_write_unlock(&timers_state.vm_clock_seqlock,
536 &timers_state.vm_clock_lock);
8ed961d9
PB
537
538 if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
539 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
540 }
946fb27c
PB
541}
542
/* Warp-timer callback. */
static void icount_timer_cb(void *opaque)
{
    /* No need for a checkpoint because the timer already synchronizes
     * with CHECKPOINT_CLOCK_VIRTUAL_RT.
     */
    icount_warp_rt();
}
550
8156be56
PB
551void qtest_clock_warp(int64_t dest)
552{
40daca54 553 int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
efef88b3 554 AioContext *aio_context;
8156be56 555 assert(qtest_enabled());
efef88b3 556 aio_context = qemu_get_aio_context();
8156be56 557 while (clock < dest) {
40daca54 558 int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
c9299e2f 559 int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
efef88b3 560
94377115
PB
561 seqlock_write_lock(&timers_state.vm_clock_seqlock,
562 &timers_state.vm_clock_lock);
c97595d1
EC
563 atomic_set_i64(&timers_state.qemu_icount_bias,
564 timers_state.qemu_icount_bias + warp);
94377115
PB
565 seqlock_write_unlock(&timers_state.vm_clock_seqlock,
566 &timers_state.vm_clock_lock);
17a15f1b 567
40daca54 568 qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
efef88b3 569 timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
40daca54 570 clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
8156be56 571 }
40daca54 572 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
8156be56
PB
573}
574
e76d1798 575void qemu_start_warp_timer(void)
946fb27c 576{
ce78d18c 577 int64_t clock;
946fb27c
PB
578 int64_t deadline;
579
e76d1798 580 if (!use_icount) {
946fb27c
PB
581 return;
582 }
583
8bd7f71d
PD
584 /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
585 * do not fire, so computing the deadline does not make sense.
586 */
587 if (!runstate_is_running()) {
588 return;
589 }
590
0c08185f
PD
591 if (replay_mode != REPLAY_MODE_PLAY) {
592 if (!all_cpu_threads_idle()) {
593 return;
594 }
8bd7f71d 595
0c08185f
PD
596 if (qtest_enabled()) {
597 /* When testing, qtest commands advance icount. */
598 return;
599 }
946fb27c 600
0c08185f
PD
601 replay_checkpoint(CHECKPOINT_CLOCK_WARP_START);
602 } else {
603 /* warp clock deterministically in record/replay mode */
604 if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
605 /* vCPU is sleeping and warp can't be started.
606 It is probably a race condition: notification sent
607 to vCPU was processed in advance and vCPU went to sleep.
608 Therefore we have to wake it up for doing someting. */
609 if (replay_has_checkpoint()) {
610 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
611 }
612 return;
613 }
8156be56
PB
614 }
615
ac70aafc 616 /* We want to use the earliest deadline from ALL vm_clocks */
bf2a7ddb 617 clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
40daca54 618 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
ce78d18c 619 if (deadline < 0) {
d7a0f71d
VC
620 static bool notified;
621 if (!icount_sleep && !notified) {
3dc6f869 622 warn_report("icount sleep disabled and no active timers");
d7a0f71d
VC
623 notified = true;
624 }
ce78d18c 625 return;
ac70aafc
AB
626 }
627
946fb27c
PB
628 if (deadline > 0) {
629 /*
40daca54 630 * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
946fb27c
PB
631 * sleep. Otherwise, the CPU might be waiting for a future timer
632 * interrupt to wake it up, but the interrupt never comes because
633 * the vCPU isn't running any insns and thus doesn't advance the
40daca54 634 * QEMU_CLOCK_VIRTUAL.
946fb27c 635 */
5045e9d9
VC
636 if (!icount_sleep) {
637 /*
638 * We never let VCPUs sleep in no sleep icount mode.
639 * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
640 * to the next QEMU_CLOCK_VIRTUAL event and notify it.
641 * It is useful when we want a deterministic execution time,
642 * isolated from host latencies.
643 */
94377115
PB
644 seqlock_write_lock(&timers_state.vm_clock_seqlock,
645 &timers_state.vm_clock_lock);
c97595d1
EC
646 atomic_set_i64(&timers_state.qemu_icount_bias,
647 timers_state.qemu_icount_bias + deadline);
94377115
PB
648 seqlock_write_unlock(&timers_state.vm_clock_seqlock,
649 &timers_state.vm_clock_lock);
5045e9d9
VC
650 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
651 } else {
652 /*
653 * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
654 * "real" time, (related to the time left until the next event) has
655 * passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
656 * This avoids that the warps are visible externally; for example,
657 * you will not be sending network packets continuously instead of
658 * every 100ms.
659 */
94377115
PB
660 seqlock_write_lock(&timers_state.vm_clock_seqlock,
661 &timers_state.vm_clock_lock);
b39e3f34
PD
662 if (timers_state.vm_clock_warp_start == -1
663 || timers_state.vm_clock_warp_start > clock) {
664 timers_state.vm_clock_warp_start = clock;
5045e9d9 665 }
94377115
PB
666 seqlock_write_unlock(&timers_state.vm_clock_seqlock,
667 &timers_state.vm_clock_lock);
b39e3f34
PD
668 timer_mod_anticipate(timers_state.icount_warp_timer,
669 clock + deadline);
ce78d18c 670 }
ac70aafc 671 } else if (deadline == 0) {
40daca54 672 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
946fb27c
PB
673 }
674}
675
e76d1798
PD
676static void qemu_account_warp_timer(void)
677{
678 if (!use_icount || !icount_sleep) {
679 return;
680 }
681
682 /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
683 * do not fire, so computing the deadline does not make sense.
684 */
685 if (!runstate_is_running()) {
686 return;
687 }
688
689 /* warp clock deterministically in record/replay mode */
690 if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
691 return;
692 }
693
b39e3f34 694 timer_del(timers_state.icount_warp_timer);
e76d1798
PD
695 icount_warp_rt();
696}
697
d09eae37
FK
698static bool icount_state_needed(void *opaque)
699{
700 return use_icount;
701}
702
b39e3f34
PD
703static bool warp_timer_state_needed(void *opaque)
704{
705 TimersState *s = opaque;
706 return s->icount_warp_timer != NULL;
707}
708
709static bool adjust_timers_state_needed(void *opaque)
710{
711 TimersState *s = opaque;
712 return s->icount_rt_timer != NULL;
713}
714
715/*
716 * Subsection for warp timer migration is optional, because may not be created
717 */
718static const VMStateDescription icount_vmstate_warp_timer = {
719 .name = "timer/icount/warp_timer",
720 .version_id = 1,
721 .minimum_version_id = 1,
722 .needed = warp_timer_state_needed,
723 .fields = (VMStateField[]) {
724 VMSTATE_INT64(vm_clock_warp_start, TimersState),
725 VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
726 VMSTATE_END_OF_LIST()
727 }
728};
729
730static const VMStateDescription icount_vmstate_adjust_timers = {
731 .name = "timer/icount/timers",
732 .version_id = 1,
733 .minimum_version_id = 1,
734 .needed = adjust_timers_state_needed,
735 .fields = (VMStateField[]) {
736 VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
737 VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
738 VMSTATE_END_OF_LIST()
739 }
740};
741
d09eae37
FK
742/*
743 * This is a subsection for icount migration.
744 */
745static const VMStateDescription icount_vmstate_timers = {
746 .name = "timer/icount",
747 .version_id = 1,
748 .minimum_version_id = 1,
5cd8cada 749 .needed = icount_state_needed,
d09eae37
FK
750 .fields = (VMStateField[]) {
751 VMSTATE_INT64(qemu_icount_bias, TimersState),
752 VMSTATE_INT64(qemu_icount, TimersState),
753 VMSTATE_END_OF_LIST()
b39e3f34
PD
754 },
755 .subsections = (const VMStateDescription*[]) {
756 &icount_vmstate_warp_timer,
757 &icount_vmstate_adjust_timers,
758 NULL
d09eae37
FK
759 }
760};
761
946fb27c
PB
762static const VMStateDescription vmstate_timers = {
763 .name = "timer",
764 .version_id = 2,
765 .minimum_version_id = 1,
35d08458 766 .fields = (VMStateField[]) {
946fb27c 767 VMSTATE_INT64(cpu_ticks_offset, TimersState),
c1ff073c 768 VMSTATE_UNUSED(8),
946fb27c
PB
769 VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
770 VMSTATE_END_OF_LIST()
d09eae37 771 },
5cd8cada
JQ
772 .subsections = (const VMStateDescription*[]) {
773 &icount_vmstate_timers,
774 NULL
946fb27c
PB
775 }
776};
777
14e6fe12 778static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
2adcc85d 779{
2adcc85d
JH
780 double pct;
781 double throttle_ratio;
782 long sleeptime_ns;
783
784 if (!cpu_throttle_get_percentage()) {
785 return;
786 }
787
788 pct = (double)cpu_throttle_get_percentage()/100;
789 throttle_ratio = pct / (1 - pct);
790 sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS);
791
792 qemu_mutex_unlock_iothread();
2adcc85d
JH
793 g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */
794 qemu_mutex_lock_iothread();
90bb0c04 795 atomic_set(&cpu->throttle_thread_scheduled, 0);
2adcc85d
JH
796}
797
798static void cpu_throttle_timer_tick(void *opaque)
799{
800 CPUState *cpu;
801 double pct;
802
803 /* Stop the timer if needed */
804 if (!cpu_throttle_get_percentage()) {
805 return;
806 }
807 CPU_FOREACH(cpu) {
808 if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
14e6fe12
PB
809 async_run_on_cpu(cpu, cpu_throttle_thread,
810 RUN_ON_CPU_NULL);
2adcc85d
JH
811 }
812 }
813
814 pct = (double)cpu_throttle_get_percentage()/100;
815 timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
816 CPU_THROTTLE_TIMESLICE_NS / (1-pct));
817}
818
819void cpu_throttle_set(int new_throttle_pct)
820{
821 /* Ensure throttle percentage is within valid range */
822 new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
823 new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);
824
825 atomic_set(&throttle_percentage, new_throttle_pct);
826
827 timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
828 CPU_THROTTLE_TIMESLICE_NS);
829}
830
831void cpu_throttle_stop(void)
832{
833 atomic_set(&throttle_percentage, 0);
834}
835
/* True while a non-zero throttle percentage is in effect. */
bool cpu_throttle_active(void)
{
    return cpu_throttle_get_percentage() != 0;
}
840
841int cpu_throttle_get_percentage(void)
842{
843 return atomic_read(&throttle_percentage);
844}
845
4603ea01
PD
846void cpu_ticks_init(void)
847{
ccdb3c1f 848 seqlock_init(&timers_state.vm_clock_seqlock);
87a09cdc 849 qemu_spin_init(&timers_state.vm_clock_lock);
4603ea01 850 vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
2adcc85d
JH
851 throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
852 cpu_throttle_timer_tick, NULL);
4603ea01
PD
853}
854
1ad9580b 855void configure_icount(QemuOpts *opts, Error **errp)
946fb27c 856{
1ad9580b 857 const char *option;
a8bfac37 858 char *rem_str = NULL;
1ad9580b 859
1ad9580b 860 option = qemu_opt_get(opts, "shift");
946fb27c 861 if (!option) {
a8bfac37
ST
862 if (qemu_opt_get(opts, "align") != NULL) {
863 error_setg(errp, "Please specify shift option when using align");
864 }
946fb27c
PB
865 return;
866 }
f1f4b57e
VC
867
868 icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
5045e9d9 869 if (icount_sleep) {
b39e3f34 870 timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
e76d1798 871 icount_timer_cb, NULL);
5045e9d9 872 }
f1f4b57e 873
a8bfac37 874 icount_align_option = qemu_opt_get_bool(opts, "align", false);
f1f4b57e
VC
875
876 if (icount_align_option && !icount_sleep) {
778d9f9b 877 error_setg(errp, "align=on and sleep=off are incompatible");
f1f4b57e 878 }
946fb27c 879 if (strcmp(option, "auto") != 0) {
a8bfac37 880 errno = 0;
c1ff073c 881 timers_state.icount_time_shift = strtol(option, &rem_str, 0);
a8bfac37
ST
882 if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
883 error_setg(errp, "icount: Invalid shift value");
884 }
946fb27c
PB
885 use_icount = 1;
886 return;
a8bfac37
ST
887 } else if (icount_align_option) {
888 error_setg(errp, "shift=auto and align=on are incompatible");
f1f4b57e 889 } else if (!icount_sleep) {
778d9f9b 890 error_setg(errp, "shift=auto and sleep=off are incompatible");
946fb27c
PB
891 }
892
893 use_icount = 2;
894
895 /* 125MIPS seems a reasonable initial guess at the guest speed.
896 It will be corrected fairly quickly anyway. */
c1ff073c 897 timers_state.icount_time_shift = 3;
946fb27c
PB
898
899 /* Have both realtime and virtual time triggers for speed adjustment.
900 The realtime trigger catches emulated time passing too slowly,
901 the virtual time trigger catches emulated time passing too fast.
902 Realtime triggers occur even when idle, so use them less frequently
903 than VM triggers. */
b39e3f34
PD
904 timers_state.vm_clock_warp_start = -1;
905 timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
bf2a7ddb 906 icount_adjust_rt, NULL);
b39e3f34 907 timer_mod(timers_state.icount_rt_timer,
bf2a7ddb 908 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
b39e3f34 909 timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
40daca54 910 icount_adjust_vm, NULL);
b39e3f34 911 timer_mod(timers_state.icount_vm_timer,
40daca54 912 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
73bcb24d 913 NANOSECONDS_PER_SECOND / 10);
946fb27c
PB
914}
915
6546706d
AB
916/***********************************************************/
917/* TCG vCPU kick timer
918 *
919 * The kick timer is responsible for moving single threaded vCPU
920 * emulation on to the next vCPU. If more than one vCPU is running a
921 * timer event with force a cpu->exit so the next vCPU can get
922 * scheduled.
923 *
924 * The timer is removed if all vCPUs are idle and restarted again once
925 * idleness is complete.
926 */
927
928static QEMUTimer *tcg_kick_vcpu_timer;
791158d9 929static CPUState *tcg_current_rr_cpu;
6546706d
AB
930
931#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
932
933static inline int64_t qemu_tcg_next_kick(void)
934{
935 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
936}
937
791158d9
AB
938/* Kick the currently round-robin scheduled vCPU */
939static void qemu_cpu_kick_rr_cpu(void)
940{
941 CPUState *cpu;
791158d9
AB
942 do {
943 cpu = atomic_mb_read(&tcg_current_rr_cpu);
944 if (cpu) {
945 cpu_exit(cpu);
946 }
947 } while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
948}
949
6b8f0187
PB
950static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
951{
952}
953
3f53bc61
PB
954void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
955{
6b8f0187
PB
956 if (!use_icount || type != QEMU_CLOCK_VIRTUAL) {
957 qemu_notify_event();
958 return;
959 }
960
c52e7132
PM
961 if (qemu_in_vcpu_thread()) {
962 /* A CPU is currently running; kick it back out to the
963 * tcg_cpu_exec() loop so it will recalculate its
964 * icount deadline immediately.
965 */
966 qemu_cpu_kick(current_cpu);
967 } else if (first_cpu) {
6b8f0187
PB
968 /* qemu_cpu_kick is not enough to kick a halted CPU out of
969 * qemu_tcg_wait_io_event. async_run_on_cpu, instead,
970 * causes cpu_thread_is_idle to return false. This way,
971 * handle_icount_deadline can run.
c52e7132
PM
972 * If we have no CPUs at all for some reason, we don't
973 * need to do anything.
6b8f0187
PB
974 */
975 async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
976 }
3f53bc61
PB
977}
978
6546706d
AB
979static void kick_tcg_thread(void *opaque)
980{
981 timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
791158d9 982 qemu_cpu_kick_rr_cpu();
6546706d
AB
983}
984
/* (Re)arm the round-robin kick timer.  Only meaningful for single-threaded
 * TCG, and only when more than one vCPU exists.
 */
static void start_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);

    /* Lazily create the timer the first time a second vCPU shows up. */
    if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
        tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                           kick_tcg_thread, NULL);
    }
    if (tcg_kick_vcpu_timer && !timer_pending(tcg_kick_vcpu_timer)) {
        timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    }
}
996
/* Disarm the round-robin kick timer while every vCPU is idle. */
static void stop_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);

    if (tcg_kick_vcpu_timer && timer_pending(tcg_kick_vcpu_timer)) {
        timer_del(tcg_kick_vcpu_timer);
    }
}
1004
296af7c9
BS
1005/***********************************************************/
/* Report an unrecoverable hardware-emulation error, dump every vCPU's
 * register state (including FPU) to stderr, and abort.  Never returns.
 */
void hw_error(const char *fmt, ...)
{
    va_list args;
    CPUState *cs;

    va_start(args, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, args);
    fprintf(stderr, "\n");
    CPU_FOREACH(cs) {
        fprintf(stderr, "CPU #%d:\n", cs->cpu_index);
        cpu_dump_state(cs, stderr, CPU_DUMP_FPU);
    }
    va_end(args);
    abort();
}
1022
1023void cpu_synchronize_all_states(void)
1024{
182735ef 1025 CPUState *cpu;
296af7c9 1026
bdc44640 1027 CPU_FOREACH(cpu) {
182735ef 1028 cpu_synchronize_state(cpu);
c97d6d2c
SAGDR
1029 /* TODO: move to cpu_synchronize_state() */
1030 if (hvf_enabled()) {
1031 hvf_cpu_synchronize_state(cpu);
1032 }
296af7c9
BS
1033 }
1034}
1035
1036void cpu_synchronize_all_post_reset(void)
1037{
182735ef 1038 CPUState *cpu;
296af7c9 1039
bdc44640 1040 CPU_FOREACH(cpu) {
182735ef 1041 cpu_synchronize_post_reset(cpu);
c97d6d2c
SAGDR
1042 /* TODO: move to cpu_synchronize_post_reset() */
1043 if (hvf_enabled()) {
1044 hvf_cpu_synchronize_post_reset(cpu);
1045 }
296af7c9
BS
1046 }
1047}
1048
1049void cpu_synchronize_all_post_init(void)
1050{
182735ef 1051 CPUState *cpu;
296af7c9 1052
bdc44640 1053 CPU_FOREACH(cpu) {
182735ef 1054 cpu_synchronize_post_init(cpu);
c97d6d2c
SAGDR
1055 /* TODO: move to cpu_synchronize_post_init() */
1056 if (hvf_enabled()) {
1057 hvf_cpu_synchronize_post_init(cpu);
1058 }
296af7c9
BS
1059 }
1060}
1061
75e972da
DG
1062void cpu_synchronize_all_pre_loadvm(void)
1063{
1064 CPUState *cpu;
1065
1066 CPU_FOREACH(cpu) {
1067 cpu_synchronize_pre_loadvm(cpu);
1068 }
1069}
1070
4486e89c 1071static int do_vm_stop(RunState state, bool send_stop)
296af7c9 1072{
56983463
KW
1073 int ret = 0;
1074
1354869c 1075 if (runstate_is_running()) {
296af7c9 1076 cpu_disable_ticks();
296af7c9 1077 pause_all_vcpus();
f5bbfba1 1078 runstate_set(state);
1dfb4dd9 1079 vm_state_notify(0, state);
4486e89c 1080 if (send_stop) {
3ab72385 1081 qapi_event_send_stop();
4486e89c 1082 }
296af7c9 1083 }
56983463 1084
594a45ce 1085 bdrv_drain_all();
6d0ceb80 1086 replay_disable_events();
22af08ea 1087 ret = bdrv_flush_all();
594a45ce 1088
56983463 1089 return ret;
296af7c9
BS
1090}
1091
4486e89c
SH
1092/* Special vm_stop() variant for terminating the process. Historically clients
1093 * did not expect a QMP STOP event and so we need to retain compatibility.
1094 */
1095int vm_shutdown(void)
1096{
1097 return do_vm_stop(RUN_STATE_SHUTDOWN, false);
1098}
1099
a1fcaa73 1100static bool cpu_can_run(CPUState *cpu)
296af7c9 1101{
4fdeee7c 1102 if (cpu->stop) {
a1fcaa73 1103 return false;
0ab07c62 1104 }
321bc0b2 1105 if (cpu_is_stopped(cpu)) {
a1fcaa73 1106 return false;
0ab07c62 1107 }
a1fcaa73 1108 return true;
296af7c9
BS
1109}
1110
91325046 1111static void cpu_handle_guest_debug(CPUState *cpu)
83f338f7 1112{
64f6b346 1113 gdb_set_stop_cpu(cpu);
8cf71710 1114 qemu_system_debug_request();
f324e766 1115 cpu->stopped = true;
3c638d06
JK
1116}
1117
6d9cb73c
JK
1118#ifdef CONFIG_LINUX
/* Restore the default SIGBUS disposition and re-raise the signal so the
 * process dies with the original cause.  Only reached for SIGBUS codes we
 * do not handle ourselves.  Never returns.
 */
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        /* Unblock so the pending SIGBUS is delivered with SIG_DFL. */
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
    }
    /* Fix: no embedded '\n' -- perror() appends ": <errno string>\n"
     * itself, so a newline inside the message split the output in two.
     */
    perror("Failed to re-raise SIGBUS!");
    abort();
}
1135
/* SIGBUS handler: forward hardware memory-error reports (MCE) to KVM,
 * re-raise anything we cannot handle.
 */
static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx)
{
    /* Only machine-check action-optional/action-required faults are ours. */
    if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) {
        sigbus_reraise();
    }

    if (current_cpu) {
        /* Called asynchronously in VCPU thread. */
        if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    } else {
        /* Called synchronously (via signalfd) in main thread. */
        if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    }
}
1154
/* Install the SIGBUS handler and opt this process into early machine-check
 * kills so memory errors are reported while the page is still mapped.
 */
static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
6d9cb73c 1166#else /* !CONFIG_LINUX */
6d9cb73c
JK
/* Non-Linux stub: no SIGBUS/MCE handling available. */
static void qemu_init_sigbus(void)
{
}
a16fc07e 1170#endif /* !CONFIG_LINUX */
ff48eb5f 1171
b2532d88 1172static QemuMutex qemu_global_mutex;
296af7c9
BS
1173
1174static QemuThread io_thread;
1175
296af7c9
BS
1176/* cpu creation */
1177static QemuCond qemu_cpu_cond;
1178/* system init */
296af7c9
BS
1179static QemuCond qemu_pause_cond;
1180
d3b12f5d 1181void qemu_init_cpu_loop(void)
296af7c9 1182{
6d9cb73c 1183 qemu_init_sigbus();
ed94592b 1184 qemu_cond_init(&qemu_cpu_cond);
ed94592b 1185 qemu_cond_init(&qemu_pause_cond);
296af7c9 1186 qemu_mutex_init(&qemu_global_mutex);
296af7c9 1187
b7680cb6 1188 qemu_thread_get_self(&io_thread);
296af7c9
BS
1189}
1190
14e6fe12 1191void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
e82bcec2 1192{
d148d90e 1193 do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
3c02270d
CV
1194}
1195
4c055ab5
GZ
/* Tear down a KVM vCPU; a failure here is unrecoverable. */
static void qemu_kvm_destroy_vcpu(CPUState *cpu)
{
    if (kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}
1203
1204static void qemu_tcg_destroy_vcpu(CPUState *cpu)
1205{
1206}
1207
ebd05fea
DH
1208static void qemu_cpu_stop(CPUState *cpu, bool exit)
1209{
1210 g_assert(qemu_cpu_is_self(cpu));
1211 cpu->stop = false;
1212 cpu->stopped = true;
1213 if (exit) {
1214 cpu_exit(cpu);
1215 }
1216 qemu_cond_broadcast(&qemu_pause_cond);
1217}
1218
509a0d78 1219static void qemu_wait_io_event_common(CPUState *cpu)
296af7c9 1220{
37257942 1221 atomic_mb_set(&cpu->thread_kicked, false);
4fdeee7c 1222 if (cpu->stop) {
ebd05fea 1223 qemu_cpu_stop(cpu, false);
296af7c9 1224 }
a5403c69 1225 process_queued_cpu_work(cpu);
37257942
AB
1226}
1227
a8efa606 1228static void qemu_tcg_rr_wait_io_event(void)
37257942 1229{
a8efa606
PB
1230 CPUState *cpu;
1231
db08b687 1232 while (all_cpu_threads_idle()) {
6546706d 1233 stop_tcg_kick_timer();
a8efa606 1234 qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
16400322 1235 }
296af7c9 1236
6546706d
AB
1237 start_tcg_kick_timer();
1238
a8efa606
PB
1239 CPU_FOREACH(cpu) {
1240 qemu_wait_io_event_common(cpu);
1241 }
296af7c9
BS
1242}
1243
db08b687 1244static void qemu_wait_io_event(CPUState *cpu)
296af7c9 1245{
a98ae1d8 1246 while (cpu_thread_is_idle(cpu)) {
f5c121b8 1247 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
16400322 1248 }
296af7c9 1249
db08b687
PB
1250#ifdef _WIN32
1251 /* Eat dummy APC queued by qemu_cpu_kick_thread. */
1252 if (!tcg_enabled()) {
1253 SleepEx(0, TRUE);
c97d6d2c 1254 }
db08b687 1255#endif
c97d6d2c
SAGDR
1256 qemu_wait_io_event_common(cpu);
1257}
1258
/* Thread body for a KVM vCPU: initialize the in-kernel vCPU, then loop
 * executing guest code until the vCPU is unplugged.
 */
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int ret;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    ret = kvm_init_vcpu(cpu);
    if (ret < 0) {
        error_report("kvm_init_vcpu failed: %s", strerror(-ret));
        exit(1);
    }

    kvm_init_cpu_signals(cpu);

    /* Handshake: tell qemu_*_start_vcpu() that we exist. */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    do {
        if (cpu_can_run(cpu)) {
            ret = kvm_cpu_exec(cpu);
            if (ret == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    /* Unplugged: tear down and report destruction. */
    qemu_kvm_destroy_vcpu(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}
1302
c7f0f3b1
AL
/* Thread body for the qtest "dummy" accelerator: no guest code ever runs;
 * the thread just parks in sigwait() and services queued work.
 */
static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    error_report("qtest is not supported under Windows");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int ret;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* Handshake: tell qemu_dummy_start_vcpu() that we exist. */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    do {
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            ret = sigwait(&waitset, &sig);
        } while (ret == -1 && (errno == EAGAIN || errno == EINTR));
        if (ret == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug);

    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
#endif
}
1348
1be7fcb8
AB
/* How many instructions the next TCG slice may execute under icount. */
static int64_t tcg_get_icount_limit(void)
{
    int64_t deadline;

    if (replay_mode == REPLAY_MODE_PLAY) {
        /* During replay the recording dictates the budget. */
        return replay_get_instructions();
    }

    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

    /* Maintain prior (possibly buggy) behaviour where if no deadline
     * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more
     * than INT32_MAX nanoseconds ahead, we still use INT32_MAX
     * nanoseconds.
     */
    if (deadline < 0 || deadline > INT32_MAX) {
        deadline = INT32_MAX;
    }

    return qemu_icount_round(deadline);
}
1370
12e9700d
AB
/* If the virtual clock deadline has been reached, fire its timers directly
 * from the vCPU thread (cheaper than round-tripping via the I/O thread)
 * and notify other AioContexts.
 */
static void handle_icount_deadline(void)
{
    assert(qemu_in_vcpu_thread());
    if (use_icount) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        if (deadline == 0) {
            /* Wake up other AioContexts. */
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        }
    }
}
1385
05248382 1386static void prepare_icount_for_run(CPUState *cpu)
1be7fcb8 1387{
1be7fcb8 1388 if (use_icount) {
eda5f7c6 1389 int insns_left;
05248382
AB
1390
1391 /* These should always be cleared by process_icount_data after
1392 * each vCPU execution. However u16.high can be raised
1393 * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
1394 */
5e140196 1395 g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
05248382
AB
1396 g_assert(cpu->icount_extra == 0);
1397
eda5f7c6
AB
1398 cpu->icount_budget = tcg_get_icount_limit();
1399 insns_left = MIN(0xffff, cpu->icount_budget);
5e140196 1400 cpu_neg(cpu)->icount_decr.u16.low = insns_left;
eda5f7c6 1401 cpu->icount_extra = cpu->icount_budget - insns_left;
d759c951
AB
1402
1403 replay_mutex_lock();
1be7fcb8 1404 }
05248382
AB
1405}
1406
1407static void process_icount_data(CPUState *cpu)
1408{
1be7fcb8 1409 if (use_icount) {
e4cd9657 1410 /* Account for executed instructions */
512d3c80 1411 cpu_update_icount(cpu);
05248382
AB
1412
1413 /* Reset the counters */
5e140196 1414 cpu_neg(cpu)->icount_decr.u16.low = 0;
1be7fcb8 1415 cpu->icount_extra = 0;
e4cd9657
AB
1416 cpu->icount_budget = 0;
1417
1be7fcb8 1418 replay_account_executed_instructions();
d759c951
AB
1419
1420 replay_mutex_unlock();
1be7fcb8 1421 }
05248382
AB
1422}
1423
1424
/* Execute a slice of guest code on @cpu via TCG, optionally accounting the
 * wall-clock time spent to the profiler.  Returns the cpu_exec() result
 * (EXCP_* code).
 */
static int tcg_cpu_exec(CPUState *cpu)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t start_clock;
#endif

    assert(tcg_enabled());
#ifdef CONFIG_PROFILER
    start_clock = profile_getclock();
#endif
    cpu_exec_start(cpu);
    ret = cpu_exec(cpu);
    cpu_exec_end(cpu);
#ifdef CONFIG_PROFILER
    atomic_set(&tcg_ctx->prof.cpu_exec_time,
               tcg_ctx->prof.cpu_exec_time + profile_getclock() - start_clock);
#endif
    return ret;
}
1445
c93bbbef
AB
1446/* Destroy any remaining vCPUs which have been unplugged and have
1447 * finished running
1448 */
1449static void deal_with_unplugged_cpus(void)
1be7fcb8 1450{
c93bbbef 1451 CPUState *cpu;
1be7fcb8 1452
c93bbbef
AB
1453 CPU_FOREACH(cpu) {
1454 if (cpu->unplug && !cpu_can_run(cpu)) {
1455 qemu_tcg_destroy_vcpu(cpu);
1456 cpu->created = false;
1457 qemu_cond_signal(&qemu_cpu_cond);
1be7fcb8
AB
1458 break;
1459 }
1460 }
1be7fcb8 1461}
bdb7ca67 1462
6546706d
AB
1463/* Single-threaded TCG
1464 *
1465 * In the single-threaded case each vCPU is simulated in turn. If
1466 * there is more than a single vCPU we create a simple timer to kick
1467 * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
1468 * This is done explicitly rather than relying on side-effects
1469 * elsewhere.
1470 */
1471
/* Thread body for single-threaded (round-robin) TCG: one host thread
 * simulates every vCPU in turn.  NOTE(review): statement order here
 * (BQL/replay-lock interleaving, exit_request handling) is load-bearing
 * and is preserved exactly.
 */
static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    assert(tcg_enabled());
    rcu_register_thread();
    tcg_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->created = true;
    cpu->can_do_io = 1;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    /* Wait for the initial kick-off after machine start. */
    while (first_cpu->stopped) {
        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);

        /* Process any pending work. */
        CPU_FOREACH(cpu) {
            current_cpu = cpu;
            qemu_wait_io_event_common(cpu);
        }
    }

    start_tcg_kick_timer();

    cpu = first_cpu;

    /* Process any pending work. */
    cpu->exit_request = 1;

    while (1) {
        /* Take the replay lock outside the BQL to keep lock order. */
        qemu_mutex_unlock_iothread();
        replay_mutex_lock();
        qemu_mutex_lock_iothread();

        /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
        qemu_account_warp_timer();

        /* Run the timers here: much more efficient than waking up the
         * I/O thread and waiting for completion.
         */
        handle_icount_deadline();

        replay_mutex_unlock();

        if (!cpu) {
            cpu = first_cpu;
        }

        while (cpu && !cpu->queued_work_first && !cpu->exit_request) {

            atomic_mb_set(&tcg_current_rr_cpu, cpu);
            current_cpu = cpu;

            qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                              (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

            if (cpu_can_run(cpu)) {
                int r;

                qemu_mutex_unlock_iothread();
                prepare_icount_for_run(cpu);

                r = tcg_cpu_exec(cpu);

                process_icount_data(cpu);
                qemu_mutex_lock_iothread();

                if (r == EXCP_DEBUG) {
                    cpu_handle_guest_debug(cpu);
                    break;
                } else if (r == EXCP_ATOMIC) {
                    /* Exclusive single-stepped execution of an atomic op. */
                    qemu_mutex_unlock_iothread();
                    cpu_exec_step_atomic(cpu);
                    qemu_mutex_lock_iothread();
                    break;
                }
            } else if (cpu->stop) {
                if (cpu->unplug) {
                    cpu = CPU_NEXT(cpu);
                }
                break;
            }

            cpu = CPU_NEXT(cpu);
        } /* while (cpu && !cpu->exit_request).. */

        /* Does not need atomic_mb_set because a spurious wakeup is okay. */
        atomic_set(&tcg_current_rr_cpu, NULL);

        if (cpu && cpu->exit_request) {
            atomic_mb_set(&cpu->exit_request, 0);
        }

        if (use_icount && all_cpu_threads_idle()) {
            /* When all cpus are sleeping (e.g. in WFI), avoid a deadlock
             * in the main loop by waking it so the warp timer starts.
             */
            qemu_notify_event();
        }

        qemu_tcg_rr_wait_io_event();
        deal_with_unplugged_cpus();
    }

    rcu_unregister_thread();
    return NULL;
}
1585
b0cb0a66
VP
1586static void *qemu_hax_cpu_thread_fn(void *arg)
1587{
1588 CPUState *cpu = arg;
1589 int r;
b3d3a426 1590
9857c2d2 1591 rcu_register_thread();
b3d3a426 1592 qemu_mutex_lock_iothread();
b0cb0a66 1593 qemu_thread_get_self(cpu->thread);
b0cb0a66
VP
1594
1595 cpu->thread_id = qemu_get_thread_id();
1596 cpu->created = true;
b0cb0a66
VP
1597 current_cpu = cpu;
1598
1599 hax_init_vcpu(cpu);
1600 qemu_cond_signal(&qemu_cpu_cond);
9c09a251 1601 qemu_guest_random_seed_thread_part2(cpu->random_seed);
b0cb0a66 1602
9857c2d2 1603 do {
b0cb0a66
VP
1604 if (cpu_can_run(cpu)) {
1605 r = hax_smp_cpu_exec(cpu);
1606 if (r == EXCP_DEBUG) {
1607 cpu_handle_guest_debug(cpu);
1608 }
1609 }
1610
db08b687 1611 qemu_wait_io_event(cpu);
9857c2d2
PB
1612 } while (!cpu->unplug || cpu_can_run(cpu));
1613 rcu_unregister_thread();
b0cb0a66
VP
1614 return NULL;
1615}
1616
c97d6d2c
SAGDR
/* The HVF-specific vCPU thread function.  This one should only run when
 * the host CPU supports the VMX "unrestricted guest" feature.
 */
static void *qemu_hvf_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int ret;

    assert(hvf_enabled());

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    hvf_init_vcpu(cpu);

    /* Handshake: tell qemu_hvf_start_vcpu() that we exist. */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    do {
        if (cpu_can_run(cpu)) {
            ret = hvf_vcpu_exec(cpu);
            if (ret == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    hvf_vcpu_destroy(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}
1660
19306806
JTV
/* Thread body for a WHPX (Windows Hypervisor Platform) vCPU. */
static void *qemu_whpx_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int ret;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    ret = whpx_init_vcpu(cpu);
    if (ret < 0) {
        fprintf(stderr, "whpx_init_vcpu failed: %s\n", strerror(-ret));
        exit(1);
    }

    /* Handshake: tell qemu_whpx_start_vcpu() that we exist. */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    do {
        if (cpu_can_run(cpu)) {
            ret = whpx_vcpu_exec(cpu);
            if (ret == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        /* Open-coded idle wait: WHPX does not use qemu_wait_io_event()'s
         * dummy-APC draining.
         */
        while (cpu_thread_is_idle(cpu)) {
            qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
        }
        qemu_wait_io_event_common(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    whpx_destroy_vcpu(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}
1704
b0cb0a66
VP
#ifdef _WIN32
/* Empty APC callback: queueing it merely interrupts an alertable wait. */
static void CALLBACK dummy_apc_func(ULONG_PTR unused)
{
}
#endif
1710
37257942
AB
1711/* Multi-threaded TCG
1712 *
1713 * In the multi-threaded case each vCPU has its own thread. The TLS
1714 * variable current_cpu can be used deep in the code to find the
1715 * current CPUState for a given thread.
1716 */
1717
/* Thread body for one MTTCG vCPU: each vCPU owns its own host thread and
 * current_cpu is the thread-local handle to it.  icount is incompatible
 * with MTTCG.
 */
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    assert(tcg_enabled());
    g_assert(!use_icount);

    rcu_register_thread();
    tcg_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->created = true;
    cpu->can_do_io = 1;
    current_cpu = cpu;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    /* Process any pending work. */
    cpu->exit_request = 1;

    do {
        if (cpu_can_run(cpu)) {
            int r;

            qemu_mutex_unlock_iothread();
            r = tcg_cpu_exec(cpu);
            qemu_mutex_lock_iothread();
            switch (r) {
            case EXCP_DEBUG:
                cpu_handle_guest_debug(cpu);
                break;
            case EXCP_HALTED:
                /* During start-up the vCPU is reset and the thread is
                 * kicked several times.  If we don't ensure we go back
                 * to sleep in the halted state we won't cleanly
                 * start up when the vCPU is enabled.
                 *
                 * cpu->halted should ensure we sleep in wait_io_event.
                 */
                g_assert(cpu->halted);
                break;
            case EXCP_ATOMIC:
                qemu_mutex_unlock_iothread();
                cpu_exec_step_atomic(cpu);
                qemu_mutex_lock_iothread();
                /* fallthrough */
            default:
                /* Ignore everything else? */
                break;
            }
        }

        atomic_mb_set(&cpu->exit_request, 0);
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    qemu_tcg_destroy_vcpu(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}
1782
/* Interrupt a vCPU's host thread: SIG_IPI on POSIX; on Windows, a WHPX
 * kick or a dummy APC to break an alertable wait.
 */
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    if (cpu->thread_kicked) {
        return; /* a kick is already in flight */
    }
    cpu->thread_kicked = true;
    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    /* ESRCH: the thread already exited; that is not an error here. */
    if (err && err != ESRCH) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        if (whpx_enabled()) {
            whpx_vcpu_kick(cpu);
        } else if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) {
            fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n",
                    __func__, GetLastError());
            exit(1);
        }
    }
#endif
}
ed9164a3 1809
c08d7424 1810void qemu_cpu_kick(CPUState *cpu)
296af7c9 1811{
f5c121b8 1812 qemu_cond_broadcast(cpu->halt_cond);
e0c38211 1813 if (tcg_enabled()) {
791158d9 1814 cpu_exit(cpu);
37257942 1815 /* NOP unless doing single-thread RR */
791158d9 1816 qemu_cpu_kick_rr_cpu();
e0c38211 1817 } else {
b0cb0a66
VP
1818 if (hax_enabled()) {
1819 /*
1820 * FIXME: race condition with the exit_request check in
1821 * hax_vcpu_hax_exec
1822 */
1823 cpu->exit_request = 1;
1824 }
e0c38211
PB
1825 qemu_cpu_kick_thread(cpu);
1826 }
296af7c9
BS
1827}
1828
/* Kick the calling vCPU thread itself; only valid on a vCPU thread. */
void qemu_cpu_kick_self(void)
{
    assert(current_cpu);
    qemu_cpu_kick_thread(current_cpu);
}
1834
/* True when the calling thread is @cpu's host thread. */
bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}
1839
/* True when the calling thread is any vCPU thread (not the I/O thread). */
bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}
1844
afbe7053
PB
1845static __thread bool iothread_locked = false;
1846
/* True when the calling thread currently holds the BQL. */
bool qemu_mutex_iothread_locked(void)
{
    return iothread_locked;
}
1851
cb764d06
EC
1852/*
1853 * The BQL is taken from so many places that it is worth profiling the
1854 * callers directly, instead of funneling them all through a single function.
1855 */
1856void qemu_mutex_lock_iothread_impl(const char *file, int line)
296af7c9 1857{
cb764d06
EC
1858 QemuMutexLockFunc bql_lock = atomic_read(&qemu_bql_mutex_lock_func);
1859
8d04fb55 1860 g_assert(!qemu_mutex_iothread_locked());
cb764d06 1861 bql_lock(&qemu_global_mutex, file, line);
afbe7053 1862 iothread_locked = true;
296af7c9
BS
1863}
1864
1865void qemu_mutex_unlock_iothread(void)
1866{
8d04fb55 1867 g_assert(qemu_mutex_iothread_locked());
afbe7053 1868 iothread_locked = false;
296af7c9
BS
1869 qemu_mutex_unlock(&qemu_global_mutex);
1870}
1871
/* True once every vCPU has reported itself stopped. */
static bool all_vcpus_paused(void)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        if (!cs->stopped) {
            return false;
        }
    }

    return true;
}
1884
1885void pause_all_vcpus(void)
1886{
bdc44640 1887 CPUState *cpu;
296af7c9 1888
40daca54 1889 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
bdc44640 1890 CPU_FOREACH(cpu) {
ebd05fea
DH
1891 if (qemu_cpu_is_self(cpu)) {
1892 qemu_cpu_stop(cpu, true);
1893 } else {
1894 cpu->stop = true;
1895 qemu_cpu_kick(cpu);
1896 }
d798e974
JK
1897 }
1898
d759c951
AB
1899 /* We need to drop the replay_lock so any vCPU threads woken up
1900 * can finish their replay tasks
1901 */
1902 replay_mutex_unlock();
1903
296af7c9 1904 while (!all_vcpus_paused()) {
be7d6c57 1905 qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
bdc44640 1906 CPU_FOREACH(cpu) {
182735ef 1907 qemu_cpu_kick(cpu);
296af7c9
BS
1908 }
1909 }
d759c951
AB
1910
1911 qemu_mutex_unlock_iothread();
1912 replay_mutex_lock();
1913 qemu_mutex_lock_iothread();
296af7c9
BS
1914}
1915
2993683b
IM
1916void cpu_resume(CPUState *cpu)
1917{
1918 cpu->stop = false;
1919 cpu->stopped = false;
1920 qemu_cpu_kick(cpu);
1921}
1922
296af7c9
BS
1923void resume_all_vcpus(void)
1924{
bdc44640 1925 CPUState *cpu;
296af7c9 1926
40daca54 1927 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
bdc44640 1928 CPU_FOREACH(cpu) {
182735ef 1929 cpu_resume(cpu);
296af7c9
BS
1930 }
1931}
1932
dbadee4f 1933void cpu_remove_sync(CPUState *cpu)
4c055ab5
GZ
1934{
1935 cpu->stop = true;
1936 cpu->unplug = true;
1937 qemu_cpu_kick(cpu);
dbadee4f
PB
1938 qemu_mutex_unlock_iothread();
1939 qemu_thread_join(cpu->thread);
1940 qemu_mutex_lock_iothread();
2c579042
BR
1941}
1942
4900116e
DDAG
1943/* For temporary buffers for forming a name */
1944#define VCPU_THREAD_NAME_SIZE 16
1945
/* Create (or attach to) the host thread for a TCG vCPU: one thread per
 * vCPU under MTTCG, a single shared round-robin thread otherwise.
 */
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];
    static QemuCond *single_tcg_halt_cond;
    static QemuThread *single_tcg_cpu_thread;
    static int tcg_region_inited;

    assert(tcg_enabled());
    /*
     * Initialize TCG regions--once.  Now is a good time, because:
     * (1) TCG's init context, prologue and target globals have been set up.
     * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the
     *     -accel flag is processed, so the check doesn't work then).
     */
    if (!tcg_region_inited) {
        tcg_region_inited = 1;
        tcg_region_init();
    }

    if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);

        if (qemu_tcg_mttcg_enabled()) {
            /* Create a thread per vCPU with TCG (MTTCG). */
            parallel_cpus = true;
            snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                     cpu->cpu_index);

            qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
                               cpu, QEMU_THREAD_JOINABLE);

        } else {
            /* Share a single thread for all cpus with TCG. */
            snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
            qemu_thread_create(cpu->thread, thread_name,
                               qemu_tcg_rr_cpu_thread_fn,
                               cpu, QEMU_THREAD_JOINABLE);

            single_tcg_halt_cond = cpu->halt_cond;
            single_tcg_cpu_thread = cpu->thread;
        }
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
    } else {
        /* For non-MTTCG cases we share the already-running RR thread. */
        cpu->thread = single_tcg_cpu_thread;
        cpu->halt_cond = single_tcg_halt_cond;
        cpu->thread_id = first_cpu->thread_id;
        cpu->can_do_io = 1;
        cpu->created = true;
    }
}
2001
b0cb0a66
VP
/* Spawn the host thread for a HAX vCPU. */
static void qemu_hax_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);

    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
    cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
}
2018
/* Spawn the host thread for a KVM vCPU. */
static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
}
2031
c97d6d2c
SAGDR
2032static void qemu_hvf_start_vcpu(CPUState *cpu)
2033{
2034 char thread_name[VCPU_THREAD_NAME_SIZE];
2035
2036 /* HVF currently does not support TCG, and only runs in
2037 * unrestricted-guest mode. */
2038 assert(hvf_enabled());
2039
2040 cpu->thread = g_malloc0(sizeof(QemuThread));
2041 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
2042 qemu_cond_init(cpu->halt_cond);
2043
2044 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
2045 cpu->cpu_index);
2046 qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn,
2047 cpu, QEMU_THREAD_JOINABLE);
c97d6d2c
SAGDR
2048}
2049
19306806
JTV
2050static void qemu_whpx_start_vcpu(CPUState *cpu)
2051{
2052 char thread_name[VCPU_THREAD_NAME_SIZE];
2053
2054 cpu->thread = g_malloc0(sizeof(QemuThread));
2055 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
2056 qemu_cond_init(cpu->halt_cond);
2057 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX",
2058 cpu->cpu_index);
2059 qemu_thread_create(cpu->thread, thread_name, qemu_whpx_cpu_thread_fn,
2060 cpu, QEMU_THREAD_JOINABLE);
2061#ifdef _WIN32
2062 cpu->hThread = qemu_thread_get_handle(cpu->thread);
2063#endif
19306806
JTV
2064}
2065
10a9021d 2066static void qemu_dummy_start_vcpu(CPUState *cpu)
c7f0f3b1 2067{
4900116e
DDAG
2068 char thread_name[VCPU_THREAD_NAME_SIZE];
2069
814e612e 2070 cpu->thread = g_malloc0(sizeof(QemuThread));
f5c121b8
AF
2071 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
2072 qemu_cond_init(cpu->halt_cond);
4900116e
DDAG
2073 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
2074 cpu->cpu_index);
2075 qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
c7f0f3b1 2076 QEMU_THREAD_JOINABLE);
c7f0f3b1
AL
2077}
2078
/*
 * Bring up the execution thread(s) for @cpu and block until the vCPU
 * thread has flagged itself as created.
 *
 * NOTE(review): the qemu_cond_wait() on qemu_global_mutex below implies
 * the caller holds that mutex (the BQL) on entry — confirm at call sites.
 */
void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    cpu->random_seed = qemu_guest_random_seed_thread_part1();

    if (!cpu->as) {
        /* If the target cpu hasn't set up any address spaces itself,
         * give it the default one.
         */
        cpu->num_ases = 1;
        cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
    }

    /* Dispatch on the configured accelerator; exactly one branch runs.
     * NOTE(review): tcg_enabled() is tested before whpx_enabled(), so
     * TCG wins if both report enabled — verify that ordering is
     * intentional before reordering. */
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (hax_enabled()) {
        qemu_hax_start_vcpu(cpu);
    } else if (hvf_enabled()) {
        qemu_hvf_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else if (whpx_enabled()) {
        qemu_whpx_start_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }

    /* Wait for the new thread to set cpu->created and signal
     * qemu_cpu_cond. */
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
2112
b4a3d965 2113void cpu_stop_current(void)
296af7c9 2114{
4917cf44 2115 if (current_cpu) {
0ec7e677
PM
2116 current_cpu->stop = true;
2117 cpu_exit(current_cpu);
b4a3d965 2118 }
296af7c9
BS
2119}
2120
56983463 2121int vm_stop(RunState state)
296af7c9 2122{
aa723c23 2123 if (qemu_in_vcpu_thread()) {
74892d24 2124 qemu_system_vmstop_request_prepare();
1dfb4dd9 2125 qemu_system_vmstop_request(state);
296af7c9
BS
2126 /*
2127 * FIXME: should not return to device code in case
2128 * vm_stop() has been requested.
2129 */
b4a3d965 2130 cpu_stop_current();
56983463 2131 return 0;
296af7c9 2132 }
56983463 2133
4486e89c 2134 return do_vm_stop(state, true);
296af7c9
BS
2135}
2136
/**
 * Prepare for (re)starting the VM.
 * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
 * running or in case of an error condition), 0 otherwise.
 */
int vm_prepare_start(void)
{
    RunState requested;

    qemu_vmstop_requested(&requested);
    /* Already running with no stop request pending: nothing to do. */
    if (runstate_is_running() && requested == RUN_STATE__MAX) {
        return -1;
    }

    /* Ensure that a STOP/RESUME pair of events is emitted if a
     * vmstop request was pending. The BLOCK_IO_ERROR event, for
     * example, according to documentation is always followed by
     * the STOP event.
     */
    if (runstate_is_running()) {
        qapi_event_send_stop();
        qapi_event_send_resume();
        return -1;
    }

    /* We are sending this now, but the CPUs will be resumed shortly later */
    qapi_event_send_resume();

    /* Transition to RUNNING: re-enable replay events and the ticks
     * counter before notifying vm-state change handlers. */
    replay_enable_events();
    cpu_enable_ticks();
    runstate_set(RUN_STATE_RUNNING);
    vm_state_notify(1, RUN_STATE_RUNNING);
    return 0;
}
2171
/* Start (or restart) the VM: on successful preparation, resume all vCPUs. */
void vm_start(void)
{
    if (vm_prepare_start() == 0) {
        resume_all_vcpus();
    }
}
2178
8a9236f1
LC
2179/* does a state transition even if the VM is already stopped,
2180 current state is forgotten forever */
56983463 2181int vm_stop_force_state(RunState state)
8a9236f1
LC
2182{
2183 if (runstate_is_running()) {
56983463 2184 return vm_stop(state);
8a9236f1
LC
2185 } else {
2186 runstate_set(state);
b2780d32
WC
2187
2188 bdrv_drain_all();
594a45ce
KW
2189 /* Make sure to return an error if the flush in a previous vm_stop()
2190 * failed. */
22af08ea 2191 return bdrv_flush_all();
8a9236f1
LC
2192 }
2193}
2194
/*
 * Print the list of CPU models supported by this target, when the
 * target defines a cpu_list() hook.
 *
 * NOTE(review): @optarg is not referenced in this body — presumably kept
 * for interface compatibility with the -cpu option parser; confirm.
 */
void list_cpus(const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list();
#endif
}
/*
 * QMP 'query-cpus': build a list of per-vCPU info structures.
 *
 * For each vCPU this synchronizes register state from the accelerator
 * (cpu_synchronize_state(), which may interrupt the vCPU thread) and
 * records index, halted state, QOM path, thread id, plus a
 * target-specific program-counter field selected by the TARGET_*
 * compile-time #ifdef ladder below.
 *
 * Returns a newly allocated CpuInfoList; ownership passes to the caller.
 */
CpuInfoList *qmp_query_cpus(Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
        /* Pick the target-specific env pointer; exactly one branch is
         * compiled in per target binary. */
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_RISCV)
        RISCVCPU *riscv_cpu = RISCV_CPU(cpu);
        CPURISCVState *env = &riscv_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#elif defined(TARGET_TRICORE)
        TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
        CPUTriCoreState *env = &tricore_cpu->env;
#elif defined(TARGET_S390X)
        S390CPU *s390_cpu = S390_CPU(cpu);
        CPUS390XState *env = &s390_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
        info->value->thread_id = cpu->thread_id;
        /* Fill the arch discriminator and the matching union member. */
#if defined(TARGET_I386)
        info->value->arch = CPU_INFO_ARCH_X86;
        info->value->u.x86.pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->arch = CPU_INFO_ARCH_PPC;
        info->value->u.ppc.nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->arch = CPU_INFO_ARCH_SPARC;
        info->value->u.q_sparc.pc = env->pc;
        info->value->u.q_sparc.npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->arch = CPU_INFO_ARCH_MIPS;
        info->value->u.q_mips.PC = env->active_tc.PC;
#elif defined(TARGET_TRICORE)
        info->value->arch = CPU_INFO_ARCH_TRICORE;
        info->value->u.tricore.PC = env->PC;
#elif defined(TARGET_S390X)
        info->value->arch = CPU_INFO_ARCH_S390;
        info->value->u.s390.cpu_state = env->cpu_state;
#elif defined(TARGET_RISCV)
        info->value->arch = CPU_INFO_ARCH_RISCV;
        info->value->u.riscv.pc = env->pc;
#else
        info->value->arch = CPU_INFO_ARCH_OTHER;
#endif
        info->value->has_props = !!mc->cpu_index_to_instance_props;
        if (info->value->has_props) {
            CpuInstanceProperties *props;
            props = g_malloc0(sizeof(*props));
            *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
            info->value->props = props;
        }

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
0cfd6a9a 2288
daa9d2bc
LE
2289static CpuInfoArch sysemu_target_to_cpuinfo_arch(SysEmuTarget target)
2290{
2291 /*
2292 * The @SysEmuTarget -> @CpuInfoArch mapping below is based on the
2293 * TARGET_ARCH -> TARGET_BASE_ARCH mapping in the "configure" script.
2294 */
2295 switch (target) {
2296 case SYS_EMU_TARGET_I386:
2297 case SYS_EMU_TARGET_X86_64:
2298 return CPU_INFO_ARCH_X86;
2299
2300 case SYS_EMU_TARGET_PPC:
daa9d2bc
LE
2301 case SYS_EMU_TARGET_PPC64:
2302 return CPU_INFO_ARCH_PPC;
2303
2304 case SYS_EMU_TARGET_SPARC:
2305 case SYS_EMU_TARGET_SPARC64:
2306 return CPU_INFO_ARCH_SPARC;
2307
2308 case SYS_EMU_TARGET_MIPS:
2309 case SYS_EMU_TARGET_MIPSEL:
2310 case SYS_EMU_TARGET_MIPS64:
2311 case SYS_EMU_TARGET_MIPS64EL:
2312 return CPU_INFO_ARCH_MIPS;
2313
2314 case SYS_EMU_TARGET_TRICORE:
2315 return CPU_INFO_ARCH_TRICORE;
2316
2317 case SYS_EMU_TARGET_S390X:
2318 return CPU_INFO_ARCH_S390;
2319
2320 case SYS_EMU_TARGET_RISCV32:
2321 case SYS_EMU_TARGET_RISCV64:
2322 return CPU_INFO_ARCH_RISCV;
2323
2324 default:
2325 return CPU_INFO_ARCH_OTHER;
2326 }
2327}
2328
2329static void cpustate_to_cpuinfo_s390(CpuInfoS390 *info, const CPUState *cpu)
2330{
2331#ifdef TARGET_S390X
2332 S390CPU *s390_cpu = S390_CPU(cpu);
2333 CPUS390XState *env = &s390_cpu->env;
2334
2335 info->cpu_state = env->cpu_state;
2336#else
2337 abort();
2338#endif
2339}
2340
ce74ee3d
LC
2341/*
2342 * fast means: we NEVER interrupt vCPU threads to retrieve
2343 * information from KVM.
2344 */
2345CpuInfoFastList *qmp_query_cpus_fast(Error **errp)
2346{
2347 MachineState *ms = MACHINE(qdev_get_machine());
2348 MachineClass *mc = MACHINE_GET_CLASS(ms);
2349 CpuInfoFastList *head = NULL, *cur_item = NULL;
daa9d2bc
LE
2350 SysEmuTarget target = qapi_enum_parse(&SysEmuTarget_lookup, TARGET_NAME,
2351 -1, &error_abort);
ce74ee3d
LC
2352 CPUState *cpu;
2353
2354 CPU_FOREACH(cpu) {
2355 CpuInfoFastList *info = g_malloc0(sizeof(*info));
2356 info->value = g_malloc0(sizeof(*info->value));
2357
2358 info->value->cpu_index = cpu->cpu_index;
2359 info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
2360 info->value->thread_id = cpu->thread_id;
2361
2362 info->value->has_props = !!mc->cpu_index_to_instance_props;
2363 if (info->value->has_props) {
2364 CpuInstanceProperties *props;
2365 props = g_malloc0(sizeof(*props));
2366 *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
2367 info->value->props = props;
2368 }
2369
daa9d2bc
LE
2370 info->value->arch = sysemu_target_to_cpuinfo_arch(target);
2371 info->value->target = target;
2372 if (target == SYS_EMU_TARGET_S390X) {
2373 cpustate_to_cpuinfo_s390(&info->value->u.s390x, cpu);
daa9d2bc
LE
2374 }
2375
ce74ee3d
LC
2376 if (!cur_item) {
2377 head = cur_item = info;
2378 } else {
2379 cur_item->next = info;
2380 cur_item = info;
2381 }
2382 }
2383
2384 return head;
2385}
2386
0cfd6a9a
LC
2387void qmp_memsave(int64_t addr, int64_t size, const char *filename,
2388 bool has_cpu, int64_t cpu_index, Error **errp)
2389{
2390 FILE *f;
2391 uint32_t l;
55e5c285 2392 CPUState *cpu;
0cfd6a9a 2393 uint8_t buf[1024];
0dc9daf0 2394 int64_t orig_addr = addr, orig_size = size;
0cfd6a9a
LC
2395
2396 if (!has_cpu) {
2397 cpu_index = 0;
2398 }
2399
151d1322
AF
2400 cpu = qemu_get_cpu(cpu_index);
2401 if (cpu == NULL) {
c6bd8c70
MA
2402 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
2403 "a CPU number");
0cfd6a9a
LC
2404 return;
2405 }
2406
2407 f = fopen(filename, "wb");
2408 if (!f) {
618da851 2409 error_setg_file_open(errp, errno, filename);
0cfd6a9a
LC
2410 return;
2411 }
2412
2413 while (size != 0) {
2414 l = sizeof(buf);
2415 if (l > size)
2416 l = size;
2f4d0f59 2417 if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
0dc9daf0
BP
2418 error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
2419 " specified", orig_addr, orig_size);
2f4d0f59
AK
2420 goto exit;
2421 }
0cfd6a9a 2422 if (fwrite(buf, 1, l, f) != l) {
c6bd8c70 2423 error_setg(errp, QERR_IO_ERROR);
0cfd6a9a
LC
2424 goto exit;
2425 }
2426 addr += l;
2427 size -= l;
2428 }
2429
2430exit:
2431 fclose(f);
2432}
6d3962bf
LC
2433
2434void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
2435 Error **errp)
2436{
2437 FILE *f;
2438 uint32_t l;
2439 uint8_t buf[1024];
2440
2441 f = fopen(filename, "wb");
2442 if (!f) {
618da851 2443 error_setg_file_open(errp, errno, filename);
6d3962bf
LC
2444 return;
2445 }
2446
2447 while (size != 0) {
2448 l = sizeof(buf);
2449 if (l > size)
2450 l = size;
eb6282f2 2451 cpu_physical_memory_read(addr, buf, l);
6d3962bf 2452 if (fwrite(buf, 1, l, f) != l) {
c6bd8c70 2453 error_setg(errp, QERR_IO_ERROR);
6d3962bf
LC
2454 goto exit;
2455 }
2456 addr += l;
2457 size -= l;
2458 }
2459
2460exit:
2461 fclose(f);
2462}
/*
 * QMP 'inject-nmi': forward an NMI request to the NMI handler for the
 * monitor's currently selected CPU; failures are reported via @errp.
 */
void qmp_inject_nmi(Error **errp)
{
    nmi_monitor_handle(monitor_get_cpu_index(), errp);
}
27498bef 2468
76c86615 2469void dump_drift_info(void)
27498bef
ST
2470{
2471 if (!use_icount) {
2472 return;
2473 }
2474
76c86615 2475 qemu_printf("Host - Guest clock %"PRIi64" ms\n",
27498bef
ST
2476 (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
2477 if (icount_align_option) {
76c86615
MA
2478 qemu_printf("Max guest delay %"PRIi64" ms\n",
2479 -max_delay / SCALE_MS);
2480 qemu_printf("Max guest advance %"PRIi64" ms\n",
2481 max_advance / SCALE_MS);
27498bef 2482 } else {
76c86615
MA
2483 qemu_printf("Max guest delay NA\n");
2484 qemu_printf("Max guest advance NA\n");
27498bef
ST
2485 }
2486}