]> git.proxmox.com Git - mirror_qemu.git/blame - cpus.c
tcg: add kick timer for single-threaded vCPU emulation
[mirror_qemu.git] / cpus.c
CommitLineData
296af7c9
BS
1/*
2 * QEMU System Emulator
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25/* Needed early for CONFIG_BSD etc. */
7b31bbc2 26#include "qemu/osdep.h"
33c11879 27#include "qemu-common.h"
8d4e9146 28#include "qemu/config-file.h"
33c11879 29#include "cpu.h"
83c9089e 30#include "monitor/monitor.h"
a4e15de9 31#include "qapi/qmp/qerror.h"
d49b6836 32#include "qemu/error-report.h"
9c17d615 33#include "sysemu/sysemu.h"
da31d594 34#include "sysemu/block-backend.h"
022c62cb 35#include "exec/gdbstub.h"
9c17d615 36#include "sysemu/dma.h"
b3946626 37#include "sysemu/hw_accel.h"
9c17d615 38#include "sysemu/kvm.h"
b0cb0a66 39#include "sysemu/hax.h"
de0b36b6 40#include "qmp-commands.h"
63c91552 41#include "exec/exec-all.h"
296af7c9 42
1de7afc9 43#include "qemu/thread.h"
9c17d615
PB
44#include "sysemu/cpus.h"
45#include "sysemu/qtest.h"
1de7afc9
PB
46#include "qemu/main-loop.h"
47#include "qemu/bitmap.h"
cb365646 48#include "qemu/seqlock.h"
8d4e9146 49#include "tcg.h"
a4e15de9 50#include "qapi-event.h"
9cb805fd 51#include "hw/nmi.h"
8b427044 52#include "sysemu/replay.h"
0ff0fc19
JK
53
54#ifndef _WIN32
1de7afc9 55#include "qemu/compatfd.h"
0ff0fc19 56#endif
296af7c9 57
6d9cb73c
JK
58#ifdef CONFIG_LINUX
59
60#include <sys/prctl.h>
61
c0532a76
MT
62#ifndef PR_MCE_KILL
63#define PR_MCE_KILL 33
64#endif
65
6d9cb73c
JK
66#ifndef PR_MCE_KILL_SET
67#define PR_MCE_KILL_SET 1
68#endif
69
70#ifndef PR_MCE_KILL_EARLY
71#define PR_MCE_KILL_EARLY 1
72#endif
73
74#endif /* CONFIG_LINUX */
75
27498bef
ST
76int64_t max_delay;
77int64_t max_advance;
296af7c9 78
2adcc85d
JH
79/* vcpu throttling controls */
80static QEMUTimer *throttle_timer;
81static unsigned int throttle_percentage;
82
83#define CPU_THROTTLE_PCT_MIN 1
84#define CPU_THROTTLE_PCT_MAX 99
85#define CPU_THROTTLE_TIMESLICE_NS 10000000
86
321bc0b2
TC
87bool cpu_is_stopped(CPUState *cpu)
88{
89 return cpu->stopped || !runstate_is_running();
90}
91
a98ae1d8 92static bool cpu_thread_is_idle(CPUState *cpu)
ac873f1e 93{
c64ca814 94 if (cpu->stop || cpu->queued_work_first) {
ac873f1e
PM
95 return false;
96 }
321bc0b2 97 if (cpu_is_stopped(cpu)) {
ac873f1e
PM
98 return true;
99 }
8c2e1b00 100 if (!cpu->halted || cpu_has_work(cpu) ||
215e79c0 101 kvm_halt_in_kernel()) {
ac873f1e
PM
102 return false;
103 }
104 return true;
105}
106
107static bool all_cpu_threads_idle(void)
108{
182735ef 109 CPUState *cpu;
ac873f1e 110
bdc44640 111 CPU_FOREACH(cpu) {
182735ef 112 if (!cpu_thread_is_idle(cpu)) {
ac873f1e
PM
113 return false;
114 }
115 }
116 return true;
117}
118
946fb27c
PB
119/***********************************************************/
120/* guest cycle counter */
121
a3270e19
PB
122/* Protected by TimersState seqlock */
123
5045e9d9 124static bool icount_sleep = true;
71468395 125static int64_t vm_clock_warp_start = -1;
946fb27c
PB
126/* Conversion factor from emulated instructions to virtual clock ticks. */
127static int icount_time_shift;
128/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
129#define MAX_ICOUNT_SHIFT 10
a3270e19 130
946fb27c
PB
131static QEMUTimer *icount_rt_timer;
132static QEMUTimer *icount_vm_timer;
133static QEMUTimer *icount_warp_timer;
946fb27c
PB
134
135typedef struct TimersState {
cb365646 136 /* Protected by BQL. */
946fb27c
PB
137 int64_t cpu_ticks_prev;
138 int64_t cpu_ticks_offset;
cb365646
LPF
139
140 /* cpu_clock_offset can be read out of BQL, so protect it with
141 * this lock.
142 */
143 QemuSeqLock vm_clock_seqlock;
946fb27c
PB
144 int64_t cpu_clock_offset;
145 int32_t cpu_ticks_enabled;
146 int64_t dummy;
c96778bb
FK
147
148 /* Compensate for varying guest execution speed. */
149 int64_t qemu_icount_bias;
150 /* Only written by TCG thread */
151 int64_t qemu_icount;
946fb27c
PB
152} TimersState;
153
d9cd4007 154static TimersState timers_state;
8d4e9146
FK
155bool mttcg_enabled;
156
157/*
158 * We default to false if we know other options have been enabled
159 * which are currently incompatible with MTTCG. Otherwise when each
160 * guest (target) has been updated to support:
161 * - atomic instructions
162 * - memory ordering primitives (barriers)
163 * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
164 *
165 * Once a guest architecture has been converted to the new primitives
166 * there are two remaining limitations to check.
167 *
168 * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
169 * - The host must have a stronger memory order than the guest
170 *
171 * It may be possible in future to support strong guests on weak hosts
172 * but that will require tagging all load/stores in a guest with their
173 * implicit memory order requirements which would likely slow things
174 * down a lot.
175 */
176
177static bool check_tcg_memory_orders_compatible(void)
178{
179#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
180 return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
181#else
182 return false;
183#endif
184}
185
186static bool default_mttcg_enabled(void)
187{
188 QemuOpts *icount_opts = qemu_find_opts_singleton("icount");
189 const char *rr = qemu_opt_get(icount_opts, "rr");
190
191 if (rr || TCG_OVERSIZED_GUEST) {
192 return false;
193 } else {
194#ifdef TARGET_SUPPORTS_MTTCG
195 return check_tcg_memory_orders_compatible();
196#else
197 return false;
198#endif
199 }
200}
201
202void qemu_tcg_configure(QemuOpts *opts, Error **errp)
203{
204 const char *t = qemu_opt_get(opts, "thread");
205 if (t) {
206 if (strcmp(t, "multi") == 0) {
207 if (TCG_OVERSIZED_GUEST) {
208 error_setg(errp, "No MTTCG when guest word size > hosts");
209 } else {
210 if (!check_tcg_memory_orders_compatible()) {
211 error_report("Guest expects a stronger memory ordering "
212 "than the host provides");
213 error_printf("This may cause strange/hard to debug errors");
214 }
215 mttcg_enabled = true;
216 }
217 } else if (strcmp(t, "single") == 0) {
218 mttcg_enabled = false;
219 } else {
220 error_setg(errp, "Invalid 'thread' setting %s", t);
221 }
222 } else {
223 mttcg_enabled = default_mttcg_enabled();
224 }
225}
946fb27c 226
2a62914b 227int64_t cpu_get_icount_raw(void)
946fb27c
PB
228{
229 int64_t icount;
4917cf44 230 CPUState *cpu = current_cpu;
946fb27c 231
c96778bb 232 icount = timers_state.qemu_icount;
4917cf44 233 if (cpu) {
414b15c9 234 if (!cpu->can_do_io) {
2a62914b
PD
235 fprintf(stderr, "Bad icount read\n");
236 exit(1);
946fb27c 237 }
28ecfd7a 238 icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
946fb27c 239 }
2a62914b
PD
240 return icount;
241}
242
243/* Return the virtual CPU time, based on the instruction counter. */
244static int64_t cpu_get_icount_locked(void)
245{
246 int64_t icount = cpu_get_icount_raw();
3f031313 247 return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
946fb27c
PB
248}
249
17a15f1b
PB
250int64_t cpu_get_icount(void)
251{
252 int64_t icount;
253 unsigned start;
254
255 do {
256 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
257 icount = cpu_get_icount_locked();
258 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
259
260 return icount;
261}
262
3f031313
FK
263int64_t cpu_icount_to_ns(int64_t icount)
264{
265 return icount << icount_time_shift;
266}
267
d90f3cca
C
268/* return the time elapsed in VM between vm_start and vm_stop. Unless
269 * icount is active, cpu_get_ticks() uses units of the host CPU cycle
270 * counter.
271 *
272 * Caller must hold the BQL
273 */
946fb27c
PB
274int64_t cpu_get_ticks(void)
275{
5f3e3101
PB
276 int64_t ticks;
277
946fb27c
PB
278 if (use_icount) {
279 return cpu_get_icount();
280 }
5f3e3101
PB
281
282 ticks = timers_state.cpu_ticks_offset;
283 if (timers_state.cpu_ticks_enabled) {
4a7428c5 284 ticks += cpu_get_host_ticks();
5f3e3101
PB
285 }
286
287 if (timers_state.cpu_ticks_prev > ticks) {
288 /* Note: non increasing ticks may happen if the host uses
289 software suspend */
290 timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
291 ticks = timers_state.cpu_ticks_prev;
946fb27c 292 }
5f3e3101
PB
293
294 timers_state.cpu_ticks_prev = ticks;
295 return ticks;
946fb27c
PB
296}
297
cb365646 298static int64_t cpu_get_clock_locked(void)
946fb27c 299{
1d45cea5 300 int64_t time;
cb365646 301
1d45cea5 302 time = timers_state.cpu_clock_offset;
5f3e3101 303 if (timers_state.cpu_ticks_enabled) {
1d45cea5 304 time += get_clock();
946fb27c 305 }
cb365646 306
1d45cea5 307 return time;
cb365646
LPF
308}
309
d90f3cca 310/* Return the monotonic time elapsed in VM, i.e.,
8212ff86
PM
311 * the time between vm_start and vm_stop
312 */
cb365646
LPF
313int64_t cpu_get_clock(void)
314{
315 int64_t ti;
316 unsigned start;
317
318 do {
319 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
320 ti = cpu_get_clock_locked();
321 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
322
323 return ti;
946fb27c
PB
324}
325
cb365646 326/* enable cpu_get_ticks()
3224e878 327 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
cb365646 328 */
946fb27c
PB
329void cpu_enable_ticks(void)
330{
cb365646 331 /* Here, the really thing protected by seqlock is cpu_clock_offset. */
03719e44 332 seqlock_write_begin(&timers_state.vm_clock_seqlock);
946fb27c 333 if (!timers_state.cpu_ticks_enabled) {
4a7428c5 334 timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
946fb27c
PB
335 timers_state.cpu_clock_offset -= get_clock();
336 timers_state.cpu_ticks_enabled = 1;
337 }
03719e44 338 seqlock_write_end(&timers_state.vm_clock_seqlock);
946fb27c
PB
339}
340
341/* disable cpu_get_ticks() : the clock is stopped. You must not call
cb365646 342 * cpu_get_ticks() after that.
3224e878 343 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
cb365646 344 */
946fb27c
PB
345void cpu_disable_ticks(void)
346{
cb365646 347 /* Here, the really thing protected by seqlock is cpu_clock_offset. */
03719e44 348 seqlock_write_begin(&timers_state.vm_clock_seqlock);
946fb27c 349 if (timers_state.cpu_ticks_enabled) {
4a7428c5 350 timers_state.cpu_ticks_offset += cpu_get_host_ticks();
cb365646 351 timers_state.cpu_clock_offset = cpu_get_clock_locked();
946fb27c
PB
352 timers_state.cpu_ticks_enabled = 0;
353 }
03719e44 354 seqlock_write_end(&timers_state.vm_clock_seqlock);
946fb27c
PB
355}
356
357/* Correlation between real and virtual time is always going to be
358 fairly approximate, so ignore small variation.
359 When the guest is idle real and virtual time will be aligned in
360 the IO wait loop. */
73bcb24d 361#define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)
946fb27c
PB
362
363static void icount_adjust(void)
364{
365 int64_t cur_time;
366 int64_t cur_icount;
367 int64_t delta;
a3270e19
PB
368
369 /* Protected by TimersState mutex. */
946fb27c 370 static int64_t last_delta;
468cc7cf 371
946fb27c
PB
372 /* If the VM is not running, then do nothing. */
373 if (!runstate_is_running()) {
374 return;
375 }
468cc7cf 376
03719e44 377 seqlock_write_begin(&timers_state.vm_clock_seqlock);
17a15f1b
PB
378 cur_time = cpu_get_clock_locked();
379 cur_icount = cpu_get_icount_locked();
468cc7cf 380
946fb27c
PB
381 delta = cur_icount - cur_time;
382 /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
383 if (delta > 0
384 && last_delta + ICOUNT_WOBBLE < delta * 2
385 && icount_time_shift > 0) {
386 /* The guest is getting too far ahead. Slow time down. */
387 icount_time_shift--;
388 }
389 if (delta < 0
390 && last_delta - ICOUNT_WOBBLE > delta * 2
391 && icount_time_shift < MAX_ICOUNT_SHIFT) {
392 /* The guest is getting too far behind. Speed time up. */
393 icount_time_shift++;
394 }
395 last_delta = delta;
c96778bb
FK
396 timers_state.qemu_icount_bias = cur_icount
397 - (timers_state.qemu_icount << icount_time_shift);
03719e44 398 seqlock_write_end(&timers_state.vm_clock_seqlock);
946fb27c
PB
399}
400
401static void icount_adjust_rt(void *opaque)
402{
40daca54 403 timer_mod(icount_rt_timer,
1979b908 404 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
946fb27c
PB
405 icount_adjust();
406}
407
408static void icount_adjust_vm(void *opaque)
409{
40daca54
AB
410 timer_mod(icount_vm_timer,
411 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
73bcb24d 412 NANOSECONDS_PER_SECOND / 10);
946fb27c
PB
413 icount_adjust();
414}
415
416static int64_t qemu_icount_round(int64_t count)
417{
418 return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
419}
420
efab87cf 421static void icount_warp_rt(void)
946fb27c 422{
ccffff48
AB
423 unsigned seq;
424 int64_t warp_start;
425
17a15f1b
PB
426 /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
427 * changes from -1 to another value, so the race here is okay.
428 */
ccffff48
AB
429 do {
430 seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
431 warp_start = vm_clock_warp_start;
432 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));
433
434 if (warp_start == -1) {
946fb27c
PB
435 return;
436 }
437
03719e44 438 seqlock_write_begin(&timers_state.vm_clock_seqlock);
946fb27c 439 if (runstate_is_running()) {
8eda206e
PD
440 int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT,
441 cpu_get_clock_locked());
8ed961d9
PB
442 int64_t warp_delta;
443
444 warp_delta = clock - vm_clock_warp_start;
445 if (use_icount == 2) {
946fb27c 446 /*
40daca54 447 * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
946fb27c
PB
448 * far ahead of real time.
449 */
17a15f1b 450 int64_t cur_icount = cpu_get_icount_locked();
bf2a7ddb 451 int64_t delta = clock - cur_icount;
8ed961d9 452 warp_delta = MIN(warp_delta, delta);
946fb27c 453 }
c96778bb 454 timers_state.qemu_icount_bias += warp_delta;
946fb27c
PB
455 }
456 vm_clock_warp_start = -1;
03719e44 457 seqlock_write_end(&timers_state.vm_clock_seqlock);
8ed961d9
PB
458
459 if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
460 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
461 }
946fb27c
PB
462}
463
static void icount_timer_cb(void *opaque)
{
    /* No need for a checkpoint because the timer already synchronizes
     * with CHECKPOINT_CLOCK_VIRTUAL_RT.
     */
    icount_warp_rt();
}
471
8156be56
PB
472void qtest_clock_warp(int64_t dest)
473{
40daca54 474 int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
efef88b3 475 AioContext *aio_context;
8156be56 476 assert(qtest_enabled());
efef88b3 477 aio_context = qemu_get_aio_context();
8156be56 478 while (clock < dest) {
40daca54 479 int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
c9299e2f 480 int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
efef88b3 481
03719e44 482 seqlock_write_begin(&timers_state.vm_clock_seqlock);
c96778bb 483 timers_state.qemu_icount_bias += warp;
03719e44 484 seqlock_write_end(&timers_state.vm_clock_seqlock);
17a15f1b 485
40daca54 486 qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
efef88b3 487 timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
40daca54 488 clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
8156be56 489 }
40daca54 490 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
8156be56
PB
491}
492
e76d1798 493void qemu_start_warp_timer(void)
946fb27c 494{
ce78d18c 495 int64_t clock;
946fb27c
PB
496 int64_t deadline;
497
e76d1798 498 if (!use_icount) {
946fb27c
PB
499 return;
500 }
501
8bd7f71d
PD
502 /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
503 * do not fire, so computing the deadline does not make sense.
504 */
505 if (!runstate_is_running()) {
506 return;
507 }
508
509 /* warp clock deterministically in record/replay mode */
e76d1798 510 if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
8bd7f71d
PD
511 return;
512 }
513
ce78d18c 514 if (!all_cpu_threads_idle()) {
946fb27c
PB
515 return;
516 }
517
8156be56
PB
518 if (qtest_enabled()) {
519 /* When testing, qtest commands advance icount. */
e76d1798 520 return;
8156be56
PB
521 }
522
ac70aafc 523 /* We want to use the earliest deadline from ALL vm_clocks */
bf2a7ddb 524 clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
40daca54 525 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
ce78d18c 526 if (deadline < 0) {
d7a0f71d
VC
527 static bool notified;
528 if (!icount_sleep && !notified) {
529 error_report("WARNING: icount sleep disabled and no active timers");
530 notified = true;
531 }
ce78d18c 532 return;
ac70aafc
AB
533 }
534
946fb27c
PB
535 if (deadline > 0) {
536 /*
40daca54 537 * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
946fb27c
PB
538 * sleep. Otherwise, the CPU might be waiting for a future timer
539 * interrupt to wake it up, but the interrupt never comes because
540 * the vCPU isn't running any insns and thus doesn't advance the
40daca54 541 * QEMU_CLOCK_VIRTUAL.
946fb27c 542 */
5045e9d9
VC
543 if (!icount_sleep) {
544 /*
545 * We never let VCPUs sleep in no sleep icount mode.
546 * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
547 * to the next QEMU_CLOCK_VIRTUAL event and notify it.
548 * It is useful when we want a deterministic execution time,
549 * isolated from host latencies.
550 */
03719e44 551 seqlock_write_begin(&timers_state.vm_clock_seqlock);
5045e9d9 552 timers_state.qemu_icount_bias += deadline;
03719e44 553 seqlock_write_end(&timers_state.vm_clock_seqlock);
5045e9d9
VC
554 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
555 } else {
556 /*
557 * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
558 * "real" time, (related to the time left until the next event) has
559 * passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
560 * This avoids that the warps are visible externally; for example,
561 * you will not be sending network packets continuously instead of
562 * every 100ms.
563 */
03719e44 564 seqlock_write_begin(&timers_state.vm_clock_seqlock);
5045e9d9
VC
565 if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
566 vm_clock_warp_start = clock;
567 }
03719e44 568 seqlock_write_end(&timers_state.vm_clock_seqlock);
5045e9d9 569 timer_mod_anticipate(icount_warp_timer, clock + deadline);
ce78d18c 570 }
ac70aafc 571 } else if (deadline == 0) {
40daca54 572 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
946fb27c
PB
573 }
574}
575
e76d1798
PD
576static void qemu_account_warp_timer(void)
577{
578 if (!use_icount || !icount_sleep) {
579 return;
580 }
581
582 /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
583 * do not fire, so computing the deadline does not make sense.
584 */
585 if (!runstate_is_running()) {
586 return;
587 }
588
589 /* warp clock deterministically in record/replay mode */
590 if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
591 return;
592 }
593
594 timer_del(icount_warp_timer);
595 icount_warp_rt();
596}
597
d09eae37
FK
598static bool icount_state_needed(void *opaque)
599{
600 return use_icount;
601}
602
603/*
604 * This is a subsection for icount migration.
605 */
606static const VMStateDescription icount_vmstate_timers = {
607 .name = "timer/icount",
608 .version_id = 1,
609 .minimum_version_id = 1,
5cd8cada 610 .needed = icount_state_needed,
d09eae37
FK
611 .fields = (VMStateField[]) {
612 VMSTATE_INT64(qemu_icount_bias, TimersState),
613 VMSTATE_INT64(qemu_icount, TimersState),
614 VMSTATE_END_OF_LIST()
615 }
616};
617
946fb27c
PB
618static const VMStateDescription vmstate_timers = {
619 .name = "timer",
620 .version_id = 2,
621 .minimum_version_id = 1,
35d08458 622 .fields = (VMStateField[]) {
946fb27c
PB
623 VMSTATE_INT64(cpu_ticks_offset, TimersState),
624 VMSTATE_INT64(dummy, TimersState),
625 VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
626 VMSTATE_END_OF_LIST()
d09eae37 627 },
5cd8cada
JQ
628 .subsections = (const VMStateDescription*[]) {
629 &icount_vmstate_timers,
630 NULL
946fb27c
PB
631 }
632};
633
14e6fe12 634static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
2adcc85d 635{
2adcc85d
JH
636 double pct;
637 double throttle_ratio;
638 long sleeptime_ns;
639
640 if (!cpu_throttle_get_percentage()) {
641 return;
642 }
643
644 pct = (double)cpu_throttle_get_percentage()/100;
645 throttle_ratio = pct / (1 - pct);
646 sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS);
647
648 qemu_mutex_unlock_iothread();
649 atomic_set(&cpu->throttle_thread_scheduled, 0);
650 g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */
651 qemu_mutex_lock_iothread();
652}
653
654static void cpu_throttle_timer_tick(void *opaque)
655{
656 CPUState *cpu;
657 double pct;
658
659 /* Stop the timer if needed */
660 if (!cpu_throttle_get_percentage()) {
661 return;
662 }
663 CPU_FOREACH(cpu) {
664 if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
14e6fe12
PB
665 async_run_on_cpu(cpu, cpu_throttle_thread,
666 RUN_ON_CPU_NULL);
2adcc85d
JH
667 }
668 }
669
670 pct = (double)cpu_throttle_get_percentage()/100;
671 timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
672 CPU_THROTTLE_TIMESLICE_NS / (1-pct));
673}
674
675void cpu_throttle_set(int new_throttle_pct)
676{
677 /* Ensure throttle percentage is within valid range */
678 new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
679 new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);
680
681 atomic_set(&throttle_percentage, new_throttle_pct);
682
683 timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
684 CPU_THROTTLE_TIMESLICE_NS);
685}
686
687void cpu_throttle_stop(void)
688{
689 atomic_set(&throttle_percentage, 0);
690}
691
/* True while a non-zero throttle percentage is set. */
bool cpu_throttle_active(void)
{
    return cpu_throttle_get_percentage() != 0;
}
696
697int cpu_throttle_get_percentage(void)
698{
699 return atomic_read(&throttle_percentage);
700}
701
4603ea01
PD
702void cpu_ticks_init(void)
703{
ccdb3c1f 704 seqlock_init(&timers_state.vm_clock_seqlock);
4603ea01 705 vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
2adcc85d
JH
706 throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
707 cpu_throttle_timer_tick, NULL);
4603ea01
PD
708}
709
1ad9580b 710void configure_icount(QemuOpts *opts, Error **errp)
946fb27c 711{
1ad9580b 712 const char *option;
a8bfac37 713 char *rem_str = NULL;
1ad9580b 714
1ad9580b 715 option = qemu_opt_get(opts, "shift");
946fb27c 716 if (!option) {
a8bfac37
ST
717 if (qemu_opt_get(opts, "align") != NULL) {
718 error_setg(errp, "Please specify shift option when using align");
719 }
946fb27c
PB
720 return;
721 }
f1f4b57e
VC
722
723 icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
5045e9d9
VC
724 if (icount_sleep) {
725 icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
e76d1798 726 icount_timer_cb, NULL);
5045e9d9 727 }
f1f4b57e 728
a8bfac37 729 icount_align_option = qemu_opt_get_bool(opts, "align", false);
f1f4b57e
VC
730
731 if (icount_align_option && !icount_sleep) {
778d9f9b 732 error_setg(errp, "align=on and sleep=off are incompatible");
f1f4b57e 733 }
946fb27c 734 if (strcmp(option, "auto") != 0) {
a8bfac37
ST
735 errno = 0;
736 icount_time_shift = strtol(option, &rem_str, 0);
737 if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
738 error_setg(errp, "icount: Invalid shift value");
739 }
946fb27c
PB
740 use_icount = 1;
741 return;
a8bfac37
ST
742 } else if (icount_align_option) {
743 error_setg(errp, "shift=auto and align=on are incompatible");
f1f4b57e 744 } else if (!icount_sleep) {
778d9f9b 745 error_setg(errp, "shift=auto and sleep=off are incompatible");
946fb27c
PB
746 }
747
748 use_icount = 2;
749
750 /* 125MIPS seems a reasonable initial guess at the guest speed.
751 It will be corrected fairly quickly anyway. */
752 icount_time_shift = 3;
753
754 /* Have both realtime and virtual time triggers for speed adjustment.
755 The realtime trigger catches emulated time passing too slowly,
756 the virtual time trigger catches emulated time passing too fast.
757 Realtime triggers occur even when idle, so use them less frequently
758 than VM triggers. */
bf2a7ddb
PD
759 icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
760 icount_adjust_rt, NULL);
40daca54 761 timer_mod(icount_rt_timer,
bf2a7ddb 762 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
40daca54
AB
763 icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
764 icount_adjust_vm, NULL);
765 timer_mod(icount_vm_timer,
766 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
73bcb24d 767 NANOSECONDS_PER_SECOND / 10);
946fb27c
PB
768}
769
6546706d
AB
770/***********************************************************/
771/* TCG vCPU kick timer
772 *
773 * The kick timer is responsible for moving single threaded vCPU
774 * emulation on to the next vCPU. If more than one vCPU is running a
775 * timer event with force a cpu->exit so the next vCPU can get
776 * scheduled.
777 *
778 * The timer is removed if all vCPUs are idle and restarted again once
779 * idleness is complete.
780 */
781
782static QEMUTimer *tcg_kick_vcpu_timer;
783
784static void qemu_cpu_kick_no_halt(void);
785
786#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
787
788static inline int64_t qemu_tcg_next_kick(void)
789{
790 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
791}
792
793static void kick_tcg_thread(void *opaque)
794{
795 timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
796 qemu_cpu_kick_no_halt();
797}
798
799static void start_tcg_kick_timer(void)
800{
801 if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
802 tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
803 kick_tcg_thread, NULL);
804 timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
805 }
806}
807
808static void stop_tcg_kick_timer(void)
809{
810 if (tcg_kick_vcpu_timer) {
811 timer_del(tcg_kick_vcpu_timer);
812 tcg_kick_vcpu_timer = NULL;
813 }
814}
815
816
296af7c9
BS
817/***********************************************************/
818void hw_error(const char *fmt, ...)
819{
820 va_list ap;
55e5c285 821 CPUState *cpu;
296af7c9
BS
822
823 va_start(ap, fmt);
824 fprintf(stderr, "qemu: hardware error: ");
825 vfprintf(stderr, fmt, ap);
826 fprintf(stderr, "\n");
bdc44640 827 CPU_FOREACH(cpu) {
55e5c285 828 fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
878096ee 829 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
296af7c9
BS
830 }
831 va_end(ap);
832 abort();
833}
834
835void cpu_synchronize_all_states(void)
836{
182735ef 837 CPUState *cpu;
296af7c9 838
bdc44640 839 CPU_FOREACH(cpu) {
182735ef 840 cpu_synchronize_state(cpu);
296af7c9
BS
841 }
842}
843
844void cpu_synchronize_all_post_reset(void)
845{
182735ef 846 CPUState *cpu;
296af7c9 847
bdc44640 848 CPU_FOREACH(cpu) {
182735ef 849 cpu_synchronize_post_reset(cpu);
296af7c9
BS
850 }
851}
852
853void cpu_synchronize_all_post_init(void)
854{
182735ef 855 CPUState *cpu;
296af7c9 856
bdc44640 857 CPU_FOREACH(cpu) {
182735ef 858 cpu_synchronize_post_init(cpu);
296af7c9
BS
859 }
860}
861
56983463 862static int do_vm_stop(RunState state)
296af7c9 863{
56983463
KW
864 int ret = 0;
865
1354869c 866 if (runstate_is_running()) {
296af7c9 867 cpu_disable_ticks();
296af7c9 868 pause_all_vcpus();
f5bbfba1 869 runstate_set(state);
1dfb4dd9 870 vm_state_notify(0, state);
a4e15de9 871 qapi_event_send_stop(&error_abort);
296af7c9 872 }
56983463 873
594a45ce 874 bdrv_drain_all();
6d0ceb80 875 replay_disable_events();
22af08ea 876 ret = bdrv_flush_all();
594a45ce 877
56983463 878 return ret;
296af7c9
BS
879}
880
a1fcaa73 881static bool cpu_can_run(CPUState *cpu)
296af7c9 882{
4fdeee7c 883 if (cpu->stop) {
a1fcaa73 884 return false;
0ab07c62 885 }
321bc0b2 886 if (cpu_is_stopped(cpu)) {
a1fcaa73 887 return false;
0ab07c62 888 }
a1fcaa73 889 return true;
296af7c9
BS
890}
891
91325046 892static void cpu_handle_guest_debug(CPUState *cpu)
83f338f7 893{
64f6b346 894 gdb_set_stop_cpu(cpu);
8cf71710 895 qemu_system_debug_request();
f324e766 896 cpu->stopped = true;
3c638d06
JK
897}
898
6d9cb73c
JK
899#ifdef CONFIG_LINUX
900static void sigbus_reraise(void)
901{
902 sigset_t set;
903 struct sigaction action;
904
905 memset(&action, 0, sizeof(action));
906 action.sa_handler = SIG_DFL;
907 if (!sigaction(SIGBUS, &action, NULL)) {
908 raise(SIGBUS);
909 sigemptyset(&set);
910 sigaddset(&set, SIGBUS);
a2d1761d 911 pthread_sigmask(SIG_UNBLOCK, &set, NULL);
6d9cb73c
JK
912 }
913 perror("Failed to re-raise SIGBUS!\n");
914 abort();
915}
916
917static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
918 void *ctx)
919{
920 if (kvm_on_sigbus(siginfo->ssi_code,
921 (void *)(intptr_t)siginfo->ssi_addr)) {
922 sigbus_reraise();
923 }
924}
925
926static void qemu_init_sigbus(void)
927{
928 struct sigaction action;
929
930 memset(&action, 0, sizeof(action));
931 action.sa_flags = SA_SIGINFO;
932 action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
933 sigaction(SIGBUS, &action, NULL);
934
935 prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
936}
937
290adf38 938static void qemu_kvm_eat_signals(CPUState *cpu)
1ab3c6c0
JK
939{
940 struct timespec ts = { 0, 0 };
941 siginfo_t siginfo;
942 sigset_t waitset;
943 sigset_t chkset;
944 int r;
945
946 sigemptyset(&waitset);
947 sigaddset(&waitset, SIG_IPI);
948 sigaddset(&waitset, SIGBUS);
949
950 do {
951 r = sigtimedwait(&waitset, &siginfo, &ts);
952 if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
953 perror("sigtimedwait");
954 exit(1);
955 }
956
957 switch (r) {
958 case SIGBUS:
290adf38 959 if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
1ab3c6c0
JK
960 sigbus_reraise();
961 }
962 break;
963 default:
964 break;
965 }
966
967 r = sigpending(&chkset);
968 if (r == -1) {
969 perror("sigpending");
970 exit(1);
971 }
972 } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
1ab3c6c0
JK
973}
974
6d9cb73c
JK
975#else /* !CONFIG_LINUX */
976
977static void qemu_init_sigbus(void)
978{
979}
1ab3c6c0 980
290adf38 981static void qemu_kvm_eat_signals(CPUState *cpu)
1ab3c6c0
JK
982{
983}
6d9cb73c
JK
984#endif /* !CONFIG_LINUX */
985
296af7c9 986#ifndef _WIN32
55f8d6ac
JK
/* Empty handler: SIG_IPI only needs to interrupt blocking syscalls. */
static void dummy_signal(int sig)
{
}
55f8d6ac 990
13618e05 991static void qemu_kvm_init_cpu_signals(CPUState *cpu)
714bd040
PB
992{
993 int r;
994 sigset_t set;
995 struct sigaction sigact;
996
997 memset(&sigact, 0, sizeof(sigact));
998 sigact.sa_handler = dummy_signal;
999 sigaction(SIG_IPI, &sigact, NULL);
1000
714bd040
PB
1001 pthread_sigmask(SIG_BLOCK, NULL, &set);
1002 sigdelset(&set, SIG_IPI);
714bd040 1003 sigdelset(&set, SIGBUS);
491d6e80 1004 r = kvm_set_signal_mask(cpu, &set);
714bd040
PB
1005 if (r) {
1006 fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
1007 exit(1);
1008 }
1009}
1010
55f8d6ac 1011#else /* _WIN32 */
13618e05 1012static void qemu_kvm_init_cpu_signals(CPUState *cpu)
ff48eb5f 1013{
714bd040
PB
1014 abort();
1015}
714bd040 1016#endif /* _WIN32 */
ff48eb5f 1017
/* Big QEMU Lock (BQL): serialises vCPU state access and device emulation. */
static QemuMutex qemu_global_mutex;
/* Signalled once a thread requesting the BQL has obtained it, so TCG can
 * resume (see qemu_mutex_lock_iothread / qemu_tcg_wait_io_event). */
static QemuCond qemu_io_proceeded_cond;
/* Count of threads currently waiting to take the BQL away from TCG. */
static unsigned iothread_requesting_mutex;

/* The thread that called qemu_init_cpu_loop() (the main/io thread). */
static QemuThread io_thread;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
1028
d3b12f5d 1029void qemu_init_cpu_loop(void)
296af7c9 1030{
6d9cb73c 1031 qemu_init_sigbus();
ed94592b 1032 qemu_cond_init(&qemu_cpu_cond);
ed94592b 1033 qemu_cond_init(&qemu_pause_cond);
46daff13 1034 qemu_cond_init(&qemu_io_proceeded_cond);
296af7c9 1035 qemu_mutex_init(&qemu_global_mutex);
296af7c9 1036
b7680cb6 1037 qemu_thread_get_self(&io_thread);
296af7c9
BS
1038}
1039
/* Run func(data) on cpu's thread, dropping the BQL while waiting for
 * completion (delegates to the generic do_run_on_cpu helper). */
void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
}
1044
4c055ab5
GZ
1045static void qemu_kvm_destroy_vcpu(CPUState *cpu)
1046{
1047 if (kvm_destroy_vcpu(cpu) < 0) {
1048 error_report("kvm_destroy_vcpu failed");
1049 exit(EXIT_FAILURE);
1050 }
1051}
1052
/* TCG vCPUs have no per-vCPU kernel state, so destruction is a no-op. */
static void qemu_tcg_destroy_vcpu(CPUState *cpu)
{
}
1056
/* Housekeeping shared by all vCPU loops: acknowledge a pending stop
 * request, run queued cross-CPU work, and re-arm the kick latch.
 * Called with the BQL held. */
static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        /* Acknowledge the stop and wake anyone in pause_all_vcpus(). */
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_broadcast(&qemu_pause_cond);
    }
    process_queued_cpu_work(cpu);
    /* Allow the next qemu_cpu_kick_thread() to signal this thread again. */
    cpu->thread_kicked = false;
}
1067
/* Idle/yield point of the single TCG thread: sleep while every vCPU is
 * halted, yield to iothreads contending for the BQL, then run per-CPU
 * housekeeping.  Called with the BQL held; qemu_cond_wait drops it. */
static void qemu_tcg_wait_io_event(CPUState *cpu)
{
    while (all_cpu_threads_idle()) {
        /* No vCPU can make progress, so the periodic kick is pointless. */
        stop_tcg_kick_timer();
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    start_tcg_kick_timer();

    /* Give the BQL to any thread spinning in qemu_mutex_lock_iothread(). */
    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    /* NOTE: reuses the parameter as the iteration variable. */
    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}
1085
/* Idle point of a KVM vCPU thread: sleep while halted, then drain any
 * pending SIG_IPI/SIGBUS and do common housekeeping.  BQL held. */
static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}
1095
/* Thread body for one KVM vCPU: initialise the vCPU, then loop running
 * guest code until the vCPU is unplugged.  Runs with the BQL held except
 * inside kvm_cpu_exec()/cond waits. */
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    do {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
        /* Keep running until unplugged AND no longer runnable. */
    } while (!cpu->unplug || cpu_can_run(cpu));

    qemu_kvm_destroy_vcpu(cpu);
    /* Let cpu_remove_sync() observe the teardown. */
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    return NULL;
}
1137
c7f0f3b1
AL
/* Thread body for a "dummy" vCPU (qtest): never executes guest code,
 * just parks in sigwait() until kicked, then services queued work.
 * POSIX-only; qtest is unsupported on Windows. */
static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;

    /* Only SIG_IPI (a kick) can wake this thread. */
    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        /* Clear current_cpu while the BQL is dropped and we may sleep. */
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}
1182
1be7fcb8
AB
1183static int64_t tcg_get_icount_limit(void)
1184{
1185 int64_t deadline;
1186
1187 if (replay_mode != REPLAY_MODE_PLAY) {
1188 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1189
1190 /* Maintain prior (possibly buggy) behaviour where if no deadline
1191 * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
1192 * INT32_MAX nanoseconds ahead, we still use INT32_MAX
1193 * nanoseconds.
1194 */
1195 if ((deadline < 0) || (deadline > INT32_MAX)) {
1196 deadline = INT32_MAX;
1197 }
1198
1199 return qemu_icount_round(deadline);
1200 } else {
1201 return replay_get_instructions();
1202 }
1203}
1204
12e9700d
AB
1205static void handle_icount_deadline(void)
1206{
1207 if (use_icount) {
1208 int64_t deadline =
1209 qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1210
1211 if (deadline == 0) {
1212 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
1213 }
1214 }
1215}
1216
1be7fcb8
AB
/* Execute one TCG slice on cpu and return the cpu_exec() exit code.
 * In icount mode, charge a bounded instruction budget before entry
 * (low 16 bits in icount_decr, remainder in icount_extra) and fold any
 * unexecuted instructions back into the global counter afterwards. */
static int tcg_cpu_exec(CPUState *cpu)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        /* Reclaim whatever budget is still outstanding from last time. */
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                    + cpu->icount_extra);
        cpu->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        count = tcg_get_icount_limit();
        timers_state.qemu_icount += count;
        /* icount_decr only holds 16 bits; overflow goes to icount_extra. */
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        cpu->icount_decr.u16.low = decr;
        cpu->icount_extra = count;
    }
    cpu_exec_start(cpu);
    ret = cpu_exec(cpu);
    cpu_exec_end(cpu);
#ifdef CONFIG_PROFILER
    tcg_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                    + cpu->icount_extra);
        cpu->icount_decr.u32 = 0;
        cpu->icount_extra = 0;
        replay_account_executed_instructions();
    }
    return ret;
}
1258
c93bbbef
AB
1259/* Destroy any remaining vCPUs which have been unplugged and have
1260 * finished running
1261 */
1262static void deal_with_unplugged_cpus(void)
1be7fcb8 1263{
c93bbbef 1264 CPUState *cpu;
1be7fcb8 1265
c93bbbef
AB
1266 CPU_FOREACH(cpu) {
1267 if (cpu->unplug && !cpu_can_run(cpu)) {
1268 qemu_tcg_destroy_vcpu(cpu);
1269 cpu->created = false;
1270 qemu_cond_signal(&qemu_cpu_cond);
1be7fcb8
AB
1271 break;
1272 }
1273 }
1be7fcb8 1274}
bdb7ca67 1275
6546706d
AB
1276/* Single-threaded TCG
1277 *
1278 * In the single-threaded case each vCPU is simulated in turn. If
1279 * there is more than a single vCPU we create a simple timer to kick
1280 * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
1281 * This is done explicitly rather than relying on side-effects
1282 * elsewhere.
1283 */
1284
/* Single TCG thread: simulates every vCPU round-robin.  A kick timer
 * (start_tcg_kick_timer) periodically bumps execution so one vCPU in a
 * tight loop cannot starve the others.  Runs with the BQL held except
 * inside tcg_cpu_exec()/cond waits. */
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    /* This one thread services all vCPUs; mark each as created. */
    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
        cpu->can_do_io = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    start_tcg_kick_timer();

    /* process any pending work */
    atomic_mb_set(&exit_request, 1);

    cpu = first_cpu;

    while (1) {
        /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
        qemu_account_warp_timer();

        /* Restart the round-robin from the beginning when exhausted. */
        if (!cpu) {
            cpu = first_cpu;
        }

        for (; cpu != NULL && !exit_request; cpu = CPU_NEXT(cpu)) {

            /* Virtual time stands still while single-stepping w/ NOTIMER. */
            qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                              (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

            if (cpu_can_run(cpu)) {
                int r;
                r = tcg_cpu_exec(cpu);
                if (r == EXCP_DEBUG) {
                    cpu_handle_guest_debug(cpu);
                    break;
                }
            } else if (cpu->stop || cpu->stopped) {
                /* Skip past an unplugged vCPU so it is not resumed. */
                if (cpu->unplug) {
                    cpu = CPU_NEXT(cpu);
                }
                break;
            }

        } /* for cpu.. */

        /* Pairs with smp_wmb in qemu_cpu_kick.  */
        atomic_mb_set(&exit_request, 0);

        handle_icount_deadline();

        qemu_tcg_wait_io_event(QTAILQ_FIRST(&cpus));
        deal_with_unplugged_cpus();
    }

    return NULL;
}
1358
b0cb0a66
VP
1359static void *qemu_hax_cpu_thread_fn(void *arg)
1360{
1361 CPUState *cpu = arg;
1362 int r;
1363 qemu_thread_get_self(cpu->thread);
1364 qemu_mutex_lock(&qemu_global_mutex);
1365
1366 cpu->thread_id = qemu_get_thread_id();
1367 cpu->created = true;
1368 cpu->halted = 0;
1369 current_cpu = cpu;
1370
1371 hax_init_vcpu(cpu);
1372 qemu_cond_signal(&qemu_cpu_cond);
1373
1374 while (1) {
1375 if (cpu_can_run(cpu)) {
1376 r = hax_smp_cpu_exec(cpu);
1377 if (r == EXCP_DEBUG) {
1378 cpu_handle_guest_debug(cpu);
1379 }
1380 }
1381
1382 while (cpu_thread_is_idle(cpu)) {
1383 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
1384 }
1385#ifdef _WIN32
1386 SleepEx(0, TRUE);
1387#endif
1388 qemu_wait_io_event_common(cpu);
1389 }
1390 return NULL;
1391}
1392
#ifdef _WIN32
/* No-op APC: queuing it is enough to wake a thread from alertable wait. */
static void CALLBACK dummy_apc_func(ULONG_PTR unused)
{
}
#endif
1398
/* Interrupt a vCPU thread: SIG_IPI on POSIX, a queued user APC on
 * Windows.  thread_kicked coalesces repeated kicks until the target
 * clears it in qemu_wait_io_event_common(). */
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    if (cpu->thread_kicked) {
        /* A kick is already in flight; no need to signal again. */
        return;
    }
    cpu->thread_kicked = true;
    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        /* Wake the target from its alertable wait (SleepEx). */
        if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) {
            fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n",
                    __func__, GetLastError());
            exit(1);
        }
    }
#endif
}
ed9164a3 1423
e0c38211
PB
/* Ask the TCG thread to leave the execution loop without waking halted
 * vCPUs: raise exit_request and cpu_exit() whichever vCPU is running. */
static void qemu_cpu_kick_no_halt(void)
{
    CPUState *cpu;
    /* Ensure whatever caused the exit has reached the CPU threads before
     * writing exit_request.
     */
    atomic_mb_set(&exit_request, 1);
    cpu = atomic_mb_read(&tcg_current_cpu);
    if (cpu) {
        cpu_exit(cpu);
    }
}
1436
/* Wake cpu: broadcast its halt condition, then use the accelerator's
 * mechanism to interrupt guest execution (TCG exit request, HAX
 * exit_request flag, or a thread signal/APC for KVM and others). */
void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (tcg_enabled()) {
        qemu_cpu_kick_no_halt();
    } else {
        if (hax_enabled()) {
            /*
             * FIXME: race condition with the exit_request check in
             * hax_vcpu_hax_exec
             */
            cpu->exit_request = 1;
        }
        qemu_cpu_kick_thread(cpu);
    }
}
1453
/* Kick the calling vCPU thread itself; only valid on a vCPU thread. */
void qemu_cpu_kick_self(void)
{
    assert(current_cpu);
    qemu_cpu_kick_thread(current_cpu);
}
1459
/* True if the calling thread is cpu's own vCPU thread. */
bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}
1464
/* True if called from a vCPU thread (current_cpu is thread-local-ish:
 * set only on vCPU threads and it must match the calling thread). */
bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}
1469
afbe7053
PB
1470static __thread bool iothread_locked = false;
1471
1472bool qemu_mutex_iothread_locked(void)
1473{
1474 return iothread_locked;
1475}
1476
296af7c9
BS
/* Acquire the BQL.  If the TCG thread may be executing guest code, bump
 * it out first (qemu_cpu_kick_no_halt) so the lock is handed over
 * promptly; iothread_requesting_mutex makes the TCG thread yield in
 * qemu_tcg_wait_io_event() until we are done. */
void qemu_mutex_lock_iothread(void)
{
    atomic_inc(&iothread_requesting_mutex);
    /* In the simple case there is no need to bump the VCPU thread out of
     * TCG code execution.
     */
    if (!tcg_enabled() || qemu_in_vcpu_thread() ||
        !first_cpu || !first_cpu->created) {
        qemu_mutex_lock(&qemu_global_mutex);
        atomic_dec(&iothread_requesting_mutex);
    } else {
        /* Contended: kick the TCG thread out of the guest and retry. */
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_no_halt();
            qemu_mutex_lock(&qemu_global_mutex);
        }
        atomic_dec(&iothread_requesting_mutex);
        /* Tell the TCG thread it may resume (see qemu_tcg_wait_io_event). */
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
    iothread_locked = true;
}
1497
/* Release the BQL; the flag is cleared first so it is never true while
 * the mutex is not held by this thread. */
void qemu_mutex_unlock_iothread(void)
{
    iothread_locked = false;
    qemu_mutex_unlock(&qemu_global_mutex);
}
1503
e8faee06 1504static bool all_vcpus_paused(void)
296af7c9 1505{
bdc44640 1506 CPUState *cpu;
296af7c9 1507
bdc44640 1508 CPU_FOREACH(cpu) {
182735ef 1509 if (!cpu->stopped) {
e8faee06 1510 return false;
0ab07c62 1511 }
296af7c9
BS
1512 }
1513
e8faee06 1514 return true;
296af7c9
BS
1515}
1516
/* Request every vCPU to stop and wait until all have acknowledged.
 * Safe to call from a vCPU thread: the caller stops itself, and for
 * non-KVM (single TCG thread) all vCPUs can be marked stopped at once
 * since no other vCPU thread exists. */
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            /* Single TCG thread: no one else will ack, mark all stopped. */
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        /* Re-kick in case a vCPU missed the first request. */
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}
1545
2993683b
IM
/* Clear a single vCPU's stop state and wake its thread. */
void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}
1552
296af7c9
BS
/* Re-enable the virtual clock and resume every vCPU. */
void resume_all_vcpus(void)
{
    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}
1562
4c055ab5
GZ
/* Request asynchronous unplug of a vCPU; its thread notices stop+unplug
 * and tears itself down. */
void cpu_remove(CPUState *cpu)
{
    cpu->stop = true;
    cpu->unplug = true;
    qemu_cpu_kick(cpu);
}
1569
2c579042
BR
/* Unplug a vCPU and wait (on qemu_cpu_cond, BQL held) until its thread
 * has cleared cpu->created. */
void cpu_remove_sync(CPUState *cpu)
{
    cpu_remove(cpu);
    while (cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
1577
4900116e
DDAG
/* For temporary buffers for forming a name (includes the trailing NUL). */
#define VCPU_THREAD_NAME_SIZE 16
1580
/* Attach a vCPU to TCG.  The first call creates the single shared TCG
 * thread and waits for it to come up; later calls just point the new
 * vCPU at the existing thread and halt condition. */
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];
    /* Shared by every TCG vCPU; allocated on first use. */
    static QemuCond *tcg_halt_cond;
    static QemuThread *tcg_cpu_thread;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                 cpu->cpu_index);
        qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
                           cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        /* Wait for the thread to report creation (BQL dropped in wait). */
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}
1609
b0cb0a66
VP
/* Spawn a dedicated HAX vCPU thread and wait until it reports created. */
static void qemu_hax_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);

    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
    /* Needed by QueueUserAPC-based kicks on Windows. */
    cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
1629
/* Spawn a dedicated KVM vCPU thread and wait until it reports created. */
static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
1645
/* Spawn a dummy (qtest) vCPU thread and wait until it reports created. */
static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
1661
/* Common vCPU bring-up: set topology, give the CPU a default address
 * space if the target did not provide one, then start it under the
 * active accelerator (KVM, HAX, TCG, or the qtest dummy). */
void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    /* Starts paused; resume_all_vcpus()/vm_start() kicks it off. */
    cpu->stopped = true;

    if (!cpu->as) {
        /* If the target cpu hasn't set up any address spaces itself,
         * give it the default one.
         */
        AddressSpace *as = address_space_init_shareable(cpu->memory,
                                                        "cpu-memory");
        cpu->num_ases = 1;
        cpu_address_space_init(cpu, as, 0);
    }

    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (hax_enabled()) {
        qemu_hax_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}
1688
/* Stop the vCPU the caller is running on: mark it stopped, force it out
 * of the execution loop, and wake waiters in pause_all_vcpus().
 * No-op when called from a non-vCPU thread (current_cpu == NULL). */
void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_broadcast(&qemu_pause_cond);
    }
}
1698
/* Stop the VM.  From a vCPU thread the stop is requested asynchronously
 * (the main loop performs it) and only the calling vCPU stops here;
 * otherwise the stop is carried out synchronously via do_vm_stop(). */
int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}
1714
2d76e823
CI
1715/**
1716 * Prepare for (re)starting the VM.
1717 * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
1718 * running or in case of an error condition), 0 otherwise.
1719 */
1720int vm_prepare_start(void)
1721{
1722 RunState requested;
1723 int res = 0;
1724
1725 qemu_vmstop_requested(&requested);
1726 if (runstate_is_running() && requested == RUN_STATE__MAX) {
1727 return -1;
1728 }
1729
1730 /* Ensure that a STOP/RESUME pair of events is emitted if a
1731 * vmstop request was pending. The BLOCK_IO_ERROR event, for
1732 * example, according to documentation is always followed by
1733 * the STOP event.
1734 */
1735 if (runstate_is_running()) {
1736 qapi_event_send_stop(&error_abort);
1737 res = -1;
1738 } else {
1739 replay_enable_events();
1740 cpu_enable_ticks();
1741 runstate_set(RUN_STATE_RUNNING);
1742 vm_state_notify(1, RUN_STATE_RUNNING);
1743 }
1744
1745 /* We are sending this now, but the CPUs will be resumed shortly later */
1746 qapi_event_send_resume(&error_abort);
1747 return res;
1748}
1749
/* Start (or restart) the VM; resumes the vCPUs only when the prepare
 * step says a restart is actually warranted. */
void vm_start(void)
{
    if (vm_prepare_start() == 0) {
        resume_all_vcpus();
    }
}
1756
8a9236f1
LC
1757/* does a state transition even if the VM is already stopped,
1758 current state is forgotten forever */
56983463 1759int vm_stop_force_state(RunState state)
8a9236f1
LC
1760{
1761 if (runstate_is_running()) {
56983463 1762 return vm_stop(state);
8a9236f1
LC
1763 } else {
1764 runstate_set(state);
b2780d32
WC
1765
1766 bdrv_drain_all();
594a45ce
KW
1767 /* Make sure to return an error if the flush in a previous vm_stop()
1768 * failed. */
22af08ea 1769 return bdrv_flush_all();
8a9236f1
LC
1770 }
1771}
1772
/* Print the target's supported CPU models (for "-cpu help"); a no-op
 * for targets that do not define cpu_list. */
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}
de0b36b6
LC
1780
/* QMP "query-cpus": build a list describing every vCPU (index, halt
 * state, QOM path, thread id, and a target-specific program counter). */
CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
        /* Per-target view of the architectural state for the PC fields. */
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#elif defined(TARGET_TRICORE)
        TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
        CPUTriCoreState *env = &tricore_cpu->env;
#endif

        /* Pull the latest register state from the accelerator. */
        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->arch = CPU_INFO_ARCH_X86;
        info->value->u.x86.pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->arch = CPU_INFO_ARCH_PPC;
        info->value->u.ppc.nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->arch = CPU_INFO_ARCH_SPARC;
        info->value->u.q_sparc.pc = env->pc;
        info->value->u.q_sparc.npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->arch = CPU_INFO_ARCH_MIPS;
        info->value->u.q_mips.PC = env->active_tc.PC;
#elif defined(TARGET_TRICORE)
        info->value->arch = CPU_INFO_ARCH_TRICORE;
        info->value->u.tricore.PC = env->PC;
#else
        info->value->arch = CPU_INFO_ARCH_OTHER;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
0cfd6a9a
LC
1845
1846void qmp_memsave(int64_t addr, int64_t size, const char *filename,
1847 bool has_cpu, int64_t cpu_index, Error **errp)
1848{
1849 FILE *f;
1850 uint32_t l;
55e5c285 1851 CPUState *cpu;
0cfd6a9a 1852 uint8_t buf[1024];
0dc9daf0 1853 int64_t orig_addr = addr, orig_size = size;
0cfd6a9a
LC
1854
1855 if (!has_cpu) {
1856 cpu_index = 0;
1857 }
1858
151d1322
AF
1859 cpu = qemu_get_cpu(cpu_index);
1860 if (cpu == NULL) {
c6bd8c70
MA
1861 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
1862 "a CPU number");
0cfd6a9a
LC
1863 return;
1864 }
1865
1866 f = fopen(filename, "wb");
1867 if (!f) {
618da851 1868 error_setg_file_open(errp, errno, filename);
0cfd6a9a
LC
1869 return;
1870 }
1871
1872 while (size != 0) {
1873 l = sizeof(buf);
1874 if (l > size)
1875 l = size;
2f4d0f59 1876 if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
0dc9daf0
BP
1877 error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
1878 " specified", orig_addr, orig_size);
2f4d0f59
AK
1879 goto exit;
1880 }
0cfd6a9a 1881 if (fwrite(buf, 1, l, f) != l) {
c6bd8c70 1882 error_setg(errp, QERR_IO_ERROR);
0cfd6a9a
LC
1883 goto exit;
1884 }
1885 addr += l;
1886 size -= l;
1887 }
1888
1889exit:
1890 fclose(f);
1891}
6d3962bf
LC
1892
1893void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
1894 Error **errp)
1895{
1896 FILE *f;
1897 uint32_t l;
1898 uint8_t buf[1024];
1899
1900 f = fopen(filename, "wb");
1901 if (!f) {
618da851 1902 error_setg_file_open(errp, errno, filename);
6d3962bf
LC
1903 return;
1904 }
1905
1906 while (size != 0) {
1907 l = sizeof(buf);
1908 if (l > size)
1909 l = size;
eb6282f2 1910 cpu_physical_memory_read(addr, buf, l);
6d3962bf 1911 if (fwrite(buf, 1, l, f) != l) {
c6bd8c70 1912 error_setg(errp, QERR_IO_ERROR);
6d3962bf
LC
1913 goto exit;
1914 }
1915 addr += l;
1916 size -= l;
1917 }
1918
1919exit:
1920 fclose(f);
1921}
ab49ab5c
LC
1922
/* QMP "inject-nmi": deliver an NMI via the monitor's current CPU. */
void qmp_inject_nmi(Error **errp)
{
    nmi_monitor_handle(monitor_get_cpu_index(), errp);
}
27498bef
ST
1927
/* Monitor "info jit"/drift helper: report how far guest (icount) time
 * has drifted from host time; only meaningful in icount mode.  The
 * delay/advance bounds are tracked only with -icount align. */
void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
{
    if (!use_icount) {
        return;
    }

    cpu_fprintf(f, "Host - Guest clock %"PRIi64" ms\n",
                (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
    if (icount_align_option) {
        cpu_fprintf(f, "Max guest delay %"PRIi64" ms\n", -max_delay/SCALE_MS);
        cpu_fprintf(f, "Max guest advance %"PRIi64" ms\n", max_advance/SCALE_MS);
    } else {
        cpu_fprintf(f, "Max guest delay NA\n");
        cpu_fprintf(f, "Max guest advance NA\n");
    }
}