accel/tcg/cpu-exec.c
/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-machine.h"
#include "qapi/type-helpers.h"
#include "hw/core/tcg-cpu-ops.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "qemu/compiler.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/replay.h"
#include "sysemu/tcg.h"
#include "exec/helper-proto.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal.h"

/* -icount align implementation. */
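/*
 * When -icount align is enabled, the vCPU thread sleeps whenever the guest's
 * virtual clock runs ahead of host real time, keeping the two clocks within
 * roughly VM_CLOCK_ADVANCE of each other (see align_clocks() below).
 */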

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

static int64_t max_delay;
static int64_t max_advance;

static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                        threshold_delay - 1,
                        threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print at most every 2s if the guest is late.  We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* !CONFIG_USER_ONLY */

uint32_t curr_cflags(CPUState *cpu)
{
    uint32_t cflags = cpu->tcg_cflags;

    /*
     * Record gdb single-step.  We should be exiting the TB by raising
     * EXCP_DEBUG, but to simplify other tests, disable chaining too.
     *
     * For singlestep and -d nochain, suppress goto_tb so that
     * we can log -d cpu,exec after every TB.
     */
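    /*
     * Note: the low CF_COUNT_MASK bits of cflags hold the maximum number of
     * guest instructions allowed in the TB, so or-ing in 1 below restricts
     * the next translation block to a single instruction.
     */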
    if (unlikely(cpu->singlestep_enabled)) {
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
    } else if (singlestep) {
        cflags |= CF_NO_GOTO_TB | 1;
    } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        cflags |= CF_NO_GOTO_TB;
    }

    return cflags;
}

struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t page_addr0;
    uint32_t flags;
    uint32_t cflags;
    uint32_t trace_vcpu_dstate;
};

static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) &&
        tb->page_addr[0] == desc->page_addr0 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        tb_cflags(tb) == desc->cflags) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page1;
            target_ulong virt_page1;

            /*
             * We know that the first page matched, and an otherwise valid TB
             * encountered an incomplete instruction at the end of that page,
             * therefore we know that generating a new TB from the current PC
             * must also require reading from the next page -- even if the
             * second pages do not match, and therefore the resulting insn
             * is different for the new TB.  Therefore any exception raised
             * here by the faulting lookup is not premature.
             */
            virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
            phys_page1 = get_page_addr_code(desc->env, virt_page1);
            if (tb->page_addr[1] == phys_page1) {
                return true;
            }
        }
    }
    return false;
}

static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                          target_ulong cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cflags = cflags;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.page_addr0 = phys_pc;
    h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : pc),
                     flags, cflags, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
                                          target_ulong cs_base,
                                          uint32_t flags, uint32_t cflags)
{
    TranslationBlock *tb;
    CPUJumpCache *jc;
    uint32_t hash;

    /* we should never be trying to look up an INVALID tb */
    tcg_debug_assert(!(cflags & CF_INVALID));

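    /*
     * Two-level lookup: first probe the per-vCPU jump cache, a small
     * direct-mapped cache indexed by a hash of pc; on a miss, fall back to
     * the global qht hash table and, if found there, refill the cache entry.
     */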
    hash = tb_jmp_cache_hash_func(pc);
    jc = cpu->tb_jmp_cache;
    tb = tb_jmp_cache_get_tb(jc, hash);

    if (likely(tb &&
               tb_jmp_cache_get_pc(jc, hash, tb) == pc &&
               tb->cs_base == cs_base &&
               tb->flags == flags &&
               tb->trace_vcpu_dstate == *cpu->trace_dstate &&
               tb_cflags(tb) == cflags)) {
        return tb;
    }
    tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return NULL;
    }
    tb_jmp_cache_set(jc, hash, tb, pc);
    return tb;
}

static void log_cpu_exec(target_ulong pc, CPUState *cpu,
                         const TranslationBlock *tb)
{
    if (qemu_log_in_addr_range(pc)) {
        qemu_log_mask(CPU_LOG_EXEC,
                      "Trace %d: %p [" TARGET_FMT_lx
                      "/" TARGET_FMT_lx "/%08x/%08x] %s\n",
                      cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                      tb->flags, tb->cflags, lookup_symbol(pc));

#if defined(DEBUG_DISAS)
        if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
            FILE *logfile = qemu_log_trylock();
            if (logfile) {
                int flags = 0;

                if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
                    flags |= CPU_DUMP_FPU;
                }
#if defined(TARGET_I386)
                flags |= CPU_DUMP_CCOP;
#endif
                cpu_dump_state(cpu, logfile, flags);
                qemu_log_unlock(logfile);
            }
        }
#endif /* DEBUG_DISAS */
    }
}

static bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
                                  uint32_t *cflags)
{
    CPUBreakpoint *bp;
    bool match_page = false;

    if (likely(QTAILQ_EMPTY(&cpu->breakpoints))) {
        return false;
    }

    /*
     * Singlestep overrides breakpoints.
     * This requirement is visible in the record-replay tests, where
     * we would fail to make forward progress in reverse-continue.
     *
     * TODO: gdb singlestep should only override gdb breakpoints,
     * so that one could (gdb) singlestep into the guest kernel's
     * architectural breakpoint handler.
     */
    if (cpu->singlestep_enabled) {
        return false;
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        /*
         * If we have an exact pc match, trigger the breakpoint.
         * Otherwise, note matches within the page.
         */
        if (pc == bp->pc) {
            bool match_bp = false;

            if (bp->flags & BP_GDB) {
                match_bp = true;
            } else if (bp->flags & BP_CPU) {
#ifdef CONFIG_USER_ONLY
                g_assert_not_reached();
#else
                CPUClass *cc = CPU_GET_CLASS(cpu);
                assert(cc->tcg_ops->debug_check_breakpoint);
                match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
#endif
            }

            if (match_bp) {
                cpu->exception_index = EXCP_DEBUG;
                return true;
            }
        } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
            match_page = true;
        }
    }

    /*
     * Within the same page as a breakpoint, single-step,
     * returning to helper_lookup_tb_ptr after each insn looking
     * for the actual breakpoint.
     *
     * TODO: Perhaps better to record all of the TBs associated
     * with a given virtual page that contains a breakpoint, and
     * then invalidate them when a new overlapping breakpoint is
     * set on the page.  Non-overlapping TBs would not be
     * invalidated, nor would any TB need to be invalidated as
     * breakpoints are removed.
     */
    if (match_page) {
        *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
    }
    return false;
}

/**
 * helper_lookup_tb_ptr: quick check for next tb
 * @env: current cpu state
 *
 * Look for an existing TB matching the current cpu state.
 * If found, return the code pointer.  If not found, return
 * the tcg epilogue so that we return into cpu_tb_exec.
 */
const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
    CPUState *cpu = env_cpu(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags, cflags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

    cflags = curr_cflags(cpu);
    if (check_for_breakpoints(cpu, pc, &cflags)) {
        cpu_loop_exit(cpu);
    }

    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return tcg_code_gen_epilogue;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(pc, cpu, tb);
    }

    return tb->tc.ptr;
}

/* Execute a TB, and fix up the CPU state afterwards if necessary */
/*
 * Disable CFI checks.
 * TCG creates binary blobs at runtime, with the transformed code.
 * A TB is a blob of binary code, created at runtime and called with an
 * indirect function call.  Since such a function did not exist at compile
 * time, the CFI runtime has no way to verify its signature and would fail.
 * TCG is not considered a security-sensitive part of QEMU, so this does not
 * affect the impact of CFI in environments with high security requirements.
 */
static inline TranslationBlock * QEMU_DISABLE_CFI
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    const void *tb_ptr = itb->tc.ptr;

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(log_pc(cpu, itb), cpu, itb);
    }

    qemu_thread_jit_execute();
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    /*
     * TODO: Delay swapping back to the read-write region of the TB
     * until we actually need to modify the TB.  The read-only copy,
     * coming from the rx region, shares the same host TLB entry as
     * the code that executed the exit_tb opcode that arrived here.
     * If we insist on touching both the RX and the RW pages, we
     * double the host TLB pressure.
     */
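    /*
     * The value returned by the generated code is the (read-only, rx) host
     * address of the last TB that was executed, with the TB_EXIT_* reason
     * packed into its low TB_EXIT_MASK bits.
     */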
    last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
    *tb_exit = ret & TB_EXIT_MASK;

    trace_exec_tb_exit(last_tb, *tb_exit);

    if (*tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->tcg_ops->synchronize_from_tb) {
            cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(!TARGET_TB_PCREL);
            assert(cc->set_pc);
            cc->set_pc(cpu, tb_pc(last_tb));
        }
        if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
            target_ulong pc = log_pc(cpu, last_tb);
            if (qemu_log_in_addr_range(pc)) {
                qemu_log("Stopped execution of TB chain before %p ["
                         TARGET_FMT_lx "] %s\n",
                         last_tb->tc.ptr, pc, lookup_symbol(pc));
            }
        }
    }

    /*
     * If gdb single-step, and we haven't raised another exception,
     * raise a debug exception.  Single-step with another exception
     * is handled in cpu_handle_exception.
     */
    if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
        cpu->exception_index = EXCP_DEBUG;
        cpu_loop_exit(cpu);
    }

    return last_tb;
}


static void cpu_exec_enter(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_enter) {
        cc->tcg_ops->cpu_exec_enter(cpu);
    }
}

static void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_exit) {
        cc->tcg_ops->cpu_exec_exit(cpu);
    }
}

void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags, cflags;
    int tb_exit;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
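        /*
         * Run the instruction inside an exclusive region: all other vCPUs
         * are paused, so the guest's atomic operation can be emulated with
         * plain (non-atomic) host accesses without racing against them.
         */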
        start_exclusive();
        g_assert(cpu == current_cpu);
        g_assert(!cpu->running);
        cpu->running = true;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

        cflags = curr_cflags(cpu);
        /* Execute in a serial context. */
        cflags &= ~CF_PARALLEL;
        /* After 1 insn, return and release the exclusive lock. */
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
        /*
         * No need to check_for_breakpoints here.
         * We only arrive in cpu_exec_step_atomic after beginning execution
         * of an insn that includes an atomic operation we can't handle.
         * Any breakpoint for this insn will have been recognized earlier.
         */

        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb, &tb_exit);
        cpu_exec_exit(cpu);
    } else {
#ifndef CONFIG_SOFTMMU
        clear_helper_retaddr();
        if (have_mmap_lock()) {
            mmap_unlock();
        }
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
        qemu_plugin_disable_mem_helpers(cpu);
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    cpu->running = false;
    end_exclusive();
}

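/*
 * Patch jump slot @n of @tb so that it branches to @addr.  With a
 * direct-jump TCG backend the branch instruction inside the generated code
 * is rewritten (through the writable split-wx alias); otherwise the target
 * address is stored for the generated code to load and jump to indirectly.
 */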
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        uintptr_t jmp_rx = tc_ptr + offset;
        uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;
        tb_target_set_jmp_target(tc_ptr, jmp_rx, jmp_rw, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    qemu_thread_jit_write();
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
                          (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n",
                  tb->tc.ptr, n, tb_next->tc.ptr);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

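/*
 * Return true if the vCPU is halted with nothing to do, in which case the
 * caller returns EXCP_HALTED; otherwise the halted state is cleared (if it
 * was set) and execution continues.
 */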
static inline bool cpu_handle_halt(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    if (cpu->halted) {
#if defined(TARGET_I386)
        if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif /* TARGET_I386 */
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }
#endif /* !CONFIG_USER_ONLY */

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    if (cc->tcg_ops->debug_excp_handler) {
        cc->tcg_ops->debug_excp_handler(cpu);
    }
}

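/*
 * Return true when the pending exception ends this round of execution and
 * *ret holds the value cpu_exec() should return; return false to keep
 * executing TBs (e.g. after the exception has been delivered to the guest).
 */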
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* Execute just one insn to trigger exception pending in the log */
            cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
                                  | CF_NOIRQ | 1;
        }
#endif
        return false;
    }
    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->tcg_ops->fake_user_interrupt(cpu);
#endif /* TARGET_I386 */
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->tcg_ops->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;

            if (unlikely(cpu->singlestep_enabled)) {
                /*
                 * After processing the exception, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                *ret = EXCP_DEBUG;
                cpu_handle_debug_exception(cpu);
                return true;
            }
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

#ifndef CONFIG_USER_ONLY
/*
 * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
 * "real" interrupt event later. It does not need to be recorded for
 * replay purposes.
 */
static inline bool need_replay_interrupt(int interrupt_request)
{
#if defined(TARGET_I386)
    return !(interrupt_request & CPU_INTERRUPT_POLL);
#else
    return true;
#endif
}
#endif /* !CONFIG_USER_ONLY */

static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    /*
     * If we have requested custom cflags with CF_NOIRQ we should
     * skip checking here. Any pending interrupts will get picked up
     * by the next TB we execute under normal cflags.
     */
    if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) {
        return false;
    }

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if !defined(CONFIG_USER_ONLY)
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif /* !TARGET_I386 */
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           or a longjmp through cpu_loop_exit, in which case it never
           returns here. */
        else {
            CPUClass *cc = CPU_GET_CLASS(cpu);

            if (cc->tcg_ops->cpu_exec_interrupt &&
                cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
                if (need_replay_interrupt(interrupt_request)) {
                    replay_interrupt();
                }
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                if (unlikely(cpu->singlestep_enabled)) {
                    cpu->exception_index = EXCP_DEBUG;
                    qemu_mutex_unlock_iothread();
                    return true;
                }
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
#endif /* !CONFIG_USER_ONLY */
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(qatomic_read(&cpu->exit_request))
        || (icount_enabled()
            && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        qatomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    target_ulong pc,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    int32_t insns_left;

    trace_exec_tb(tb, pc);
    tb = cpu_tb_exec(cpu, tb, tb_exit);
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(icount_enabled());
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    icount_update(cpu);
    /* Refill decrementer and continue execution. */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;

    /*
     * If the next tb has more instructions than we have left to
     * execute we need to ensure we find/generate a TB with exactly
     * insns_left instructions in it.
     */
    if (insns_left > 0 && insns_left < tb->icount) {
        assert(insns_left <= CF_COUNT_MASK);
        assert(cpu->icount_extra == 0);
        cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left;
    }
#endif
}

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__)
        /*
         * Some compilers wrongly smash all local variables after
         * siglongjmp (the spec requires that only non-volatile locals
         * which are changed between the sigsetjmp and siglongjmp are
         * permitted to be trashed). There were bug reports for gcc
         * 4.5.0 and clang.  The bug is fixed in all versions of gcc
         * that we support, but is still unfixed in clang:
         *   https://bugs.llvm.org/show_bug.cgi?id=21183
         *
         * Reload an essential local variable here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered),
         * so we only perform the workaround for clang.
         */
        cpu = current_cpu;
#else
        /* Non-buggy compilers preserve this; assert the correct value. */
        g_assert(cpu == current_cpu);
#endif

#ifndef CONFIG_SOFTMMU
        clear_helper_retaddr();
        if (have_mmap_lock()) {
            mmap_unlock();
        }
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        qemu_plugin_disable_mem_helpers(cpu);

        assert_no_pages_locked();
    }

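    /*
     * Main loop structure: the outer loop delivers any pending exception,
     * while the inner loop services interrupt/exit requests and then finds
     * (or translates) and executes one TB per iteration.
     */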
    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            TranslationBlock *tb;
            target_ulong cs_base, pc;
            uint32_t flags, cflags;

            cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);

            /*
             * When requested, use an exact setting for cflags for the next
             * execution.  This is used for icount, precise smc, and stop-
             * after-access watchpoints.  Since this request should never
             * have CF_INVALID set, -1 is a convenient invalid value that
             * does not require tcg headers for cpu_common_reset.
             */
            cflags = cpu->cflags_next_tb;
            if (cflags == -1) {
                cflags = curr_cflags(cpu);
            } else {
                cpu->cflags_next_tb = -1;
            }

            if (check_for_breakpoints(cpu, pc, &cflags)) {
                break;
            }

            tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
            if (tb == NULL) {
                uint32_t h;

                mmap_lock();
                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
                mmap_unlock();
                /*
                 * We add the TB in the virtual pc hash table
                 * for the fast lookup
                 */
                h = tb_jmp_cache_hash_func(pc);
                tb_jmp_cache_set(cpu->tb_jmp_cache, h, tb, pc);
            }

#ifndef CONFIG_USER_ONLY
            /*
             * We don't take care of direct jumps when address mapping
             * changes in system emulation.  So it's not safe to make a
             * direct jump to a TB spanning two pages because the mapping
             * for the second page can change.
             */
            if (tb->page_addr[1] != -1) {
                last_tb = NULL;
            }
#endif
            /* See if we can patch the calling TB. */
            if (last_tb) {
                tb_add_jump(last_tb, tb_exit, tb);
            }

            cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);

            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}

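/*
 * Per-vCPU TCG setup: the target's translator is initialised once (guarded
 * by tcg_target_initialized), then the per-vCPU TLB, plugin hooks and, in
 * system mode, the IOMMU notifier list are initialised.
 */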
void tcg_exec_realizefn(CPUState *cpu, Error **errp)
{
    static bool tcg_target_initialized;
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!tcg_target_initialized) {
        cc->tcg_ops->initialize();
        tcg_target_initialized = true;
    }
    tlb_init(cpu);
    qemu_plugin_vcpu_init_hook(cpu);

#ifndef CONFIG_USER_ONLY
    tcg_iommu_init_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */
}

/* undo the initializations in reverse order */
void tcg_exec_unrealizefn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    tcg_iommu_free_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */

    qemu_plugin_vcpu_exit_hook(cpu);
    tlb_destroy(cpu);
}

#ifndef CONFIG_USER_ONLY

static void dump_drift_info(GString *buf)
{
    if (!icount_enabled()) {
        return;
    }

    g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n",
                           (cpu_get_clock() - icount_get()) / SCALE_MS);
    if (icount_align_option) {
        g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n",
                               -max_delay / SCALE_MS);
        g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n",
                               max_advance / SCALE_MS);
    } else {
        g_string_append_printf(buf, "Max guest delay NA\n");
        g_string_append_printf(buf, "Max guest advance NA\n");
    }
}

HumanReadableText *qmp_x_query_jit(Error **errp)
{
    g_autoptr(GString) buf = g_string_new("");

    if (!tcg_enabled()) {
        error_setg(errp, "JIT information is only available with accel=tcg");
        return NULL;
    }

    dump_exec_info(buf);
    dump_drift_info(buf);

    return human_readable_text_from_str(buf);
}

HumanReadableText *qmp_x_query_opcount(Error **errp)
{
    g_autoptr(GString) buf = g_string_new("");

    if (!tcg_enabled()) {
        error_setg(errp,
                   "Opcode count information is only available with accel=tcg");
        return NULL;
    }

    tcg_dump_op_count(buf);

    return human_readable_text_from_str(buf);
}

#ifdef CONFIG_PROFILER

int64_t dev_time;

HumanReadableText *qmp_x_query_profile(Error **errp)
{
    g_autoptr(GString) buf = g_string_new("");
    static int64_t last_cpu_exec_time;
    int64_t cpu_exec_time;
    int64_t delta;

    cpu_exec_time = tcg_cpu_exec_time();
    delta = cpu_exec_time - last_cpu_exec_time;

    g_string_append_printf(buf, "async time %" PRId64 " (%0.3f)\n",
                           dev_time, dev_time / (double)NANOSECONDS_PER_SECOND);
    g_string_append_printf(buf, "qemu time %" PRId64 " (%0.3f)\n",
                           delta, delta / (double)NANOSECONDS_PER_SECOND);
    last_cpu_exec_time = cpu_exec_time;
    dev_time = 0;

    return human_readable_text_from_str(buf);
}
#else
HumanReadableText *qmp_x_query_profile(Error **errp)
{
    error_setg(errp, "Internal profiler not compiled");
    return NULL;
}
#endif

#endif /* !CONFIG_USER_ONLY */