/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-machine.h"
#include "qapi/type-helpers.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "qemu/atomic.h"
#include "qemu/compiler.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/replay.h"
#include "sysemu/tcg.h"
#include "exec/helper-proto.h"
#include "tb-jmp-cache.h"
#include "tb-context.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks is therefore bounded.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

static int64_t max_delay;
static int64_t max_advance;

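/*
 * align_clocks() keeps the virtual (guest) clock and the host realtime
 * clock in sync: when the guest has run ahead of the host by more than
 * VM_CLOCK_ADVANCE, the host thread sleeps off the difference.
 */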
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                        threshold_delay - 1,
                        threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}

#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

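/*
 * Compute the cflags for a new TB: the low CF_COUNT_MASK bits hold the
 * maximum number of instructions to translate, the remaining bits select
 * behaviours such as chaining (goto_tb/goto_ptr) and single-stepping.
 */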
uint32_t curr_cflags(CPUState *cpu)
{
    uint32_t cflags = cpu->tcg_cflags;

    /*
     * Record gdb single-step.  We should be exiting the TB by raising
     * EXCP_DEBUG, but to simplify other tests, disable chaining too.
     *
     * For singlestep and -d nochain, suppress goto_tb so that
     * we can log -d cpu,exec after every TB.
     */
    if (unlikely(cpu->singlestep_enabled)) {
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
    } else if (singlestep) {
        cflags |= CF_NO_GOTO_TB | 1;
    } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        cflags |= CF_NO_GOTO_TB;
    }

    return cflags;
}

struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t page_addr0;
    uint32_t flags;
    uint32_t cflags;
    uint32_t trace_vcpu_dstate;
};

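/*
 * tb_lookup_cmp() is the qht comparison callback: it matches a candidate
 * TB against the lookup key in tb_desc, including the physical address of
 * the second page for TBs that cross a page boundary.
 */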
static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) &&
        tb_page_addr0(tb) == desc->page_addr0 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        tb_cflags(tb) == desc->cflags) {
        /* check next page if needed */
        tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
        if (tb_phys_page1 == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page1;
            target_ulong virt_page1;

            /*
             * We know that the first page matched, and an otherwise valid TB
             * encountered an incomplete instruction at the end of that page,
             * therefore we know that generating a new TB from the current PC
             * must also require reading from the next page -- even if the
             * second pages do not match, and therefore the resulting insn
             * is different for the new TB.  Therefore any exception raised
             * here by the faulting lookup is not premature.
             */
            virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
            phys_page1 = get_page_addr_code(desc->env, virt_page1);
            if (tb_phys_page1 == phys_page1) {
                return true;
            }
        }
    }
    return false;
}

static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                          target_ulong cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cflags = cflags;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.page_addr0 = phys_pc;
    h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : pc),
                     flags, cflags, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

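/*
 * TB lookup is two-level: first the per-CPU jump cache indexed by a hash
 * of the pc, then the global qht hash table via tb_htable_lookup().
 */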
/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
                                          target_ulong cs_base,
                                          uint32_t flags, uint32_t cflags)
{
    TranslationBlock *tb;
    CPUJumpCache *jc;
    uint32_t hash;

    /* we should never be trying to look up an INVALID tb */
    tcg_debug_assert(!(cflags & CF_INVALID));

    hash = tb_jmp_cache_hash_func(pc);
    jc = cpu->tb_jmp_cache;
    tb = tb_jmp_cache_get_tb(jc, hash);

    if (likely(tb &&
               tb_jmp_cache_get_pc(jc, hash, tb) == pc &&
               tb->cs_base == cs_base &&
               tb->flags == flags &&
               tb->trace_vcpu_dstate == *cpu->trace_dstate &&
               tb_cflags(tb) == cflags)) {
        return tb;
    }
    tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return NULL;
    }
    tb_jmp_cache_set(jc, hash, tb, pc);
    return tb;
}

static void log_cpu_exec(target_ulong pc, CPUState *cpu,
                         const TranslationBlock *tb)
{
    if (qemu_log_in_addr_range(pc)) {
        qemu_log_mask(CPU_LOG_EXEC,
                      "Trace %d: %p [" TARGET_FMT_lx
                      "/" TARGET_FMT_lx "/%08x/%08x] %s\n",
                      cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                      tb->flags, tb->cflags, lookup_symbol(pc));

#if defined(DEBUG_DISAS)
        if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
            FILE *logfile = qemu_log_trylock();
            if (logfile) {
                int flags = 0;

                if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
                    flags |= CPU_DUMP_FPU;
                }
#if defined(TARGET_I386)
                flags |= CPU_DUMP_CCOP;
#endif
                cpu_dump_state(cpu, logfile, flags);
                qemu_log_unlock(logfile);
            }
        }
#endif /* DEBUG_DISAS */
    }
}

static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc,
                                       uint32_t *cflags)
{
    CPUBreakpoint *bp;
    bool match_page = false;

    /*
     * Singlestep overrides breakpoints.
     * This requirement is visible in the record-replay tests, where
     * we would fail to make forward progress in reverse-continue.
     *
     * TODO: gdb singlestep should only override gdb breakpoints,
     * so that one could (gdb) singlestep into the guest kernel's
     * architectural breakpoint handler.
     */
    if (cpu->singlestep_enabled) {
        return false;
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        /*
         * If we have an exact pc match, trigger the breakpoint.
         * Otherwise, note matches within the page.
         */
        if (pc == bp->pc) {
            bool match_bp = false;

            if (bp->flags & BP_GDB) {
                match_bp = true;
            } else if (bp->flags & BP_CPU) {
#ifdef CONFIG_USER_ONLY
                g_assert_not_reached();
#else
                CPUClass *cc = CPU_GET_CLASS(cpu);
                assert(cc->tcg_ops->debug_check_breakpoint);
                match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
#endif
            }

            if (match_bp) {
                cpu->exception_index = EXCP_DEBUG;
                return true;
            }
        } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
            match_page = true;
        }
    }

    if (match_page) {
        /*
         * Within the same page as a breakpoint, single-step,
         * returning to helper_lookup_tb_ptr after each insn looking
         * for the actual breakpoint.
         *
         * TODO: Perhaps better to record all of the TBs associated
         * with a given virtual page that contains a breakpoint, and
         * then invalidate them when a new overlapping breakpoint is
         * set on the page.  Non-overlapping TBs would not be
         * invalidated, nor would any TB need to be invalidated as
         * breakpoints are removed.
         */
        *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
    }
    return false;
}

static inline bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
                                         uint32_t *cflags)
{
    return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
        check_for_breakpoints_slow(cpu, pc, cflags);
}

/**
 * helper_lookup_tb_ptr: quick check for next tb
 * @env: current cpu state
 *
 * Look for an existing TB matching the current cpu state.
 * If found, return the code pointer.  If not found, return
 * the tcg epilogue so that we return into cpu_tb_exec.
 */
const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
    CPUState *cpu = env_cpu(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags, cflags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

    cflags = curr_cflags(cpu);
    if (check_for_breakpoints(cpu, pc, &cflags)) {
        cpu_loop_exit(cpu);
    }

    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return tcg_code_gen_epilogue;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(pc, cpu, tb);
    }

    return tb->tc.ptr;
}

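/*
 * The value returned by tcg_qemu_tb_exec() encodes both the last TB
 * executed and the exit condition: the low TB_EXIT_MASK bits hold the
 * exit index, the remaining bits are the (rx) pointer of that TB.
 */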
/* Execute a TB, and fix up the CPU state afterwards if necessary */
/*
 * Disable CFI checks.
 * TCG creates binary blobs at runtime, with the transformed code.
 * A TB is a blob of binary code, created at runtime and called with an
 * indirect function call. Since such function did not exist at compile time,
 * the CFI runtime has no way to verify its signature and would fail.
 * TCG is not considered a security-sensitive part of QEMU so this does not
 * affect the impact of CFI in environments with high security requirements.
 */
static inline TranslationBlock * QEMU_DISABLE_CFI
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    const void *tb_ptr = itb->tc.ptr;

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(log_pc(cpu, itb), cpu, itb);
    }

    qemu_thread_jit_execute();
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    /*
     * TODO: Delay swapping back to the read-write region of the TB
     * until we actually need to modify the TB.  The read-only copy,
     * coming from the rx region, shares the same host TLB entry as
     * the code that executed the exit_tb opcode that arrived here.
     * If we insist on touching both the RX and the RW pages, we
     * double the host TLB pressure.
     */
    last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
    *tb_exit = ret & TB_EXIT_MASK;

    trace_exec_tb_exit(last_tb, *tb_exit);

    if (*tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->tcg_ops->synchronize_from_tb) {
            cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(!TARGET_TB_PCREL);
            cc->set_pc(cpu, tb_pc(last_tb));
        }
        if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
            target_ulong pc = log_pc(cpu, last_tb);
            if (qemu_log_in_addr_range(pc)) {
                qemu_log("Stopped execution of TB chain before %p ["
                         TARGET_FMT_lx "] %s\n",
                         last_tb->tc.ptr, pc, lookup_symbol(pc));
            }
        }
    }

    /*
     * If gdb single-step, and we haven't raised another exception,
     * raise a debug exception.  Single-step with another exception
     * is handled in cpu_handle_exception.
     */
    if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
        cpu->exception_index = EXCP_DEBUG;
        cpu_loop_exit(cpu);
    }

    return last_tb;
}

static void cpu_exec_enter(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_enter) {
        cc->tcg_ops->cpu_exec_enter(cpu);
    }
}

static void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_exit) {
        cc->tcg_ops->cpu_exec_exit(cpu);
    }
}

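/*
 * cpu_exec_step_atomic() runs exactly one guest instruction inside an
 * exclusive section, with CF_PARALLEL cleared; it is used when a TB hits
 * an atomic operation that cannot be emulated while other vCPUs run.
 */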
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags, cflags;
    int tb_exit;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();
        g_assert(cpu == current_cpu);
        g_assert(!cpu->running);
        cpu->running = true;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

        cflags = curr_cflags(cpu);
        /* Execute in a serial context. */
        cflags &= ~CF_PARALLEL;
        /* After 1 insn, return and release the exclusive lock. */
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
        /*
         * No need to check_for_breakpoints here.
         * We only arrive in cpu_exec_step_atomic after beginning execution
         * of an insn that includes an atomic operation we can't handle.
         * Any breakpoint for this insn will have been recognized earlier.
         */

        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb, &tb_exit);
        cpu_exec_exit(cpu);
    } else {
#ifndef CONFIG_SOFTMMU
        clear_helper_retaddr();
        if (have_mmap_lock()) {
            mmap_unlock();
        }
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
        qemu_plugin_disable_mem_helpers(cpu);
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    cpu->running = false;
    end_exclusive();
}

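/*
 * Set the destination of outgoing jump slot @n of @tb: either patch the
 * generated code in place (direct jump) or store the new target address
 * for an indirect jump, depending on the TCG backend.
 */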
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        uintptr_t jmp_rx = tc_ptr + offset;
        uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;
        tb_target_set_jmp_target(tc_ptr, jmp_rx, jmp_rw, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

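/*
 * Chain @tb to @tb_next: claim the jump slot with a cmpxchg, patch the
 * native jump, and record the link in tb_next's jmp_list so that it can
 * be undone when tb_next is invalidated.
 */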
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    qemu_thread_jit_write();
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
                          (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n",
                  tb->tc.ptr, n, tb_next->tc.ptr);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

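/*
 * Return true if the CPU is halted and has no work pending, in which
 * case cpu_exec() returns EXCP_HALTED without entering the loop.
 */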
static inline bool cpu_handle_halt(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    if (cpu->halted) {
#if defined(TARGET_I386)
        if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif /* TARGET_I386 */
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }
#endif /* !CONFIG_USER_ONLY */

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    if (cc->tcg_ops->debug_excp_handler) {
        cc->tcg_ops->debug_excp_handler(cpu);
    }
}

static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* Execute just one insn to trigger exception pending in the log */
            cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
                                  | CF_NOIRQ | 1;
        }
#endif
        return false;
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    }

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
#if defined(TARGET_I386)
    CPUClass *cc = CPU_GET_CLASS(cpu);
    cc->tcg_ops->fake_user_interrupt(cpu);
#endif /* TARGET_I386 */
    *ret = cpu->exception_index;
    cpu->exception_index = -1;
    return true;
#else
    if (replay_exception()) {
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_mutex_lock_iothread();
        cc->tcg_ops->do_interrupt(cpu);
        qemu_mutex_unlock_iothread();
        cpu->exception_index = -1;

        if (unlikely(cpu->singlestep_enabled)) {
            /*
             * After processing the exception, ensure an EXCP_DEBUG is
             * raised when single-stepping so that GDB doesn't miss the
             * next instruction.
             */
            *ret = EXCP_DEBUG;
            cpu_handle_debug_exception(cpu);
            return true;
        }
    } else if (!replay_has_interrupt()) {
        /* give a chance to iothread in replay mode */
        *ret = EXCP_INTERRUPT;
        return true;
    }
#endif

    return false;
}

#ifndef CONFIG_USER_ONLY
/*
 * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
 * "real" interrupt event later. It does not need to be recorded for
 * replay purposes.
 */
static inline bool need_replay_interrupt(int interrupt_request)
{
#if defined(TARGET_I386)
    return !(interrupt_request & CPU_INTERRUPT_POLL);
#else
    return true;
#endif
}
#endif /* !CONFIG_USER_ONLY */

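/*
 * Handle pending interrupt and exit requests.  Returns true when the inner
 * execution loop should break out and let cpu_handle_exception() run; also
 * clears icount_decr.u16.high, which generated code checks at the start of
 * each TB to notice pending exit requests.
 */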
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    /*
     * If we have requested custom cflags with CF_NOIRQ we should
     * skip checking here. Any pending interrupts will get picked up
     * by the next TB we execute under normal cflags.
     */
    if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) {
        return false;
    }

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if !defined(CONFIG_USER_ONLY)
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif /* !TARGET_I386 */
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           or it may exit via longjmp through cpu_loop_exit.  */
        else {
            CPUClass *cc = CPU_GET_CLASS(cpu);

            if (cc->tcg_ops->cpu_exec_interrupt &&
                cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
                if (need_replay_interrupt(interrupt_request)) {
                    replay_interrupt();
                }
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                if (unlikely(cpu->singlestep_enabled)) {
                    cpu->exception_index = EXCP_DEBUG;
                    qemu_mutex_unlock_iothread();
                    return true;
                }
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
#endif /* !CONFIG_USER_ONLY */
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(qatomic_read(&cpu->exit_request))
        || (icount_enabled()
            && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        qatomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

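/*
 * Execute one TB chain and account for it against the icount budget:
 * when the instruction counter expires, refill the decrementer from
 * icount_budget and, if needed, request a shorter next TB so the budget
 * is hit exactly.
 */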
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    target_ulong pc,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    int32_t insns_left;

    trace_exec_tb(tb, pc);
    tb = cpu_tb_exec(cpu, tb, tb_exit);
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt. cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(icount_enabled());
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    icount_update(cpu);
    /* Refill decrementer and continue execution. */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;

    /*
     * If the next tb has more instructions than we have left to
     * execute we need to ensure we find/generate a TB with exactly
     * insns_left instructions in it.
     */
    if (insns_left > 0 && insns_left < tb->icount) {
        assert(insns_left <= CF_COUNT_MASK);
        assert(cpu->icount_extra == 0);
        cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left;
    }
#endif
}

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();
    cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__)
        /*
         * Some compilers wrongly smash all local variables after
         * siglongjmp (the spec requires that only non-volatile locals
         * which are changed between the sigsetjmp and siglongjmp are
         * permitted to be trashed). There were bug reports for gcc
         * 4.5.0 and clang.  The bug is fixed in all versions of gcc
         * that we support, but is still unfixed in clang:
         *   https://bugs.llvm.org/show_bug.cgi?id=21183
         *
         * Reload an essential local variable here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered),
         * so we only perform the workaround for clang.
         */
        cpu = current_cpu;
#else
        /* Non-buggy compilers preserve this; assert the correct value. */
        g_assert(cpu == current_cpu);
#endif

#ifndef CONFIG_SOFTMMU
        clear_helper_retaddr();
        if (have_mmap_lock()) {
            mmap_unlock();
        }
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        qemu_plugin_disable_mem_helpers(cpu);

        assert_no_pages_locked();
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            TranslationBlock *tb;
            target_ulong cs_base, pc;
            uint32_t flags, cflags;

            cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);

            /*
             * When requested, use an exact setting for cflags for the next
             * execution.  This is used for icount, precise smc, and stop-
             * after-access watchpoints.  Since this request should never
             * have CF_INVALID set, -1 is a convenient invalid value that
             * does not require tcg headers for cpu_common_reset.
             */
            cflags = cpu->cflags_next_tb;
            if (cflags == -1) {
                cflags = curr_cflags(cpu);
            } else {
                cpu->cflags_next_tb = -1;
            }

            if (check_for_breakpoints(cpu, pc, &cflags)) {
                break;
            }

            tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
            if (tb == NULL) {
                uint32_t h;

                mmap_lock();
                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
                mmap_unlock();
                /*
                 * We add the TB in the virtual pc hash table
                 * for the fast lookup
                 */
                h = tb_jmp_cache_hash_func(pc);
                tb_jmp_cache_set(cpu->tb_jmp_cache, h, tb, pc);
            }

#ifndef CONFIG_USER_ONLY
            /*
             * We don't take care of direct jumps when address mapping
             * changes in system emulation.  So it's not safe to make a
             * direct jump to a TB spanning two pages because the mapping
             * for the second page can change.
             */
            if (tb_page_addr1(tb) != -1) {
                last_tb = NULL;
            }
#endif
            /* See if we can patch the calling TB. */
            if (last_tb) {
                tb_add_jump(last_tb, tb_exit, tb);
            }

            cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);

            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}

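/*
 * Per-vCPU TCG setup: the first realized CPU also initializes the TCG
 * frontend via tcg_ops->initialize().
 */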
void tcg_exec_realizefn(CPUState *cpu, Error **errp)
{
    static bool tcg_target_initialized;
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!tcg_target_initialized) {
        cc->tcg_ops->initialize();
        tcg_target_initialized = true;
    }

    qemu_plugin_vcpu_init_hook(cpu);

#ifndef CONFIG_USER_ONLY
    tcg_iommu_init_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */
}

/* undo the initializations in reverse order */
void tcg_exec_unrealizefn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    tcg_iommu_free_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */

    qemu_plugin_vcpu_exit_hook(cpu);
}

#ifndef CONFIG_USER_ONLY

static void dump_drift_info(GString *buf)
{
    if (!icount_enabled()) {
        return;
    }

    g_string_append_printf(buf, "Host - Guest clock  %"PRIi64" ms\n",
                           (cpu_get_clock() - icount_get()) / SCALE_MS);
    if (icount_align_option) {
        g_string_append_printf(buf, "Max guest delay     %"PRIi64" ms\n",
                               -max_delay / SCALE_MS);
        g_string_append_printf(buf, "Max guest advance   %"PRIi64" ms\n",
                               max_advance / SCALE_MS);
    } else {
        g_string_append_printf(buf, "Max guest delay     NA\n");
        g_string_append_printf(buf, "Max guest advance   NA\n");
    }
}

HumanReadableText *qmp_x_query_jit(Error **errp)
{
    g_autoptr(GString) buf = g_string_new("");

    if (!tcg_enabled()) {
        error_setg(errp, "JIT information is only available with accel=tcg");
        return NULL;
    }

    dump_exec_info(buf);
    dump_drift_info(buf);

    return human_readable_text_from_str(buf);
}

HumanReadableText *qmp_x_query_opcount(Error **errp)
{
    g_autoptr(GString) buf = g_string_new("");

    if (!tcg_enabled()) {
        error_setg(errp,
                   "Opcode count information is only available with accel=tcg");
        return NULL;
    }

    tcg_dump_op_count(buf);

    return human_readable_text_from_str(buf);
}

#ifdef CONFIG_PROFILER

int64_t dev_time;

HumanReadableText *qmp_x_query_profile(Error **errp)
{
    g_autoptr(GString) buf = g_string_new("");
    static int64_t last_cpu_exec_time;
    int64_t cpu_exec_time;
    int64_t delta;

    cpu_exec_time = tcg_cpu_exec_time();
    delta = cpu_exec_time - last_cpu_exec_time;

    g_string_append_printf(buf, "async time  %" PRId64 " (%0.3f)\n",
                           dev_time, dev_time / (double)NANOSECONDS_PER_SECOND);
    g_string_append_printf(buf, "qemu time   %" PRId64 " (%0.3f)\n",
                           delta, delta / (double)NANOSECONDS_PER_SECOND);
    last_cpu_exec_time = cpu_exec_time;
    dev_time = 0;

    return human_readable_text_from_str(buf);
}
#else
HumanReadableText *qmp_x_query_profile(Error **errp)
{
    error_setg(errp, "Internal profiler not compiled");
    return NULL;
}
#endif /* CONFIG_PROFILER */

#endif /* !CONFIG_USER_ONLY */