/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qapi/type-helpers.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "qemu/atomic.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "exec/replay-core.h"
#include "sysemu/tcg.h"
#include "exec/helper-proto.h"
#include "tb-jmp-cache.h"
#include "tb-context.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
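
/*
 * align_clocks: with -icount align, the virtual clock advances by a fixed
 * amount per executed instruction and can run ahead of the host's realtime
 * clock.  Once the accumulated advance exceeds VM_CLOCK_ADVANCE, sleep so
 * that the two clocks stay roughly in sync.
 */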

static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                        threshold_delay - 1,
                        threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late.  We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

uint32_t curr_cflags(CPUState *cpu)
{
    uint32_t cflags = cpu->tcg_cflags;

    /*
     * Record gdb single-step.  We should be exiting the TB by raising
     * EXCP_DEBUG, but to simplify other tests, disable chaining too.
     *
     * For singlestep and -d nochain, suppress goto_tb so that
     * we can log -d cpu,exec after every TB.
     */
    if (unlikely(cpu->singlestep_enabled)) {
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
    } else if (singlestep) {
        cflags |= CF_NO_GOTO_TB | 1;
    } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        cflags |= CF_NO_GOTO_TB;
    }

    return cflags;
}
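
/*
 * Key describing the CPU state a TranslationBlock was generated for; used
 * by tb_htable_lookup() and tb_lookup_cmp() to find a matching TB in the
 * global hash table.
 */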

struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t page_addr0;
    uint32_t flags;
    uint32_t cflags;
    uint32_t trace_vcpu_dstate;
};

static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if ((tb_cflags(tb) & CF_PCREL || tb_pc(tb) == desc->pc) &&
        tb_page_addr0(tb) == desc->page_addr0 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        tb_cflags(tb) == desc->cflags) {
        /* check next page if needed */
        tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
        if (tb_phys_page1 == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page1;
            target_ulong virt_page1;

            /*
             * We know that the first page matched, and an otherwise valid TB
             * encountered an incomplete instruction at the end of that page,
             * therefore we know that generating a new TB from the current PC
             * must also require reading from the next page -- even if the
             * second pages do not match, and therefore the resulting insn
             * is different for the new TB.  Therefore any exception raised
             * here by the faulting lookup is not premature.
             */
            virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
            phys_page1 = get_page_addr_code(desc->env, virt_page1);
            if (tb_phys_page1 == phys_page1) {
                return true;
            }
        }
    }
    return false;
}

static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                          target_ulong cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cflags = cflags;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.page_addr0 = phys_pc;
    h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
                     flags, cflags, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
                                          target_ulong cs_base,
                                          uint32_t flags, uint32_t cflags)
{
    CPUJumpCache *jc;
    TranslationBlock *tb;
    uint32_t hash;

    /* we should never be trying to look up an INVALID tb */
    tcg_debug_assert(!(cflags & CF_INVALID));

    hash = tb_jmp_cache_hash_func(pc);
    jc = cpu->tb_jmp_cache;
    tb = tb_jmp_cache_get_tb(jc, cflags, hash);

    if (likely(tb &&
               tb_jmp_cache_get_pc(jc, hash, tb) == pc &&
               tb->cs_base == cs_base &&
               tb->flags == flags &&
               tb->trace_vcpu_dstate == *cpu->trace_dstate &&
               tb_cflags(tb) == cflags)) {
        return tb;
    }
    tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return NULL;
    }
    tb_jmp_cache_set(jc, hash, tb, pc);
    return tb;
}
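
/* Log one TB execution to the -d exec/cpu logs, honouring -dfilter ranges. */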

static void log_cpu_exec(target_ulong pc, CPUState *cpu,
                         const TranslationBlock *tb)
{
    if (qemu_log_in_addr_range(pc)) {
        qemu_log_mask(CPU_LOG_EXEC,
                      "Trace %d: %p [" TARGET_FMT_lx
                      "/" TARGET_FMT_lx "/%08x/%08x] %s\n",
                      cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                      tb->flags, tb->cflags, lookup_symbol(pc));

#if defined(DEBUG_DISAS)
        if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
            FILE *logfile = qemu_log_trylock();
            if (logfile) {
                int flags = 0;

                if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
                    flags |= CPU_DUMP_FPU;
                }
#if defined(TARGET_I386)
                flags |= CPU_DUMP_CCOP;
#endif
                cpu_dump_state(cpu, logfile, flags);
                qemu_log_unlock(logfile);
            }
        }
#endif /* DEBUG_DISAS */
    }
}
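
/*
 * Slow path of check_for_breakpoints(): scan the breakpoint list for an
 * exact pc hit (which raises EXCP_DEBUG) or for a breakpoint elsewhere in
 * the same page (which forces single-insn TBs until the exact address is
 * reached).
 */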

static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc,
                                       uint32_t *cflags)
{
    CPUBreakpoint *bp;
    bool match_page = false;

    /*
     * Singlestep overrides breakpoints.
     * This requirement is visible in the record-replay tests, where
     * we would fail to make forward progress in reverse-continue.
     *
     * TODO: gdb singlestep should only override gdb breakpoints,
     * so that one could (gdb) singlestep into the guest kernel's
     * architectural breakpoint handler.
     */
    if (cpu->singlestep_enabled) {
        return false;
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        /*
         * If we have an exact pc match, trigger the breakpoint.
         * Otherwise, note matches within the page.
         */
        if (pc == bp->pc) {
            bool match_bp = false;

            if (bp->flags & BP_GDB) {
                match_bp = true;
            } else if (bp->flags & BP_CPU) {
#ifdef CONFIG_USER_ONLY
                g_assert_not_reached();
#else
                CPUClass *cc = CPU_GET_CLASS(cpu);
                assert(cc->tcg_ops->debug_check_breakpoint);
                match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
#endif
            }

            if (match_bp) {
                cpu->exception_index = EXCP_DEBUG;
                return true;
            }
        } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
            match_page = true;
        }
    }

    /*
     * Within the same page as a breakpoint, single-step,
     * returning to helper_lookup_tb_ptr after each insn looking
     * for the actual breakpoint.
     *
     * TODO: Perhaps better to record all of the TBs associated
     * with a given virtual page that contains a breakpoint, and
     * then invalidate them when a new overlapping breakpoint is
     * set on the page.  Non-overlapping TBs would not be
     * invalidated, nor would any TB need to be invalidated as
     * breakpoints are removed.
     */
    if (match_page) {
        *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
    }
    return false;
}

static inline bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
                                         uint32_t *cflags)
{
    return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
        check_for_breakpoints_slow(cpu, pc, cflags);
}

/**
 * helper_lookup_tb_ptr: quick check for next tb
 * @env: current cpu state
 *
 * Look for an existing TB matching the current cpu state.
 * If found, return the code pointer.  If not found, return
 * the tcg epilogue so that we return into cpu_tb_exec.
 */
const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
    CPUState *cpu = env_cpu(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags, cflags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

    cflags = curr_cflags(cpu);
    if (check_for_breakpoints(cpu, pc, &cflags)) {
        cpu_loop_exit(cpu);
    }

    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return tcg_code_gen_epilogue;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(pc, cpu, tb);
    }

    return tb->tc.ptr;
}

/* Execute a TB, and fix up the CPU state afterwards if necessary */

/*
 * Disable CFI checks.
 * TCG creates binary blobs at runtime, with the transformed code.
 * A TB is a blob of binary code, created at runtime and called with an
 * indirect function call.  Since such a function did not exist at compile
 * time, the CFI runtime has no way to verify its signature and would fail.
 * TCG is not considered a security-sensitive part of QEMU, so this does not
 * affect the impact of CFI in environments with high security requirements.
 */
static inline TranslationBlock * QEMU_DISABLE_CFI
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    const void *tb_ptr = itb->tc.ptr;

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(log_pc(cpu, itb), cpu, itb);
    }

    qemu_thread_jit_execute();
    ret = tcg_qemu_tb_exec(env, tb_ptr);

    /*
     * TODO: Delay swapping back to the read-write region of the TB
     * until we actually need to modify the TB.  The read-only copy,
     * coming from the rx region, shares the same host TLB entry as
     * the code that executed the exit_tb opcode that arrived here.
     * If we insist on touching both the RX and the RW pages, we
     * double the host TLB pressure.
     */
    last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
    *tb_exit = ret & TB_EXIT_MASK;

    trace_exec_tb_exit(last_tb, *tb_exit);

    if (*tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->tcg_ops->synchronize_from_tb) {
            cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
        } else {
            tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL));
            cc->set_pc(cpu, tb_pc(last_tb));
        }
        if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
            target_ulong pc = log_pc(cpu, last_tb);
            if (qemu_log_in_addr_range(pc)) {
                qemu_log("Stopped execution of TB chain before %p ["
                         TARGET_FMT_lx "] %s\n",
                         last_tb->tc.ptr, pc, lookup_symbol(pc));
            }
        }
    }

    /*
     * If gdb single-step, and we haven't raised another exception,
     * raise a debug exception.  Single-step with another exception
     * is handled in cpu_handle_exception.
     */
    if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
        cpu->exception_index = EXCP_DEBUG;
        cpu_loop_exit(cpu);
    }

    return last_tb;
}
*cpu
)
490 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
492 if (cc
->tcg_ops
->cpu_exec_enter
) {
493 cc
->tcg_ops
->cpu_exec_enter(cpu
);
497 static void cpu_exec_exit(CPUState
*cpu
)
499 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
501 if (cc
->tcg_ops
->cpu_exec_exit
) {
502 cc
->tcg_ops
->cpu_exec_exit(cpu
);
504 QEMU_PLUGIN_ASSERT(cpu
->plugin_mem_cbs
== NULL
);
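
/*
 * Execute exactly one guest instruction inside the exclusive region.
 * Used when an insn containing an atomic operation cannot be handled in
 * parallel mode: CF_PARALLEL is cleared so the insn is translated for a
 * serial context.
 */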

void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags, cflags;
    int tb_exit;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();
        g_assert(cpu == current_cpu);
        g_assert(!cpu->running);
        cpu->running = true;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

        cflags = curr_cflags(cpu);
        /* Execute in a serial context. */
        cflags &= ~CF_PARALLEL;
        /* After 1 insn, return and release the exclusive lock. */
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
        /*
         * No need to check_for_breakpoints here.
         * We only arrive in cpu_exec_step_atomic after beginning execution
         * of an insn that includes an atomic operation we can't handle.
         * Any breakpoint for this insn will have been recognized earlier.
         */

        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb, &tb_exit);
        cpu_exec_exit(cpu);
    } else {
#ifndef CONFIG_SOFTMMU
        clear_helper_retaddr();
        if (have_mmap_lock()) {
            mmap_unlock();
        }
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
        qemu_plugin_disable_mem_helpers(cpu);
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    cpu->running = false;
    end_exclusive();
}

void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    /*
     * Get the rx view of the structure, from which we find the
     * executable code address, and tb_target_set_jmp_target can
     * produce a pc-relative displacement to jmp_target_addr[n].
     */
    const TranslationBlock *c_tb = tcg_splitwx_to_rx(tb);
    uintptr_t offset = tb->jmp_insn_offset[n];
    uintptr_t jmp_rx = (uintptr_t)tb->tc.ptr + offset;
    uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;

    tb->jmp_target_addr[n] = addr;
    tb_target_set_jmp_target(c_tb, n, jmp_rx, jmp_rw);
}
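
/*
 * Chain jump slot @n of @tb directly to @tb_next so execution flows from
 * one block to the other without returning to the main loop.  The slot is
 * claimed atomically and the link is recorded on @tb_next's jump list so
 * it can be undone when either TB is invalidated.
 */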

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    qemu_thread_jit_write();
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
                          (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n",
                  tb->tc.ptr, n, tb_next->tc.ptr);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}
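
/*
 * Returns true if the CPU is halted with no work pending, in which case
 * cpu_exec() bails out immediately with EXCP_HALTED.
 */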

static inline bool cpu_handle_halt(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    if (cpu->halted) {
#if defined(TARGET_I386)
        if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif /* TARGET_I386 */
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }
#endif /* !CONFIG_USER_ONLY */

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    if (cc->tcg_ops->debug_excp_handler) {
        cc->tcg_ops->debug_excp_handler(cpu);
    }
}
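
/*
 * Process any pending exception.  Returns true when the execution loop
 * must stop and hand *ret back to the caller, false when execution can
 * continue.
 */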

static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* Execute just one insn to trigger exception pending in the log */
            cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
                | CF_NOIRQ | 1;
        }
#endif
        return false;
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->tcg_ops->fake_user_interrupt(cpu);
#endif /* TARGET_I386 */
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->tcg_ops->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;

            if (unlikely(cpu->singlestep_enabled)) {
                /*
                 * After processing the exception, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                *ret = EXCP_DEBUG;
                cpu_handle_debug_exception(cpu);
                return true;
            }
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

#ifndef CONFIG_USER_ONLY
/*
 * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
 * "real" interrupt event later.  It does not need to be recorded for
 * replay purposes.
 */
static inline bool need_replay_interrupt(int interrupt_request)
{
#if defined(TARGET_I386)
    return !(interrupt_request & CPU_INTERRUPT_POLL);
#else
    return true;
#endif
}
#endif /* !CONFIG_USER_ONLY */
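
/*
 * Check for pending interrupts and exit requests.  Returns true when the
 * inner execution loop must break out (an exception_index has been set or
 * an exit to the main loop is required), false to continue executing TBs.
 */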

static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    /*
     * If we have requested custom cflags with CF_NOIRQ we should
     * skip checking here.  Any pending interrupts will get picked up
     * by the next TB we execute under normal cflags.
     */
    if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) {
        return false;
    }

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if !defined(CONFIG_USER_ONLY)
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif /* !TARGET_I386 */
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           and via longjmp through cpu_loop_exit.  */
        else {
            CPUClass *cc = CPU_GET_CLASS(cpu);

            if (cc->tcg_ops->cpu_exec_interrupt &&
                cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
                if (need_replay_interrupt(interrupt_request)) {
                    replay_interrupt();
                }
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                if (unlikely(cpu->singlestep_enabled)) {
                    cpu->exception_index = EXCP_DEBUG;
                    qemu_mutex_unlock_iothread();
                    return true;
                }
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
#endif /* !CONFIG_USER_ONLY */
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(qatomic_read(&cpu->exit_request))
        || (icount_enabled()
            && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        qatomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}
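
/*
 * Execute a single TB, then account for the executed instructions against
 * the icount budget; if fewer instructions remain than the next TB
 * contains, request a TB constrained to exactly that many instructions.
 */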

static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    target_ulong pc,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    int32_t insns_left;

    trace_exec_tb(tb, pc);
    tb = cpu_tb_exec(cpu, tb, tb_exit);
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired. */
    assert(icount_enabled());
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    icount_update(cpu);
    /* Refill decrementer and continue execution. */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;

    /*
     * If the next tb has more instructions than we have left to
     * execute we need to ensure we find/generate a TB with exactly
     * insns_left instructions in it.
     */
    if (insns_left > 0 && insns_left < tb->icount) {
        assert(insns_left <= CF_COUNT_MASK);
        assert(cpu->icount_extra == 0);
        cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left;
    }
#endif
}

/* main execution loop */

static int __attribute__((noinline))
cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
{
    int ret;

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            TranslationBlock *tb;
            target_ulong cs_base, pc;
            uint32_t flags, cflags;

            cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);

            /*
             * When requested, use an exact setting for cflags for the next
             * execution.  This is used for icount, precise smc, and stop-
             * after-access watchpoints.  Since this request should never
             * have CF_INVALID set, -1 is a convenient invalid value that
             * does not require tcg headers for cpu_common_reset.
             */
            cflags = cpu->cflags_next_tb;
            if (cflags == -1) {
                cflags = curr_cflags(cpu);
            } else {
                cpu->cflags_next_tb = -1;
            }

            if (check_for_breakpoints(cpu, pc, &cflags)) {
                break;
            }

            tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
            if (tb == NULL) {
                uint32_t h;

                mmap_lock();
                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
                mmap_unlock();

                /*
                 * We add the TB in the virtual pc hash table
                 * for the fast lookup
                 */
                h = tb_jmp_cache_hash_func(pc);
                tb_jmp_cache_set(cpu->tb_jmp_cache, h, tb, pc);
            }

#ifndef CONFIG_USER_ONLY
            /*
             * We don't take care of direct jumps when address mapping
             * changes in system emulation.  So it's not safe to make a
             * direct jump to a TB spanning two pages because the mapping
             * for the second page can change.
             */
            if (tb_page_addr1(tb) != -1) {
                last_tb = NULL;
            }
#endif
            /* See if we can patch the calling TB. */
            if (last_tb) {
                tb_add_jump(last_tb, tb_exit, tb);
            }

            cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);

            QEMU_PLUGIN_ASSERT(cpu->plugin_mem_cbs == NULL);
            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(sc, cpu);
        }
    }
    return ret;
}

static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
{
    /* Prepare setjmp context for exception handling. */
    if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
        /* Non-buggy compilers preserve this; assert the correct value. */
        g_assert(cpu == current_cpu);

#ifndef CONFIG_SOFTMMU
        clear_helper_retaddr();
        if (have_mmap_lock()) {
            mmap_unlock();
        }
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        qemu_plugin_disable_mem_helpers(cpu);

        assert_no_pages_locked();
    }

    return cpu_exec_loop(cpu, sc);
}
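
/*
 * cpu_exec: top-level entry point called from the vCPU thread; runs the
 * execution loop until it produces an EXCP_* exit code for the caller.
 */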

int cpu_exec(CPUState *cpu)
{
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();
    cpu_exec_enter(cpu);

    /*
     * Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0.  As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    ret = cpu_exec_setjmp(cpu, &sc);

    cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}

void tcg_exec_realizefn(CPUState *cpu, Error **errp)
{
    static bool tcg_target_initialized;
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!tcg_target_initialized) {
        cc->tcg_ops->initialize();
        tcg_target_initialized = true;
    }

    cpu->tb_jmp_cache = g_new0(CPUJumpCache, 1);
#ifndef CONFIG_USER_ONLY
    tcg_iommu_init_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */
    /* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */
}

/* undo the initializations in reverse order */
void tcg_exec_unrealizefn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    tcg_iommu_free_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */

    g_free_rcu(cpu->tb_jmp_cache, rcu);
}