/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qapi/error.h"
23 #include "qapi/type-helpers.h"
24 #include "hw/core/tcg-cpu-ops.h"
26 #include "disas/disas.h"
27 #include "exec/exec-all.h"
29 #include "qemu/atomic.h"
32 #include "qemu/main-loop.h"
33 #if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
34 #include "hw/i386/apic.h"
36 #include "sysemu/cpus.h"
37 #include "exec/cpu-all.h"
38 #include "sysemu/cpu-timers.h"
39 #include "exec/replay-core.h"
40 #include "sysemu/tcg.h"
41 #include "exec/helper-proto-common.h"
42 #include "tb-jmp-cache.h"
44 #include "tb-context.h"
/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * be at most 3ms.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

int64_t max_delay;
int64_t max_advance;

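/*
 * align_clocks: with -icount align, put the host thread to sleep when
 * the guest's virtual clock runs ahead of the host's realtime clock.
 * sc->diff_clk accumulates guest-minus-host time in ns: a positive
 * value means the guest is ahead, and once it exceeds VM_CLOCK_ADVANCE
 * (3 ms) we sleep the difference away.
 */
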
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                        threshold_delay - 1,
                        threshold_delay);
        }
        nb_prints++;
        last_realtime_clock = sc->realtime_clock;
    }
}

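/*
 * init_delay_params: snapshot the host and guest clocks at cpu_exec
 * entry so that later align_clocks() calls can measure their drift,
 * and record the worst delay/advance observed so far in
 * max_delay/max_advance.
 */
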
static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100) */
    print_delay(sc);
}

#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG USER ONLY */

uint32_t curr_cflags(CPUState *cpu)
{
    uint32_t cflags = cpu->tcg_cflags;

    /*
     * Record gdb single-step.  We should be exiting the TB by raising
     * EXCP_DEBUG, but to simplify other tests, disable chaining too.
     *
     * For singlestep and -d nochain, suppress goto_tb so that
     * we can log -d cpu,exec after every TB.
     */
    if (unlikely(cpu->singlestep_enabled)) {
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
    } else if (qatomic_read(&one_insn_per_tb)) {
        cflags |= CF_NO_GOTO_TB | 1;
    } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        cflags |= CF_NO_GOTO_TB;
    }

    return cflags;
}

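/*
 * A TB is keyed by more than its starting PC: the comparison in
 * tb_lookup_cmp() below also checks cs_base, the TB flags and cflags,
 * plus the physical address(es) of the guest page(s) the TB covers.
 * struct tb_desc packages this lookup key for the qht hash table.
 */
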
struct tb_desc {
    vaddr pc;
    uint64_t cs_base;
    CPUArchState *env;
    tb_page_addr_t page_addr0;
    uint32_t flags;
    uint32_t cflags;
};

static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) &&
        tb_page_addr0(tb) == desc->page_addr0 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb_cflags(tb) == desc->cflags) {
        /* check next page if needed */
        tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
        if (tb_phys_page1 == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page1;
            vaddr virt_page1;

            /*
             * We know that the first page matched, and an otherwise valid TB
             * encountered an incomplete instruction at the end of that page,
             * therefore we know that generating a new TB from the current PC
             * must also require reading from the next page -- even if the
             * second pages do not match, and therefore the resulting insn
             * is different for the new TB.  Therefore any exception raised
             * here by the faulting lookup is not premature.
             */
            virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
            phys_page1 = get_page_addr_code(desc->env, virt_page1);
            if (tb_phys_page1 == phys_page1) {
                return true;
            }
        }
    }
    return false;
}

static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
                                          uint64_t cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cflags = cflags;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.page_addr0 = phys_pc;
    h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
                     flags, cs_base, cflags);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

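/*
 * TB lookup is two-level: a per-vCPU direct-mapped jump cache
 * (cpu->tb_jmp_cache) sits in front of the global tb_ctx.htable qht;
 * tb_lookup() below refills the jump cache entry on a miss.
 */
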
/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
                                          uint64_t cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    CPUJumpCache *jc;
    TranslationBlock *tb;
    uint32_t hash;

    /* we should never be trying to look up an INVALID tb */
    tcg_debug_assert(!(cflags & CF_INVALID));

    hash = tb_jmp_cache_hash_func(pc);
    jc = cpu->tb_jmp_cache;

    if (cflags & CF_PCREL) {
        /* Use acquire to ensure current load of pc from jc. */
        tb = qatomic_load_acquire(&jc->array[hash].tb);
        if (likely(tb &&
                   jc->array[hash].pc == pc &&
                   tb->cs_base == cs_base &&
                   tb->flags == flags &&
                   tb_cflags(tb) == cflags)) {
            return tb;
        }
        tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            return NULL;
        }
        jc->array[hash].pc = pc;
        /* Ensure pc is written first. */
        qatomic_store_release(&jc->array[hash].tb, tb);
    } else {
        /* Use rcu_read to ensure current load of pc from *tb. */
        tb = qatomic_rcu_read(&jc->array[hash].tb);
        if (likely(tb &&
                   tb->pc == pc &&
                   tb->cs_base == cs_base &&
                   tb->flags == flags &&
                   tb_cflags(tb) == cflags)) {
            return tb;
        }
        tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            return NULL;
        }
        /* Use the pc value already stored in tb->pc. */
        qatomic_set(&jc->array[hash].tb, tb);
    }

    return tb;
}

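/*
 * log_cpu_exec: emit the "-d exec" trace line for a TB about to run,
 * and with "-d cpu" also dump the full register state.  Only called
 * once the relevant loglevel bits are known to be set.
 */
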
static void log_cpu_exec(vaddr pc, CPUState *cpu,
                         const TranslationBlock *tb)
{
    if (qemu_log_in_addr_range(pc)) {
        qemu_log_mask(CPU_LOG_EXEC,
                      "Trace %d: %p [%08" PRIx64
                      "/%" VADDR_PRIx "/%08x/%08x] %s\n",
                      cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                      tb->flags, tb->cflags, lookup_symbol(pc));

        if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
            FILE *logfile = qemu_log_trylock();
            if (logfile) {
                int flags = 0;

                if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
                    flags |= CPU_DUMP_FPU;
                }
#if defined(TARGET_I386)
                flags |= CPU_DUMP_CCOP;
#endif
                if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) {
                    flags |= CPU_DUMP_VPU;
                }
                cpu_dump_state(cpu, logfile, flags);
                qemu_log_unlock(logfile);
            }
        }
    }
}

static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
                                       uint32_t *cflags)
{
    CPUBreakpoint *bp;
    bool match_page = false;

    /*
     * Singlestep overrides breakpoints.
     * This requirement is visible in the record-replay tests, where
     * we would fail to make forward progress in reverse-continue.
     *
     * TODO: gdb singlestep should only override gdb breakpoints,
     * so that one could (gdb) singlestep into the guest kernel's
     * architectural breakpoint handler.
     */
    if (cpu->singlestep_enabled) {
        return false;
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        /*
         * If we have an exact pc match, trigger the breakpoint.
         * Otherwise, note matches within the page.
         */
        if (pc == bp->pc) {
            bool match_bp = false;

            if (bp->flags & BP_GDB) {
                match_bp = true;
            } else if (bp->flags & BP_CPU) {
#ifdef CONFIG_USER_ONLY
                g_assert_not_reached();
#else
                CPUClass *cc = CPU_GET_CLASS(cpu);
                assert(cc->tcg_ops->debug_check_breakpoint);
                match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
#endif
            }

            if (match_bp) {
                cpu->exception_index = EXCP_DEBUG;
                return true;
            }
        } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
            match_page = true;
        }
    }

    /*
     * Within the same page as a breakpoint, single-step,
     * returning to helper_lookup_tb_ptr after each insn looking
     * for the actual breakpoint.
     *
     * TODO: Perhaps better to record all of the TBs associated
     * with a given virtual page that contains a breakpoint, and
     * then invalidate them when a new overlapping breakpoint is
     * set on the page.  Non-overlapping TBs would not be
     * invalidated, nor would any TB need to be invalidated as
     * breakpoints are removed.
     */
    if (match_page) {
        *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
        return true;
    }
    return false;
}

static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc,
                                         uint32_t *cflags)
{
    return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
        check_for_breakpoints_slow(cpu, pc, cflags);
}

/**
 * helper_lookup_tb_ptr: quick check for next tb
 * @env: current cpu state
 *
 * Look for an existing TB matching the current cpu state.
 * If found, return the code pointer.  If not found, return
 * the tcg epilogue so that we return into cpu_tb_exec.
 */
const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
    CPUState *cpu = env_cpu(env);
    TranslationBlock *tb;
    vaddr pc;
    uint64_t cs_base;
    uint32_t flags, cflags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

    cflags = curr_cflags(cpu);
    if (check_for_breakpoints(cpu, pc, &cflags)) {
        cpu_loop_exit(cpu);
    }

    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return tcg_code_gen_epilogue;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(pc, cpu, tb);
    }

    return tb->tc.ptr;
}

/* Execute a TB, and fix up the CPU state afterwards if necessary */
/*
 * Disable CFI checks.
 * TCG creates binary blobs at runtime, with the transformed code.
 * A TB is a blob of binary code, created at runtime and called with an
 * indirect function call. Since such function did not exist at compile time,
 * the CFI runtime has no way to verify its signature and would fail.
 * TCG is not considered a security-sensitive part of QEMU so this does not
 * affect the impact of CFI in environments with high security requirements.
 */
static inline TranslationBlock * QEMU_DISABLE_CFI
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    const void *tb_ptr = itb->tc.ptr;

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(log_pc(cpu, itb), cpu, itb);
    }

    qemu_thread_jit_execute();
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    qemu_plugin_disable_mem_helpers(cpu);
    /*
     * TODO: Delay swapping back to the read-write region of the TB
     * until we actually need to modify the TB.  The read-only copy,
     * coming from the rx region, shares the same host TLB entry as
     * the code that executed the exit_tb opcode that arrived here.
     * If we insist on touching both the RX and the RW pages, we
     * double the host TLB pressure.
     */
    last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
    *tb_exit = ret & TB_EXIT_MASK;

    trace_exec_tb_exit(last_tb, *tb_exit);

    if (*tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->tcg_ops->synchronize_from_tb) {
            cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
        } else {
            tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL));
            cc->set_pc(cpu, last_tb->pc);
        }
        if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
            vaddr pc = log_pc(cpu, last_tb);
            if (qemu_log_in_addr_range(pc)) {
                qemu_log("Stopped execution of TB chain before %p [%"
                         VADDR_PRIx "] %s\n",
                         last_tb->tc.ptr, pc, lookup_symbol(pc));
            }
        }
    }

    /*
     * If gdb single-step, and we haven't raised another exception,
     * raise a debug exception.  Single-step with another exception
     * is handled in cpu_handle_exception.
     */
    if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
        cpu->exception_index = EXCP_DEBUG;
        cpu_loop_exit(cpu);
    }

    return last_tb;
}

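/*
 * The low bits of the pointer returned by tcg_qemu_tb_exec() encode
 * the exit condition (TB_EXIT_MASK): which goto_tb slot was taken, or
 * a value above TB_EXIT_IDX1 when the TB body was not entered at all,
 * in which case cpu_tb_exec() above has already restored the guest PC.
 */
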
static void cpu_exec_enter(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_enter) {
        cc->tcg_ops->cpu_exec_enter(cpu);
    }
}

static void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_exit) {
        cc->tcg_ops->cpu_exec_exit(cpu);
    }
}

void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    vaddr pc;
    uint64_t cs_base;
    uint32_t flags, cflags;
    int tb_exit;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();
        g_assert(cpu == current_cpu);
        g_assert(!cpu->running);
        cpu->running = true;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

        cflags = curr_cflags(cpu);
        /* Execute in a serial context. */
        cflags &= ~CF_PARALLEL;
        /* After 1 insn, return and release the exclusive lock. */
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
        /*
         * No need to check_for_breakpoints here.
         * We only arrive in cpu_exec_step_atomic after beginning execution
         * of an insn that includes an atomic operation we can't handle.
         * Any breakpoint for this insn will have been recognized earlier.
         */

        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb, &tb_exit);
        cpu_exec_exit(cpu);
    } else {
#ifdef CONFIG_USER_ONLY
        clear_helper_retaddr();
        if (have_mmap_lock()) {
            mmap_unlock();
        }
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    cpu->running = false;
    end_exclusive();
}

void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    /*
     * Get the rx view of the structure, from which we find the
     * executable code address, and tb_target_set_jmp_target can
     * produce a pc-relative displacement to jmp_target_addr[n].
     */
    const TranslationBlock *c_tb = tcg_splitwx_to_rx(tb);
    uintptr_t offset = tb->jmp_insn_offset[n];
    uintptr_t jmp_rx = (uintptr_t)tb->tc.ptr + offset;
    uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;

    tb->jmp_target_addr[n] = addr;
    tb_target_set_jmp_target(c_tb, n, jmp_rx, jmp_rw);
}

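/*
 * tb_add_jump: direct block chaining.  Patch the native jump in slot
 * n of 'tb' to branch straight into 'tb_next', so the two TBs execute
 * back-to-back without returning to the main loop.  The jmp_dest slot
 * is claimed with a cmpxchg so that racing vCPU threads cannot patch
 * it twice, and tb_next->jmp_lock protects tb_next's list of incoming
 * jumps so the link can be undone if tb_next is invalidated.
 */
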
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    qemu_thread_jit_write();
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
                          (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n",
                  tb->tc.ptr, n, tb_next->tc.ptr);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

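/*
 * cpu_handle_halt: return true if the vCPU is halted and should exit
 * cpu_exec() with EXCP_HALTED.  On x86, a pending APIC poll is
 * serviced first since it may deliver the interrupt that wakes the
 * vCPU from halt.
 */
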
static inline bool cpu_handle_halt(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    if (cpu->halted) {
#if defined(TARGET_I386)
        if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif /* TARGET_I386 */
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }
#endif /* !CONFIG_USER_ONLY */

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    if (cc->tcg_ops->debug_excp_handler) {
        cc->tcg_ops->debug_excp_handler(cpu);
    }
}

static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* Execute just one insn to trigger exception pending in the log */
            cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
                                  | CF_NOIRQ | 1;
        }
#endif
        return false;
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    }

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
#if defined(TARGET_I386)
    CPUClass *cc = CPU_GET_CLASS(cpu);
    cc->tcg_ops->fake_user_interrupt(cpu);
#endif /* TARGET_I386 */
    *ret = cpu->exception_index;
    cpu->exception_index = -1;
    return true;
#else
    if (replay_exception()) {
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_mutex_lock_iothread();
        cc->tcg_ops->do_interrupt(cpu);
        qemu_mutex_unlock_iothread();
        cpu->exception_index = -1;

        if (unlikely(cpu->singlestep_enabled)) {
            /*
             * After processing the exception, ensure an EXCP_DEBUG is
             * raised when single-stepping so that GDB doesn't miss the
             * next instruction.
             */
            *ret = EXCP_DEBUG;
            cpu_handle_debug_exception(cpu);
            return true;
        }
    } else if (!replay_has_interrupt()) {
        /* give a chance to iothread in replay mode */
        *ret = EXCP_INTERRUPT;
        return true;
    }
#endif

    return false;
}

#ifndef CONFIG_USER_ONLY
/*
 * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
 * "real" interrupt event later. It does not need to be recorded for
 * replay purposes.
 */
static inline bool need_replay_interrupt(int interrupt_request)
{
#if defined(TARGET_I386)
    return !(interrupt_request & CPU_INTERRUPT_POLL);
#else
    return true;
#endif
}
#endif /* !CONFIG_USER_ONLY */

static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    /*
     * If we have requested custom cflags with CF_NOIRQ we should
     * skip checking here. Any pending interrupts will get picked up
     * by the next TB we execute under normal cflags.
     */
    if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) {
        return false;
    }

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    qatomic_set_mb(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if !defined(CONFIG_USER_ONLY)
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif /* !TARGET_I386 */
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           and via longjmp via cpu_loop_exit.  */
        else {
            CPUClass *cc = CPU_GET_CLASS(cpu);

            if (cc->tcg_ops->cpu_exec_interrupt &&
                cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
                if (need_replay_interrupt(interrupt_request)) {
                    replay_interrupt();
                }
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                if (unlikely(cpu->singlestep_enabled)) {
                    cpu->exception_index = EXCP_DEBUG;
                    qemu_mutex_unlock_iothread();
                    return true;
                }
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
#endif /* !CONFIG_USER_ONLY */
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(qatomic_read(&cpu->exit_request))
        || (icount_enabled()
            && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        qatomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

*cpu
, TranslationBlock
*tb
,
885 vaddr pc
, TranslationBlock
**last_tb
,
890 trace_exec_tb(tb
, pc
);
891 tb
= cpu_tb_exec(cpu
, tb
, tb_exit
);
892 if (*tb_exit
!= TB_EXIT_REQUESTED
) {
898 insns_left
= qatomic_read(&cpu_neg(cpu
)->icount_decr
.u32
);
899 if (insns_left
< 0) {
900 /* Something asked us to stop executing chained TBs; just
901 * continue round the main loop. Whatever requested the exit
902 * will also have set something else (eg exit_request or
903 * interrupt_request) which will be handled by
904 * cpu_handle_interrupt. cpu_handle_interrupt will also
905 * clear cpu->icount_decr.u16.high.
910 /* Instruction counter expired. */
911 assert(icount_enabled());
912 #ifndef CONFIG_USER_ONLY
913 /* Ensure global icount has gone forward */
915 /* Refill decrementer and continue execution. */
916 insns_left
= MIN(0xffff, cpu
->icount_budget
);
917 cpu_neg(cpu
)->icount_decr
.u16
.low
= insns_left
;
918 cpu
->icount_extra
= cpu
->icount_budget
- insns_left
;
921 * If the next tb has more instructions than we have left to
922 * execute we need to ensure we find/generate a TB with exactly
923 * insns_left instructions in it.
925 if (insns_left
> 0 && insns_left
< tb
->icount
) {
926 assert(insns_left
<= CF_COUNT_MASK
);
927 assert(cpu
->icount_extra
== 0);
928 cpu
->cflags_next_tb
= (tb
->cflags
& ~CF_COUNT_MASK
) | insns_left
;
/* main execution loop */

static int __attribute__((noinline))
cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
{
    int ret;

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            TranslationBlock *tb;
            vaddr pc;
            uint64_t cs_base;
            uint32_t flags, cflags;

            cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);

            /*
             * When requested, use an exact setting for cflags for the next
             * execution.  This is used for icount, precise smc, and stop-
             * after-access watchpoints.  Since this request should never
             * have CF_INVALID set, -1 is a convenient invalid value that
             * does not require tcg headers for cpu_common_reset.
             */
            cflags = cpu->cflags_next_tb;
            if (cflags == -1) {
                cflags = curr_cflags(cpu);
            } else {
                cpu->cflags_next_tb = -1;
            }

            if (check_for_breakpoints(cpu, pc, &cflags)) {
                break;
            }

            tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
            if (tb == NULL) {
                CPUJumpCache *jc;
                uint32_t h;

                mmap_lock();
                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
                mmap_unlock();

                /*
                 * We add the TB in the virtual pc hash table
                 * for the fast lookup
                 */
                h = tb_jmp_cache_hash_func(pc);
                jc = cpu->tb_jmp_cache;
                if (cflags & CF_PCREL) {
                    jc->array[h].pc = pc;
                    /* Ensure pc is written first. */
                    qatomic_store_release(&jc->array[h].tb, tb);
                } else {
                    /* Use the pc value already stored in tb->pc. */
                    qatomic_set(&jc->array[h].tb, tb);
                }
            }

#ifndef CONFIG_USER_ONLY
            /*
             * We don't take care of direct jumps when address mapping
             * changes in system emulation.  So it's not safe to make a
             * direct jump to a TB spanning two pages because the mapping
             * for the second page can change.
             */
            if (tb_page_addr1(tb) != -1) {
                last_tb = NULL;
            }
#endif
            /* See if we can patch the calling TB. */
            if (last_tb) {
                tb_add_jump(last_tb, tb_exit, tb);
            }

            cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);

            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(sc, cpu);
        }
    }
    return ret;
}

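/*
 * The sigsetjmp is kept out of cpu_exec_loop() (and the loop marked
 * noinline above), presumably so that the loop's locals live in their
 * own frame: variables live across a sigsetjmp can otherwise be
 * clobbered by the longjmp unless declared volatile.
 */
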
static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
{
    /* Prepare setjmp context for exception handling. */
    if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
        /* Non-buggy compilers preserve this; assert the correct value. */
        g_assert(cpu == current_cpu);

#ifdef CONFIG_USER_ONLY
        clear_helper_retaddr();
        if (have_mmap_lock()) {
            mmap_unlock();
        }
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }

        assert_no_pages_locked();
    }

    return cpu_exec_loop(cpu, sc);
}

int cpu_exec(CPUState *cpu)
{
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();
    cpu_exec_enter(cpu);

    /*
     * Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    ret = cpu_exec_setjmp(cpu, &sc);

    cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}

void tcg_exec_realizefn(CPUState *cpu, Error **errp)
{
    static bool tcg_target_initialized;
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!tcg_target_initialized) {
        cc->tcg_ops->initialize();
        tcg_target_initialized = true;
    }

    cpu->tb_jmp_cache = g_new0(CPUJumpCache, 1);
#ifndef CONFIG_USER_ONLY
    tcg_iommu_init_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */
    /* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */
}

/* undo the initializations in reverse order */
void tcg_exec_unrealizefn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    tcg_iommu_free_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */

    g_free_rcu(cpu->tb_jmp_cache, rcu);
}