/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qapi/type-helpers.h"
#include "hw/core/tcg-cpu-ops.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "exec/replay-core.h"
#include "sysemu/tcg.h"
#include "exec/helper-proto-common.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal-common.h"
#include "internal-target.h"
/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
    sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
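
/*
 * A worked example of the sleep computation above (the numbers are
 * hypothetical, not taken from a real run): if the guest clock is
 * 2.5e9 ns ahead of the host, diff_clk == 2500000000 and the POSIX
 * branch builds sleep_delay = { .tv_sec = 2, .tv_nsec = 500000000 }.
 * When nanosleep() is interrupted by a signal, the unslept remainder
 * in rem_delay is folded back into diff_clk, so the debt is retried
 * on the next call rather than lost.
 */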
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                        threshold_delay - 1,
                        threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}
static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */
uint32_t curr_cflags(CPUState *cpu)
{
    uint32_t cflags = cpu->tcg_cflags;

    /*
     * Record gdb single-step.  We should be exiting the TB by raising
     * EXCP_DEBUG, but to simplify other tests, disable chaining too.
     *
     * For singlestep and -d nochain, suppress goto_tb so that
     * we can log -d cpu,exec after every TB.
     */
    if (unlikely(cpu->singlestep_enabled)) {
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
    } else if (qatomic_read(&one_insn_per_tb)) {
        cflags |= CF_NO_GOTO_TB | 1;
    } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        cflags |= CF_NO_GOTO_TB;
    }

    return cflags;
}
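
/*
 * For illustration: the low CF_COUNT_MASK bits of cflags carry the
 * maximum number of guest instructions allowed in the TB (see the
 * icount refill logic in cpu_loop_exec_tb below), so "| 1" requests a
 * TB containing exactly one instruction, while CF_NO_GOTO_TB and
 * CF_NO_GOTO_PTR keep that TB from chaining to another TB.
 */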
struct tb_desc {
    vaddr pc;
    uint64_t cs_base;
    CPUArchState *env;
    tb_page_addr_t page_addr0;
    uint32_t flags;
    uint32_t cflags;
};
static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) &&
        tb_page_addr0(tb) == desc->page_addr0 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb_cflags(tb) == desc->cflags) {
        /* check next page if needed */
        tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
        if (tb_phys_page1 == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page1;
            vaddr virt_page1;

            /*
             * We know that the first page matched, and an otherwise valid TB
             * encountered an incomplete instruction at the end of that page,
             * therefore we know that generating a new TB from the current PC
             * must also require reading from the next page -- even if the
             * second pages do not match, and therefore the resulting insn
             * is different for the new TB.  Therefore any exception raised
             * here by the faulting lookup is not premature.
             */
            virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
            phys_page1 = get_page_addr_code(desc->env, virt_page1);
            if (tb_phys_page1 == phys_page1) {
                return true;
            }
        }
    }
    return false;
}
static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
                                          uint64_t cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = cpu_env(cpu);
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cflags = cflags;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.page_addr0 = phys_pc;
    h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
                     flags, cs_base, cflags);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
                                          uint64_t cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    TranslationBlock *tb;
    CPUJumpCache *jc;
    uint32_t hash;

    /* we should never be trying to look up an INVALID tb */
    tcg_debug_assert(!(cflags & CF_INVALID));

    hash = tb_jmp_cache_hash_func(pc);
    jc = cpu->tb_jmp_cache;

    tb = qatomic_read(&jc->array[hash].tb);
    if (likely(tb &&
               jc->array[hash].pc == pc &&
               tb->cs_base == cs_base &&
               tb->flags == flags &&
               tb_cflags(tb) == cflags)) {
        goto hit;
    }

    tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return NULL;
    }

    jc->array[hash].pc = pc;
    qatomic_set(&jc->array[hash].tb, tb);

hit:
    /*
     * As long as tb is not NULL, the contents are consistent.  Therefore,
     * the virtual PC has to match for non-CF_PCREL translations.
     */
    assert((tb_cflags(tb) & CF_PCREL) || tb->pc == pc);
    return tb;
}
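
/*
 * In short, tb_lookup is a two-level scheme: a direct-mapped, per-vCPU
 * jump cache indexed by a hash of the virtual PC, backed by the global
 * qht hash table keyed on the physical address. The jump-cache entry
 * is read without a lock; the qatomic_read of the tb pointer plus the
 * field comparisons above make a stale or torn entry fail the match
 * and fall through to the slower qht path instead of misfiring.
 */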
static void log_cpu_exec(vaddr pc, CPUState *cpu,
                         const TranslationBlock *tb)
{
    if (qemu_log_in_addr_range(pc)) {
        qemu_log_mask(CPU_LOG_EXEC,
                      "Trace %d: %p [%08" PRIx64
                      "/%016" VADDR_PRIx "/%08x/%08x] %s\n",
                      cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                      tb->flags, tb->cflags, lookup_symbol(pc));

        if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
            FILE *logfile = qemu_log_trylock();
            if (logfile) {
                int flags = 0;

                if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
                    flags |= CPU_DUMP_FPU;
                }
#if defined(TARGET_I386)
                flags |= CPU_DUMP_CCOP;
#endif
                if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) {
                    flags |= CPU_DUMP_VPU;
                }
                cpu_dump_state(cpu, logfile, flags);
                qemu_log_unlock(logfile);
            }
        }
    }
}
static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
                                       uint32_t *cflags)
{
    CPUBreakpoint *bp;
    bool match_page = false;

    /*
     * Singlestep overrides breakpoints.
     * This requirement is visible in the record-replay tests, where
     * we would fail to make forward progress in reverse-continue.
     *
     * TODO: gdb singlestep should only override gdb breakpoints,
     * so that one could (gdb) singlestep into the guest kernel's
     * architectural breakpoint handler.
     */
    if (cpu->singlestep_enabled) {
        return false;
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        /*
         * If we have an exact pc match, trigger the breakpoint.
         * Otherwise, note matches within the page.
         */
        if (pc == bp->pc) {
            bool match_bp = false;

            if (bp->flags & BP_GDB) {
                match_bp = true;
            } else if (bp->flags & BP_CPU) {
#ifdef CONFIG_USER_ONLY
                g_assert_not_reached();
#else
                const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
                assert(tcg_ops->debug_check_breakpoint);
                match_bp = tcg_ops->debug_check_breakpoint(cpu);
#endif
            }

            if (match_bp) {
                cpu->exception_index = EXCP_DEBUG;
                return true;
            }
        } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
            match_page = true;
        }
    }

    /*
     * Within the same page as a breakpoint, single-step,
     * returning to helper_lookup_tb_ptr after each insn looking
     * for the actual breakpoint.
     *
     * TODO: Perhaps better to record all of the TBs associated
     * with a given virtual page that contains a breakpoint, and
     * then invalidate them when a new overlapping breakpoint is
     * set on the page.  Non-overlapping TBs would not be
     * invalidated, nor would any TB need to be invalidated as
     * breakpoints are removed.
     */
    if (match_page) {
        *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
    }
    return false;
}
static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc,
                                         uint32_t *cflags)
{
    return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
        check_for_breakpoints_slow(cpu, pc, cflags);
}
/**
 * helper_lookup_tb_ptr: quick check for next tb
 * @env: current cpu state
 *
 * Look for an existing TB matching the current cpu state.
 * If found, return the code pointer.  If not found, return
 * the tcg epilogue so that we return into cpu_tb_exec.
 */
const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
    CPUState *cpu = env_cpu(env);
    TranslationBlock *tb;
    vaddr pc;
    uint64_t cs_base;
    uint32_t flags, cflags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

    cflags = curr_cflags(cpu);
    if (check_for_breakpoints(cpu, pc, &cflags)) {
        cpu_loop_exit(cpu);
    }

    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return tcg_code_gen_epilogue;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(pc, cpu, tb);
    }

    return tb->tc.ptr;
}
/* Execute a TB, and fix up the CPU state afterwards if necessary */
/*
 * Disable CFI checks.
 * TCG creates binary blobs at runtime, with the transformed code.
 * A TB is a blob of binary code, created at runtime and called with an
 * indirect function call. Since such function did not exist at compile time,
 * the CFI runtime has no way to verify its signature and would fail.
 * TCG is not considered a security-sensitive part of QEMU so this does not
 * affect the impact of CFI in environments with high security requirements.
 */
static inline TranslationBlock * QEMU_DISABLE_CFI
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
{
    CPUArchState *env = cpu_env(cpu);
    uintptr_t ret;
    TranslationBlock *last_tb;
    const void *tb_ptr = itb->tc.ptr;

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(log_pc(cpu, itb), cpu, itb);
    }

    qemu_thread_jit_execute();
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->neg.can_do_io = true;
    qemu_plugin_disable_mem_helpers(cpu);
    /*
     * TODO: Delay swapping back to the read-write region of the TB
     * until we actually need to modify the TB.  The read-only copy,
     * coming from the rx region, shares the same host TLB entry as
     * the code that executed the exit_tb opcode that arrived here.
     * If we insist on touching both the RX and the RW pages, we
     * double the host TLB pressure.
     */
    last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
    *tb_exit = ret & TB_EXIT_MASK;

    trace_exec_tb_exit(last_tb, *tb_exit);

    if (*tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = cpu->cc;
        const TCGCPUOps *tcg_ops = cc->tcg_ops;

        if (tcg_ops->synchronize_from_tb) {
            tcg_ops->synchronize_from_tb(cpu, last_tb);
        } else {
            tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL));
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
        if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
            vaddr pc = log_pc(cpu, last_tb);
            if (qemu_log_in_addr_range(pc)) {
                qemu_log("Stopped execution of TB chain before %p [%016"
                         VADDR_PRIx "] %s\n",
                         last_tb->tc.ptr, pc, lookup_symbol(pc));
            }
        }
    }

    /*
     * If gdb single-step, and we haven't raised another exception,
     * raise a debug exception.  Single-step with another exception
     * is handled in cpu_handle_exception.
     */
    if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
        cpu->exception_index = EXCP_DEBUG;
        cpu_loop_exit(cpu);
    }

    return last_tb;
}
static void cpu_exec_enter(CPUState *cpu)
{
    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;

    if (tcg_ops->cpu_exec_enter) {
        tcg_ops->cpu_exec_enter(cpu);
    }
}

static void cpu_exec_exit(CPUState *cpu)
{
    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;

    if (tcg_ops->cpu_exec_exit) {
        tcg_ops->cpu_exec_exit(cpu);
    }
}
static void cpu_exec_longjmp_cleanup(CPUState *cpu)
{
    /* Non-buggy compilers preserve this; assert the correct value. */
    g_assert(cpu == current_cpu);

#ifdef CONFIG_USER_ONLY
    clear_helper_retaddr();
    if (have_mmap_lock()) {
        mmap_unlock();
    }
#else
    /*
     * For softmmu, a tlb_fill fault during translation will land here,
     * and we need to release any page locks held.  In system mode we
     * have one tcg_ctx per thread, so we know it was this cpu doing
     * the translation.
     *
     * Alternative 1: Install a cleanup to be called via an exception
     * handling safe longjmp.  It seems plausible that all our hosts
     * support such a thing.  We'd have to properly register unwind info
     * for the JIT for EH, rather than just for GDB.
     *
     * Alternative 2: Set and restore cpu->jmp_env in tb_gen_code to
     * capture the cpu_loop_exit longjmp, perform the cleanup, and
     * jump again to arrive here.
     */
    if (tcg_ctx->gen_tb) {
        tb_unlock_pages(tcg_ctx->gen_tb);
        tcg_ctx->gen_tb = NULL;
    }
#endif
    if (qemu_mutex_iothread_locked()) {
        qemu_mutex_unlock_iothread();
    }
    assert_no_pages_locked();
}
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUArchState *env = cpu_env(cpu);
    TranslationBlock *tb;
    vaddr pc;
    uint64_t cs_base;
    uint32_t flags, cflags;
    int tb_exit;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();
        g_assert(cpu == current_cpu);
        g_assert(!cpu->running);
        cpu->running = true;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

        cflags = curr_cflags(cpu);
        /* Execute in a serial context. */
        cflags &= ~CF_PARALLEL;
        /* After 1 insn, return and release the exclusive lock. */
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
        /*
         * No need to check_for_breakpoints here.
         * We only arrive in cpu_exec_step_atomic after beginning execution
         * of an insn that includes an atomic operation we can't handle.
         * Any breakpoint for this insn will have been recognized earlier.
         */

        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb, &tb_exit);
        cpu_exec_exit(cpu);
    } else {
        cpu_exec_longjmp_cleanup(cpu);
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    cpu->running = false;
    end_exclusive();
}
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    /*
     * Get the rx view of the structure, from which we find the
     * executable code address, and tb_target_set_jmp_target can
     * produce a pc-relative displacement to jmp_target_addr[n].
     */
    const TranslationBlock *c_tb = tcg_splitwx_to_rx(tb);
    uintptr_t offset = tb->jmp_insn_offset[n];
    uintptr_t jmp_rx = (uintptr_t)tb->tc.ptr + offset;
    uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;

    tb->jmp_target_addr[n] = addr;
    tb_target_set_jmp_target(c_tb, n, jmp_rx, jmp_rw);
}
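
/*
 * A hypothetical layout, purely for illustration: with the read-execute
 * view of the code buffer mapped at 0x7f5000000000 and the read-write
 * view at 0x7f4000000000, tcg_splitwx_diff is 0x10000000000, so the
 * jump instruction that executes at jmp_rx is patched through its
 * writable alias at jmp_rw = jmp_rx - tcg_splitwx_diff.
 */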
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    qemu_thread_jit_write();
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
                          (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n",
                  tb->tc.ptr, n, tb_next->tc.ptr);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}
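
/*
 * Note the packing used above: a TB has at most two outgoing direct
 * jumps, so n is 0 or 1 and fits in the low bits of the aligned TB
 * pointer. jmp_list_head therefore stores "(uintptr_t)tb | n", a
 * (TB, slot) pair naming the jump that must be unpatched if tb_next
 * is later invalidated.
 */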
static inline bool cpu_handle_halt(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    if (cpu->halted) {
#if defined(TARGET_I386)
        if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif /* TARGET_I386 */
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }
#endif /* !CONFIG_USER_ONLY */

    return false;
}
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    if (tcg_ops->debug_excp_handler) {
        tcg_ops->debug_excp_handler(cpu);
    }
}
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) {
            /* Execute just one insn to trigger exception pending in the log */
            cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
                | CF_NOIRQ | 1;
        }
#endif
        return false;
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    }

#if defined(CONFIG_USER_ONLY)
    /*
     * If user mode only, we simulate a fake exception which will be
     * handled outside the cpu execution loop.
     */
#if defined(TARGET_I386)
    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
    tcg_ops->fake_user_interrupt(cpu);
#endif /* TARGET_I386 */
    *ret = cpu->exception_index;
    cpu->exception_index = -1;
    return true;
#else
    if (replay_exception()) {
        const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;

        qemu_mutex_lock_iothread();
        tcg_ops->do_interrupt(cpu);
        qemu_mutex_unlock_iothread();
        cpu->exception_index = -1;

        if (unlikely(cpu->singlestep_enabled)) {
            /*
             * After processing the exception, ensure an EXCP_DEBUG is
             * raised when single-stepping so that GDB doesn't miss the
             * next instruction.
             */
            *ret = EXCP_DEBUG;
            cpu_handle_debug_exception(cpu);
            return true;
        }
    } else if (!replay_has_interrupt()) {
        /* give a chance to iothread in replay mode */
        *ret = EXCP_INTERRUPT;
        return true;
    }
#endif

    return false;
}
#ifndef CONFIG_USER_ONLY
/*
 * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
 * "real" interrupt event later.  It does not need to be recorded for
 * replay purposes.
 */
static inline bool need_replay_interrupt(CPUState *cpu, int interrupt_request)
{
#if defined(TARGET_I386)
    return !(interrupt_request & CPU_INTERRUPT_POLL);
#else
    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
    return !tcg_ops->need_replay_interrupt
        || tcg_ops->need_replay_interrupt(interrupt_request);
#endif
}
#endif /* !CONFIG_USER_ONLY */
static inline bool icount_exit_request(CPUState *cpu)
{
    if (!icount_enabled()) {
        return false;
    }
    if (cpu->cflags_next_tb != -1 && !(cpu->cflags_next_tb & CF_USE_ICOUNT)) {
        return false;
    }
    return cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0;
}
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    /*
     * If we have requested custom cflags with CF_NOIRQ we should
     * skip checking here. Any pending interrupts will get picked up
     * by the next TB we execute under normal cflags.
     */
    if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) {
        return false;
    }

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);

    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if !defined(CONFIG_USER_ONLY)
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif /* !TARGET_I386 */
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           and via longjmp via cpu_loop_exit.  */
        else {
            const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;

            if (tcg_ops->cpu_exec_interrupt &&
                tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
                if (need_replay_interrupt(cpu, interrupt_request)) {
                    replay_interrupt();
                }
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                if (unlikely(cpu->singlestep_enabled)) {
                    cpu->exception_index = EXCP_DEBUG;
                    qemu_mutex_unlock_iothread();
                    return true;
                }
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
#endif /* !CONFIG_USER_ONLY */
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(qatomic_read(&cpu->exit_request)) || icount_exit_request(cpu)) {
        qatomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}
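
/*
 * Background on the flag cleared at the top of cpu_handle_interrupt:
 * icount_decr is a union in which the 16-bit instruction counter
 * (u16.low) and the exit flag (u16.high) share one 32-bit word, so
 * setting u16.high to -1 in cpu_exit() makes the combined value read
 * by generated code negative and stops the running TB at its next
 * check; see the insns_left < 0 test in cpu_loop_exec_tb below.
 */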
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    vaddr pc, TranslationBlock **last_tb,
                                    int *tb_exit)
{
    int32_t insns_left;

    trace_exec_tb(tb, pc);
    tb = cpu_tb_exec(cpu, tb, tb_exit);
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = qatomic_read(&cpu->neg.icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(icount_enabled());
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    icount_update(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu->neg.icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;

    /*
     * If the next tb has more instructions than we have left to
     * execute we need to ensure we find/generate a TB with exactly
     * insns_left instructions in it.
     */
    if (insns_left > 0 && insns_left < tb->icount) {
        assert(insns_left <= CF_COUNT_MASK);
        assert(cpu->icount_extra == 0);
        cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left;
    }
#endif
}
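
/*
 * A worked example of the refill above (numbers invented for clarity):
 * with icount_budget = 0x12345, insns_left = MIN(0xffff, 0x12345) =
 * 0xffff goes into the 16-bit decrementer and icount_extra keeps the
 * remaining 0x2346. Only once the whole budget fits in the decrementer
 * (icount_extra == 0) can the next TB be longer than the budget, in
 * which case cflags_next_tb requests a TB of exactly insns_left
 * instructions.
 */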
/* main execution loop */

static int __attribute__((noinline))
cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
{
    int ret;

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            TranslationBlock *tb;
            vaddr pc;
            uint64_t cs_base;
            uint32_t flags, cflags;

            cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags);

            /*
             * When requested, use an exact setting for cflags for the next
             * execution.  This is used for icount, precise smc, and stop-
             * after-access watchpoints.  Since this request should never
             * have CF_INVALID set, -1 is a convenient invalid value that
             * does not require tcg headers for cpu_common_reset.
             */
            cflags = cpu->cflags_next_tb;
            if (cflags == -1) {
                cflags = curr_cflags(cpu);
            } else {
                cpu->cflags_next_tb = -1;
            }

            if (check_for_breakpoints(cpu, pc, &cflags)) {
                break;
            }

            tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
            if (tb == NULL) {
                CPUJumpCache *jc;
                uint32_t h;

                mmap_lock();
                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
                mmap_unlock();

                /*
                 * We add the TB in the virtual pc hash table
                 * for the fast lookup
                 */
                h = tb_jmp_cache_hash_func(pc);
                jc = cpu->tb_jmp_cache;
                jc->array[h].pc = pc;
                qatomic_set(&jc->array[h].tb, tb);
            }

#ifndef CONFIG_USER_ONLY
            /*
             * We don't take care of direct jumps when address mapping
             * changes in system emulation.  So it's not safe to make a
             * direct jump to a TB spanning two pages because the mapping
             * for the second page can change.
             */
            if (tb_page_addr1(tb) != -1) {
                last_tb = NULL;
            }
#endif
            /* See if we can patch the calling TB. */
            if (last_tb) {
                tb_add_jump(last_tb, tb_exit, tb);
            }

            cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);

            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(sc, cpu);
        }
    }
    return ret;
}
static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
{
    /* Prepare setjmp context for exception handling. */
    if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
        cpu_exec_longjmp_cleanup(cpu);
    }

    return cpu_exec_loop(cpu, sc);
}
int cpu_exec(CPUState *cpu)
{
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    RCU_READ_LOCK_GUARD();
    cpu_exec_enter(cpu);

    /*
     * Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    ret = cpu_exec_setjmp(cpu, &sc);

    cpu_exec_exit(cpu);
    return ret;
}
bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
{
    static bool tcg_target_initialized;

    if (!tcg_target_initialized) {
        cpu->cc->tcg_ops->initialize();
        tcg_target_initialized = true;
    }

    cpu->tb_jmp_cache = g_new0(CPUJumpCache, 1);
    tlb_init(cpu);
#ifndef CONFIG_USER_ONLY
    tcg_iommu_init_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */
    /* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */

    return true;
}
/* undo the initializations in reverse order */
void tcg_exec_unrealizefn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    tcg_iommu_free_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */

    tlb_destroy(cpu);
    g_free_rcu(cpu->tb_jmp_cache, rcu);
}