/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;
#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
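
/* Units, for reference: VM_CLOCK_ADVANCE and MAX_DELAY_PRINT_RATE are in
 * nanoseconds (3ms and 2s respectively); THRESHOLD_REDUCE is in seconds
 * of accumulated guest lag, matching the arithmetic in print_delay().
 */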
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
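
/* Worked example: with -icount align, diff_clk tracks how far guest
 * virtual time has run ahead of host real time.  If the guest retires
 * instructions worth 5ms of virtual time while only 1ms of real time
 * elapses, diff_clk reaches ~4ms; being above VM_CLOCK_ADVANCE (3ms),
 * align_clocks() sleeps that difference away so the guest never runs
 * more than a few milliseconds ahead.  A negative diff_clk means the
 * guest is late, which print_delay() below reports.
 */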
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* !CONFIG_USER_ONLY */
void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif
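
/* How cpu_tb_exec()'s return value below is encoded: the two low bits
 * (TB_EXIT_MASK) carry the exit condition, e.g. TB_EXIT_REQUESTED, and
 * the remaining bits point at the TranslationBlock that was executing:
 *
 *     uintptr_t ret = cpu_tb_exec(cpu, tb->tc_ptr);
 *     TranslationBlock *last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
 *     int exit_reason = ret & TB_EXIT_MASK;
 */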
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    trace_exec_tb_exit((void *)(next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
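
/* Design note: the final argument of tb_gen_code() is the TB's cflags;
 * passing max_cycles there (bounded by CF_COUNT_MASK above) makes the
 * translator stop after at most that many guest instructions.  The TB is
 * run exactly once, then invalidated and freed, so it never pollutes the
 * translation caches.
 */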
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                             TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }

 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
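
/* TB lookup is thus two-level: tb_find_fast() probes the per-CPU
 * tb_jmp_cache[], hashed on the current virtual PC, and only on a miss
 * or a stale entry does tb_find_slow() walk the physically-indexed
 * tb_phys_hash[] chain, translating a fresh TB as a last resort.  Keying
 * the slow path on physical addresses keeps cached TBs valid across
 * guest page-table changes.
 */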
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}
/* main execution loop */

volatile sig_atomic_t exit_request;
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;
    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

    cc->cpu_exec_enter(cpu);
    cpu->exception_index = -1;

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || \
    defined(TARGET_UNICORE32) || defined(TARGET_TRICORE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_TRICORE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#endif
                    /* The target hook has 3 exit conditions:
                       False when the interrupt isn't processed,
                       True when it is, and we should restart on a new TB,
                       and via longjmp via cpu_loop_exit.  */
                    if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                        next_tb = 0;
                    }
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
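                /* Note on next_tb: while non-zero it identifies the TB we
                 * just left (plus the jump-slot index in its low bits), and
                 * tb_add_jump() below will chain that TB directly to the
                 * next one so future runs bypass this loop.  Every event
                 * above that redirects control flow therefore resets
                 * next_tb to 0 so that no stale direct jump gets patched
                 * in.
                 */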
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
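                    /* Icount bookkeeping for the switch below: the per-TB
                     * instruction budget lives in the 16-bit decrementer
                     * cpu->icount_decr.u16.low, with any excess parked in
                     * cpu->icount_extra.  For example, with 70000
                     * instructions left, u16.low starts at 0xffff (65535)
                     * and icount_extra holds the other 4465; when the
                     * decrementer underflows, the TB exits with
                     * TB_EXIT_ICOUNT_EXPIRED and the budget is refilled.
                     */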
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
            cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */

    cc->cpu_exec_exit(cpu);

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}