/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
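
/* Unit note: VM_CLOCK_ADVANCE and MAX_DELAY_PRINT_RATE are in nanoseconds
 * (3 ms and 2 s respectively), matching the comments above and below;
 * THRESHOLD_REDUCE is in seconds and provides hysteresis for the warning
 * threshold used by print_delay(). */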

static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            /* nanosleep was interrupted by a signal: only credit the
             * time actually slept before the interruption */
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
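
/* Sign convention: a positive diff_clk means the guest clock is ahead of
 * the host, so align_clocks() sleeps until the host catches up; a negative
 * value means the guest is late, which is only reported (see print_delay()
 * below), since lost time cannot be slept away. */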

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
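
/* max_delay and max_advance are globals defined elsewhere that record the
 * worst guest lag and lead observed; the code above seeds them with the
 * skew measured when the CPU loop is entered. */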

#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG USER ONLY */

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif
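
/* The value returned by tcg_qemu_tb_exec() (and hence by cpu_tb_exec())
 * is the address of the last executed TranslationBlock, with the exit
 * reason encoded in the low bits covered by TB_EXIT_MASK:
 * TB_EXIT_IDX0/TB_EXIT_IDX1 for an exit through one of the two goto_tb
 * jump slots, TB_EXIT_REQUESTED when an exit was asked for, and
 * TB_EXIT_ICOUNT_EXPIRED when the instruction budget ran out. */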

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    trace_exec_tb_exit((void *)(next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
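
/* cpu_exec_nocache() below is the slow path taken when the icount budget
 * expires partway through a TB: the block is retranslated capped at the
 * remaining instruction count, run once, and the translation is then
 * immediately invalidated and freed (see the TB_EXIT_ICOUNT_EXPIRED
 * handling in cpu_exec()). */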

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                             TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
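
/* TB lookup is two-level: tb_find_fast() below first probes the per-CPU
 * tb_jmp_cache, indexed by the guest virtual PC; on a miss it falls back
 * to tb_find_slow() above, which searches the physically indexed hash
 * table and, failing that, translates the code now. */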

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}
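
/* Note: when execution stopped for a reason other than a watchpoint hit
 * (cpu->watchpoint_hit is NULL), the helper above clears any stale
 * BP_WATCHPOINT_HIT flags before dispatching to the per-target debug
 * exception handler. */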

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#endif
    cc->cpu_exec_enter(cpu);
    cpu->exception_index = -1;

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
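            /* Inner loop: next_tb carries the exit-tagged pointer to the
             * previously executed TB so consecutive blocks can be chained
             * via tb_add_jump() further down.  It is reset to 0 whenever
             * the program flow may have changed (interrupt delivered, TB
             * invalidated), so no stale direct jump gets patched in. */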
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || \
    defined(TARGET_UNICORE32) || defined(TARGET_TRICORE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_TRICORE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->daif & PSTATE_I)
                        && (!IS_M(env) || env->regs[15] < 0xfffffff0)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
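
                /* Chaining note: when the previous TB exited through one of
                 * its goto_tb slots (next_tb != 0) and the new TB fits in a
                 * single guest page, tb_add_jump() above patches that slot
                 * to branch straight to the new TB, so later executions
                 * bypass this lookup entirely. */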

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
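
                /* icount note: the decrementer visible to generated code is
                 * only 16 bits wide (icount_decr.u16.low), so any budget
                 * above 0xffff is parked in icount_extra and fed to the
                 * decrementer in 16-bit chunks by the refill code above. */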
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */

#if defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#endif

    cc->cpu_exec_exit(cpu);

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}