tb = tb_gen_code(env, pc, cs_base, flags, 0);
found:
+ /* Move the last found TB to the head of the list */
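+ /* (*ptb1 is NULL only when we fell through from tb_gen_code() above;
+    a freshly generated TB is already linked at the head, so only a TB
+    found by the hash walk needs the move-to-front.) */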
+ if (likely(*ptb1)) {
+ *ptb1 = tb->phys_hash_next;
+ tb->phys_hash_next = tb_phys_hash[h];
+ tb_phys_hash[h] = tb;
+ }
/* we add the TB in the virtual pc hash table */
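+ /* tb_jmp_cache is a small direct-mapped cache indexed by a hash of
+    the virtual PC; it lets the next lookup of this pc skip the
+    physical hash walk entirely */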
env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
return tb;
{
CPUWatchpoint *wp;
- if (!env->watchpoint_hit)
- QTAILQ_FOREACH(wp, &env->watchpoints, entry)
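+ /* clear stale hit flags on all watchpoints, unless a watchpoint hit
+    is still being delivered */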
+ if (!env->watchpoint_hit) {
+ QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
wp->flags &= ~BP_WATCHPOINT_HIT;
-
- if (debug_excp_handler)
+ }
+ }
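+ /* give the per-target debug exception hook, if one is registered, a
+    chance to act on the exception */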
+ if (debug_excp_handler) {
debug_excp_handler(env);
+ }
}
/* main execution loop */
uint8_t *tc_ptr;
unsigned long next_tb;
- if (cpu_halted(env1) == EXCP_HALTED)
- return EXCP_HALTED;
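+ /* a halted CPU resumes only when it has work pending, e.g. an
+    unmasked interrupt; otherwise report the halt to the caller */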
+ if (env1->halted) {
+ if (!cpu_has_work(env1)) {
+ return EXCP_HALTED;
+ }
+
+ env1->halted = 0;
+ }
cpu_single_env = env1;
}
#if defined(TARGET_I386)
- if (!kvm_enabled()) {
- /* put eflags in CPU temporary format */
- CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
- DF = 1 - (2 * ((env->eflags >> 10) & 1));
- CC_OP = CC_OP_EFLAGS;
- env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
- }
+ /* put eflags in CPU temporary format */
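+ /* the arithmetic flags are evaluated lazily while translated code
+    runs: CC_SRC and CC_OP hold what is needed to recompute them, DF
+    is kept as +1/-1 for the string instructions, and env->eflags
+    keeps only the remaining bits */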
+ CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+ DF = 1 - (2 * ((env->eflags >> 10) & 1));
+ CC_OP = CC_OP_EFLAGS;
+ env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
env->cc_op = CC_OP_FLAGS;
env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
+#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
+#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
if (setjmp(env->jmp_env) == 0) {
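+ /* cpu_loop_exit() longjmps back to this setjmp with a non-zero
+    value whenever translated code has to stop */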
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
- env = cpu_single_env;
+ env = cpu_single_env;
#define env cpu_single_env
#endif
/* if an exception is pending, we execute it here */
if (env->exception_index >= EXCP_INTERRUPT) {
/* exit request from the cpu execution loop */
ret = env->exception_index;
- if (ret == EXCP_DEBUG)
+ if (ret == EXCP_DEBUG) {
cpu_handle_debug_exception(env);
+ }
break;
} else {
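+ /* deliver the exception: user-mode emulation just records it for
+    the caller, while each system-emulation target routes it through
+    its do_interrupt() */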
#if defined(CONFIG_USER_ONLY)
env->old_exception = -1;
#elif defined(TARGET_PPC)
do_interrupt(env);
+#elif defined(TARGET_LM32)
+ do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
do_interrupt(env);
#elif defined(TARGET_MIPS)
do_interrupt(env);
#elif defined(TARGET_ARM)
do_interrupt(env);
+#elif defined(TARGET_UNICORE32)
+ do_interrupt(env);
#elif defined(TARGET_SH4)
do_interrupt(env);
#elif defined(TARGET_ALPHA)
do_interrupt(env);
#elif defined(TARGET_M68K)
do_interrupt(0);
+#elif defined(TARGET_S390X)
+ do_interrupt(env);
#endif
env->exception_index = -1;
#endif
}
}
- if (kvm_enabled()) {
- kvm_cpu_exec(env);
- longjmp(env->jmp_env, 1);
- }
-
next_tb = 0; /* force lookup of first TB */
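+ /* a non-zero next_tb is used to patch the jump of the previously
+    executed TB so that it chains directly to the next one; zero
+    forces a fresh lookup instead */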
for(;;) {
interrupt_request = env->interrupt_request;
if (unlikely(interrupt_request)) {
if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
/* Mask out external interrupts for this step. */
- interrupt_request &= ~(CPU_INTERRUPT_HARD |
- CPU_INTERRUPT_FIQ |
- CPU_INTERRUPT_SMI |
- CPU_INTERRUPT_NMI);
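+ /* CPU_INTERRUPT_SSTEP_MASK groups the asynchronous interrupt bits
+    (hard, FIQ, SMI, NMI, ...) that must not divert a single step */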
+ interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
}
if (interrupt_request & CPU_INTERRUPT_DEBUG) {
env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
}
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
- defined(TARGET_MICROBLAZE)
+ defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
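+ /* CPU_INTERRUPT_HALT sends the CPU into its halt state; a pending
+    hard interrupt is dropped along with it */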
if (interrupt_request & CPU_INTERRUPT_HALT) {
env->interrupt_request &= ~CPU_INTERRUPT_HALT;
env->halted = 1;
env->interrupt_request &= ~CPU_INTERRUPT_HARD;
next_tb = 0;
}
+#elif defined(TARGET_LM32)
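+ /* an external IRQ is taken only while the IE bit of the LM32
+    interrupt-enable CSR is set */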
+ if ((interrupt_request & CPU_INTERRUPT_HARD)
+ && (env->ie & IE_IE)) {
+ env->exception_index = EXCP_IRQ;
+ do_interrupt(env);
+ next_tb = 0;
+ }
#elif defined(TARGET_MICROBLAZE)
if ((interrupt_request & CPU_INTERRUPT_HARD)
&& (env->sregs[SR_MSR] & MSR_IE)
}
#elif defined(TARGET_MIPS)
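+ /* cpu_mips_hw_interrupts_pending() checks the CP0 Status/Cause IP
+    bits together with IE, EXL, ERL and debug mode */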
if ((interrupt_request & CPU_INTERRUPT_HARD) &&
- (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
- (env->CP0_Status & (1 << CP0St_IE)) &&
- !(env->CP0_Status & (1 << CP0St_EXL)) &&
- !(env->CP0_Status & (1 << CP0St_ERL)) &&
- !(env->hflags & MIPS_HFLAG_DM)) {
+ cpu_mips_hw_interrupts_pending(env)) {
/* Raise it */
env->exception_index = EXCP_EXT_INTERRUPT;
env->error_code = 0;
next_tb = 0;
}
}
- } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
- //do_interrupt(0, 0, 0, 0, 0);
- env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
}
#elif defined(TARGET_ARM)
if (interrupt_request & CPU_INTERRUPT_FIQ
jump normally, then does the exception return when the
CPU tries to execute code at the magic address.
This will cause the magic PC value to be pushed to
- the stack if an interrupt occured at the wrong time.
+ the stack if an interrupt occurred at the wrong time.
We avoid this by disabling interrupts when
pc contains a magic address. */
if (interrupt_request & CPU_INTERRUPT_HARD
do_interrupt(env);
next_tb = 0;
}
+#elif defined(TARGET_UNICORE32)
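+ /* a set ASR_I bit masks external interrupts on UniCore32 */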
+ if (interrupt_request & CPU_INTERRUPT_HARD
+ && !(env->uncached_asr & ASR_I)) {
+ do_interrupt(env);
+ next_tb = 0;
+ }
#elif defined(TARGET_SH4)
if (interrupt_request & CPU_INTERRUPT_HARD) {
do_interrupt(env);
do_interrupt(1);
next_tb = 0;
}
+#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
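+ /* external interrupts are delivered only while the PSW external
+    mask bit is set */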
+ if ((interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->psw.mask & PSW_MASK_EXT)) {
+ do_interrupt(env);
+ next_tb = 0;
+ }
#endif
- /* Don't use the cached interupt_request value,
+ /* Don't use the cached interrupt_request value,
do_interrupt may have updated the EXITTB flag. */
if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
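+ /* the program flow may have changed, so stop chaining and look the
+    next TB up from scratch */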
env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
/* XXX: Save/restore host fpu exception state?. */
+#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
+#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
cpu_m68k_flush_flags(env, env->cc_op);
env->cc_op = CC_OP_FLAGS;
return ret;
}
-/* must only be called from the generated code as an exception can be
- generated */
-void tb_invalidate_page_range(target_ulong start, target_ulong end)
-{
- /* XXX: cannot enable it yet because it yields to MMU exception
- where NIP != read address on PowerPC */
-#if 0
- target_ulong phys_addr;
- phys_addr = get_phys_addr_code(env, start);
- tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
-#endif
-}
-
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
- cpu_restore_state(tb, env, pc, puc);
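+ /* cpu_restore_state() retranslates the TB to recover the guest CPU
+    state at the faulting instruction */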
+ cpu_restore_state(tb, env, pc);
}
/* we restore the process signal mask as the sigreturn should