diff --git a/cpu-exec.c b/cpu-exec.c
index 625fbb0e0f87a24e467104191956d4b44a87e6bc..30cfa2a63a68bc0eeddca36257946c89fe6f5ec5 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
  */
 #include "config.h"
 #include "cpu.h"
-#include "disas.h"
+#include "disas/disas.h"
 #include "tcg.h"
-#include "qemu-barrier.h"
-#include "qtest.h"
+#include "qemu/atomic.h"
+#include "sysemu/qtest.h"
 
-int tb_invalidated_flag;
-
-//#define CONFIG_DEBUG_EXEC
-
-bool qemu_cpu_has_work(CPUArchState *env)
+bool qemu_cpu_has_work(CPUState *cpu)
 {
-    return cpu_has_work(env);
+    return cpu_has_work(cpu);
 }
 
 void cpu_loop_exit(CPUArchState *env)
 {
-    env->current_tb = NULL;
-    longjmp(env->jmp_env, 1);
+    CPUState *cpu = ENV_GET_CPU(env);
+
+    cpu->current_tb = NULL;
+    siglongjmp(env->jmp_env, 1);
 }
 
 /* exit the current TB from a signal handler. The host registers are
@@ -47,16 +45,44 @@ void cpu_resume_from_signal(CPUArchState *env, void *puc)
     /* XXX: restore cpu registers saved in host registers */
 
     env->exception_index = -1;
-    longjmp(env->jmp_env, 1);
+    siglongjmp(env->jmp_env, 1);
 }
 #endif
 
+/* Execute a TB, and fix up the CPU state afterwards if necessary */
+static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
+{
+    CPUArchState *env = cpu->env_ptr;
+    uintptr_t next_tb = tcg_qemu_tb_exec(env, tb_ptr);
+    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
+        /* We didn't start executing this TB (e.g. because the instruction
+         * counter hit zero); we must restore the guest PC to the address
+         * of the start of the TB.
+         */
+        CPUClass *cc = CPU_GET_CLASS(cpu);
+        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
+        if (cc->synchronize_from_tb) {
+            cc->synchronize_from_tb(cpu, tb);
+        } else {
+            assert(cc->set_pc);
+            cc->set_pc(cpu, tb->pc);
+        }
+    }
+    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
+        /* We were asked to stop executing TBs (probably a pending
+         * interrupt). We've now stopped, so clear the flag.
+         */
+        cpu->tcg_exit_req = 0;
+    }
+    return next_tb;
+}
+
 /* Execute the code without caching the generated code. An interpreter
    could be used if available. */
 static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                              TranslationBlock *orig_tb)
 {
-    tcg_target_ulong next_tb;
+    CPUState *cpu = ENV_GET_CPU(env);
     TranslationBlock *tb;
 
     /* Should never happen.
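
Note: the new cpu_tb_exec() helper decodes an exit status from the low bits
of the value returned by tcg_qemu_tb_exec(). Because TranslationBlock
structures are aligned, the two low bits of a TB pointer are free to carry
the TB_EXIT_* code. A standalone sketch of that tagging scheme (TAG_MASK
and tag_ptr are illustrative names; the real constants live in tcg.h):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TAG_MASK ((uintptr_t)3)    /* stands in for TB_EXIT_MASK */

    static uintptr_t tag_ptr(void *tb, unsigned status)
    {
        assert(((uintptr_t)tb & TAG_MASK) == 0 && status <= 3);
        return (uintptr_t)tb | status;
    }

    int main(void)
    {
        static _Alignas(4) int dummy_tb;   /* plays the role of a TB */
        uintptr_t next_tb = tag_ptr(&dummy_tb, 2);

        printf("tb=%p status=%u\n",
               (void *)(next_tb & ~TAG_MASK),   /* recover the pointer */
               (unsigned)(next_tb & TAG_MASK)); /* recover the status  */
        return 0;
    }
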
@@ -66,16 +92,10 @@ static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
 
     tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                      max_cycles);
-    env->current_tb = tb;
+    cpu->current_tb = tb;
     /* execute the generated code */
-    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
-    env->current_tb = NULL;
-
-    if ((next_tb & 3) == 2) {
-        /* Restore PC.  This may happen if async event occurs before
-           the TB starts executing.  */
-        cpu_pc_from_tb(env, tb);
-    }
+    cpu_tb_exec(cpu, tb->tc_ptr);
+    cpu->current_tb = NULL;
     tb_phys_invalidate(tb, -1);
     tb_free(tb);
 }
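
Note: cpu_exec_nocache() serves icount mode; it compiles a throwaway TB
capped at max_cycles instructions, runs it once through the new
cpu_tb_exec() helper, then invalidates and frees it. A generic sketch of
the underlying budget idea (all names illustrative, not QEMU APIs):

    #include <stdint.h>
    #include <stdio.h>

    static int32_t icount_decr;

    static int exec_block(int insns_in_block)
    {
        for (int i = 0; i < insns_in_block; i++) {
            if (--icount_decr < 0) {
                return 1;           /* budget exhausted: leave the block */
            }
            /* ...execute one guest instruction... */
        }
        return 0;                   /* ran to the end of the block */
    }

    int main(void)
    {
        icount_decr = 3;            /* max_cycles for this one-shot TB */
        printf("stopped early: %d\n", exec_block(5)); /* prints 1 */
        return 0;
    }
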
@@ -90,13 +110,13 @@ static TranslationBlock *tb_find_slow(CPUArchState *env,
     tb_page_addr_t phys_pc, phys_page1;
     target_ulong virt_page2;
 
-    tb_invalidated_flag = 0;
+    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
 
     /* find translated block using physical mappings */
     phys_pc = get_page_addr_code(env, pc);
     phys_page1 = phys_pc & TARGET_PAGE_MASK;
     h = tb_phys_hash_func(phys_pc);
-    ptb1 = &tb_phys_hash[h];
+    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
     for(;;) {
         tb = *ptb1;
         if (!tb)
@@ -128,8 +148,8 @@ static TranslationBlock *tb_find_slow(CPUArchState *env,
     /* Move the last found TB to the head of the list */
     if (likely(*ptb1)) {
         *ptb1 = tb->phys_hash_next;
-        tb->phys_hash_next = tb_phys_hash[h];
-        tb_phys_hash[h] = tb;
+        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
+        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
     }
     /* we add the TB in the virtual pc hash table */
     env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
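
Note: tb_find_slow() walks a singly linked bucket of
tcg_ctx.tb_ctx.tb_phys_hash and, on a hit, splices the found block to the
head of the bucket so the next lookup for a hot block terminates
immediately; the result is then cached in the per-virtual-PC tb_jmp_cache.
A minimal sketch of the move-to-front splice (the node type is
hypothetical):

    struct node { int key; struct node *next; };

    static struct node *bucket_find_mtf(struct node **head, int key)
    {
        struct node **pp = head;
        for (struct node *n = *pp; n != NULL; pp = &n->next, n = n->next) {
            if (n->key == key) {
                *pp = n->next;      /* unlink from current position */
                n->next = *head;    /* relink at the head of the bucket */
                *head = n;
                return n;
            }
        }
        return NULL;
    }
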
@@ -181,32 +201,42 @@ volatile sig_atomic_t exit_request;
 
 int cpu_exec(CPUArchState *env)
 {
-#ifdef TARGET_PPC
     CPUState *cpu = ENV_GET_CPU(env);
+#if !(defined(CONFIG_USER_ONLY) && \
+      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
+    CPUClass *cc = CPU_GET_CLASS(cpu);
 #endif
     int ret, interrupt_request;
     TranslationBlock *tb;
     uint8_t *tc_ptr;
-    tcg_target_ulong next_tb;
+    uintptr_t next_tb;
 
-    if (env->halted) {
-        if (!cpu_has_work(env)) {
+    if (cpu->halted) {
+        if (!cpu_has_work(cpu)) {
             return EXCP_HALTED;
         }
 
-        env->halted = 0;
+        cpu->halted = 0;
     }
 
-    cpu_single_env = env;
+    current_cpu = cpu;
+
+    /* As long as current_cpu is null, up to the assignment just above,
+     * requests by other threads to exit the execution loop are expected to
+     * be issued using the exit_request global. We must make sure that our
+     * evaluation of the global value is performed past the current_cpu
+     * value transition point, which requires a memory barrier as well as
+     * an instruction scheduling constraint on modern architectures.  */
+    smp_mb();
 
     if (unlikely(exit_request)) {
-        env->exit_request = 1;
+        cpu->exit_request = 1;
     }
 
 #if defined(TARGET_I386)
     /* put eflags in CPU temporary format */
     CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
-    DF = 1 - (2 * ((env->eflags >> 10) & 1));
+    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
     CC_OP = CC_OP_EFLAGS;
     env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
 #elif defined(TARGET_SPARC)
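
Note: the smp_mb() added here pairs with the code that raises the
exit_request global. The executing thread stores current_cpu and then reads
exit_request; a requesting thread stores exit_request and then reads
current_cpu (to kick it). With a full barrier on both sides, at least one
of the two reads observes the other thread's store, so a request issued
around the transition point cannot be missed by both sides. A sketch of
the same contract in C11 atomics (QEMU itself uses its own smp_mb()
macro, which predates C11):

    #include <stdatomic.h>

    struct CPUState;

    static _Atomic(struct CPUState *) current_cpu_a;
    static atomic_int exit_request_a;

    /* Executing thread: publish ourselves, then check for requests. */
    static int executor_should_exit(struct CPUState *cpu)
    {
        atomic_store_explicit(&current_cpu_a, cpu, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);  /* the smp_mb() */
        return atomic_load_explicit(&exit_request_a, memory_order_relaxed);
    }

    /* Requesting thread: raise the flag, then find a CPU to interrupt. */
    static struct CPUState *requester_raise(void)
    {
        atomic_store_explicit(&exit_request_a, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);  /* pairs with above */
        return atomic_load_explicit(&current_cpu_a, memory_order_relaxed);
    }
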
@@ -222,6 +252,7 @@ int cpu_exec(CPUArchState *env)
 #elif defined(TARGET_LM32)
 #elif defined(TARGET_MICROBLAZE)
 #elif defined(TARGET_MIPS)
+#elif defined(TARGET_MOXIE)
 #elif defined(TARGET_OPENRISC)
 #elif defined(TARGET_SH4)
 #elif defined(TARGET_CRIS)
@@ -235,7 +266,7 @@ int cpu_exec(CPUArchState *env)
 
     /* prepare setjmp context for exception handling */
     for(;;) {
-        if (setjmp(env->jmp_env) == 0) {
+        if (sigsetjmp(env->jmp_env, 0) == 0) {
             /* if an exception is pending, we execute it here */
             if (env->exception_index >= 0) {
                 if (env->exception_index >= EXCP_INTERRUPT) {
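
Note: plain setjmp() leaves the treatment of the signal mask unspecified,
and on hosts where it saves the mask it costs a sigprocmask call per loop
iteration; sigsetjmp(env->jmp_env, 0) with a zero savemask makes the
non-local exit both portable and cheap, and cpu_loop_exit() above is the
matching siglongjmp(). A minimal standalone sketch of the pattern:

    #include <setjmp.h>
    #include <stdio.h>

    static sigjmp_buf jmp_env;

    static void raise_exception(int index)
    {
        siglongjmp(jmp_env, index);       /* unwinds back into the loop */
    }

    int main(void)
    {
        int exc = sigsetjmp(jmp_env, 0);  /* 0: don't save signal mask */
        if (exc == 0) {
            raise_exception(42);          /* models cpu_loop_exit() */
        }
        printf("caught exception %d\n", exc); /* prints 42 */
        return 0;
    }
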
@@ -251,12 +282,12 @@ int cpu_exec(CPUArchState *env)
                        which will be handled outside the cpu execution
                        loop */
 #if defined(TARGET_I386)
-                    do_interrupt(env);
+                    cc->do_interrupt(cpu);
 #endif
                     ret = env->exception_index;
                     break;
 #else
-                    do_interrupt(env);
+                    cc->do_interrupt(cpu);
                     env->exception_index = -1;
 #endif
                 }
@@ -264,14 +295,14 @@ int cpu_exec(CPUArchState *env)
 
             next_tb = 0; /* force lookup of first TB */
             for(;;) {
-                interrupt_request = env->interrupt_request;
+                interrupt_request = cpu->interrupt_request;
                 if (unlikely(interrupt_request)) {
-                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
+                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                         /* Mask out external interrupts for this step. */
                         interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                     }
                     if (interrupt_request & CPU_INTERRUPT_DEBUG) {
-                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
+                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                         env->exception_index = EXCP_DEBUG;
                         cpu_loop_exit(env);
                     }
@@ -279,8 +310,8 @@ int cpu_exec(CPUArchState *env)
     defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
     defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                     if (interrupt_request & CPU_INTERRUPT_HALT) {
-                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
-                        env->halted = 1;
+                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
+                        cpu->halted = 1;
                         env->exception_index = EXCP_HLT;
                         cpu_loop_exit(env);
                     }
@@ -288,7 +319,7 @@ int cpu_exec(CPUArchState *env)
 #if defined(TARGET_I386)
 #if !defined(CONFIG_USER_ONLY)
                     if (interrupt_request & CPU_INTERRUPT_POLL) {
-                        env->interrupt_request &= ~CPU_INTERRUPT_POLL;
+                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                         apic_poll_irq(env->apic_state);
                     }
 #endif
@@ -305,17 +336,17 @@ int cpu_exec(CPUArchState *env)
                             !(env->hflags & HF_SMM_MASK)) {
                             cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                           0);
-                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
-                            do_smm_enter(env);
+                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+                            do_smm_enter(x86_env_get_cpu(env));
                             next_tb = 0;
                         } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                    !(env->hflags2 & HF2_NMI_MASK)) {
-                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
+                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                             env->hflags2 |= HF2_NMI_MASK;
                             do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                             next_tb = 0;
                         } else if (interrupt_request & CPU_INTERRUPT_MCE) {
-                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
+                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                             do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                             next_tb = 0;
                         } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -327,17 +358,14 @@ int cpu_exec(CPUArchState *env)
                             int intno;
                             cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                           0);
-                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
+                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
+                                                        CPU_INTERRUPT_VIRQ);
                             intno = cpu_get_pic_interrupt(env);
-                            if (intno >= 0) {
-                                qemu_log_mask(CPU_LOG_TB_IN_ASM,
-                                              "Servicing hardware INT=0x%02x\n",
-                                              intno);
-                                do_interrupt_x86_hardirq(env, intno, 1);
-                                /* ensure that no TB jump will be modified as
-                                   the program flow was changed */
-                                next_tb = 0;
-                            }
+                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
+                            do_interrupt_x86_hardirq(env, intno, 1);
+                            /* ensure that no TB jump will be modified as
+                               the program flow was changed */
+                            next_tb = 0;
 #if !defined(CONFIG_USER_ONLY)
                         } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                    (env->eflags & IF_MASK) && 
@@ -349,7 +377,7 @@ int cpu_exec(CPUArchState *env)
                             intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                             qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                             do_interrupt_x86_hardirq(env, intno, 1);
-                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                             next_tb = 0;
 #endif
                         }
@@ -360,15 +388,16 @@ int cpu_exec(CPUArchState *env)
                     }
                     if (interrupt_request & CPU_INTERRUPT_HARD) {
                         ppc_hw_interrupt(env);
-                        if (env->pending_interrupts == 0)
-                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
+                        if (env->pending_interrupts == 0) {
+                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+                        }
                         next_tb = 0;
                     }
 #elif defined(TARGET_LM32)
                     if ((interrupt_request & CPU_INTERRUPT_HARD)
                         && (env->ie & IE_IE)) {
                         env->exception_index = EXCP_IRQ;
-                        do_interrupt(env);
+                        cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
 #elif defined(TARGET_MICROBLAZE)
@@ -377,7 +406,7 @@ int cpu_exec(CPUArchState *env)
                         && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                         && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                         env->exception_index = EXCP_IRQ;
-                        do_interrupt(env);
+                        cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
 #elif defined(TARGET_MIPS)
@@ -386,7 +415,7 @@ int cpu_exec(CPUArchState *env)
                         /* Raise it */
                         env->exception_index = EXCP_EXT_INTERRUPT;
                         env->error_code = 0;
-                        do_interrupt(env);
+                        cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
 #elif defined(TARGET_OPENRISC)
@@ -402,7 +431,7 @@ int cpu_exec(CPUArchState *env)
                         }
                         if (idx >= 0) {
                             env->exception_index = idx;
-                            do_interrupt(env);
+                            cc->do_interrupt(cpu);
                             next_tb = 0;
                         }
                     }
@@ -417,7 +446,7 @@ int cpu_exec(CPUArchState *env)
                                   cpu_pil_allowed(env, pil)) ||
                                   type != TT_EXTINT) {
                                 env->exception_index = env->interrupt_index;
-                                do_interrupt(env);
+                                cc->do_interrupt(cpu);
                                 next_tb = 0;
                             }
                         }
@@ -426,7 +455,7 @@ int cpu_exec(CPUArchState *env)
                     if (interrupt_request & CPU_INTERRUPT_FIQ
                         && !(env->uncached_cpsr & CPSR_F)) {
                         env->exception_index = EXCP_FIQ;
-                        do_interrupt(env);
+                        cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
                     /* ARMv7-M interrupt return works by loading a magic value
@@ -442,19 +471,19 @@ int cpu_exec(CPUArchState *env)
                         && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                             || !(env->uncached_cpsr & CPSR_I))) {
                         env->exception_index = EXCP_IRQ;
-                        do_interrupt(env);
+                        cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
 #elif defined(TARGET_UNICORE32)
                     if (interrupt_request & CPU_INTERRUPT_HARD
                         && !(env->uncached_asr & ASR_I)) {
                         env->exception_index = UC32_EXCP_INTR;
-                        do_interrupt(env);
+                        cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
 #elif defined(TARGET_SH4)
                     if (interrupt_request & CPU_INTERRUPT_HARD) {
-                        do_interrupt(env);
+                        cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
 #elif defined(TARGET_ALPHA)
@@ -485,7 +514,7 @@ int cpu_exec(CPUArchState *env)
                         if (idx >= 0) {
                             env->exception_index = idx;
                             env->error_code = 0;
-                            do_interrupt(env);
+                            cc->do_interrupt(cpu);
                             next_tb = 0;
                         }
                     }
@@ -494,7 +523,7 @@ int cpu_exec(CPUArchState *env)
                         && (env->pregs[PR_CCS] & I_FLAG)
                         && !env->locked_irq) {
                         env->exception_index = EXCP_IRQ;
-                        do_interrupt(env);
+                        cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
                     if (interrupt_request & CPU_INTERRUPT_NMI) {
@@ -506,7 +535,7 @@ int cpu_exec(CPUArchState *env)
                         }
                         if ((env->pregs[PR_CCS] & m_flag_archval)) {
                             env->exception_index = EXCP_NMI;
-                            do_interrupt(env);
+                            cc->do_interrupt(cpu);
                             next_tb = 0;
                         }
                     }
@@ -526,89 +555,97 @@ int cpu_exec(CPUArchState *env)
 #elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                     if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                         (env->psw.mask & PSW_MASK_EXT)) {
-                        do_interrupt(env);
+                        cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
 #elif defined(TARGET_XTENSA)
                     if (interrupt_request & CPU_INTERRUPT_HARD) {
                         env->exception_index = EXC_IRQ;
-                        do_interrupt(env);
+                        cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
 #endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
-                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
-                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
+                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
+                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                         /* ensure that no TB jump will be modified as
                            the program flow was changed */
                         next_tb = 0;
                     }
                 }
-                if (unlikely(env->exit_request)) {
-                    env->exit_request = 0;
+                if (unlikely(cpu->exit_request)) {
+                    cpu->exit_request = 0;
                     env->exception_index = EXCP_INTERRUPT;
                     cpu_loop_exit(env);
                 }
-#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
+#if defined(DEBUG_DISAS)
                 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                     /* restore flags in standard format */
 #if defined(TARGET_I386)
-                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
-                        | (DF & DF_MASK);
-                    log_cpu_state(env, X86_DUMP_CCOP);
-                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+                    log_cpu_state(cpu, CPU_DUMP_CCOP);
 #elif defined(TARGET_M68K)
                     cpu_m68k_flush_flags(env, env->cc_op);
                     env->cc_op = CC_OP_FLAGS;
                     env->sr = (env->sr & 0xffe0)
                               | env->cc_dest | (env->cc_x << 4);
-                    log_cpu_state(env, 0);
+                    log_cpu_state(cpu, 0);
 #else
-                    log_cpu_state(env, 0);
+                    log_cpu_state(cpu, 0);
 #endif
                 }
-#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
-                spin_lock(&tb_lock);
+#endif /* DEBUG_DISAS */
+                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                 tb = tb_find_fast(env);
                 /* Note: we do it here to avoid a gcc bug on Mac OS X when
                    doing it in tb_find_slow */
-                if (tb_invalidated_flag) {
+                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                     /* as some TB could have been invalidated because
                        of memory exceptions while generating the code, we
                        must recompute the hash index here */
                     next_tb = 0;
-                    tb_invalidated_flag = 0;
+                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
+                }
+                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
+                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                 }
-#ifdef CONFIG_DEBUG_EXEC
-                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
-                             tb->tc_ptr, tb->pc,
-                             lookup_symbol(tb->pc));
-#endif
                 /* see if we can patch the calling TB. When the TB
                    spans two pages, we cannot safely do a direct
                    jump. */
                 if (next_tb != 0 && tb->page_addr[1] == -1) {
-                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
+                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
+                                next_tb & TB_EXIT_MASK, tb);
                 }
-                spin_unlock(&tb_lock);
+                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
 
                 /* cpu_interrupt might be called while translating the
                    TB, but before it is linked into a potentially
                    infinite loop and becomes env->current_tb. Avoid
                    starting execution if there is a pending interrupt. */
-                env->current_tb = tb;
+                cpu->current_tb = tb;
                 barrier();
-                if (likely(!env->exit_request)) {
+                if (likely(!cpu->exit_request)) {
                     tc_ptr = tb->tc_ptr;
                     /* execute the generated code */
-                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
-                    if ((next_tb & 3) == 2) {
+                    next_tb = cpu_tb_exec(cpu, tc_ptr);
+                    switch (next_tb & TB_EXIT_MASK) {
+                    case TB_EXIT_REQUESTED:
+                        /* Something asked us to stop executing
+                         * chained TBs; just continue round the main
+                         * loop. Whatever requested the exit will also
+                         * have set something else (eg exit_request or
+                         * interrupt_request) which we will handle
+                         * next time around the loop.
+                         */
+                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
+                        next_tb = 0;
+                        break;
+                    case TB_EXIT_ICOUNT_EXPIRED:
+                    {
                         /* Instruction counter expired.  */
                         int insns_left;
-                        tb = (TranslationBlock *)(next_tb & ~3);
-                        /* Restore PC.  */
-                        cpu_pc_from_tb(env, tb);
+                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                         insns_left = env->icount_decr.u32;
                         if (env->icount_extra && insns_left >= 0) {
                             /* Refill decrementer and continue execution.  */
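
Note on the hunk above: tb_add_jump() patches one of the calling TB's two
jump slots so control flows directly between translated blocks without
returning to this loop; the TB_EXIT_MASK bits of next_tb say which slot
was taken, and chaining is skipped when the new TB spans two pages (the
second page could later be unmapped behind the patched jump). A sketch of
the slot-patching idea (types and names hypothetical):

    #include <stdint.h>

    /* Hypothetical stand-in for TranslationBlock: two patchable slots. */
    struct TB { struct TB *jmp_dest[2]; };

    #define EXIT_MASK ((uintptr_t)3)    /* stands in for TB_EXIT_MASK */

    static void chain(uintptr_t next_tb, struct TB *tb)
    {
        struct TB *caller = (struct TB *)(next_tb & ~EXIT_MASK);
        unsigned slot = next_tb & EXIT_MASK; /* which jump exited */
        if (caller && slot <= 1) {
            caller->jmp_dest[slot] = tb;     /* stands in for tb_add_jump() */
        }
    }
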
@@ -629,16 +666,25 @@ int cpu_exec(CPUArchState *env)
                             next_tb = 0;
                             cpu_loop_exit(env);
                         }
+                        break;
+                    }
+                    default:
+                        break;
                     }
                 }
-                env->current_tb = NULL;
+                cpu->current_tb = NULL;
                 /* reset soft MMU for next block (it can currently
                    only be set by a memory fault) */
             } /* for(;;) */
         } else {
             /* Reload env after longjmp - the compiler may have smashed all
              * local variables as longjmp is marked 'noreturn'. */
-            env = cpu_single_env;
+            cpu = current_cpu;
+            env = cpu->env_ptr;
+#if !(defined(CONFIG_USER_ONLY) && \
+      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
+            cc = CPU_GET_CLASS(cpu);
+#endif
         }
     } /* for(;;) */
 
@@ -646,7 +692,7 @@ int cpu_exec(CPUArchState *env)
 #if defined(TARGET_I386)
     /* restore flags in standard format */
     env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
-        | (DF & DF_MASK);
+        | (env->df & DF_MASK);
 #elif defined(TARGET_ARM)
     /* XXX: Save/restore host fpu exception state?.  */
 #elif defined(TARGET_UNICORE32)
@@ -660,6 +706,7 @@ int cpu_exec(CPUArchState *env)
               | env->cc_dest | (env->cc_x << 4);
 #elif defined(TARGET_MICROBLAZE)
 #elif defined(TARGET_MIPS)
+#elif defined(TARGET_MOXIE)
 #elif defined(TARGET_OPENRISC)
 #elif defined(TARGET_SH4)
 #elif defined(TARGET_ALPHA)
@@ -671,7 +718,7 @@ int cpu_exec(CPUArchState *env)
 #error unsupported target CPU
 #endif
 
-    /* fail safe : never use cpu_single_env outside cpu_exec() */
-    cpu_single_env = NULL;
+    /* fail safe : never use current_cpu outside cpu_exec() */
+    current_cpu = NULL;
     return ret;
 }