diff --git a/cpu-exec.c b/cpu-exec.c
index 9092145d0b4e6493dd329d1e598bb18d76297f59..30cfa2a63a68bc0eeddca36257946c89fe6f5ec5 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -23,8 +23,6 @@
 #include "qemu/atomic.h"
 #include "sysemu/qtest.h"
 
-//#define CONFIG_DEBUG_EXEC
-
 bool qemu_cpu_has_work(CPUState *cpu)
 {
     return cpu_has_work(cpu);
@@ -55,14 +53,20 @@ void cpu_resume_from_signal(CPUArchState *env, void *puc)
 static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
 {
     CPUArchState *env = cpu->env_ptr;
-    tcg_target_ulong next_tb = tcg_qemu_tb_exec(env, tb_ptr);
+    uintptr_t next_tb = tcg_qemu_tb_exec(env, tb_ptr);
     if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
         /* We didn't start executing this TB (eg because the instruction
          * counter hit zero); we must restore the guest PC to the address
          * of the start of the TB.
          */
+        CPUClass *cc = CPU_GET_CLASS(cpu);
         TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
-        cpu_pc_from_tb(env, tb);
+        if (cc->synchronize_from_tb) {
+            cc->synchronize_from_tb(cpu, tb);
+        } else {
+            assert(cc->set_pc);
+            cc->set_pc(cpu, tb->pc);
+        }
     }
     if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
         /* We were asked to stop executing TBs (probably a pending
@@ -198,20 +202,32 @@ volatile sig_atomic_t exit_request;
 int cpu_exec(CPUArchState *env)
 {
     CPUState *cpu = ENV_GET_CPU(env);
+#if !(defined(CONFIG_USER_ONLY) && \
+      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
+    CPUClass *cc = CPU_GET_CLASS(cpu);
+#endif
     int ret, interrupt_request;
     TranslationBlock *tb;
     uint8_t *tc_ptr;
-    tcg_target_ulong next_tb;
+    uintptr_t next_tb;
 
-    if (env->halted) {
+    if (cpu->halted) {
         if (!cpu_has_work(cpu)) {
             return EXCP_HALTED;
         }
 
-        env->halted = 0;
+        cpu->halted = 0;
     }
 
-    cpu_single_env = env;
+    current_cpu = cpu;
+
+    /* As long as current_cpu is null, up to the assignment just above,
+     * requests by other threads to exit the execution loop are expected to
+     * be issued using the exit_request global. We must make sure that our
+     * evaluation of the global value is performed past the current_cpu
+     * value transition point, which requires a memory barrier as well as
+     * an instruction scheduling constraint on modern architectures.  */
+    smp_mb();
 
     if (unlikely(exit_request)) {
         cpu->exit_request = 1;
@@ -220,7 +236,7 @@ int cpu_exec(CPUArchState *env)
 #if defined(TARGET_I386)
     /* put eflags in CPU temporary format */
     CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
-    DF = 1 - (2 * ((env->eflags >> 10) & 1));
+    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
     CC_OP = CC_OP_EFLAGS;
     env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
 #elif defined(TARGET_SPARC)
@@ -236,6 +252,7 @@ int cpu_exec(CPUArchState *env)
 #elif defined(TARGET_LM32)
 #elif defined(TARGET_MICROBLAZE)
 #elif defined(TARGET_MIPS)
+#elif defined(TARGET_MOXIE)
 #elif defined(TARGET_OPENRISC)
 #elif defined(TARGET_SH4)
 #elif defined(TARGET_CRIS)
@@ -265,12 +282,12 @@ int cpu_exec(CPUArchState *env)
                        which will be handled outside the cpu execution
                        loop */
 #if defined(TARGET_I386)
-                    do_interrupt(env);
+                    cc->do_interrupt(cpu);
 #endif
                     ret = env->exception_index;
                     break;
 #else
-                    do_interrupt(env);
+                    cc->do_interrupt(cpu);
                     env->exception_index = -1;
 #endif
                 }
@@ -278,14 +295,14 @@ int cpu_exec(CPUArchState *env)
 
             next_tb = 0; /* force lookup of first TB */
             for(;;) {
-                interrupt_request = env->interrupt_request;
+                interrupt_request = cpu->interrupt_request;
                 if (unlikely(interrupt_request)) {
-                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
+                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                         /* Mask out external interrupts for this step. */
                         interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                     }
                     if (interrupt_request & CPU_INTERRUPT_DEBUG) {
-                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
+                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                         env->exception_index = EXCP_DEBUG;
                         cpu_loop_exit(env);
                     }
@@ -293,8 +310,8 @@ int cpu_exec(CPUArchState *env)
     defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
     defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                     if (interrupt_request & CPU_INTERRUPT_HALT) {
-                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
-                        env->halted = 1;
+                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
+                        cpu->halted = 1;
                         env->exception_index = EXCP_HLT;
                         cpu_loop_exit(env);
                     }
@@ -302,7 +319,7 @@ int cpu_exec(CPUArchState *env)
 #if defined(TARGET_I386)
 #if !defined(CONFIG_USER_ONLY)
                     if (interrupt_request & CPU_INTERRUPT_POLL) {
-                        env->interrupt_request &= ~CPU_INTERRUPT_POLL;
+                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                         apic_poll_irq(env->apic_state);
                     }
 #endif
@@ -319,17 +336,17 @@ int cpu_exec(CPUArchState *env)
                             !(env->hflags & HF_SMM_MASK)) {
                             cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                           0);
-                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
-                            do_smm_enter(env);
+                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+                            do_smm_enter(x86_env_get_cpu(env));
                             next_tb = 0;
                         } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                    !(env->hflags2 & HF2_NMI_MASK)) {
-                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
+                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                             env->hflags2 |= HF2_NMI_MASK;
                             do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                             next_tb = 0;
                         } else if (interrupt_request & CPU_INTERRUPT_MCE) {
-                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
+                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                             do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                             next_tb = 0;
                         } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -341,7 +358,8 @@ int cpu_exec(CPUArchState *env)
                             int intno;
                             cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                           0);
-                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
+                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
+                                                        CPU_INTERRUPT_VIRQ);
                             intno = cpu_get_pic_interrupt(env);
                             qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                             do_interrupt_x86_hardirq(env, intno, 1);
@@ -359,7 +377,7 @@ int cpu_exec(CPUArchState *env)
                             intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                             qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                             do_interrupt_x86_hardirq(env, intno, 1);
-                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                             next_tb = 0;
 #endif
                         }
@@ -370,15 +388,16 @@ int cpu_exec(CPUArchState *env)
                     }
                     if (interrupt_request & CPU_INTERRUPT_HARD) {
                         ppc_hw_interrupt(env);
-                        if (env->pending_interrupts == 0)
-                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
+                        if (env->pending_interrupts == 0) {
+                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+                        }
                         next_tb = 0;
                     }
 #elif defined(TARGET_LM32)
                     if ((interrupt_request & CPU_INTERRUPT_HARD)
                         && (env->ie & IE_IE)) {
                         env->exception_index = EXCP_IRQ;
-                        do_interrupt(env);
+                        cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
 #elif defined(TARGET_MICROBLAZE)
@@ -387,7 +406,7 @@ int cpu_exec(CPUArchState *env)
                         && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                         && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                         env->exception_index = EXCP_IRQ;
-                        do_interrupt(env);
+                        cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
 #elif defined(TARGET_MIPS)
@@ -396,7 +415,7 @@ int cpu_exec(CPUArchState *env)
                         /* Raise it */
                         env->exception_index = EXCP_EXT_INTERRUPT;
                         env->error_code = 0;
-                        do_interrupt(env);
+                        cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
 #elif defined(TARGET_OPENRISC)
@@ -412,7 +431,7 @@ int cpu_exec(CPUArchState *env)
                         }
                         if (idx >= 0) {
                             env->exception_index = idx;
-                            do_interrupt(env);
+                            cc->do_interrupt(cpu);
                             next_tb = 0;
                         }
                     }
@@ -427,7 +446,7 @@ int cpu_exec(CPUArchState *env)
                                   cpu_pil_allowed(env, pil)) ||
                                   type != TT_EXTINT) {
                                 env->exception_index = env->interrupt_index;
-                                do_interrupt(env);
+                                cc->do_interrupt(cpu);
                                 next_tb = 0;
                             }
                         }
@@ -436,7 +455,7 @@ int cpu_exec(CPUArchState *env)
                     if (interrupt_request & CPU_INTERRUPT_FIQ
                         && !(env->uncached_cpsr & CPSR_F)) {
                         env->exception_index = EXCP_FIQ;
-                        do_interrupt(env);
+                        cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
                     /* ARMv7-M interrupt return works by loading a magic value
@@ -452,19 +471,19 @@ int cpu_exec(CPUArchState *env)
                         && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                             || !(env->uncached_cpsr & CPSR_I))) {
                         env->exception_index = EXCP_IRQ;
-                        do_interrupt(env);
+                        cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
 #elif defined(TARGET_UNICORE32)
                     if (interrupt_request & CPU_INTERRUPT_HARD
                         && !(env->uncached_asr & ASR_I)) {
                         env->exception_index = UC32_EXCP_INTR;
-                        do_interrupt(env);
+                        cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
 #elif defined(TARGET_SH4)
                     if (interrupt_request & CPU_INTERRUPT_HARD) {
-                        do_interrupt(env);
+                        cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
 #elif defined(TARGET_ALPHA)
@@ -495,7 +514,7 @@ int cpu_exec(CPUArchState *env)
                         if (idx >= 0) {
                             env->exception_index = idx;
                             env->error_code = 0;
-                            do_interrupt(env);
+                            cc->do_interrupt(cpu);
                             next_tb = 0;
                         }
                     }
@@ -504,7 +523,7 @@ int cpu_exec(CPUArchState *env)
                         && (env->pregs[PR_CCS] & I_FLAG)
                         && !env->locked_irq) {
                         env->exception_index = EXCP_IRQ;
-                        do_interrupt(env);
+                        cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
                     if (interrupt_request & CPU_INTERRUPT_NMI) {
@@ -516,7 +535,7 @@ int cpu_exec(CPUArchState *env)
                         }
                         if ((env->pregs[PR_CCS] & m_flag_archval)) {
                             env->exception_index = EXCP_NMI;
-                            do_interrupt(env);
+                            cc->do_interrupt(cpu);
                             next_tb = 0;
                         }
                     }
@@ -536,20 +555,20 @@ int cpu_exec(CPUArchState *env)
 #elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                     if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                         (env->psw.mask & PSW_MASK_EXT)) {
-                        do_interrupt(env);
+                        cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
 #elif defined(TARGET_XTENSA)
                     if (interrupt_request & CPU_INTERRUPT_HARD) {
                         env->exception_index = EXC_IRQ;
-                        do_interrupt(env);
+                        cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
 #endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
-                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
-                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
+                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
+                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                         /* ensure that no TB jump will be modified as
                            the program flow was changed */
                         next_tb = 0;
@@ -560,25 +579,22 @@ int cpu_exec(CPUArchState *env)
                     env->exception_index = EXCP_INTERRUPT;
                     cpu_loop_exit(env);
                 }
-#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
+#if defined(DEBUG_DISAS)
                 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                     /* restore flags in standard format */
 #if defined(TARGET_I386)
-                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
-                        | (DF & DF_MASK);
-                    log_cpu_state(env, CPU_DUMP_CCOP);
-                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+                    log_cpu_state(cpu, CPU_DUMP_CCOP);
 #elif defined(TARGET_M68K)
                     cpu_m68k_flush_flags(env, env->cc_op);
                     env->cc_op = CC_OP_FLAGS;
                     env->sr = (env->sr & 0xffe0)
                               | env->cc_dest | (env->cc_x << 4);
-                    log_cpu_state(env, 0);
+                    log_cpu_state(cpu, 0);
 #else
-                    log_cpu_state(env, 0);
+                    log_cpu_state(cpu, 0);
 #endif
                 }
-#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
+#endif /* DEBUG_DISAS */
                 spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                 tb = tb_find_fast(env);
                 /* Note: we do it here to avoid a gcc bug on Mac OS X when
@@ -590,11 +606,10 @@ int cpu_exec(CPUArchState *env)
                     next_tb = 0;
                     tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                 }
-#ifdef CONFIG_DEBUG_EXEC
-                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
-                             tb->tc_ptr, tb->pc,
-                             lookup_symbol(tb->pc));
-#endif
+                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
+                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
+                }
                 /* see if we can patch the calling TB. When the TB
                    spans two pages, we cannot safely do a direct
                    jump. */
@@ -664,7 +679,12 @@ int cpu_exec(CPUArchState *env)
         } else {
             /* Reload env after longjmp - the compiler may have smashed all
              * local variables as longjmp is marked 'noreturn'. */
-            env = cpu_single_env;
+            cpu = current_cpu;
+            env = cpu->env_ptr;
+#if !(defined(CONFIG_USER_ONLY) && \
+      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
+            cc = CPU_GET_CLASS(cpu);
+#endif
         }
     } /* for(;;) */
 
@@ -672,7 +692,7 @@ int cpu_exec(CPUArchState *env)
 #if defined(TARGET_I386)
     /* restore flags in standard format */
     env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
-        | (DF & DF_MASK);
+        | (env->df & DF_MASK);
 #elif defined(TARGET_ARM)
     /* XXX: Save/restore host fpu exception state?.  */
 #elif defined(TARGET_UNICORE32)
@@ -686,6 +706,7 @@ int cpu_exec(CPUArchState *env)
               | env->cc_dest | (env->cc_x << 4);
 #elif defined(TARGET_MICROBLAZE)
 #elif defined(TARGET_MIPS)
+#elif defined(TARGET_MOXIE)
 #elif defined(TARGET_OPENRISC)
 #elif defined(TARGET_SH4)
 #elif defined(TARGET_ALPHA)
@@ -697,7 +718,7 @@ int cpu_exec(CPUArchState *env)
 #error unsupported target CPU
 #endif
 
-    /* fail safe : never use cpu_single_env outside cpu_exec() */
-    cpu_single_env = NULL;
+    /* fail safe : never use current_cpu outside cpu_exec() */
+    current_cpu = NULL;
     return ret;
 }