diff --git a/cpu-exec.c b/cpu-exec.c
index 5cc937904a3b7d4d2c017d27888a92ed4567e281..16a223ed1ab61f3690ef0854b42598f3af5ef5ee 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -267,6 +267,7 @@ int cpu_exec(CPUState *env1)
     env->cc_x = (env->sr >> 4) & 1;
 #elif defined(TARGET_ALPHA)
 #elif defined(TARGET_ARM)
+#elif defined(TARGET_UNICORE32)
 #elif defined(TARGET_PPC)
 #elif defined(TARGET_LM32)
 #elif defined(TARGET_MICROBLAZE)
@@ -335,6 +336,8 @@ int cpu_exec(CPUState *env1)
                     do_interrupt(env);
 #elif defined(TARGET_ARM)
                     do_interrupt(env);
+#elif defined(TARGET_UNICORE32)
+                    do_interrupt(env);
 #elif defined(TARGET_SH4)
                    do_interrupt(env);
 #elif defined(TARGET_ALPHA)
@@ -343,6 +346,8 @@ int cpu_exec(CPUState *env1)
                     do_interrupt(env);
 #elif defined(TARGET_M68K)
                     do_interrupt(0);
+#elif defined(TARGET_S390X)
+                    do_interrupt(env);
 #endif
                     env->exception_index = -1;
 #endif
@@ -355,10 +360,7 @@ int cpu_exec(CPUState *env1)
                 if (unlikely(interrupt_request)) {
                     if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                         /* Mask out external interrupts for this step. */
-                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
-                                               CPU_INTERRUPT_FIQ |
-                                               CPU_INTERRUPT_SMI |
-                                               CPU_INTERRUPT_NMI);
+                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                     }
                     if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                         env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
@@ -367,7 +369,7 @@ int cpu_exec(CPUState *env1)
                     }
 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
     defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
-    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32)
+    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                     if (interrupt_request & CPU_INTERRUPT_HALT) {
                         env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                         env->halted = 1;
@@ -487,9 +489,6 @@ int cpu_exec(CPUState *env1)
                                 next_tb = 0;
                             }
                         }
-                   } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
-                       //do_interrupt(0, 0, 0, 0, 0);
-                       env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
 #elif defined(TARGET_ARM)
                     if (interrupt_request & CPU_INTERRUPT_FIQ
@@ -504,7 +503,7 @@ int cpu_exec(CPUState *env1)
                        jump normally, then does the exception return when the
                        CPU tries to execute code at the magic address.
                        This will cause the magic PC value to be pushed to
-                       the stack if an interrupt occured at the wrong time.
+                       the stack if an interrupt occurred at the wrong time.
                        We avoid this by disabling interrupts when
                        pc contains a magic address.  */
                     if (interrupt_request & CPU_INTERRUPT_HARD
@@ -514,6 +513,12 @@ int cpu_exec(CPUState *env1)
                         do_interrupt(env);
                         next_tb = 0;
                     }
+#elif defined(TARGET_UNICORE32)
+                    if (interrupt_request & CPU_INTERRUPT_HARD
+                        && !(env->uncached_asr & ASR_I)) {
+                        do_interrupt(env);
+                        next_tb = 0;
+                    }
 #elif defined(TARGET_SH4)
                     if (interrupt_request & CPU_INTERRUPT_HARD) {
                         do_interrupt(env);
@@ -551,8 +556,14 @@ int cpu_exec(CPUState *env1)
                         do_interrupt(1);
                         next_tb = 0;
                     }
+#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
+                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
+                        (env->psw.mask & PSW_MASK_EXT)) {
+                        do_interrupt(env);
+                        next_tb = 0;
+                    }
 #endif
-                   /* Don't use the cached interupt_request value,
+                   /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                     if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                         env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
@@ -664,6 +675,7 @@ int cpu_exec(CPUState *env1)
     env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
 #elif defined(TARGET_ARM)
     /* XXX: Save/restore host fpu exception state?.  */
+#elif defined(TARGET_UNICORE32)
 #elif defined(TARGET_SPARC)
 #elif defined(TARGET_PPC)
 #elif defined(TARGET_LM32)
@@ -692,19 +704,6 @@ int cpu_exec(CPUState *env1)
     return ret;
 }
 
-/* must only be called from the generated code as an exception can be
-   generated */
-void tb_invalidate_page_range(target_ulong start, target_ulong end)
-{
-    /* XXX: cannot enable it yet because it yields to MMU exception
-       where NIP != read address on PowerPC */
-#if 0
-    target_ulong phys_addr;
-    phys_addr = get_phys_addr_code(env, start);
-    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
-#endif
-}
-
 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
 
 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
@@ -790,7 +789,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
     if (tb) {
         /* the PC is inside the translated code. It means that we have
            a virtual CPU fault */
-        cpu_restore_state(tb, env, pc, puc);
+        cpu_restore_state(tb, env, pc);
     }
 
     /* we restore the process signal mask as the sigreturn should