x86_64 and better i386 support

diff --git a/cpu-exec.c b/cpu-exec.c
index 55758faef299e8439d8bfa267f0f6f75a764eb36..5c648565867dff5b48d11ed574c49c409b6e29de 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
 #endif
 
 int tb_invalidated_flag;
+static unsigned long next_tb;
 
 //#define DEBUG_EXEC
 //#define DEBUG_SIGNAL
 
+#define SAVE_GLOBALS()
+#define RESTORE_GLOBALS()
+
+#if defined(__sparc__) && !defined(HOST_SOLARIS)
+#include <features.h>
+#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
+                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
+// Work around ugly bugs in glibc that mangle global register contents
+
+static volatile void *saved_env;
+static volatile unsigned long saved_t0, saved_i7;
+#undef SAVE_GLOBALS
+#define SAVE_GLOBALS() do {                                     \
+        saved_env = env;                                        \
+        saved_t0 = T0;                                          \
+        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
+    } while(0)
+
+#undef RESTORE_GLOBALS
+#define RESTORE_GLOBALS() do {                                  \
+        env = (void *)saved_env;                                \
+        T0 = saved_t0;                                          \
+        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
+    } while(0)
+
+static int sparc_setjmp(jmp_buf buf)
+{
+    int ret;
+
+    SAVE_GLOBALS();
+    ret = setjmp(buf);
+    RESTORE_GLOBALS();
+    return ret;
+}
+#undef setjmp
+#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)
+
+static void sparc_longjmp(jmp_buf buf, int val)
+{
+    SAVE_GLOBALS();
+    longjmp(buf, val);
+}
+#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
+#endif
+#endif
+
 void cpu_loop_exit(void)
 {
     /* NOTE: the register at this point must be saved by hand because
@@ -74,7 +121,6 @@ void cpu_resume_from_signal(CPUState *env1, void *puc)
     longjmp(env->jmp_env, 1);
 }
 
-
 static TranslationBlock *tb_find_slow(target_ulong pc,
                                       target_ulong cs_base,
                                       uint64_t flags)
@@ -133,7 +179,9 @@ static TranslationBlock *tb_find_slow(target_ulong pc,
     tb->tc_ptr = tc_ptr;
     tb->cs_base = cs_base;
     tb->flags = flags;
-    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
+    SAVE_GLOBALS();
+    cpu_gen_code(env, tb, &code_gen_size);
+    RESTORE_GLOBALS();
     code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
 
     /* check next page if needed */
@@ -173,6 +221,7 @@ static inline TranslationBlock *tb_find_fast(void)
         flags |= (1 << 6);
     if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
         flags |= (1 << 7);
+    flags |= (env->condexec_bits << 8);
     cs_base = 0;
     pc = env->regs[15];
 #elif defined(TARGET_SPARC)
@@ -181,10 +230,8 @@ static inline TranslationBlock *tb_find_fast(void)
     flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
         | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
 #else
-    // FPU enable . MMU Boot . MMU enabled . MMU no-fault . Supervisor
-    flags = (env->psref << 4) | (((env->mmuregs[0] & MMU_BM) >> 14) << 3)
-        | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1)
-        | env->psrs;
+    // FPU enable . Supervisor
+    flags = (env->psref << 4) | env->psrs;
 #endif
     cs_base = env->npc;
     pc = env->pc;
@@ -203,13 +250,17 @@ static inline TranslationBlock *tb_find_fast(void)
     cs_base = 0;
     pc = env->pc;
 #elif defined(TARGET_SH4)
-    flags = env->sr & (SR_MD | SR_RB);
-    cs_base = 0;         /* XXXXX */
+    flags = env->flags;
+    cs_base = 0;
     pc = env->pc;
 #elif defined(TARGET_ALPHA)
     flags = env->ps;
     cs_base = 0;
     pc = env->pc;
+#elif defined(TARGET_CRIS)
+    flags = env->pregs[PR_CCS] & U_FLAG;
+    cs_base = 0;
+    pc = env->pc;
 #else
 #error unsupported CPU
 #endif
@@ -223,13 +274,12 @@ static inline TranslationBlock *tb_find_fast(void)
             /* as some TB could have been invalidated because
                of memory exceptions while generating the code, we
                must recompute the hash index here */
-            T0 = 0;
+            next_tb = 0;
         }
     }
     return tb;
 }
 
-
 /* main execution loop */
 
 int cpu_exec(CPUState *env1)
@@ -240,13 +290,9 @@ int cpu_exec(CPUState *env1)
 #if defined(reg_REGWPTR)
     uint32_t *saved_regwptr;
 #endif
-#endif
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-    int saved_i7;
-    target_ulong tmp_T0;
 #endif
     int ret, interrupt_request;
-    void (*gen_func)(void);
+    unsigned long (*gen_func)(void);
     TranslationBlock *tb;
     uint8_t *tc_ptr;
 
@@ -259,10 +305,7 @@ int cpu_exec(CPUState *env1)
 #define SAVE_HOST_REGS 1
 #include "hostregs_helper.h"
     env = env1;
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-    /* we also save i7 because longjmp may not restore it */
-    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
-#endif
+    SAVE_GLOBALS();
 
     env_to_regs();
 #if defined(TARGET_I386)
@@ -284,6 +327,7 @@ int cpu_exec(CPUState *env1)
 #elif defined(TARGET_PPC)
 #elif defined(TARGET_MIPS)
 #elif defined(TARGET_SH4)
+#elif defined(TARGET_CRIS)
     /* XXXXX */
 #else
 #error unsupported target CPU
@@ -335,6 +379,8 @@ int cpu_exec(CPUState *env1)
                    do_interrupt(env);
 #elif defined(TARGET_ALPHA)
                     do_interrupt(env);
+#elif defined(TARGET_CRIS)
+                    do_interrupt(env);
 #elif defined(TARGET_M68K)
                     do_interrupt(0);
 #endif
@@ -367,25 +413,22 @@ int cpu_exec(CPUState *env1)
             }
 #endif
 
-            T0 = 0; /* force lookup of first TB */
+            next_tb = 0; /* force lookup of first TB */
             for(;;) {
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-                /* g1 can be modified by some libc? functions */
-                tmp_T0 = T0;
-#endif
+                SAVE_GLOBALS();
                 interrupt_request = env->interrupt_request;
                 if (__builtin_expect(interrupt_request, 0)
 #if defined(TARGET_I386)
                        && env->hflags & HF_GIF_MASK
 #endif
-                               ) {
+                       && !(env->singlestep_enabled & SSTEP_NOIRQ)) {
                     if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                         env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                         env->exception_index = EXCP_DEBUG;
                         cpu_loop_exit();
                     }
 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
-    defined(TARGET_PPC) || defined(TARGET_ALPHA)
+    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                     if (interrupt_request & CPU_INTERRUPT_HALT) {
                         env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                         env->halted = 1;
@@ -399,11 +442,13 @@ int cpu_exec(CPUState *env1)
                         svm_check_intercept(SVM_EXIT_SMI);
                         env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                         do_smm_enter();
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-                        tmp_T0 = 0;
-#else
-                        T0 = 0;
-#endif
+                        next_tb = 0;
+                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
+                        !(env->hflags & HF_NMI_MASK)) {
+                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
+                        env->hflags |= HF_NMI_MASK;
+                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
+                        next_tb = 0;
                     } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                         (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                         !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
@@ -417,11 +462,7 @@ int cpu_exec(CPUState *env1)
                         do_interrupt(intno, 0, 0, 0, 1);
                         /* ensure that no TB jump will be modified as
                            the program flow was changed */
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-                        tmp_T0 = 0;
-#else
-                        T0 = 0;
-#endif
+                        next_tb = 0;
 #if !defined(CONFIG_USER_ONLY)
                     } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                         (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
@@ -435,11 +476,7 @@ int cpu_exec(CPUState *env1)
                         do_interrupt(intno, 0, 0, -1, 1);
                          stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                   ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-                         tmp_T0 = 0;
-#else
-                         T0 = 0;
-#endif
+                        next_tb = 0;
 #endif
                     }
 #elif defined(TARGET_PPC)
@@ -452,11 +489,7 @@ int cpu_exec(CPUState *env1)
                         ppc_hw_interrupt(env);
                         if (env->pending_interrupts == 0)
                             env->interrupt_request &= ~CPU_INTERRUPT_HARD;
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-                        tmp_T0 = 0;
-#else
-                        T0 = 0;
-#endif
+                        next_tb = 0;
                     }
 #elif defined(TARGET_MIPS)
                     if ((interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -469,11 +502,7 @@ int cpu_exec(CPUState *env1)
                         env->exception_index = EXCP_EXT_INTERRUPT;
                         env->error_code = 0;
                         do_interrupt(env);
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-                        tmp_T0 = 0;
-#else
-                        T0 = 0;
-#endif
+                        next_tb = 0;
                     }
 #elif defined(TARGET_SPARC)
                     if ((interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -490,11 +519,7 @@ int cpu_exec(CPUState *env1)
 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                             cpu_check_irqs(env);
 #endif
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-                            tmp_T0 = 0;
-#else
-                            T0 = 0;
-#endif
+                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
@@ -505,17 +530,38 @@ int cpu_exec(CPUState *env1)
                         && !(env->uncached_cpsr & CPSR_F)) {
                         env->exception_index = EXCP_FIQ;
                         do_interrupt(env);
+                        next_tb = 0;
                     }
+                    /* ARMv7-M interrupt return works by loading a magic value
+                       into the PC.  On real hardware the load causes the
+                       return to occur.  The qemu implementation performs the
+                       jump normally, then does the exception return when the
+                       CPU tries to execute code at the magic address.
+                       This will cause the magic PC value to be pushed to
+                       the stack if an interrupt occurred at the wrong time.
+                       We avoid this by disabling interrupts when
+                       pc contains a magic address.  */
                     if (interrupt_request & CPU_INTERRUPT_HARD
-                        && !(env->uncached_cpsr & CPSR_I)) {
+                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
+                            || !(env->uncached_cpsr & CPSR_I))) {
                         env->exception_index = EXCP_IRQ;
                         do_interrupt(env);
+                        next_tb = 0;
                     }
 #elif defined(TARGET_SH4)
-                   /* XXXXX */
+                    if (interrupt_request & CPU_INTERRUPT_HARD) {
+                        do_interrupt(env);
+                        next_tb = 0;
+                    }
 #elif defined(TARGET_ALPHA)
                     if (interrupt_request & CPU_INTERRUPT_HARD) {
                         do_interrupt(env);
+                        next_tb = 0;
+                    }
+#elif defined(TARGET_CRIS)
+                    if (interrupt_request & CPU_INTERRUPT_HARD) {
+                        do_interrupt(env);
+                        next_tb = 0;
                     }
 #elif defined(TARGET_M68K)
                     if (interrupt_request & CPU_INTERRUPT_HARD
@@ -528,6 +574,7 @@ int cpu_exec(CPUState *env1)
                            first signalled.  */
                         env->exception_index = env->pending_vector;
                         do_interrupt(1);
+                        next_tb = 0;
                     }
 #endif
                    /* Don't use the cached interupt_request value,
@@ -536,11 +583,7 @@ int cpu_exec(CPUState *env1)
                         env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                         /* ensure that no TB jump will be modified as
                            the program flow was changed */
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-                        tmp_T0 = 0;
-#else
-                        T0 = 0;
-#endif
+                        next_tb = 0;
                     }
                     if (interrupt_request & CPU_INTERRUPT_EXIT) {
                         env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
@@ -576,6 +619,8 @@ int cpu_exec(CPUState *env1)
                    cpu_dump_state(env, logfile, fprintf, 0);
 #elif defined(TARGET_ALPHA)
                     cpu_dump_state(env, logfile, fprintf, 0);
+#elif defined(TARGET_CRIS)
+                    cpu_dump_state(env, logfile, fprintf, 0);
 #else
 #error unsupported target CPU
 #endif
@@ -589,30 +634,18 @@ int cpu_exec(CPUState *env1)
                             lookup_symbol(tb->pc));
                 }
 #endif
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-                T0 = tmp_T0;
-#endif
+                RESTORE_GLOBALS();
                 /* see if we can patch the calling TB. When the TB
                    spans two pages, we cannot safely do a direct
                    jump. */
                 {
-                    if (T0 != 0 &&
+                    if (next_tb != 0 &&
 #if USE_KQEMU
                         (env->kqemu_enabled != 2) &&
 #endif
-                        tb->page_addr[1] == -1
-#if defined(TARGET_I386) && defined(USE_CODE_COPY)
-                    && (tb->cflags & CF_CODE_COPY) ==
-                    (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
-#endif
-                    ) {
+                        tb->page_addr[1] == -1) {
                     spin_lock(&tb_lock);
-                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
-#if defined(USE_CODE_COPY)
-                    /* propagates the FP use info */
-                    ((TranslationBlock *)(T0 & ~3))->cflags |=
-                        (tb->cflags & CF_FP_USED);
-#endif
+                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                     spin_unlock(&tb_lock);
                 }
                 }
@@ -629,6 +662,17 @@ int cpu_exec(CPUState *env1)
                                        "o0", "o1", "o2", "o3", "o4", "o5",
                                        "l0", "l1", "l2", "l3", "l4", "l5",
                                        "l6", "l7");
+#elif defined(__hppa__)
+                asm volatile ("ble  0(%%sr4,%1)\n"
+                              "copy %%r31,%%r18\n"
+                              "copy %%r28,%0\n"
+                              : "=r" (next_tb)
+                              : "r" (gen_func)
+                              : "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+                                "r8", "r9", "r10", "r11", "r12", "r13",
+                                "r18", "r19", "r20", "r21", "r22", "r23",
+                                "r24", "r25", "r26", "r27", "r28", "r29",
+                                "r30", "r31");
 #elif defined(__arm__)
                 asm volatile ("mov pc, %0\n\t"
                               ".global exec_loop\n\t"
@@ -636,80 +680,6 @@ int cpu_exec(CPUState *env1)
                               : /* no outputs */
                               : "r" (gen_func)
                               : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
-#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
-{
-    if (!(tb->cflags & CF_CODE_COPY)) {
-        if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
-            save_native_fp_state(env);
-        }
-        gen_func();
-    } else {
-        if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
-            restore_native_fp_state(env);
-        }
-        /* we work with native eflags */
-        CC_SRC = cc_table[CC_OP].compute_all();
-        CC_OP = CC_OP_EFLAGS;
-        asm(".globl exec_loop\n"
-            "\n"
-            "debug1:\n"
-            "    pushl %%ebp\n"
-            "    fs movl %10, %9\n"
-            "    fs movl %11, %%eax\n"
-            "    andl $0x400, %%eax\n"
-            "    fs orl %8, %%eax\n"
-            "    pushl %%eax\n"
-            "    popf\n"
-            "    fs movl %%esp, %12\n"
-            "    fs movl %0, %%eax\n"
-            "    fs movl %1, %%ecx\n"
-            "    fs movl %2, %%edx\n"
-            "    fs movl %3, %%ebx\n"
-            "    fs movl %4, %%esp\n"
-            "    fs movl %5, %%ebp\n"
-            "    fs movl %6, %%esi\n"
-            "    fs movl %7, %%edi\n"
-            "    fs jmp *%9\n"
-            "exec_loop:\n"
-            "    fs movl %%esp, %4\n"
-            "    fs movl %12, %%esp\n"
-            "    fs movl %%eax, %0\n"
-            "    fs movl %%ecx, %1\n"
-            "    fs movl %%edx, %2\n"
-            "    fs movl %%ebx, %3\n"
-            "    fs movl %%ebp, %5\n"
-            "    fs movl %%esi, %6\n"
-            "    fs movl %%edi, %7\n"
-            "    pushf\n"
-            "    popl %%eax\n"
-            "    movl %%eax, %%ecx\n"
-            "    andl $0x400, %%ecx\n"
-            "    shrl $9, %%ecx\n"
-            "    andl $0x8d5, %%eax\n"
-            "    fs movl %%eax, %8\n"
-            "    movl $1, %%eax\n"
-            "    subl %%ecx, %%eax\n"
-            "    fs movl %%eax, %11\n"
-            "    fs movl %9, %%ebx\n" /* get T0 value */
-            "    popl %%ebp\n"
-            :
-            : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
-            "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
-            "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
-            "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
-            "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
-            "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
-            "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
-            "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
-            "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
-            "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
-            "a" (gen_func),
-            "m" (*(uint8_t *)offsetof(CPUState, df)),
-            "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
-            : "%ecx", "%edx"
-            );
-    }
-}
 #elif defined(__ia64)
                struct fptr {
                        void *ip;
@@ -719,8 +689,29 @@ int cpu_exec(CPUState *env1)
                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
+#elif defined(__i386)
+                asm volatile ("sub $12, %%esp\n\t"
+                              "push %%ebp\n\t"
+                              "call *%1\n\t"
+                              "pop %%ebp\n\t"
+                              "add $12, %%esp\n\t"
+                              : "=a" (next_tb)
+                              : "a" (gen_func)
+                              : "ebx", "ecx", "edx", "esi", "edi", "cc",
+                                "memory");
+#elif defined(__x86_64__)
+                asm volatile ("sub $8, %%rsp\n\t"
+                              "push %%rbp\n\t"
+                              "call *%1\n\t"
+                              "pop %%rbp\n\t"
+                              "add $8, %%rsp\n\t"
+                              : "=a" (next_tb)
+                              : "a" (gen_func)
+                              : "rbx", "rcx", "rdx", "rsi", "rdi", "r8", "r9",
+                                "r10", "r11", "r12", "r13", "r14", "r15", "cc",
+                                "memory");
 #else
-                gen_func();
+                next_tb = gen_func();
 #endif
                 env->current_tb = NULL;
                 /* reset soft MMU for next block (it can currently
@@ -729,7 +720,7 @@ int cpu_exec(CPUState *env1)
                 if (env->hflags & HF_SOFTMMU_MASK) {
                     env->hflags &= ~HF_SOFTMMU_MASK;
                     /* do not allow linking to another block */
-                    T0 = 0;
+                    next_tb = 0;
                 }
 #endif
 #if defined(USE_KQEMU)
@@ -747,11 +738,6 @@ int cpu_exec(CPUState *env1)
 
 
 #if defined(TARGET_I386)
-#if defined(USE_CODE_COPY)
-    if (env->native_fp_regs) {
-        save_native_fp_state(env);
-    }
-#endif
     /* restore flags in standard format */
     env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
 #elif defined(TARGET_ARM)
@@ -769,15 +755,14 @@ int cpu_exec(CPUState *env1)
 #elif defined(TARGET_MIPS)
 #elif defined(TARGET_SH4)
 #elif defined(TARGET_ALPHA)
+#elif defined(TARGET_CRIS)
     /* XXXXX */
 #else
 #error unsupported target CPU
 #endif
 
     /* restore global registers */
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
-#endif
+    RESTORE_GLOBALS();
 #include "hostregs_helper.h"
 
     /* fail safe : never use cpu_single_env outside cpu_exec() */
@@ -816,26 +801,26 @@ void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
     env = saved_env;
 }
 
-void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
+void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
 {
     CPUX86State *saved_env;
 
     saved_env = env;
     env = s;
 
-    helper_fsave((target_ulong)ptr, data32);
+    helper_fsave(ptr, data32);
 
     env = saved_env;
 }
 
-void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
+void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
 {
     CPUX86State *saved_env;
 
     saved_env = env;
     env = s;
 
-    helper_frstor((target_ulong)ptr, data32);
+    helper_frstor(ptr, data32);
 
     env = saved_env;
 }
@@ -869,8 +854,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
     }
 
     /* see if it is an MMU fault */
-    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
-                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
+    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     if (ret < 0)
         return 0; /* not an MMU fault */
     if (ret == 0)
@@ -919,7 +903,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
         return 1;
     }
     /* see if it is an MMU fault */
-    ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
+    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     if (ret < 0)
         return 0; /* not an MMU fault */
     if (ret == 0)
@@ -935,6 +919,8 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
        do it (XXX: use sigsetjmp) */
     sigprocmask(SIG_SETMASK, old_set, NULL);
     cpu_loop_exit();
+    /* never comes here */
+    return 1;
 }
 #elif defined(TARGET_SPARC)
 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
@@ -955,7 +941,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
         return 1;
     }
     /* see if it is an MMU fault */
-    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
+    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     if (ret < 0)
         return 0; /* not an MMU fault */
     if (ret == 0)
@@ -971,6 +957,8 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
        do it (XXX: use sigsetjmp) */
     sigprocmask(SIG_SETMASK, old_set, NULL);
     cpu_loop_exit();
+    /* never comes here */
+    return 1;
 }
 #elif defined (TARGET_PPC)
 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
@@ -992,7 +980,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
     }
 
     /* see if it is an MMU fault */
-    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
+    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     if (ret < 0)
         return 0; /* not an MMU fault */
     if (ret == 0)
@@ -1041,7 +1029,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
         return 1;
     }
     /* see if it is an MMU fault */
-    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
+    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     if (ret < 0)
         return 0; /* not an MMU fault */
     if (ret == 0)
@@ -1081,7 +1069,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
     }
 
     /* see if it is an MMU fault */
-    ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
+    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     if (ret < 0)
         return 0; /* not an MMU fault */
     if (ret == 0)
@@ -1131,7 +1119,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
     }
 
     /* see if it is an MMU fault */
-    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
+    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     if (ret < 0)
         return 0; /* not an MMU fault */
     if (ret == 0)
@@ -1176,7 +1164,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
     }
 
     /* see if it is an MMU fault */
-    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, 1, 0);
+    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     if (ret < 0)
         return 0; /* not an MMU fault */
     if (ret == 0)
@@ -1200,6 +1188,47 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
     /* never comes here */
     return 1;
 }
+#elif defined (TARGET_CRIS)
+static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
+                                    int is_write, sigset_t *old_set,
+                                    void *puc)
+{
+    TranslationBlock *tb;
+    int ret;
+
+    if (cpu_single_env)
+        env = cpu_single_env; /* XXX: find a correct solution for multithread */
+#if defined(DEBUG_SIGNAL)
+    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
+           pc, address, is_write, *(unsigned long *)old_set);
+#endif
+    /* XXX: locking issue */
+    if (is_write && page_unprotect(h2g(address), pc, puc)) {
+        return 1;
+    }
+
+    /* see if it is an MMU fault */
+    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
+    if (ret < 0)
+        return 0; /* not an MMU fault */
+    if (ret == 0)
+        return 1; /* the MMU fault was handled without causing real CPU fault */
+
+    /* now we have a real cpu fault */
+    tb = tb_find_pc(pc);
+    if (tb) {
+        /* the PC is inside the translated code. It means that we have
+           a virtual CPU fault */
+        cpu_restore_state(tb, env, pc, puc);
+    }
+    /* we restore the process signal mask as the sigreturn should
+       do it (XXX: use sigsetjmp) */
+    sigprocmask(SIG_SETMASK, old_set, NULL);
+    cpu_loop_exit();
+    /* never comes here */
+    return 1;
+}
+
 #else
 #error unsupported target CPU
 #endif
@@ -1218,26 +1247,6 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
 # define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
 #endif
 
-#if defined(USE_CODE_COPY)
-static void cpu_send_trap(unsigned long pc, int trap,
-                          struct ucontext *uc)
-{
-    TranslationBlock *tb;
-
-    if (cpu_single_env)
-        env = cpu_single_env; /* XXX: find a correct solution for multithread */
-    /* now we have a real cpu fault */
-    tb = tb_find_pc(pc);
-    if (tb) {
-        /* the PC is inside the translated code. It means that we have
-           a virtual CPU fault */
-        cpu_restore_state(tb, env, pc, uc);
-    }
-    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
-    raise_exception_err(trap, env->error_code);
-}
-#endif
-
 int cpu_signal_handler(int host_signum, void *pinfo,
                        void *puc)
 {
@@ -1254,17 +1263,10 @@ int cpu_signal_handler(int host_signum, void *pinfo,
 #endif
     pc = EIP_sig(uc);
     trapno = TRAP_sig(uc);
-#if defined(TARGET_I386) && defined(USE_CODE_COPY)
-    if (trapno == 0x00 || trapno == 0x05) {
-        /* send division by zero or bound exception */
-        cpu_send_trap(pc, trapno, uc);
-        return 1;
-    } else
-#endif
-        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
-                                 trapno == 0xe ?
-                                 (ERROR_sig(uc) >> 1) & 1 : 0,
-                                 &uc->uc_sigmask, puc);
+    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
+                             trapno == 0xe ?
+                             (ERROR_sig(uc) >> 1) & 1 : 0,
+                             &uc->uc_sigmask, puc);
 }
 
 #elif defined(__x86_64__)
@@ -1430,7 +1432,7 @@ int cpu_signal_handler(int host_signum, void *pinfo,
     unsigned long pc;
     int is_write;
 
-    pc = uc->uc_mcontext.gregs[R15];
+    pc = uc->uc_mcontext.arm_pc;
     /* XXX: compute is_write */
     is_write = 0;
     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
@@ -1523,6 +1525,24 @@ int cpu_signal_handler(int host_signum, void *pinfo,
                              is_write, &uc->uc_sigmask, puc);
 }
 
+#elif defined(__hppa__)
+
+int cpu_signal_handler(int host_signum, void *pinfo,
+                       void *puc)
+{
+    struct siginfo *info = pinfo;
+    struct ucontext *uc = puc;
+    unsigned long pc;
+    int is_write;
+
+    pc = uc->uc_mcontext.sc_iaoq[0];
+    /* FIXME: compute is_write */
+    is_write = 0;
+    return handle_cpu_signal(pc, (unsigned long)info->si_addr, 
+                             is_write,
+                             &uc->uc_sigmask, puc);
+}
+
 #else
 
 #error host CPU specific signal handler needed