/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
int tb_invalidated_flag;
//#define DEBUG_SIGNAL
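/* cpu_loop_exit() aborts the block currently executing by longjmp()ing
   back to the setjmp() point in cpu_exec() below; the i386 target keeps
   its own version elsewhere, hence the unification XXX */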
#if defined(TARGET_ARM) || defined(TARGET_SPARC)
/* XXX: unify with i386 target */
void cpu_loop_exit(void)
{
    longjmp(env->jmp_env, 1);
}
#endif
/* main execution loop */

int cpu_exec(CPUState *env1)
{
    int saved_T0, saved_T1, saved_T2;
    int code_gen_size, ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb, **ptb;
    uint8_t *tc_ptr, *cs_base, *pc;
    unsigned int flags;

    /* first we save global registers */
#if defined(__sparc__)
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif
#if defined(TARGET_I386)
    EAX = env->regs[R_EAX];
    ECX = env->regs[R_ECX];
    EDX = env->regs[R_EDX];
    EBX = env->regs[R_EBX];
    ESP = env->regs[R_ESP];
    EBP = env->regs[R_EBP];
    ESI = env->regs[R_ESI];
    EDI = env->regs[R_EDI];
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
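    /* note: the arithmetic flags are kept lazily while translated code
       runs: CC_SRC holds the operands of the last flag-setting operation
       and CC_OP identifies it, so EFLAGS is only rebuilt on demand via
       cc_table[CC_OP].compute_all(). DF is mapped to +1/-1 so the string
       instructions can add it directly to ESI/EDI. */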
#elif defined(TARGET_ARM)
    {
        unsigned int psr;
        psr = env->cpsr;
        env->CF = (psr >> 29) & 1;
        env->NZF = (psr & 0xc0000000) ^ 0x40000000;
        env->VF = (psr << 3) & 0x80000000;
        env->cpsr = psr & ~0xf0000000;
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
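            /* setjmp() == 0 is the normal entry; when cpu_loop_exit()
               longjmp()s out of generated code, setjmp() returns non-zero,
               the body is skipped and the outer for(;;) comes back around
               to service the pending env->exception_index below */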
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
#elif defined(TARGET_PPC)
#endif
                }
                env->exception_index = -1;
            }

            T0 = 0; /* force lookup of first TB */
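            /* T0 doubles as the chaining link: a finished TB leaves its
               own TranslationBlock pointer in T0, with the index of the
               taken jump slot in the low 2 bits (see the T0 & ~3 and
               T0 & 3 uses below); T0 = 0 means "full lookup, no jump
               patching" */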
            for(;;) {
                /* g1 can be modified by some libc? functions */
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)) {
#if defined(TARGET_I386)
                    /* if hardware interrupt pending, we execute it */
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        intno = cpu_x86_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        T0 = 0;
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                        do_queue_exception(EXCP_EXTERNAL);
                        if (check_exception_state(env))
                            cpu_loop_exit();
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        T0 = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
                if (loglevel & CPU_LOG_EXEC) {
#if defined(TARGET_I386)
                    /* restore flags in standard format */
                    env->regs[R_EAX] = EAX;
                    env->regs[R_EBX] = EBX;
                    env->regs[R_ECX] = ECX;
                    env->regs[R_EDX] = EDX;
                    env->regs[R_ESI] = ESI;
                    env->regs[R_EDI] = EDI;
                    env->regs[R_EBP] = EBP;
                    env->regs[R_ESP] = ESP;
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    env->cpsr = compute_cpsr();
                    cpu_arm_dump_state(env, logfile, 0);
                    env->cpsr &= ~0xf0000000;
#elif defined(TARGET_SPARC)
                    cpu_sparc_dump_state (env, logfile, 0);
#elif defined(TARGET_PPC)
                    cpu_ppc_dump_state(env, logfile, 0);
#else
#error unsupported target CPU
#endif
                }
                /* we record a subset of the CPU state. It will
                   always be the same before a given translated block
                   is executed. */
#if defined(TARGET_I386)
                flags = env->hflags;
                flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                cs_base = env->segs[R_CS].base;
                pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
                flags = 0;
                cs_base = 0;
                pc = (uint8_t *)env->regs[15];
#elif defined(TARGET_SPARC)
                flags = 0;
                cs_base = (uint8_t *)env->npc;
                pc = (uint8_t *) env->pc;
#elif defined(TARGET_PPC)
                flags = 0;
                cs_base = 0;
                pc = (uint8_t *)env->nip;
#else
#error unsupported CPU
#endif
                tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
                             flags);
                if (!tb) {
                    TranslationBlock **ptb1;
                    unsigned int h;
                    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

                    spin_lock(&tb_lock);

                    tb_invalidated_flag = 0;

                    /* find translated block using physical mappings */
                    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
                    phys_page1 = phys_pc & TARGET_PAGE_MASK;
                    phys_page2 = -1;
                    h = tb_phys_hash_func(phys_pc);
                    ptb1 = &tb_phys_hash[h];
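                    /* walk the collision chain of tb_phys_hash[h]: a TB is
                       reused only if the guest pc, cs base, flags and the
                       physical address(es) of its page(s) all match */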
                    for(;;) {
                        tb = *ptb1;
                        if (!tb)
                            goto not_found;
                        if (tb->pc == (unsigned long)pc &&
                            tb->page_addr[0] == phys_page1 &&
                            tb->cs_base == (unsigned long)cs_base &&
                            tb->flags == flags) {
                            /* check next page if needed */
                            if (tb->page_addr[1] != -1) {
                                virt_page2 = ((unsigned long)pc & TARGET_PAGE_MASK) +
                                    TARGET_PAGE_SIZE;
                                phys_page2 = get_phys_addr_code(env, virt_page2);
                                if (tb->page_addr[1] == phys_page2)
                                    goto found;
                            } else {
                                goto found;
                            }
                        }
                        ptb1 = &tb->phys_hash_next;
                    }
                not_found:
                    /* if no translated code available, then translate it now */
                    tb = tb_alloc((unsigned long)pc);
                    if (!tb) {
                        /* flush must be done */
                        tb_flush(env);
                        /* cannot fail at this point */
                        tb = tb_alloc((unsigned long)pc);
                        /* don't forget to invalidate previous TB info */
                        ptb = &tb_hash[tb_hash_func((unsigned long)pc)];
                        T0 = 0;
                    }
                    tc_ptr = code_gen_ptr;
                    tb->tc_ptr = tc_ptr;
                    tb->cs_base = (unsigned long)cs_base;
                    tb->flags = flags;
                    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
                    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
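                    /* (x + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1) is
                       the usual align-up idiom: the next translation will
                       start on a CODE_GEN_ALIGN boundary */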
                    /* check next page if needed */
                    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
                    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
                        phys_page2 = get_phys_addr_code(env, virt_page2);
                    }
                    tb_link_phys(tb, phys_pc, phys_page2);

                found:
                    if (tb_invalidated_flag) {
                        /* as some TB could have been invalidated because
                           of memory exceptions while generating the code, we
                           must recompute the hash index here */
                        ptb = &tb_hash[tb_hash_func((unsigned long)pc)];
                        while (*ptb != NULL)
                            ptb = &(*ptb)->hash_next;
                        T0 = 0;
                    }
                    /* we add the TB in the virtual pc hash table */
                    *ptb = tb;
                    tb->hash_next = NULL;
                    tb_link(tb);
                    spin_unlock(&tb_lock);
                }
                if (loglevel & CPU_LOG_EXEC) {
                    fprintf(logfile, "Trace 0x%08lx [0x%08lx] %s\n",
                            (long)tb->tc_ptr, (long)tb->pc,
                            lookup_symbol((void *)tb->pc));
                }
                /* see if we can patch the calling TB. */
                if (T0 != 0
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                    && (tb->cflags & CF_CODE_COPY) ==
                    (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
#endif
                    ) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(T0 & ~3), T0 & 3, tb);
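                    /* direct block chaining: patch jump slot (T0 & 3) of
                       the TB that just ran so it branches straight into
                       this TB's code next time, bypassing the lookup above;
                       the interrupt paths reset T0 to 0 precisely to
                       prevent this patching when control flow changed */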
#if defined(USE_CODE_COPY)
                    /* propagates the FP use info */
                    ((TranslationBlock *)(T0 & ~3))->cflags |=
                        (tb->cflags & CF_FP_USED);
#endif
                    spin_unlock(&tb_lock);
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
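                /* tc_ptr is host machine code emitted by cpu_gen_code();
                   casting it to a function pointer and calling it is the
                   whole "execute" step, except on the hosts below that
                   need inline asm to control register usage across the
                   call */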
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
                if (!(tb->cflags & CF_CODE_COPY)) {
                    if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
                        save_native_fp_state(env);
                    }
                    gen_func();
                } else {
                    if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
                        restore_native_fp_state(env);
                    }
                    /* we work with native eflags */
                    CC_SRC = cc_table[CC_OP].compute_all();
                    CC_OP = CC_OP_EFLAGS;
                    asm(".globl exec_loop\n"
                        " fs movl %11, %%eax\n"
                        " andl $0x400, %%eax\n"
                        " fs orl %8, %%eax\n"
                        " fs movl %%esp, %12\n"
                        " fs movl %0, %%eax\n"
                        " fs movl %1, %%ecx\n"
                        " fs movl %2, %%edx\n"
                        " fs movl %3, %%ebx\n"
                        " fs movl %4, %%esp\n"
                        " fs movl %5, %%ebp\n"
                        " fs movl %6, %%esi\n"
                        " fs movl %7, %%edi\n"
                        " fs movl %%esp, %4\n"
                        " fs movl %12, %%esp\n"
                        " fs movl %%eax, %0\n"
                        " fs movl %%ecx, %1\n"
                        " fs movl %%edx, %2\n"
                        " fs movl %%ebx, %3\n"
                        " fs movl %%ebp, %5\n"
                        " fs movl %%esi, %6\n"
                        " fs movl %%edi, %7\n"
                        " movl %%eax, %%ecx\n"
                        " andl $0x400, %%ecx\n"
                        " andl $0x8d5, %%eax\n"
                        " fs movl %%eax, %8\n"
                        " subl %%ecx, %%eax\n"
                        " fs movl %%eax, %11\n"
                        " fs movl %9, %%ebx\n" /* get T0 value */
                        : /* no outputs */
                        : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
                          "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
                          "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
                          "m" (*(uint8_t *)offsetof(CPUState, df)),
                          "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
                        );
                }
#else
                gen_func();
#endif
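                /* note: under USE_CODE_COPY the guest x86 registers live in
                   the real host registers while the copied code runs, and
                   CPUState is addressed through the %fs segment override,
                   which is why every spill/reload above is an "fs movl"
                   with an offsetof(CPUState, ...) operand */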
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
            }
        }
    } /* for(;;) */

#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    }
#endif
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);

    /* restore global registers */
#elif defined(TARGET_ARM)
    env->cpsr = compute_cpsr();
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#else
#error unsupported target CPU
#endif
#if defined(__sparc__)
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif

    return ret;
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (uint8_t *)(selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}
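/* note: with CR0.PE clear (real mode) or VM_MASK set (vm86 mode), a
   selector is just a paragraph number, so the segment cache is loaded
   directly with base = selector << 4 and a 64KB limit; protected mode
   goes through load_seg(), which does the descriptor table access and
   privilege checks */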
void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    helper_fsave(ptr, data32);
    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    helper_frstor(ptr, data32);
    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#include <sys/ucontext.h>

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
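    /* summary of the return convention: 1 means the fault was fully
       handled here (page unprotected or soft MMU fill) and the faulting
       instruction can simply be restarted; 0 means it was not an MMU
       fault at all and the caller must treat it as a real host fault;
       falling through below turns it into a guest exception instead */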
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    /* XXX: do more */
    return 0;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    /* XXX: locking issue */
    if (is_write && page_unprotect(address)) {
        return 1;
    }
    return 0;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_queue_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    }
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
                          struct ucontext *uc)
{
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    }
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);
}
#endif

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = uc->uc_mcontext.gregs[REG_EIP];
    trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}
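/* for trap 0xe (the x86 page fault) the CPU pushes an error code, found
   in REG_ERR; bit 1 of it is set for write accesses, so
   (REG_ERR >> 1) & 1 recovers is_write, and 0 is passed for any other
   trap number */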
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    struct pt_regs *regs = uc->uc_mcontext.regs;
    unsigned long pc;
    int is_write;

    pc = regs->nip;
    is_write = 0;
    if (regs->dsisr & 0x00800000)
        is_write = 1;
    else if (regs->trap != 0x400 && (regs->dsisr & 0x02000000))
        is_write = 1;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
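/* hosts whose signal frames expose no write flag (alpha, sparc) must
   decode the faulting instruction: when its opcode field is one of the
   store instructions, the access was a write; a kernel-provided flag
   would be cheaper, hence the XXX notes above */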
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */