/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#include <signal.h>
#include <sys/ucontext.h>
#endif
int tb_invalidated_flag;
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
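/* For reference: a minimal, self-contained sketch of the setjmp/longjmp
   unwinding pattern that cpu_loop_exit() relies on.  All "example_*"
   names are illustrative only, not part of the emulator. */
#include <setjmp.h>

static jmp_buf example_jmp_env;

static void example_raise(void)
{
    longjmp(example_jmp_env, 1);   /* unwind back to the setjmp() site */
}

static int example_cpu_loop(void)
{
    if (setjmp(example_jmp_env) == 0) {
        example_raise();           /* any callee below this point may unwind */
    }
    return 0;                      /* execution resumes here after the longjmp */
}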
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr +
                             code_gen_size + CODE_GEN_ALIGN - 1) &
                            ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
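/* The code_gen_ptr update in tb_find_slow() rounds the generated-code
   pointer up to the next CODE_GEN_ALIGN boundary.  A minimal sketch of
   that power-of-two align-up idiom (the helper name is illustrative):
   e.g. example_align_up(0x1003, 16) == 0x1010. */
static inline unsigned long example_align_up(unsigned long v,
                                             unsigned long align)
{
    /* valid only when align is a power of two */
    return (v + align - 1) & ~(align - 1);
}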
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->sr & (SR_MD | SR_RB);
    cs_base = 0;         /* XXXXX */
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = 0;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}
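/* tb_find_fast() is a direct-mapped cache probe: hash the guest pc,
   check a single slot, and fall back to the slow path on any mismatch.
   A self-contained sketch of the same pattern (all "example_*" names
   are illustrative): */
#define EXAMPLE_CACHE_BITS 10

struct example_cache_entry {
    unsigned long key;
    void *val;
};

static struct example_cache_entry example_cache[1 << EXAMPLE_CACHE_BITS];

static void *example_cached_lookup(unsigned long key,
                                   void *(*slow_path)(unsigned long))
{
    struct example_cache_entry *e =
        &example_cache[key & ((1 << EXAMPLE_CACHE_BITS) - 1)];

    if (e->key != key || !e->val) {
        /* miss: recompute through the slow path and refill the slot */
        e->val = slow_path(key);
        e->key = key;
    }
    return e->val;
}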
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    int saved_i7;
    target_ulong tmp_T0;
#endif
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
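    /* Worked example of the DF encoding above: bit 10 of eflags is the
       direction flag, and DF = 1 - (2 * bit) maps it to a step value:
           bit 10 = 0  ->  DF = +1  (string instructions increment)
           bit 10 = 1  ->  DF = -1  (string instructions decrement)
       so generated string code can simply add DF to its index registers. */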
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            T0 = 0; /* force lookup of first TB */
            for(;;) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_cpsr & CPSR_I)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                    }
#elif defined(TARGET_SH4)
                    /* XXXXX */
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                T0 = tmp_T0;
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (T0 != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                    && (tb->cflags & CF_CODE_COPY) ==
                    (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
#endif
                    ) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
#if defined(USE_CODE_COPY)
                    /* propagates the FP use info */
                    ((TranslationBlock *)(T0 & ~3))->cflags |=
                        (tb->cflags & CF_FP_USED);
#endif
                    spin_unlock(&tb_lock);
                }
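                /* Note on the encoding used above: translated code hands
                   back the address of the TB it exited from in T0, with
                   the jump-slot index packed into the two low bits, so
                   (T0 & ~3) recovers the TranslationBlock pointer and
                   (T0 & 3) selects which of its direct jumps to patch. */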
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
{
    if (!(tb->cflags & CF_CODE_COPY)) {
        if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
            save_native_fp_state(env);
        }
        gen_func();
    } else {
        if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
            restore_native_fp_state(env);
        }
        /* we work with native eflags */
        CC_SRC = cc_table[CC_OP].compute_all();
        CC_OP = CC_OP_EFLAGS;
        asm(".globl exec_loop\n"
            "\n"
            "debug1:\n"
            "    pushl %%ebp\n"
            "    fs movl %10, %9\n"
            "    fs movl %11, %%eax\n"
            "    andl $0x400, %%eax\n"
            "    fs orl %8, %%eax\n"
            "    pushl %%eax\n"
            "    popf\n"
            "    fs movl %%esp, %12\n"
            "    fs movl %0, %%eax\n"
            "    fs movl %1, %%ecx\n"
            "    fs movl %2, %%edx\n"
            "    fs movl %3, %%ebx\n"
            "    fs movl %4, %%esp\n"
            "    fs movl %5, %%ebp\n"
            "    fs movl %6, %%esi\n"
            "    fs movl %7, %%edi\n"
            "    fs jmp *%9\n"
            "exec_loop:\n"
            "    fs movl %%esp, %4\n"
            "    fs movl %12, %%esp\n"
            "    fs movl %%eax, %0\n"
            "    fs movl %%ecx, %1\n"
            "    fs movl %%edx, %2\n"
            "    fs movl %%ebx, %3\n"
            "    fs movl %%ebp, %5\n"
            "    fs movl %%esi, %6\n"
            "    fs movl %%edi, %7\n"
            "    pushfl\n"
            "    popl %%eax\n"
            "    movl %%eax, %%ecx\n"
            "    andl $0x400, %%ecx\n"
            "    shrl $9, %%ecx\n"
            "    andl $0x8d5, %%eax\n"
            "    fs movl %%eax, %8\n"
            "    movl $1, %%eax\n"
            "    subl %%ecx, %%eax\n"
            "    fs movl %%eax, %11\n"
            "    fs movl %9, %%ebx\n" /* get T0 value */
            "    popl %%ebp\n"
            : /* no outputs */
            : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
              "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
              "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
              "a" (gen_func),
              "m" (*(uint8_t *)offsetof(CPUState, df)),
              "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
            : "%ecx", "%edx"
            );
    }
}
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
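                /* On ia64, an indirect call goes through a two-word
                   function descriptor (entry address plus gp), so the
                   cast above builds a descriptor on the fly instead of
                   calling gen_func() directly, with gp pointed into the
                   code generation buffer. */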
#else
                gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* inner for(;;) */
        } else {
            env_to_regs();
        }
    } /* outer for(;;) */
#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    }
#endif
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    /* restore global registers */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave((target_ulong)ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor((target_ulong)ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
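/* A minimal sketch of how a host SIGSEGV handler such as the ones above
   is typically installed: sigaction() with SA_SIGINFO so the handler
   receives the siginfo_t (faulting address) and the ucontext (host
   register state).  The "example_*" names are illustrative only; the
   prototype matches the cpu_signal_handler() definitions below. */
int cpu_signal_handler(int host_signum, void *pinfo, void *puc);

static void example_segv_handler(int sig, siginfo_t *info, void *puc)
{
    /* info->si_addr is the faulting address, puc the host context */
    cpu_signal_handler(sig, info, puc);
}

static void example_install_segv_handler(void)
{
    struct sigaction act;

    sigemptyset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = example_segv_handler;
    sigaction(SIGSEGV, &act, NULL);
}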
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif
#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
                          struct ucontext *uc)
{
    TranslationBlock *tb;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    }
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);
}
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (ERROR_sig(uc) >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                 REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                  REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                  REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
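/* Note: the opcode occupies the top six bits of every Alpha instruction,
   so (insn >> 26) above isolates it; the opcodes listed in the switch
   are Alpha's store instructions, i.e. a fault raised by one of them is
   a write fault. */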
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1552 int cpu_signal_handler(int host_signum
, void *pinfo
,
1555 siginfo_t
*info
= pinfo
;
1556 struct ucontext
*uc
= puc
;
1560 pc
= uc
->uc_mcontext
.psw
.addr
;
1561 /* XXX: compute is_write */
1563 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1564 is_write
, &uc
->uc_sigmask
, puc
);
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */