/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;

//#define DEBUG_SIGNAL
#if defined(TARGET_ARM) || defined(TARGET_SPARC)
/* XXX: unify with i386 target */
void cpu_loop_exit(void)
{
    longjmp(env->jmp_env, 1);
}
#endif
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      unsigned int flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
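
/* Note: tb_find_slow() keys the lookup on the *physical* code address
   (tb_phys_hash), so code shared between several virtual mappings is only
   translated once; the result is then cached in env->tb_jmp_cache, indexed
   by the virtual PC, which is what tb_find_fast() below consults first. */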
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    unsigned int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    flags = (env->pstate << 2) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    flags = env->psrs | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1);
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) |
        (msr_se << MSR_SE) | (msr_le << MSR_LE);
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC;
#elif defined(TARGET_SH4)
    flags = env->sr & (SR_MD | SR_RB);
    cs_base = 0;         /* XXXXX */
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}
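
/* The (pc, cs_base, flags) triple is the full lookup key: two blocks at the
   same guest PC but translated under different flags (e.g. a different
   privilege level or VM86 mode on i386) are distinct TBs, so a cached entry
   is only reused when all three fields match. */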
/* main execution loop */

int cpu_exec(CPUState *env1)
{
    int saved_T0, saved_T1;
    CPUState *saved_env;
#if defined(TARGET_I386)
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int saved_i7, tmp_T0;
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;
#if defined(TARGET_I386)
    /* handle exit of HALTED state */
    if (env1->hflags & HF_HALTED_MASK) {
        /* disable halt condition */
        if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env1->eflags & IF_MASK)) {
            env1->hflags &= ~HF_HALTED_MASK;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_PPC)
    if (env1->halted) {
        if (env1->msr[MSR_EE] &&
            (env1->interrupt_request &
             (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER))) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_SPARC)
    if (env1->halted) {
        if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env1->psret != 0)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_ARM)
    if (env1->halted) {
        /* An interrupt wakes the CPU even if the I and F CPSR bits are
           set. */
        if (env1->interrupt_request
            & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_MIPS)
    if (env1->halted) {
        if (env1->interrupt_request &
            (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#endif

    cpu_single_env = env1;
    /* first we save global registers */
    saved_env = env;
    env = env1;
    saved_T0 = T0;
    saved_T1 = T1;
#ifdef __sparc__
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
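
            /* Note: when the kqemu accelerator accepts the current CPU
               state, execution is handed to the kernel module instead of
               going through translated blocks.  The eflags shuffling around
               the kqemu_cpu_exec() call converts between QEMU's lazy
               condition-code representation (CC_SRC/CC_OP/DF) and the
               architectural eflags value the accelerator expects. */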
            T0 = 0; /* force lookup of first TB */
            for(;;) {
#ifdef __sparc__
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)) {
#if defined(TARGET_I386)
                    /* if hardware interrupt pending, we execute it */
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        T0 = 0;
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
                    if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                        env->exception_index = EXCP_EXTERNAL;
                        env->error_code = 0;
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        T0 = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) {
                        env->exception_index = EXCP_DECR;
                        env->error_code = 0;
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                        T0 = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        (env->CP0_Status & env->CP0_Cause & 0x0000FF00) &&
                        !(env->hflags & MIPS_HFLAG_EXL) &&
                        !(env->hflags & MIPS_HFLAG_ERL) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* raise the external interrupt */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        T0 = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
                            T0 = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    } else if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_cpsr & CPSR_I)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                    }
#elif defined(TARGET_SH4)
#endif
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        T0 = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
                if ((loglevel & CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
                    /* restore flags in standard format */
                    env->regs[R_EAX] = EAX;
                    env->regs[R_EBX] = EBX;
                    env->regs[R_ECX] = ECX;
                    env->regs[R_EDX] = EDX;
                    env->regs[R_ESI] = ESI;
                    env->regs[R_EDI] = EDI;
                    env->regs[R_EBP] = EBP;
                    env->regs[R_ESP] = ESP;
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
                tb = tb_find_fast();
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (T0 != 0 &&
#if defined(USE_KQEMU)
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                    && (tb->cflags & CF_CODE_COPY) ==
                    (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
#endif
                    ) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
#if defined(USE_CODE_COPY)
                    /* propagates the FP use info */
                    ((TranslationBlock *)(T0 & ~3))->cflags |=
                        (tb->cflags & CF_FP_USED);
#endif
                    spin_unlock(&tb_lock);
                }
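
                /* T0 carries the address of the TB that just finished, with
                   the taken jump slot (0 or 1) encoded in its two low bits;
                   that is why tb_add_jump() above masks with ~3 and 3.  Once
                   patched, the previous block branches directly into this
                   one without returning to this dispatch loop. */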
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
                {
                    if (!(tb->cflags & CF_CODE_COPY)) {
                        if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
                            save_native_fp_state(env);
                        }
                        gen_func();
                    } else {
                        if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
                            restore_native_fp_state(env);
                        }
                        /* we work with native eflags */
                        CC_SRC = cc_table[CC_OP].compute_all();
                        CC_OP = CC_OP_EFLAGS;
                        asm(".globl exec_loop\n"
                            " fs movl %11, %%eax\n"
                            " andl $0x400, %%eax\n"
                            " fs orl %8, %%eax\n"
                            " fs movl %%esp, %12\n"
                            " fs movl %0, %%eax\n"
                            " fs movl %1, %%ecx\n"
                            " fs movl %2, %%edx\n"
                            " fs movl %3, %%ebx\n"
                            " fs movl %4, %%esp\n"
                            " fs movl %5, %%ebp\n"
                            " fs movl %6, %%esi\n"
                            " fs movl %7, %%edi\n"
                            " fs movl %%esp, %4\n"
                            " fs movl %12, %%esp\n"
                            " fs movl %%eax, %0\n"
                            " fs movl %%ecx, %1\n"
                            " fs movl %%edx, %2\n"
                            " fs movl %%ebx, %3\n"
                            " fs movl %%ebp, %5\n"
                            " fs movl %%esi, %6\n"
                            " fs movl %%edi, %7\n"
                            " movl %%eax, %%ecx\n"
                            " andl $0x400, %%ecx\n"
                            " andl $0x8d5, %%eax\n"
                            " fs movl %%eax, %8\n"
                            " subl %%ecx, %%eax\n"
                            " fs movl %%eax, %11\n"
                            " fs movl %9, %%ebx\n" /* get T0 value */
                            : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
                              "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
                              "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
                              "m" (*(uint8_t *)offsetof(CPUState, df)),
                              "m" (*(uint8_t *)offsetof(CPUState, saved_esp)));
                    }
                }
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                gen_func();
#endif
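
                /* On most hosts the translated code is entered through the
                   plain indirect call via gen_func(); the __sparc__ and
                   __arm__ variants use inline asm so the return address ends
                   up where the generated code expects it, and the __ia64
                   variant builds the (ip, gp) function descriptor by hand. */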
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* inner for(;;) */
        } /* if (setjmp(...) == 0) */
    } /* outer for(;;) */
#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    }
#endif
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
    /* restore global registers */
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#else
#error unsupported target CPU
#endif
#ifdef __sparc__
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
    T0 = saved_T0;
    T1 = saved_T1;
    env = saved_env;
    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    helper_fsave((target_ulong)ptr, data32);
    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    helper_frstor((target_ulong)ptr, data32);
    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)
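
/* Without the software MMU, guest memory is mapped directly into the host
   process, so guest memory faults arrive as host signals.  The
   handle_cpu_signal() variants below decide whether a SIGSEGV was a write
   to a page protected for self-modifying-code detection (page_unprotect),
   a guest MMU fault to be reflected into the guest, or neither. */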
#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    // do_raise_exception_err(env->exception_index, env->error_code);
    /* activate soft MMU for this block */
    cpu_resume_from_signal(env, puc);
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif
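
/* The host-specific cpu_signal_handler() implementations below only extract
   the faulting PC, a best-effort "was this a write?" flag and the saved
   signal mask from the host signal context, then defer to
   handle_cpu_signal() above. */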
#if defined(__i386__)

#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
                          struct ucontext *uc)
{
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    }
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);
}
#endif

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = uc->uc_mcontext.gregs[REG_EIP];
    trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 */

#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)         REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                  REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                  REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                  REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                   REG_sig(link, context) /* Link register */
# define CR_sig(context)                   REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)       (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)                (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                  REG_sig(dar, context)
# define DSISR_sig(context)                REG_sig(dsisr, context)
# define TRAP_sig(context)                 REG_sig(trap, context)
#else /* __APPLE__ */
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)   ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)   ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)     ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)         REG_sig(r##reg_num, context)
# define IAR_sig(context)                  REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                  REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                  REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                   REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                   REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)       FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                  EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)                EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                 EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: /* stw */
    case 0x0e: /* stb */
    case 0x0f: /* stq_u */
    case 0x24: /* stf */
    case 0x25: /* stg */
    case 0x26: /* sts */
    case 0x27: /* stt */
    case 0x2c: /* stl */
    case 0x2d: /* stq */
    case 0x2e: /* stl_c */
    case 0x2f: /* stq_c */
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: /* stb */
        case 0x06: /* sth */
        case 0x04: /* st */
        case 0x07: /* std */
        case 0x24: /* stf */
        case 0x27: /* stdf */
        case 0x25: /* stfsr */
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, struct siginfo *info, void *puc)
{
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif
#endif /* !defined(CONFIG_SOFTMMU) */