/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
24 #include "qemu-barrier.h"
26 #if !defined(CONFIG_SOFTMMU)
38 #include <sys/ucontext.h>
42 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
43 // Work around ugly bugs in glibc that mangle global register contents
45 #define env cpu_single_env
48 int tb_invalidated_flag
;
50 //#define CONFIG_DEBUG_EXEC
51 //#define DEBUG_SIGNAL
/* Return non-zero when the given CPU has pending work (e.g. an
   interrupt) that should wake it; thin public wrapper around the
   per-target cpu_has_work() predicate. */
int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
/* Abort execution of the current translation block and return to the
   main loop: clear current_tb so no stale TB pointer survives, then
   longjmp back to the setjmp() point established in cpu_exec().
   NOTE: 'env' here is the global CPU state pointer (cpu_single_env on
   hosts where it is #defined that way). */
void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
64 /* exit the current TB from a signal handler. The host registers are
65 restored in a state compatible with the CPU emulator
67 #if defined(CONFIG_SOFTMMU)
68 void cpu_resume_from_signal(CPUState
*env1
, void *puc
)
72 /* XXX: restore cpu registers saved in host registers */
74 env
->exception_index
= -1;
75 longjmp(env
->jmp_env
, 1);
80 void cpu_resume_from_signal(CPUState
*env1
, void *puc
)
83 struct ucontext
*uc
= puc
;
84 #elif defined(__OpenBSD__)
85 struct sigcontext
*uc
= puc
;
90 /* XXX: restore cpu registers saved in host registers */
93 /* XXX: use siglongjmp ? */
96 sigprocmask(SIG_SETMASK
, (sigset_t
*)&uc
->uc_sigmask
, NULL
);
98 sigprocmask(SIG_SETMASK
, &uc
->uc_sigmask
, NULL
);
100 #elif defined(__OpenBSD__)
101 sigprocmask(SIG_SETMASK
, &uc
->sc_mask
, NULL
);
104 env
->exception_index
= -1;
105 longjmp(env
->jmp_env
, 1);
109 /* Execute the code without caching the generated code. An interpreter
110 could be used if available. */
111 static void cpu_exec_nocache(int max_cycles
, TranslationBlock
*orig_tb
)
113 unsigned long next_tb
;
114 TranslationBlock
*tb
;
116 /* Should never happen.
117 We only end up here when an existing TB is too long. */
118 if (max_cycles
> CF_COUNT_MASK
)
119 max_cycles
= CF_COUNT_MASK
;
121 tb
= tb_gen_code(env
, orig_tb
->pc
, orig_tb
->cs_base
, orig_tb
->flags
,
123 env
->current_tb
= tb
;
124 /* execute the generated code */
125 next_tb
= tcg_qemu_tb_exec(tb
->tc_ptr
);
126 env
->current_tb
= NULL
;
128 if ((next_tb
& 3) == 2) {
129 /* Restore PC. This may happen if async event occurs before
130 the TB starts executing. */
131 cpu_pc_from_tb(env
, tb
);
133 tb_phys_invalidate(tb
, -1);
137 static TranslationBlock
*tb_find_slow(target_ulong pc
,
138 target_ulong cs_base
,
141 TranslationBlock
*tb
, **ptb1
;
143 tb_page_addr_t phys_pc
, phys_page1
, phys_page2
;
144 target_ulong virt_page2
;
146 tb_invalidated_flag
= 0;
148 /* find translated block using physical mappings */
149 phys_pc
= get_page_addr_code(env
, pc
);
150 phys_page1
= phys_pc
& TARGET_PAGE_MASK
;
152 h
= tb_phys_hash_func(phys_pc
);
153 ptb1
= &tb_phys_hash
[h
];
159 tb
->page_addr
[0] == phys_page1
&&
160 tb
->cs_base
== cs_base
&&
161 tb
->flags
== flags
) {
162 /* check next page if needed */
163 if (tb
->page_addr
[1] != -1) {
164 virt_page2
= (pc
& TARGET_PAGE_MASK
) +
166 phys_page2
= get_page_addr_code(env
, virt_page2
);
167 if (tb
->page_addr
[1] == phys_page2
)
173 ptb1
= &tb
->phys_hash_next
;
176 /* if no translated code available, then translate it now */
177 tb
= tb_gen_code(env
, pc
, cs_base
, flags
, 0);
180 /* Move the last found TB to the head of the list */
182 *ptb1
= tb
->phys_hash_next
;
183 tb
->phys_hash_next
= tb_phys_hash
[h
];
184 tb_phys_hash
[h
] = tb
;
186 /* we add the TB in the virtual pc hash table */
187 env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)] = tb
;
191 static inline TranslationBlock
*tb_find_fast(void)
193 TranslationBlock
*tb
;
194 target_ulong cs_base
, pc
;
197 /* we record a subset of the CPU state. It will
198 always be the same before a given translated block
200 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &flags
);
201 tb
= env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)];
202 if (unlikely(!tb
|| tb
->pc
!= pc
|| tb
->cs_base
!= cs_base
||
203 tb
->flags
!= flags
)) {
204 tb
= tb_find_slow(pc
, cs_base
, flags
);
209 static CPUDebugExcpHandler
*debug_excp_handler
;
211 CPUDebugExcpHandler
*cpu_set_debug_excp_handler(CPUDebugExcpHandler
*handler
)
213 CPUDebugExcpHandler
*old_handler
= debug_excp_handler
;
215 debug_excp_handler
= handler
;
219 static void cpu_handle_debug_exception(CPUState
*env
)
223 if (!env
->watchpoint_hit
) {
224 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
225 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
228 if (debug_excp_handler
) {
229 debug_excp_handler(env
);
233 /* main execution loop */
235 volatile sig_atomic_t exit_request
;
237 int cpu_exec(CPUState
*env1
)
239 volatile host_reg_t saved_env_reg
;
240 int ret
, interrupt_request
;
241 TranslationBlock
*tb
;
243 unsigned long next_tb
;
246 if (!cpu_has_work(env1
)) {
253 cpu_single_env
= env1
;
255 /* the access to env below is actually saving the global register's
256 value, so that files not including target-xyz/exec.h are free to
258 QEMU_BUILD_BUG_ON (sizeof (saved_env_reg
) != sizeof (env
));
259 saved_env_reg
= (host_reg_t
) env
;
263 if (unlikely(exit_request
)) {
264 env
->exit_request
= 1;
267 #if defined(TARGET_I386)
268 /* put eflags in CPU temporary format */
269 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
270 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
271 CC_OP
= CC_OP_EFLAGS
;
272 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
273 #elif defined(TARGET_SPARC)
274 #elif defined(TARGET_M68K)
275 env
->cc_op
= CC_OP_FLAGS
;
276 env
->cc_dest
= env
->sr
& 0xf;
277 env
->cc_x
= (env
->sr
>> 4) & 1;
278 #elif defined(TARGET_ALPHA)
279 #elif defined(TARGET_ARM)
280 #elif defined(TARGET_UNICORE32)
281 #elif defined(TARGET_PPC)
282 #elif defined(TARGET_LM32)
283 #elif defined(TARGET_MICROBLAZE)
284 #elif defined(TARGET_MIPS)
285 #elif defined(TARGET_SH4)
286 #elif defined(TARGET_CRIS)
287 #elif defined(TARGET_S390X)
290 #error unsupported target CPU
292 env
->exception_index
= -1;
294 /* prepare setjmp context for exception handling */
296 if (setjmp(env
->jmp_env
) == 0) {
297 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
299 env
= cpu_single_env
;
300 #define env cpu_single_env
302 /* if an exception is pending, we execute it here */
303 if (env
->exception_index
>= 0) {
304 if (env
->exception_index
>= EXCP_INTERRUPT
) {
305 /* exit request from the cpu execution loop */
306 ret
= env
->exception_index
;
307 if (ret
== EXCP_DEBUG
) {
308 cpu_handle_debug_exception(env
);
312 #if defined(CONFIG_USER_ONLY)
313 /* if user mode only, we simulate a fake exception
314 which will be handled outside the cpu execution
316 #if defined(TARGET_I386)
317 do_interrupt_user(env
->exception_index
,
318 env
->exception_is_int
,
320 env
->exception_next_eip
);
321 /* successfully delivered */
322 env
->old_exception
= -1;
324 ret
= env
->exception_index
;
327 #if defined(TARGET_I386)
328 /* simulate a real cpu exception. On i386, it can
329 trigger new exceptions, but we do not handle
330 double or triple faults yet. */
331 do_interrupt(env
->exception_index
,
332 env
->exception_is_int
,
334 env
->exception_next_eip
, 0);
335 /* successfully delivered */
336 env
->old_exception
= -1;
337 #elif defined(TARGET_PPC)
339 #elif defined(TARGET_LM32)
341 #elif defined(TARGET_MICROBLAZE)
343 #elif defined(TARGET_MIPS)
345 #elif defined(TARGET_SPARC)
347 #elif defined(TARGET_ARM)
349 #elif defined(TARGET_UNICORE32)
351 #elif defined(TARGET_SH4)
353 #elif defined(TARGET_ALPHA)
355 #elif defined(TARGET_CRIS)
357 #elif defined(TARGET_M68K)
359 #elif defined(TARGET_S390X)
362 env
->exception_index
= -1;
367 next_tb
= 0; /* force lookup of first TB */
369 interrupt_request
= env
->interrupt_request
;
370 if (unlikely(interrupt_request
)) {
371 if (unlikely(env
->singlestep_enabled
& SSTEP_NOIRQ
)) {
372 /* Mask out external interrupts for this step. */
373 interrupt_request
&= ~CPU_INTERRUPT_SSTEP_MASK
;
375 if (interrupt_request
& CPU_INTERRUPT_DEBUG
) {
376 env
->interrupt_request
&= ~CPU_INTERRUPT_DEBUG
;
377 env
->exception_index
= EXCP_DEBUG
;
380 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
381 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
382 defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
383 if (interrupt_request
& CPU_INTERRUPT_HALT
) {
384 env
->interrupt_request
&= ~CPU_INTERRUPT_HALT
;
386 env
->exception_index
= EXCP_HLT
;
390 #if defined(TARGET_I386)
391 if (interrupt_request
& CPU_INTERRUPT_INIT
) {
392 svm_check_intercept(SVM_EXIT_INIT
);
394 env
->exception_index
= EXCP_HALTED
;
396 } else if (interrupt_request
& CPU_INTERRUPT_SIPI
) {
398 } else if (env
->hflags2
& HF2_GIF_MASK
) {
399 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
400 !(env
->hflags
& HF_SMM_MASK
)) {
401 svm_check_intercept(SVM_EXIT_SMI
);
402 env
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
405 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
406 !(env
->hflags2
& HF2_NMI_MASK
)) {
407 env
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
408 env
->hflags2
|= HF2_NMI_MASK
;
409 do_interrupt(EXCP02_NMI
, 0, 0, 0, 1);
411 } else if (interrupt_request
& CPU_INTERRUPT_MCE
) {
412 env
->interrupt_request
&= ~CPU_INTERRUPT_MCE
;
413 do_interrupt(EXCP12_MCHK
, 0, 0, 0, 0);
415 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
416 (((env
->hflags2
& HF2_VINTR_MASK
) &&
417 (env
->hflags2
& HF2_HIF_MASK
)) ||
418 (!(env
->hflags2
& HF2_VINTR_MASK
) &&
419 (env
->eflags
& IF_MASK
&&
420 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
))))) {
422 svm_check_intercept(SVM_EXIT_INTR
);
423 env
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VIRQ
);
424 intno
= cpu_get_pic_interrupt(env
);
425 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Servicing hardware INT=0x%02x\n", intno
);
426 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
428 env
= cpu_single_env
;
429 #define env cpu_single_env
431 do_interrupt(intno
, 0, 0, 0, 1);
432 /* ensure that no TB jump will be modified as
433 the program flow was changed */
435 #if !defined(CONFIG_USER_ONLY)
436 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
437 (env
->eflags
& IF_MASK
) &&
438 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
440 /* FIXME: this should respect TPR */
441 svm_check_intercept(SVM_EXIT_VINTR
);
442 intno
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_vector
));
443 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Servicing virtual hardware INT=0x%02x\n", intno
);
444 do_interrupt(intno
, 0, 0, 0, 1);
445 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
450 #elif defined(TARGET_PPC)
452 if ((interrupt_request
& CPU_INTERRUPT_RESET
)) {
456 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
457 ppc_hw_interrupt(env
);
458 if (env
->pending_interrupts
== 0)
459 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
462 #elif defined(TARGET_LM32)
463 if ((interrupt_request
& CPU_INTERRUPT_HARD
)
464 && (env
->ie
& IE_IE
)) {
465 env
->exception_index
= EXCP_IRQ
;
469 #elif defined(TARGET_MICROBLAZE)
470 if ((interrupt_request
& CPU_INTERRUPT_HARD
)
471 && (env
->sregs
[SR_MSR
] & MSR_IE
)
472 && !(env
->sregs
[SR_MSR
] & (MSR_EIP
| MSR_BIP
))
473 && !(env
->iflags
& (D_FLAG
| IMM_FLAG
))) {
474 env
->exception_index
= EXCP_IRQ
;
478 #elif defined(TARGET_MIPS)
479 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
480 cpu_mips_hw_interrupts_pending(env
)) {
482 env
->exception_index
= EXCP_EXT_INTERRUPT
;
487 #elif defined(TARGET_SPARC)
488 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
489 if (cpu_interrupts_enabled(env
) &&
490 env
->interrupt_index
> 0) {
491 int pil
= env
->interrupt_index
& 0xf;
492 int type
= env
->interrupt_index
& 0xf0;
494 if (((type
== TT_EXTINT
) &&
495 cpu_pil_allowed(env
, pil
)) ||
497 env
->exception_index
= env
->interrupt_index
;
503 #elif defined(TARGET_ARM)
504 if (interrupt_request
& CPU_INTERRUPT_FIQ
505 && !(env
->uncached_cpsr
& CPSR_F
)) {
506 env
->exception_index
= EXCP_FIQ
;
510 /* ARMv7-M interrupt return works by loading a magic value
511 into the PC. On real hardware the load causes the
512 return to occur. The qemu implementation performs the
513 jump normally, then does the exception return when the
514 CPU tries to execute code at the magic address.
515 This will cause the magic PC value to be pushed to
516 the stack if an interrupt occurred at the wrong time.
517 We avoid this by disabling interrupts when
518 pc contains a magic address. */
519 if (interrupt_request
& CPU_INTERRUPT_HARD
520 && ((IS_M(env
) && env
->regs
[15] < 0xfffffff0)
521 || !(env
->uncached_cpsr
& CPSR_I
))) {
522 env
->exception_index
= EXCP_IRQ
;
526 #elif defined(TARGET_UNICORE32)
527 if (interrupt_request
& CPU_INTERRUPT_HARD
528 && !(env
->uncached_asr
& ASR_I
)) {
532 #elif defined(TARGET_SH4)
533 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
537 #elif defined(TARGET_ALPHA)
538 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
542 #elif defined(TARGET_CRIS)
543 if (interrupt_request
& CPU_INTERRUPT_HARD
544 && (env
->pregs
[PR_CCS
] & I_FLAG
)
545 && !env
->locked_irq
) {
546 env
->exception_index
= EXCP_IRQ
;
550 if (interrupt_request
& CPU_INTERRUPT_NMI
551 && (env
->pregs
[PR_CCS
] & M_FLAG
)) {
552 env
->exception_index
= EXCP_NMI
;
556 #elif defined(TARGET_M68K)
557 if (interrupt_request
& CPU_INTERRUPT_HARD
558 && ((env
->sr
& SR_I
) >> SR_I_SHIFT
)
559 < env
->pending_level
) {
560 /* Real hardware gets the interrupt vector via an
561 IACK cycle at this point. Current emulated
562 hardware doesn't rely on this, so we
563 provide/save the vector when the interrupt is
565 env
->exception_index
= env
->pending_vector
;
569 #elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
570 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
571 (env
->psw
.mask
& PSW_MASK_EXT
)) {
576 /* Don't use the cached interrupt_request value,
577 do_interrupt may have updated the EXITTB flag. */
578 if (env
->interrupt_request
& CPU_INTERRUPT_EXITTB
) {
579 env
->interrupt_request
&= ~CPU_INTERRUPT_EXITTB
;
580 /* ensure that no TB jump will be modified as
581 the program flow was changed */
585 if (unlikely(env
->exit_request
)) {
586 env
->exit_request
= 0;
587 env
->exception_index
= EXCP_INTERRUPT
;
590 #if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
591 if (qemu_loglevel_mask(CPU_LOG_TB_CPU
)) {
592 /* restore flags in standard format */
593 #if defined(TARGET_I386)
594 env
->eflags
= env
->eflags
| helper_cc_compute_all(CC_OP
) | (DF
& DF_MASK
);
595 log_cpu_state(env
, X86_DUMP_CCOP
);
596 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
597 #elif defined(TARGET_M68K)
598 cpu_m68k_flush_flags(env
, env
->cc_op
);
599 env
->cc_op
= CC_OP_FLAGS
;
600 env
->sr
= (env
->sr
& 0xffe0)
601 | env
->cc_dest
| (env
->cc_x
<< 4);
602 log_cpu_state(env
, 0);
604 log_cpu_state(env
, 0);
607 #endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
610 /* Note: we do it here to avoid a gcc bug on Mac OS X when
611 doing it in tb_find_slow */
612 if (tb_invalidated_flag
) {
613 /* as some TB could have been invalidated because
614 of memory exceptions while generating the code, we
615 must recompute the hash index here */
617 tb_invalidated_flag
= 0;
619 #ifdef CONFIG_DEBUG_EXEC
620 qemu_log_mask(CPU_LOG_EXEC
, "Trace 0x%08lx [" TARGET_FMT_lx
"] %s\n",
621 (long)tb
->tc_ptr
, tb
->pc
,
622 lookup_symbol(tb
->pc
));
624 /* see if we can patch the calling TB. When the TB
625 spans two pages, we cannot safely do a direct
627 if (next_tb
!= 0 && tb
->page_addr
[1] == -1) {
628 tb_add_jump((TranslationBlock
*)(next_tb
& ~3), next_tb
& 3, tb
);
630 spin_unlock(&tb_lock
);
632 /* cpu_interrupt might be called while translating the
633 TB, but before it is linked into a potentially
634 infinite loop and becomes env->current_tb. Avoid
635 starting execution if there is a pending interrupt. */
636 env
->current_tb
= tb
;
638 if (likely(!env
->exit_request
)) {
640 /* execute the generated code */
641 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
643 env
= cpu_single_env
;
644 #define env cpu_single_env
646 next_tb
= tcg_qemu_tb_exec(tc_ptr
);
647 if ((next_tb
& 3) == 2) {
648 /* Instruction counter expired. */
650 tb
= (TranslationBlock
*)(long)(next_tb
& ~3);
652 cpu_pc_from_tb(env
, tb
);
653 insns_left
= env
->icount_decr
.u32
;
654 if (env
->icount_extra
&& insns_left
>= 0) {
655 /* Refill decrementer and continue execution. */
656 env
->icount_extra
+= insns_left
;
657 if (env
->icount_extra
> 0xffff) {
660 insns_left
= env
->icount_extra
;
662 env
->icount_extra
-= insns_left
;
663 env
->icount_decr
.u16
.low
= insns_left
;
665 if (insns_left
> 0) {
666 /* Execute remaining instructions. */
667 cpu_exec_nocache(insns_left
, tb
);
669 env
->exception_index
= EXCP_INTERRUPT
;
675 env
->current_tb
= NULL
;
676 /* reset soft MMU for next block (it can currently
677 only be set by a memory fault) */
683 #if defined(TARGET_I386)
684 /* restore flags in standard format */
685 env
->eflags
= env
->eflags
| helper_cc_compute_all(CC_OP
) | (DF
& DF_MASK
);
686 #elif defined(TARGET_ARM)
687 /* XXX: Save/restore host fpu exception state?. */
688 #elif defined(TARGET_UNICORE32)
689 #elif defined(TARGET_SPARC)
690 #elif defined(TARGET_PPC)
691 #elif defined(TARGET_LM32)
692 #elif defined(TARGET_M68K)
693 cpu_m68k_flush_flags(env
, env
->cc_op
);
694 env
->cc_op
= CC_OP_FLAGS
;
695 env
->sr
= (env
->sr
& 0xffe0)
696 | env
->cc_dest
| (env
->cc_x
<< 4);
697 #elif defined(TARGET_MICROBLAZE)
698 #elif defined(TARGET_MIPS)
699 #elif defined(TARGET_SH4)
700 #elif defined(TARGET_ALPHA)
701 #elif defined(TARGET_CRIS)
702 #elif defined(TARGET_S390X)
705 #error unsupported target CPU
708 /* restore global registers */
710 env
= (void *) saved_env_reg
;
712 /* fail safe : never use cpu_single_env outside cpu_exec() */
713 cpu_single_env
= NULL
;
717 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
719 void cpu_x86_load_seg(CPUX86State
*s
, int seg_reg
, int selector
)
721 CPUX86State
*saved_env
;
725 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
727 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
728 (selector
<< 4), 0xffff, 0);
730 helper_load_seg(seg_reg
, selector
);
735 void cpu_x86_fsave(CPUX86State
*s
, target_ulong ptr
, int data32
)
737 CPUX86State
*saved_env
;
742 helper_fsave(ptr
, data32
);
747 void cpu_x86_frstor(CPUX86State
*s
, target_ulong ptr
, int data32
)
749 CPUX86State
*saved_env
;
754 helper_frstor(ptr
, data32
);
759 #endif /* TARGET_I386 */
761 #if !defined(CONFIG_SOFTMMU)
763 #if defined(TARGET_I386)
764 #define EXCEPTION_ACTION \
765 raise_exception_err(env->exception_index, env->error_code)
767 #define EXCEPTION_ACTION \
771 /* 'pc' is the host PC at which the exception was raised. 'address' is
772 the effective address of the memory exception. 'is_write' is 1 if a
773 write caused the exception and otherwise 0'. 'old_set' is the
774 signal set which should be restored */
775 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
776 int is_write
, sigset_t
*old_set
,
779 TranslationBlock
*tb
;
782 if (cpu_single_env
) {
783 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
785 #if defined(DEBUG_SIGNAL)
786 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
787 pc
, address
, is_write
, *(unsigned long *)old_set
);
789 /* XXX: locking issue */
790 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
794 /* see if it is an MMU fault */
795 ret
= cpu_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
797 return 0; /* not an MMU fault */
800 return 1; /* the MMU fault was handled without causing real CPU fault */
802 /* now we have a real cpu fault */
805 /* the PC is inside the translated code. It means that we have
806 a virtual CPU fault */
807 cpu_restore_state(tb
, env
, pc
);
810 /* we restore the process signal mask as the sigreturn should
811 do it (XXX: use sigsetjmp) */
812 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
815 /* never comes here */
819 #if defined(__i386__)
821 #if defined(__APPLE__)
822 #include <sys/ucontext.h>
824 #define EIP_sig(context) (*((unsigned long *)&(context)->uc_mcontext->ss.eip))
825 #define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
826 #define ERROR_sig(context) ((context)->uc_mcontext->es.err)
827 #define MASK_sig(context) ((context)->uc_sigmask)
828 #elif defined(__NetBSD__)
829 #include <ucontext.h>
831 #define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP])
832 #define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
833 #define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
834 #define MASK_sig(context) ((context)->uc_sigmask)
835 #elif defined(__FreeBSD__) || defined(__DragonFly__)
836 #include <ucontext.h>
838 #define EIP_sig(context) (*((unsigned long *)&(context)->uc_mcontext.mc_eip))
839 #define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
840 #define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
841 #define MASK_sig(context) ((context)->uc_sigmask)
842 #elif defined(__OpenBSD__)
843 #define EIP_sig(context) ((context)->sc_eip)
844 #define TRAP_sig(context) ((context)->sc_trapno)
845 #define ERROR_sig(context) ((context)->sc_err)
846 #define MASK_sig(context) ((context)->sc_mask)
848 #define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
849 #define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
850 #define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
851 #define MASK_sig(context) ((context)->uc_sigmask)
854 int cpu_signal_handler(int host_signum
, void *pinfo
,
857 siginfo_t
*info
= pinfo
;
858 #if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
859 ucontext_t
*uc
= puc
;
860 #elif defined(__OpenBSD__)
861 struct sigcontext
*uc
= puc
;
863 struct ucontext
*uc
= puc
;
872 #define REG_TRAPNO TRAPNO
875 trapno
= TRAP_sig(uc
);
876 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
878 (ERROR_sig(uc
) >> 1) & 1 : 0,
882 #elif defined(__x86_64__)
885 #define PC_sig(context) _UC_MACHINE_PC(context)
886 #define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
887 #define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
888 #define MASK_sig(context) ((context)->uc_sigmask)
889 #elif defined(__OpenBSD__)
890 #define PC_sig(context) ((context)->sc_rip)
891 #define TRAP_sig(context) ((context)->sc_trapno)
892 #define ERROR_sig(context) ((context)->sc_err)
893 #define MASK_sig(context) ((context)->sc_mask)
894 #elif defined(__FreeBSD__) || defined(__DragonFly__)
895 #include <ucontext.h>
897 #define PC_sig(context) (*((unsigned long *)&(context)->uc_mcontext.mc_rip))
898 #define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
899 #define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
900 #define MASK_sig(context) ((context)->uc_sigmask)
902 #define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP])
903 #define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
904 #define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
905 #define MASK_sig(context) ((context)->uc_sigmask)
908 int cpu_signal_handler(int host_signum
, void *pinfo
,
911 siginfo_t
*info
= pinfo
;
913 #if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
914 ucontext_t
*uc
= puc
;
915 #elif defined(__OpenBSD__)
916 struct sigcontext
*uc
= puc
;
918 struct ucontext
*uc
= puc
;
922 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
923 TRAP_sig(uc
) == 0xe ?
924 (ERROR_sig(uc
) >> 1) & 1 : 0,
928 #elif defined(_ARCH_PPC)
930 /***********************************************************************
931 * signal context platform-specific definitions
935 /* All Registers access - only for local access */
936 #define REG_sig(reg_name, context) \
937 ((context)->uc_mcontext.regs->reg_name)
938 /* Gpr Registers access */
939 #define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
940 /* Program counter */
941 #define IAR_sig(context) REG_sig(nip, context)
942 /* Machine State Register (Supervisor) */
943 #define MSR_sig(context) REG_sig(msr, context)
945 #define CTR_sig(context) REG_sig(ctr, context)
946 /* User's integer exception register */
947 #define XER_sig(context) REG_sig(xer, context)
949 #define LR_sig(context) REG_sig(link, context)
950 /* Condition register */
951 #define CR_sig(context) REG_sig(ccr, context)
953 /* Float Registers access */
954 #define FLOAT_sig(reg_num, context) \
955 (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num])
956 #define FPSCR_sig(context) \
957 (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4)))
958 /* Exception Registers access */
959 #define DAR_sig(context) REG_sig(dar, context)
960 #define DSISR_sig(context) REG_sig(dsisr, context)
961 #define TRAP_sig(context) REG_sig(trap, context)
964 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
965 #include <ucontext.h>
966 #define IAR_sig(context) ((context)->uc_mcontext.mc_srr0)
967 #define MSR_sig(context) ((context)->uc_mcontext.mc_srr1)
968 #define CTR_sig(context) ((context)->uc_mcontext.mc_ctr)
969 #define XER_sig(context) ((context)->uc_mcontext.mc_xer)
970 #define LR_sig(context) ((context)->uc_mcontext.mc_lr)
971 #define CR_sig(context) ((context)->uc_mcontext.mc_cr)
972 /* Exception Registers access */
973 #define DAR_sig(context) ((context)->uc_mcontext.mc_dar)
974 #define DSISR_sig(context) ((context)->uc_mcontext.mc_dsisr)
975 #define TRAP_sig(context) ((context)->uc_mcontext.mc_exc)
976 #endif /* __FreeBSD__|| __FreeBSD_kernel__ */
979 #include <sys/ucontext.h>
980 typedef struct ucontext SIGCONTEXT
;
981 /* All Registers access - only for local access */
982 #define REG_sig(reg_name, context) \
983 ((context)->uc_mcontext->ss.reg_name)
984 #define FLOATREG_sig(reg_name, context) \
985 ((context)->uc_mcontext->fs.reg_name)
986 #define EXCEPREG_sig(reg_name, context) \
987 ((context)->uc_mcontext->es.reg_name)
988 #define VECREG_sig(reg_name, context) \
989 ((context)->uc_mcontext->vs.reg_name)
990 /* Gpr Registers access */
991 #define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
992 /* Program counter */
993 #define IAR_sig(context) REG_sig(srr0, context)
994 /* Machine State Register (Supervisor) */
995 #define MSR_sig(context) REG_sig(srr1, context)
996 #define CTR_sig(context) REG_sig(ctr, context)
998 #define XER_sig(context) REG_sig(xer, context)
999 /* User's integer exception register */
1000 #define LR_sig(context) REG_sig(lr, context)
1001 /* Condition register */
1002 #define CR_sig(context) REG_sig(cr, context)
1003 /* Float Registers access */
1004 #define FLOAT_sig(reg_num, context) \
1005 FLOATREG_sig(fpregs[reg_num], context)
1006 #define FPSCR_sig(context) \
1007 ((double)FLOATREG_sig(fpscr, context))
1008 /* Exception Registers access */
1009 /* Fault registers for coredump */
1010 #define DAR_sig(context) EXCEPREG_sig(dar, context)
1011 #define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1012 /* number of powerpc exception taken */
1013 #define TRAP_sig(context) EXCEPREG_sig(exception, context)
1014 #endif /* __APPLE__ */
1016 int cpu_signal_handler(int host_signum
, void *pinfo
,
1019 siginfo_t
*info
= pinfo
;
1020 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
1021 ucontext_t
*uc
= puc
;
1023 struct ucontext
*uc
= puc
;
1032 if (DSISR_sig(uc
) & 0x00800000) {
1036 if (TRAP_sig(uc
) != 0x400 && (DSISR_sig(uc
) & 0x02000000)) {
1040 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1041 is_write
, &uc
->uc_sigmask
, puc
);
1044 #elif defined(__alpha__)
1046 int cpu_signal_handler(int host_signum
, void *pinfo
,
1049 siginfo_t
*info
= pinfo
;
1050 struct ucontext
*uc
= puc
;
1051 uint32_t *pc
= uc
->uc_mcontext
.sc_pc
;
1052 uint32_t insn
= *pc
;
1055 /* XXX: need kernel patch to get write flag faster */
1056 switch (insn
>> 26) {
1057 case 0x0d: /* stw */
1058 case 0x0e: /* stb */
1059 case 0x0f: /* stq_u */
1060 case 0x24: /* stf */
1061 case 0x25: /* stg */
1062 case 0x26: /* sts */
1063 case 0x27: /* stt */
1064 case 0x2c: /* stl */
1065 case 0x2d: /* stq */
1066 case 0x2e: /* stl_c */
1067 case 0x2f: /* stq_c */
1071 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1072 is_write
, &uc
->uc_sigmask
, puc
);
1074 #elif defined(__sparc__)
1076 int cpu_signal_handler(int host_signum
, void *pinfo
,
1079 siginfo_t
*info
= pinfo
;
1082 #if !defined(__arch64__) || defined(CONFIG_SOLARIS)
1083 uint32_t *regs
= (uint32_t *)(info
+ 1);
1084 void *sigmask
= (regs
+ 20);
1085 /* XXX: is there a standard glibc define ? */
1086 unsigned long pc
= regs
[1];
1089 struct sigcontext
*sc
= puc
;
1090 unsigned long pc
= sc
->sigc_regs
.tpc
;
1091 void *sigmask
= (void *)sc
->sigc_mask
;
1092 #elif defined(__OpenBSD__)
1093 struct sigcontext
*uc
= puc
;
1094 unsigned long pc
= uc
->sc_pc
;
1095 void *sigmask
= (void *)(long)uc
->sc_mask
;
1099 /* XXX: need kernel patch to get write flag faster */
1101 insn
= *(uint32_t *)pc
;
1102 if ((insn
>> 30) == 3) {
1103 switch ((insn
>> 19) & 0x3f) {
1104 case 0x05: /* stb */
1105 case 0x15: /* stba */
1106 case 0x06: /* sth */
1107 case 0x16: /* stha */
1109 case 0x14: /* sta */
1110 case 0x07: /* std */
1111 case 0x17: /* stda */
1112 case 0x0e: /* stx */
1113 case 0x1e: /* stxa */
1114 case 0x24: /* stf */
1115 case 0x34: /* stfa */
1116 case 0x27: /* stdf */
1117 case 0x37: /* stdfa */
1118 case 0x26: /* stqf */
1119 case 0x36: /* stqfa */
1120 case 0x25: /* stfsr */
1121 case 0x3c: /* casa */
1122 case 0x3e: /* casxa */
1127 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1128 is_write
, sigmask
, NULL
);
1131 #elif defined(__arm__)
1133 int cpu_signal_handler(int host_signum
, void *pinfo
,
1136 siginfo_t
*info
= pinfo
;
1137 struct ucontext
*uc
= puc
;
1141 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1142 pc
= uc
->uc_mcontext
.gregs
[R15
];
1144 pc
= uc
->uc_mcontext
.arm_pc
;
1146 /* XXX: compute is_write */
1148 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1150 &uc
->uc_sigmask
, puc
);
1153 #elif defined(__mc68000)
1155 int cpu_signal_handler(int host_signum
, void *pinfo
,
1158 siginfo_t
*info
= pinfo
;
1159 struct ucontext
*uc
= puc
;
1163 pc
= uc
->uc_mcontext
.gregs
[16];
1164 /* XXX: compute is_write */
1166 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1168 &uc
->uc_sigmask
, puc
);
1171 #elif defined(__ia64)
1174 /* This ought to be in <bits/siginfo.h>... */
1175 # define __ISR_VALID 1
1178 int cpu_signal_handler(int host_signum
, void *pinfo
, void *puc
)
1180 siginfo_t
*info
= pinfo
;
1181 struct ucontext
*uc
= puc
;
1185 ip
= uc
->uc_mcontext
.sc_ip
;
1186 switch (host_signum
) {
1192 if (info
->si_code
&& (info
->si_segvflags
& __ISR_VALID
)) {
1193 /* ISR.W (write-access) is bit 33: */
1194 is_write
= (info
->si_isr
>> 33) & 1;
1201 return handle_cpu_signal(ip
, (unsigned long)info
->si_addr
,
1203 (sigset_t
*)&uc
->uc_sigmask
, puc
);
1206 #elif defined(__s390__)
1208 int cpu_signal_handler(int host_signum
, void *pinfo
,
1211 siginfo_t
*info
= pinfo
;
1212 struct ucontext
*uc
= puc
;
1217 pc
= uc
->uc_mcontext
.psw
.addr
;
1219 /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
1220 of the normal 2 arguments. The 3rd argument contains the "int_code"
1221 from the hardware which does in fact contain the is_write value.
1222 The rt signal handler, as far as I can tell, does not give this value
1223 at all. Not that we could get to it from here even if it were. */
1224 /* ??? This is not even close to complete, since it ignores all
1225 of the read-modify-write instructions. */
1226 pinsn
= (uint16_t *)pc
;
1227 switch (pinsn
[0] >> 8) {
1229 case 0x42: /* STC */
1230 case 0x40: /* STH */
1233 case 0xc4: /* RIL format insns */
1234 switch (pinsn
[0] & 0xf) {
1235 case 0xf: /* STRL */
1236 case 0xb: /* STGRL */
1237 case 0x7: /* STHRL */
1241 case 0xe3: /* RXY format insns */
1242 switch (pinsn
[2] & 0xff) {
1243 case 0x50: /* STY */
1244 case 0x24: /* STG */
1245 case 0x72: /* STCY */
1246 case 0x70: /* STHY */
1247 case 0x8e: /* STPQ */
1248 case 0x3f: /* STRVH */
1249 case 0x3e: /* STRV */
1250 case 0x2f: /* STRVG */
1255 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1256 is_write
, &uc
->uc_sigmask
, puc
);
1259 #elif defined(__mips__)
1261 int cpu_signal_handler(int host_signum
, void *pinfo
,
1264 siginfo_t
*info
= pinfo
;
1265 struct ucontext
*uc
= puc
;
1266 greg_t pc
= uc
->uc_mcontext
.pc
;
1269 /* XXX: compute is_write */
1271 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1272 is_write
, &uc
->uc_sigmask
, puc
);
1275 #elif defined(__hppa__)
1277 int cpu_signal_handler(int host_signum
, void *pinfo
,
1280 struct siginfo
*info
= pinfo
;
1281 struct ucontext
*uc
= puc
;
1282 unsigned long pc
= uc
->uc_mcontext
.sc_iaoq
[0];
1283 uint32_t insn
= *(uint32_t *)pc
;
1286 /* XXX: need kernel patch to get write flag faster. */
1287 switch (insn
>> 26) {
1288 case 0x1a: /* STW */
1289 case 0x19: /* STH */
1290 case 0x18: /* STB */
1291 case 0x1b: /* STWM */
1295 case 0x09: /* CSTWX, FSTWX, FSTWS */
1296 case 0x0b: /* CSTDX, FSTDX, FSTDS */
1297 /* Distinguish from coprocessor load ... */
1298 is_write
= (insn
>> 9) & 1;
1302 switch ((insn
>> 6) & 15) {
1303 case 0xa: /* STWS */
1304 case 0x9: /* STHS */
1305 case 0x8: /* STBS */
1306 case 0xe: /* STWAS */
1307 case 0xc: /* STBYS */
1313 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1314 is_write
, &uc
->uc_sigmask
, puc
);
1319 #error host CPU specific signal handler needed
1323 #endif /* !defined(CONFIG_SOFTMMU) */