[qemu.git] / cpu-exec.c
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#include "qemu-barrier.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

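/* Unwind to the setjmp() point in cpu_exec().  current_tb is cleared
   first so that the rest of QEMU sees that no translated block is
   currently executing. */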
void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

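/* Look the TB up in the physical-PC hash table, translating a fresh one
   if nothing matches.  A TB is only reusable when pc, cs_base, flags and
   the physical page(s) it was generated from all match; a TB that spans
   two guest pages must have both physical pages checked. */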
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

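/* Fast path: a direct-mapped lookup in env->tb_jmp_cache, indexed by a
   hash of the virtual PC.  On a miss or a stale entry we fall back to
   tb_find_slow(). */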
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

/* main execution loop */

volatile sig_atomic_t exit_request;

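/* exit_request is set asynchronously (hence sig_atomic_t) to kick the
   CPU out of the execution loop; it is mirrored into env->exit_request
   below.

   Note on next_tb: tcg_qemu_tb_exec() returns the address of the last
   executed TB with extra state folded into the two low bits.  The value
   2 in those bits means the instruction counter expired; otherwise they
   select which jump slot of the returned TB is to be patched by
   tb_add_jump() when chaining it to the next TB. */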
int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
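                /* tb_lock serialises the TB lookup and the jump-chain
                   patching below against other threads that may be
                   translating or invalidating code. */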
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
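                        /* The 16-bit icount_decr.u16.low field can hold
                           at most 0xffff pending instructions; a larger
                           budget is parked in icount_extra and moved over
                           in chunks as the decrementer drains. */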
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

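/* In real mode or VM86 mode the segment base is simply selector << 4;
   in protected mode the descriptor must be fetched and checked, which
   helper_load_seg() does (and which may raise a guest exception). */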
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

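/* What to do once a host signal has been mapped to a guest CPU fault:
   on i386 the guest exception is raised directly so that its error
   code is delivered; other targets simply unwind to the main loop. */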
#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)   ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)  ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)   ((context)->sc_eip)
# define TRAP_sig(context)  ((context)->sc_trapno)
# define ERROR_sig(context) ((context)->sc_err)
# define MASK_sig(context)  ((context)->sc_mask)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#endif

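/* On x86 hosts, trap number 0x0e is the page-fault exception; bit 1 of
   the page-fault error code is set when the faulting access was a
   write, which is exactly what handle_cpu_signal() needs. */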
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)    _UC_MACHINE_PC(context)
#define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)    ((context)->sc_rip)
#define TRAP_sig(context)  ((context)->sc_trapno)
#define ERROR_sig(context) ((context)->sc_err)
#define MASK_sig(context)  ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)    (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)  ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)  ((context)->uc_sigmask)
#else
#define PC_sig(context)    ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context) REG_sig(link, context) /* Link register */
# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context) REG_sig(dar, context)
# define DSISR_sig(context) REG_sig(dsisr, context)
# define TRAP_sig(context) REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context) ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context) ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context) ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context) ((context)->uc_mcontext.mc_xer)
# define LR_sig(context) ((context)->uc_mcontext.mc_lr)
# define CR_sig(context) ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context) ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context) ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context) ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__ || __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context) REG_sig(ctr, context)
# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context) REG_sig(lr, context) /* Link register */
# define CR_sig(context) REG_sig(cr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

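/* SPARC format-3 (memory) instructions have the value 3 in their top two
   bits; the op3 field in bits 24:19 then identifies the individual
   operation, so the decoder below only needs to whitelist the store
   opcodes. */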
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all.  Not that we could get to it from here even if it were.  */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions.  */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster.  */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */