/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#include "qemu-barrier.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
/* Work around ugly bugs in glibc that mangle global register contents */
#undef env
#define env cpu_single_env
#endif

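/* Cleared by tb_find_slow() before each lookup; the translator sets it
   when translated code may have been invalidated in the meantime, and
   cpu_exec() tests it below so that no direct jump is chained to a
   possibly stale TB.  (The setters live outside this file.) */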
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
    env = env1;

    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

#else

void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

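/* Note on the return value used throughout this file: tcg_qemu_tb_exec()
   returns the address of the last executed TB with status folded into
   the two low bits.  Values 0/1 name the goto_tb jump slot of that TB
   (consumed by tb_add_jump() in the main loop), while 2 means the block
   was exited early, e.g. because the instruction counter expired. */
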
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

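/* TB lookup is two-level: tb_find_fast() below consults the per-CPU
   virtual-PC hash table (env->tb_jmp_cache); on a miss, tb_find_slow()
   walks the physical hash chain (tb_phys_hash), which is keyed on the
   code's physical address so that a block can be shared across virtual
   aliases and refound after the jump cache is flushed. */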
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

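/* Global request to break out of the CPU loop; declared volatile
   sig_atomic_t because it is typically raised from signal context.
   cpu_exec() folds it into env->exit_request on entry. */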
volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env1->halted) {
        if (!cpu_has_work(env1)) {
            return EXCP_HALTED;
        }

        env1->halted = 0;
    }

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it. */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_LM32)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_UNICORE32)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_S390X)
                    do_interrupt(env);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
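                /* tb_add_jump() patches goto_tb slot (next_tb & 3) of
                   the previously executed block so that it branches
                   straight to tb->tc_ptr, letting future runs bypass
                   this lookup until the chain is reset (next_tb = 0). */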
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
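                        /* The icount budget is split between a 16-bit
                           decrementer (icount_decr.u16.low), counted
                           down by the generated code, and an overflow
                           pool (icount_extra) that refills it below. */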
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION \
    raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION \
    cpu_loop_exit()
#endif
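/* On i386 the fault is re-raised as a guest exception so that the normal
   exception path runs (including error code delivery); other targets
   simply longjmp back into cpu_exec() via cpu_loop_exit(). */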

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env) {
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
    }
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
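    /* Pages holding translated code are kept write-protected by the
       user-mode emulator, so a write fault may just mean the guest is
       modifying its own code; page_unprotect() then invalidates the
       affected TBs and lifts the protection so the access can be
       restarted. */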
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0) {
        return 0; /* not an MMU fault */
    }
    if (ret == 0) {
        return 1; /* the MMU fault was handled without causing real CPU fault */
    }
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}

#if defined(__i386__)

#if defined(__APPLE__)
#include <sys/ucontext.h>

#define EIP_sig(context)   (*((unsigned long *)&(context)->uc_mcontext->ss.eip))
#define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
#define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__NetBSD__)
#include <ucontext.h>

#define EIP_sig(context)   ((context)->uc_mcontext.__gregs[_REG_EIP])
#define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define EIP_sig(context)   (*((unsigned long *)&(context)->uc_mcontext.mc_eip))
#define TRAP_sig(context)  ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define EIP_sig(context)   ((context)->sc_eip)
#define TRAP_sig(context)  ((context)->sc_trapno)
#define ERROR_sig(context) ((context)->sc_err)
#define MASK_sig(context)  ((context)->sc_mask)
#else
#define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
#define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
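    /* Trap 0xe is the x86 page fault; bit 1 of its error code is set
       when the faulting access was a write, which is what the
       (ERROR_sig(uc) >> 1) & 1 expression below extracts. */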
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)    _UC_MACHINE_PC(context)
#define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)    ((context)->sc_rip)
#define TRAP_sig(context)  ((context)->sc_trapno)
#define ERROR_sig(context) ((context)->sc_err)
#define MASK_sig(context)  ((context)->sc_mask)
#elif defined(__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)    (*((unsigned long *)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)  ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)  ((context)->uc_sigmask)
#else
#define PC_sig(context)    ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
#define REG_sig(reg_name, context) \
    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
#define GPR_sig(reg_num, context)  REG_sig(gpr[reg_num], context)
/* Program counter */
#define IAR_sig(context)           REG_sig(nip, context)
/* Machine State Register (Supervisor) */
#define MSR_sig(context)           REG_sig(msr, context)
/* Count register */
#define CTR_sig(context)           REG_sig(ctr, context)
/* User's integer exception register */
#define XER_sig(context)           REG_sig(xer, context)
/* Link register */
#define LR_sig(context)            REG_sig(link, context)
/* Condition register */
#define CR_sig(context)            REG_sig(ccr, context)

/* Float Registers access */
#define FLOAT_sig(reg_num, context) \
    (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num])
#define FPSCR_sig(context) \
    (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4)))
/* Exception Registers access */
#define DAR_sig(context)           REG_sig(dar, context)
#define DSISR_sig(context)         REG_sig(dsisr, context)
#define TRAP_sig(context)          REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
#define IAR_sig(context)   ((context)->uc_mcontext.mc_srr0)
#define MSR_sig(context)   ((context)->uc_mcontext.mc_srr1)
#define CTR_sig(context)   ((context)->uc_mcontext.mc_ctr)
#define XER_sig(context)   ((context)->uc_mcontext.mc_xer)
#define LR_sig(context)    ((context)->uc_mcontext.mc_lr)
#define CR_sig(context)    ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
#define DAR_sig(context)   ((context)->uc_mcontext.mc_dar)
#define DSISR_sig(context) ((context)->uc_mcontext.mc_dsisr)
#define TRAP_sig(context)  ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__ || __FreeBSD_kernel__ */

#ifdef __APPLE__
#include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
#define REG_sig(reg_name, context) \
    ((context)->uc_mcontext->ss.reg_name)
#define FLOATREG_sig(reg_name, context) \
    ((context)->uc_mcontext->fs.reg_name)
#define EXCEPREG_sig(reg_name, context) \
    ((context)->uc_mcontext->es.reg_name)
#define VECREG_sig(reg_name, context) \
    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
#define GPR_sig(reg_num, context)  REG_sig(r##reg_num, context)
/* Program counter */
#define IAR_sig(context)           REG_sig(srr0, context)
/* Machine State Register (Supervisor) */
#define MSR_sig(context)           REG_sig(srr1, context)
/* Count register */
#define CTR_sig(context)           REG_sig(ctr, context)
/* User's integer exception register */
#define XER_sig(context)           REG_sig(xer, context)
/* Link register */
#define LR_sig(context)            REG_sig(lr, context)
/* Condition register */
#define CR_sig(context)            REG_sig(cr, context)
/* Float Registers access */
#define FLOAT_sig(reg_num, context) \
    FLOATREG_sig(fpregs[reg_num], context)
#define FPSCR_sig(context) \
    ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
/* Fault registers for coredump */
#define DAR_sig(context)           EXCEPREG_sig(dar, context)
#define DSISR_sig(context)         EXCEPREG_sig(dsisr, context)
/* number of powerpc exception taken */
#define TRAP_sig(context)          EXCEPREG_sig(exception, context)
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000) {
        is_write = 1;
    }
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000)) {
        is_write = 1;
    }
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: /* stw */
    case 0x0e: /* stb */
    case 0x0f: /* stq_u */
    case 0x24: /* stf */
    case 0x25: /* stg */
    case 0x26: /* sts */
    case 0x27: /* stt */
    case 0x2c: /* stl */
    case 0x2d: /* stq */
    case 0x2e: /* stl_c */
    case 0x2f: /* stq_c */
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
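    /* Instructions with the op field (bits 31:30) equal to 3 form the
       SPARC load/store group; the op3 opcode in bits 24:19 selects the
       store variants tested below. */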
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: /* stb */
        case 0x15: /* stba */
        case 0x06: /* sth */
        case 0x16: /* stha */
        case 0x04: /* st */
        case 0x14: /* sta */
        case 0x07: /* std */
        case 0x17: /* stda */
        case 0x0e: /* stx */
        case 0x1e: /* stxa */
        case 0x24: /* stf */
        case 0x34: /* stfa */
        case 0x27: /* stdf */
        case 0x37: /* stdfa */
        case 0x26: /* stqf */
        case 0x36: /* stqfa */
        case 0x25: /* stfsr */
        case 0x3c: /* casa */
        case 0x3e: /* casxa */
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
/* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID)) {
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        }
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all.  Not that we could get to it from here even if it were.  */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions.  */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster.  */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */