/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
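
/* Exits from translated code are implemented with setjmp/longjmp:
   cpu_loop_exit() above and cpu_resume_from_signal() below both
   longjmp() back to the setjmp(env->jmp_env) established at the top of
   the main loop in cpu_exec(), which then re-dispatches on
   env->exception_index. */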

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
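    /* The low two bits of next_tb carry status from the generated code:
       next_tb & ~3 is the descriptor of the TB that exited, and a value
       of 2 in the low bits signals that the instruction counter expired
       (see the matching test in the main loop of cpu_exec()). */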

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
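    /* TBs are hashed by physical rather than virtual address so that a
       cached block stays valid when the guest remaps virtual pages: two
       virtual aliases of the same physical code share one translation,
       and a block is only reusable if its backing physical page(s)
       still match. */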
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
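    /* While translated code runs, the i386 emulator keeps eflags split
       into lazy pieces: CC_SRC/CC_OP describe how to recompute the
       arithmetic flags on demand (via helper_cc_compute_all()), and DF
       is held as +1/-1 for string operations.  The lines above
       deconstruct eflags into that form; the epilogue below reassembles
       it. */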
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value;
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
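                /* tb_add_jump() patches jump slot (next_tb & 3) of the
                   TB that just exited so that it branches directly into
                   the new TB, letting subsequent iterations chain from
                   block to block inside generated code without returning
                   to this loop. */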
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb.  Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely(env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
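                        /* icount bookkeeping: the 16-bit decrementer
                           (icount_decr.u16.low) is counted down by the
                           generated code, while icount_extra holds the
                           instructions that did not fit into 16 bits.
                           On expiry we either refill the decrementer
                           from icount_extra or run the final few
                           instructions without caching the TB. */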
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
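
/* A minimal caller-side sketch (hypothetical, for illustration only;
   the real drivers live in the system and user-mode main loops):
   cpu_exec() is called repeatedly and its EXCP_* result dispatched:

       for (;;) {
           int r = cpu_exec(env);         // run until exception/interrupt
           if (r == EXCP_DEBUG)
               handle_breakpoint(env);    // hypothetical helper
           else if (r == EXCP_HLT || r == EXCP_HALTED)
               wait_for_interrupt(env);   // hypothetical helper
       }
 */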

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)              REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)   ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)   ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)     ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)         REG_sig(r##reg_num, context)
# define IAR_sig(context)                  REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                  REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)
# define XER_sig(context)                  REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)                   REG_sig(lr, context)  /* Link register */
# define CR_sig(context)                   REG_sig(cr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)       FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                  EXCEPREG_sig(dar, context)   /* Fault registers for coredump */
# define DSISR_sig(context)                EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                 EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
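    /* SPARC format-3 (memory) instructions have op == 3 in bits 31:30;
       the op3 field in bits 24:19 then selects the access type, so the
       shift and mask below isolate the store opcodes. */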
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */