/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;
static unsigned long next_tb;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand here, because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
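
/* Illustrative sketch: both cpu_loop_exit() and cpu_resume_from_signal()
   unwind to the setjmp(env->jmp_env) at the top of the outer for(;;) loop
   in cpu_exec() below, which pairs up as:

       if (setjmp(env->jmp_env) == 0) {
           ... run translated code; anything may longjmp(env->jmp_env, 1) ...
       } else {
           env_to_regs();   // re-synchronize state, then loop again
       }
*/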

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
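
/* Illustrative sketch: the slow path above keys TBs on the *physical* PC,
   so translations survive changes to the guest's virtual mappings. A
   lookup therefore always reduces to:

       target_ulong phys = get_phys_addr_code(env, pc);      // virt -> phys
       TranslationBlock *p = tb_phys_hash[tb_phys_hash_func(phys)];
       // walk p->phys_hash_next comparing pc, cs_base and flags, as above

   The tb_jmp_cache[] update at "found:" is what lets the next execution of
   the same virtual PC take the fast path in tb_find_fast() instead. */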

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & U_FLAG;
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            next_tb = 0;
        }
    }
    return tb;
}
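
/* Example of why 'flags' belongs in the lookup key: on x86 the same
   (cs_base, eip) pair must translate differently when hflags bits such as
   the CPL or VM86 mode change, so after a mode switch the compare above
   misses and falls back to tb_find_slow() rather than reusing a TB that
   was generated under the old mode. */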

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    && likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                               !(env->hflags & HF_NMI_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                        env->hflags |= HF_NMI_MASK;
                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        next_tb = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
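                /* The encoding used above (inferred from the masks): next_tb,
                   as returned by tcg_qemu_tb_exec() below, packs the address
                   of the TB that just ran in the upper bits and the index of
                   the jump slot it exited through in the low 2 bits, i.e.

                       TranslationBlock *last = (TranslationBlock *)(next_tb & ~3);
                       int slot = next_tb & 3;          // which goto_tb exit
                       tb_add_jump(last, slot, tb);     // patch it to chain to 'tb'

                   and a value of 0 means there is no caller to patch. */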
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                env = cpu_single_env;
#define env cpu_single_env
#endif
                next_tb = tcg_qemu_tb_exec(tc_ptr);
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    next_tb = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it triggers an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception, and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
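
/* Summary of the return convention shared by every handle_cpu_signal()
   variant in this file: 0 means the fault was not a guest MMU fault and
   the host must deal with the signal itself; 1 means it was handled here
   (page unprotected or TLB filled). When a guest exception has to be
   raised instead, the function longjmps back into cpu_exec() and the
   trailing "return 1" is never reached. */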

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
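
/* Trap number 0xe is the x86 page-fault exception (#PF). In the error
   code pushed by the CPU, bit 0 distinguishes protection faults from
   not-present faults and bit 1 is set when the access was a write, hence
   the (ERROR_sig(uc) >> 1) & 1 above; for any other trap the handler
   conservatively reports a read. */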

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)  ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)   REG_sig(gpr[reg_num], context)
# define IAR_sig(context)            REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)            REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)            REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)            REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)             REG_sig(link, context) /* Link register */
# define CR_sig(context)             REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)          (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)            REG_sig(dar, context)
# define DSISR_sig(context)          REG_sig(dsisr, context)
# define TRAP_sig(context)           REG_sig(trap, context)
#endif /* linux */
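
/* Note on the FLOAT_sig()/FPSCR_sig() arithmetic above (inherited from
   Wine, so the exact layout is an assumption here): the offsets appear to
   skip the 48 word-sized general/special register slots that precede the
   floating-point save area, with (48 + 32*2)*4 additionally skipping the
   32 double-width FP registers to reach the FPSCR. */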

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)  /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)   /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__sparc_v9__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
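
/* Decoding note for the SPARC case above: (insn >> 30) == 3 selects the
   load/store instruction format, and (insn >> 19) & 0x3f extracts the
   6-bit op3 field in bits 19..24; the op3 values listed are the integer
   and FP store opcodes, so matching any of them marks the faulting access
   as a write. */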

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.arm_pc;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */