1 /*
2 * i386 emulator main execution loop
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #define CPU_NO_GLOBAL_REGS
22 #include "exec.h"
23 #include "disas.h"
24 #include "tcg.h"
25
26 #if !defined(CONFIG_SOFTMMU)
27 #undef EAX
28 #undef ECX
29 #undef EDX
30 #undef EBX
31 #undef ESP
32 #undef EBP
33 #undef ESI
34 #undef EDI
35 #undef EIP
36 #include <signal.h>
37 #include <sys/ucontext.h>
38 #endif
39
40 #if defined(__sparc__) && !defined(HOST_SOLARIS)
41 // Work around ugly bugs in glibc that mangle global register contents
42 #undef env
43 #define env cpu_single_env
44 #endif
45
46 int tb_invalidated_flag;
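/* next_tb caches the address of the previously executed TB, with the
   index of the jump slot that was taken encoded in its low two bits.
   It is used for direct block chaining and is reset to 0 whenever
   chaining to the previous block would be unsafe. */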
47 static unsigned long next_tb;
48
49 //#define DEBUG_EXEC
50 //#define DEBUG_SIGNAL
51
52 void cpu_loop_exit(void)
53 {
54     /* NOTE: the registers at this point must be saved by hand because
55        longjmp restores them */
56 regs_to_env();
57 longjmp(env->jmp_env, 1);
58 }
59
60 #if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
61 #define reg_T2
62 #endif
63
64 /* exit the current TB from a signal handler. The host registers are
65 restored in a state compatible with the CPU emulator
66 */
67 void cpu_resume_from_signal(CPUState *env1, void *puc)
68 {
69 #if !defined(CONFIG_SOFTMMU)
70 struct ucontext *uc = puc;
71 #endif
72
73 env = env1;
74
75 /* XXX: restore cpu registers saved in host registers */
76
77 #if !defined(CONFIG_SOFTMMU)
78 if (puc) {
79 /* XXX: use siglongjmp ? */
80 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
81 }
82 #endif
83 longjmp(env->jmp_env, 1);
84 }
85
86 static TranslationBlock *tb_find_slow(target_ulong pc,
87 target_ulong cs_base,
88 uint64_t flags)
89 {
90 TranslationBlock *tb, **ptb1;
91 int code_gen_size;
92 unsigned int h;
93 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
94 uint8_t *tc_ptr;
95
96 spin_lock(&tb_lock);
97
98 tb_invalidated_flag = 0;
99
100 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
101
102 /* find translated block using physical mappings */
103 phys_pc = get_phys_addr_code(env, pc);
104 phys_page1 = phys_pc & TARGET_PAGE_MASK;
105 phys_page2 = -1;
106 h = tb_phys_hash_func(phys_pc);
107 ptb1 = &tb_phys_hash[h];
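    /* walk the hash chain: a match must agree on pc, cs_base and flags,
       and, for a TB spanning two pages, on the physical address of the
       second page as well */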
108 for(;;) {
109 tb = *ptb1;
110 if (!tb)
111 goto not_found;
112 if (tb->pc == pc &&
113 tb->page_addr[0] == phys_page1 &&
114 tb->cs_base == cs_base &&
115 tb->flags == flags) {
116 /* check next page if needed */
117 if (tb->page_addr[1] != -1) {
118 virt_page2 = (pc & TARGET_PAGE_MASK) +
119 TARGET_PAGE_SIZE;
120 phys_page2 = get_phys_addr_code(env, virt_page2);
121 if (tb->page_addr[1] == phys_page2)
122 goto found;
123 } else {
124 goto found;
125 }
126 }
127 ptb1 = &tb->phys_hash_next;
128 }
129 not_found:
130 /* if no translated code available, then translate it now */
131 tb = tb_alloc(pc);
132 if (!tb) {
133 /* flush must be done */
134 tb_flush(env);
135 /* cannot fail at this point */
136 tb = tb_alloc(pc);
137 /* don't forget to invalidate previous TB info */
138 tb_invalidated_flag = 1;
139 }
140 tc_ptr = code_gen_ptr;
141 tb->tc_ptr = tc_ptr;
142 tb->cs_base = cs_base;
143 tb->flags = flags;
144 cpu_gen_code(env, tb, &code_gen_size);
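    /* advance the generated-code pointer past the block just emitted,
       rounded up to CODE_GEN_ALIGN */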
145 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
146
147 /* check next page if needed */
148 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
149 phys_page2 = -1;
150 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
151 phys_page2 = get_phys_addr_code(env, virt_page2);
152 }
153 tb_link_phys(tb, phys_pc, phys_page2);
154
155 found:
156     /* we add the TB to the virtual pc hash table */
157 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
158 spin_unlock(&tb_lock);
159 return tb;
160 }
161
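/* Fast TB lookup: a direct-mapped cache indexed by virtual PC
   (env->tb_jmp_cache) is tried first; on a miss, or if the cached
   entry does not match the full (pc, cs_base, flags) tuple,
   tb_find_slow() falls back to the physically indexed hash table. */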
162 static inline TranslationBlock *tb_find_fast(void)
163 {
164 TranslationBlock *tb;
165 target_ulong cs_base, pc;
166 uint64_t flags;
167
168 /* we record a subset of the CPU state. It will
169 always be the same before a given translated block
170 is executed. */
171 #if defined(TARGET_I386)
172 flags = env->hflags;
173 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
174 flags |= env->intercept;
175 cs_base = env->segs[R_CS].base;
176 pc = cs_base + env->eip;
177 #elif defined(TARGET_ARM)
178 flags = env->thumb | (env->vfp.vec_len << 1)
179 | (env->vfp.vec_stride << 4);
180 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
181 flags |= (1 << 6);
182 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
183 flags |= (1 << 7);
184 flags |= (env->condexec_bits << 8);
185 cs_base = 0;
186 pc = env->regs[15];
187 #elif defined(TARGET_SPARC)
188 #ifdef TARGET_SPARC64
189 // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
190 flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
191 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
192 #else
193 // FPU enable . Supervisor
194 flags = (env->psref << 4) | env->psrs;
195 #endif
196 cs_base = env->npc;
197 pc = env->pc;
198 #elif defined(TARGET_PPC)
199 flags = env->hflags;
200 cs_base = 0;
201 pc = env->nip;
202 #elif defined(TARGET_MIPS)
203 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
204 cs_base = 0;
205 pc = env->PC[env->current_tc];
206 #elif defined(TARGET_M68K)
207 flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
208 | (env->sr & SR_S) /* Bit 13 */
209 | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
210 cs_base = 0;
211 pc = env->pc;
212 #elif defined(TARGET_SH4)
213 flags = env->flags;
214 cs_base = 0;
215 pc = env->pc;
216 #elif defined(TARGET_ALPHA)
217 flags = env->ps;
218 cs_base = 0;
219 pc = env->pc;
220 #elif defined(TARGET_CRIS)
221 flags = env->pregs[PR_CCS] & U_FLAG;
222 flags |= env->dslot;
223 cs_base = 0;
224 pc = env->pc;
225 #else
226 #error unsupported CPU
227 #endif
228 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
229 if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
230 tb->flags != flags, 0)) {
231 tb = tb_find_slow(pc, cs_base, flags);
232 /* Note: we do it here to avoid a gcc bug on Mac OS X when
233 doing it in tb_find_slow */
234 if (tb_invalidated_flag) {
235         /* as some TBs may have been invalidated because of memory
236            exceptions while generating the code, we must recompute
237            the hash index here */
238 next_tb = 0;
239 }
240 }
241 return tb;
242 }
243
244 /* main execution loop */
245
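/* cpu_exec() runs translated blocks until an exception or an
   interrupt forces an exit. The outer for(;;) re-enters through
   setjmp(); exception paths and cpu_loop_exit() come back here
   via longjmp(). */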
246 int cpu_exec(CPUState *env1)
247 {
248 #define DECLARE_HOST_REGS 1
249 #include "hostregs_helper.h"
250 #if defined(TARGET_SPARC)
251 #if defined(reg_REGWPTR)
252 uint32_t *saved_regwptr;
253 #endif
254 #endif
255 int ret, interrupt_request;
256 TranslationBlock *tb;
257 uint8_t *tc_ptr;
258
259 if (cpu_halted(env1) == EXCP_HALTED)
260 return EXCP_HALTED;
261
262 cpu_single_env = env1;
263
264 /* first we save global registers */
265 #define SAVE_HOST_REGS 1
266 #include "hostregs_helper.h"
267 env = env1;
268
269 env_to_regs();
270 #if defined(TARGET_I386)
271 /* put eflags in CPU temporary format */
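    /* (the arithmetic flags are tracked lazily in CC_SRC/CC_OP, and DF
       is kept as +1/-1 so string instructions can add it directly) */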
272 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
273 DF = 1 - (2 * ((env->eflags >> 10) & 1));
274 CC_OP = CC_OP_EFLAGS;
275 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
276 #elif defined(TARGET_SPARC)
277 #if defined(reg_REGWPTR)
278 saved_regwptr = REGWPTR;
279 #endif
280 #elif defined(TARGET_M68K)
281 env->cc_op = CC_OP_FLAGS;
282 env->cc_dest = env->sr & 0xf;
283 env->cc_x = (env->sr >> 4) & 1;
284 #elif defined(TARGET_ALPHA)
285 #elif defined(TARGET_ARM)
286 #elif defined(TARGET_PPC)
287 #elif defined(TARGET_MIPS)
288 #elif defined(TARGET_SH4)
289 #elif defined(TARGET_CRIS)
290 /* XXXXX */
291 #else
292 #error unsupported target CPU
293 #endif
294 env->exception_index = -1;
295
296 /* prepare setjmp context for exception handling */
297 for(;;) {
298 if (setjmp(env->jmp_env) == 0) {
299 env->current_tb = NULL;
300             /* if an exception is pending, we handle it here */
301 if (env->exception_index >= 0) {
302 if (env->exception_index >= EXCP_INTERRUPT) {
303 /* exit request from the cpu execution loop */
304 ret = env->exception_index;
305 break;
306 } else if (env->user_mode_only) {
307                 /* if user mode only, we raise a fake exception
308                    which will be handled outside the cpu execution
309                    loop */
310 #if defined(TARGET_I386)
311 do_interrupt_user(env->exception_index,
312 env->exception_is_int,
313 env->error_code,
314 env->exception_next_eip);
315 /* successfully delivered */
316 env->old_exception = -1;
317 #endif
318 ret = env->exception_index;
319 break;
320 } else {
321 #if defined(TARGET_I386)
322 /* simulate a real cpu exception. On i386, it can
323 trigger new exceptions, but we do not handle
324 double or triple faults yet. */
325 do_interrupt(env->exception_index,
326 env->exception_is_int,
327 env->error_code,
328 env->exception_next_eip, 0);
329 /* successfully delivered */
330 env->old_exception = -1;
331 #elif defined(TARGET_PPC)
332 do_interrupt(env);
333 #elif defined(TARGET_MIPS)
334 do_interrupt(env);
335 #elif defined(TARGET_SPARC)
336 do_interrupt(env);
337 #elif defined(TARGET_ARM)
338 do_interrupt(env);
339 #elif defined(TARGET_SH4)
340 do_interrupt(env);
341 #elif defined(TARGET_ALPHA)
342 do_interrupt(env);
343 #elif defined(TARGET_CRIS)
344 do_interrupt(env);
345 #elif defined(TARGET_M68K)
346 do_interrupt(0);
347 #endif
348 }
349 env->exception_index = -1;
350 }
351 #ifdef USE_KQEMU
352 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
353 int ret;
354 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
355 ret = kqemu_cpu_exec(env);
356 /* put eflags in CPU temporary format */
357 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
358 DF = 1 - (2 * ((env->eflags >> 10) & 1));
359 CC_OP = CC_OP_EFLAGS;
360 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
361 if (ret == 1) {
362 /* exception */
363 longjmp(env->jmp_env, 1);
364 } else if (ret == 2) {
365 /* softmmu execution needed */
366 } else {
367 if (env->interrupt_request != 0) {
368 /* hardware interrupt will be executed just after */
369 } else {
370 /* otherwise, we restart */
371 longjmp(env->jmp_env, 1);
372 }
373 }
374 }
375 #endif
376
377 next_tb = 0; /* force lookup of first TB */
378 for(;;) {
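                /* inner loop: one iteration per translated block. Pending
                   interrupts are checked first, then the TB for the current
                   CPU state is looked up (or translated) and executed. */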
379 interrupt_request = env->interrupt_request;
380 if (__builtin_expect(interrupt_request, 0)
381 #if defined(TARGET_I386)
382 && env->hflags & HF_GIF_MASK
383 #endif
384 && likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
385 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
386 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
387 env->exception_index = EXCP_DEBUG;
388 cpu_loop_exit();
389 }
390 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
391 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
392 if (interrupt_request & CPU_INTERRUPT_HALT) {
393 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
394 env->halted = 1;
395 env->exception_index = EXCP_HLT;
396 cpu_loop_exit();
397 }
398 #endif
399 #if defined(TARGET_I386)
400 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
401 !(env->hflags & HF_SMM_MASK)) {
402 svm_check_intercept(SVM_EXIT_SMI);
403 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
404 do_smm_enter();
405 next_tb = 0;
406 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
407 !(env->hflags & HF_NMI_MASK)) {
408 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
409 env->hflags |= HF_NMI_MASK;
410 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
411 next_tb = 0;
412 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
413 (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
414 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
415 int intno;
416 svm_check_intercept(SVM_EXIT_INTR);
417 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
418 intno = cpu_get_pic_interrupt(env);
419 if (loglevel & CPU_LOG_TB_IN_ASM) {
420 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
421 }
422 do_interrupt(intno, 0, 0, 0, 1);
423                     /* ensure that the previous TB is not chained to
424                        this one, as the program flow has changed */
425 next_tb = 0;
426 #if !defined(CONFIG_USER_ONLY)
427 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
428 (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
429 int intno;
430 /* FIXME: this should respect TPR */
431 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
432 svm_check_intercept(SVM_EXIT_VINTR);
433 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
434 if (loglevel & CPU_LOG_TB_IN_ASM)
435 fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
436 do_interrupt(intno, 0, 0, -1, 1);
437 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
438 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
439 next_tb = 0;
440 #endif
441 }
442 #elif defined(TARGET_PPC)
443 #if 0
444 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
445 cpu_ppc_reset(env);
446 }
447 #endif
448 if (interrupt_request & CPU_INTERRUPT_HARD) {
449 ppc_hw_interrupt(env);
450 if (env->pending_interrupts == 0)
451 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
452 next_tb = 0;
453 }
454 #elif defined(TARGET_MIPS)
455 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
456 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
457 (env->CP0_Status & (1 << CP0St_IE)) &&
458 !(env->CP0_Status & (1 << CP0St_EXL)) &&
459 !(env->CP0_Status & (1 << CP0St_ERL)) &&
460 !(env->hflags & MIPS_HFLAG_DM)) {
461 /* Raise it */
462 env->exception_index = EXCP_EXT_INTERRUPT;
463 env->error_code = 0;
464 do_interrupt(env);
465 next_tb = 0;
466 }
467 #elif defined(TARGET_SPARC)
468 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
469 (env->psret != 0)) {
470 int pil = env->interrupt_index & 15;
471 int type = env->interrupt_index & 0xf0;
472
473 if (((type == TT_EXTINT) &&
474 (pil == 15 || pil > env->psrpil)) ||
475 type != TT_EXTINT) {
476 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
477 env->exception_index = env->interrupt_index;
478 do_interrupt(env);
479 env->interrupt_index = 0;
480 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
481 cpu_check_irqs(env);
482 #endif
483 next_tb = 0;
484 }
485 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
486 //do_interrupt(0, 0, 0, 0, 0);
487 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
488 }
489 #elif defined(TARGET_ARM)
490 if (interrupt_request & CPU_INTERRUPT_FIQ
491 && !(env->uncached_cpsr & CPSR_F)) {
492 env->exception_index = EXCP_FIQ;
493 do_interrupt(env);
494 next_tb = 0;
495 }
496 /* ARMv7-M interrupt return works by loading a magic value
497 into the PC. On real hardware the load causes the
498 return to occur. The qemu implementation performs the
499 jump normally, then does the exception return when the
500 CPU tries to execute code at the magic address.
501 This will cause the magic PC value to be pushed to
502            the stack if an interrupt occurred at the wrong time.
503 We avoid this by disabling interrupts when
504 pc contains a magic address. */
505 if (interrupt_request & CPU_INTERRUPT_HARD
506 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
507 || !(env->uncached_cpsr & CPSR_I))) {
508 env->exception_index = EXCP_IRQ;
509 do_interrupt(env);
510 next_tb = 0;
511 }
512 #elif defined(TARGET_SH4)
513 if (interrupt_request & CPU_INTERRUPT_HARD) {
514 do_interrupt(env);
515 next_tb = 0;
516 }
517 #elif defined(TARGET_ALPHA)
518 if (interrupt_request & CPU_INTERRUPT_HARD) {
519 do_interrupt(env);
520 next_tb = 0;
521 }
522 #elif defined(TARGET_CRIS)
523 if (interrupt_request & CPU_INTERRUPT_HARD) {
524 do_interrupt(env);
525 next_tb = 0;
526 }
527 #elif defined(TARGET_M68K)
528 if (interrupt_request & CPU_INTERRUPT_HARD
529 && ((env->sr & SR_I) >> SR_I_SHIFT)
530 < env->pending_level) {
531 /* Real hardware gets the interrupt vector via an
532 IACK cycle at this point. Current emulated
533 hardware doesn't rely on this, so we
534 provide/save the vector when the interrupt is
535 first signalled. */
536 env->exception_index = env->pending_vector;
537 do_interrupt(1);
538 next_tb = 0;
539 }
540 #endif
541                /* Don't use the cached interrupt_request value,
542 do_interrupt may have updated the EXITTB flag. */
543 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
544 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
545                 /* ensure that the previous TB is not chained to
546                    this one, as the program flow has changed */
547 next_tb = 0;
548 }
549 if (interrupt_request & CPU_INTERRUPT_EXIT) {
550 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
551 env->exception_index = EXCP_INTERRUPT;
552 cpu_loop_exit();
553 }
554 }
555 #ifdef DEBUG_EXEC
556 if ((loglevel & CPU_LOG_TB_CPU)) {
557 /* restore flags in standard format */
558 regs_to_env();
559 #if defined(TARGET_I386)
560 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
561 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
562 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
563 #elif defined(TARGET_ARM)
564 cpu_dump_state(env, logfile, fprintf, 0);
565 #elif defined(TARGET_SPARC)
566 REGWPTR = env->regbase + (env->cwp * 16);
567 env->regwptr = REGWPTR;
568 cpu_dump_state(env, logfile, fprintf, 0);
569 #elif defined(TARGET_PPC)
570 cpu_dump_state(env, logfile, fprintf, 0);
571 #elif defined(TARGET_M68K)
572 cpu_m68k_flush_flags(env, env->cc_op);
573 env->cc_op = CC_OP_FLAGS;
574 env->sr = (env->sr & 0xffe0)
575 | env->cc_dest | (env->cc_x << 4);
576 cpu_dump_state(env, logfile, fprintf, 0);
577 #elif defined(TARGET_MIPS)
578 cpu_dump_state(env, logfile, fprintf, 0);
579 #elif defined(TARGET_SH4)
580 cpu_dump_state(env, logfile, fprintf, 0);
581 #elif defined(TARGET_ALPHA)
582 cpu_dump_state(env, logfile, fprintf, 0);
583 #elif defined(TARGET_CRIS)
584 cpu_dump_state(env, logfile, fprintf, 0);
585 #else
586 #error unsupported target CPU
587 #endif
588 }
589 #endif
590 tb = tb_find_fast();
591 #ifdef DEBUG_EXEC
592 if ((loglevel & CPU_LOG_EXEC)) {
593 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
594 (long)tb->tc_ptr, tb->pc,
595 lookup_symbol(tb->pc));
596 }
597 #endif
598 /* see if we can patch the calling TB. When the TB
599 spans two pages, we cannot safely do a direct
600 jump. */
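                /* the low two bits of next_tb select which of the two jump
                   slots of the previous TB gets patched to point at this one */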
601 {
602 if (next_tb != 0 &&
603 #ifdef USE_KQEMU
604 (env->kqemu_enabled != 2) &&
605 #endif
606 tb->page_addr[1] == -1) {
607 spin_lock(&tb_lock);
608 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
609 spin_unlock(&tb_lock);
610 }
611 }
612 tc_ptr = tb->tc_ptr;
613 env->current_tb = tb;
614 /* execute the generated code */
615 #if defined(__sparc__) && !defined(HOST_SOLARIS)
616 #undef env
617 env = cpu_single_env;
618 #define env cpu_single_env
619 #endif
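            /* run the generated host code; the return value becomes the new
               next_tb: the address of the TB that exited, with the taken jump
               slot in its low bits, or 0 if no chaining should be attempted */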
620 next_tb = tcg_qemu_tb_exec(tc_ptr);
621 env->current_tb = NULL;
622 /* reset soft MMU for next block (it can currently
623 only be set by a memory fault) */
624 #if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
625 if (env->hflags & HF_SOFTMMU_MASK) {
626 env->hflags &= ~HF_SOFTMMU_MASK;
627 /* do not allow linking to another block */
628 next_tb = 0;
629 }
630 #endif
631 #if defined(USE_KQEMU)
632 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
633 if (kqemu_is_ok(env) &&
634 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
635 cpu_loop_exit();
636 }
637 #endif
638 } /* for(;;) */
639 } else {
640 env_to_regs();
641 }
642 } /* for(;;) */
643
644
645 #if defined(TARGET_I386)
646 /* restore flags in standard format */
647 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
648 #elif defined(TARGET_ARM)
649     /* XXX: Save/restore host fpu exception state? */
650 #elif defined(TARGET_SPARC)
651 #if defined(reg_REGWPTR)
652 REGWPTR = saved_regwptr;
653 #endif
654 #elif defined(TARGET_PPC)
655 #elif defined(TARGET_M68K)
656 cpu_m68k_flush_flags(env, env->cc_op);
657 env->cc_op = CC_OP_FLAGS;
658 env->sr = (env->sr & 0xffe0)
659 | env->cc_dest | (env->cc_x << 4);
660 #elif defined(TARGET_MIPS)
661 #elif defined(TARGET_SH4)
662 #elif defined(TARGET_ALPHA)
663 #elif defined(TARGET_CRIS)
664 /* XXXXX */
665 #else
666 #error unsupported target CPU
667 #endif
668
669 /* restore global registers */
670 #include "hostregs_helper.h"
671
672     /* fail safe: never use cpu_single_env outside cpu_exec() */
673 cpu_single_env = NULL;
674 return ret;
675 }
676
677 /* must only be called from generated code, as an exception can be
678    raised */
679 void tb_invalidate_page_range(target_ulong start, target_ulong end)
680 {
681     /* XXX: cannot enable it yet because it can raise an MMU exception
682        where NIP != read address on PowerPC */
683 #if 0
684 target_ulong phys_addr;
685 phys_addr = get_phys_addr_code(env, start);
686 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
687 #endif
688 }
689
690 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
691
692 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
693 {
694 CPUX86State *saved_env;
695
696 saved_env = env;
697 env = s;
698 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
699 selector &= 0xffff;
700 cpu_x86_load_seg_cache(env, seg_reg, selector,
701 (selector << 4), 0xffff, 0);
702 } else {
703 helper_load_seg(seg_reg, selector);
704 }
705 env = saved_env;
706 }
707
708 void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
709 {
710 CPUX86State *saved_env;
711
712 saved_env = env;
713 env = s;
714
715 helper_fsave(ptr, data32);
716
717 env = saved_env;
718 }
719
720 void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
721 {
722 CPUX86State *saved_env;
723
724 saved_env = env;
725 env = s;
726
727 helper_frstor(ptr, data32);
728
729 env = saved_env;
730 }
731
732 #endif /* TARGET_I386 */
733
734 #if !defined(CONFIG_SOFTMMU)
735
736 #if defined(TARGET_I386)
737
738 /* 'pc' is the host PC at which the exception was raised. 'address' is
739 the effective address of the memory exception. 'is_write' is 1 if a
740    write caused the exception and 0 otherwise. 'old_set' is the
741 signal set which should be restored */
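/* Returns 1 if the fault was handled and execution can resume,
   or 0 if it was not an MMU fault and must be handled by the
   host. */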
742 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
743 int is_write, sigset_t *old_set,
744 void *puc)
745 {
746 TranslationBlock *tb;
747 int ret;
748
749 if (cpu_single_env)
750 env = cpu_single_env; /* XXX: find a correct solution for multithread */
751 #if defined(DEBUG_SIGNAL)
752 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
753 pc, address, is_write, *(unsigned long *)old_set);
754 #endif
755 /* XXX: locking issue */
756 if (is_write && page_unprotect(h2g(address), pc, puc)) {
757 return 1;
758 }
759
760 /* see if it is an MMU fault */
761 ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
762 if (ret < 0)
763 return 0; /* not an MMU fault */
764 if (ret == 0)
765 return 1; /* the MMU fault was handled without causing real CPU fault */
766 /* now we have a real cpu fault */
767 tb = tb_find_pc(pc);
768 if (tb) {
769 /* the PC is inside the translated code. It means that we have
770 a virtual CPU fault */
771 cpu_restore_state(tb, env, pc, puc);
772 }
773 if (ret == 1) {
774 #if 0
775 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
776 env->eip, env->cr[2], env->error_code);
777 #endif
778 /* we restore the process signal mask as the sigreturn should
779 do it (XXX: use sigsetjmp) */
780 sigprocmask(SIG_SETMASK, old_set, NULL);
781 raise_exception_err(env->exception_index, env->error_code);
782 } else {
783 /* activate soft MMU for this block */
784 env->hflags |= HF_SOFTMMU_MASK;
785 cpu_resume_from_signal(env, puc);
786 }
787 /* never comes here */
788 return 1;
789 }
790
791 #elif defined(TARGET_ARM)
792 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
793 int is_write, sigset_t *old_set,
794 void *puc)
795 {
796 TranslationBlock *tb;
797 int ret;
798
799 if (cpu_single_env)
800 env = cpu_single_env; /* XXX: find a correct solution for multithread */
801 #if defined(DEBUG_SIGNAL)
802 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
803 pc, address, is_write, *(unsigned long *)old_set);
804 #endif
805 /* XXX: locking issue */
806 if (is_write && page_unprotect(h2g(address), pc, puc)) {
807 return 1;
808 }
809 /* see if it is an MMU fault */
810 ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
811 if (ret < 0)
812 return 0; /* not an MMU fault */
813 if (ret == 0)
814 return 1; /* the MMU fault was handled without causing real CPU fault */
815 /* now we have a real cpu fault */
816 tb = tb_find_pc(pc);
817 if (tb) {
818 /* the PC is inside the translated code. It means that we have
819 a virtual CPU fault */
820 cpu_restore_state(tb, env, pc, puc);
821 }
822 /* we restore the process signal mask as the sigreturn should
823 do it (XXX: use sigsetjmp) */
824 sigprocmask(SIG_SETMASK, old_set, NULL);
825 cpu_loop_exit();
826 /* never comes here */
827 return 1;
828 }
829 #elif defined(TARGET_SPARC)
830 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
831 int is_write, sigset_t *old_set,
832 void *puc)
833 {
834 TranslationBlock *tb;
835 int ret;
836
837 if (cpu_single_env)
838 env = cpu_single_env; /* XXX: find a correct solution for multithread */
839 #if defined(DEBUG_SIGNAL)
840 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
841 pc, address, is_write, *(unsigned long *)old_set);
842 #endif
843 /* XXX: locking issue */
844 if (is_write && page_unprotect(h2g(address), pc, puc)) {
845 return 1;
846 }
847 /* see if it is an MMU fault */
848 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
849 if (ret < 0)
850 return 0; /* not an MMU fault */
851 if (ret == 0)
852 return 1; /* the MMU fault was handled without causing real CPU fault */
853 /* now we have a real cpu fault */
854 tb = tb_find_pc(pc);
855 if (tb) {
856 /* the PC is inside the translated code. It means that we have
857 a virtual CPU fault */
858 cpu_restore_state(tb, env, pc, puc);
859 }
860 /* we restore the process signal mask as the sigreturn should
861 do it (XXX: use sigsetjmp) */
862 sigprocmask(SIG_SETMASK, old_set, NULL);
863 cpu_loop_exit();
864 /* never comes here */
865 return 1;
866 }
867 #elif defined (TARGET_PPC)
868 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
869 int is_write, sigset_t *old_set,
870 void *puc)
871 {
872 TranslationBlock *tb;
873 int ret;
874
875 if (cpu_single_env)
876 env = cpu_single_env; /* XXX: find a correct solution for multithread */
877 #if defined(DEBUG_SIGNAL)
878 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
879 pc, address, is_write, *(unsigned long *)old_set);
880 #endif
881 /* XXX: locking issue */
882 if (is_write && page_unprotect(h2g(address), pc, puc)) {
883 return 1;
884 }
885
886 /* see if it is an MMU fault */
887 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
888 if (ret < 0)
889 return 0; /* not an MMU fault */
890 if (ret == 0)
891 return 1; /* the MMU fault was handled without causing real CPU fault */
892
893 /* now we have a real cpu fault */
894 tb = tb_find_pc(pc);
895 if (tb) {
896 /* the PC is inside the translated code. It means that we have
897 a virtual CPU fault */
898 cpu_restore_state(tb, env, pc, puc);
899 }
900 if (ret == 1) {
901 #if 0
902 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
903 env->nip, env->error_code, tb);
904 #endif
905 /* we restore the process signal mask as the sigreturn should
906 do it (XXX: use sigsetjmp) */
907 sigprocmask(SIG_SETMASK, old_set, NULL);
908 do_raise_exception_err(env->exception_index, env->error_code);
909 } else {
910 /* activate soft MMU for this block */
911 cpu_resume_from_signal(env, puc);
912 }
913 /* never comes here */
914 return 1;
915 }
916
917 #elif defined(TARGET_M68K)
918 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
919 int is_write, sigset_t *old_set,
920 void *puc)
921 {
922 TranslationBlock *tb;
923 int ret;
924
925 if (cpu_single_env)
926 env = cpu_single_env; /* XXX: find a correct solution for multithread */
927 #if defined(DEBUG_SIGNAL)
928 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
929 pc, address, is_write, *(unsigned long *)old_set);
930 #endif
931 /* XXX: locking issue */
932 if (is_write && page_unprotect(address, pc, puc)) {
933 return 1;
934 }
935 /* see if it is an MMU fault */
936 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
937 if (ret < 0)
938 return 0; /* not an MMU fault */
939 if (ret == 0)
940 return 1; /* the MMU fault was handled without causing real CPU fault */
941 /* now we have a real cpu fault */
942 tb = tb_find_pc(pc);
943 if (tb) {
944 /* the PC is inside the translated code. It means that we have
945 a virtual CPU fault */
946 cpu_restore_state(tb, env, pc, puc);
947 }
948 /* we restore the process signal mask as the sigreturn should
949 do it (XXX: use sigsetjmp) */
950 sigprocmask(SIG_SETMASK, old_set, NULL);
951 cpu_loop_exit();
952 /* never comes here */
953 return 1;
954 }
955
956 #elif defined (TARGET_MIPS)
957 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
958 int is_write, sigset_t *old_set,
959 void *puc)
960 {
961 TranslationBlock *tb;
962 int ret;
963
964 if (cpu_single_env)
965 env = cpu_single_env; /* XXX: find a correct solution for multithread */
966 #if defined(DEBUG_SIGNAL)
967 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
968 pc, address, is_write, *(unsigned long *)old_set);
969 #endif
970 /* XXX: locking issue */
971 if (is_write && page_unprotect(h2g(address), pc, puc)) {
972 return 1;
973 }
974
975 /* see if it is an MMU fault */
976 ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
977 if (ret < 0)
978 return 0; /* not an MMU fault */
979 if (ret == 0)
980 return 1; /* the MMU fault was handled without causing real CPU fault */
981
982 /* now we have a real cpu fault */
983 tb = tb_find_pc(pc);
984 if (tb) {
985 /* the PC is inside the translated code. It means that we have
986 a virtual CPU fault */
987 cpu_restore_state(tb, env, pc, puc);
988 }
989 if (ret == 1) {
990 #if 0
991 printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
992            env->PC[env->current_tc], env->error_code, tb);
993 #endif
994 /* we restore the process signal mask as the sigreturn should
995 do it (XXX: use sigsetjmp) */
996 sigprocmask(SIG_SETMASK, old_set, NULL);
997 do_raise_exception_err(env->exception_index, env->error_code);
998 } else {
999 /* activate soft MMU for this block */
1000 cpu_resume_from_signal(env, puc);
1001 }
1002 /* never comes here */
1003 return 1;
1004 }
1005
1006 #elif defined (TARGET_SH4)
1007 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1008 int is_write, sigset_t *old_set,
1009 void *puc)
1010 {
1011 TranslationBlock *tb;
1012 int ret;
1013
1014 if (cpu_single_env)
1015 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1016 #if defined(DEBUG_SIGNAL)
1017 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1018 pc, address, is_write, *(unsigned long *)old_set);
1019 #endif
1020 /* XXX: locking issue */
1021 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1022 return 1;
1023 }
1024
1025 /* see if it is an MMU fault */
1026 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1027 if (ret < 0)
1028 return 0; /* not an MMU fault */
1029 if (ret == 0)
1030 return 1; /* the MMU fault was handled without causing real CPU fault */
1031
1032 /* now we have a real cpu fault */
1033 tb = tb_find_pc(pc);
1034 if (tb) {
1035 /* the PC is inside the translated code. It means that we have
1036 a virtual CPU fault */
1037 cpu_restore_state(tb, env, pc, puc);
1038 }
1039 #if 0
1040 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1041 env->nip, env->error_code, tb);
1042 #endif
1043 /* we restore the process signal mask as the sigreturn should
1044 do it (XXX: use sigsetjmp) */
1045 sigprocmask(SIG_SETMASK, old_set, NULL);
1046 cpu_loop_exit();
1047 /* never comes here */
1048 return 1;
1049 }
1050
1051 #elif defined (TARGET_ALPHA)
1052 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1053 int is_write, sigset_t *old_set,
1054 void *puc)
1055 {
1056 TranslationBlock *tb;
1057 int ret;
1058
1059 if (cpu_single_env)
1060 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1061 #if defined(DEBUG_SIGNAL)
1062 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1063 pc, address, is_write, *(unsigned long *)old_set);
1064 #endif
1065 /* XXX: locking issue */
1066 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1067 return 1;
1068 }
1069
1070 /* see if it is an MMU fault */
1071 ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1072 if (ret < 0)
1073 return 0; /* not an MMU fault */
1074 if (ret == 0)
1075 return 1; /* the MMU fault was handled without causing real CPU fault */
1076
1077 /* now we have a real cpu fault */
1078 tb = tb_find_pc(pc);
1079 if (tb) {
1080 /* the PC is inside the translated code. It means that we have
1081 a virtual CPU fault */
1082 cpu_restore_state(tb, env, pc, puc);
1083 }
1084 #if 0
1085 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1086 env->nip, env->error_code, tb);
1087 #endif
1088 /* we restore the process signal mask as the sigreturn should
1089 do it (XXX: use sigsetjmp) */
1090 sigprocmask(SIG_SETMASK, old_set, NULL);
1091 cpu_loop_exit();
1092 /* never comes here */
1093 return 1;
1094 }
1095 #elif defined (TARGET_CRIS)
1096 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1097 int is_write, sigset_t *old_set,
1098 void *puc)
1099 {
1100 TranslationBlock *tb;
1101 int ret;
1102
1103 if (cpu_single_env)
1104 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1105 #if defined(DEBUG_SIGNAL)
1106 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1107 pc, address, is_write, *(unsigned long *)old_set);
1108 #endif
1109 /* XXX: locking issue */
1110 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1111 return 1;
1112 }
1113
1114 /* see if it is an MMU fault */
1115 ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1116 if (ret < 0)
1117 return 0; /* not an MMU fault */
1118 if (ret == 0)
1119 return 1; /* the MMU fault was handled without causing real CPU fault */
1120
1121 /* now we have a real cpu fault */
1122 tb = tb_find_pc(pc);
1123 if (tb) {
1124 /* the PC is inside the translated code. It means that we have
1125 a virtual CPU fault */
1126 cpu_restore_state(tb, env, pc, puc);
1127 }
1128 /* we restore the process signal mask as the sigreturn should
1129 do it (XXX: use sigsetjmp) */
1130 sigprocmask(SIG_SETMASK, old_set, NULL);
1131 cpu_loop_exit();
1132 /* never comes here */
1133 return 1;
1134 }
1135
1136 #else
1137 #error unsupported target CPU
1138 #endif
1139
1140 #if defined(__i386__)
1141
1142 #if defined(__APPLE__)
1143 # include <sys/ucontext.h>
1144
1145 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1146 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1147 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1148 #else
1149 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1150 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1151 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1152 #endif
1153
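/* host-side fault handler glue: recover the faulting host PC and
   the access direction from the signal context, then hand off to
   handle_cpu_signal() */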
1154 int cpu_signal_handler(int host_signum, void *pinfo,
1155 void *puc)
1156 {
1157 siginfo_t *info = pinfo;
1158 struct ucontext *uc = puc;
1159 unsigned long pc;
1160 int trapno;
1161
1162 #ifndef REG_EIP
1163 /* for glibc 2.1 */
1164 #define REG_EIP EIP
1165 #define REG_ERR ERR
1166 #define REG_TRAPNO TRAPNO
1167 #endif
1168 pc = EIP_sig(uc);
1169 trapno = TRAP_sig(uc);
1170 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1171 trapno == 0xe ?
1172 (ERROR_sig(uc) >> 1) & 1 : 0,
1173 &uc->uc_sigmask, puc);
1174 }
1175
1176 #elif defined(__x86_64__)
1177
1178 int cpu_signal_handler(int host_signum, void *pinfo,
1179 void *puc)
1180 {
1181 siginfo_t *info = pinfo;
1182 struct ucontext *uc = puc;
1183 unsigned long pc;
1184
1185 pc = uc->uc_mcontext.gregs[REG_RIP];
1186 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1187 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
1188 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1189 &uc->uc_sigmask, puc);
1190 }
1191
1192 #elif defined(__powerpc__)
1193
1194 /***********************************************************************
1195 * signal context platform-specific definitions
1196 * From Wine
1197 */
1198 #ifdef linux
1199 /* All Registers access - only for local access */
1200 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1201 /* Gpr Registers access */
1202 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1203 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1204 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1205 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1206 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1207 # define LR_sig(context) REG_sig(link, context) /* Link register */
1208 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1209 /* Float Registers access */
1210 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1211 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1212 /* Exception Registers access */
1213 # define DAR_sig(context) REG_sig(dar, context)
1214 # define DSISR_sig(context) REG_sig(dsisr, context)
1215 # define TRAP_sig(context) REG_sig(trap, context)
1216 #endif /* linux */
1217
1218 #ifdef __APPLE__
1219 # include <sys/ucontext.h>
1220 typedef struct ucontext SIGCONTEXT;
1221 /* All Registers access - only for local access */
1222 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1223 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1224 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1225 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1226 /* Gpr Registers access */
1227 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1228 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1229 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1230 # define CTR_sig(context) REG_sig(ctr, context)
1231 # define XER_sig(context)              REG_sig(xer, context) /* User's integer exception register */
1232 # define LR_sig(context)               REG_sig(lr, context)  /* Link register */
1233 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1234 /* Float Registers access */
1235 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1236 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1237 /* Exception Registers access */
1238 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1239 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1240 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1241 #endif /* __APPLE__ */
1242
1243 int cpu_signal_handler(int host_signum, void *pinfo,
1244 void *puc)
1245 {
1246 siginfo_t *info = pinfo;
1247 struct ucontext *uc = puc;
1248 unsigned long pc;
1249 int is_write;
1250
1251 pc = IAR_sig(uc);
1252 is_write = 0;
1253 #if 0
1254 /* ppc 4xx case */
1255 if (DSISR_sig(uc) & 0x00800000)
1256 is_write = 1;
1257 #else
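    /* trap 0x400 is the instruction storage interrupt, which carries no
       DSISR; for data faults, DSISR bit 0x02000000 flags a store */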
1258 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1259 is_write = 1;
1260 #endif
1261 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1262 is_write, &uc->uc_sigmask, puc);
1263 }
1264
1265 #elif defined(__alpha__)
1266
1267 int cpu_signal_handler(int host_signum, void *pinfo,
1268 void *puc)
1269 {
1270 siginfo_t *info = pinfo;
1271 struct ucontext *uc = puc;
1272 uint32_t *pc = uc->uc_mcontext.sc_pc;
1273 uint32_t insn = *pc;
1274 int is_write = 0;
1275
1276 /* XXX: need kernel patch to get write flag faster */
1277 switch (insn >> 26) {
1278 case 0x0d: // stw
1279 case 0x0e: // stb
1280 case 0x0f: // stq_u
1281 case 0x24: // stf
1282 case 0x25: // stg
1283 case 0x26: // sts
1284 case 0x27: // stt
1285 case 0x2c: // stl
1286 case 0x2d: // stq
1287 case 0x2e: // stl_c
1288 case 0x2f: // stq_c
1289 is_write = 1;
1290 }
1291
1292 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1293 is_write, &uc->uc_sigmask, puc);
1294 }
1295 #elif defined(__sparc__)
1296
1297 int cpu_signal_handler(int host_signum, void *pinfo,
1298 void *puc)
1299 {
1300 siginfo_t *info = pinfo;
1301 int is_write;
1302 uint32_t insn;
1303 #if !defined(__arch64__) || defined(HOST_SOLARIS)
1304 uint32_t *regs = (uint32_t *)(info + 1);
1305 void *sigmask = (regs + 20);
1306 /* XXX: is there a standard glibc define ? */
1307 unsigned long pc = regs[1];
1308 #else
1309 struct sigcontext *sc = puc;
1310 unsigned long pc = sc->sigc_regs.tpc;
1311 void *sigmask = (void *)sc->sigc_mask;
1312 #endif
1313
1314 /* XXX: need kernel patch to get write flag faster */
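    /* decode the faulting instruction instead: format-3 instructions
       (top two bits == 3) are the loads/stores, and the op3 field in
       bits 19-24 identifies the store opcodes tested below */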
1315 is_write = 0;
1316 insn = *(uint32_t *)pc;
1317 if ((insn >> 30) == 3) {
1318 switch((insn >> 19) & 0x3f) {
1319 case 0x05: // stb
1320 case 0x06: // sth
1321 case 0x04: // st
1322 case 0x07: // std
1323 case 0x24: // stf
1324 case 0x27: // stdf
1325 case 0x25: // stfsr
1326 is_write = 1;
1327 break;
1328 }
1329 }
1330 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1331 is_write, sigmask, NULL);
1332 }
1333
1334 #elif defined(__arm__)
1335
1336 int cpu_signal_handler(int host_signum, void *pinfo,
1337 void *puc)
1338 {
1339 siginfo_t *info = pinfo;
1340 struct ucontext *uc = puc;
1341 unsigned long pc;
1342 int is_write;
1343
1344 pc = uc->uc_mcontext.arm_pc;
1345 /* XXX: compute is_write */
1346 is_write = 0;
1347 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1348 is_write,
1349 &uc->uc_sigmask, puc);
1350 }
1351
1352 #elif defined(__mc68000)
1353
1354 int cpu_signal_handler(int host_signum, void *pinfo,
1355 void *puc)
1356 {
1357 siginfo_t *info = pinfo;
1358 struct ucontext *uc = puc;
1359 unsigned long pc;
1360 int is_write;
1361
1362 pc = uc->uc_mcontext.gregs[16];
1363 /* XXX: compute is_write */
1364 is_write = 0;
1365 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1366 is_write,
1367 &uc->uc_sigmask, puc);
1368 }
1369
1370 #elif defined(__ia64)
1371
1372 #ifndef __ISR_VALID
1373 /* This ought to be in <bits/siginfo.h>... */
1374 # define __ISR_VALID 1
1375 #endif
1376
1377 int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1378 {
1379 siginfo_t *info = pinfo;
1380 struct ucontext *uc = puc;
1381 unsigned long ip;
1382 int is_write = 0;
1383
1384 ip = uc->uc_mcontext.sc_ip;
1385 switch (host_signum) {
1386 case SIGILL:
1387 case SIGFPE:
1388 case SIGSEGV:
1389 case SIGBUS:
1390 case SIGTRAP:
1391 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1392 /* ISR.W (write-access) is bit 33: */
1393 is_write = (info->si_isr >> 33) & 1;
1394 break;
1395
1396 default:
1397 break;
1398 }
1399 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1400 is_write,
1401 &uc->uc_sigmask, puc);
1402 }
1403
1404 #elif defined(__s390__)
1405
1406 int cpu_signal_handler(int host_signum, void *pinfo,
1407 void *puc)
1408 {
1409 siginfo_t *info = pinfo;
1410 struct ucontext *uc = puc;
1411 unsigned long pc;
1412 int is_write;
1413
1414 pc = uc->uc_mcontext.psw.addr;
1415 /* XXX: compute is_write */
1416 is_write = 0;
1417 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1418 is_write, &uc->uc_sigmask, puc);
1419 }
1420
1421 #elif defined(__mips__)
1422
1423 int cpu_signal_handler(int host_signum, void *pinfo,
1424 void *puc)
1425 {
1426 siginfo_t *info = pinfo;
1427 struct ucontext *uc = puc;
1428 greg_t pc = uc->uc_mcontext.pc;
1429 int is_write;
1430
1431 /* XXX: compute is_write */
1432 is_write = 0;
1433 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1434 is_write, &uc->uc_sigmask, puc);
1435 }
1436
1437 #elif defined(__hppa__)
1438
1439 int cpu_signal_handler(int host_signum, void *pinfo,
1440 void *puc)
1441 {
1442 struct siginfo *info = pinfo;
1443 struct ucontext *uc = puc;
1444 unsigned long pc;
1445 int is_write;
1446
1447 pc = uc->uc_mcontext.sc_iaoq[0];
1448 /* FIXME: compute is_write */
1449 is_write = 0;
1450 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1451 is_write,
1452 &uc->uc_sigmask, puc);
1453 }
1454
1455 #else
1456
1457 #error host CPU specific signal handler needed
1458
1459 #endif
1460
1461 #endif /* !defined(CONFIG_SOFTMMU) */