/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

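/* Set by tb_find_slow() when the TB cache had to be flushed; tb_find_fast()
   checks it and clears T0 so that no stale TB gets chained to. */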
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()

#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents

static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif

void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved here by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

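/* Look up a TB in the physical hash table, translating the code at 'pc'
   if no matching block exists yet.  Blocks are hashed and linked on the
   physical address of the guest code (see tb_link_phys()), and a block
   whose code crosses a page boundary records a second physical page that
   must also match. */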
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}

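/* Fast TB lookup: hash the current virtual PC into tb_jmp_cache and fall
   back to tb_find_slow() only on a miss.  The translation-relevant CPU
   state collected in 'flags' below must match as well, since the same
   guest PC can require different translated code. */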
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
        | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)   /* Bit  6 */
            | (env->sr & SR_S)             /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);   /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = 0;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}

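/* A TB returns, in T0, the address of the block it ran or'ed with the
   index of the jump slot it exited through (see the tb_add_jump() call
   below).  Clearing T0 breaks the chain: the next TB is then looked up
   instead of being patched in as a direct jump. */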
#define BREAK_CHAIN T0 = 0

/* main execution loop */

int cpu_exec(CPUState *env1)
{
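    /* hostregs_helper.h expands differently on each inclusion: with
       DECLARE_HOST_REGS it declares storage for the host registers that
       hold globals, with SAVE_HOST_REGS it saves them, and the plain
       inclusion at the end of this function restores them. */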
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    long (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
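    /* while the loop runs, the arithmetic flags live lazily in
       CC_SRC/CC_OP and the direction flag as DF = +1/-1, so those bits
       are kept out of the architectural eflags value */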
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            T0 = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        BREAK_CHAIN;
                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                               !(env->hflags & HF_NMI_MASK)) {
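                        /* deliver the NMI and mask further NMIs via
                           HF_NMI_MASK (architecturally the mask is
                           lifted again by the next IRET) */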
                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                        env->hflags |= HF_NMI_MASK;
                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                        BREAK_CHAIN;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        BREAK_CHAIN;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            BREAK_CHAIN;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        BREAK_CHAIN;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (T0 != 0 &&
#if USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__hppa__)
                asm volatile ("ble  0(%%sr4,%1)\n"
                              "copy %%r31,%%r18\n"
                              "copy %%r28,%0\n"
                              : "=r" (T0)
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r4", "r5", "r6", "r7",
                                "r8", "r9", "r10", "r11", "r12", "r13",
                                "r18", "r19", "r20", "r21", "r22", "r23",
                                "r24", "r25", "r26", "r27", "r28", "r29",
                                "r30", "r31");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
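                /* an ia64 function pointer is really a descriptor
                   holding the entry address plus gp; build one by hand
                   so the generated code can be entered through an
                   indirect call (the gp value assumes the ia64
                   backend's code buffer layout) */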
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                T0 = gen_func();
#endif
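                /* whichever host-specific path was taken above, the
                   executed TB reports its chaining information through
                   T0 (see BREAK_CHAIN and tb_add_jump() above) */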
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it leads to an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

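/* Load a segment register for user-mode emulation.  In real or vm86 mode
   the segment base is simply selector << 4; in protected mode the
   descriptor table lookup is done by load_seg(). */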
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception, otherwise 0. 'old_set' is the
   signal set which should be restored. */
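/* Returns 1 if the fault was handled (the guest-exception paths below
   longjmp away and never actually return), or 0 if the fault did not
   come from emulated guest memory and must be handled by the caller. */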
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)    (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)   ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)  ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)    ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)   ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)  ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
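    /* trap 0xe is the x86 page fault; bit 1 of its error code is set
       when the faulting access was a write */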
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)        REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                 REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)                 REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                 REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                  REG_sig(link, context)  /* Link register */
# define CR_sig(context)                  REG_sig(ccr, context)   /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)      (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)               (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                 REG_sig(dar, context)
# define DSISR_sig(context)               REG_sig(dsisr, context)
# define TRAP_sig(context)                REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)
# define XER_sig(context)                 REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                  REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                  REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */