/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif
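/* Note: exec.h defines the target registers (EAX, ECX, ...) as macros
   over env->regs[]; host headers such as <sys/ucontext.h> may define
   the same names as signal-context register indices, so they are
   undefined here before the include to avoid the collision. */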

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
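/* A rough sketch of the exception control flow: cpu_exec() below runs
   setjmp(env->jmp_env) once per outer loop iteration, and any helper
   that raises a target exception calls cpu_loop_exit(), which unwinds
   the host stack straight back to that setjmp with
   env->exception_index already set:

       if (setjmp(env->jmp_env) == 0) {
           ... deliver pending exception, then run TBs ...
       }
*/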

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}


static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      unsigned int flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
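/* A TB is keyed by (physical PC, pc, cs_base, flags) and, when it
   straddles a page boundary, by the physical address of the second
   page as well: after a remap the same virtual PC can point at
   different code, so the physical addresses must take part in the
   match. The lookup above is, roughly:

       h = tb_phys_hash_func(phys_pc);       -- bucket in tb_phys_hash[]
       walk tb->phys_hash_next until all keys match, else retranslate
*/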

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    unsigned int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . MMU enabled . MMU no-fault . Supervisor
    flags = (env->psref << 3) | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1)
        | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->sr & (SR_MD | SR_RB);
    cs_base = 0;         /* XXXXX */
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}
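/* env->tb_jmp_cache is a direct-mapped cache indexed by a hash of the
   virtual PC, so the common case above costs one load and a few
   compares; only a miss (or a cs_base/flags mismatch) pays for the
   locked physical-hash walk in tb_find_slow(). Clearing T0 when
   tb_invalidated_flag is set keeps the chaining code in cpu_exec()
   from patching a jump out of a previous TB that may just have been
   invalidated. */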


/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    int saved_i7;
    target_ulong tmp_T0;
#endif
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            T0 = 0; /* force lookup of first TB */
            for(;;) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_cpsr & CPSR_I)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                    }
#elif defined(TARGET_SH4)
                    /* XXXXX */
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
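                /* Interrupt delivery pattern, roughly: device emulation
                   sets bits in env->interrupt_request asynchronously;
                   this loop samples the mask once per iteration, clears
                   the bit it services, and zeroes T0 whenever control
                   flow changed so that no stale TB-to-TB jump gets
                   patched in below. */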
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                T0 = tmp_T0;
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (T0 != 0 &&
#if defined(USE_KQEMU)
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                        && (tb->cflags & CF_CODE_COPY) ==
                           (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
#endif
                        ) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
#if defined(USE_CODE_COPY)
                        /* propagates the FP use info */
                        ((TranslationBlock *)(T0 & ~3))->cflags |=
                            (tb->cflags & CF_FP_USED);
#endif
                        spin_unlock(&tb_lock);
                    }
                }
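                /* Block chaining: the generated code leaves
                   (previous TB pointer | jump slot index) in T0, with
                   the slot index in the low 2 bits. tb_add_jump()
                   rewrites that jump slot to branch directly into the
                   new TB's host code, so hot paths stop coming back
                   through this dispatch loop. Per the comment above,
                   TBs spanning two pages are never chained to. */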
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
                {
                    if (!(tb->cflags & CF_CODE_COPY)) {
                        if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
                            save_native_fp_state(env);
                        }
                        gen_func();
                    } else {
                        if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
                            restore_native_fp_state(env);
                        }
                        /* we work with native eflags */
                        CC_SRC = cc_table[CC_OP].compute_all();
                        CC_OP = CC_OP_EFLAGS;
                        asm(".globl exec_loop\n"
                            "\n"
                            "debug1:\n"
                            "    pushl %%ebp\n"
                            "    fs movl %10, %9\n"
                            "    fs movl %11, %%eax\n"
                            "    andl $0x400, %%eax\n"
                            "    fs orl %8, %%eax\n"
                            "    pushl %%eax\n"
                            "    popf\n"
                            "    fs movl %%esp, %12\n"
                            "    fs movl %0, %%eax\n"
                            "    fs movl %1, %%ecx\n"
                            "    fs movl %2, %%edx\n"
                            "    fs movl %3, %%ebx\n"
                            "    fs movl %4, %%esp\n"
                            "    fs movl %5, %%ebp\n"
                            "    fs movl %6, %%esi\n"
                            "    fs movl %7, %%edi\n"
                            "    fs jmp *%9\n"
                            "exec_loop:\n"
                            "    fs movl %%esp, %4\n"
                            "    fs movl %12, %%esp\n"
                            "    fs movl %%eax, %0\n"
                            "    fs movl %%ecx, %1\n"
                            "    fs movl %%edx, %2\n"
                            "    fs movl %%ebx, %3\n"
                            "    fs movl %%ebp, %5\n"
                            "    fs movl %%esi, %6\n"
                            "    fs movl %%edi, %7\n"
                            "    pushf\n"
                            "    popl %%eax\n"
                            "    movl %%eax, %%ecx\n"
                            "    andl $0x400, %%ecx\n"
                            "    shrl $9, %%ecx\n"
                            "    andl $0x8d5, %%eax\n"
                            "    fs movl %%eax, %8\n"
                            "    movl $1, %%eax\n"
                            "    subl %%ecx, %%eax\n"
                            "    fs movl %%eax, %11\n"
                            "    fs movl %9, %%ebx\n" /* get T0 value */
                            "    popl %%ebp\n"
                            :
                            : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
                              "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
                              "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
                              "a" (gen_func),
                              "m" (*(uint8_t *)offsetof(CPUState, df)),
                              "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
                            : "%ecx", "%edx"
                            );
                    }
                }
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                gen_func();
#endif
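                /* Dispatch note: a TB's host code is entered as a plain
                   void (*)(void) function. The __sparc__ and __arm__
                   variants inline the call to control which registers
                   get clobbered, and the ia64 case hand-builds the
                   two-word function descriptor (entry point + gp) that
                   ia64 function pointers actually point to. */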
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    }
#endif
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave((target_ulong)ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor((target_ulong)ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception, otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
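/* Return contract shared by every handle_cpu_signal variant below:
   0 means the fault was not ours and the caller should fall back to
   the default host signal behaviour; 1 means it was consumed, either
   by unprotecting a page of translated code or by turning the host
   fault into a target exception. The ret < 0 / ret == 0 / ret == 1
   results of the per-target cpu_*_handle_mmu_fault calls map onto
   those cases. */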

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif
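/* These accessors paper over host-specific ucontext layouts: Darwin
   keeps the machine state behind a pointer (uc_mcontext->ss/es) while
   Linux exposes a flat gregs[] array indexed by REG_* constants. The
   same pattern repeats for the other host architectures below. */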

#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
                          struct ucontext *uc)
{
    TranslationBlock *tb;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    }
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);
}
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (ERROR_sig(uc) >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}
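/* Trap 0xe is the x86 page fault; bit 1 of its pushed error code is
   the W/R bit, so "(ERROR_sig(uc) >> 1) & 1" recovers whether the
   faulting access was a write. Other trap numbers carry no such
   information, hence is_write = 0 for them. */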

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)  ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)   REG_sig(gpr[reg_num], context)
# define IAR_sig(context)            REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)            REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)            REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)            REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)             REG_sig(link, context) /* Link register */
# define CR_sig(context)             REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)          (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)            REG_sig(dar, context)
# define DSISR_sig(context)          REG_sig(dsisr, context)
# define TRAP_sig(context)           REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)(unsigned long)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
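/* With no write flag in the Alpha signal context, the handler decodes
   the faulting instruction itself: bits 31..26 hold the opcode, and
   the opcodes listed above are the store forms (stb/stw/stl/stq plus
   the FP stores), so matching one of them implies a write access. */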
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */