/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

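/* Set by tb_find_slow() when tb_alloc() failed and the whole TB cache
   had to be flushed: previously cached TB pointers and jump chains are
   then stale and must not be reused. */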
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

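/* Slow-path TB lookup: hash the physical address of 'pc' into
   tb_phys_hash[] and walk the collision chain; if no matching block
   exists, translate one now and link it into the physical hash table. */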
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, &code_gen_size);
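    /* advance code_gen_ptr past the generated code, rounded up to the
       next CODE_GEN_ALIGN boundary */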
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}

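/* Fast-path TB lookup: compute the (pc, cs_base, flags) triple for the
   current target and probe the per-CPU virtual-PC jump cache before
   falling back to tb_find_slow(). */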
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit 6 */
            | (env->sr & SR_S)            /* Bit 13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = 0;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}

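/* BREAK_CHAIN clears T0 (or its sparc-host temporary), which marks
   that the next TB must be looked up rather than reached through a
   patched direct jump, and prevents tb_add_jump() from chaining the
   previous TB to the next one. */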
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#define BREAK_CHAIN tmp_T0 = 0
#else
#define BREAK_CHAIN T0 = 0
#endif

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    int saved_i7;
    target_ulong tmp_T0;
#endif
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            T0 = 0; /* force lookup of first TB */
            for(;;) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        BREAK_CHAIN;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                        (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        BREAK_CHAIN;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            BREAK_CHAIN;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        BREAK_CHAIN;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                T0 = tmp_T0;
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (T0 != 0 &&
#if USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

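/* Illustrative only: a caller's main loop typically looks something
   like the following sketch, where handle_exit() is a hypothetical
   helper standing in for whatever the caller does with an EXCP_* code:

       for (;;) {
           int ret = cpu_exec(env);
           if (ret == EXCP_DEBUG || ret == EXCP_HLT || ret == EXCP_INTERRUPT)
               handle_exit(env, ret);
       }
*/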
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception, 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

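/* Illustrative sketch (not part of this file): the host signal setup
   installs a handler with SA_SIGINFO so that both the siginfo_t and
   the ucontext reach cpu_signal_handler(); roughly:

       static void host_segv_handler(int sig, siginfo_t *info, void *uc)
       {
           // hypothetical wrapper around cpu_signal_handler()
           if (!cpu_signal_handler(sig, info, uc))
               abort(); // genuine host fault, not a guest one
       }

       struct sigaction act;
       sigfillset(&act.sa_mask);
       act.sa_flags = SA_SIGINFO;
       act.sa_sigaction = host_segv_handler;
       sigaction(SIGSEGV, &act, NULL);
*/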
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)  ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)   REG_sig(gpr[reg_num], context)
# define IAR_sig(context)            REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)            REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)            REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)            REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)             REG_sig(link, context) /* Link register */
# define CR_sig(context)             REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)          (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)            REG_sig(dar, context)
# define DSISR_sig(context)          REG_sig(dsisr, context)
# define TRAP_sig(context)           REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)  /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
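    /* bits 31:26 of an alpha instruction hold the major opcode; the
       cases below are the store opcodes */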
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */