1 /*
2 * i386 emulator main execution loop
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #include "exec.h"
22 #include "disas.h"
23
24 #if !defined(CONFIG_SOFTMMU)
25 #undef EAX
26 #undef ECX
27 #undef EDX
28 #undef EBX
29 #undef ESP
30 #undef EBP
31 #undef ESI
32 #undef EDI
33 #undef EIP
34 #include <signal.h>
35 #include <sys/ucontext.h>
36 #endif
37
38 int tb_invalidated_flag;
39
40 //#define DEBUG_EXEC
41 //#define DEBUG_SIGNAL
42
43 void cpu_loop_exit(void)
44 {
45     /* NOTE: the registers at this point must be saved by hand because
46        longjmp() restores them to the values they had at setjmp() time */
47 regs_to_env();
48 longjmp(env->jmp_env, 1);
49 }
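/* Note: the longjmp() above lands at the setjmp(env->jmp_env) in
   cpu_exec() below, which is the single re-entry point of the main
   execution loop. */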
50
51 #if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
52 #define reg_T2
53 #endif
54
55 /* exit the current TB from a signal handler. The host registers are
56 restored in a state compatible with the CPU emulator
57 */
58 void cpu_resume_from_signal(CPUState *env1, void *puc)
59 {
60 #if !defined(CONFIG_SOFTMMU)
61 struct ucontext *uc = puc;
62 #endif
63
64 env = env1;
65
66 /* XXX: restore cpu registers saved in host registers */
67
68 #if !defined(CONFIG_SOFTMMU)
69 if (puc) {
70 /* XXX: use siglongjmp ? */
71 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
72 }
73 #endif
74 longjmp(env->jmp_env, 1);
75 }
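/* Note: cpu_resume_from_signal() is used by the handle_cpu_signal()
   handlers at the end of this file to re-enter the main loop once a
   host SIGSEGV has been handled. */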
76
77
78 static TranslationBlock *tb_find_slow(target_ulong pc,
79 target_ulong cs_base,
80 uint64_t flags)
81 {
82 TranslationBlock *tb, **ptb1;
83 int code_gen_size;
84 unsigned int h;
85 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
86 uint8_t *tc_ptr;
87
88 spin_lock(&tb_lock);
89
90 tb_invalidated_flag = 0;
91
92 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
93
94 /* find translated block using physical mappings */
95 phys_pc = get_phys_addr_code(env, pc);
96 phys_page1 = phys_pc & TARGET_PAGE_MASK;
97 phys_page2 = -1;
98 h = tb_phys_hash_func(phys_pc);
99 ptb1 = &tb_phys_hash[h];
100 for(;;) {
101 tb = *ptb1;
102 if (!tb)
103 goto not_found;
104 if (tb->pc == pc &&
105 tb->page_addr[0] == phys_page1 &&
106 tb->cs_base == cs_base &&
107 tb->flags == flags) {
108 /* check next page if needed */
109 if (tb->page_addr[1] != -1) {
110 virt_page2 = (pc & TARGET_PAGE_MASK) +
111 TARGET_PAGE_SIZE;
112 phys_page2 = get_phys_addr_code(env, virt_page2);
113 if (tb->page_addr[1] == phys_page2)
114 goto found;
115 } else {
116 goto found;
117 }
118 }
119 ptb1 = &tb->phys_hash_next;
120 }
121 not_found:
122 /* if no translated code available, then translate it now */
123 tb = tb_alloc(pc);
124 if (!tb) {
125 /* flush must be done */
126 tb_flush(env);
127 /* cannot fail at this point */
128 tb = tb_alloc(pc);
129 /* don't forget to invalidate previous TB info */
130 tb_invalidated_flag = 1;
131 }
132 tc_ptr = code_gen_ptr;
133 tb->tc_ptr = tc_ptr;
134 tb->cs_base = cs_base;
135 tb->flags = flags;
136 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
137 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
138
139 /* check next page if needed */
140 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
141 phys_page2 = -1;
142 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
143 phys_page2 = get_phys_addr_code(env, virt_page2);
144 }
145 tb_link_phys(tb, phys_pc, phys_page2);
146
147 found:
148 /* we add the TB in the virtual pc hash table */
149 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
150 spin_unlock(&tb_lock);
151 return tb;
152 }
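/* Note: the hash table walked by tb_find_slow() is keyed on the
   physical PC, which is what allows translated blocks to be found and
   invalidated by physical page when guest code is modified; the
   tb_jmp_cache entry filled in just before returning is only a
   per-virtual-PC shortcut consulted first by tb_find_fast(). */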
153
154 static inline TranslationBlock *tb_find_fast(void)
155 {
156 TranslationBlock *tb;
157 target_ulong cs_base, pc;
158 uint64_t flags;
159
160 /* we record a subset of the CPU state. It will
161 always be the same before a given translated block
162 is executed. */
163 #if defined(TARGET_I386)
164 flags = env->hflags;
165 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
166 flags |= env->intercept;
167 cs_base = env->segs[R_CS].base;
168 pc = cs_base + env->eip;
169 #elif defined(TARGET_ARM)
170 flags = env->thumb | (env->vfp.vec_len << 1)
171 | (env->vfp.vec_stride << 4);
172 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
173 flags |= (1 << 6);
174 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
175 flags |= (1 << 7);
176 flags |= (env->condexec_bits << 8);
177 cs_base = 0;
178 pc = env->regs[15];
179 #elif defined(TARGET_SPARC)
180 #ifdef TARGET_SPARC64
181 // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
182 flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
183 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
184 #else
185 // FPU enable . Supervisor
186 flags = (env->psref << 4) | env->psrs;
187 #endif
188 cs_base = env->npc;
189 pc = env->pc;
190 #elif defined(TARGET_PPC)
191 flags = env->hflags;
192 cs_base = 0;
193 pc = env->nip;
194 #elif defined(TARGET_MIPS)
195 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
196 cs_base = 0;
197 pc = env->PC[env->current_tc];
198 #elif defined(TARGET_M68K)
199 flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
200 | (env->sr & SR_S) /* Bit 13 */
201 | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
202 cs_base = 0;
203 pc = env->pc;
204 #elif defined(TARGET_SH4)
205 flags = env->flags;
206 cs_base = 0;
207 pc = env->pc;
208 #elif defined(TARGET_ALPHA)
209 flags = env->ps;
210 cs_base = 0;
211 pc = env->pc;
212 #elif defined(TARGET_CRIS)
213 flags = 0;
214 cs_base = 0;
215 pc = env->pc;
216 #else
217 #error unsupported CPU
218 #endif
219 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
220 if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
221 tb->flags != flags, 0)) {
222 tb = tb_find_slow(pc, cs_base, flags);
223 /* Note: we do it here to avoid a gcc bug on Mac OS X when
224 doing it in tb_find_slow */
225 if (tb_invalidated_flag) {
226             /* as some TB could have been invalidated because
227                of memory exceptions while generating the code, the
228                previous TB may be stale, so T0 is cleared to break the chain */
229             T0 = 0;
230 }
231 }
232 return tb;
233 }
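/* Note: everything (besides pc and cs_base) that influences how code
   is translated must be folded into 'flags' above, otherwise two
   different CPU states could incorrectly share the same translated
   block. */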
234
235 #if defined(__sparc__) && !defined(HOST_SOLARIS)
236 #define BREAK_CHAIN tmp_T0 = 0
237 #else
238 #define BREAK_CHAIN T0 = 0
239 #endif
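/* T0 holds the address of the TB that has just been executed, with
   the index of the jump slot that was taken encoded in its low 2 bits
   (see the tb_add_jump() call in the main loop below). BREAK_CHAIN
   clears it so that the next TB is not chained to the previous one
   when the program flow was changed asynchronously. */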
240
241 /* main execution loop */
242
243 int cpu_exec(CPUState *env1)
244 {
245 #define DECLARE_HOST_REGS 1
246 #include "hostregs_helper.h"
247 #if defined(TARGET_SPARC)
248 #if defined(reg_REGWPTR)
249 uint32_t *saved_regwptr;
250 #endif
251 #endif
252 #if defined(__sparc__) && !defined(HOST_SOLARIS)
253 int saved_i7;
254 target_ulong tmp_T0;
255 #endif
256 int ret, interrupt_request;
257 void (*gen_func)(void);
258 TranslationBlock *tb;
259 uint8_t *tc_ptr;
260
261 if (cpu_halted(env1) == EXCP_HALTED)
262 return EXCP_HALTED;
263
264 cpu_single_env = env1;
265
266 /* first we save global registers */
267 #define SAVE_HOST_REGS 1
268 #include "hostregs_helper.h"
269 env = env1;
270 #if defined(__sparc__) && !defined(HOST_SOLARIS)
271 /* we also save i7 because longjmp may not restore it */
272 asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
273 #endif
274
275 env_to_regs();
276 #if defined(TARGET_I386)
277 /* put eflags in CPU temporary format */
278 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
279 DF = 1 - (2 * ((env->eflags >> 10) & 1));
280 CC_OP = CC_OP_EFLAGS;
281 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
282 #elif defined(TARGET_SPARC)
283 #if defined(reg_REGWPTR)
284 saved_regwptr = REGWPTR;
285 #endif
286 #elif defined(TARGET_M68K)
287 env->cc_op = CC_OP_FLAGS;
288 env->cc_dest = env->sr & 0xf;
289 env->cc_x = (env->sr >> 4) & 1;
290 #elif defined(TARGET_ALPHA)
291 #elif defined(TARGET_ARM)
292 #elif defined(TARGET_PPC)
293 #elif defined(TARGET_MIPS)
294 #elif defined(TARGET_SH4)
295 #elif defined(TARGET_CRIS)
296 /* XXXXX */
297 #else
298 #error unsupported target CPU
299 #endif
300 env->exception_index = -1;
301
302 /* prepare setjmp context for exception handling */
303 for(;;) {
304 if (setjmp(env->jmp_env) == 0) {
305 env->current_tb = NULL;
306 /* if an exception is pending, we execute it here */
307 if (env->exception_index >= 0) {
308 if (env->exception_index >= EXCP_INTERRUPT) {
309 /* exit request from the cpu execution loop */
310 ret = env->exception_index;
311 break;
312 } else if (env->user_mode_only) {
313 /* if user mode only, we simulate a fake exception
314 which will be handled outside the cpu execution
315 loop */
316 #if defined(TARGET_I386)
317 do_interrupt_user(env->exception_index,
318 env->exception_is_int,
319 env->error_code,
320 env->exception_next_eip);
321 #endif
322 ret = env->exception_index;
323 break;
324 } else {
325 #if defined(TARGET_I386)
326 /* simulate a real cpu exception. On i386, it can
327 trigger new exceptions, but we do not handle
328 double or triple faults yet. */
329 do_interrupt(env->exception_index,
330 env->exception_is_int,
331 env->error_code,
332 env->exception_next_eip, 0);
333 /* successfully delivered */
334 env->old_exception = -1;
335 #elif defined(TARGET_PPC)
336 do_interrupt(env);
337 #elif defined(TARGET_MIPS)
338 do_interrupt(env);
339 #elif defined(TARGET_SPARC)
340 do_interrupt(env->exception_index);
341 #elif defined(TARGET_ARM)
342 do_interrupt(env);
343 #elif defined(TARGET_SH4)
344 do_interrupt(env);
345 #elif defined(TARGET_ALPHA)
346 do_interrupt(env);
347 #elif defined(TARGET_CRIS)
348 do_interrupt(env);
349 #elif defined(TARGET_M68K)
350 do_interrupt(0);
351 #endif
352 }
353 env->exception_index = -1;
354 }
355 #ifdef USE_KQEMU
356 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
357 int ret;
358 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
359 ret = kqemu_cpu_exec(env);
360 /* put eflags in CPU temporary format */
361 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
362 DF = 1 - (2 * ((env->eflags >> 10) & 1));
363 CC_OP = CC_OP_EFLAGS;
364 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
365 if (ret == 1) {
366 /* exception */
367 longjmp(env->jmp_env, 1);
368 } else if (ret == 2) {
369 /* softmmu execution needed */
370 } else {
371 if (env->interrupt_request != 0) {
372 /* hardware interrupt will be executed just after */
373 } else {
374 /* otherwise, we restart */
375 longjmp(env->jmp_env, 1);
376 }
377 }
378 }
379 #endif
380
381 T0 = 0; /* force lookup of first TB */
382 for(;;) {
383 #if defined(__sparc__) && !defined(HOST_SOLARIS)
384             /* g1 can be modified by some libc functions */
385 tmp_T0 = T0;
386 #endif
387 interrupt_request = env->interrupt_request;
388 if (__builtin_expect(interrupt_request, 0)
389 #if defined(TARGET_I386)
390 && env->hflags & HF_GIF_MASK
391 #endif
392 ) {
393 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
394 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
395 env->exception_index = EXCP_DEBUG;
396 cpu_loop_exit();
397 }
398 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
399 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
400 if (interrupt_request & CPU_INTERRUPT_HALT) {
401 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
402 env->halted = 1;
403 env->exception_index = EXCP_HLT;
404 cpu_loop_exit();
405 }
406 #endif
407 #if defined(TARGET_I386)
408 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
409 !(env->hflags & HF_SMM_MASK)) {
410 svm_check_intercept(SVM_EXIT_SMI);
411 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
412 do_smm_enter();
413 BREAK_CHAIN;
414 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
415 (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
416 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
417 int intno;
418 svm_check_intercept(SVM_EXIT_INTR);
419 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
420 intno = cpu_get_pic_interrupt(env);
421 if (loglevel & CPU_LOG_TB_IN_ASM) {
422 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
423 }
424 do_interrupt(intno, 0, 0, 0, 1);
425 /* ensure that no TB jump will be modified as
426 the program flow was changed */
427 BREAK_CHAIN;
428 #if !defined(CONFIG_USER_ONLY)
429 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
430 (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
431 int intno;
432 /* FIXME: this should respect TPR */
433 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
434 svm_check_intercept(SVM_EXIT_VINTR);
435 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
436 if (loglevel & CPU_LOG_TB_IN_ASM)
437 fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
438 do_interrupt(intno, 0, 0, -1, 1);
439 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
440 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
441 BREAK_CHAIN;
442 #endif
443 }
444 #elif defined(TARGET_PPC)
445 #if 0
446 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
447 cpu_ppc_reset(env);
448 }
449 #endif
450 if (interrupt_request & CPU_INTERRUPT_HARD) {
451 ppc_hw_interrupt(env);
452 if (env->pending_interrupts == 0)
453 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
454 BREAK_CHAIN;
455 }
456 #elif defined(TARGET_MIPS)
457 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
458 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
459 (env->CP0_Status & (1 << CP0St_IE)) &&
460 !(env->CP0_Status & (1 << CP0St_EXL)) &&
461 !(env->CP0_Status & (1 << CP0St_ERL)) &&
462 !(env->hflags & MIPS_HFLAG_DM)) {
463 /* Raise it */
464 env->exception_index = EXCP_EXT_INTERRUPT;
465 env->error_code = 0;
466 do_interrupt(env);
467 BREAK_CHAIN;
468 }
469 #elif defined(TARGET_SPARC)
470 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
471 (env->psret != 0)) {
472 int pil = env->interrupt_index & 15;
473 int type = env->interrupt_index & 0xf0;
474
475 if (((type == TT_EXTINT) &&
476 (pil == 15 || pil > env->psrpil)) ||
477 type != TT_EXTINT) {
478 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
479 do_interrupt(env->interrupt_index);
480 env->interrupt_index = 0;
481 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
482 cpu_check_irqs(env);
483 #endif
484 BREAK_CHAIN;
485 }
486 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
487 //do_interrupt(0, 0, 0, 0, 0);
488 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
489 }
490 #elif defined(TARGET_ARM)
491 if (interrupt_request & CPU_INTERRUPT_FIQ
492 && !(env->uncached_cpsr & CPSR_F)) {
493 env->exception_index = EXCP_FIQ;
494 do_interrupt(env);
495 BREAK_CHAIN;
496 }
497 /* ARMv7-M interrupt return works by loading a magic value
498 into the PC. On real hardware the load causes the
499 return to occur. The qemu implementation performs the
500 jump normally, then does the exception return when the
501 CPU tries to execute code at the magic address.
502 This will cause the magic PC value to be pushed to
503                the stack if an interrupt occurred at the wrong time.
504 We avoid this by disabling interrupts when
505 pc contains a magic address. */
506 if (interrupt_request & CPU_INTERRUPT_HARD
507 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
508 || !(env->uncached_cpsr & CPSR_I))) {
509 env->exception_index = EXCP_IRQ;
510 do_interrupt(env);
511 BREAK_CHAIN;
512 }
513 #elif defined(TARGET_SH4)
514 /* XXXXX */
515 #elif defined(TARGET_ALPHA)
516 if (interrupt_request & CPU_INTERRUPT_HARD) {
517 do_interrupt(env);
518 BREAK_CHAIN;
519 }
520 #elif defined(TARGET_CRIS)
521 if (interrupt_request & CPU_INTERRUPT_HARD) {
522 do_interrupt(env);
523 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
524 BREAK_CHAIN;
525 }
526 #elif defined(TARGET_M68K)
527 if (interrupt_request & CPU_INTERRUPT_HARD
528 && ((env->sr & SR_I) >> SR_I_SHIFT)
529 < env->pending_level) {
530 /* Real hardware gets the interrupt vector via an
531 IACK cycle at this point. Current emulated
532 hardware doesn't rely on this, so we
533 provide/save the vector when the interrupt is
534 first signalled. */
535 env->exception_index = env->pending_vector;
536 do_interrupt(1);
537 BREAK_CHAIN;
538 }
539 #endif
540                /* Don't use the cached interrupt_request value,
541 do_interrupt may have updated the EXITTB flag. */
542 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
543 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
544 /* ensure that no TB jump will be modified as
545 the program flow was changed */
546 BREAK_CHAIN;
547 }
548 if (interrupt_request & CPU_INTERRUPT_EXIT) {
549 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
550 env->exception_index = EXCP_INTERRUPT;
551 cpu_loop_exit();
552 }
553 }
554 #ifdef DEBUG_EXEC
555 if ((loglevel & CPU_LOG_TB_CPU)) {
556 /* restore flags in standard format */
557 regs_to_env();
558 #if defined(TARGET_I386)
559 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
560 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
561 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
562 #elif defined(TARGET_ARM)
563 cpu_dump_state(env, logfile, fprintf, 0);
564 #elif defined(TARGET_SPARC)
565 REGWPTR = env->regbase + (env->cwp * 16);
566 env->regwptr = REGWPTR;
567 cpu_dump_state(env, logfile, fprintf, 0);
568 #elif defined(TARGET_PPC)
569 cpu_dump_state(env, logfile, fprintf, 0);
570 #elif defined(TARGET_M68K)
571 cpu_m68k_flush_flags(env, env->cc_op);
572 env->cc_op = CC_OP_FLAGS;
573 env->sr = (env->sr & 0xffe0)
574 | env->cc_dest | (env->cc_x << 4);
575 cpu_dump_state(env, logfile, fprintf, 0);
576 #elif defined(TARGET_MIPS)
577 cpu_dump_state(env, logfile, fprintf, 0);
578 #elif defined(TARGET_SH4)
579 cpu_dump_state(env, logfile, fprintf, 0);
580 #elif defined(TARGET_ALPHA)
581 cpu_dump_state(env, logfile, fprintf, 0);
582 #elif defined(TARGET_CRIS)
583 cpu_dump_state(env, logfile, fprintf, 0);
584 #else
585 #error unsupported target CPU
586 #endif
587 }
588 #endif
589 tb = tb_find_fast();
590 #ifdef DEBUG_EXEC
591 if ((loglevel & CPU_LOG_EXEC)) {
592 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
593 (long)tb->tc_ptr, tb->pc,
594 lookup_symbol(tb->pc));
595 }
596 #endif
597 #if defined(__sparc__) && !defined(HOST_SOLARIS)
598 T0 = tmp_T0;
599 #endif
600 /* see if we can patch the calling TB. When the TB
601 spans two pages, we cannot safely do a direct
602 jump. */
603 {
604 if (T0 != 0 &&
605 #if USE_KQEMU
606 (env->kqemu_enabled != 2) &&
607 #endif
608 tb->page_addr[1] == -1) {
609 spin_lock(&tb_lock);
610 tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
611 spin_unlock(&tb_lock);
612 }
613 }
614 tc_ptr = tb->tc_ptr;
615 env->current_tb = tb;
616 /* execute the generated code */
617 gen_func = (void *)tc_ptr;
618 #if defined(__sparc__)
619 __asm__ __volatile__("call %0\n\t"
620 "mov %%o7,%%i0"
621 : /* no outputs */
622 : "r" (gen_func)
623 : "i0", "i1", "i2", "i3", "i4", "i5",
624 "o0", "o1", "o2", "o3", "o4", "o5",
625 "l0", "l1", "l2", "l3", "l4", "l5",
626 "l6", "l7");
627 #elif defined(__arm__)
628 asm volatile ("mov pc, %0\n\t"
629 ".global exec_loop\n\t"
630 "exec_loop:\n\t"
631 : /* no outputs */
632 : "r" (gen_func)
633 : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
634 #elif defined(__ia64)
635 struct fptr {
636 void *ip;
637 void *gp;
638 } fp;
639
640 fp.ip = tc_ptr;
641 fp.gp = code_gen_buffer + 2 * (1 << 20);
642 (*(void (*)(void)) &fp)();
643 #else
644 gen_func();
645 #endif
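                /* control returns here when the generated code falls
                   through at the end of the TB; exceptions raised
                   inside it longjmp() back to the setjmp() above via
                   cpu_loop_exit() instead */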
646 env->current_tb = NULL;
647 /* reset soft MMU for next block (it can currently
648 only be set by a memory fault) */
649 #if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
650 if (env->hflags & HF_SOFTMMU_MASK) {
651 env->hflags &= ~HF_SOFTMMU_MASK;
652 /* do not allow linking to another block */
653 T0 = 0;
654 }
655 #endif
656 #if defined(USE_KQEMU)
657 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
658 if (kqemu_is_ok(env) &&
659 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
660 cpu_loop_exit();
661 }
662 #endif
663 } /* for(;;) */
664 } else {
665 env_to_regs();
666 }
667 } /* for(;;) */
668
669
670 #if defined(TARGET_I386)
671 /* restore flags in standard format */
672 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
673 #elif defined(TARGET_ARM)
674 /* XXX: Save/restore host fpu exception state?. */
675 #elif defined(TARGET_SPARC)
676 #if defined(reg_REGWPTR)
677 REGWPTR = saved_regwptr;
678 #endif
679 #elif defined(TARGET_PPC)
680 #elif defined(TARGET_M68K)
681 cpu_m68k_flush_flags(env, env->cc_op);
682 env->cc_op = CC_OP_FLAGS;
683 env->sr = (env->sr & 0xffe0)
684 | env->cc_dest | (env->cc_x << 4);
685 #elif defined(TARGET_MIPS)
686 #elif defined(TARGET_SH4)
687 #elif defined(TARGET_ALPHA)
688 #elif defined(TARGET_CRIS)
689 /* XXXXX */
690 #else
691 #error unsupported target CPU
692 #endif
693
694 /* restore global registers */
695 #if defined(__sparc__) && !defined(HOST_SOLARIS)
696 asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
697 #endif
698 #include "hostregs_helper.h"
699
700     /* fail safe: never use cpu_single_env outside cpu_exec() */
701 cpu_single_env = NULL;
702 return ret;
703 }
704
705 /* must only be called from the generated code as an exception can be
706 generated */
707 void tb_invalidate_page_range(target_ulong start, target_ulong end)
708 {
709     /* XXX: cannot enable it yet because it leads to an MMU exception
710        where NIP != read address on PowerPC */
711 #if 0
712 target_ulong phys_addr;
713 phys_addr = get_phys_addr_code(env, start);
714 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
715 #endif
716 }
717
718 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
719
720 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
721 {
722 CPUX86State *saved_env;
723
724 saved_env = env;
725 env = s;
726 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
727 selector &= 0xffff;
728 cpu_x86_load_seg_cache(env, seg_reg, selector,
729 (selector << 4), 0xffff, 0);
730 } else {
731 load_seg(seg_reg, selector);
732 }
733 env = saved_env;
734 }
735
736 void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
737 {
738 CPUX86State *saved_env;
739
740 saved_env = env;
741 env = s;
742
743 helper_fsave(ptr, data32);
744
745 env = saved_env;
746 }
747
748 void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
749 {
750 CPUX86State *saved_env;
751
752 saved_env = env;
753 env = s;
754
755 helper_frstor(ptr, data32);
756
757 env = saved_env;
758 }
759
760 #endif /* TARGET_I386 */
761
762 #if !defined(CONFIG_SOFTMMU)
763
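/* Each target-specific handle_cpu_signal() below follows the same
   pattern: first try to unprotect the faulting page (needed to detect
   writes to translated code), then let the target MMU code resolve
   the fault, and only if a real guest fault remains, restore the CPU
   state from the translated code and raise the guest exception. */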
764 #if defined(TARGET_I386)
765
766 /* 'pc' is the host PC at which the exception was raised. 'address' is
767 the effective address of the memory exception. 'is_write' is 1 if a
768    write caused the exception and 0 otherwise. 'old_set' is the
769 signal set which should be restored */
770 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
771 int is_write, sigset_t *old_set,
772 void *puc)
773 {
774 TranslationBlock *tb;
775 int ret;
776
777 if (cpu_single_env)
778 env = cpu_single_env; /* XXX: find a correct solution for multithread */
779 #if defined(DEBUG_SIGNAL)
780 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
781 pc, address, is_write, *(unsigned long *)old_set);
782 #endif
783 /* XXX: locking issue */
784 if (is_write && page_unprotect(h2g(address), pc, puc)) {
785 return 1;
786 }
787
788 /* see if it is an MMU fault */
789 ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
790 if (ret < 0)
791 return 0; /* not an MMU fault */
792 if (ret == 0)
793 return 1; /* the MMU fault was handled without causing real CPU fault */
794 /* now we have a real cpu fault */
795 tb = tb_find_pc(pc);
796 if (tb) {
797 /* the PC is inside the translated code. It means that we have
798 a virtual CPU fault */
799 cpu_restore_state(tb, env, pc, puc);
800 }
801 if (ret == 1) {
802 #if 0
803 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
804 env->eip, env->cr[2], env->error_code);
805 #endif
806 /* we restore the process signal mask as the sigreturn should
807 do it (XXX: use sigsetjmp) */
808 sigprocmask(SIG_SETMASK, old_set, NULL);
809 raise_exception_err(env->exception_index, env->error_code);
810 } else {
811 /* activate soft MMU for this block */
812 env->hflags |= HF_SOFTMMU_MASK;
813 cpu_resume_from_signal(env, puc);
814 }
815 /* never comes here */
816 return 1;
817 }
818
819 #elif defined(TARGET_ARM)
820 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
821 int is_write, sigset_t *old_set,
822 void *puc)
823 {
824 TranslationBlock *tb;
825 int ret;
826
827 if (cpu_single_env)
828 env = cpu_single_env; /* XXX: find a correct solution for multithread */
829 #if defined(DEBUG_SIGNAL)
830 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
831 pc, address, is_write, *(unsigned long *)old_set);
832 #endif
833 /* XXX: locking issue */
834 if (is_write && page_unprotect(h2g(address), pc, puc)) {
835 return 1;
836 }
837 /* see if it is an MMU fault */
838 ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
839 if (ret < 0)
840 return 0; /* not an MMU fault */
841 if (ret == 0)
842 return 1; /* the MMU fault was handled without causing real CPU fault */
843 /* now we have a real cpu fault */
844 tb = tb_find_pc(pc);
845 if (tb) {
846 /* the PC is inside the translated code. It means that we have
847 a virtual CPU fault */
848 cpu_restore_state(tb, env, pc, puc);
849 }
850 /* we restore the process signal mask as the sigreturn should
851 do it (XXX: use sigsetjmp) */
852 sigprocmask(SIG_SETMASK, old_set, NULL);
853 cpu_loop_exit();
854 }
855 #elif defined(TARGET_SPARC)
856 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
857 int is_write, sigset_t *old_set,
858 void *puc)
859 {
860 TranslationBlock *tb;
861 int ret;
862
863 if (cpu_single_env)
864 env = cpu_single_env; /* XXX: find a correct solution for multithread */
865 #if defined(DEBUG_SIGNAL)
866 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
867 pc, address, is_write, *(unsigned long *)old_set);
868 #endif
869 /* XXX: locking issue */
870 if (is_write && page_unprotect(h2g(address), pc, puc)) {
871 return 1;
872 }
873 /* see if it is an MMU fault */
874 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
875 if (ret < 0)
876 return 0; /* not an MMU fault */
877 if (ret == 0)
878 return 1; /* the MMU fault was handled without causing real CPU fault */
879 /* now we have a real cpu fault */
880 tb = tb_find_pc(pc);
881 if (tb) {
882 /* the PC is inside the translated code. It means that we have
883 a virtual CPU fault */
884 cpu_restore_state(tb, env, pc, puc);
885 }
886 /* we restore the process signal mask as the sigreturn should
887 do it (XXX: use sigsetjmp) */
888 sigprocmask(SIG_SETMASK, old_set, NULL);
889 cpu_loop_exit();
890 }
891 #elif defined (TARGET_PPC)
892 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
893 int is_write, sigset_t *old_set,
894 void *puc)
895 {
896 TranslationBlock *tb;
897 int ret;
898
899 if (cpu_single_env)
900 env = cpu_single_env; /* XXX: find a correct solution for multithread */
901 #if defined(DEBUG_SIGNAL)
902 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
903 pc, address, is_write, *(unsigned long *)old_set);
904 #endif
905 /* XXX: locking issue */
906 if (is_write && page_unprotect(h2g(address), pc, puc)) {
907 return 1;
908 }
909
910 /* see if it is an MMU fault */
911 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
912 if (ret < 0)
913 return 0; /* not an MMU fault */
914 if (ret == 0)
915 return 1; /* the MMU fault was handled without causing real CPU fault */
916
917 /* now we have a real cpu fault */
918 tb = tb_find_pc(pc);
919 if (tb) {
920 /* the PC is inside the translated code. It means that we have
921 a virtual CPU fault */
922 cpu_restore_state(tb, env, pc, puc);
923 }
924 if (ret == 1) {
925 #if 0
926 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
927 env->nip, env->error_code, tb);
928 #endif
929 /* we restore the process signal mask as the sigreturn should
930 do it (XXX: use sigsetjmp) */
931 sigprocmask(SIG_SETMASK, old_set, NULL);
932 do_raise_exception_err(env->exception_index, env->error_code);
933 } else {
934 /* activate soft MMU for this block */
935 cpu_resume_from_signal(env, puc);
936 }
937 /* never comes here */
938 return 1;
939 }
940
941 #elif defined(TARGET_M68K)
942 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
943 int is_write, sigset_t *old_set,
944 void *puc)
945 {
946 TranslationBlock *tb;
947 int ret;
948
949 if (cpu_single_env)
950 env = cpu_single_env; /* XXX: find a correct solution for multithread */
951 #if defined(DEBUG_SIGNAL)
952 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
953 pc, address, is_write, *(unsigned long *)old_set);
954 #endif
955 /* XXX: locking issue */
956 if (is_write && page_unprotect(address, pc, puc)) {
957 return 1;
958 }
959 /* see if it is an MMU fault */
960 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
961 if (ret < 0)
962 return 0; /* not an MMU fault */
963 if (ret == 0)
964 return 1; /* the MMU fault was handled without causing real CPU fault */
965 /* now we have a real cpu fault */
966 tb = tb_find_pc(pc);
967 if (tb) {
968 /* the PC is inside the translated code. It means that we have
969 a virtual CPU fault */
970 cpu_restore_state(tb, env, pc, puc);
971 }
972 /* we restore the process signal mask as the sigreturn should
973 do it (XXX: use sigsetjmp) */
974 sigprocmask(SIG_SETMASK, old_set, NULL);
975 cpu_loop_exit();
976 /* never comes here */
977 return 1;
978 }
979
980 #elif defined (TARGET_MIPS)
981 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
982 int is_write, sigset_t *old_set,
983 void *puc)
984 {
985 TranslationBlock *tb;
986 int ret;
987
988 if (cpu_single_env)
989 env = cpu_single_env; /* XXX: find a correct solution for multithread */
990 #if defined(DEBUG_SIGNAL)
991 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
992 pc, address, is_write, *(unsigned long *)old_set);
993 #endif
994 /* XXX: locking issue */
995 if (is_write && page_unprotect(h2g(address), pc, puc)) {
996 return 1;
997 }
998
999 /* see if it is an MMU fault */
1000 ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1001 if (ret < 0)
1002 return 0; /* not an MMU fault */
1003 if (ret == 0)
1004 return 1; /* the MMU fault was handled without causing real CPU fault */
1005
1006 /* now we have a real cpu fault */
1007 tb = tb_find_pc(pc);
1008 if (tb) {
1009 /* the PC is inside the translated code. It means that we have
1010 a virtual CPU fault */
1011 cpu_restore_state(tb, env, pc, puc);
1012 }
1013 if (ret == 1) {
1014 #if 0
1015 printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
1016 env->PC, env->error_code, tb);
1017 #endif
1018 /* we restore the process signal mask as the sigreturn should
1019 do it (XXX: use sigsetjmp) */
1020 sigprocmask(SIG_SETMASK, old_set, NULL);
1021 do_raise_exception_err(env->exception_index, env->error_code);
1022 } else {
1023 /* activate soft MMU for this block */
1024 cpu_resume_from_signal(env, puc);
1025 }
1026 /* never comes here */
1027 return 1;
1028 }
1029
1030 #elif defined (TARGET_SH4)
1031 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1032 int is_write, sigset_t *old_set,
1033 void *puc)
1034 {
1035 TranslationBlock *tb;
1036 int ret;
1037
1038 if (cpu_single_env)
1039 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1040 #if defined(DEBUG_SIGNAL)
1041 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1042 pc, address, is_write, *(unsigned long *)old_set);
1043 #endif
1044 /* XXX: locking issue */
1045 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1046 return 1;
1047 }
1048
1049 /* see if it is an MMU fault */
1050 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1051 if (ret < 0)
1052 return 0; /* not an MMU fault */
1053 if (ret == 0)
1054 return 1; /* the MMU fault was handled without causing real CPU fault */
1055
1056 /* now we have a real cpu fault */
1057 tb = tb_find_pc(pc);
1058 if (tb) {
1059 /* the PC is inside the translated code. It means that we have
1060 a virtual CPU fault */
1061 cpu_restore_state(tb, env, pc, puc);
1062 }
1063 #if 0
1064 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1065 env->nip, env->error_code, tb);
1066 #endif
1067 /* we restore the process signal mask as the sigreturn should
1068 do it (XXX: use sigsetjmp) */
1069 sigprocmask(SIG_SETMASK, old_set, NULL);
1070 cpu_loop_exit();
1071 /* never comes here */
1072 return 1;
1073 }
1074
1075 #elif defined (TARGET_ALPHA)
1076 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1077 int is_write, sigset_t *old_set,
1078 void *puc)
1079 {
1080 TranslationBlock *tb;
1081 int ret;
1082
1083 if (cpu_single_env)
1084 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1085 #if defined(DEBUG_SIGNAL)
1086 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1087 pc, address, is_write, *(unsigned long *)old_set);
1088 #endif
1089 /* XXX: locking issue */
1090 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1091 return 1;
1092 }
1093
1094 /* see if it is an MMU fault */
1095 ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1096 if (ret < 0)
1097 return 0; /* not an MMU fault */
1098 if (ret == 0)
1099 return 1; /* the MMU fault was handled without causing real CPU fault */
1100
1101 /* now we have a real cpu fault */
1102 tb = tb_find_pc(pc);
1103 if (tb) {
1104 /* the PC is inside the translated code. It means that we have
1105 a virtual CPU fault */
1106 cpu_restore_state(tb, env, pc, puc);
1107 }
1108 #if 0
1109 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1110 env->nip, env->error_code, tb);
1111 #endif
1112 /* we restore the process signal mask as the sigreturn should
1113 do it (XXX: use sigsetjmp) */
1114 sigprocmask(SIG_SETMASK, old_set, NULL);
1115 cpu_loop_exit();
1116 /* never comes here */
1117 return 1;
1118 }
1119 #elif defined (TARGET_CRIS)
1120 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1121 int is_write, sigset_t *old_set,
1122 void *puc)
1123 {
1124 TranslationBlock *tb;
1125 int ret;
1126
1127 if (cpu_single_env)
1128 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1129 #if defined(DEBUG_SIGNAL)
1130 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1131 pc, address, is_write, *(unsigned long *)old_set);
1132 #endif
1133 /* XXX: locking issue */
1134 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1135 return 1;
1136 }
1137
1138 /* see if it is an MMU fault */
1139 ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1140 if (ret < 0)
1141 return 0; /* not an MMU fault */
1142 if (ret == 0)
1143 return 1; /* the MMU fault was handled without causing real CPU fault */
1144
1145 /* now we have a real cpu fault */
1146 tb = tb_find_pc(pc);
1147 if (tb) {
1148 /* the PC is inside the translated code. It means that we have
1149 a virtual CPU fault */
1150 cpu_restore_state(tb, env, pc, puc);
1151 }
1152 #if 0
1153 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1154 env->nip, env->error_code, tb);
1155 #endif
1156 /* we restore the process signal mask as the sigreturn should
1157 do it (XXX: use sigsetjmp) */
1158 sigprocmask(SIG_SETMASK, old_set, NULL);
1159 cpu_loop_exit();
1160 /* never comes here */
1161 return 1;
1162 }
1163
1164 #else
1165 #error unsupported target CPU
1166 #endif
1167
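/* Host-specific cpu_signal_handler() implementations: each extracts
   the faulting host PC and a best-effort 'is_write' flag from the
   host signal context, then defers to handle_cpu_signal() above. */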
1168 #if defined(__i386__)
1169
1170 #if defined(__APPLE__)
1171 # include <sys/ucontext.h>
1172
1173 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1174 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1175 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1176 #else
1177 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1178 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1179 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1180 #endif
1181
1182 int cpu_signal_handler(int host_signum, void *pinfo,
1183 void *puc)
1184 {
1185 siginfo_t *info = pinfo;
1186 struct ucontext *uc = puc;
1187 unsigned long pc;
1188 int trapno;
1189
1190 #ifndef REG_EIP
1191 /* for glibc 2.1 */
1192 #define REG_EIP EIP
1193 #define REG_ERR ERR
1194 #define REG_TRAPNO TRAPNO
1195 #endif
1196 pc = EIP_sig(uc);
1197 trapno = TRAP_sig(uc);
1198 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1199 trapno == 0xe ?
1200 (ERROR_sig(uc) >> 1) & 1 : 0,
1201 &uc->uc_sigmask, puc);
1202 }
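/* Note: trap number 0xe is the i386 page fault; bit 1 of the page
   fault error code is set for write accesses, which is what the
   (ERROR_sig(uc) >> 1) & 1 expression extracts. */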
1203
1204 #elif defined(__x86_64__)
1205
1206 int cpu_signal_handler(int host_signum, void *pinfo,
1207 void *puc)
1208 {
1209 siginfo_t *info = pinfo;
1210 struct ucontext *uc = puc;
1211 unsigned long pc;
1212
1213 pc = uc->uc_mcontext.gregs[REG_RIP];
1214 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1215 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
1216 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1217 &uc->uc_sigmask, puc);
1218 }
1219
1220 #elif defined(__powerpc__)
1221
1222 /***********************************************************************
1223 * signal context platform-specific definitions
1224 * From Wine
1225 */
1226 #ifdef linux
1227 /* All Registers access - only for local access */
1228 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1229 /* Gpr Registers access */
1230 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1231 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1232 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1233 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1234 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1235 # define LR_sig(context) REG_sig(link, context) /* Link register */
1236 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1237 /* Float Registers access */
1238 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1239 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1240 /* Exception Registers access */
1241 # define DAR_sig(context) REG_sig(dar, context)
1242 # define DSISR_sig(context) REG_sig(dsisr, context)
1243 # define TRAP_sig(context) REG_sig(trap, context)
1244 #endif /* linux */
1245
1246 #ifdef __APPLE__
1247 # include <sys/ucontext.h>
1248 typedef struct ucontext SIGCONTEXT;
1249 /* All Registers access - only for local access */
1250 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1251 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1252 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1253 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1254 /* Gpr Registers access */
1255 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1256 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1257 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1258 # define CTR_sig(context) REG_sig(ctr, context)
1259 # define XER_sig(context)                     REG_sig(xer, context) /* User's integer exception register */
1260 # define LR_sig(context)                      REG_sig(lr, context)  /* Link register */
1261 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1262 /* Float Registers access */
1263 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1264 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1265 /* Exception Registers access */
1266 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1267 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1268 # define TRAP_sig(context)                    EXCEPREG_sig(exception, context) /* number of the PowerPC exception taken */
1269 #endif /* __APPLE__ */
1270
1271 int cpu_signal_handler(int host_signum, void *pinfo,
1272 void *puc)
1273 {
1274 siginfo_t *info = pinfo;
1275 struct ucontext *uc = puc;
1276 unsigned long pc;
1277 int is_write;
1278
1279 pc = IAR_sig(uc);
1280 is_write = 0;
1281 #if 0
1282 /* ppc 4xx case */
1283 if (DSISR_sig(uc) & 0x00800000)
1284 is_write = 1;
1285 #else
1286 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1287 is_write = 1;
1288 #endif
1289 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1290 is_write, &uc->uc_sigmask, puc);
1291 }
1292
1293 #elif defined(__alpha__)
1294
1295 int cpu_signal_handler(int host_signum, void *pinfo,
1296 void *puc)
1297 {
1298 siginfo_t *info = pinfo;
1299 struct ucontext *uc = puc;
1300     uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
1301 uint32_t insn = *pc;
1302 int is_write = 0;
1303
1304 /* XXX: need kernel patch to get write flag faster */
1305 switch (insn >> 26) {
1306 case 0x0d: // stw
1307 case 0x0e: // stb
1308 case 0x0f: // stq_u
1309 case 0x24: // stf
1310 case 0x25: // stg
1311 case 0x26: // sts
1312 case 0x27: // stt
1313 case 0x2c: // stl
1314 case 0x2d: // stq
1315 case 0x2e: // stl_c
1316 case 0x2f: // stq_c
1317 is_write = 1;
1318 }
1319
1320     return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
1321 is_write, &uc->uc_sigmask, puc);
1322 }
1323 #elif defined(__sparc__)
1324
1325 int cpu_signal_handler(int host_signum, void *pinfo,
1326 void *puc)
1327 {
1328 siginfo_t *info = pinfo;
1329 uint32_t *regs = (uint32_t *)(info + 1);
1330 void *sigmask = (regs + 20);
1331 unsigned long pc;
1332 int is_write;
1333 uint32_t insn;
1334
1335     /* XXX: is there a standard glibc define? */
1336 pc = regs[1];
1337 /* XXX: need kernel patch to get write flag faster */
1338 is_write = 0;
1339 insn = *(uint32_t *)pc;
1340 if ((insn >> 30) == 3) {
1341 switch((insn >> 19) & 0x3f) {
1342 case 0x05: // stb
1343 case 0x06: // sth
1344 case 0x04: // st
1345 case 0x07: // std
1346 case 0x24: // stf
1347 case 0x27: // stdf
1348 case 0x25: // stfsr
1349 is_write = 1;
1350 break;
1351 }
1352 }
1353 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1354 is_write, sigmask, NULL);
1355 }
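/* Note: on SPARC, instructions whose op field (bits 31:30) equals 3
   are loads and stores; the op3 field (bits 24:19) tested above
   selects the store variants. */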
1356
1357 #elif defined(__arm__)
1358
1359 int cpu_signal_handler(int host_signum, void *pinfo,
1360 void *puc)
1361 {
1362 siginfo_t *info = pinfo;
1363 struct ucontext *uc = puc;
1364 unsigned long pc;
1365 int is_write;
1366
1367 pc = uc->uc_mcontext.gregs[R15];
1368 /* XXX: compute is_write */
1369 is_write = 0;
1370 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1371 is_write,
1372 &uc->uc_sigmask, puc);
1373 }
1374
1375 #elif defined(__mc68000)
1376
1377 int cpu_signal_handler(int host_signum, void *pinfo,
1378 void *puc)
1379 {
1380 siginfo_t *info = pinfo;
1381 struct ucontext *uc = puc;
1382 unsigned long pc;
1383 int is_write;
1384
1385 pc = uc->uc_mcontext.gregs[16];
1386 /* XXX: compute is_write */
1387 is_write = 0;
1388 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1389 is_write,
1390 &uc->uc_sigmask, puc);
1391 }
1392
1393 #elif defined(__ia64)
1394
1395 #ifndef __ISR_VALID
1396 /* This ought to be in <bits/siginfo.h>... */
1397 # define __ISR_VALID 1
1398 #endif
1399
1400 int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1401 {
1402 siginfo_t *info = pinfo;
1403 struct ucontext *uc = puc;
1404 unsigned long ip;
1405 int is_write = 0;
1406
1407 ip = uc->uc_mcontext.sc_ip;
1408 switch (host_signum) {
1409 case SIGILL:
1410 case SIGFPE:
1411 case SIGSEGV:
1412 case SIGBUS:
1413 case SIGTRAP:
1414 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1415 /* ISR.W (write-access) is bit 33: */
1416 is_write = (info->si_isr >> 33) & 1;
1417 break;
1418
1419 default:
1420 break;
1421 }
1422 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1423 is_write,
1424 &uc->uc_sigmask, puc);
1425 }
1426
1427 #elif defined(__s390__)
1428
1429 int cpu_signal_handler(int host_signum, void *pinfo,
1430 void *puc)
1431 {
1432 siginfo_t *info = pinfo;
1433 struct ucontext *uc = puc;
1434 unsigned long pc;
1435 int is_write;
1436
1437 pc = uc->uc_mcontext.psw.addr;
1438 /* XXX: compute is_write */
1439 is_write = 0;
1440 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1441 is_write, &uc->uc_sigmask, puc);
1442 }
1443
1444 #elif defined(__mips__)
1445
1446 int cpu_signal_handler(int host_signum, void *pinfo,
1447 void *puc)
1448 {
1449 siginfo_t *info = pinfo;
1450 struct ucontext *uc = puc;
1451 greg_t pc = uc->uc_mcontext.pc;
1452 int is_write;
1453
1454 /* XXX: compute is_write */
1455 is_write = 0;
1456 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1457 is_write, &uc->uc_sigmask, puc);
1458 }
1459
1460 #else
1461
1462 #error host CPU specific signal handler needed
1463
1464 #endif
1465
1466 #endif /* !defined(CONFIG_SOFTMMU) */