/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

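    /* tcg_qemu_tb_exec() returns the address of the last executed TB with
       status flags in its two low bits; the value 2 means execution
       stopped before the end of the block (see the main loop below). */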
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        CPU_PC_FROM_TB(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

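/* Slow path of the TB lookup: search the hash table keyed on the physical
   address of the code, so translations are shared between virtual mappings.
   A TB that spans a page boundary also records the physical address of its
   second page, which must match as well. On a miss, translate a new TB. */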
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

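/* Fast path of the TB lookup: a direct-mapped cache indexed by a hash of
   the virtual PC. The pc/cs_base/flags comparison below guards against a
   stale entry; on a miss we fall back to tb_find_slow(). */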
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = ((env->pstate & PS_AM) << 2)
        | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
                    | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME))   /* Bits  0- 3 */
            | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR))  /* Bits 19-21 */
            | (env->sr & (SR_MD | SR_RB));                     /* Bits 29-30 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
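    /* hostregs_helper.h is included three times in this function with
       different macros defined: here to declare storage for the host
       registers, just below to save them, and at the end to restore them. */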
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

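            /* next_tb holds the address of the previously executed TB with
               a jump-slot index in its two low bits; it is used below to
               patch a direct jump from that TB into the next one (TB
               chaining). Zeroing it forces a fresh lookup instead. */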
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
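                /* Each pass delivers any pending interrupt for the target,
                   then finds the next TB and runs it. */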
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TBs may have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
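                        /* The low 16 bits of the decrementer count down
                           inside a TB; icount_extra holds the instructions
                           still owed beyond that, so refill at most 0xffff
                           at a time. */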
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
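    /* In real mode or vm86 mode the segment base is just selector << 4;
       otherwise go through the protected-mode segment loader. */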
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception, 0 otherwise. 'old_set' is the
   signal set which should be restored */
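/* Returns 1 when the fault has been handled (or re-raised as a guest
   exception) and 0 when it was not a guest MMU fault at all. */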
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
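    /* Trap 0x0e is the x86 page fault; bit 1 of the error code is set
       when the faulting access was a write. */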
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
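    /* As on i386: trap 0x0e is the page fault, error-code bit 1 flags a
       write access. */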
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)   /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
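    /* On a data fault, DSISR bit 0x02000000 flags a store; trap 0x400 is
       the instruction storage interrupt, where DSISR does not apply. */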
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
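    /* Bits 31:26 of an Alpha instruction hold the opcode; the cases below
       are the store opcodes. */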
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
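    /* SPARC instructions with the top two bits (op) equal to 3 are loads
       and stores; the op3 field in bits 24:19 selects the store opcodes
       matched below. */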
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */