/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;
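/* next_tb packs two values into one word: bits 1:0 hold the index of
   the jump slot to patch in the previously executed TB, the remaining
   bits hold that TB's address (see the tb_add_jump() call in
   cpu_exec() below). */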
static unsigned long next_tb;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

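/* Slow-path TB lookup: hash the physical address of the guest PC into
   tb_phys_hash and walk the collision chain; if no matching block
   exists, translate a new one.  A block that crosses a page boundary
   must also match on the physical address of its second page. */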
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}

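/* Fast-path TB lookup: build the per-target (pc, cs_base, flags) tuple
   that identifies a translated block, then probe the direct-mapped
   tb_jmp_cache and fall back to tb_find_slow() on a miss. */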
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            next_tb = 0;
        }
    }
    return tb;
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

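    /* Exceptions raised during translation or execution return here
       through cpu_loop_exit(), which longjmps back to the setjmp()
       below; each iteration of the outer loop therefore starts from a
       consistent register state. */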
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

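            /* Inner TB execution loop: pending interrupt requests are
               only checked here, between two translated blocks, never
               in the middle of one. */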
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                env = cpu_single_env;
#define env cpu_single_env
#endif
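                /* tcg_qemu_tb_exec() returns a word identifying the TB
                   and jump slot from which execution returned (or 0);
                   the chaining code above uses it to patch that slot
                   once the next block is known. */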
                next_tb = tcg_qemu_tb_exec(tc_ptr);
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it leads to an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

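/* User-mode only helpers: each temporarily swaps the global env
   pointer to the caller-supplied CPU state so that the target helper
   functions operate on it, then restores the previous value. */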
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

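/* Without a soft MMU, guest memory faults surface as host signals.
   The per-target handlers below decide whether a fault was a write to
   a page protected for self-modifying code detection, a guest MMU
   fault to be turned into a guest exception, or neither. */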
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

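/* Host-side handlers: cpu_signal_handler() extracts the faulting host
   PC (and, where the host context exposes it, whether the access was
   a write) from the signal context and forwards both to
   handle_cpu_signal() above. */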
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)   ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)    REG_sig(gpr[reg_num], context)
# define IAR_sig(context)             REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)             REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)             REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)             REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)              REG_sig(link, context) /* Link register */
# define CR_sig(context)              REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)  (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)           (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)             REG_sig(dar, context)
# define DSISR_sig(context)           REG_sig(dsisr, context)
# define TRAP_sig(context)            REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */