/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include <string.h>

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

/* translation settings */
int translation_settings = 0;

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()

#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents

static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif

void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand at this point because
       longjmp() restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

CPUTranslationSetting cpu_translation_settings[] = {
    { CPU_SETTING_NO_CACHE, "no-cache",
      "Do not use translation blocks cache (very slow!)" },
    { 0, NULL, NULL },
};

void cpu_set_translation_settings(int translation_flags)
{
    translation_settings = translation_flags;
}

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* Takes a comma-separated list of translation settings. Returns 0 on error. */
int cpu_str_to_translation_mask(const char *str)
{
    CPUTranslationSetting *setting;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(setting = cpu_translation_settings; setting->mask != 0; setting++) {
                mask |= setting->mask;
            }
        } else {
            for(setting = cpu_translation_settings; setting->mask != 0; setting++) {
                if (cmp1(p, p1 - p, setting->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= setting->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
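
/* Usage sketch (hypothetical caller): cpu_str_to_translation_mask("no-cache")
   yields CPU_SETTING_NO_CACHE, "all" ORs in every mask listed in
   cpu_translation_settings[], and any unrecognized name makes the whole
   call return 0. */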

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if (translation_settings & CPU_SETTING_NO_CACHE)
        goto not_found;

    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
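    /* Walk this hash bucket's chain: a cached TB matches only if pc,
       cs_base, flags and the physical address of both guest pages it
       covers agree with the current state. */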
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
        | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = 0;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    if (translation_settings & CPU_SETTING_NO_CACHE)
        tb = NULL;
    else
        tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
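    /* tb_jmp_cache is a small direct-mapped cache indexed by virtual PC;
       on a miss (or a pc/cs_base/flags mismatch) we fall back to the
       physically indexed hash table in tb_find_slow(). */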
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}

#define BREAK_CHAIN T0 = 0
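/* Clearing T0 prevents the chaining code below (tb_add_jump) from
   patching a direct jump from the previously executed TB, since the
   program flow has changed. */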

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            T0 = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        BREAK_CHAIN;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                        (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        BREAK_CHAIN;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            BREAK_CHAIN;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        BREAK_CHAIN;
                    }
#endif
                    /* Don't use the cached interrupt_request value;
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
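                /* T0 holds the address of the TB we just left, with its
                   low two bits encoding which of that TB's two jump slots
                   should be patched (hence the & ~3 / & 3 split below). */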
                {
                    if (T0 != 0 &&
#if USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
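                /* On IA-64 an indirect call goes through a function
                   descriptor (entry point plus gp), so build one for the
                   generated code before calling it. */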
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it leads to an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
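/* A note on the return convention used by the handlers below: 1 means
   the fault was handled here (or a guest exception was raised, which
   never returns); 0 means it was not a guest MMU fault and the caller
   must deal with the signal itself. */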
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
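    /* Trap 0xe is the x86 page fault; bit 1 (W) of the page-fault error
       code is set when the faulting access was a write. */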
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
1354 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1355 /* Float Registers access */
1356 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1357 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1358 /* Exception Registers access */
1359 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1360 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1361 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1362 #endif /* __APPLE__ */
1363
1364 int cpu_signal_handler(int host_signum, void *pinfo,
1365 void *puc)
1366 {
1367 siginfo_t *info = pinfo;
1368 struct ucontext *uc = puc;
1369 unsigned long pc;
1370 int is_write;
1371
1372 pc = IAR_sig(uc);
1373 is_write = 0;
1374 #if 0
1375 /* ppc 4xx case */
1376 if (DSISR_sig(uc) & 0x00800000)
1377 is_write = 1;
1378 #else
1379 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1380 is_write = 1;
1381 #endif
1382 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1383 is_write, &uc->uc_sigmask, puc);
1384 }
1385
1386 #elif defined(__alpha__)
1387
1388 int cpu_signal_handler(int host_signum, void *pinfo,
1389 void *puc)
1390 {
1391 siginfo_t *info = pinfo;
1392 struct ucontext *uc = puc;
1393 uint32_t *pc = uc->uc_mcontext.sc_pc;
1394 uint32_t insn = *pc;
1395 int is_write = 0;
1396
1397 /* XXX: need kernel patch to get write flag faster */
1398 switch (insn >> 26) {
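    /* bits 31:26 hold the Alpha major opcode field; every case listed
       below is a store instruction, so the fault must be a write */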
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
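        /* op field (bits 31:30) == 3 selects the SPARC load/store format;
           op3 (bits 24:19), tested below, picks out the store variants */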
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */