/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;
static unsigned long next_tb;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()

#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents

static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif
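
/* Editor's note: a minimal sketch of what the workaround above buys us.
   Because setjmp/longjmp are redefined to the sparc_* wrappers, every use
   in this file (e.g. in cpu_exec() below) transparently saves and restores
   the globals that affected glibc versions would clobber.  The function
   name here is hypothetical and the block is never built. */
#if 0
static int example_protected_setjmp(void)
{
    /* expands to sparc_setjmp(env->jmp_env), which brackets the real
       setjmp with SAVE_GLOBALS()/RESTORE_GLOBALS() */
    return setjmp(env->jmp_env);
}
#endif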

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved back to env by
       hand, because longjmp restores them to their setjmp-time values */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
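
/* Editor's note: an illustrative caller, mirroring the interrupt handling
   in cpu_exec() below: pending work is signalled by storing an exception
   index and unwinding back to the setjmp in cpu_exec().  Disabled sketch,
   hypothetical function name. */
#if 0
static void example_request_debug_exit(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();            /* does not return */
}
#endif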

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
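
/* Editor's note: a hedged sketch of the lookup structure walked above.
   tb_phys_hash is a chained hash table indexed by a hash of the physical
   PC; tb_phys_hash_func and CODE_GEN_PHYS_HASH_SIZE are defined elsewhere
   (exec-all.h), so the index computation shown here is an assumption, not
   the actual definition.  Disabled from the build. */
#if 0
static TranslationBlock *example_phys_hash_walk(target_ulong phys_pc)
{
    /* assumed: a power-of-two table size folded with a simple mask */
    unsigned int h = phys_pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
    TranslationBlock *tb;

    for (tb = tb_phys_hash[h]; tb != NULL; tb = tb->phys_hash_next) {
        if (tb->page_addr[0] == (phys_pc & TARGET_PAGE_MASK))
            return tb; /* candidate; caller still checks pc/cs_base/flags */
    }
    return NULL;
}
#endif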

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
        | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6   */
            | (env->sr & SR_S)            /* Bit  13  */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & U_FLAG;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            next_tb = 0;
        }
    }
    return tb;
}
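
/* Editor's note: tb_jmp_cache is a direct-mapped, per-CPU cache from the
   virtual PC to the last TranslationBlock seen for it; on a hit the slow
   physical-hash walk above is skipped entirely.  The hash below is only a
   plausible stand-in: the real tb_jmp_cache_hash_func and
   TB_JMP_CACHE_SIZE are defined in the common headers.  Disabled sketch. */
#if 0
static TranslationBlock *example_jmp_cache_lookup(target_ulong pc)
{
    /* assumed: power-of-two cache size with a simple masked index */
    TranslationBlock *tb = env->tb_jmp_cache[pc & (TB_JMP_CACHE_SIZE - 1)];

    if (tb && tb->pc == pc)
        return tb;      /* fast path */
    return NULL;        /* caller falls back to tb_find_slow() */
}
#endif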

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    unsigned long (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    && !(env->singlestep_enabled & SSTEP_NOIRQ)) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                        !(env->hflags & HF_NMI_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                        env->hflags |= HF_NMI_MASK;
                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                        (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        next_tb = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic
                       value into the PC.  On real hardware the load causes
                       the return to occur.  The qemu implementation
                       performs the jump normally, then does the exception
                       return when the CPU tries to execute code at the
                       magic address.  This would cause the magic PC value
                       to be pushed to the stack if an interrupt occurred
                       at the wrong time.  We avoid this by disabling
                       interrupts when pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#if USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
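                /* Editor's note: next_tb packs two values, which is why
                   the masks above work.  The encoding relies on
                   TranslationBlock pointers being at least 4-byte
                   aligned, leaving the low two bits free to carry the
                   index of the jump slot in the calling TB.  A disabled
                   sketch of the decoding: */
#if 0
                {
                    TranslationBlock *caller = (TranslationBlock *)(next_tb & ~3);
                    int jump_slot = next_tb & 3;
                    /* tb_add_jump(caller, jump_slot, tb) patches the
                       direct jump in 'caller' so it branches straight
                       into tb->tc_ptr on the next execution. */
                }
#endif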
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call      %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__hppa__)
                asm volatile ("ble  0(%%sr4,%1)\n"
                              "copy %%r31,%%r18\n"
                              "copy %%r28,%0\n"
                              : "=r" (next_tb)
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r4", "r5", "r6", "r7",
                                "r8", "r9", "r10", "r11", "r12", "r13",
                                "r18", "r19", "r20", "r21", "r22", "r23",
                                "r24", "r25", "r26", "r27", "r28", "r29",
                                "r30", "r31");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#elif defined(__i386)
                asm volatile ("sub $12, %%esp\n\t"
                              "push %%ebp\n\t"
                              "call *%1\n\t"
                              "pop %%ebp\n\t"
                              "add $12, %%esp\n\t"
                              : "=a" (next_tb)
                              : "a" (gen_func)
                              : "ebx", "ecx", "edx", "esi", "edi", "cc",
                                "memory");
#elif defined(__x86_64__)
                asm volatile ("sub $8, %%rsp\n\t"
                              "push %%rbp\n\t"
                              "call *%1\n\t"
                              "pop %%rbp\n\t"
                              "add $8, %%rsp\n\t"
                              : "=a" (next_tb)
                              : "a" (gen_func)
                              : "rbx", "rcx", "rdx", "rsi", "rdi", "r8", "r9",
                                "r10", "r11", "r12", "r13", "r14", "r15", "cc",
                                "memory");
#else
                next_tb = gen_func();
#endif
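                /* Editor's note: each host variant above implements the
                   contract that the portable fallback makes explicit:
                   call into the translated code at tc_ptr and receive
                   back the packed (TB pointer | jump-slot) word used for
                   chaining, or 0 when no chaining is possible.  The
                   inline asm versions exist to pin the exact calling
                   convention and clobber list the code generator
                   assumes; the SPARC and ARM variants declare no C
                   output operand and appear to rely on fixed register
                   conventions instead. */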
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    next_tb = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it leads to an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
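/* Editor's note on the return contract, inferred from the code below:
   handle_cpu_signal() returns 1 when the fault was fully handled here
   (the page was unprotected, or the target MMU absorbed the fault) and 0
   when it was not an MMU fault at all, so the caller must treat the
   signal as a genuine host fault.  On a real guest fault it does not
   return normally: it longjmps back into cpu_exec(). */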
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
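
/* Editor's note: below is the host-side half of the fault path.  Each
   host architecture provides a cpu_signal_handler() that extracts the
   faulting host PC (and a write flag, where the host context exposes
   one) from the signal frame and forwards them to handle_cpu_signal()
   above. */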

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
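
/* Editor's note: trap 0xe is the x86 page fault.  In the architectural
   page-fault error code, bit 0 is the "present" flag and bit 1 is the
   "write" flag, hence the (ERROR_sig(uc) >> 1) & 1 above to derive
   is_write; non-page-fault traps conservatively pass 0. */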

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)             ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)        ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)        ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)          ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.arm_pc;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */