/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

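/* Saving/restoring the emulator's global (host-register-mapped)
   variables is normally a no-op; the real implementations below are
   only needed to work around buggy sparc glibc versions. */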
#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()

#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents

static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

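/* Slow-path TB lookup: walk the physical-PC hash chain and, on a miss,
   translate a new block.  The hash is keyed on the physical address so
   that cached translations stay valid across changes of the
   virtual->physical mapping. */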
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB to the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}

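/* Fast-path TB lookup via the per-CPU virtual-PC jump cache.  The
   (pc, cs_base, flags) triple gathered below must cover every bit of
   CPU state the translator depends on, otherwise a stale block could
   be reused. */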
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
        | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = 0;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TBs could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}

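/* After a block exits, T0 holds the address of the TB we came from,
   with the outgoing jump slot index in its two low bits (see the
   tb_add_jump() call in cpu_exec() below).  Clearing T0 therefore
   suppresses TB chaining whenever control flow changed behind the
   translator's back. */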
#define BREAK_CHAIN T0 = 0

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    long (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

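    /* The outer loop below re-arms a setjmp() context on every
       iteration: cpu_loop_exit() and the fault handlers longjmp()
       back here so that a pending exception can be dispatched and
       execution restarted with a fresh TB lookup. */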
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

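            /* Inner loop: service any pending interrupt, look up (or
               translate) the TB for the current PC, optionally chain
               it to the block we just left, then run it. */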
            T0 = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        BREAK_CHAIN;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        BREAK_CHAIN;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            BREAK_CHAIN;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        BREAK_CHAIN;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (T0 != 0 &&
#if USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
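                /* A plain indirect call works on most hosts; the asm
                   variants below cope with hosts where it would not,
                   e.g. sparc register windows and %o7 handling, the
                   arm return label, and ia64 function descriptors
                   (an {ip, gp} pair rather than a raw code pointer). */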
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                T0 = gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

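/* These helpers are called from outside the execution loop, so each
   one swaps the global 'env' to the given CPU state and restores it
   before returning. */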
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

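/* User-mode fault handling: each target's handle_cpu_signal() first
   tries page_unprotect() for writes to pages QEMU write-protected in
   order to catch self-modifying code, then asks the target MMU code
   whether the access faults in the guest.  Returning 1 means the
   fault was handled (or converted into a guest exception, which
   longjmps away); returning 0 means the host signal is genuine and
   must be dealt with by the caller. */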
#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

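/* Host-side entry points: one cpu_signal_handler() per host CPU,
   decoding the host ucontext/siginfo to recover the faulting PC, the
   faulting address and, where the host reports it, whether the access
   was a write, before handing off to handle_cpu_signal() above. */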
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
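    /* Trap 0xe is the x86 page fault; bit 1 of its error code is set
       for write accesses.  Other traps are treated as reads. */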
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
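    /* DSISR bit 0x02000000 flags a store access; vector 0x400 is the
       instruction storage interrupt, which is never a write. */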
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */