/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

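/* set when a TB may have been invalidated (e.g. by tb_flush()) while
   code generation was in progress; tb_find_slow() checks it so that a
   stale chaining pointer held in T0 is not reused */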
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#if defined(TARGET_ARM) || defined(TARGET_SPARC)
/* XXX: unify with i386 target */
void cpu_loop_exit(void)
{
    longjmp(env->jmp_env, 1);
}
#endif
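/* reg_T2 marks targets whose T2 temporary is kept in a host register and
   must therefore be saved/restored around cpu_exec(); SPARC targets leave
   it undefined */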
#ifndef TARGET_SPARC
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

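/* Slow-path TB lookup: hash the physical PC and walk the physical hash
   chain for a block matching pc/cs_base/flags (also checking the second
   page for blocks that span a page boundary). If none exists, translate
   the guest code now and link the new TB into the physical page tables. */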
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      unsigned int flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        T0 = 0;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    if (tb_invalidated_flag) {
        /* as some TB could have been invalidated because
           of memory exceptions while generating the code, we
           must recompute the hash index here */
        T0 = 0;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    unsigned int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    flags = (env->pstate << 2) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    flags = env->psrs | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1);
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) |
        (msr_se << MSR_SE) | (msr_le << MSR_LE);
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAGS_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC;
#else
#error unsupported CPU
#endif
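    /* fast path: a direct-mapped cache indexed by the virtual PC; fall
       back to the physical hash lookup only on a miss or a CPU state
       mismatch */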
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}


/* main execution loop */
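/* Runs translated code for env1 until something forces an exit, then
   returns the exception index: e.g. EXCP_INTERRUPT for an external exit
   request, or EXCP_HALTED when the CPU is halted with no interrupt
   pending. A minimal caller sketch (assuming an initialized CPUState):

       for (;;) {
           int ret = cpu_exec(env);
           ... handle ret, deliver interrupts, run timers ...
       }
 */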
int cpu_exec(CPUState *env1)
{
    int saved_T0, saved_T1;
#if defined(reg_T2)
    int saved_T2;
#endif
    CPUState *saved_env;
#if defined(TARGET_I386)
#ifdef reg_EAX
    int saved_EAX;
#endif
#ifdef reg_ECX
    int saved_ECX;
#endif
#ifdef reg_EDX
    int saved_EDX;
#endif
#ifdef reg_EBX
    int saved_EBX;
#endif
#ifdef reg_ESP
    int saved_ESP;
#endif
#ifdef reg_EBP
    int saved_EBP;
#endif
#ifdef reg_ESI
    int saved_ESI;
#endif
#ifdef reg_EDI
    int saved_EDI;
#endif
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
#ifdef __sparc__
    int saved_i7, tmp_T0;
#endif
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

#if defined(TARGET_I386)
    /* handle exit of HALTED state */
    if (env1->hflags & HF_HALTED_MASK) {
        /* disable halt condition */
        if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env1->eflags & IF_MASK)) {
            env1->hflags &= ~HF_HALTED_MASK;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_PPC)
    if (env1->halted) {
        if (env1->msr[MSR_EE] &&
            (env1->interrupt_request &
             (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER))) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_ARM)
    if (env1->halted) {
        /* An interrupt wakes the CPU even if the I and F CPSR bits are
           set.  */
        if (env1->interrupt_request
            & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_MIPS)
    if (env1->halted) {
        if (env1->interrupt_request &
            (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#endif

    cpu_single_env = env1;

    /* first we save global registers */
    saved_env = env;
    env = env1;
    saved_T0 = T0;
    saved_T1 = T1;
#if defined(reg_T2)
    saved_T2 = T2;
#endif
#ifdef __sparc__
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif

#if defined(TARGET_I386)
#ifdef reg_EAX
    saved_EAX = EAX;
#endif
#ifdef reg_ECX
    saved_ECX = ECX;
#endif
#ifdef reg_EDX
    saved_EDX = EDX;
#endif
#ifdef reg_EBX
    saved_EBX = EBX;
#endif
#ifdef reg_ESP
    saved_ESP = ESP;
#endif
#ifdef reg_EBP
    saved_EBP = EBP;
#endif
#ifdef reg_ESI
    saved_ESI = ESI;
#endif
#ifdef reg_EDI
    saved_EDI = EDI;
#endif

    env_to_regs();
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
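            /* kqemu fast path (when built with USE_KQEMU): run the
               current state under the kernel accelerator when possible;
               ret == 1 means an exception is pending, ret == 2 requests
               softmmu execution, anything else restarts the loop unless
               an interrupt must be serviced first */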
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            T0 = 0; /* force lookup of first TB */
            for(;;) {
#ifdef __sparc__
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
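                /* poll for pending interrupts once per TB; delivery is
                   target-specific */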
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)) {
#if defined(TARGET_I386)
                    /* if hardware interrupt pending, we execute it */
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#ifdef __sparc__
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (msr_ee != 0) {
                        if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                            /* Raise it */
                            env->exception_index = EXCP_EXTERNAL;
                            env->error_code = 0;
                            do_interrupt(env);
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#ifdef __sparc__
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) {
                            /* Raise it */
                            env->exception_index = EXCP_DECR;
                            env->error_code = 0;
                            do_interrupt(env);
                            env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
#ifdef __sparc__
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        }
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        (env->CP0_Status & env->CP0_Cause & 0x0000FF00) &&
                        !(env->hflags & MIPS_HFLAG_EXL) &&
                        !(env->hflags & MIPS_HFLAG_ERL) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#ifdef __sparc__
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#ifdef __sparc__
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_cpsr & CPSR_I)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                    }
#endif
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#ifdef __sparc__
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
                    /* restore flags in standard format */
#ifdef reg_EAX
                    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_EBX
                    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ECX
                    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
                    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_ESI
                    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
                    env->regs[R_EDI] = EDI;
#endif
#ifdef reg_EBP
                    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESP
                    env->regs[R_ESP] = ESP;
#endif
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
#ifdef __sparc__
                T0 = tmp_T0;
#endif
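                /* T0 carries the address of the TB we are coming from,
                   with the jump slot index (0 or 1) in its low two bits;
                   T0 == 0 means "do not chain" */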
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (T0 != 0 &&
                        tb->page_addr[1] == -1
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                        && (tb->cflags & CF_CODE_COPY) ==
                        (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
#endif
                        ) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
#if defined(USE_CODE_COPY)
                        /* propagates the FP use info */
                        ((TranslationBlock *)(T0 & ~3))->cflags |=
                            (tb->cflags & CF_FP_USED);
#endif
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
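                /* host-specific dispatch: some hosts need inline asm
                   around the call into generated code to pin registers
                   or provide a known return point (exec_loop) */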
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
                {
                    if (!(tb->cflags & CF_CODE_COPY)) {
                        if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
                            save_native_fp_state(env);
                        }
                        gen_func();
                    } else {
                        if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
                            restore_native_fp_state(env);
                        }
                        /* we work with native eflags */
                        CC_SRC = cc_table[CC_OP].compute_all();
                        CC_OP = CC_OP_EFLAGS;
                        asm(".globl exec_loop\n"
                            "\n"
                            "debug1:\n"
                            "    pushl %%ebp\n"
                            "    fs movl %10, %9\n"
                            "    fs movl %11, %%eax\n"
                            "    andl $0x400, %%eax\n"
                            "    fs orl %8, %%eax\n"
                            "    pushl %%eax\n"
                            "    popf\n"
                            "    fs movl %%esp, %12\n"
                            "    fs movl %0, %%eax\n"
                            "    fs movl %1, %%ecx\n"
                            "    fs movl %2, %%edx\n"
                            "    fs movl %3, %%ebx\n"
                            "    fs movl %4, %%esp\n"
                            "    fs movl %5, %%ebp\n"
                            "    fs movl %6, %%esi\n"
                            "    fs movl %7, %%edi\n"
                            "    fs jmp *%9\n"
                            "exec_loop:\n"
                            "    fs movl %%esp, %4\n"
                            "    fs movl %12, %%esp\n"
                            "    fs movl %%eax, %0\n"
                            "    fs movl %%ecx, %1\n"
                            "    fs movl %%edx, %2\n"
                            "    fs movl %%ebx, %3\n"
                            "    fs movl %%ebp, %5\n"
                            "    fs movl %%esi, %6\n"
                            "    fs movl %%edi, %7\n"
                            "    pushf\n"
                            "    popl %%eax\n"
                            "    movl %%eax, %%ecx\n"
                            "    andl $0x400, %%ecx\n"
                            "    shrl $9, %%ecx\n"
                            "    andl $0x8d5, %%eax\n"
                            "    fs movl %%eax, %8\n"
                            "    movl $1, %%eax\n"
                            "    subl %%ecx, %%eax\n"
                            "    fs movl %%eax, %11\n"
                            "    fs movl %9, %%ebx\n" /* get T0 value */
                            "    popl %%ebp\n"
                            :
                            : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
                              "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
                              "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
                              "a" (gen_func),
                              "m" (*(uint8_t *)offsetof(CPUState, df)),
                              "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
                            : "%ecx", "%edx"
                            );
                    }
                }
#elif defined(__ia64)
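                /* on ia64 an indirect call goes through a function
                   descriptor: a (code address, global pointer) pair */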
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
            }
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    }
#endif
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);

    /* restore global registers */
#ifdef reg_EAX
    EAX = saved_EAX;
#endif
#ifdef reg_ECX
    ECX = saved_ECX;
#endif
#ifdef reg_EDX
    EDX = saved_EDX;
#endif
#ifdef reg_EBX
    EBX = saved_EBX;
#endif
#ifdef reg_ESP
    ESP = saved_ESP;
#endif
#ifdef reg_EBP
    EBP = saved_EBP;
#endif
#ifdef reg_ESI
    ESI = saved_ESI;
#endif
#ifdef reg_EDI
    EDI = saved_EDI;
#endif
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#else
#error unsupported target CPU
#endif
#ifdef __sparc__
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
    T0 = saved_T0;
    T1 = saved_T1;
#if defined(reg_T2)
    T2 = saved_T2;
#endif
    env = saved_env;
    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it leads to an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave((target_ulong)ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor((target_ulong)ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
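/* Returns 1 when the fault was resolved (page unprotected, MMU fault
   handled, or a guest exception raised), 0 when the fault did not come
   from an emulated guest memory access. */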
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
                          struct ucontext *uc)
{
    TranslationBlock *tb;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    }
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);
}
#endif

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = uc->uc_mcontext.gregs[REG_EIP];
    trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
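    /* decode the faulting instruction: op field 3 (bits 31:30) selects
       memory instructions; the op3 field in bits 24:19 then identifies
       the store variants listed below */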
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
# define si_flags    _sifields._sigfault._si_pad0
#endif

int cpu_signal_handler(int host_signum, struct siginfo *info, void *puc)
{
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_flags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */