/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

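/* tb_invalidated_flag records that translated blocks may have been
   invalidated (e.g. by a TB cache flush in tb_find_slow()).  next_tb caches
   the previously executed TB together with the jump slot index in its two
   low bits; 0 disables direct block chaining. */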
int tb_invalidated_flag;
static unsigned long next_tb;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

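/* Slow path of the TB lookup: search the physical-address hash table for a
   block matching (pc, cs_base, flags) and translate the code at 'pc' now if
   none is found.  The result is also written back to the virtual-PC jump
   cache. */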
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}

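/* Fast path of the TB lookup: derive (pc, cs_base, flags) from the current
   CPU state and probe the virtual-PC jump cache, falling back to
   tb_find_slow() on a miss. */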
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            next_tb = 0;
        }
    }
    return tb;
}

/* main execution loop */

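/* cpu_exec() runs translated code for 'env1' until an exit request or an
   unhandled exception occurs.  The outer setjmp/for(;;) loop delivers
   pending guest exceptions; the inner loop services hardware interrupts,
   finds the next TB, chains it to the previous one when possible and
   executes it. */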
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

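            /* Inner execution loop: service pending hardware interrupts,
               look up the next TB, chain it to the previously executed TB
               when that is safe, and run the generated code. */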
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    && likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                               !(env->hflags & HF_NMI_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                        env->hflags |= HF_NMI_MASK;
                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        next_tb = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                env = cpu_single_env;
#define env cpu_single_env
#endif
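                /* The value returned by tcg_qemu_tb_exec() is used as the
                   chaining hint on the next iteration: tb_add_jump() above
                   treats it as a TB pointer with the jump slot index in the
                   two low bits, and 0 suppresses chaining. */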
                next_tb = tcg_qemu_tb_exec(tc_ptr);
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?. */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

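/* Helpers for the i386 user-mode emulation: each one operates on an explicit
   CPUX86State by temporarily switching the global 'env' pointer around the
   call. */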
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
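/* Returns 1 if the fault was handled (the page was unprotected or a guest
   exception was raised), and 0 if it was not an MMU fault and must be
   handled by the host. */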
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

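/* Host-specific signal handlers: each one extracts the faulting host PC and,
   where the host exposes it, whether the access was a write from the signal
   context, then forwards the fault to handle_cpu_signal(). */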
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)
# define XER_sig(context)                 REG_sig(xer, context) /* Link register */
# define LR_sig(context)                  REG_sig(lr, context)  /* User's integer exception register */
# define CR_sig(context)                  REG_sig(cr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.arm_pc;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */