/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

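/* Translated-block lookup happens in two stages.  tb_find_slow() hashes
   the guest PC's physical address into tb_phys_hash and walks the chain
   for a block whose pc/cs_base/flags match, generating a new block with
   cpu_gen_code() on a miss.  tb_find_fast() first consults the per-CPU
   tb_jmp_cache, which is indexed by the virtual PC, and only falls back
   to the slow path when that entry does not match the current state. */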
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

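    /* The nested for(;;)/setjmp pair below is the central exception
       dispatch point: cpu_loop_exit() and the exception helpers longjmp
       back to this setjmp, after which any pending exception_index is
       either delivered to the guest or returned to the caller before
       execution resumes. */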
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

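            /* next_tb holds the return value of the previously executed
               TB: the TB pointer with the taken jump slot index encoded
               in its two low bits (see the tb_add_jump() call below).
               It is reset to 0 whenever control flow changes behind the
               translator's back (interrupt/exception delivery, TB
               invalidation) so that no stale direct jump gets patched. */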
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
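                /* Translate/execute step: look up (or generate) the TB for
                   the current CPU state, chain it to the previously executed
                   TB when that is safe, then run the generated host code. */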
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                env = cpu_single_env;
#define env cpu_single_env
#endif
                next_tb = tcg_qemu_tb_exec(tc_ptr);
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

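/* Each target below provides a handle_cpu_signal() with the same shape:
   try page_unprotect() for writes to pages that hold translated code,
   then ask the target MMU-fault handler; if a real guest fault results,
   locate the TB containing the faulting host PC with tb_find_pc(), roll
   the CPU state back with cpu_restore_state(), restore the signal mask
   and either raise the guest exception or re-enter the main loop. */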
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

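/* The host-specific cpu_signal_handler() implementations below extract
   the faulting program counter and, where the host makes it available,
   a write-access indication from the signal ucontext, then forward the
   fault to handle_cpu_signal() together with the saved signal mask. */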
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)  ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */
1450#endif /* !defined(CONFIG_SOFTMMU) */