/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;
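/* next_tb holds the value returned by the last tcg_qemu_tb_exec() call:
   roughly, a pointer to the TB that just finished executing, with the
   index of the direct-jump slot it exited through encoded in the two
   low-order bits (see tb_add_jump() below).  A value of 0 means "no
   previous TB", which forces a fresh lookup and suppresses chaining. */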
static unsigned long next_tb;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
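    /* the rounding above aligns code_gen_ptr up to the next CODE_GEN_ALIGN
       boundary, so the following translation starts on an aligned host
       address */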

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}

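/* TB lookup is essentially two-level: tb_find_fast() first probes the
   per-CPU tb_jmp_cache, a small table indexed by a hash of the virtual PC.
   On a miss it falls back to tb_find_slow(), which searches the
   physically-indexed hash chain and, if nothing matches, translates the
   block on the spot. */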
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            next_tb = 0;
        }
    }
    return tb;
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
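                /* "Patching" here means TB chaining: tb_add_jump() rewrites
                   the direct-jump slot (next_tb & 3) of the TB that just ran
                   so that it branches straight to the freshly found TB,
                   letting later iterations skip this lookup entirely. */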
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                env = cpu_single_env;
#define env cpu_single_env
#endif
                next_tb = tcg_qemu_tb_exec(tc_ptr);
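                /* tcg_qemu_tb_exec() jumps into the generated host code and
                   may keep running chained TBs until one of them exits; the
                   value it returns is the chaining cookie described at the
                   next_tb declaration above. */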
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
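    /* Trap number 0xe is the x86 page-fault exception; bit 1 of the error
       code pushed by the CPU is set when the faulting access was a write,
       which is what the expression below extracts. */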
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)  ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)   REG_sig(gpr[reg_num], context)
# define IAR_sig(context)            REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)            REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)            REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)            REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)             REG_sig(link, context) /* Link register */
# define CR_sig(context)             REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)          (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)            REG_sig(dar, context)
# define DSISR_sig(context)          REG_sig(dsisr, context)
# define TRAP_sig(context)           REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)  /* Link register */
# define LR_sig(context)                 REG_sig(lr, context)   /* User's integer exception register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */