/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

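/* tb_invalidated_flag is set by tb_find_slow() whenever a TB had to be
   (re)generated after a tb_flush(), so callers know that previously
   cached TB pointers may be stale.  next_tb carries the return value of
   the last tcg_qemu_tb_exec() call: a pointer to the TB that just ran,
   with the index of the taken direct-jump slot encoded in its low two
   bits (see the tb_add_jump() call in cpu_exec() below). */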
int tb_invalidated_flag;
static unsigned long next_tb;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

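/* cpu_loop_exit() unwinds back to the setjmp() point in cpu_exec();
   callers are expected to set env->exception_index first so the main
   loop knows why execution stopped (see the CPU_INTERRUPT_* handling
   in cpu_exec() below). */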
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

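/* TB lookup works on two levels: tb_find_fast() first probes the
   direct-mapped env->tb_jmp_cache, indexed by a hash of the virtual PC;
   on a miss (or when flags/cs_base do not match) tb_find_slow() walks
   the tb_phys_hash chain keyed by the code's physical address, possibly
   translating a new block, and then refills the tb_jmp_cache entry. */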
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}

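/* tb_find_fast() recomputes the (pc, cs_base, flags) triple from the
   current CPU state for each target; a cached TB is only reused when
   all three match, since the same guest PC can need a different
   translation depending on CPU mode and flags. */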
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & U_FLAG;
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            next_tb = 0;
        }
    }
    return tb;
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    && likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                               !(env->hflags & HF_NMI_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                        env->hflags |= HF_NMI_MASK;
                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        next_tb = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
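                /* Note on chaining: next_tb still identifies the TB that
                   was executed last and which direct-jump slot (low two
                   bits) it left through; tb_add_jump() patches that slot
                   to branch straight to the new TB's generated code, so
                   the next time around the lookup above is skipped.
                   Chaining is skipped when the new TB spans two pages
                   (tb->page_addr[1] != -1), where a direct jump is not
                   safe. */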
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                env = cpu_single_env;
#define env cpu_single_env
#endif
                next_tb = tcg_qemu_tb_exec(tc_ptr);
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    next_tb = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

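/* Everything below is only compiled when the software MMU is disabled
   (user-mode emulation): guest memory is accessed directly through the
   host MMU, so faulting guest accesses arrive as host signals and are
   converted back into guest MMU faults by the handle_cpu_signal()
   variants that follow. */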
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception, 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

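/* Return convention shared by all handle_cpu_signal() variants below:
   0 means the fault was not a guest MMU fault and must be handled by
   the host as a real signal; 1 means it was handled here.  When a guest
   exception has to be raised, the function does not actually return:
   it longjmp()s back into cpu_exec() via raise_exception_err(),
   cpu_loop_exit() or cpu_resume_from_signal().  The per-target variants
   differ mainly in which cpu_*_handle_mmu_fault() routine they call. */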
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

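/* Host-side signal handlers: each host architecture below recovers the
   faulting host PC (and, where the host makes it available, a write
   flag) from the signal's siginfo/ucontext and forwards everything to
   handle_cpu_signal().  On x86 hosts, trap number 0xe is a page fault
   and bit 1 of the error code indicates a write access, which is how
   is_write is derived there. */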
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(link, context) /* Link register */
# define CR_sig(context)                 REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)              (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                REG_sig(dar, context)
# define DSISR_sig(context)              REG_sig(dsisr, context)
# define TRAP_sig(context)               REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)  /* Link register */
# define LR_sig(context)                 REG_sig(lr, context)   /* User's integer exception register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.arm_pc;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */