/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;
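/* Pointer to the last executed TB as returned by the generated code, with
   the jump slot index in its low two bits (see tb_add_jump() below); zero
   forces a fresh lookup and prevents chaining to the previous block. */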
static unsigned long next_tb;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()

#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents

static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

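/* Slow-path TB lookup: search the physical-PC hash table so that blocks are
   found independently of the virtual mapping, translating a new block if
   none matches. */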
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}

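/* Fast-path TB lookup through the per-CPU virtual-PC jump cache; falls back
   to tb_find_slow() when the cached entry does not match pc/cs_base/flags. */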
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & U_FLAG;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            next_tb = 0;
        }
    }
    return tb;
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    unsigned long (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    && !(env->singlestep_enabled & SSTEP_NOIRQ)) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                               !(env->hflags & HF_NMI_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                        env->hflags |= HF_NMI_MASK;
                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        next_tb = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__hppa__)
                asm volatile ("ble 0(%%sr4,%1)\n"
                              "copy %%r31,%%r18\n"
                              "copy %%r28,%0\n"
                              : "=r" (next_tb)
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r4", "r5", "r6", "r7",
                                "r8", "r9", "r10", "r11", "r12", "r13",
                                "r18", "r19", "r20", "r21", "r22", "r23",
                                "r24", "r25", "r26", "r27", "r28", "r29",
                                "r30", "r31");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#elif defined(__i386)
                asm volatile ("sub $12, %%esp\n\t"
                              "push %%ebp\n\t"
                              "call *%1\n\t"
                              "pop %%ebp\n\t"
                              "add $12, %%esp\n\t"
                              : "=a" (next_tb)
                              : "a" (gen_func)
                              : "ebx", "ecx", "edx", "esi", "edi", "cc",
                                "memory");
#elif defined(__x86_64__)
                asm volatile ("sub $8, %%rsp\n\t"
                              "push %%rbp\n\t"
                              "call *%1\n\t"
                              "pop %%rbp\n\t"
                              "add $8, %%rsp\n\t"
                              : "=a" (next_tb)
                              : "a" (gen_func)
                              : "rbx", "rcx", "rdx", "rsi", "rdi", "r8", "r9",
                                "r10", "r11", "r12", "r13", "r14", "r15", "cc",
                                "memory");
#else
                next_tb = gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    next_tb = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

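/* Everything below is only compiled without a soft MMU (user-mode emulation):
   the host-specific cpu_signal_handler() at the end of this file decodes a
   SIGSEGV and forwards it to the per-target handle_cpu_signal() defined here,
   which unprotects the faulting page, raises a guest exception, or resumes
   execution as appropriate. */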
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

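/* Host-specific signal decoding: each variant below extracts the faulting PC
   (and, where the host context provides it, whether the access was a write)
   from the signal frame before handing off to handle_cpu_signal(). */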
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)   ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)    REG_sig(gpr[reg_num], context)
# define IAR_sig(context)             REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)             REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)             REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)             REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)              REG_sig(link, context)  /* Link register */
# define CR_sig(context)              REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)  (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)           (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)             REG_sig(dar, context)
# define DSISR_sig(context)           REG_sig(dsisr, context)
# define TRAP_sig(context)            REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)
# define XER_sig(context)                 REG_sig(xer, context) /* Link register */
# define LR_sig(context)                  REG_sig(lr, context)  /* User's integer exception register */
# define CR_sig(context)                  REG_sig(cr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.arm_pc;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33: */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */