/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()

#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents

static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif

void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand at this point because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
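/* Usage sketch: code in this file raises a guest exception by setting
   env->exception_index (and, where relevant, env->error_code) before calling
   cpu_loop_exit(), which longjmp()s back to the setjmp() point in cpu_exec(),
   for example:

       env->exception_index = EXCP_DEBUG;
       cpu_loop_exit();
 */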

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = 0;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}
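/* Lookup summary (sketch): tb_find_fast() first consults the direct-mapped
   virtual-PC cache env->tb_jmp_cache[]; only on a miss, or when cs_base or
   flags differ, does it fall back to tb_find_slow(), which walks the
   physically indexed hash chain tb_phys_hash[] and, if nothing matches,
   translates a new block with cpu_gen_code(). */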

#define BREAK_CHAIN T0 = 0

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    long (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            T0 = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        BREAK_CHAIN;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        BREAK_CHAIN;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            BREAK_CHAIN;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        BREAK_CHAIN;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (T0 != 0 &&
#if USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                T0 = gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
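/* Caller-side sketch (illustrative; the real main loop lives outside this
   file): cpu_exec() returns an EXCP_* code that the caller dispatches on,
   roughly:

       ret = cpu_exec(env);
       if (ret == EXCP_DEBUG)
           ... handle a breakpoint / debug exception ...
       else if (ret == EXCP_HALTED || ret == EXCP_HLT)
           ... wait for an interrupt before running this CPU again ...
 */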

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

850
e4533c7a 851#elif defined(TARGET_ARM)
3fb2ded1 852static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
bf3e8bf1
FB
853 int is_write, sigset_t *old_set,
854 void *puc)
3fb2ded1 855{
68016c62
FB
856 TranslationBlock *tb;
857 int ret;
858
859 if (cpu_single_env)
860 env = cpu_single_env; /* XXX: find a correct solution for multithread */
861#if defined(DEBUG_SIGNAL)
5fafdf24 862 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
68016c62
FB
863 pc, address, is_write, *(unsigned long *)old_set);
864#endif
9f0777ed 865 /* XXX: locking issue */
53a5960a 866 if (is_write && page_unprotect(h2g(address), pc, puc)) {
9f0777ed
FB
867 return 1;
868 }
68016c62 869 /* see if it is an MMU fault */
6ebbf390 870 ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
68016c62
FB
871 if (ret < 0)
872 return 0; /* not an MMU fault */
873 if (ret == 0)
874 return 1; /* the MMU fault was handled without causing real CPU fault */
875 /* now we have a real cpu fault */
876 tb = tb_find_pc(pc);
877 if (tb) {
878 /* the PC is inside the translated code. It means that we have
879 a virtual CPU fault */
880 cpu_restore_state(tb, env, pc, puc);
881 }
882 /* we restore the process signal mask as the sigreturn should
883 do it (XXX: use sigsetjmp) */
884 sigprocmask(SIG_SETMASK, old_set, NULL);
885 cpu_loop_exit();
3fb2ded1 886}
93ac68bc
FB
887#elif defined(TARGET_SPARC)
888static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
bf3e8bf1
FB
889 int is_write, sigset_t *old_set,
890 void *puc)
93ac68bc 891{
68016c62
FB
892 TranslationBlock *tb;
893 int ret;
894
895 if (cpu_single_env)
896 env = cpu_single_env; /* XXX: find a correct solution for multithread */
897#if defined(DEBUG_SIGNAL)
5fafdf24 898 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
68016c62
FB
899 pc, address, is_write, *(unsigned long *)old_set);
900#endif
b453b70b 901 /* XXX: locking issue */
53a5960a 902 if (is_write && page_unprotect(h2g(address), pc, puc)) {
b453b70b
FB
903 return 1;
904 }
68016c62 905 /* see if it is an MMU fault */
6ebbf390 906 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
68016c62
FB
907 if (ret < 0)
908 return 0; /* not an MMU fault */
909 if (ret == 0)
910 return 1; /* the MMU fault was handled without causing real CPU fault */
911 /* now we have a real cpu fault */
912 tb = tb_find_pc(pc);
913 if (tb) {
914 /* the PC is inside the translated code. It means that we have
915 a virtual CPU fault */
916 cpu_restore_state(tb, env, pc, puc);
917 }
918 /* we restore the process signal mask as the sigreturn should
919 do it (XXX: use sigsetjmp) */
920 sigprocmask(SIG_SETMASK, old_set, NULL);
921 cpu_loop_exit();
93ac68bc 922}
67867308
FB
923#elif defined (TARGET_PPC)
924static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
bf3e8bf1
FB
925 int is_write, sigset_t *old_set,
926 void *puc)
67867308
FB
927{
928 TranslationBlock *tb;
ce09776b 929 int ret;
3b46e624 930
67867308
FB
931 if (cpu_single_env)
932 env = cpu_single_env; /* XXX: find a correct solution for multithread */
67867308 933#if defined(DEBUG_SIGNAL)
5fafdf24 934 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
67867308
FB
935 pc, address, is_write, *(unsigned long *)old_set);
936#endif
937 /* XXX: locking issue */
53a5960a 938 if (is_write && page_unprotect(h2g(address), pc, puc)) {
67867308
FB
939 return 1;
940 }
941
ce09776b 942 /* see if it is an MMU fault */
6ebbf390 943 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
ce09776b
FB
944 if (ret < 0)
945 return 0; /* not an MMU fault */
946 if (ret == 0)
947 return 1; /* the MMU fault was handled without causing real CPU fault */
948
67867308
FB
949 /* now we have a real cpu fault */
950 tb = tb_find_pc(pc);
951 if (tb) {
952 /* the PC is inside the translated code. It means that we have
953 a virtual CPU fault */
bf3e8bf1 954 cpu_restore_state(tb, env, pc, puc);
67867308 955 }
ce09776b 956 if (ret == 1) {
67867308 957#if 0
5fafdf24 958 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
ce09776b 959 env->nip, env->error_code, tb);
67867308
FB
960#endif
961 /* we restore the process signal mask as the sigreturn should
962 do it (XXX: use sigsetjmp) */
bf3e8bf1 963 sigprocmask(SIG_SETMASK, old_set, NULL);
9fddaa0c 964 do_raise_exception_err(env->exception_index, env->error_code);
ce09776b
FB
965 } else {
966 /* activate soft MMU for this block */
fbf9eeb3 967 cpu_resume_from_signal(env, puc);
ce09776b 968 }
67867308 969 /* never comes here */
e6e5906b
PB
970 return 1;
971}
972
973#elif defined(TARGET_M68K)
974static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
975 int is_write, sigset_t *old_set,
976 void *puc)
977{
978 TranslationBlock *tb;
979 int ret;
980
981 if (cpu_single_env)
982 env = cpu_single_env; /* XXX: find a correct solution for multithread */
983#if defined(DEBUG_SIGNAL)
5fafdf24 984 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
e6e5906b
PB
985 pc, address, is_write, *(unsigned long *)old_set);
986#endif
987 /* XXX: locking issue */
988 if (is_write && page_unprotect(address, pc, puc)) {
989 return 1;
990 }
991 /* see if it is an MMU fault */
6ebbf390 992 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
e6e5906b
PB
993 if (ret < 0)
994 return 0; /* not an MMU fault */
995 if (ret == 0)
996 return 1; /* the MMU fault was handled without causing real CPU fault */
997 /* now we have a real cpu fault */
998 tb = tb_find_pc(pc);
999 if (tb) {
1000 /* the PC is inside the translated code. It means that we have
1001 a virtual CPU fault */
1002 cpu_restore_state(tb, env, pc, puc);
1003 }
1004 /* we restore the process signal mask as the sigreturn should
1005 do it (XXX: use sigsetjmp) */
1006 sigprocmask(SIG_SETMASK, old_set, NULL);
1007 cpu_loop_exit();
1008 /* never comes here */
67867308
FB
1009 return 1;
1010}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                 REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                 REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                 REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                  REG_sig(link, context) /* Link register */
# define CR_sig(context)                  REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)               (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                 REG_sig(dar, context)
# define DSISR_sig(context)               REG_sig(dsisr, context)
# define TRAP_sig(context)                REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)
# define XER_sig(context)                 REG_sig(xer, context)  /* Link register */
# define LR_sig(context)                  REG_sig(lr, context)   /* User's integer exception register */
# define CR_sig(context)                  REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */