]> git.proxmox.com Git - mirror_qemu.git/blame - cpu-exec.c
Use correct memory types in do_physical_memory_save()
[mirror_qemu.git] / cpu-exec.c
CommitLineData
7d13299d
FB
1/*
2 * i386 emulator main execution loop
5fafdf24 3 *
66321a11 4 * Copyright (c) 2003-2005 Fabrice Bellard
7d13299d 5 *
3ef693a0
FB
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
7d13299d 10 *
3ef693a0
FB
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
7d13299d 15 *
3ef693a0
FB
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
7d13299d 19 */
e4533c7a 20#include "config.h"
93ac68bc 21#include "exec.h"
956034d7 22#include "disas.h"
7d13299d 23
fbf9eeb3
FB
24#if !defined(CONFIG_SOFTMMU)
25#undef EAX
26#undef ECX
27#undef EDX
28#undef EBX
29#undef ESP
30#undef EBP
31#undef ESI
32#undef EDI
33#undef EIP
34#include <signal.h>
35#include <sys/ucontext.h>
36#endif
37
36bdbe54
FB
38int tb_invalidated_flag;
39
dc99065b 40//#define DEBUG_EXEC
9de5e440 41//#define DEBUG_SIGNAL
7d13299d 42
66f1cdbd
BS
43#define SAVE_GLOBALS()
44#define RESTORE_GLOBALS()
45
46#if defined(__sparc__) && !defined(HOST_SOLARIS)
47#include <features.h>
48#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
49 ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
50// Work around ugly bugs in glibc that mangle global register contents
51
52static volatile void *saved_env;
53static volatile unsigned long saved_t0, saved_i7;
54#undef SAVE_GLOBALS
55#define SAVE_GLOBALS() do { \
56 saved_env = env; \
57 saved_t0 = T0; \
58 asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7)); \
59 } while(0)
60
61#undef RESTORE_GLOBALS
62#define RESTORE_GLOBALS() do { \
63 env = (void *)saved_env; \
64 T0 = saved_t0; \
65 asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7)); \
66 } while(0)
67
/* setjmp() wrapper that saves and restores the emulator's global
   register variables (env, T0, %i7) around the real setjmp call,
   working around the glibc register-clobbering bug described above. */
static int sparc_setjmp(jmp_buf buf)
{
    int rc;

    SAVE_GLOBALS();
    rc = setjmp(buf);
    RESTORE_GLOBALS();
    return rc;
}
77#undef setjmp
78#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)
79
/* longjmp() wrapper: stash the global register variables before the
   non-local jump so the matching sparc_setjmp() can restore them. */
static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
85#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
86#endif
87#endif
88
e4533c7a
FB
89void cpu_loop_exit(void)
90{
bfed01fc
TS
91 /* NOTE: the register at this point must be saved by hand because
92 longjmp restore them */
93 regs_to_env();
e4533c7a
FB
94 longjmp(env->jmp_env, 1);
95}
bfed01fc 96
e6e5906b 97#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
3475187d
FB
98#define reg_T2
99#endif
e4533c7a 100
fbf9eeb3
FB
101/* exit the current TB from a signal handler. The host registers are
102 restored in a state compatible with the CPU emulator
103 */
5fafdf24 104void cpu_resume_from_signal(CPUState *env1, void *puc)
fbf9eeb3
FB
105{
106#if !defined(CONFIG_SOFTMMU)
107 struct ucontext *uc = puc;
108#endif
109
110 env = env1;
111
112 /* XXX: restore cpu registers saved in host registers */
113
114#if !defined(CONFIG_SOFTMMU)
115 if (puc) {
116 /* XXX: use siglongjmp ? */
117 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
118 }
119#endif
120 longjmp(env->jmp_env, 1);
121}
122
8a40a180
FB
123static TranslationBlock *tb_find_slow(target_ulong pc,
124 target_ulong cs_base,
c068688b 125 uint64_t flags)
8a40a180
FB
126{
127 TranslationBlock *tb, **ptb1;
128 int code_gen_size;
129 unsigned int h;
130 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
131 uint8_t *tc_ptr;
3b46e624 132
8a40a180
FB
133 spin_lock(&tb_lock);
134
135 tb_invalidated_flag = 0;
3b46e624 136
8a40a180 137 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
3b46e624 138
8a40a180
FB
139 /* find translated block using physical mappings */
140 phys_pc = get_phys_addr_code(env, pc);
141 phys_page1 = phys_pc & TARGET_PAGE_MASK;
142 phys_page2 = -1;
143 h = tb_phys_hash_func(phys_pc);
144 ptb1 = &tb_phys_hash[h];
145 for(;;) {
146 tb = *ptb1;
147 if (!tb)
148 goto not_found;
5fafdf24 149 if (tb->pc == pc &&
8a40a180 150 tb->page_addr[0] == phys_page1 &&
5fafdf24 151 tb->cs_base == cs_base &&
8a40a180
FB
152 tb->flags == flags) {
153 /* check next page if needed */
154 if (tb->page_addr[1] != -1) {
5fafdf24 155 virt_page2 = (pc & TARGET_PAGE_MASK) +
8a40a180
FB
156 TARGET_PAGE_SIZE;
157 phys_page2 = get_phys_addr_code(env, virt_page2);
158 if (tb->page_addr[1] == phys_page2)
159 goto found;
160 } else {
161 goto found;
162 }
163 }
164 ptb1 = &tb->phys_hash_next;
165 }
166 not_found:
167 /* if no translated code available, then translate it now */
168 tb = tb_alloc(pc);
169 if (!tb) {
170 /* flush must be done */
171 tb_flush(env);
172 /* cannot fail at this point */
173 tb = tb_alloc(pc);
174 /* don't forget to invalidate previous TB info */
15388002 175 tb_invalidated_flag = 1;
8a40a180
FB
176 }
177 tc_ptr = code_gen_ptr;
178 tb->tc_ptr = tc_ptr;
179 tb->cs_base = cs_base;
180 tb->flags = flags;
66f1cdbd 181 SAVE_GLOBALS();
d07bde88 182 cpu_gen_code(env, tb, &code_gen_size);
66f1cdbd 183 RESTORE_GLOBALS();
8a40a180 184 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 185
8a40a180
FB
186 /* check next page if needed */
187 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
188 phys_page2 = -1;
189 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
190 phys_page2 = get_phys_addr_code(env, virt_page2);
191 }
192 tb_link_phys(tb, phys_pc, phys_page2);
3b46e624 193
8a40a180 194 found:
8a40a180
FB
195 /* we add the TB in the virtual pc hash table */
196 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
197 spin_unlock(&tb_lock);
198 return tb;
199}
200
201static inline TranslationBlock *tb_find_fast(void)
202{
203 TranslationBlock *tb;
204 target_ulong cs_base, pc;
c068688b 205 uint64_t flags;
8a40a180
FB
206
207 /* we record a subset of the CPU state. It will
208 always be the same before a given translated block
209 is executed. */
210#if defined(TARGET_I386)
211 flags = env->hflags;
212 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
0573fbfc 213 flags |= env->intercept;
8a40a180
FB
214 cs_base = env->segs[R_CS].base;
215 pc = cs_base + env->eip;
216#elif defined(TARGET_ARM)
217 flags = env->thumb | (env->vfp.vec_len << 1)
b5ff1b31
FB
218 | (env->vfp.vec_stride << 4);
219 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
220 flags |= (1 << 6);
40f137e1
PB
221 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
222 flags |= (1 << 7);
9ee6e8bb 223 flags |= (env->condexec_bits << 8);
8a40a180
FB
224 cs_base = 0;
225 pc = env->regs[15];
226#elif defined(TARGET_SPARC)
227#ifdef TARGET_SPARC64
a80dde08
FB
228 // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
229 flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
230 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
8a40a180 231#else
6d5f237a
BS
232 // FPU enable . Supervisor
233 flags = (env->psref << 4) | env->psrs;
8a40a180
FB
234#endif
235 cs_base = env->npc;
236 pc = env->pc;
237#elif defined(TARGET_PPC)
1527c87e 238 flags = env->hflags;
8a40a180
FB
239 cs_base = 0;
240 pc = env->nip;
241#elif defined(TARGET_MIPS)
56b19403 242 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
cc9442b9 243 cs_base = 0;
ead9360e 244 pc = env->PC[env->current_tc];
e6e5906b 245#elif defined(TARGET_M68K)
acf930aa
PB
246 flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
247 | (env->sr & SR_S) /* Bit 13 */
248 | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
e6e5906b
PB
249 cs_base = 0;
250 pc = env->pc;
fdf9b3e8 251#elif defined(TARGET_SH4)
823029f9
TS
252 flags = env->flags;
253 cs_base = 0;
fdf9b3e8 254 pc = env->pc;
eddf68a6
JM
255#elif defined(TARGET_ALPHA)
256 flags = env->ps;
257 cs_base = 0;
258 pc = env->pc;
f1ccf904
TS
259#elif defined(TARGET_CRIS)
260 flags = 0;
261 cs_base = 0;
262 pc = env->pc;
8a40a180
FB
263#else
264#error unsupported CPU
265#endif
bce61846 266 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
8a40a180
FB
267 if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
268 tb->flags != flags, 0)) {
269 tb = tb_find_slow(pc, cs_base, flags);
15388002
FB
270 /* Note: we do it here to avoid a gcc bug on Mac OS X when
271 doing it in tb_find_slow */
272 if (tb_invalidated_flag) {
273 /* as some TB could have been invalidated because
274 of memory exceptions while generating the code, we
275 must recompute the hash index here */
276 T0 = 0;
277 }
8a40a180
FB
278 }
279 return tb;
280}
281
497ad68c 282#define BREAK_CHAIN T0 = 0
8a40a180 283
7d13299d
FB
284/* main execution loop */
285
e4533c7a 286int cpu_exec(CPUState *env1)
7d13299d 287{
1057eaa7
PB
288#define DECLARE_HOST_REGS 1
289#include "hostregs_helper.h"
290#if defined(TARGET_SPARC)
3475187d
FB
291#if defined(reg_REGWPTR)
292 uint32_t *saved_regwptr;
293#endif
04369ff2 294#endif
8a40a180 295 int ret, interrupt_request;
57fec1fe 296 long (*gen_func)(void);
8a40a180 297 TranslationBlock *tb;
c27004ec 298 uint8_t *tc_ptr;
8c6939c0 299
bfed01fc
TS
300 if (cpu_halted(env1) == EXCP_HALTED)
301 return EXCP_HALTED;
5a1e3cfc 302
5fafdf24 303 cpu_single_env = env1;
6a00d601 304
7d13299d 305 /* first we save global registers */
1057eaa7
PB
306#define SAVE_HOST_REGS 1
307#include "hostregs_helper.h"
c27004ec 308 env = env1;
66f1cdbd 309 SAVE_GLOBALS();
e4533c7a 310
0d1a29f9 311 env_to_regs();
ecb644f4 312#if defined(TARGET_I386)
9de5e440 313 /* put eflags in CPU temporary format */
fc2b4c48
FB
314 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
315 DF = 1 - (2 * ((env->eflags >> 10) & 1));
9de5e440 316 CC_OP = CC_OP_EFLAGS;
fc2b4c48 317 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
93ac68bc 318#elif defined(TARGET_SPARC)
3475187d
FB
319#if defined(reg_REGWPTR)
320 saved_regwptr = REGWPTR;
321#endif
e6e5906b
PB
322#elif defined(TARGET_M68K)
323 env->cc_op = CC_OP_FLAGS;
324 env->cc_dest = env->sr & 0xf;
325 env->cc_x = (env->sr >> 4) & 1;
ecb644f4
TS
326#elif defined(TARGET_ALPHA)
327#elif defined(TARGET_ARM)
328#elif defined(TARGET_PPC)
6af0bf9c 329#elif defined(TARGET_MIPS)
fdf9b3e8 330#elif defined(TARGET_SH4)
f1ccf904 331#elif defined(TARGET_CRIS)
fdf9b3e8 332 /* XXXXX */
e4533c7a
FB
333#else
334#error unsupported target CPU
335#endif
3fb2ded1 336 env->exception_index = -1;
9d27abd9 337
7d13299d 338 /* prepare setjmp context for exception handling */
3fb2ded1
FB
339 for(;;) {
340 if (setjmp(env->jmp_env) == 0) {
ee8b7021 341 env->current_tb = NULL;
3fb2ded1
FB
342 /* if an exception is pending, we execute it here */
343 if (env->exception_index >= 0) {
344 if (env->exception_index >= EXCP_INTERRUPT) {
345 /* exit request from the cpu execution loop */
346 ret = env->exception_index;
347 break;
348 } else if (env->user_mode_only) {
349 /* if user mode only, we simulate a fake exception
9f083493 350 which will be handled outside the cpu execution
3fb2ded1 351 loop */
83479e77 352#if defined(TARGET_I386)
5fafdf24
TS
353 do_interrupt_user(env->exception_index,
354 env->exception_is_int,
355 env->error_code,
3fb2ded1 356 env->exception_next_eip);
83479e77 357#endif
3fb2ded1
FB
358 ret = env->exception_index;
359 break;
360 } else {
83479e77 361#if defined(TARGET_I386)
3fb2ded1
FB
362 /* simulate a real cpu exception. On i386, it can
363 trigger new exceptions, but we do not handle
364 double or triple faults yet. */
5fafdf24
TS
365 do_interrupt(env->exception_index,
366 env->exception_is_int,
367 env->error_code,
d05e66d2 368 env->exception_next_eip, 0);
678dde13
TS
369 /* successfully delivered */
370 env->old_exception = -1;
ce09776b
FB
371#elif defined(TARGET_PPC)
372 do_interrupt(env);
6af0bf9c
FB
373#elif defined(TARGET_MIPS)
374 do_interrupt(env);
e95c8d51 375#elif defined(TARGET_SPARC)
1a0c3292 376 do_interrupt(env->exception_index);
b5ff1b31
FB
377#elif defined(TARGET_ARM)
378 do_interrupt(env);
fdf9b3e8
FB
379#elif defined(TARGET_SH4)
380 do_interrupt(env);
eddf68a6
JM
381#elif defined(TARGET_ALPHA)
382 do_interrupt(env);
f1ccf904
TS
383#elif defined(TARGET_CRIS)
384 do_interrupt(env);
0633879f
PB
385#elif defined(TARGET_M68K)
386 do_interrupt(0);
83479e77 387#endif
3fb2ded1
FB
388 }
389 env->exception_index = -1;
5fafdf24 390 }
9df217a3
FB
391#ifdef USE_KQEMU
392 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
393 int ret;
394 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
395 ret = kqemu_cpu_exec(env);
396 /* put eflags in CPU temporary format */
397 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
398 DF = 1 - (2 * ((env->eflags >> 10) & 1));
399 CC_OP = CC_OP_EFLAGS;
400 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
401 if (ret == 1) {
402 /* exception */
403 longjmp(env->jmp_env, 1);
404 } else if (ret == 2) {
405 /* softmmu execution needed */
406 } else {
407 if (env->interrupt_request != 0) {
408 /* hardware interrupt will be executed just after */
409 } else {
410 /* otherwise, we restart */
411 longjmp(env->jmp_env, 1);
412 }
413 }
3fb2ded1 414 }
9df217a3
FB
415#endif
416
3fb2ded1
FB
417 T0 = 0; /* force lookup of first TB */
418 for(;;) {
66f1cdbd 419 SAVE_GLOBALS();
68a79315 420 interrupt_request = env->interrupt_request;
0573fbfc
TS
421 if (__builtin_expect(interrupt_request, 0)
422#if defined(TARGET_I386)
423 && env->hflags & HF_GIF_MASK
424#endif
425 ) {
6658ffb8
PB
426 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
427 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
428 env->exception_index = EXCP_DEBUG;
429 cpu_loop_exit();
430 }
a90b7318 431#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
f1ccf904 432 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
a90b7318
AZ
433 if (interrupt_request & CPU_INTERRUPT_HALT) {
434 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
435 env->halted = 1;
436 env->exception_index = EXCP_HLT;
437 cpu_loop_exit();
438 }
439#endif
68a79315 440#if defined(TARGET_I386)
3b21e03e
FB
441 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
442 !(env->hflags & HF_SMM_MASK)) {
0573fbfc 443 svm_check_intercept(SVM_EXIT_SMI);
3b21e03e
FB
444 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
445 do_smm_enter();
497ad68c 446 BREAK_CHAIN;
3b21e03e 447 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
0573fbfc 448 (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
3f337316 449 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
68a79315 450 int intno;
0573fbfc 451 svm_check_intercept(SVM_EXIT_INTR);
52621688 452 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
a541f297 453 intno = cpu_get_pic_interrupt(env);
f193c797 454 if (loglevel & CPU_LOG_TB_IN_ASM) {
68a79315
FB
455 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
456 }
d05e66d2 457 do_interrupt(intno, 0, 0, 0, 1);
907a5b26
FB
458 /* ensure that no TB jump will be modified as
459 the program flow was changed */
497ad68c 460 BREAK_CHAIN;
0573fbfc
TS
461#if !defined(CONFIG_USER_ONLY)
462 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
463 (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
464 int intno;
465 /* FIXME: this should respect TPR */
466 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
52621688 467 svm_check_intercept(SVM_EXIT_VINTR);
0573fbfc
TS
468 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
469 if (loglevel & CPU_LOG_TB_IN_ASM)
470 fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
471 do_interrupt(intno, 0, 0, -1, 1);
52621688
TS
472 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
473 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
497ad68c 474 BREAK_CHAIN;
907a5b26 475#endif
68a79315 476 }
ce09776b 477#elif defined(TARGET_PPC)
9fddaa0c
FB
478#if 0
479 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
480 cpu_ppc_reset(env);
481 }
482#endif
47103572 483 if (interrupt_request & CPU_INTERRUPT_HARD) {
e9df014c
JM
484 ppc_hw_interrupt(env);
485 if (env->pending_interrupts == 0)
486 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
497ad68c 487 BREAK_CHAIN;
ce09776b 488 }
6af0bf9c
FB
489#elif defined(TARGET_MIPS)
490 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
24c7b0e3 491 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
6af0bf9c 492 (env->CP0_Status & (1 << CP0St_IE)) &&
24c7b0e3
TS
493 !(env->CP0_Status & (1 << CP0St_EXL)) &&
494 !(env->CP0_Status & (1 << CP0St_ERL)) &&
6af0bf9c
FB
495 !(env->hflags & MIPS_HFLAG_DM)) {
496 /* Raise it */
497 env->exception_index = EXCP_EXT_INTERRUPT;
498 env->error_code = 0;
499 do_interrupt(env);
497ad68c 500 BREAK_CHAIN;
6af0bf9c 501 }
e95c8d51 502#elif defined(TARGET_SPARC)
66321a11
FB
503 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
504 (env->psret != 0)) {
505 int pil = env->interrupt_index & 15;
506 int type = env->interrupt_index & 0xf0;
507
508 if (((type == TT_EXTINT) &&
509 (pil == 15 || pil > env->psrpil)) ||
510 type != TT_EXTINT) {
511 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
512 do_interrupt(env->interrupt_index);
513 env->interrupt_index = 0;
327ac2e7
BS
514#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
515 cpu_check_irqs(env);
516#endif
497ad68c 517 BREAK_CHAIN;
66321a11 518 }
e95c8d51
FB
519 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
520 //do_interrupt(0, 0, 0, 0, 0);
521 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
a90b7318 522 }
b5ff1b31
FB
523#elif defined(TARGET_ARM)
524 if (interrupt_request & CPU_INTERRUPT_FIQ
525 && !(env->uncached_cpsr & CPSR_F)) {
526 env->exception_index = EXCP_FIQ;
527 do_interrupt(env);
497ad68c 528 BREAK_CHAIN;
b5ff1b31 529 }
9ee6e8bb
PB
530 /* ARMv7-M interrupt return works by loading a magic value
531 into the PC. On real hardware the load causes the
532 return to occur. The qemu implementation performs the
533 jump normally, then does the exception return when the
534 CPU tries to execute code at the magic address.
535 This will cause the magic PC value to be pushed to
536 the stack if an interrupt occured at the wrong time.
537 We avoid this by disabling interrupts when
538 pc contains a magic address. */
b5ff1b31 539 if (interrupt_request & CPU_INTERRUPT_HARD
9ee6e8bb
PB
540 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
541 || !(env->uncached_cpsr & CPSR_I))) {
b5ff1b31
FB
542 env->exception_index = EXCP_IRQ;
543 do_interrupt(env);
497ad68c 544 BREAK_CHAIN;
b5ff1b31 545 }
fdf9b3e8 546#elif defined(TARGET_SH4)
e96e2044
TS
547 if (interrupt_request & CPU_INTERRUPT_HARD) {
548 do_interrupt(env);
549 BREAK_CHAIN;
550 }
eddf68a6
JM
551#elif defined(TARGET_ALPHA)
552 if (interrupt_request & CPU_INTERRUPT_HARD) {
553 do_interrupt(env);
497ad68c 554 BREAK_CHAIN;
eddf68a6 555 }
f1ccf904
TS
556#elif defined(TARGET_CRIS)
557 if (interrupt_request & CPU_INTERRUPT_HARD) {
558 do_interrupt(env);
497ad68c 559 BREAK_CHAIN;
f1ccf904 560 }
0633879f
PB
561#elif defined(TARGET_M68K)
562 if (interrupt_request & CPU_INTERRUPT_HARD
563 && ((env->sr & SR_I) >> SR_I_SHIFT)
564 < env->pending_level) {
565 /* Real hardware gets the interrupt vector via an
566 IACK cycle at this point. Current emulated
567 hardware doesn't rely on this, so we
568 provide/save the vector when the interrupt is
569 first signalled. */
570 env->exception_index = env->pending_vector;
571 do_interrupt(1);
497ad68c 572 BREAK_CHAIN;
0633879f 573 }
68a79315 574#endif
9d05095e
FB
575 /* Don't use the cached interupt_request value,
576 do_interrupt may have updated the EXITTB flag. */
b5ff1b31 577 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
bf3e8bf1
FB
578 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
579 /* ensure that no TB jump will be modified as
580 the program flow was changed */
497ad68c 581 BREAK_CHAIN;
bf3e8bf1 582 }
68a79315
FB
583 if (interrupt_request & CPU_INTERRUPT_EXIT) {
584 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
585 env->exception_index = EXCP_INTERRUPT;
586 cpu_loop_exit();
587 }
3fb2ded1 588 }
7d13299d 589#ifdef DEBUG_EXEC
b5ff1b31 590 if ((loglevel & CPU_LOG_TB_CPU)) {
3fb2ded1 591 /* restore flags in standard format */
ecb644f4
TS
592 regs_to_env();
593#if defined(TARGET_I386)
3fb2ded1 594 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
7fe48483 595 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3fb2ded1 596 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
e4533c7a 597#elif defined(TARGET_ARM)
7fe48483 598 cpu_dump_state(env, logfile, fprintf, 0);
93ac68bc 599#elif defined(TARGET_SPARC)
3475187d
FB
600 REGWPTR = env->regbase + (env->cwp * 16);
601 env->regwptr = REGWPTR;
602 cpu_dump_state(env, logfile, fprintf, 0);
67867308 603#elif defined(TARGET_PPC)
7fe48483 604 cpu_dump_state(env, logfile, fprintf, 0);
e6e5906b
PB
605#elif defined(TARGET_M68K)
606 cpu_m68k_flush_flags(env, env->cc_op);
607 env->cc_op = CC_OP_FLAGS;
608 env->sr = (env->sr & 0xffe0)
609 | env->cc_dest | (env->cc_x << 4);
610 cpu_dump_state(env, logfile, fprintf, 0);
6af0bf9c
FB
611#elif defined(TARGET_MIPS)
612 cpu_dump_state(env, logfile, fprintf, 0);
fdf9b3e8
FB
613#elif defined(TARGET_SH4)
614 cpu_dump_state(env, logfile, fprintf, 0);
eddf68a6
JM
615#elif defined(TARGET_ALPHA)
616 cpu_dump_state(env, logfile, fprintf, 0);
f1ccf904
TS
617#elif defined(TARGET_CRIS)
618 cpu_dump_state(env, logfile, fprintf, 0);
e4533c7a 619#else
5fafdf24 620#error unsupported target CPU
e4533c7a 621#endif
3fb2ded1 622 }
7d13299d 623#endif
8a40a180 624 tb = tb_find_fast();
9d27abd9 625#ifdef DEBUG_EXEC
c1135f61 626 if ((loglevel & CPU_LOG_EXEC)) {
c27004ec
FB
627 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
628 (long)tb->tc_ptr, tb->pc,
629 lookup_symbol(tb->pc));
3fb2ded1 630 }
9d27abd9 631#endif
66f1cdbd 632 RESTORE_GLOBALS();
8a40a180
FB
633 /* see if we can patch the calling TB. When the TB
634 spans two pages, we cannot safely do a direct
635 jump. */
c27004ec 636 {
8a40a180 637 if (T0 != 0 &&
f32fc648
FB
638#if USE_KQEMU
639 (env->kqemu_enabled != 2) &&
640#endif
ec6338ba 641 tb->page_addr[1] == -1) {
3fb2ded1 642 spin_lock(&tb_lock);
c27004ec 643 tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
3fb2ded1
FB
644 spin_unlock(&tb_lock);
645 }
c27004ec 646 }
3fb2ded1 647 tc_ptr = tb->tc_ptr;
83479e77 648 env->current_tb = tb;
3fb2ded1
FB
649 /* execute the generated code */
650 gen_func = (void *)tc_ptr;
8c6939c0 651#if defined(__sparc__)
3fb2ded1
FB
652 __asm__ __volatile__("call %0\n\t"
653 "mov %%o7,%%i0"
654 : /* no outputs */
5fafdf24 655 : "r" (gen_func)
fdbb4691 656 : "i0", "i1", "i2", "i3", "i4", "i5",
faab7592 657 "o0", "o1", "o2", "o3", "o4", "o5",
fdbb4691
FB
658 "l0", "l1", "l2", "l3", "l4", "l5",
659 "l6", "l7");
8c6939c0 660#elif defined(__arm__)
3fb2ded1
FB
661 asm volatile ("mov pc, %0\n\t"
662 ".global exec_loop\n\t"
663 "exec_loop:\n\t"
664 : /* no outputs */
665 : "r" (gen_func)
666 : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
b8076a74
FB
667#elif defined(__ia64)
668 struct fptr {
669 void *ip;
670 void *gp;
671 } fp;
672
673 fp.ip = tc_ptr;
674 fp.gp = code_gen_buffer + 2 * (1 << 20);
675 (*(void (*)(void)) &fp)();
ae228531 676#else
57fec1fe 677 T0 = gen_func();
ae228531 678#endif
83479e77 679 env->current_tb = NULL;
4cbf74b6
FB
680 /* reset soft MMU for next block (it can currently
681 only be set by a memory fault) */
682#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
3f337316
FB
683 if (env->hflags & HF_SOFTMMU_MASK) {
684 env->hflags &= ~HF_SOFTMMU_MASK;
4cbf74b6
FB
685 /* do not allow linking to another block */
686 T0 = 0;
687 }
f32fc648
FB
688#endif
689#if defined(USE_KQEMU)
690#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
691 if (kqemu_is_ok(env) &&
692 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
693 cpu_loop_exit();
694 }
4cbf74b6 695#endif
50a518e3 696 } /* for(;;) */
3fb2ded1 697 } else {
0d1a29f9 698 env_to_regs();
7d13299d 699 }
3fb2ded1
FB
700 } /* for(;;) */
701
7d13299d 702
e4533c7a 703#if defined(TARGET_I386)
9de5e440 704 /* restore flags in standard format */
fc2b4c48 705 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
e4533c7a 706#elif defined(TARGET_ARM)
b7bcbe95 707 /* XXX: Save/restore host fpu exception state?. */
93ac68bc 708#elif defined(TARGET_SPARC)
3475187d
FB
709#if defined(reg_REGWPTR)
710 REGWPTR = saved_regwptr;
711#endif
67867308 712#elif defined(TARGET_PPC)
e6e5906b
PB
713#elif defined(TARGET_M68K)
714 cpu_m68k_flush_flags(env, env->cc_op);
715 env->cc_op = CC_OP_FLAGS;
716 env->sr = (env->sr & 0xffe0)
717 | env->cc_dest | (env->cc_x << 4);
6af0bf9c 718#elif defined(TARGET_MIPS)
fdf9b3e8 719#elif defined(TARGET_SH4)
eddf68a6 720#elif defined(TARGET_ALPHA)
f1ccf904 721#elif defined(TARGET_CRIS)
fdf9b3e8 722 /* XXXXX */
e4533c7a
FB
723#else
724#error unsupported target CPU
725#endif
1057eaa7
PB
726
727 /* restore global registers */
66f1cdbd 728 RESTORE_GLOBALS();
1057eaa7
PB
729#include "hostregs_helper.h"
730
6a00d601 731 /* fail safe : never use cpu_single_env outside cpu_exec() */
5fafdf24 732 cpu_single_env = NULL;
7d13299d
FB
733 return ret;
734}
6dbad63e 735
fbf9eeb3
FB
736/* must only be called from the generated code as an exception can be
737 generated */
738void tb_invalidate_page_range(target_ulong start, target_ulong end)
739{
dc5d0b3d
FB
740 /* XXX: cannot enable it yet because it yields to MMU exception
741 where NIP != read address on PowerPC */
742#if 0
fbf9eeb3
FB
743 target_ulong phys_addr;
744 phys_addr = get_phys_addr_code(env, start);
745 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
dc5d0b3d 746#endif
fbf9eeb3
FB
747}
748
1a18c71b 749#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
e4533c7a 750
6dbad63e
FB
751void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
752{
753 CPUX86State *saved_env;
754
755 saved_env = env;
756 env = s;
a412ac57 757 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
a513fe19 758 selector &= 0xffff;
5fafdf24 759 cpu_x86_load_seg_cache(env, seg_reg, selector,
c27004ec 760 (selector << 4), 0xffff, 0);
a513fe19 761 } else {
b453b70b 762 load_seg(seg_reg, selector);
a513fe19 763 }
6dbad63e
FB
764 env = saved_env;
765}
9de5e440 766
6f12a2a6 767void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
d0a1ffc9
FB
768{
769 CPUX86State *saved_env;
770
771 saved_env = env;
772 env = s;
3b46e624 773
6f12a2a6 774 helper_fsave(ptr, data32);
d0a1ffc9
FB
775
776 env = saved_env;
777}
778
6f12a2a6 779void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
d0a1ffc9
FB
780{
781 CPUX86State *saved_env;
782
783 saved_env = env;
784 env = s;
3b46e624 785
6f12a2a6 786 helper_frstor(ptr, data32);
d0a1ffc9
FB
787
788 env = saved_env;
789}
790
e4533c7a
FB
791#endif /* TARGET_I386 */
792
67b915a5
FB
793#if !defined(CONFIG_SOFTMMU)
794
3fb2ded1
FB
795#if defined(TARGET_I386)
796
b56dad1c 797/* 'pc' is the host PC at which the exception was raised. 'address' is
fd6ce8f6
FB
798 the effective address of the memory exception. 'is_write' is 1 if a
799 write caused the exception and otherwise 0'. 'old_set' is the
800 signal set which should be restored */
2b413144 801static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
5fafdf24 802 int is_write, sigset_t *old_set,
bf3e8bf1 803 void *puc)
9de5e440 804{
a513fe19
FB
805 TranslationBlock *tb;
806 int ret;
68a79315 807
83479e77
FB
808 if (cpu_single_env)
809 env = cpu_single_env; /* XXX: find a correct solution for multithread */
fd6ce8f6 810#if defined(DEBUG_SIGNAL)
5fafdf24 811 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
bf3e8bf1 812 pc, address, is_write, *(unsigned long *)old_set);
9de5e440 813#endif
25eb4484 814 /* XXX: locking issue */
53a5960a 815 if (is_write && page_unprotect(h2g(address), pc, puc)) {
fd6ce8f6
FB
816 return 1;
817 }
fbf9eeb3 818
3fb2ded1 819 /* see if it is an MMU fault */
6ebbf390 820 ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
3fb2ded1
FB
821 if (ret < 0)
822 return 0; /* not an MMU fault */
823 if (ret == 0)
824 return 1; /* the MMU fault was handled without causing real CPU fault */
825 /* now we have a real cpu fault */
a513fe19
FB
826 tb = tb_find_pc(pc);
827 if (tb) {
9de5e440
FB
828 /* the PC is inside the translated code. It means that we have
829 a virtual CPU fault */
bf3e8bf1 830 cpu_restore_state(tb, env, pc, puc);
3fb2ded1 831 }
4cbf74b6 832 if (ret == 1) {
3fb2ded1 833#if 0
5fafdf24 834 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
4cbf74b6 835 env->eip, env->cr[2], env->error_code);
3fb2ded1 836#endif
4cbf74b6
FB
837 /* we restore the process signal mask as the sigreturn should
838 do it (XXX: use sigsetjmp) */
839 sigprocmask(SIG_SETMASK, old_set, NULL);
54ca9095 840 raise_exception_err(env->exception_index, env->error_code);
4cbf74b6
FB
841 } else {
842 /* activate soft MMU for this block */
3f337316 843 env->hflags |= HF_SOFTMMU_MASK;
fbf9eeb3 844 cpu_resume_from_signal(env, puc);
4cbf74b6 845 }
3fb2ded1
FB
846 /* never comes here */
847 return 1;
848}
849
e4533c7a 850#elif defined(TARGET_ARM)
3fb2ded1 851static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
bf3e8bf1
FB
852 int is_write, sigset_t *old_set,
853 void *puc)
3fb2ded1 854{
68016c62
FB
855 TranslationBlock *tb;
856 int ret;
857
858 if (cpu_single_env)
859 env = cpu_single_env; /* XXX: find a correct solution for multithread */
860#if defined(DEBUG_SIGNAL)
5fafdf24 861 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
68016c62
FB
862 pc, address, is_write, *(unsigned long *)old_set);
863#endif
9f0777ed 864 /* XXX: locking issue */
53a5960a 865 if (is_write && page_unprotect(h2g(address), pc, puc)) {
9f0777ed
FB
866 return 1;
867 }
68016c62 868 /* see if it is an MMU fault */
6ebbf390 869 ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
68016c62
FB
870 if (ret < 0)
871 return 0; /* not an MMU fault */
872 if (ret == 0)
873 return 1; /* the MMU fault was handled without causing real CPU fault */
874 /* now we have a real cpu fault */
875 tb = tb_find_pc(pc);
876 if (tb) {
877 /* the PC is inside the translated code. It means that we have
878 a virtual CPU fault */
879 cpu_restore_state(tb, env, pc, puc);
880 }
881 /* we restore the process signal mask as the sigreturn should
882 do it (XXX: use sigsetjmp) */
883 sigprocmask(SIG_SETMASK, old_set, NULL);
884 cpu_loop_exit();
968c74da
AJ
885 /* never comes here */
886 return 1;
3fb2ded1 887}
93ac68bc
FB
888#elif defined(TARGET_SPARC)
889static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
bf3e8bf1
FB
890 int is_write, sigset_t *old_set,
891 void *puc)
93ac68bc 892{
68016c62
FB
893 TranslationBlock *tb;
894 int ret;
895
896 if (cpu_single_env)
897 env = cpu_single_env; /* XXX: find a correct solution for multithread */
898#if defined(DEBUG_SIGNAL)
5fafdf24 899 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
68016c62
FB
900 pc, address, is_write, *(unsigned long *)old_set);
901#endif
b453b70b 902 /* XXX: locking issue */
53a5960a 903 if (is_write && page_unprotect(h2g(address), pc, puc)) {
b453b70b
FB
904 return 1;
905 }
68016c62 906 /* see if it is an MMU fault */
6ebbf390 907 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
68016c62
FB
908 if (ret < 0)
909 return 0; /* not an MMU fault */
910 if (ret == 0)
911 return 1; /* the MMU fault was handled without causing real CPU fault */
912 /* now we have a real cpu fault */
913 tb = tb_find_pc(pc);
914 if (tb) {
915 /* the PC is inside the translated code. It means that we have
916 a virtual CPU fault */
917 cpu_restore_state(tb, env, pc, puc);
918 }
919 /* we restore the process signal mask as the sigreturn should
920 do it (XXX: use sigsetjmp) */
921 sigprocmask(SIG_SETMASK, old_set, NULL);
922 cpu_loop_exit();
968c74da
AJ
923 /* never comes here */
924 return 1;
93ac68bc 925}
67867308
FB
926#elif defined (TARGET_PPC)
927static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
bf3e8bf1
FB
928 int is_write, sigset_t *old_set,
929 void *puc)
67867308
FB
930{
931 TranslationBlock *tb;
ce09776b 932 int ret;
3b46e624 933
67867308
FB
934 if (cpu_single_env)
935 env = cpu_single_env; /* XXX: find a correct solution for multithread */
67867308 936#if defined(DEBUG_SIGNAL)
5fafdf24 937 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
67867308
FB
938 pc, address, is_write, *(unsigned long *)old_set);
939#endif
940 /* XXX: locking issue */
53a5960a 941 if (is_write && page_unprotect(h2g(address), pc, puc)) {
67867308
FB
942 return 1;
943 }
944
ce09776b 945 /* see if it is an MMU fault */
6ebbf390 946 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
ce09776b
FB
947 if (ret < 0)
948 return 0; /* not an MMU fault */
949 if (ret == 0)
950 return 1; /* the MMU fault was handled without causing real CPU fault */
951
67867308
FB
952 /* now we have a real cpu fault */
953 tb = tb_find_pc(pc);
954 if (tb) {
955 /* the PC is inside the translated code. It means that we have
956 a virtual CPU fault */
bf3e8bf1 957 cpu_restore_state(tb, env, pc, puc);
67867308 958 }
ce09776b 959 if (ret == 1) {
67867308 960#if 0
5fafdf24 961 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
ce09776b 962 env->nip, env->error_code, tb);
67867308
FB
963#endif
964 /* we restore the process signal mask as the sigreturn should
965 do it (XXX: use sigsetjmp) */
bf3e8bf1 966 sigprocmask(SIG_SETMASK, old_set, NULL);
9fddaa0c 967 do_raise_exception_err(env->exception_index, env->error_code);
ce09776b
FB
968 } else {
969 /* activate soft MMU for this block */
fbf9eeb3 970 cpu_resume_from_signal(env, puc);
ce09776b 971 }
67867308 972 /* never comes here */
e6e5906b
PB
973 return 1;
974}
975
976#elif defined(TARGET_M68K)
977static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
978 int is_write, sigset_t *old_set,
979 void *puc)
980{
981 TranslationBlock *tb;
982 int ret;
983
984 if (cpu_single_env)
985 env = cpu_single_env; /* XXX: find a correct solution for multithread */
986#if defined(DEBUG_SIGNAL)
5fafdf24 987 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
e6e5906b
PB
988 pc, address, is_write, *(unsigned long *)old_set);
989#endif
990 /* XXX: locking issue */
991 if (is_write && page_unprotect(address, pc, puc)) {
992 return 1;
993 }
994 /* see if it is an MMU fault */
6ebbf390 995 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
e6e5906b
PB
996 if (ret < 0)
997 return 0; /* not an MMU fault */
998 if (ret == 0)
999 return 1; /* the MMU fault was handled without causing real CPU fault */
1000 /* now we have a real cpu fault */
1001 tb = tb_find_pc(pc);
1002 if (tb) {
1003 /* the PC is inside the translated code. It means that we have
1004 a virtual CPU fault */
1005 cpu_restore_state(tb, env, pc, puc);
1006 }
1007 /* we restore the process signal mask as the sigreturn should
1008 do it (XXX: use sigsetjmp) */
1009 sigprocmask(SIG_SETMASK, old_set, NULL);
1010 cpu_loop_exit();
1011 /* never comes here */
67867308
FB
1012 return 1;
1013}
6af0bf9c
FB
1014
1015#elif defined (TARGET_MIPS)
1016static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1017 int is_write, sigset_t *old_set,
1018 void *puc)
1019{
1020 TranslationBlock *tb;
1021 int ret;
3b46e624 1022
6af0bf9c
FB
1023 if (cpu_single_env)
1024 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1025#if defined(DEBUG_SIGNAL)
5fafdf24 1026 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
6af0bf9c
FB
1027 pc, address, is_write, *(unsigned long *)old_set);
1028#endif
1029 /* XXX: locking issue */
53a5960a 1030 if (is_write && page_unprotect(h2g(address), pc, puc)) {
6af0bf9c
FB
1031 return 1;
1032 }
1033
1034 /* see if it is an MMU fault */
6ebbf390 1035 ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
6af0bf9c
FB
1036 if (ret < 0)
1037 return 0; /* not an MMU fault */
1038 if (ret == 0)
1039 return 1; /* the MMU fault was handled without causing real CPU fault */
1040
1041 /* now we have a real cpu fault */
1042 tb = tb_find_pc(pc);
1043 if (tb) {
1044 /* the PC is inside the translated code. It means that we have
1045 a virtual CPU fault */
1046 cpu_restore_state(tb, env, pc, puc);
1047 }
1048 if (ret == 1) {
1049#if 0
5fafdf24 1050 printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
1eb5207b 1051 env->PC, env->error_code, tb);
6af0bf9c
FB
1052#endif
1053 /* we restore the process signal mask as the sigreturn should
1054 do it (XXX: use sigsetjmp) */
1055 sigprocmask(SIG_SETMASK, old_set, NULL);
1056 do_raise_exception_err(env->exception_index, env->error_code);
1057 } else {
1058 /* activate soft MMU for this block */
1059 cpu_resume_from_signal(env, puc);
1060 }
1061 /* never comes here */
1062 return 1;
1063}
1064
fdf9b3e8
FB
1065#elif defined (TARGET_SH4)
1066static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1067 int is_write, sigset_t *old_set,
1068 void *puc)
1069{
1070 TranslationBlock *tb;
1071 int ret;
3b46e624 1072
fdf9b3e8
FB
1073 if (cpu_single_env)
1074 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1075#if defined(DEBUG_SIGNAL)
5fafdf24 1076 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
fdf9b3e8
FB
1077 pc, address, is_write, *(unsigned long *)old_set);
1078#endif
1079 /* XXX: locking issue */
1080 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1081 return 1;
1082 }
1083
1084 /* see if it is an MMU fault */
6ebbf390 1085 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
fdf9b3e8
FB
1086 if (ret < 0)
1087 return 0; /* not an MMU fault */
1088 if (ret == 0)
1089 return 1; /* the MMU fault was handled without causing real CPU fault */
1090
1091 /* now we have a real cpu fault */
eddf68a6
JM
1092 tb = tb_find_pc(pc);
1093 if (tb) {
1094 /* the PC is inside the translated code. It means that we have
1095 a virtual CPU fault */
1096 cpu_restore_state(tb, env, pc, puc);
1097 }
1098#if 0
5fafdf24 1099 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
eddf68a6
JM
1100 env->nip, env->error_code, tb);
1101#endif
1102 /* we restore the process signal mask as the sigreturn should
1103 do it (XXX: use sigsetjmp) */
1104 sigprocmask(SIG_SETMASK, old_set, NULL);
1105 cpu_loop_exit();
1106 /* never comes here */
1107 return 1;
1108}
1109
1110#elif defined (TARGET_ALPHA)
1111static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1112 int is_write, sigset_t *old_set,
1113 void *puc)
1114{
1115 TranslationBlock *tb;
1116 int ret;
3b46e624 1117
eddf68a6
JM
1118 if (cpu_single_env)
1119 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1120#if defined(DEBUG_SIGNAL)
5fafdf24 1121 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
eddf68a6
JM
1122 pc, address, is_write, *(unsigned long *)old_set);
1123#endif
1124 /* XXX: locking issue */
1125 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1126 return 1;
1127 }
1128
1129 /* see if it is an MMU fault */
6ebbf390 1130 ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
eddf68a6
JM
1131 if (ret < 0)
1132 return 0; /* not an MMU fault */
1133 if (ret == 0)
1134 return 1; /* the MMU fault was handled without causing real CPU fault */
1135
1136 /* now we have a real cpu fault */
fdf9b3e8
FB
1137 tb = tb_find_pc(pc);
1138 if (tb) {
1139 /* the PC is inside the translated code. It means that we have
1140 a virtual CPU fault */
1141 cpu_restore_state(tb, env, pc, puc);
1142 }
fdf9b3e8 1143#if 0
5fafdf24 1144 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
fdf9b3e8
FB
1145 env->nip, env->error_code, tb);
1146#endif
1147 /* we restore the process signal mask as the sigreturn should
1148 do it (XXX: use sigsetjmp) */
355fb23d
PB
1149 sigprocmask(SIG_SETMASK, old_set, NULL);
1150 cpu_loop_exit();
fdf9b3e8
FB
1151 /* never comes here */
1152 return 1;
1153}
f1ccf904
TS
1154#elif defined (TARGET_CRIS)
1155static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1156 int is_write, sigset_t *old_set,
1157 void *puc)
1158{
1159 TranslationBlock *tb;
1160 int ret;
1161
1162 if (cpu_single_env)
1163 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1164#if defined(DEBUG_SIGNAL)
1165 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1166 pc, address, is_write, *(unsigned long *)old_set);
1167#endif
1168 /* XXX: locking issue */
1169 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1170 return 1;
1171 }
1172
1173 /* see if it is an MMU fault */
6ebbf390 1174 ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
f1ccf904
TS
1175 if (ret < 0)
1176 return 0; /* not an MMU fault */
1177 if (ret == 0)
1178 return 1; /* the MMU fault was handled without causing real CPU fault */
1179
1180 /* now we have a real cpu fault */
1181 tb = tb_find_pc(pc);
1182 if (tb) {
1183 /* the PC is inside the translated code. It means that we have
1184 a virtual CPU fault */
1185 cpu_restore_state(tb, env, pc, puc);
1186 }
f1ccf904
TS
1187 /* we restore the process signal mask as the sigreturn should
1188 do it (XXX: use sigsetjmp) */
1189 sigprocmask(SIG_SETMASK, old_set, NULL);
1190 cpu_loop_exit();
1191 /* never comes here */
1192 return 1;
1193}
1194
e4533c7a
FB
1195#else
1196#error unsupported target CPU
1197#endif
9de5e440 1198
2b413144
FB
1199#if defined(__i386__)
1200
d8ecc0b9
FB
1201#if defined(__APPLE__)
1202# include <sys/ucontext.h>
1203
1204# define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1205# define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1206# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1207#else
1208# define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1209# define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1210# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1211#endif
1212
5fafdf24 1213int cpu_signal_handler(int host_signum, void *pinfo,
e4533c7a 1214 void *puc)
9de5e440 1215{
5a7b542b 1216 siginfo_t *info = pinfo;
9de5e440
FB
1217 struct ucontext *uc = puc;
1218 unsigned long pc;
bf3e8bf1 1219 int trapno;
97eb5b14 1220
d691f669
FB
1221#ifndef REG_EIP
1222/* for glibc 2.1 */
fd6ce8f6
FB
1223#define REG_EIP EIP
1224#define REG_ERR ERR
1225#define REG_TRAPNO TRAPNO
d691f669 1226#endif
d8ecc0b9
FB
1227 pc = EIP_sig(uc);
1228 trapno = TRAP_sig(uc);
ec6338ba
FB
1229 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1230 trapno == 0xe ?
1231 (ERROR_sig(uc) >> 1) & 1 : 0,
1232 &uc->uc_sigmask, puc);
2b413144
FB
1233}
1234
bc51c5c9
FB
1235#elif defined(__x86_64__)
1236
5a7b542b 1237int cpu_signal_handler(int host_signum, void *pinfo,
bc51c5c9
FB
1238 void *puc)
1239{
5a7b542b 1240 siginfo_t *info = pinfo;
bc51c5c9
FB
1241 struct ucontext *uc = puc;
1242 unsigned long pc;
1243
1244 pc = uc->uc_mcontext.gregs[REG_RIP];
5fafdf24
TS
1245 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1246 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
bc51c5c9
FB
1247 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1248 &uc->uc_sigmask, puc);
1249}
1250
83fb7adf 1251#elif defined(__powerpc__)
2b413144 1252
83fb7adf
FB
1253/***********************************************************************
1254 * signal context platform-specific definitions
1255 * From Wine
1256 */
1257#ifdef linux
1258/* All Registers access - only for local access */
1259# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1260/* Gpr Registers access */
1261# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1262# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1263# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1264# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1265# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1266# define LR_sig(context) REG_sig(link, context) /* Link register */
1267# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1268/* Float Registers access */
1269# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1270# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1271/* Exception Registers access */
1272# define DAR_sig(context) REG_sig(dar, context)
1273# define DSISR_sig(context) REG_sig(dsisr, context)
1274# define TRAP_sig(context) REG_sig(trap, context)
1275#endif /* linux */
1276
1277#ifdef __APPLE__
1278# include <sys/ucontext.h>
1279typedef struct ucontext SIGCONTEXT;
1280/* All Registers access - only for local access */
1281# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1282# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1283# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1284# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1285/* Gpr Registers access */
1286# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1287# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1288# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1289# define CTR_sig(context) REG_sig(ctr, context)
1290# define XER_sig(context) REG_sig(xer, context) /* Link register */
1291# define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1292# define CR_sig(context) REG_sig(cr, context) /* Condition register */
1293/* Float Registers access */
1294# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1295# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1296/* Exception Registers access */
1297# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1298# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1299# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1300#endif /* __APPLE__ */
1301
5fafdf24 1302int cpu_signal_handler(int host_signum, void *pinfo,
e4533c7a 1303 void *puc)
2b413144 1304{
5a7b542b 1305 siginfo_t *info = pinfo;
25eb4484 1306 struct ucontext *uc = puc;
25eb4484 1307 unsigned long pc;
25eb4484
FB
1308 int is_write;
1309
83fb7adf 1310 pc = IAR_sig(uc);
25eb4484
FB
1311 is_write = 0;
1312#if 0
1313 /* ppc 4xx case */
83fb7adf 1314 if (DSISR_sig(uc) & 0x00800000)
25eb4484
FB
1315 is_write = 1;
1316#else
83fb7adf 1317 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
25eb4484
FB
1318 is_write = 1;
1319#endif
5fafdf24 1320 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
bf3e8bf1 1321 is_write, &uc->uc_sigmask, puc);
2b413144
FB
1322}
1323
2f87c607
FB
1324#elif defined(__alpha__)
1325
5fafdf24 1326int cpu_signal_handler(int host_signum, void *pinfo,
2f87c607
FB
1327 void *puc)
1328{
5a7b542b 1329 siginfo_t *info = pinfo;
2f87c607
FB
1330 struct ucontext *uc = puc;
1331 uint32_t *pc = uc->uc_mcontext.sc_pc;
1332 uint32_t insn = *pc;
1333 int is_write = 0;
1334
8c6939c0 1335 /* XXX: need kernel patch to get write flag faster */
2f87c607
FB
1336 switch (insn >> 26) {
1337 case 0x0d: // stw
1338 case 0x0e: // stb
1339 case 0x0f: // stq_u
1340 case 0x24: // stf
1341 case 0x25: // stg
1342 case 0x26: // sts
1343 case 0x27: // stt
1344 case 0x2c: // stl
1345 case 0x2d: // stq
1346 case 0x2e: // stl_c
1347 case 0x2f: // stq_c
1348 is_write = 1;
1349 }
1350
5fafdf24 1351 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
bf3e8bf1 1352 is_write, &uc->uc_sigmask, puc);
2f87c607 1353}
8c6939c0
FB
1354#elif defined(__sparc__)
1355
5fafdf24 1356int cpu_signal_handler(int host_signum, void *pinfo,
e4533c7a 1357 void *puc)
8c6939c0 1358{
5a7b542b 1359 siginfo_t *info = pinfo;
8c6939c0
FB
1360 uint32_t *regs = (uint32_t *)(info + 1);
1361 void *sigmask = (regs + 20);
1362 unsigned long pc;
1363 int is_write;
1364 uint32_t insn;
3b46e624 1365
8c6939c0
FB
1366 /* XXX: is there a standard glibc define ? */
1367 pc = regs[1];
1368 /* XXX: need kernel patch to get write flag faster */
1369 is_write = 0;
1370 insn = *(uint32_t *)pc;
1371 if ((insn >> 30) == 3) {
1372 switch((insn >> 19) & 0x3f) {
1373 case 0x05: // stb
1374 case 0x06: // sth
1375 case 0x04: // st
1376 case 0x07: // std
1377 case 0x24: // stf
1378 case 0x27: // stdf
1379 case 0x25: // stfsr
1380 is_write = 1;
1381 break;
1382 }
1383 }
5fafdf24 1384 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
bf3e8bf1 1385 is_write, sigmask, NULL);
8c6939c0
FB
1386}
1387
1388#elif defined(__arm__)
1389
5fafdf24 1390int cpu_signal_handler(int host_signum, void *pinfo,
e4533c7a 1391 void *puc)
8c6939c0 1392{
5a7b542b 1393 siginfo_t *info = pinfo;
8c6939c0
FB
1394 struct ucontext *uc = puc;
1395 unsigned long pc;
1396 int is_write;
3b46e624 1397
8c6939c0
FB
1398 pc = uc->uc_mcontext.gregs[R15];
1399 /* XXX: compute is_write */
1400 is_write = 0;
5fafdf24 1401 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
8c6939c0 1402 is_write,
f3a9676a 1403 &uc->uc_sigmask, puc);
8c6939c0
FB
1404}
1405
38e584a0
FB
1406#elif defined(__mc68000)
1407
5fafdf24 1408int cpu_signal_handler(int host_signum, void *pinfo,
38e584a0
FB
1409 void *puc)
1410{
5a7b542b 1411 siginfo_t *info = pinfo;
38e584a0
FB
1412 struct ucontext *uc = puc;
1413 unsigned long pc;
1414 int is_write;
3b46e624 1415
38e584a0
FB
1416 pc = uc->uc_mcontext.gregs[16];
1417 /* XXX: compute is_write */
1418 is_write = 0;
5fafdf24 1419 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
38e584a0 1420 is_write,
bf3e8bf1 1421 &uc->uc_sigmask, puc);
38e584a0
FB
1422}
1423
b8076a74
FB
1424#elif defined(__ia64)
1425
1426#ifndef __ISR_VALID
1427 /* This ought to be in <bits/siginfo.h>... */
1428# define __ISR_VALID 1
b8076a74
FB
1429#endif
1430
5a7b542b 1431int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
b8076a74 1432{
5a7b542b 1433 siginfo_t *info = pinfo;
b8076a74
FB
1434 struct ucontext *uc = puc;
1435 unsigned long ip;
1436 int is_write = 0;
1437
1438 ip = uc->uc_mcontext.sc_ip;
1439 switch (host_signum) {
1440 case SIGILL:
1441 case SIGFPE:
1442 case SIGSEGV:
1443 case SIGBUS:
1444 case SIGTRAP:
fd4a43e4 1445 if (info->si_code && (info->si_segvflags & __ISR_VALID))
b8076a74
FB
1446 /* ISR.W (write-access) is bit 33: */
1447 is_write = (info->si_isr >> 33) & 1;
1448 break;
1449
1450 default:
1451 break;
1452 }
1453 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1454 is_write,
1455 &uc->uc_sigmask, puc);
1456}
1457
90cb9493
FB
1458#elif defined(__s390__)
1459
5fafdf24 1460int cpu_signal_handler(int host_signum, void *pinfo,
90cb9493
FB
1461 void *puc)
1462{
5a7b542b 1463 siginfo_t *info = pinfo;
90cb9493
FB
1464 struct ucontext *uc = puc;
1465 unsigned long pc;
1466 int is_write;
3b46e624 1467
90cb9493
FB
1468 pc = uc->uc_mcontext.psw.addr;
1469 /* XXX: compute is_write */
1470 is_write = 0;
5fafdf24 1471 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
c4b89d18
TS
1472 is_write, &uc->uc_sigmask, puc);
1473}
1474
1475#elif defined(__mips__)
1476
5fafdf24 1477int cpu_signal_handler(int host_signum, void *pinfo,
c4b89d18
TS
1478 void *puc)
1479{
9617efe8 1480 siginfo_t *info = pinfo;
c4b89d18
TS
1481 struct ucontext *uc = puc;
1482 greg_t pc = uc->uc_mcontext.pc;
1483 int is_write;
3b46e624 1484
c4b89d18
TS
1485 /* XXX: compute is_write */
1486 is_write = 0;
5fafdf24 1487 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
c4b89d18 1488 is_write, &uc->uc_sigmask, puc);
90cb9493
FB
1489}
1490
9de5e440 1491#else
2b413144 1492
3fb2ded1 1493#error host CPU specific signal handler needed
2b413144 1494
9de5e440 1495#endif
67b915a5
FB
1496
1497#endif /* !defined(CONFIG_SOFTMMU) */