/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
/* User-mode emulation: drop the x86 register macros so they do not
   clash with field names used by the host's signal-context headers. */
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

36bdbe54
FB
38int tb_invalidated_flag;
39
dc99065b 40//#define DEBUG_EXEC
9de5e440 41//#define DEBUG_SIGNAL
7d13299d 42
e4533c7a
FB
43void cpu_loop_exit(void)
44{
bfed01fc
TS
45 /* NOTE: the register at this point must be saved by hand because
46 longjmp restore them */
47 regs_to_env();
e4533c7a
FB
48 longjmp(env->jmp_env, 1);
49}
bfed01fc 50
e6e5906b 51#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
3475187d
FB
52#define reg_T2
53#endif
e4533c7a 54
fbf9eeb3
FB
55/* exit the current TB from a signal handler. The host registers are
56 restored in a state compatible with the CPU emulator
57 */
5fafdf24 58void cpu_resume_from_signal(CPUState *env1, void *puc)
fbf9eeb3
FB
59{
60#if !defined(CONFIG_SOFTMMU)
61 struct ucontext *uc = puc;
62#endif
63
64 env = env1;
65
66 /* XXX: restore cpu registers saved in host registers */
67
68#if !defined(CONFIG_SOFTMMU)
69 if (puc) {
70 /* XXX: use siglongjmp ? */
71 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
72 }
73#endif
74 longjmp(env->jmp_env, 1);
75}
76
8a40a180
FB
77
78static TranslationBlock *tb_find_slow(target_ulong pc,
79 target_ulong cs_base,
c068688b 80 uint64_t flags)
8a40a180
FB
81{
82 TranslationBlock *tb, **ptb1;
83 int code_gen_size;
84 unsigned int h;
85 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
86 uint8_t *tc_ptr;
3b46e624 87
8a40a180
FB
88 spin_lock(&tb_lock);
89
90 tb_invalidated_flag = 0;
3b46e624 91
8a40a180 92 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
3b46e624 93
8a40a180
FB
94 /* find translated block using physical mappings */
95 phys_pc = get_phys_addr_code(env, pc);
96 phys_page1 = phys_pc & TARGET_PAGE_MASK;
97 phys_page2 = -1;
98 h = tb_phys_hash_func(phys_pc);
99 ptb1 = &tb_phys_hash[h];
100 for(;;) {
101 tb = *ptb1;
102 if (!tb)
103 goto not_found;
5fafdf24 104 if (tb->pc == pc &&
8a40a180 105 tb->page_addr[0] == phys_page1 &&
5fafdf24 106 tb->cs_base == cs_base &&
8a40a180
FB
107 tb->flags == flags) {
108 /* check next page if needed */
109 if (tb->page_addr[1] != -1) {
5fafdf24 110 virt_page2 = (pc & TARGET_PAGE_MASK) +
8a40a180
FB
111 TARGET_PAGE_SIZE;
112 phys_page2 = get_phys_addr_code(env, virt_page2);
113 if (tb->page_addr[1] == phys_page2)
114 goto found;
115 } else {
116 goto found;
117 }
118 }
119 ptb1 = &tb->phys_hash_next;
120 }
121 not_found:
122 /* if no translated code available, then translate it now */
123 tb = tb_alloc(pc);
124 if (!tb) {
125 /* flush must be done */
126 tb_flush(env);
127 /* cannot fail at this point */
128 tb = tb_alloc(pc);
129 /* don't forget to invalidate previous TB info */
15388002 130 tb_invalidated_flag = 1;
8a40a180
FB
131 }
132 tc_ptr = code_gen_ptr;
133 tb->tc_ptr = tc_ptr;
134 tb->cs_base = cs_base;
135 tb->flags = flags;
136 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
137 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 138
8a40a180
FB
139 /* check next page if needed */
140 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
141 phys_page2 = -1;
142 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
143 phys_page2 = get_phys_addr_code(env, virt_page2);
144 }
145 tb_link_phys(tb, phys_pc, phys_page2);
3b46e624 146
8a40a180 147 found:
8a40a180
FB
148 /* we add the TB in the virtual pc hash table */
149 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
150 spin_unlock(&tb_lock);
151 return tb;
152}
153
154static inline TranslationBlock *tb_find_fast(void)
155{
156 TranslationBlock *tb;
157 target_ulong cs_base, pc;
c068688b 158 uint64_t flags;
8a40a180
FB
159
160 /* we record a subset of the CPU state. It will
161 always be the same before a given translated block
162 is executed. */
163#if defined(TARGET_I386)
164 flags = env->hflags;
165 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
0573fbfc 166 flags |= env->intercept;
8a40a180
FB
167 cs_base = env->segs[R_CS].base;
168 pc = cs_base + env->eip;
169#elif defined(TARGET_ARM)
170 flags = env->thumb | (env->vfp.vec_len << 1)
b5ff1b31
FB
171 | (env->vfp.vec_stride << 4);
172 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
173 flags |= (1 << 6);
40f137e1
PB
174 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
175 flags |= (1 << 7);
9ee6e8bb 176 flags |= (env->condexec_bits << 8);
8a40a180
FB
177 cs_base = 0;
178 pc = env->regs[15];
179#elif defined(TARGET_SPARC)
180#ifdef TARGET_SPARC64
a80dde08
FB
181 // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
182 flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
183 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
8a40a180 184#else
6d5f237a
BS
185 // FPU enable . Supervisor
186 flags = (env->psref << 4) | env->psrs;
8a40a180
FB
187#endif
188 cs_base = env->npc;
189 pc = env->pc;
190#elif defined(TARGET_PPC)
1527c87e 191 flags = env->hflags;
8a40a180
FB
192 cs_base = 0;
193 pc = env->nip;
194#elif defined(TARGET_MIPS)
56b19403 195 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
cc9442b9 196 cs_base = 0;
ead9360e 197 pc = env->PC[env->current_tc];
e6e5906b 198#elif defined(TARGET_M68K)
acf930aa
PB
199 flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
200 | (env->sr & SR_S) /* Bit 13 */
201 | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
e6e5906b
PB
202 cs_base = 0;
203 pc = env->pc;
fdf9b3e8
FB
204#elif defined(TARGET_SH4)
205 flags = env->sr & (SR_MD | SR_RB);
206 cs_base = 0; /* XXXXX */
207 pc = env->pc;
eddf68a6
JM
208#elif defined(TARGET_ALPHA)
209 flags = env->ps;
210 cs_base = 0;
211 pc = env->pc;
f1ccf904
TS
212#elif defined(TARGET_CRIS)
213 flags = 0;
214 cs_base = 0;
215 pc = env->pc;
8a40a180
FB
216#else
217#error unsupported CPU
218#endif
219 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
220 if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
221 tb->flags != flags, 0)) {
222 tb = tb_find_slow(pc, cs_base, flags);
15388002
FB
223 /* Note: we do it here to avoid a gcc bug on Mac OS X when
224 doing it in tb_find_slow */
225 if (tb_invalidated_flag) {
226 /* as some TB could have been invalidated because
227 of memory exceptions while generating the code, we
228 must recompute the hash index here */
229 T0 = 0;
230 }
8a40a180
FB
231 }
232 return tb;
233}
234
235
7d13299d
FB
236/* main execution loop */
237
e4533c7a 238int cpu_exec(CPUState *env1)
7d13299d 239{
1057eaa7
PB
240#define DECLARE_HOST_REGS 1
241#include "hostregs_helper.h"
242#if defined(TARGET_SPARC)
3475187d
FB
243#if defined(reg_REGWPTR)
244 uint32_t *saved_regwptr;
245#endif
246#endif
fdbb4691 247#if defined(__sparc__) && !defined(HOST_SOLARIS)
b49d07ba
TS
248 int saved_i7;
249 target_ulong tmp_T0;
04369ff2 250#endif
8a40a180 251 int ret, interrupt_request;
7d13299d 252 void (*gen_func)(void);
8a40a180 253 TranslationBlock *tb;
c27004ec 254 uint8_t *tc_ptr;
8c6939c0 255
bfed01fc
TS
256 if (cpu_halted(env1) == EXCP_HALTED)
257 return EXCP_HALTED;
5a1e3cfc 258
5fafdf24 259 cpu_single_env = env1;
6a00d601 260
7d13299d 261 /* first we save global registers */
1057eaa7
PB
262#define SAVE_HOST_REGS 1
263#include "hostregs_helper.h"
c27004ec 264 env = env1;
fdbb4691 265#if defined(__sparc__) && !defined(HOST_SOLARIS)
e4533c7a
FB
266 /* we also save i7 because longjmp may not restore it */
267 asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
268#endif
269
0d1a29f9 270 env_to_regs();
ecb644f4 271#if defined(TARGET_I386)
9de5e440 272 /* put eflags in CPU temporary format */
fc2b4c48
FB
273 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
274 DF = 1 - (2 * ((env->eflags >> 10) & 1));
9de5e440 275 CC_OP = CC_OP_EFLAGS;
fc2b4c48 276 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
93ac68bc 277#elif defined(TARGET_SPARC)
3475187d
FB
278#if defined(reg_REGWPTR)
279 saved_regwptr = REGWPTR;
280#endif
e6e5906b
PB
281#elif defined(TARGET_M68K)
282 env->cc_op = CC_OP_FLAGS;
283 env->cc_dest = env->sr & 0xf;
284 env->cc_x = (env->sr >> 4) & 1;
ecb644f4
TS
285#elif defined(TARGET_ALPHA)
286#elif defined(TARGET_ARM)
287#elif defined(TARGET_PPC)
6af0bf9c 288#elif defined(TARGET_MIPS)
fdf9b3e8 289#elif defined(TARGET_SH4)
f1ccf904 290#elif defined(TARGET_CRIS)
fdf9b3e8 291 /* XXXXX */
e4533c7a
FB
292#else
293#error unsupported target CPU
294#endif
3fb2ded1 295 env->exception_index = -1;
9d27abd9 296
7d13299d 297 /* prepare setjmp context for exception handling */
3fb2ded1
FB
298 for(;;) {
299 if (setjmp(env->jmp_env) == 0) {
ee8b7021 300 env->current_tb = NULL;
3fb2ded1
FB
301 /* if an exception is pending, we execute it here */
302 if (env->exception_index >= 0) {
303 if (env->exception_index >= EXCP_INTERRUPT) {
304 /* exit request from the cpu execution loop */
305 ret = env->exception_index;
306 break;
307 } else if (env->user_mode_only) {
308 /* if user mode only, we simulate a fake exception
9f083493 309 which will be handled outside the cpu execution
3fb2ded1 310 loop */
83479e77 311#if defined(TARGET_I386)
5fafdf24
TS
312 do_interrupt_user(env->exception_index,
313 env->exception_is_int,
314 env->error_code,
3fb2ded1 315 env->exception_next_eip);
83479e77 316#endif
3fb2ded1
FB
317 ret = env->exception_index;
318 break;
319 } else {
83479e77 320#if defined(TARGET_I386)
3fb2ded1
FB
321 /* simulate a real cpu exception. On i386, it can
322 trigger new exceptions, but we do not handle
323 double or triple faults yet. */
5fafdf24
TS
324 do_interrupt(env->exception_index,
325 env->exception_is_int,
326 env->error_code,
d05e66d2 327 env->exception_next_eip, 0);
678dde13
TS
328 /* successfully delivered */
329 env->old_exception = -1;
ce09776b
FB
330#elif defined(TARGET_PPC)
331 do_interrupt(env);
6af0bf9c
FB
332#elif defined(TARGET_MIPS)
333 do_interrupt(env);
e95c8d51 334#elif defined(TARGET_SPARC)
1a0c3292 335 do_interrupt(env->exception_index);
b5ff1b31
FB
336#elif defined(TARGET_ARM)
337 do_interrupt(env);
fdf9b3e8
FB
338#elif defined(TARGET_SH4)
339 do_interrupt(env);
eddf68a6
JM
340#elif defined(TARGET_ALPHA)
341 do_interrupt(env);
f1ccf904
TS
342#elif defined(TARGET_CRIS)
343 do_interrupt(env);
0633879f
PB
344#elif defined(TARGET_M68K)
345 do_interrupt(0);
83479e77 346#endif
3fb2ded1
FB
347 }
348 env->exception_index = -1;
5fafdf24 349 }
9df217a3
FB
350#ifdef USE_KQEMU
351 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
352 int ret;
353 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
354 ret = kqemu_cpu_exec(env);
355 /* put eflags in CPU temporary format */
356 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
357 DF = 1 - (2 * ((env->eflags >> 10) & 1));
358 CC_OP = CC_OP_EFLAGS;
359 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
360 if (ret == 1) {
361 /* exception */
362 longjmp(env->jmp_env, 1);
363 } else if (ret == 2) {
364 /* softmmu execution needed */
365 } else {
366 if (env->interrupt_request != 0) {
367 /* hardware interrupt will be executed just after */
368 } else {
369 /* otherwise, we restart */
370 longjmp(env->jmp_env, 1);
371 }
372 }
3fb2ded1 373 }
9df217a3
FB
374#endif
375
3fb2ded1
FB
376 T0 = 0; /* force lookup of first TB */
377 for(;;) {
fdbb4691 378#if defined(__sparc__) && !defined(HOST_SOLARIS)
5fafdf24 379 /* g1 can be modified by some libc? functions */
3fb2ded1 380 tmp_T0 = T0;
3b46e624 381#endif
68a79315 382 interrupt_request = env->interrupt_request;
0573fbfc
TS
383 if (__builtin_expect(interrupt_request, 0)
384#if defined(TARGET_I386)
385 && env->hflags & HF_GIF_MASK
386#endif
387 ) {
6658ffb8
PB
388 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
389 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
390 env->exception_index = EXCP_DEBUG;
391 cpu_loop_exit();
392 }
a90b7318 393#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
f1ccf904 394 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
a90b7318
AZ
395 if (interrupt_request & CPU_INTERRUPT_HALT) {
396 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
397 env->halted = 1;
398 env->exception_index = EXCP_HLT;
399 cpu_loop_exit();
400 }
401#endif
68a79315 402#if defined(TARGET_I386)
3b21e03e
FB
403 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
404 !(env->hflags & HF_SMM_MASK)) {
0573fbfc 405 svm_check_intercept(SVM_EXIT_SMI);
3b21e03e
FB
406 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
407 do_smm_enter();
408#if defined(__sparc__) && !defined(HOST_SOLARIS)
409 tmp_T0 = 0;
410#else
411 T0 = 0;
412#endif
413 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
0573fbfc 414 (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
3f337316 415 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
68a79315 416 int intno;
0573fbfc 417 svm_check_intercept(SVM_EXIT_INTR);
52621688 418 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
a541f297 419 intno = cpu_get_pic_interrupt(env);
f193c797 420 if (loglevel & CPU_LOG_TB_IN_ASM) {
68a79315
FB
421 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
422 }
d05e66d2 423 do_interrupt(intno, 0, 0, 0, 1);
907a5b26
FB
424 /* ensure that no TB jump will be modified as
425 the program flow was changed */
fdbb4691 426#if defined(__sparc__) && !defined(HOST_SOLARIS)
907a5b26
FB
427 tmp_T0 = 0;
428#else
429 T0 = 0;
0573fbfc
TS
430#endif
431#if !defined(CONFIG_USER_ONLY)
432 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
433 (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
434 int intno;
435 /* FIXME: this should respect TPR */
436 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
52621688 437 svm_check_intercept(SVM_EXIT_VINTR);
0573fbfc
TS
438 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
439 if (loglevel & CPU_LOG_TB_IN_ASM)
440 fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
441 do_interrupt(intno, 0, 0, -1, 1);
52621688
TS
442 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
443 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
0573fbfc
TS
444#if defined(__sparc__) && !defined(HOST_SOLARIS)
445 tmp_T0 = 0;
446#else
447 T0 = 0;
448#endif
907a5b26 449#endif
68a79315 450 }
ce09776b 451#elif defined(TARGET_PPC)
9fddaa0c
FB
452#if 0
453 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
454 cpu_ppc_reset(env);
455 }
456#endif
47103572 457 if (interrupt_request & CPU_INTERRUPT_HARD) {
e9df014c
JM
458 ppc_hw_interrupt(env);
459 if (env->pending_interrupts == 0)
460 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
fdbb4691 461#if defined(__sparc__) && !defined(HOST_SOLARIS)
e9df014c 462 tmp_T0 = 0;
8a40a180 463#else
e9df014c 464 T0 = 0;
8a40a180 465#endif
ce09776b 466 }
6af0bf9c
FB
467#elif defined(TARGET_MIPS)
468 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
24c7b0e3 469 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
6af0bf9c 470 (env->CP0_Status & (1 << CP0St_IE)) &&
24c7b0e3
TS
471 !(env->CP0_Status & (1 << CP0St_EXL)) &&
472 !(env->CP0_Status & (1 << CP0St_ERL)) &&
6af0bf9c
FB
473 !(env->hflags & MIPS_HFLAG_DM)) {
474 /* Raise it */
475 env->exception_index = EXCP_EXT_INTERRUPT;
476 env->error_code = 0;
477 do_interrupt(env);
fdbb4691 478#if defined(__sparc__) && !defined(HOST_SOLARIS)
8a40a180
FB
479 tmp_T0 = 0;
480#else
481 T0 = 0;
482#endif
6af0bf9c 483 }
e95c8d51 484#elif defined(TARGET_SPARC)
66321a11
FB
485 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
486 (env->psret != 0)) {
487 int pil = env->interrupt_index & 15;
488 int type = env->interrupt_index & 0xf0;
489
490 if (((type == TT_EXTINT) &&
491 (pil == 15 || pil > env->psrpil)) ||
492 type != TT_EXTINT) {
493 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
494 do_interrupt(env->interrupt_index);
495 env->interrupt_index = 0;
327ac2e7
BS
496#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
497 cpu_check_irqs(env);
498#endif
fdbb4691 499#if defined(__sparc__) && !defined(HOST_SOLARIS)
8a40a180
FB
500 tmp_T0 = 0;
501#else
502 T0 = 0;
503#endif
66321a11 504 }
e95c8d51
FB
505 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
506 //do_interrupt(0, 0, 0, 0, 0);
507 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
a90b7318 508 }
b5ff1b31
FB
509#elif defined(TARGET_ARM)
510 if (interrupt_request & CPU_INTERRUPT_FIQ
511 && !(env->uncached_cpsr & CPSR_F)) {
512 env->exception_index = EXCP_FIQ;
513 do_interrupt(env);
514 }
9ee6e8bb
PB
515 /* ARMv7-M interrupt return works by loading a magic value
516 into the PC. On real hardware the load causes the
517 return to occur. The qemu implementation performs the
518 jump normally, then does the exception return when the
519 CPU tries to execute code at the magic address.
520 This will cause the magic PC value to be pushed to
521 the stack if an interrupt occured at the wrong time.
522 We avoid this by disabling interrupts when
523 pc contains a magic address. */
b5ff1b31 524 if (interrupt_request & CPU_INTERRUPT_HARD
9ee6e8bb
PB
525 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
526 || !(env->uncached_cpsr & CPSR_I))) {
b5ff1b31
FB
527 env->exception_index = EXCP_IRQ;
528 do_interrupt(env);
529 }
fdf9b3e8
FB
530#elif defined(TARGET_SH4)
531 /* XXXXX */
eddf68a6
JM
532#elif defined(TARGET_ALPHA)
533 if (interrupt_request & CPU_INTERRUPT_HARD) {
534 do_interrupt(env);
535 }
f1ccf904
TS
536#elif defined(TARGET_CRIS)
537 if (interrupt_request & CPU_INTERRUPT_HARD) {
538 do_interrupt(env);
539 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
540 }
0633879f
PB
541#elif defined(TARGET_M68K)
542 if (interrupt_request & CPU_INTERRUPT_HARD
543 && ((env->sr & SR_I) >> SR_I_SHIFT)
544 < env->pending_level) {
545 /* Real hardware gets the interrupt vector via an
546 IACK cycle at this point. Current emulated
547 hardware doesn't rely on this, so we
548 provide/save the vector when the interrupt is
549 first signalled. */
550 env->exception_index = env->pending_vector;
551 do_interrupt(1);
552 }
68a79315 553#endif
9d05095e
FB
554 /* Don't use the cached interupt_request value,
555 do_interrupt may have updated the EXITTB flag. */
b5ff1b31 556 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
bf3e8bf1
FB
557 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
558 /* ensure that no TB jump will be modified as
559 the program flow was changed */
fdbb4691 560#if defined(__sparc__) && !defined(HOST_SOLARIS)
bf3e8bf1
FB
561 tmp_T0 = 0;
562#else
563 T0 = 0;
564#endif
565 }
68a79315
FB
566 if (interrupt_request & CPU_INTERRUPT_EXIT) {
567 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
568 env->exception_index = EXCP_INTERRUPT;
569 cpu_loop_exit();
570 }
3fb2ded1 571 }
7d13299d 572#ifdef DEBUG_EXEC
b5ff1b31 573 if ((loglevel & CPU_LOG_TB_CPU)) {
3fb2ded1 574 /* restore flags in standard format */
ecb644f4
TS
575 regs_to_env();
576#if defined(TARGET_I386)
3fb2ded1 577 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
7fe48483 578 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3fb2ded1 579 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
e4533c7a 580#elif defined(TARGET_ARM)
7fe48483 581 cpu_dump_state(env, logfile, fprintf, 0);
93ac68bc 582#elif defined(TARGET_SPARC)
3475187d
FB
583 REGWPTR = env->regbase + (env->cwp * 16);
584 env->regwptr = REGWPTR;
585 cpu_dump_state(env, logfile, fprintf, 0);
67867308 586#elif defined(TARGET_PPC)
7fe48483 587 cpu_dump_state(env, logfile, fprintf, 0);
e6e5906b
PB
588#elif defined(TARGET_M68K)
589 cpu_m68k_flush_flags(env, env->cc_op);
590 env->cc_op = CC_OP_FLAGS;
591 env->sr = (env->sr & 0xffe0)
592 | env->cc_dest | (env->cc_x << 4);
593 cpu_dump_state(env, logfile, fprintf, 0);
6af0bf9c
FB
594#elif defined(TARGET_MIPS)
595 cpu_dump_state(env, logfile, fprintf, 0);
fdf9b3e8
FB
596#elif defined(TARGET_SH4)
597 cpu_dump_state(env, logfile, fprintf, 0);
eddf68a6
JM
598#elif defined(TARGET_ALPHA)
599 cpu_dump_state(env, logfile, fprintf, 0);
f1ccf904
TS
600#elif defined(TARGET_CRIS)
601 cpu_dump_state(env, logfile, fprintf, 0);
e4533c7a 602#else
5fafdf24 603#error unsupported target CPU
e4533c7a 604#endif
3fb2ded1 605 }
7d13299d 606#endif
8a40a180 607 tb = tb_find_fast();
9d27abd9 608#ifdef DEBUG_EXEC
c1135f61 609 if ((loglevel & CPU_LOG_EXEC)) {
c27004ec
FB
610 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
611 (long)tb->tc_ptr, tb->pc,
612 lookup_symbol(tb->pc));
3fb2ded1 613 }
9d27abd9 614#endif
fdbb4691 615#if defined(__sparc__) && !defined(HOST_SOLARIS)
3fb2ded1 616 T0 = tmp_T0;
3b46e624 617#endif
8a40a180
FB
618 /* see if we can patch the calling TB. When the TB
619 spans two pages, we cannot safely do a direct
620 jump. */
c27004ec 621 {
8a40a180 622 if (T0 != 0 &&
f32fc648
FB
623#if USE_KQEMU
624 (env->kqemu_enabled != 2) &&
625#endif
ec6338ba 626 tb->page_addr[1] == -1) {
3fb2ded1 627 spin_lock(&tb_lock);
c27004ec 628 tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
3fb2ded1
FB
629 spin_unlock(&tb_lock);
630 }
c27004ec 631 }
3fb2ded1 632 tc_ptr = tb->tc_ptr;
83479e77 633 env->current_tb = tb;
3fb2ded1
FB
634 /* execute the generated code */
635 gen_func = (void *)tc_ptr;
8c6939c0 636#if defined(__sparc__)
3fb2ded1
FB
637 __asm__ __volatile__("call %0\n\t"
638 "mov %%o7,%%i0"
639 : /* no outputs */
5fafdf24 640 : "r" (gen_func)
fdbb4691 641 : "i0", "i1", "i2", "i3", "i4", "i5",
faab7592 642 "o0", "o1", "o2", "o3", "o4", "o5",
fdbb4691
FB
643 "l0", "l1", "l2", "l3", "l4", "l5",
644 "l6", "l7");
8c6939c0 645#elif defined(__arm__)
3fb2ded1
FB
646 asm volatile ("mov pc, %0\n\t"
647 ".global exec_loop\n\t"
648 "exec_loop:\n\t"
649 : /* no outputs */
650 : "r" (gen_func)
651 : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
b8076a74
FB
652#elif defined(__ia64)
653 struct fptr {
654 void *ip;
655 void *gp;
656 } fp;
657
658 fp.ip = tc_ptr;
659 fp.gp = code_gen_buffer + 2 * (1 << 20);
660 (*(void (*)(void)) &fp)();
ae228531 661#else
3fb2ded1 662 gen_func();
ae228531 663#endif
83479e77 664 env->current_tb = NULL;
4cbf74b6
FB
665 /* reset soft MMU for next block (it can currently
666 only be set by a memory fault) */
667#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
3f337316
FB
668 if (env->hflags & HF_SOFTMMU_MASK) {
669 env->hflags &= ~HF_SOFTMMU_MASK;
4cbf74b6
FB
670 /* do not allow linking to another block */
671 T0 = 0;
672 }
f32fc648
FB
673#endif
674#if defined(USE_KQEMU)
675#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
676 if (kqemu_is_ok(env) &&
677 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
678 cpu_loop_exit();
679 }
4cbf74b6 680#endif
50a518e3 681 } /* for(;;) */
3fb2ded1 682 } else {
0d1a29f9 683 env_to_regs();
7d13299d 684 }
3fb2ded1
FB
685 } /* for(;;) */
686
7d13299d 687
e4533c7a 688#if defined(TARGET_I386)
9de5e440 689 /* restore flags in standard format */
fc2b4c48 690 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
e4533c7a 691#elif defined(TARGET_ARM)
b7bcbe95 692 /* XXX: Save/restore host fpu exception state?. */
93ac68bc 693#elif defined(TARGET_SPARC)
3475187d
FB
694#if defined(reg_REGWPTR)
695 REGWPTR = saved_regwptr;
696#endif
67867308 697#elif defined(TARGET_PPC)
e6e5906b
PB
698#elif defined(TARGET_M68K)
699 cpu_m68k_flush_flags(env, env->cc_op);
700 env->cc_op = CC_OP_FLAGS;
701 env->sr = (env->sr & 0xffe0)
702 | env->cc_dest | (env->cc_x << 4);
6af0bf9c 703#elif defined(TARGET_MIPS)
fdf9b3e8 704#elif defined(TARGET_SH4)
eddf68a6 705#elif defined(TARGET_ALPHA)
f1ccf904 706#elif defined(TARGET_CRIS)
fdf9b3e8 707 /* XXXXX */
e4533c7a
FB
708#else
709#error unsupported target CPU
710#endif
1057eaa7
PB
711
712 /* restore global registers */
fdbb4691 713#if defined(__sparc__) && !defined(HOST_SOLARIS)
8c6939c0 714 asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
04369ff2 715#endif
1057eaa7
PB
716#include "hostregs_helper.h"
717
6a00d601 718 /* fail safe : never use cpu_single_env outside cpu_exec() */
5fafdf24 719 cpu_single_env = NULL;
7d13299d
FB
720 return ret;
721}
6dbad63e 722
fbf9eeb3
FB
723/* must only be called from the generated code as an exception can be
724 generated */
725void tb_invalidate_page_range(target_ulong start, target_ulong end)
726{
dc5d0b3d
FB
727 /* XXX: cannot enable it yet because it yields to MMU exception
728 where NIP != read address on PowerPC */
729#if 0
fbf9eeb3
FB
730 target_ulong phys_addr;
731 phys_addr = get_phys_addr_code(env, start);
732 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
dc5d0b3d 733#endif
fbf9eeb3
FB
734}
735
1a18c71b 736#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
e4533c7a 737
6dbad63e
FB
738void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
739{
740 CPUX86State *saved_env;
741
742 saved_env = env;
743 env = s;
a412ac57 744 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
a513fe19 745 selector &= 0xffff;
5fafdf24 746 cpu_x86_load_seg_cache(env, seg_reg, selector,
c27004ec 747 (selector << 4), 0xffff, 0);
a513fe19 748 } else {
b453b70b 749 load_seg(seg_reg, selector);
a513fe19 750 }
6dbad63e
FB
751 env = saved_env;
752}
9de5e440 753
d0a1ffc9
FB
754void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
755{
756 CPUX86State *saved_env;
757
758 saved_env = env;
759 env = s;
3b46e624 760
c27004ec 761 helper_fsave((target_ulong)ptr, data32);
d0a1ffc9
FB
762
763 env = saved_env;
764}
765
766void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
767{
768 CPUX86State *saved_env;
769
770 saved_env = env;
771 env = s;
3b46e624 772
c27004ec 773 helper_frstor((target_ulong)ptr, data32);
d0a1ffc9
FB
774
775 env = saved_env;
776}
777
e4533c7a
FB
778#endif /* TARGET_I386 */
779
67b915a5
FB
780#if !defined(CONFIG_SOFTMMU)
781
3fb2ded1
FB
782#if defined(TARGET_I386)
783
b56dad1c 784/* 'pc' is the host PC at which the exception was raised. 'address' is
fd6ce8f6
FB
785 the effective address of the memory exception. 'is_write' is 1 if a
786 write caused the exception and otherwise 0'. 'old_set' is the
787 signal set which should be restored */
2b413144 788static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
5fafdf24 789 int is_write, sigset_t *old_set,
bf3e8bf1 790 void *puc)
9de5e440 791{
a513fe19
FB
792 TranslationBlock *tb;
793 int ret;
68a79315 794
83479e77
FB
795 if (cpu_single_env)
796 env = cpu_single_env; /* XXX: find a correct solution for multithread */
fd6ce8f6 797#if defined(DEBUG_SIGNAL)
5fafdf24 798 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
bf3e8bf1 799 pc, address, is_write, *(unsigned long *)old_set);
9de5e440 800#endif
25eb4484 801 /* XXX: locking issue */
53a5960a 802 if (is_write && page_unprotect(h2g(address), pc, puc)) {
fd6ce8f6
FB
803 return 1;
804 }
fbf9eeb3 805
3fb2ded1 806 /* see if it is an MMU fault */
6ebbf390 807 ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
3fb2ded1
FB
808 if (ret < 0)
809 return 0; /* not an MMU fault */
810 if (ret == 0)
811 return 1; /* the MMU fault was handled without causing real CPU fault */
812 /* now we have a real cpu fault */
a513fe19
FB
813 tb = tb_find_pc(pc);
814 if (tb) {
9de5e440
FB
815 /* the PC is inside the translated code. It means that we have
816 a virtual CPU fault */
bf3e8bf1 817 cpu_restore_state(tb, env, pc, puc);
3fb2ded1 818 }
4cbf74b6 819 if (ret == 1) {
3fb2ded1 820#if 0
5fafdf24 821 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
4cbf74b6 822 env->eip, env->cr[2], env->error_code);
3fb2ded1 823#endif
4cbf74b6
FB
824 /* we restore the process signal mask as the sigreturn should
825 do it (XXX: use sigsetjmp) */
826 sigprocmask(SIG_SETMASK, old_set, NULL);
54ca9095 827 raise_exception_err(env->exception_index, env->error_code);
4cbf74b6
FB
828 } else {
829 /* activate soft MMU for this block */
3f337316 830 env->hflags |= HF_SOFTMMU_MASK;
fbf9eeb3 831 cpu_resume_from_signal(env, puc);
4cbf74b6 832 }
3fb2ded1
FB
833 /* never comes here */
834 return 1;
835}
836
e4533c7a 837#elif defined(TARGET_ARM)
3fb2ded1 838static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
bf3e8bf1
FB
839 int is_write, sigset_t *old_set,
840 void *puc)
3fb2ded1 841{
68016c62
FB
842 TranslationBlock *tb;
843 int ret;
844
845 if (cpu_single_env)
846 env = cpu_single_env; /* XXX: find a correct solution for multithread */
847#if defined(DEBUG_SIGNAL)
5fafdf24 848 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
68016c62
FB
849 pc, address, is_write, *(unsigned long *)old_set);
850#endif
9f0777ed 851 /* XXX: locking issue */
53a5960a 852 if (is_write && page_unprotect(h2g(address), pc, puc)) {
9f0777ed
FB
853 return 1;
854 }
68016c62 855 /* see if it is an MMU fault */
6ebbf390 856 ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
68016c62
FB
857 if (ret < 0)
858 return 0; /* not an MMU fault */
859 if (ret == 0)
860 return 1; /* the MMU fault was handled without causing real CPU fault */
861 /* now we have a real cpu fault */
862 tb = tb_find_pc(pc);
863 if (tb) {
864 /* the PC is inside the translated code. It means that we have
865 a virtual CPU fault */
866 cpu_restore_state(tb, env, pc, puc);
867 }
868 /* we restore the process signal mask as the sigreturn should
869 do it (XXX: use sigsetjmp) */
870 sigprocmask(SIG_SETMASK, old_set, NULL);
871 cpu_loop_exit();
3fb2ded1 872}
93ac68bc
FB
873#elif defined(TARGET_SPARC)
874static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
bf3e8bf1
FB
875 int is_write, sigset_t *old_set,
876 void *puc)
93ac68bc 877{
68016c62
FB
878 TranslationBlock *tb;
879 int ret;
880
881 if (cpu_single_env)
882 env = cpu_single_env; /* XXX: find a correct solution for multithread */
883#if defined(DEBUG_SIGNAL)
5fafdf24 884 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
68016c62
FB
885 pc, address, is_write, *(unsigned long *)old_set);
886#endif
b453b70b 887 /* XXX: locking issue */
53a5960a 888 if (is_write && page_unprotect(h2g(address), pc, puc)) {
b453b70b
FB
889 return 1;
890 }
68016c62 891 /* see if it is an MMU fault */
6ebbf390 892 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
68016c62
FB
893 if (ret < 0)
894 return 0; /* not an MMU fault */
895 if (ret == 0)
896 return 1; /* the MMU fault was handled without causing real CPU fault */
897 /* now we have a real cpu fault */
898 tb = tb_find_pc(pc);
899 if (tb) {
900 /* the PC is inside the translated code. It means that we have
901 a virtual CPU fault */
902 cpu_restore_state(tb, env, pc, puc);
903 }
904 /* we restore the process signal mask as the sigreturn should
905 do it (XXX: use sigsetjmp) */
906 sigprocmask(SIG_SETMASK, old_set, NULL);
907 cpu_loop_exit();
93ac68bc 908}
67867308
FB
909#elif defined (TARGET_PPC)
/* TARGET_PPC variant of the user-mode SIGSEGV core handler.
   Returns 1 if the fault was fully handled (resume), 0 if it was not an
   MMU fault.  Unlike most targets, a real guest fault (ret == 1) is
   turned directly into a guest exception via do_raise_exception_err();
   ret > 1 instead resumes from the signal to activate the soft MMU. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
958
959#elif defined(TARGET_M68K)
960static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
961 int is_write, sigset_t *old_set,
962 void *puc)
963{
964 TranslationBlock *tb;
965 int ret;
966
967 if (cpu_single_env)
968 env = cpu_single_env; /* XXX: find a correct solution for multithread */
969#if defined(DEBUG_SIGNAL)
5fafdf24 970 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
e6e5906b
PB
971 pc, address, is_write, *(unsigned long *)old_set);
972#endif
973 /* XXX: locking issue */
974 if (is_write && page_unprotect(address, pc, puc)) {
975 return 1;
976 }
977 /* see if it is an MMU fault */
6ebbf390 978 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
e6e5906b
PB
979 if (ret < 0)
980 return 0; /* not an MMU fault */
981 if (ret == 0)
982 return 1; /* the MMU fault was handled without causing real CPU fault */
983 /* now we have a real cpu fault */
984 tb = tb_find_pc(pc);
985 if (tb) {
986 /* the PC is inside the translated code. It means that we have
987 a virtual CPU fault */
988 cpu_restore_state(tb, env, pc, puc);
989 }
990 /* we restore the process signal mask as the sigreturn should
991 do it (XXX: use sigsetjmp) */
992 sigprocmask(SIG_SETMASK, old_set, NULL);
993 cpu_loop_exit();
994 /* never comes here */
67867308
FB
995 return 1;
996}
6af0bf9c
FB
997
998#elif defined (TARGET_MIPS)
/* TARGET_MIPS variant of the user-mode SIGSEGV core handler.
   Returns 1 if the fault was fully handled (resume), 0 if it was not an
   MMU fault.  Like the PPC handler, ret == 1 raises a guest exception
   directly; ret > 1 resumes from the signal to activate the soft MMU. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
1047
fdf9b3e8
FB
1048#elif defined (TARGET_SH4)
/* TARGET_SH4 variant of the user-mode SIGSEGV core handler.
   Returns 1 if the fault was fully handled (resume), 0 if it was not an
   MMU fault.  On a genuine guest CPU fault it longjmps back to the CPU
   loop via cpu_loop_exit() and never returns. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    /* NOTE(review): this dead debug printf references env->nip, which the
       compiled-in PPC handler also uses; it looks copied from there and
       would need adjusting before being enabled for SH4 -- verify. */
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
1092
1093#elif defined (TARGET_ALPHA)
/* TARGET_ALPHA variant of the user-mode SIGSEGV core handler.
   Returns 1 if the fault was fully handled (resume), 0 if it was not an
   MMU fault.  On a genuine guest CPU fault it longjmps back to the CPU
   loop via cpu_loop_exit() and never returns. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    /* NOTE(review): dead debug printf referencing env->nip, apparently
       copied from the PPC handler -- verify before enabling. */
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
f1ccf904
TS
1137#elif defined (TARGET_CRIS)
/* TARGET_CRIS variant of the user-mode SIGSEGV core handler.
   Returns 1 if the fault was fully handled (resume), 0 if it was not an
   MMU fault.  On a genuine guest CPU fault it longjmps back to the CPU
   loop via cpu_loop_exit() and never returns. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    /* NOTE(review): dead debug printf referencing env->nip, apparently
       copied from the PPC handler -- verify before enabling. */
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
1181
e4533c7a
FB
1182#else
1183#error unsupported target CPU
1184#endif
9de5e440 1185
2b413144
FB
1186#if defined(__i386__)
1187
d8ecc0b9
FB
/* Host-side accessors: extract PC, trap number and error code from the
   signal ucontext.  Darwin and Linux lay out the mcontext differently. */
#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

/* i386 host SIGSEGV front-end: decode the fault from the signal context
   and forward it to handle_cpu_signal().  The is_write flag is derived
   from the page-fault error code (bit 1) when the trap is 0xe. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP EIP
#define REG_ERR ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    /* trap 0xe is the x86 page fault; otherwise report a read fault */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
1221
bc51c5c9
FB
1222#elif defined(__x86_64__)
1223
/* x86_64 host SIGSEGV front-end: decode the fault from the signal
   context and forward it to handle_cpu_signal().  As on i386, the
   is_write flag comes from bit 1 of the page-fault error code when the
   trap number is 0xe. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
1237
83fb7adf 1238#elif defined(__powerpc__)
2b413144 1239
83fb7adf
FB
1240/***********************************************************************
1241 * signal context platform-specific definitions
1242 * From Wine
1243 */
1244#ifdef linux
1245/* All Registers access - only for local access */
1246# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1247/* Gpr Registers access */
1248# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1249# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1250# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1251# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1252# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1253# define LR_sig(context) REG_sig(link, context) /* Link register */
1254# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1255/* Float Registers access */
1256# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1257# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1258/* Exception Registers access */
1259# define DAR_sig(context) REG_sig(dar, context)
1260# define DSISR_sig(context) REG_sig(dsisr, context)
1261# define TRAP_sig(context) REG_sig(trap, context)
1262#endif /* linux */
1263
1264#ifdef __APPLE__
1265# include <sys/ucontext.h>
1266typedef struct ucontext SIGCONTEXT;
1267/* All Registers access - only for local access */
1268# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1269# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1270# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1271# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1272/* Gpr Registers access */
1273# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1274# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1275# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1276# define CTR_sig(context) REG_sig(ctr, context)
1277# define XER_sig(context) REG_sig(xer, context) /* Link register */
1278# define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1279# define CR_sig(context) REG_sig(cr, context) /* Condition register */
1280/* Float Registers access */
1281# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1282# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1283/* Exception Registers access */
1284# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1285# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1286# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1287#endif /* __APPLE__ */
1288
5fafdf24 1289int cpu_signal_handler(int host_signum, void *pinfo,
e4533c7a 1290 void *puc)
2b413144 1291{
5a7b542b 1292 siginfo_t *info = pinfo;
25eb4484 1293 struct ucontext *uc = puc;
25eb4484 1294 unsigned long pc;
25eb4484
FB
1295 int is_write;
1296
83fb7adf 1297 pc = IAR_sig(uc);
25eb4484
FB
1298 is_write = 0;
1299#if 0
1300 /* ppc 4xx case */
83fb7adf 1301 if (DSISR_sig(uc) & 0x00800000)
25eb4484
FB
1302 is_write = 1;
1303#else
83fb7adf 1304 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
25eb4484
FB
1305 is_write = 1;
1306#endif
5fafdf24 1307 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
bf3e8bf1 1308 is_write, &uc->uc_sigmask, puc);
2b413144
FB
1309}
1310
2f87c607
FB
1311#elif defined(__alpha__)
1312
5fafdf24 1313int cpu_signal_handler(int host_signum, void *pinfo,
2f87c607
FB
1314 void *puc)
1315{
5a7b542b 1316 siginfo_t *info = pinfo;
2f87c607
FB
1317 struct ucontext *uc = puc;
1318 uint32_t *pc = uc->uc_mcontext.sc_pc;
1319 uint32_t insn = *pc;
1320 int is_write = 0;
1321
8c6939c0 1322 /* XXX: need kernel patch to get write flag faster */
2f87c607
FB
1323 switch (insn >> 26) {
1324 case 0x0d: // stw
1325 case 0x0e: // stb
1326 case 0x0f: // stq_u
1327 case 0x24: // stf
1328 case 0x25: // stg
1329 case 0x26: // sts
1330 case 0x27: // stt
1331 case 0x2c: // stl
1332 case 0x2d: // stq
1333 case 0x2e: // stl_c
1334 case 0x2f: // stq_c
1335 is_write = 1;
1336 }
1337
5fafdf24 1338 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
bf3e8bf1 1339 is_write, &uc->uc_sigmask, puc);
2f87c607 1340}
8c6939c0
FB
1341#elif defined(__sparc__)
1342
/* Sparc host SIGSEGV front-end.  The register block is assumed to
   follow the siginfo_t in memory (Linux layout -- no standard glibc
   accessor exists); the faulting instruction is decoded to detect
   stores. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    /* format 3 (op == 3) covers the load/store instructions */
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
1374
1375#elif defined(__arm__)
1376
/* ARM host SIGSEGV front-end: PC is read from gregs[R15]; the write
   flag is not recovered from the context (always reported as a read). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1392
38e584a0
FB
1393#elif defined(__mc68000)
1394
/* m68k host SIGSEGV front-end: PC is read from gregs[16]; the write
   flag is not recovered from the context (always reported as a read). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1410
b8076a74
FB
1411#elif defined(__ia64)
1412
#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

/* ia64 host fault front-end: handles several fault signals, reading the
   PC from sc_ip and the write flag from the ISR word in siginfo (ISR.W,
   bit 33) when the kernel marked it valid.
   NOTE(review): the validity test reads info->si_segvflags -- confirm
   this matches the glibc siginfo layout on the supported ia64 targets. */
int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1444
90cb9493
FB
1445#elif defined(__s390__)
1446
/* s390 host SIGSEGV front-end: PC comes from the PSW address; the write
   flag is not recovered from the context (always reported as a read). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1461
1462#elif defined(__mips__)
1463
/* MIPS host SIGSEGV front-end: PC comes from uc_mcontext.pc; the write
   flag is not recovered from the context (always reported as a read). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1477
9de5e440 1478#else
2b413144 1479
3fb2ded1 1480#error host CPU specific signal handler needed
2b413144 1481
9de5e440 1482#endif
67b915a5
FB
1483
1484#endif /* !defined(CONFIG_SOFTMMU) */