/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

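/* Note on the value returned by tcg_qemu_tb_exec(): judging from its uses in
   this file, the two low-order bits of next_tb carry status and the rest is
   the address of the last executed TB.  Values 0/1 select the jump slot that
   tb_add_jump() patches for direct block chaining, and 2 (tested above and in
   the main loop below) marks an exit through the instruction-counter path, in
   which case the PC still has to be restored from the TB.  */
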
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

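/* TB lookup is two-level: tb_find_fast() first probes tb_jmp_cache, a small
   direct-mapped cache hashed on the virtual PC, and only falls back to
   tb_find_slow() on a miss.  The slow path walks the tb_phys_hash chain keyed
   on the physical PC and must also compare pc, cs_base, flags and, for blocks
   spanning a page boundary, the physical address of the second page before a
   cached TB may be reused.  */
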
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

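    /* The repeated #include of "hostregs_helper.h" above is a textual trick:
       with DECLARE_HOST_REGS the header expands to local variable
       declarations, with SAVE_HOST_REGS it saves the host registers reserved
       for globals (such as env), and the plain #include near the end of
       cpu_exec() restores them before returning.  */
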
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
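                /* Direct block chaining: when the previous TB exited through
                   jump slot 0 or 1 (encoded in the low bits of next_tb) and
                   the new TB does not span two pages, tb_add_jump() patches
                   that slot so the old TB's generated code branches straight
                   to the new one, bypassing this lookup loop on the next
                   pass.  The patching happens under tb_lock, and next_tb is
                   reset to 0 wherever the program flow may have changed
                   (interrupts, tb_invalidated_flag) so that no stale jump is
                   installed.  */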
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb.  Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
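
/* A rough usage sketch (not from this file): callers run cpu_exec() in a
   loop and dispatch on its return value, e.g.

       for (;;) {
           int trapnr = cpu_exec(env);        // returns an EXCP_* code
           switch (trapnr) {
           case EXCP_HALTED:                  // CPU halted, nothing to run
           case EXCP_INTERRUPT:               // exit requested, poll for I/O
           case EXCP_DEBUG:                   // breakpoint/watchpoint hit
           default:
               break;                         // target-specific handling here
           }
       }

   The actual caller-side handling lives in the per-target cpu_loop()
   functions (user mode) and in the main loop (system mode), not here.  */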

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

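/* Contract shared by all of the handle_cpu_signal() variants below: return 1
   if the fault was fully handled here (the page was unprotected, the guest
   MMU fault was serviced, or a guest exception was raised and the code
   longjmp()s back into cpu_exec()), and return 0 if the address does not
   belong to the guest at all, in which case the caller lets the host deal
   with the signal.  The per-target copies differ essentially only in the
   cpu_*_handle_mmu_fault() call and in how the guest exception is raised.  */
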
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

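/* The host-side cpu_signal_handler() implementations below all follow the
   same pattern: pull the faulting program counter (and, where the host
   reports it, a "was this a write?" bit, e.g. from the x86 page-fault error
   code or by decoding the store opcode on Alpha/SPARC) out of the signal
   context, then forward everything to handle_cpu_signal().  The *_sig()
   macros only hide the differences in ucontext/sigcontext layout between
   operating systems.  */
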
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)         ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)    ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)    ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)      ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)          REG_sig(r##reg_num, context)
# define IAR_sig(context)                   REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                   REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                   REG_sig(ctr, context)
# define XER_sig(context)                   REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                    REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                    REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)        FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                 ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                   EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)                 EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                  EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */
1542#endif /* !defined(CONFIG_SOFTMMU) */