]> git.proxmox.com Git - qemu.git/blame - cpu-exec.c
global s/fflush(logfile)/qemu_log_flush()/ (Eduardo Habkost)
[qemu.git] / cpu-exec.c
CommitLineData
7d13299d
FB
1/*
2 * i386 emulator main execution loop
5fafdf24 3 *
66321a11 4 * Copyright (c) 2003-2005 Fabrice Bellard
7d13299d 5 *
3ef693a0
FB
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
7d13299d 10 *
3ef693a0
FB
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
7d13299d 15 *
3ef693a0
FB
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
fad6cb1a 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
7d13299d 19 */
e4533c7a 20#include "config.h"
7cb69cae 21#define CPU_NO_GLOBAL_REGS
93ac68bc 22#include "exec.h"
956034d7 23#include "disas.h"
7cb69cae 24#include "tcg.h"
7ba1e619 25#include "kvm.h"
7d13299d 26
fbf9eeb3
FB
27#if !defined(CONFIG_SOFTMMU)
28#undef EAX
29#undef ECX
30#undef EDX
31#undef EBX
32#undef ESP
33#undef EBP
34#undef ESI
35#undef EDI
36#undef EIP
37#include <signal.h>
84778508 38#ifdef __linux__
fbf9eeb3
FB
39#include <sys/ucontext.h>
40#endif
84778508 41#endif
fbf9eeb3 42
572a9d4a
BS
43#if defined(__sparc__) && !defined(HOST_SOLARIS)
44// Work around ugly bugs in glibc that mangle global register contents
45#undef env
46#define env cpu_single_env
47#endif
48
36bdbe54
FB
49int tb_invalidated_flag;
50
dc99065b 51//#define DEBUG_EXEC
9de5e440 52//#define DEBUG_SIGNAL
7d13299d 53
e4533c7a
FB
54void cpu_loop_exit(void)
55{
bfed01fc
TS
56 /* NOTE: the register at this point must be saved by hand because
57 longjmp restore them */
58 regs_to_env();
e4533c7a
FB
59 longjmp(env->jmp_env, 1);
60}
bfed01fc 61
fbf9eeb3
FB
62/* exit the current TB from a signal handler. The host registers are
63 restored in a state compatible with the CPU emulator
64 */
5fafdf24 65void cpu_resume_from_signal(CPUState *env1, void *puc)
fbf9eeb3
FB
66{
67#if !defined(CONFIG_SOFTMMU)
84778508 68#ifdef __linux__
fbf9eeb3 69 struct ucontext *uc = puc;
84778508
BS
70#elif defined(__OpenBSD__)
71 struct sigcontext *uc = puc;
72#endif
fbf9eeb3
FB
73#endif
74
75 env = env1;
76
77 /* XXX: restore cpu registers saved in host registers */
78
79#if !defined(CONFIG_SOFTMMU)
80 if (puc) {
81 /* XXX: use siglongjmp ? */
84778508 82#ifdef __linux__
fbf9eeb3 83 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
84778508
BS
84#elif defined(__OpenBSD__)
85 sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
86#endif
fbf9eeb3
FB
87 }
88#endif
9a3ea654 89 env->exception_index = -1;
fbf9eeb3
FB
90 longjmp(env->jmp_env, 1);
91}
92
2e70f6ef
PB
93/* Execute the code without caching the generated code. An interpreter
94 could be used if available. */
95static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
96{
97 unsigned long next_tb;
98 TranslationBlock *tb;
99
100 /* Should never happen.
101 We only end up here when an existing TB is too long. */
102 if (max_cycles > CF_COUNT_MASK)
103 max_cycles = CF_COUNT_MASK;
104
105 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
106 max_cycles);
107 env->current_tb = tb;
108 /* execute the generated code */
109 next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
110
111 if ((next_tb & 3) == 2) {
112 /* Restore PC. This may happen if async event occurs before
113 the TB starts executing. */
622ed360 114 cpu_pc_from_tb(env, tb);
2e70f6ef
PB
115 }
116 tb_phys_invalidate(tb, -1);
117 tb_free(tb);
118}
119
8a40a180
FB
120static TranslationBlock *tb_find_slow(target_ulong pc,
121 target_ulong cs_base,
c068688b 122 uint64_t flags)
8a40a180
FB
123{
124 TranslationBlock *tb, **ptb1;
8a40a180
FB
125 unsigned int h;
126 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
3b46e624 127
8a40a180 128 tb_invalidated_flag = 0;
3b46e624 129
8a40a180 130 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
3b46e624 131
8a40a180
FB
132 /* find translated block using physical mappings */
133 phys_pc = get_phys_addr_code(env, pc);
134 phys_page1 = phys_pc & TARGET_PAGE_MASK;
135 phys_page2 = -1;
136 h = tb_phys_hash_func(phys_pc);
137 ptb1 = &tb_phys_hash[h];
138 for(;;) {
139 tb = *ptb1;
140 if (!tb)
141 goto not_found;
5fafdf24 142 if (tb->pc == pc &&
8a40a180 143 tb->page_addr[0] == phys_page1 &&
5fafdf24 144 tb->cs_base == cs_base &&
8a40a180
FB
145 tb->flags == flags) {
146 /* check next page if needed */
147 if (tb->page_addr[1] != -1) {
5fafdf24 148 virt_page2 = (pc & TARGET_PAGE_MASK) +
8a40a180
FB
149 TARGET_PAGE_SIZE;
150 phys_page2 = get_phys_addr_code(env, virt_page2);
151 if (tb->page_addr[1] == phys_page2)
152 goto found;
153 } else {
154 goto found;
155 }
156 }
157 ptb1 = &tb->phys_hash_next;
158 }
159 not_found:
2e70f6ef
PB
160 /* if no translated code available, then translate it now */
161 tb = tb_gen_code(env, pc, cs_base, flags, 0);
3b46e624 162
8a40a180 163 found:
8a40a180
FB
164 /* we add the TB in the virtual pc hash table */
165 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
8a40a180
FB
166 return tb;
167}
168
169static inline TranslationBlock *tb_find_fast(void)
170{
171 TranslationBlock *tb;
172 target_ulong cs_base, pc;
6b917547 173 int flags;
8a40a180
FB
174
175 /* we record a subset of the CPU state. It will
176 always be the same before a given translated block
177 is executed. */
6b917547 178 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
bce61846 179 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
551bd27f
TS
180 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
181 tb->flags != flags)) {
8a40a180
FB
182 tb = tb_find_slow(pc, cs_base, flags);
183 }
184 return tb;
185}
186
dde2367e
AL
187static CPUDebugExcpHandler *debug_excp_handler;
188
189CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
190{
191 CPUDebugExcpHandler *old_handler = debug_excp_handler;
192
193 debug_excp_handler = handler;
194 return old_handler;
195}
196
6e140f28
AL
197static void cpu_handle_debug_exception(CPUState *env)
198{
199 CPUWatchpoint *wp;
200
201 if (!env->watchpoint_hit)
c0ce998e 202 TAILQ_FOREACH(wp, &env->watchpoints, entry)
6e140f28 203 wp->flags &= ~BP_WATCHPOINT_HIT;
dde2367e
AL
204
205 if (debug_excp_handler)
206 debug_excp_handler(env);
6e140f28
AL
207}
208
7d13299d
FB
209/* main execution loop */
210
e4533c7a 211int cpu_exec(CPUState *env1)
7d13299d 212{
1057eaa7
PB
213#define DECLARE_HOST_REGS 1
214#include "hostregs_helper.h"
8a40a180 215 int ret, interrupt_request;
8a40a180 216 TranslationBlock *tb;
c27004ec 217 uint8_t *tc_ptr;
d5975363 218 unsigned long next_tb;
8c6939c0 219
bfed01fc
TS
220 if (cpu_halted(env1) == EXCP_HALTED)
221 return EXCP_HALTED;
5a1e3cfc 222
5fafdf24 223 cpu_single_env = env1;
6a00d601 224
7d13299d 225 /* first we save global registers */
1057eaa7
PB
226#define SAVE_HOST_REGS 1
227#include "hostregs_helper.h"
c27004ec 228 env = env1;
e4533c7a 229
0d1a29f9 230 env_to_regs();
ecb644f4 231#if defined(TARGET_I386)
9de5e440 232 /* put eflags in CPU temporary format */
fc2b4c48
FB
233 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
234 DF = 1 - (2 * ((env->eflags >> 10) & 1));
9de5e440 235 CC_OP = CC_OP_EFLAGS;
fc2b4c48 236 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
93ac68bc 237#elif defined(TARGET_SPARC)
e6e5906b
PB
238#elif defined(TARGET_M68K)
239 env->cc_op = CC_OP_FLAGS;
240 env->cc_dest = env->sr & 0xf;
241 env->cc_x = (env->sr >> 4) & 1;
ecb644f4
TS
242#elif defined(TARGET_ALPHA)
243#elif defined(TARGET_ARM)
244#elif defined(TARGET_PPC)
6af0bf9c 245#elif defined(TARGET_MIPS)
fdf9b3e8 246#elif defined(TARGET_SH4)
f1ccf904 247#elif defined(TARGET_CRIS)
fdf9b3e8 248 /* XXXXX */
e4533c7a
FB
249#else
250#error unsupported target CPU
251#endif
3fb2ded1 252 env->exception_index = -1;
9d27abd9 253
7d13299d 254 /* prepare setjmp context for exception handling */
3fb2ded1
FB
255 for(;;) {
256 if (setjmp(env->jmp_env) == 0) {
ee8b7021 257 env->current_tb = NULL;
3fb2ded1
FB
258 /* if an exception is pending, we execute it here */
259 if (env->exception_index >= 0) {
260 if (env->exception_index >= EXCP_INTERRUPT) {
261 /* exit request from the cpu execution loop */
262 ret = env->exception_index;
6e140f28
AL
263 if (ret == EXCP_DEBUG)
264 cpu_handle_debug_exception(env);
3fb2ded1 265 break;
72d239ed
AJ
266 } else {
267#if defined(CONFIG_USER_ONLY)
3fb2ded1 268 /* if user mode only, we simulate a fake exception
9f083493 269 which will be handled outside the cpu execution
3fb2ded1 270 loop */
83479e77 271#if defined(TARGET_I386)
5fafdf24
TS
272 do_interrupt_user(env->exception_index,
273 env->exception_is_int,
274 env->error_code,
3fb2ded1 275 env->exception_next_eip);
eba01623
FB
276 /* successfully delivered */
277 env->old_exception = -1;
83479e77 278#endif
3fb2ded1
FB
279 ret = env->exception_index;
280 break;
72d239ed 281#else
83479e77 282#if defined(TARGET_I386)
3fb2ded1
FB
283 /* simulate a real cpu exception. On i386, it can
284 trigger new exceptions, but we do not handle
285 double or triple faults yet. */
5fafdf24
TS
286 do_interrupt(env->exception_index,
287 env->exception_is_int,
288 env->error_code,
d05e66d2 289 env->exception_next_eip, 0);
678dde13
TS
290 /* successfully delivered */
291 env->old_exception = -1;
ce09776b
FB
292#elif defined(TARGET_PPC)
293 do_interrupt(env);
6af0bf9c
FB
294#elif defined(TARGET_MIPS)
295 do_interrupt(env);
e95c8d51 296#elif defined(TARGET_SPARC)
f2bc7e7f 297 do_interrupt(env);
b5ff1b31
FB
298#elif defined(TARGET_ARM)
299 do_interrupt(env);
fdf9b3e8
FB
300#elif defined(TARGET_SH4)
301 do_interrupt(env);
eddf68a6
JM
302#elif defined(TARGET_ALPHA)
303 do_interrupt(env);
f1ccf904
TS
304#elif defined(TARGET_CRIS)
305 do_interrupt(env);
0633879f
PB
306#elif defined(TARGET_M68K)
307 do_interrupt(0);
72d239ed 308#endif
83479e77 309#endif
3fb2ded1
FB
310 }
311 env->exception_index = -1;
5fafdf24 312 }
9df217a3
FB
313#ifdef USE_KQEMU
314 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
315 int ret;
a7812ae4 316 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
9df217a3
FB
317 ret = kqemu_cpu_exec(env);
318 /* put eflags in CPU temporary format */
319 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
320 DF = 1 - (2 * ((env->eflags >> 10) & 1));
321 CC_OP = CC_OP_EFLAGS;
322 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
323 if (ret == 1) {
324 /* exception */
325 longjmp(env->jmp_env, 1);
326 } else if (ret == 2) {
327 /* softmmu execution needed */
328 } else {
329 if (env->interrupt_request != 0) {
330 /* hardware interrupt will be executed just after */
331 } else {
332 /* otherwise, we restart */
333 longjmp(env->jmp_env, 1);
334 }
335 }
3fb2ded1 336 }
9df217a3
FB
337#endif
338
7ba1e619 339 if (kvm_enabled()) {
becfc390
AL
340 kvm_cpu_exec(env);
341 longjmp(env->jmp_env, 1);
7ba1e619
AL
342 }
343
b5fc09ae 344 next_tb = 0; /* force lookup of first TB */
3fb2ded1 345 for(;;) {
68a79315 346 interrupt_request = env->interrupt_request;
e1638bd8 347 if (unlikely(interrupt_request)) {
348 if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
349 /* Mask out external interrupts for this step. */
350 interrupt_request &= ~(CPU_INTERRUPT_HARD |
351 CPU_INTERRUPT_FIQ |
352 CPU_INTERRUPT_SMI |
353 CPU_INTERRUPT_NMI);
354 }
6658ffb8
PB
355 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
356 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
357 env->exception_index = EXCP_DEBUG;
358 cpu_loop_exit();
359 }
a90b7318 360#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
f1ccf904 361 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
a90b7318
AZ
362 if (interrupt_request & CPU_INTERRUPT_HALT) {
363 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
364 env->halted = 1;
365 env->exception_index = EXCP_HLT;
366 cpu_loop_exit();
367 }
368#endif
68a79315 369#if defined(TARGET_I386)
db620f46
FB
370 if (env->hflags2 & HF2_GIF_MASK) {
371 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
372 !(env->hflags & HF_SMM_MASK)) {
373 svm_check_intercept(SVM_EXIT_SMI);
374 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
375 do_smm_enter();
376 next_tb = 0;
377 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
378 !(env->hflags2 & HF2_NMI_MASK)) {
379 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
380 env->hflags2 |= HF2_NMI_MASK;
381 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
382 next_tb = 0;
383 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
384 (((env->hflags2 & HF2_VINTR_MASK) &&
385 (env->hflags2 & HF2_HIF_MASK)) ||
386 (!(env->hflags2 & HF2_VINTR_MASK) &&
387 (env->eflags & IF_MASK &&
388 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
389 int intno;
390 svm_check_intercept(SVM_EXIT_INTR);
391 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
392 intno = cpu_get_pic_interrupt(env);
93fcfe39 393 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
db620f46
FB
394 do_interrupt(intno, 0, 0, 0, 1);
395 /* ensure that no TB jump will be modified as
396 the program flow was changed */
397 next_tb = 0;
0573fbfc 398#if !defined(CONFIG_USER_ONLY)
db620f46
FB
399 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
400 (env->eflags & IF_MASK) &&
401 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
402 int intno;
403 /* FIXME: this should respect TPR */
404 svm_check_intercept(SVM_EXIT_VINTR);
db620f46 405 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
93fcfe39 406 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
db620f46 407 do_interrupt(intno, 0, 0, 0, 1);
d40c54d6 408 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
db620f46 409 next_tb = 0;
907a5b26 410#endif
db620f46 411 }
68a79315 412 }
ce09776b 413#elif defined(TARGET_PPC)
9fddaa0c
FB
414#if 0
415 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
416 cpu_ppc_reset(env);
417 }
418#endif
47103572 419 if (interrupt_request & CPU_INTERRUPT_HARD) {
e9df014c
JM
420 ppc_hw_interrupt(env);
421 if (env->pending_interrupts == 0)
422 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
b5fc09ae 423 next_tb = 0;
ce09776b 424 }
6af0bf9c
FB
425#elif defined(TARGET_MIPS)
426 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
24c7b0e3 427 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
6af0bf9c 428 (env->CP0_Status & (1 << CP0St_IE)) &&
24c7b0e3
TS
429 !(env->CP0_Status & (1 << CP0St_EXL)) &&
430 !(env->CP0_Status & (1 << CP0St_ERL)) &&
6af0bf9c
FB
431 !(env->hflags & MIPS_HFLAG_DM)) {
432 /* Raise it */
433 env->exception_index = EXCP_EXT_INTERRUPT;
434 env->error_code = 0;
435 do_interrupt(env);
b5fc09ae 436 next_tb = 0;
6af0bf9c 437 }
e95c8d51 438#elif defined(TARGET_SPARC)
66321a11
FB
439 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
440 (env->psret != 0)) {
441 int pil = env->interrupt_index & 15;
442 int type = env->interrupt_index & 0xf0;
443
444 if (((type == TT_EXTINT) &&
445 (pil == 15 || pil > env->psrpil)) ||
446 type != TT_EXTINT) {
447 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
f2bc7e7f
BS
448 env->exception_index = env->interrupt_index;
449 do_interrupt(env);
66321a11 450 env->interrupt_index = 0;
327ac2e7
BS
451#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
452 cpu_check_irqs(env);
453#endif
b5fc09ae 454 next_tb = 0;
66321a11 455 }
e95c8d51
FB
456 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
457 //do_interrupt(0, 0, 0, 0, 0);
458 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
a90b7318 459 }
b5ff1b31
FB
460#elif defined(TARGET_ARM)
461 if (interrupt_request & CPU_INTERRUPT_FIQ
462 && !(env->uncached_cpsr & CPSR_F)) {
463 env->exception_index = EXCP_FIQ;
464 do_interrupt(env);
b5fc09ae 465 next_tb = 0;
b5ff1b31 466 }
9ee6e8bb
PB
467 /* ARMv7-M interrupt return works by loading a magic value
468 into the PC. On real hardware the load causes the
469 return to occur. The qemu implementation performs the
470 jump normally, then does the exception return when the
471 CPU tries to execute code at the magic address.
472 This will cause the magic PC value to be pushed to
473 the stack if an interrupt occured at the wrong time.
474 We avoid this by disabling interrupts when
475 pc contains a magic address. */
b5ff1b31 476 if (interrupt_request & CPU_INTERRUPT_HARD
9ee6e8bb
PB
477 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
478 || !(env->uncached_cpsr & CPSR_I))) {
b5ff1b31
FB
479 env->exception_index = EXCP_IRQ;
480 do_interrupt(env);
b5fc09ae 481 next_tb = 0;
b5ff1b31 482 }
fdf9b3e8 483#elif defined(TARGET_SH4)
e96e2044
TS
484 if (interrupt_request & CPU_INTERRUPT_HARD) {
485 do_interrupt(env);
b5fc09ae 486 next_tb = 0;
e96e2044 487 }
eddf68a6
JM
488#elif defined(TARGET_ALPHA)
489 if (interrupt_request & CPU_INTERRUPT_HARD) {
490 do_interrupt(env);
b5fc09ae 491 next_tb = 0;
eddf68a6 492 }
f1ccf904 493#elif defined(TARGET_CRIS)
1b1a38b0
EI
494 if (interrupt_request & CPU_INTERRUPT_HARD
495 && (env->pregs[PR_CCS] & I_FLAG)) {
496 env->exception_index = EXCP_IRQ;
497 do_interrupt(env);
498 next_tb = 0;
499 }
500 if (interrupt_request & CPU_INTERRUPT_NMI
501 && (env->pregs[PR_CCS] & M_FLAG)) {
502 env->exception_index = EXCP_NMI;
f1ccf904 503 do_interrupt(env);
b5fc09ae 504 next_tb = 0;
f1ccf904 505 }
0633879f
PB
506#elif defined(TARGET_M68K)
507 if (interrupt_request & CPU_INTERRUPT_HARD
508 && ((env->sr & SR_I) >> SR_I_SHIFT)
509 < env->pending_level) {
510 /* Real hardware gets the interrupt vector via an
511 IACK cycle at this point. Current emulated
512 hardware doesn't rely on this, so we
513 provide/save the vector when the interrupt is
514 first signalled. */
515 env->exception_index = env->pending_vector;
516 do_interrupt(1);
b5fc09ae 517 next_tb = 0;
0633879f 518 }
68a79315 519#endif
9d05095e
FB
520 /* Don't use the cached interupt_request value,
521 do_interrupt may have updated the EXITTB flag. */
b5ff1b31 522 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
bf3e8bf1
FB
523 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
524 /* ensure that no TB jump will be modified as
525 the program flow was changed */
b5fc09ae 526 next_tb = 0;
bf3e8bf1 527 }
68a79315
FB
528 if (interrupt_request & CPU_INTERRUPT_EXIT) {
529 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
530 env->exception_index = EXCP_INTERRUPT;
531 cpu_loop_exit();
532 }
3fb2ded1 533 }
7d13299d 534#ifdef DEBUG_EXEC
b5ff1b31 535 if ((loglevel & CPU_LOG_TB_CPU)) {
3fb2ded1 536 /* restore flags in standard format */
ecb644f4
TS
537 regs_to_env();
538#if defined(TARGET_I386)
a7812ae4 539 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
93fcfe39 540 log_cpu_state(env, X86_DUMP_CCOP);
3fb2ded1 541 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
e4533c7a 542#elif defined(TARGET_ARM)
93fcfe39 543 log_cpu_state(env, 0);
93ac68bc 544#elif defined(TARGET_SPARC)
93fcfe39 545 log_cpu_state(env, 0);
67867308 546#elif defined(TARGET_PPC)
93fcfe39 547 log_cpu_state(env, 0);
e6e5906b
PB
548#elif defined(TARGET_M68K)
549 cpu_m68k_flush_flags(env, env->cc_op);
550 env->cc_op = CC_OP_FLAGS;
551 env->sr = (env->sr & 0xffe0)
552 | env->cc_dest | (env->cc_x << 4);
93fcfe39 553 log_cpu_state(env, 0);
6af0bf9c 554#elif defined(TARGET_MIPS)
93fcfe39 555 log_cpu_state(env, 0);
fdf9b3e8 556#elif defined(TARGET_SH4)
93fcfe39 557 log_cpu_state(env, 0);
eddf68a6 558#elif defined(TARGET_ALPHA)
93fcfe39 559 log_cpu_state(env, 0);
f1ccf904 560#elif defined(TARGET_CRIS)
93fcfe39 561 log_cpu_state(env, 0);
e4533c7a 562#else
5fafdf24 563#error unsupported target CPU
e4533c7a 564#endif
3fb2ded1 565 }
7d13299d 566#endif
d5975363 567 spin_lock(&tb_lock);
8a40a180 568 tb = tb_find_fast();
d5975363
PB
569 /* Note: we do it here to avoid a gcc bug on Mac OS X when
570 doing it in tb_find_slow */
571 if (tb_invalidated_flag) {
572 /* as some TB could have been invalidated because
573 of memory exceptions while generating the code, we
574 must recompute the hash index here */
575 next_tb = 0;
2e70f6ef 576 tb_invalidated_flag = 0;
d5975363 577 }
9d27abd9 578#ifdef DEBUG_EXEC
93fcfe39
AL
579 qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
580 (long)tb->tc_ptr, tb->pc,
581 lookup_symbol(tb->pc));
9d27abd9 582#endif
8a40a180
FB
583 /* see if we can patch the calling TB. When the TB
584 spans two pages, we cannot safely do a direct
585 jump. */
c27004ec 586 {
b5fc09ae 587 if (next_tb != 0 &&
4d7a0880 588#ifdef USE_KQEMU
f32fc648
FB
589 (env->kqemu_enabled != 2) &&
590#endif
ec6338ba 591 tb->page_addr[1] == -1) {
b5fc09ae 592 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
3fb2ded1 593 }
c27004ec 594 }
d5975363 595 spin_unlock(&tb_lock);
83479e77 596 env->current_tb = tb;
55e8b85e 597
598 /* cpu_interrupt might be called while translating the
599 TB, but before it is linked into a potentially
600 infinite loop and becomes env->current_tb. Avoid
601 starting execution if there is a pending interrupt. */
602 if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
603 env->current_tb = NULL;
604
2e70f6ef
PB
605 while (env->current_tb) {
606 tc_ptr = tb->tc_ptr;
3fb2ded1 607 /* execute the generated code */
572a9d4a
BS
608#if defined(__sparc__) && !defined(HOST_SOLARIS)
609#undef env
2e70f6ef 610 env = cpu_single_env;
572a9d4a
BS
611#define env cpu_single_env
612#endif
2e70f6ef
PB
613 next_tb = tcg_qemu_tb_exec(tc_ptr);
614 env->current_tb = NULL;
615 if ((next_tb & 3) == 2) {
bf20dc07 616 /* Instruction counter expired. */
2e70f6ef
PB
617 int insns_left;
618 tb = (TranslationBlock *)(long)(next_tb & ~3);
619 /* Restore PC. */
622ed360 620 cpu_pc_from_tb(env, tb);
2e70f6ef
PB
621 insns_left = env->icount_decr.u32;
622 if (env->icount_extra && insns_left >= 0) {
623 /* Refill decrementer and continue execution. */
624 env->icount_extra += insns_left;
625 if (env->icount_extra > 0xffff) {
626 insns_left = 0xffff;
627 } else {
628 insns_left = env->icount_extra;
629 }
630 env->icount_extra -= insns_left;
631 env->icount_decr.u16.low = insns_left;
632 } else {
633 if (insns_left > 0) {
634 /* Execute remaining instructions. */
635 cpu_exec_nocache(insns_left, tb);
636 }
637 env->exception_index = EXCP_INTERRUPT;
638 next_tb = 0;
639 cpu_loop_exit();
640 }
641 }
642 }
4cbf74b6
FB
643 /* reset soft MMU for next block (it can currently
644 only be set by a memory fault) */
f32fc648
FB
645#if defined(USE_KQEMU)
646#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
647 if (kqemu_is_ok(env) &&
648 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
649 cpu_loop_exit();
650 }
4cbf74b6 651#endif
50a518e3 652 } /* for(;;) */
3fb2ded1 653 } else {
0d1a29f9 654 env_to_regs();
7d13299d 655 }
3fb2ded1
FB
656 } /* for(;;) */
657
7d13299d 658
e4533c7a 659#if defined(TARGET_I386)
9de5e440 660 /* restore flags in standard format */
a7812ae4 661 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
e4533c7a 662#elif defined(TARGET_ARM)
b7bcbe95 663 /* XXX: Save/restore host fpu exception state?. */
93ac68bc 664#elif defined(TARGET_SPARC)
67867308 665#elif defined(TARGET_PPC)
e6e5906b
PB
666#elif defined(TARGET_M68K)
667 cpu_m68k_flush_flags(env, env->cc_op);
668 env->cc_op = CC_OP_FLAGS;
669 env->sr = (env->sr & 0xffe0)
670 | env->cc_dest | (env->cc_x << 4);
6af0bf9c 671#elif defined(TARGET_MIPS)
fdf9b3e8 672#elif defined(TARGET_SH4)
eddf68a6 673#elif defined(TARGET_ALPHA)
f1ccf904 674#elif defined(TARGET_CRIS)
fdf9b3e8 675 /* XXXXX */
e4533c7a
FB
676#else
677#error unsupported target CPU
678#endif
1057eaa7
PB
679
680 /* restore global registers */
1057eaa7
PB
681#include "hostregs_helper.h"
682
6a00d601 683 /* fail safe : never use cpu_single_env outside cpu_exec() */
5fafdf24 684 cpu_single_env = NULL;
7d13299d
FB
685 return ret;
686}
6dbad63e 687
fbf9eeb3
FB
688/* must only be called from the generated code as an exception can be
689 generated */
690void tb_invalidate_page_range(target_ulong start, target_ulong end)
691{
dc5d0b3d
FB
692 /* XXX: cannot enable it yet because it yields to MMU exception
693 where NIP != read address on PowerPC */
694#if 0
fbf9eeb3
FB
695 target_ulong phys_addr;
696 phys_addr = get_phys_addr_code(env, start);
697 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
dc5d0b3d 698#endif
fbf9eeb3
FB
699}
700
1a18c71b 701#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
e4533c7a 702
6dbad63e
FB
703void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
704{
705 CPUX86State *saved_env;
706
707 saved_env = env;
708 env = s;
a412ac57 709 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
a513fe19 710 selector &= 0xffff;
5fafdf24 711 cpu_x86_load_seg_cache(env, seg_reg, selector,
c27004ec 712 (selector << 4), 0xffff, 0);
a513fe19 713 } else {
5d97559d 714 helper_load_seg(seg_reg, selector);
a513fe19 715 }
6dbad63e
FB
716 env = saved_env;
717}
9de5e440 718
6f12a2a6 719void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
d0a1ffc9
FB
720{
721 CPUX86State *saved_env;
722
723 saved_env = env;
724 env = s;
3b46e624 725
6f12a2a6 726 helper_fsave(ptr, data32);
d0a1ffc9
FB
727
728 env = saved_env;
729}
730
6f12a2a6 731void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
d0a1ffc9
FB
732{
733 CPUX86State *saved_env;
734
735 saved_env = env;
736 env = s;
3b46e624 737
6f12a2a6 738 helper_frstor(ptr, data32);
d0a1ffc9
FB
739
740 env = saved_env;
741}
742
e4533c7a
FB
743#endif /* TARGET_I386 */
744
67b915a5
FB
745#if !defined(CONFIG_SOFTMMU)
746
3fb2ded1
FB
747#if defined(TARGET_I386)
748
b56dad1c 749/* 'pc' is the host PC at which the exception was raised. 'address' is
fd6ce8f6
FB
750 the effective address of the memory exception. 'is_write' is 1 if a
751 write caused the exception and otherwise 0'. 'old_set' is the
752 signal set which should be restored */
2b413144 753static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
5fafdf24 754 int is_write, sigset_t *old_set,
bf3e8bf1 755 void *puc)
9de5e440 756{
a513fe19
FB
757 TranslationBlock *tb;
758 int ret;
68a79315 759
83479e77
FB
760 if (cpu_single_env)
761 env = cpu_single_env; /* XXX: find a correct solution for multithread */
fd6ce8f6 762#if defined(DEBUG_SIGNAL)
5fafdf24 763 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
bf3e8bf1 764 pc, address, is_write, *(unsigned long *)old_set);
9de5e440 765#endif
25eb4484 766 /* XXX: locking issue */
53a5960a 767 if (is_write && page_unprotect(h2g(address), pc, puc)) {
fd6ce8f6
FB
768 return 1;
769 }
fbf9eeb3 770
3fb2ded1 771 /* see if it is an MMU fault */
6ebbf390 772 ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
3fb2ded1
FB
773 if (ret < 0)
774 return 0; /* not an MMU fault */
775 if (ret == 0)
776 return 1; /* the MMU fault was handled without causing real CPU fault */
777 /* now we have a real cpu fault */
a513fe19
FB
778 tb = tb_find_pc(pc);
779 if (tb) {
9de5e440
FB
780 /* the PC is inside the translated code. It means that we have
781 a virtual CPU fault */
bf3e8bf1 782 cpu_restore_state(tb, env, pc, puc);
3fb2ded1 783 }
4cbf74b6 784 if (ret == 1) {
3fb2ded1 785#if 0
5fafdf24 786 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
4cbf74b6 787 env->eip, env->cr[2], env->error_code);
3fb2ded1 788#endif
4cbf74b6
FB
789 /* we restore the process signal mask as the sigreturn should
790 do it (XXX: use sigsetjmp) */
791 sigprocmask(SIG_SETMASK, old_set, NULL);
54ca9095 792 raise_exception_err(env->exception_index, env->error_code);
4cbf74b6
FB
793 } else {
794 /* activate soft MMU for this block */
3f337316 795 env->hflags |= HF_SOFTMMU_MASK;
fbf9eeb3 796 cpu_resume_from_signal(env, puc);
4cbf74b6 797 }
3fb2ded1
FB
798 /* never comes here */
799 return 1;
800}
801
e4533c7a 802#elif defined(TARGET_ARM)
3fb2ded1 803static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
bf3e8bf1
FB
804 int is_write, sigset_t *old_set,
805 void *puc)
3fb2ded1 806{
68016c62
FB
807 TranslationBlock *tb;
808 int ret;
809
810 if (cpu_single_env)
811 env = cpu_single_env; /* XXX: find a correct solution for multithread */
812#if defined(DEBUG_SIGNAL)
5fafdf24 813 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
68016c62
FB
814 pc, address, is_write, *(unsigned long *)old_set);
815#endif
9f0777ed 816 /* XXX: locking issue */
53a5960a 817 if (is_write && page_unprotect(h2g(address), pc, puc)) {
9f0777ed
FB
818 return 1;
819 }
68016c62 820 /* see if it is an MMU fault */
6ebbf390 821 ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
68016c62
FB
822 if (ret < 0)
823 return 0; /* not an MMU fault */
824 if (ret == 0)
825 return 1; /* the MMU fault was handled without causing real CPU fault */
826 /* now we have a real cpu fault */
827 tb = tb_find_pc(pc);
828 if (tb) {
829 /* the PC is inside the translated code. It means that we have
830 a virtual CPU fault */
831 cpu_restore_state(tb, env, pc, puc);
832 }
833 /* we restore the process signal mask as the sigreturn should
834 do it (XXX: use sigsetjmp) */
835 sigprocmask(SIG_SETMASK, old_set, NULL);
836 cpu_loop_exit();
968c74da
AJ
837 /* never comes here */
838 return 1;
3fb2ded1 839}
93ac68bc
FB
840#elif defined(TARGET_SPARC)
841static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
bf3e8bf1
FB
842 int is_write, sigset_t *old_set,
843 void *puc)
93ac68bc 844{
68016c62
FB
845 TranslationBlock *tb;
846 int ret;
847
848 if (cpu_single_env)
849 env = cpu_single_env; /* XXX: find a correct solution for multithread */
850#if defined(DEBUG_SIGNAL)
5fafdf24 851 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
68016c62
FB
852 pc, address, is_write, *(unsigned long *)old_set);
853#endif
b453b70b 854 /* XXX: locking issue */
53a5960a 855 if (is_write && page_unprotect(h2g(address), pc, puc)) {
b453b70b
FB
856 return 1;
857 }
68016c62 858 /* see if it is an MMU fault */
6ebbf390 859 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
68016c62
FB
860 if (ret < 0)
861 return 0; /* not an MMU fault */
862 if (ret == 0)
863 return 1; /* the MMU fault was handled without causing real CPU fault */
864 /* now we have a real cpu fault */
865 tb = tb_find_pc(pc);
866 if (tb) {
867 /* the PC is inside the translated code. It means that we have
868 a virtual CPU fault */
869 cpu_restore_state(tb, env, pc, puc);
870 }
871 /* we restore the process signal mask as the sigreturn should
872 do it (XXX: use sigsetjmp) */
873 sigprocmask(SIG_SETMASK, old_set, NULL);
874 cpu_loop_exit();
968c74da
AJ
875 /* never comes here */
876 return 1;
93ac68bc 877}
67867308
FB
878#elif defined (TARGET_PPC)
/* Handle a host fault taken while emulating a TARGET_PPC guest in user
   mode.  Returns 1 if the fault was fully handled (a write to a
   write-protected code page, or a guest MMU fault that was serviced),
   0 if it was not an MMU fault and the caller must treat it as a real
   host crash.  When a guest exception must be raised this function does
   not return normally: cpu_loop_exit() / cpu_resume_from_signal()
   longjmp back into the main execution loop. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* h2g() converts the faulting host address to its guest view before
       unprotecting the page that holds translated code */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
927
928#elif defined(TARGET_M68K)
929static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
930 int is_write, sigset_t *old_set,
931 void *puc)
932{
933 TranslationBlock *tb;
934 int ret;
935
936 if (cpu_single_env)
937 env = cpu_single_env; /* XXX: find a correct solution for multithread */
938#if defined(DEBUG_SIGNAL)
5fafdf24 939 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
e6e5906b
PB
940 pc, address, is_write, *(unsigned long *)old_set);
941#endif
942 /* XXX: locking issue */
943 if (is_write && page_unprotect(address, pc, puc)) {
944 return 1;
945 }
946 /* see if it is an MMU fault */
6ebbf390 947 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
e6e5906b
PB
948 if (ret < 0)
949 return 0; /* not an MMU fault */
950 if (ret == 0)
951 return 1; /* the MMU fault was handled without causing real CPU fault */
952 /* now we have a real cpu fault */
953 tb = tb_find_pc(pc);
954 if (tb) {
955 /* the PC is inside the translated code. It means that we have
956 a virtual CPU fault */
957 cpu_restore_state(tb, env, pc, puc);
958 }
959 /* we restore the process signal mask as the sigreturn should
960 do it (XXX: use sigsetjmp) */
961 sigprocmask(SIG_SETMASK, old_set, NULL);
962 cpu_loop_exit();
963 /* never comes here */
67867308
FB
964 return 1;
965}
6af0bf9c
FB
966
967#elif defined (TARGET_MIPS)
/* Handle a host fault taken while emulating a TARGET_MIPS guest in user
   mode.  Returns 1 if the fault was fully handled, 0 if it was not an
   MMU fault.  Does not return normally when a guest exception is
   raised: cpu_loop_exit() / cpu_resume_from_signal() longjmp back into
   the main execution loop. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
1016
fdf9b3e8
FB
1017#elif defined (TARGET_SH4)
/* Handle a host fault taken while emulating a TARGET_SH4 guest in user
   mode.  Returns 1 if the fault was fully handled, 0 if it was not an
   MMU fault.  Does not return when a guest exception is raised:
   cpu_loop_exit() longjmps back into the main execution loop. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    /* NOTE(review): leftover copy-paste from the PPC handler — SH4's
       CPUState has no "nip" field; this block would not compile if
       enabled.  Dead code under #if 0. */
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
1061
1062#elif defined (TARGET_ALPHA)
/* Handle a host fault taken while emulating a TARGET_ALPHA guest in
   user mode.  Returns 1 if the fault was fully handled, 0 if it was
   not an MMU fault.  Does not return when a guest exception is raised:
   cpu_loop_exit() longjmps back into the main execution loop. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    /* NOTE(review): leftover copy-paste from the PPC handler — Alpha's
       CPUState has no "nip" field.  Dead code under #if 0. */
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
f1ccf904
TS
1106#elif defined (TARGET_CRIS)
/* Handle a host fault taken while emulating a TARGET_CRIS guest in user
   mode.  Returns 1 if the fault was fully handled, 0 if it was not an
   MMU fault.  Does not return when a guest exception is raised:
   cpu_loop_exit() longjmps back into the main execution loop. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
1146
e4533c7a
FB
1147#else
1148#error unsupported target CPU
1149#endif
9de5e440 1150
2b413144
FB
1151#if defined(__i386__)
1152
d8ecc0b9
FB
1153#if defined(__APPLE__)
1154# include <sys/ucontext.h>
1155
1156# define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1157# define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1158# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1159#else
1160# define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1161# define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1162# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1163#endif
1164
/* Host-side fault handler for i386 hosts: extract the faulting EIP and
   the page-fault error code from the signal context and forward them
   to handle_cpu_signal().  The write flag is bit 1 of the error code
   and is only meaningful for trap 0xe (page fault). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP EIP
#define REG_ERR ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
1186
bc51c5c9
FB
1187#elif defined(__x86_64__)
1188
b3efe5c8
BS
1189#ifdef __NetBSD__
1190#define REG_ERR _REG_ERR
1191#define REG_TRAPNO _REG_TRAPNO
1192
1193#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
1194#define QEMU_UC_MACHINE_PC(uc) _UC_MACHINE_PC(uc)
1195#else
1196#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
1197#define QEMU_UC_MACHINE_PC(uc) QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
1198#endif
1199
/* Host-side fault handler for x86_64 hosts: same logic as the i386
   version, but the RIP / trap number / error code are reached through
   the QEMU_UC_* wrappers that abstract the NetBSD vs. glibc ucontext
   layouts. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = QEMU_UC_MACHINE_PC(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
1217
e58ffeb3 1218#elif defined(_ARCH_PPC)
2b413144 1219
83fb7adf
FB
1220/***********************************************************************
1221 * signal context platform-specific definitions
1222 * From Wine
1223 */
1224#ifdef linux
1225/* All Registers access - only for local access */
1226# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1227/* Gpr Registers access */
1228# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1229# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1230# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1231# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1232# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1233# define LR_sig(context) REG_sig(link, context) /* Link register */
1234# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1235/* Float Registers access */
1236# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1237# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1238/* Exception Registers access */
1239# define DAR_sig(context) REG_sig(dar, context)
1240# define DSISR_sig(context) REG_sig(dsisr, context)
1241# define TRAP_sig(context) REG_sig(trap, context)
1242#endif /* linux */
1243
1244#ifdef __APPLE__
1245# include <sys/ucontext.h>
1246typedef struct ucontext SIGCONTEXT;
1247/* All Registers access - only for local access */
1248# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1249# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1250# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1251# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1252/* Gpr Registers access */
1253# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1254# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1255# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1256# define CTR_sig(context) REG_sig(ctr, context)
1257# define XER_sig(context) REG_sig(xer, context) /* Link register */
1258# define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1259# define CR_sig(context) REG_sig(cr, context) /* Condition register */
1260/* Float Registers access */
1261# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1262# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1263/* Exception Registers access */
1264# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1265# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1266# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1267#endif /* __APPLE__ */
1268
/* Host-side fault handler for PowerPC hosts: the faulting PC comes
   from the IAR, and the write flag from the DSISR store bit (skipped
   for trap 0x400, instruction storage interrupts). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1290
2f87c607
FB
1291#elif defined(__alpha__)
1292
5fafdf24 1293int cpu_signal_handler(int host_signum, void *pinfo,
2f87c607
FB
1294 void *puc)
1295{
5a7b542b 1296 siginfo_t *info = pinfo;
2f87c607
FB
1297 struct ucontext *uc = puc;
1298 uint32_t *pc = uc->uc_mcontext.sc_pc;
1299 uint32_t insn = *pc;
1300 int is_write = 0;
1301
8c6939c0 1302 /* XXX: need kernel patch to get write flag faster */
2f87c607
FB
1303 switch (insn >> 26) {
1304 case 0x0d: // stw
1305 case 0x0e: // stb
1306 case 0x0f: // stq_u
1307 case 0x24: // stf
1308 case 0x25: // stg
1309 case 0x26: // sts
1310 case 0x27: // stt
1311 case 0x2c: // stl
1312 case 0x2d: // stq
1313 case 0x2e: // stl_c
1314 case 0x2f: // stq_c
1315 is_write = 1;
1316 }
1317
5fafdf24 1318 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
bf3e8bf1 1319 is_write, &uc->uc_sigmask, puc);
2f87c607 1320}
8c6939c0
FB
1321#elif defined(__sparc__)
1322
/* Host-side fault handler for SPARC hosts: locate the PC and signal
   mask in the platform-specific signal frame (32-bit/Solaris vs.
   64-bit Linux vs. OpenBSD), then decode the faulting instruction to
   detect stores (format-3 opcodes), since the kernel does not report
   the write flag directly. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
1365
1366#elif defined(__arm__)
1367
/* Host-side fault handler for ARM hosts: the PC field name in the
   ucontext changed between glibc <= 2.3 (gregs[R15]) and later
   releases (arm_pc).  The write flag is not recovered (XXX). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1387
38e584a0
FB
1388#elif defined(__mc68000)
1389
/* Host-side fault handler for m68k hosts: gregs[16] holds the PC in
   the m68k mcontext.  The write flag is not recovered (XXX). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1405
b8076a74
FB
1406#elif defined(__ia64)
1407
1408#ifndef __ISR_VALID
1409 /* This ought to be in <bits/siginfo.h>... */
1410# define __ISR_VALID 1
b8076a74
FB
1411#endif
1412
/* Host-side fault handler for IA-64 hosts: when the siginfo carries a
   valid ISR (Interruption Status Register), its W bit (bit 33) tells
   whether the faulting access was a write. */
int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1439
90cb9493
FB
1440#elif defined(__s390__)
1441
/* Host-side fault handler for s390 hosts: the PC comes from the PSW
   address in the mcontext.  The write flag is not recovered (XXX). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1456
1457#elif defined(__mips__)
1458
/* Host-side fault handler for MIPS hosts: the PC comes straight from
   the mcontext.  The write flag is not recovered (XXX). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1472
f54b3f92
AJ
1473#elif defined(__hppa__)
1474
1475int cpu_signal_handler(int host_signum, void *pinfo,
1476 void *puc)
1477{
1478 struct siginfo *info = pinfo;
1479 struct ucontext *uc = puc;
1480 unsigned long pc;
1481 int is_write;
1482
1483 pc = uc->uc_mcontext.sc_iaoq[0];
1484 /* FIXME: compute is_write */
1485 is_write = 0;
1486 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1487 is_write,
1488 &uc->uc_sigmask, puc);
1489}
1490
9de5e440 1491#else
2b413144 1492
3fb2ded1 1493#error host CPU specific signal handler needed
2b413144 1494
9de5e440 1495#endif
67b915a5
FB
1496
1497#endif /* !defined(CONFIG_SOFTMMU) */