]> git.proxmox.com Git - qemu.git/blame - cpu-exec.c
exploiting the new interface in vnc.c (Stefano Stabellini)
[qemu.git] / cpu-exec.c
CommitLineData
7d13299d
FB
1/*
2 * i386 emulator main execution loop
5fafdf24 3 *
66321a11 4 * Copyright (c) 2003-2005 Fabrice Bellard
7d13299d 5 *
3ef693a0
FB
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
7d13299d 10 *
3ef693a0
FB
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
7d13299d 15 *
3ef693a0
FB
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
fad6cb1a 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
7d13299d 19 */
e4533c7a 20#include "config.h"
7cb69cae 21#define CPU_NO_GLOBAL_REGS
93ac68bc 22#include "exec.h"
956034d7 23#include "disas.h"
7cb69cae 24#include "tcg.h"
7ba1e619 25#include "kvm.h"
7d13299d 26
fbf9eeb3
FB
27#if !defined(CONFIG_SOFTMMU)
28#undef EAX
29#undef ECX
30#undef EDX
31#undef EBX
32#undef ESP
33#undef EBP
34#undef ESI
35#undef EDI
36#undef EIP
37#include <signal.h>
84778508 38#ifdef __linux__
fbf9eeb3
FB
39#include <sys/ucontext.h>
40#endif
84778508 41#endif
fbf9eeb3 42
#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

/* Set by the translator when a TB is invalidated while code is being
   generated; the main loop checks it and drops its cached next_tb
   chaining pointer (see cpu_exec). */
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
7d13299d 53
/* Abort execution of the current TB and longjmp back to the setjmp
   point inside cpu_exec().  Must only be called while the global 'env'
   points at the currently executing CPU. */
void cpu_loop_exit(void)
{
    /* NOTE: the register at this point must be saved by hand because
       longjmp restore them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
bfed01fc 61
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        /* Restore the signal mask that was in effect when the fault was
           taken; longjmp below skips the normal sigreturn path that
           would otherwise do this. */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    /* No exception is pending: re-enter the main loop cleanly. */
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
92
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
/* Generates a throw-away TB limited to max_cycles instructions,
   executes it once, then invalidates and frees it.  Used by the icount
   machinery when fewer instructions remain than the cached TB holds. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
119
/* Look up a TB in the physical hash table by (pc, cs_base, flags),
   translating the code if no matching TB exists yet.  The result is
   also entered into the per-CPU virtual-pc jump cache. */
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                /* TB spans two pages: the second physical page must
                   also match for the cached translation to be valid. */
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
   /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
168
/* Fast-path TB lookup: consult the per-CPU jump cache keyed by virtual
   pc, falling back to tb_find_slow() on a miss or state mismatch. */
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
186
/* Optional callback invoked on EXCP_DEBUG (e.g. by the gdbstub). */
static CPUDebugExcpHandler *debug_excp_handler;

/* Install a new debug-exception handler, returning the previous one so
   the caller can chain or restore it. */
CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
196
/* Called from cpu_exec() when the loop exits with EXCP_DEBUG: clear
   stale watchpoint-hit flags (unless a watchpoint actually fired) and
   forward to the registered debug handler, if any. */
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
208
/* main execution loop */

/* Run translated code for 'env1' until an exception or an exit request
   occurs.  Returns the exception index (e.g. EXCP_INTERRUPT, EXCP_HLT,
   EXCP_DEBUG).  Structure: an outer setjmp loop that delivers pending
   exceptions/interrupts, and an inner loop that finds, chains and
   executes TBs until cpu_loop_exit() longjmps back out. */
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    /* Per-target hardware interrupt delivery. */
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occured at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                   /* Don't use the cached interupt_request value,
                      do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            /* longjmp target: resynchronize env from host registers
               after a cpu_loop_exit() / exception. */
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
6dbad63e 692
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
    /* Currently a no-op: the body below is compiled out. */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
705
1a18c71b 706#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
e4533c7a 707
/* Load a segment register for user-mode emulation.  In real mode or
   vm86 mode the selector is loaded directly into the cache; in
   protected mode the full descriptor-load helper is used. */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    /* Temporarily switch the global env to the target CPU state. */
    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
9de5e440 723
/* Wrapper around helper_fsave that runs with 'env' pointing at the
   given CPU state (FSAVE to guest address 'ptr'). */
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}
735
/* Wrapper around helper_frstor that runs with 'env' pointing at the
   given CPU state (FRSTOR from guest address 'ptr'). */
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}
747
e4533c7a
FB
748#endif /* TARGET_I386 */
749
67b915a5
FB
750#if !defined(CONFIG_SOFTMMU)
751
3fb2ded1
FB
752#if defined(TARGET_I386)
753
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0'. 'old_set' is the
   signal set which should be restored */
/* Returns 1 if the fault was handled (possibly by longjmp'ing back into
   the CPU loop), 0 if it was not an MMU fault and should be treated as
   a real host crash. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
806
#elif defined(TARGET_ARM)
/* ARM variant of handle_cpu_signal: same contract as the i386 version
   (return 1 = handled, 0 = real host fault), but unconditionally exits
   the CPU loop on a real guest fault. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
/* SPARC variant of handle_cpu_signal: same contract as the i386 version
   (return 1 = handled, 0 = real host fault). */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
/* PowerPC variant of handle_cpu_signal: same contract as the i386
   version.  ret == 1 raises the pending guest exception via
   cpu_loop_exit(); otherwise execution resumes with the soft MMU. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
932
933#elif defined(TARGET_M68K)
934static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
935 int is_write, sigset_t *old_set,
936 void *puc)
937{
938 TranslationBlock *tb;
939 int ret;
940
941 if (cpu_single_env)
942 env = cpu_single_env; /* XXX: find a correct solution for multithread */
943#if defined(DEBUG_SIGNAL)
5fafdf24 944 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
e6e5906b
PB
945 pc, address, is_write, *(unsigned long *)old_set);
946#endif
947 /* XXX: locking issue */
948 if (is_write && page_unprotect(address, pc, puc)) {
949 return 1;
950 }
951 /* see if it is an MMU fault */
6ebbf390 952 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
e6e5906b
PB
953 if (ret < 0)
954 return 0; /* not an MMU fault */
955 if (ret == 0)
956 return 1; /* the MMU fault was handled without causing real CPU fault */
957 /* now we have a real cpu fault */
958 tb = tb_find_pc(pc);
959 if (tb) {
960 /* the PC is inside the translated code. It means that we have
961 a virtual CPU fault */
962 cpu_restore_state(tb, env, pc, puc);
963 }
964 /* we restore the process signal mask as the sigreturn should
965 do it (XXX: use sigsetjmp) */
966 sigprocmask(SIG_SETMASK, old_set, NULL);
967 cpu_loop_exit();
968 /* never comes here */
67867308
FB
969 return 1;
970}
6af0bf9c
FB
971
972#elif defined (TARGET_MIPS)
973static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
974 int is_write, sigset_t *old_set,
975 void *puc)
976{
977 TranslationBlock *tb;
978 int ret;
3b46e624 979
6af0bf9c
FB
980 if (cpu_single_env)
981 env = cpu_single_env; /* XXX: find a correct solution for multithread */
982#if defined(DEBUG_SIGNAL)
5fafdf24 983 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
6af0bf9c
FB
984 pc, address, is_write, *(unsigned long *)old_set);
985#endif
986 /* XXX: locking issue */
53a5960a 987 if (is_write && page_unprotect(h2g(address), pc, puc)) {
6af0bf9c
FB
988 return 1;
989 }
990
991 /* see if it is an MMU fault */
6ebbf390 992 ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
6af0bf9c
FB
993 if (ret < 0)
994 return 0; /* not an MMU fault */
995 if (ret == 0)
996 return 1; /* the MMU fault was handled without causing real CPU fault */
997
998 /* now we have a real cpu fault */
999 tb = tb_find_pc(pc);
1000 if (tb) {
1001 /* the PC is inside the translated code. It means that we have
1002 a virtual CPU fault */
1003 cpu_restore_state(tb, env, pc, puc);
1004 }
1005 if (ret == 1) {
1006#if 0
5fafdf24 1007 printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
1eb5207b 1008 env->PC, env->error_code, tb);
6af0bf9c
FB
1009#endif
1010 /* we restore the process signal mask as the sigreturn should
1011 do it (XXX: use sigsetjmp) */
1012 sigprocmask(SIG_SETMASK, old_set, NULL);
f9480ffc 1013 cpu_loop_exit();
6af0bf9c
FB
1014 } else {
1015 /* activate soft MMU for this block */
1016 cpu_resume_from_signal(env, puc);
1017 }
1018 /* never comes here */
1019 return 1;
1020}
1021
fdf9b3e8
FB
1022#elif defined (TARGET_SH4)
1023static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1024 int is_write, sigset_t *old_set,
1025 void *puc)
1026{
1027 TranslationBlock *tb;
1028 int ret;
3b46e624 1029
fdf9b3e8
FB
1030 if (cpu_single_env)
1031 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1032#if defined(DEBUG_SIGNAL)
5fafdf24 1033 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
fdf9b3e8
FB
1034 pc, address, is_write, *(unsigned long *)old_set);
1035#endif
1036 /* XXX: locking issue */
1037 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1038 return 1;
1039 }
1040
1041 /* see if it is an MMU fault */
6ebbf390 1042 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
fdf9b3e8
FB
1043 if (ret < 0)
1044 return 0; /* not an MMU fault */
1045 if (ret == 0)
1046 return 1; /* the MMU fault was handled without causing real CPU fault */
1047
1048 /* now we have a real cpu fault */
eddf68a6
JM
1049 tb = tb_find_pc(pc);
1050 if (tb) {
1051 /* the PC is inside the translated code. It means that we have
1052 a virtual CPU fault */
1053 cpu_restore_state(tb, env, pc, puc);
1054 }
1055#if 0
5fafdf24 1056 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
eddf68a6
JM
1057 env->nip, env->error_code, tb);
1058#endif
1059 /* we restore the process signal mask as the sigreturn should
1060 do it (XXX: use sigsetjmp) */
1061 sigprocmask(SIG_SETMASK, old_set, NULL);
1062 cpu_loop_exit();
1063 /* never comes here */
1064 return 1;
1065}
1066
1067#elif defined (TARGET_ALPHA)
1068static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1069 int is_write, sigset_t *old_set,
1070 void *puc)
1071{
1072 TranslationBlock *tb;
1073 int ret;
3b46e624 1074
eddf68a6
JM
1075 if (cpu_single_env)
1076 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1077#if defined(DEBUG_SIGNAL)
5fafdf24 1078 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
eddf68a6
JM
1079 pc, address, is_write, *(unsigned long *)old_set);
1080#endif
1081 /* XXX: locking issue */
1082 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1083 return 1;
1084 }
1085
1086 /* see if it is an MMU fault */
6ebbf390 1087 ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
eddf68a6
JM
1088 if (ret < 0)
1089 return 0; /* not an MMU fault */
1090 if (ret == 0)
1091 return 1; /* the MMU fault was handled without causing real CPU fault */
1092
1093 /* now we have a real cpu fault */
fdf9b3e8
FB
1094 tb = tb_find_pc(pc);
1095 if (tb) {
1096 /* the PC is inside the translated code. It means that we have
1097 a virtual CPU fault */
1098 cpu_restore_state(tb, env, pc, puc);
1099 }
fdf9b3e8 1100#if 0
5fafdf24 1101 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
fdf9b3e8
FB
1102 env->nip, env->error_code, tb);
1103#endif
1104 /* we restore the process signal mask as the sigreturn should
1105 do it (XXX: use sigsetjmp) */
355fb23d
PB
1106 sigprocmask(SIG_SETMASK, old_set, NULL);
1107 cpu_loop_exit();
fdf9b3e8
FB
1108 /* never comes here */
1109 return 1;
1110}
f1ccf904
TS
1111#elif defined (TARGET_CRIS)
1112static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1113 int is_write, sigset_t *old_set,
1114 void *puc)
1115{
1116 TranslationBlock *tb;
1117 int ret;
1118
1119 if (cpu_single_env)
1120 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1121#if defined(DEBUG_SIGNAL)
1122 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1123 pc, address, is_write, *(unsigned long *)old_set);
1124#endif
1125 /* XXX: locking issue */
1126 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1127 return 1;
1128 }
1129
1130 /* see if it is an MMU fault */
6ebbf390 1131 ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
f1ccf904
TS
1132 if (ret < 0)
1133 return 0; /* not an MMU fault */
1134 if (ret == 0)
1135 return 1; /* the MMU fault was handled without causing real CPU fault */
1136
1137 /* now we have a real cpu fault */
1138 tb = tb_find_pc(pc);
1139 if (tb) {
1140 /* the PC is inside the translated code. It means that we have
1141 a virtual CPU fault */
1142 cpu_restore_state(tb, env, pc, puc);
1143 }
f1ccf904
TS
1144 /* we restore the process signal mask as the sigreturn should
1145 do it (XXX: use sigsetjmp) */
1146 sigprocmask(SIG_SETMASK, old_set, NULL);
1147 cpu_loop_exit();
1148 /* never comes here */
1149 return 1;
1150}
1151
e4533c7a
FB
1152#else
1153#error unsupported target CPU
1154#endif
9de5e440 1155
2b413144
FB
1156#if defined(__i386__)
1157
d8ecc0b9
FB
1158#if defined(__APPLE__)
1159# include <sys/ucontext.h>
1160
1161# define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1162# define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1163# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1164#else
1165# define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1166# define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1167# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1168#endif
1169
5fafdf24 1170int cpu_signal_handler(int host_signum, void *pinfo,
e4533c7a 1171 void *puc)
9de5e440 1172{
5a7b542b 1173 siginfo_t *info = pinfo;
9de5e440
FB
1174 struct ucontext *uc = puc;
1175 unsigned long pc;
bf3e8bf1 1176 int trapno;
97eb5b14 1177
d691f669
FB
1178#ifndef REG_EIP
1179/* for glibc 2.1 */
fd6ce8f6
FB
1180#define REG_EIP EIP
1181#define REG_ERR ERR
1182#define REG_TRAPNO TRAPNO
d691f669 1183#endif
d8ecc0b9
FB
1184 pc = EIP_sig(uc);
1185 trapno = TRAP_sig(uc);
ec6338ba
FB
1186 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1187 trapno == 0xe ?
1188 (ERROR_sig(uc) >> 1) & 1 : 0,
1189 &uc->uc_sigmask, puc);
2b413144
FB
1190}
1191
bc51c5c9
FB
1192#elif defined(__x86_64__)
1193
b3efe5c8
BS
1194#ifdef __NetBSD__
1195#define REG_ERR _REG_ERR
1196#define REG_TRAPNO _REG_TRAPNO
1197
1198#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
1199#define QEMU_UC_MACHINE_PC(uc) _UC_MACHINE_PC(uc)
1200#else
1201#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
1202#define QEMU_UC_MACHINE_PC(uc) QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
1203#endif
1204
5a7b542b 1205int cpu_signal_handler(int host_signum, void *pinfo,
bc51c5c9
FB
1206 void *puc)
1207{
5a7b542b 1208 siginfo_t *info = pinfo;
bc51c5c9 1209 unsigned long pc;
b3efe5c8
BS
1210#ifdef __NetBSD__
1211 ucontext_t *uc = puc;
1212#else
1213 struct ucontext *uc = puc;
1214#endif
bc51c5c9 1215
b3efe5c8 1216 pc = QEMU_UC_MACHINE_PC(uc);
5fafdf24 1217 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
b3efe5c8
BS
1218 QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
1219 (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
bc51c5c9
FB
1220 &uc->uc_sigmask, puc);
1221}
1222
e58ffeb3 1223#elif defined(_ARCH_PPC)
2b413144 1224
83fb7adf
FB
1225/***********************************************************************
1226 * signal context platform-specific definitions
1227 * From Wine
1228 */
1229#ifdef linux
1230/* All Registers access - only for local access */
1231# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1232/* Gpr Registers access */
1233# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1234# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1235# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1236# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1237# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1238# define LR_sig(context) REG_sig(link, context) /* Link register */
1239# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1240/* Float Registers access */
1241# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1242# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1243/* Exception Registers access */
1244# define DAR_sig(context) REG_sig(dar, context)
1245# define DSISR_sig(context) REG_sig(dsisr, context)
1246# define TRAP_sig(context) REG_sig(trap, context)
1247#endif /* linux */
1248
1249#ifdef __APPLE__
1250# include <sys/ucontext.h>
1251typedef struct ucontext SIGCONTEXT;
1252/* All Registers access - only for local access */
1253# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1254# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1255# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1256# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1257/* Gpr Registers access */
1258# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1259# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1260# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1261# define CTR_sig(context) REG_sig(ctr, context)
1262# define XER_sig(context) REG_sig(xer, context) /* Link register */
1263# define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1264# define CR_sig(context) REG_sig(cr, context) /* Condition register */
1265/* Float Registers access */
1266# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1267# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1268/* Exception Registers access */
1269# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1270# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1271# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1272#endif /* __APPLE__ */
1273
5fafdf24 1274int cpu_signal_handler(int host_signum, void *pinfo,
e4533c7a 1275 void *puc)
2b413144 1276{
5a7b542b 1277 siginfo_t *info = pinfo;
25eb4484 1278 struct ucontext *uc = puc;
25eb4484 1279 unsigned long pc;
25eb4484
FB
1280 int is_write;
1281
83fb7adf 1282 pc = IAR_sig(uc);
25eb4484
FB
1283 is_write = 0;
1284#if 0
1285 /* ppc 4xx case */
83fb7adf 1286 if (DSISR_sig(uc) & 0x00800000)
25eb4484
FB
1287 is_write = 1;
1288#else
83fb7adf 1289 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
25eb4484
FB
1290 is_write = 1;
1291#endif
5fafdf24 1292 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
bf3e8bf1 1293 is_write, &uc->uc_sigmask, puc);
2b413144
FB
1294}
1295
2f87c607
FB
1296#elif defined(__alpha__)
1297
5fafdf24 1298int cpu_signal_handler(int host_signum, void *pinfo,
2f87c607
FB
1299 void *puc)
1300{
5a7b542b 1301 siginfo_t *info = pinfo;
2f87c607
FB
1302 struct ucontext *uc = puc;
1303 uint32_t *pc = uc->uc_mcontext.sc_pc;
1304 uint32_t insn = *pc;
1305 int is_write = 0;
1306
8c6939c0 1307 /* XXX: need kernel patch to get write flag faster */
2f87c607
FB
1308 switch (insn >> 26) {
1309 case 0x0d: // stw
1310 case 0x0e: // stb
1311 case 0x0f: // stq_u
1312 case 0x24: // stf
1313 case 0x25: // stg
1314 case 0x26: // sts
1315 case 0x27: // stt
1316 case 0x2c: // stl
1317 case 0x2d: // stq
1318 case 0x2e: // stl_c
1319 case 0x2f: // stq_c
1320 is_write = 1;
1321 }
1322
5fafdf24 1323 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
bf3e8bf1 1324 is_write, &uc->uc_sigmask, puc);
2f87c607 1325}
8c6939c0
FB
1326#elif defined(__sparc__)
1327
5fafdf24 1328int cpu_signal_handler(int host_signum, void *pinfo,
e4533c7a 1329 void *puc)
8c6939c0 1330{
5a7b542b 1331 siginfo_t *info = pinfo;
8c6939c0
FB
1332 int is_write;
1333 uint32_t insn;
6b4c11cd 1334#if !defined(__arch64__) || defined(HOST_SOLARIS)
c9e1e2b0
BS
1335 uint32_t *regs = (uint32_t *)(info + 1);
1336 void *sigmask = (regs + 20);
8c6939c0 1337 /* XXX: is there a standard glibc define ? */
c9e1e2b0
BS
1338 unsigned long pc = regs[1];
1339#else
84778508 1340#ifdef __linux__
c9e1e2b0
BS
1341 struct sigcontext *sc = puc;
1342 unsigned long pc = sc->sigc_regs.tpc;
1343 void *sigmask = (void *)sc->sigc_mask;
84778508
BS
1344#elif defined(__OpenBSD__)
1345 struct sigcontext *uc = puc;
1346 unsigned long pc = uc->sc_pc;
1347 void *sigmask = (void *)(long)uc->sc_mask;
1348#endif
c9e1e2b0
BS
1349#endif
1350
8c6939c0
FB
1351 /* XXX: need kernel patch to get write flag faster */
1352 is_write = 0;
1353 insn = *(uint32_t *)pc;
1354 if ((insn >> 30) == 3) {
1355 switch((insn >> 19) & 0x3f) {
1356 case 0x05: // stb
1357 case 0x06: // sth
1358 case 0x04: // st
1359 case 0x07: // std
1360 case 0x24: // stf
1361 case 0x27: // stdf
1362 case 0x25: // stfsr
1363 is_write = 1;
1364 break;
1365 }
1366 }
5fafdf24 1367 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
bf3e8bf1 1368 is_write, sigmask, NULL);
8c6939c0
FB
1369}
1370
1371#elif defined(__arm__)
1372
5fafdf24 1373int cpu_signal_handler(int host_signum, void *pinfo,
e4533c7a 1374 void *puc)
8c6939c0 1375{
5a7b542b 1376 siginfo_t *info = pinfo;
8c6939c0
FB
1377 struct ucontext *uc = puc;
1378 unsigned long pc;
1379 int is_write;
3b46e624 1380
48bbf11b 1381#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
5c49b363
AZ
1382 pc = uc->uc_mcontext.gregs[R15];
1383#else
4eee57f5 1384 pc = uc->uc_mcontext.arm_pc;
5c49b363 1385#endif
8c6939c0
FB
1386 /* XXX: compute is_write */
1387 is_write = 0;
5fafdf24 1388 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
8c6939c0 1389 is_write,
f3a9676a 1390 &uc->uc_sigmask, puc);
8c6939c0
FB
1391}
1392
38e584a0
FB
1393#elif defined(__mc68000)
1394
5fafdf24 1395int cpu_signal_handler(int host_signum, void *pinfo,
38e584a0
FB
1396 void *puc)
1397{
5a7b542b 1398 siginfo_t *info = pinfo;
38e584a0
FB
1399 struct ucontext *uc = puc;
1400 unsigned long pc;
1401 int is_write;
3b46e624 1402
38e584a0
FB
1403 pc = uc->uc_mcontext.gregs[16];
1404 /* XXX: compute is_write */
1405 is_write = 0;
5fafdf24 1406 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
38e584a0 1407 is_write,
bf3e8bf1 1408 &uc->uc_sigmask, puc);
38e584a0
FB
1409}
1410
b8076a74
FB
1411#elif defined(__ia64)
1412
1413#ifndef __ISR_VALID
1414 /* This ought to be in <bits/siginfo.h>... */
1415# define __ISR_VALID 1
b8076a74
FB
1416#endif
1417
5a7b542b 1418int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
b8076a74 1419{
5a7b542b 1420 siginfo_t *info = pinfo;
b8076a74
FB
1421 struct ucontext *uc = puc;
1422 unsigned long ip;
1423 int is_write = 0;
1424
1425 ip = uc->uc_mcontext.sc_ip;
1426 switch (host_signum) {
1427 case SIGILL:
1428 case SIGFPE:
1429 case SIGSEGV:
1430 case SIGBUS:
1431 case SIGTRAP:
fd4a43e4 1432 if (info->si_code && (info->si_segvflags & __ISR_VALID))
b8076a74
FB
1433 /* ISR.W (write-access) is bit 33: */
1434 is_write = (info->si_isr >> 33) & 1;
1435 break;
1436
1437 default:
1438 break;
1439 }
1440 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1441 is_write,
1442 &uc->uc_sigmask, puc);
1443}
1444
90cb9493
FB
1445#elif defined(__s390__)
1446
5fafdf24 1447int cpu_signal_handler(int host_signum, void *pinfo,
90cb9493
FB
1448 void *puc)
1449{
5a7b542b 1450 siginfo_t *info = pinfo;
90cb9493
FB
1451 struct ucontext *uc = puc;
1452 unsigned long pc;
1453 int is_write;
3b46e624 1454
90cb9493
FB
1455 pc = uc->uc_mcontext.psw.addr;
1456 /* XXX: compute is_write */
1457 is_write = 0;
5fafdf24 1458 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
c4b89d18
TS
1459 is_write, &uc->uc_sigmask, puc);
1460}
1461
1462#elif defined(__mips__)
1463
5fafdf24 1464int cpu_signal_handler(int host_signum, void *pinfo,
c4b89d18
TS
1465 void *puc)
1466{
9617efe8 1467 siginfo_t *info = pinfo;
c4b89d18
TS
1468 struct ucontext *uc = puc;
1469 greg_t pc = uc->uc_mcontext.pc;
1470 int is_write;
3b46e624 1471
c4b89d18
TS
1472 /* XXX: compute is_write */
1473 is_write = 0;
5fafdf24 1474 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
c4b89d18 1475 is_write, &uc->uc_sigmask, puc);
90cb9493
FB
1476}
1477
f54b3f92
AJ
1478#elif defined(__hppa__)
1479
1480int cpu_signal_handler(int host_signum, void *pinfo,
1481 void *puc)
1482{
1483 struct siginfo *info = pinfo;
1484 struct ucontext *uc = puc;
1485 unsigned long pc;
1486 int is_write;
1487
1488 pc = uc->uc_mcontext.sc_iaoq[0];
1489 /* FIXME: compute is_write */
1490 is_write = 0;
1491 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1492 is_write,
1493 &uc->uc_sigmask, puc);
1494}
1495
9de5e440 1496#else
2b413144 1497
3fb2ded1 1498#error host CPU specific signal handler needed
2b413144 1499
9de5e440 1500#endif
67b915a5
FB
1501
1502#endif /* !defined(CONFIG_SOFTMMU) */